[X86] Lowering FMA intrinsics to native IR (LLVM part)

Support for Clang lowering of fused intrinsics. This patch:

1. Removes bindings to clang fma intrinsics.
2. Introduces new LLVM unmasked intrinsics with rounding mode:
     int_x86_avx512_vfmadd_pd_512
     int_x86_avx512_vfmadd_ps_512
     int_x86_avx512_vfmaddsub_pd_512
     int_x86_avx512_vfmaddsub_ps_512
     supported with a new intrinsic type (INTR_TYPE_3OP_RM).
3. Introduces new x86 fmaddsub/fmsubadd folding.
4. Introduces new tests for code emitted by sequences introduced in Clang part.

Patch by tkrupa

Reviewers: craig.topper, sroland, spatel, RKSimon

Reviewed By: craig.topper, RKSimon

Differential Revision: https://reviews.llvm.org/D47443

llvm-svn: 333554
This commit is contained in:
Gabor Buella 2018-05-30 15:25:16 +00:00
parent 707e68fb21
commit 890e363e11
11 changed files with 9936 additions and 234 deletions

View File

@ -1922,11 +1922,11 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
// FMA3 and FMA4
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_fma_vfmadd_ss : GCCBuiltin<"__builtin_ia32_vfmaddss3">,
def int_x86_fma_vfmadd_ss : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v4f32_ty],
[llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty],
[IntrNoMem]>;
def int_x86_fma_vfmadd_sd : GCCBuiltin<"__builtin_ia32_vfmaddsd3">,
def int_x86_fma_vfmadd_sd : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v2f64_ty],
[llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty],
[IntrNoMem]>;
@ -1938,254 +1938,236 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
Intrinsic<[llvm_v2f64_ty],
[llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty],
[IntrNoMem]>;
def int_x86_fma_vfmadd_ps : GCCBuiltin<"__builtin_ia32_vfmaddps">,
def int_x86_fma_vfmadd_ps : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v4f32_ty],
[llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty],
[IntrNoMem]>;
def int_x86_fma_vfmadd_pd : GCCBuiltin<"__builtin_ia32_vfmaddpd">,
def int_x86_fma_vfmadd_pd : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v2f64_ty],
[llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty],
[IntrNoMem]>;
def int_x86_fma_vfmadd_ps_256 : GCCBuiltin<"__builtin_ia32_vfmaddps256">,
def int_x86_fma_vfmadd_ps_256 : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v8f32_ty],
[llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty],
[IntrNoMem]>;
def int_x86_fma_vfmadd_pd_256 : GCCBuiltin<"__builtin_ia32_vfmaddpd256">,
def int_x86_fma_vfmadd_pd_256 : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v4f64_ty],
[llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty],
[IntrNoMem]>;
def int_x86_fma_vfmaddsub_ps : GCCBuiltin<"__builtin_ia32_vfmaddsubps">,
def int_x86_fma_vfmaddsub_ps : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v4f32_ty],
[llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty],
[IntrNoMem]>;
def int_x86_fma_vfmaddsub_pd : GCCBuiltin<"__builtin_ia32_vfmaddsubpd">,
def int_x86_fma_vfmaddsub_pd : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v2f64_ty],
[llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty],
[IntrNoMem]>;
def int_x86_fma_vfmaddsub_ps_256 :
GCCBuiltin<"__builtin_ia32_vfmaddsubps256">,
def int_x86_fma_vfmaddsub_ps_256 : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v8f32_ty],
[llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty],
[IntrNoMem]>;
def int_x86_fma_vfmaddsub_pd_256 :
GCCBuiltin<"__builtin_ia32_vfmaddsubpd256">,
def int_x86_fma_vfmaddsub_pd_256 : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v4f64_ty],
[llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty],
[IntrNoMem]>;
def int_x86_avx512_mask_vfmadd_pd_128 :
GCCBuiltin<"__builtin_ia32_vfmaddpd128_mask">,
def int_x86_avx512_mask_vfmadd_pd_128 : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v2f64_ty],
[llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty, llvm_i8_ty],
[IntrNoMem]>;
def int_x86_avx512_mask3_vfmadd_pd_128 :
GCCBuiltin<"__builtin_ia32_vfmaddpd128_mask3">,
def int_x86_avx512_mask3_vfmadd_pd_128 : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v2f64_ty],
[llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty, llvm_i8_ty],
[IntrNoMem]>;
def int_x86_avx512_maskz_vfmadd_pd_128 :
GCCBuiltin<"__builtin_ia32_vfmaddpd128_maskz">,
def int_x86_avx512_maskz_vfmadd_pd_128 : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v2f64_ty],
[llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty, llvm_i8_ty],
[IntrNoMem]>;
def int_x86_avx512_mask_vfmadd_pd_256 :
GCCBuiltin<"__builtin_ia32_vfmaddpd256_mask">,
def int_x86_avx512_mask_vfmadd_pd_256 : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v4f64_ty],
[llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty, llvm_i8_ty],
[IntrNoMem]>;
def int_x86_avx512_mask3_vfmadd_pd_256 :
GCCBuiltin<"__builtin_ia32_vfmaddpd256_mask3">,
def int_x86_avx512_mask3_vfmadd_pd_256 : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v4f64_ty],
[llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty, llvm_i8_ty],
[IntrNoMem]>;
def int_x86_avx512_maskz_vfmadd_pd_256 :
GCCBuiltin<"__builtin_ia32_vfmaddpd256_maskz">,
def int_x86_avx512_maskz_vfmadd_pd_256 : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v4f64_ty],
[llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty, llvm_i8_ty],
[IntrNoMem]>;
def int_x86_avx512_mask_vfmadd_pd_512 :
GCCBuiltin<"__builtin_ia32_vfmaddpd512_mask">,
def int_x86_avx512_vfmadd_pd_512 :
Intrinsic<[llvm_v8f64_ty],
[llvm_v8f64_ty, llvm_v8f64_ty, llvm_v8f64_ty, llvm_i8_ty,
[llvm_v8f64_ty, llvm_v8f64_ty, llvm_v8f64_ty, llvm_i32_ty],
[IntrNoMem]>;
def int_x86_avx512_mask_vfmadd_pd_512 : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v8f64_ty],
[llvm_v8f64_ty, llvm_v8f64_ty, llvm_v8f64_ty, llvm_i8_ty,
llvm_i32_ty], [IntrNoMem]>;
def int_x86_avx512_mask3_vfmadd_pd_512 :
GCCBuiltin<"__builtin_ia32_vfmaddpd512_mask3">,
Intrinsic<[llvm_v8f64_ty],
[llvm_v8f64_ty, llvm_v8f64_ty, llvm_v8f64_ty, llvm_i8_ty,
llvm_i32_ty], [IntrNoMem]>;
def int_x86_avx512_maskz_vfmadd_pd_512 :
GCCBuiltin<"__builtin_ia32_vfmaddpd512_maskz">,
def int_x86_avx512_maskz_vfmadd_pd_512 : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v8f64_ty],
[llvm_v8f64_ty, llvm_v8f64_ty, llvm_v8f64_ty, llvm_i8_ty,
llvm_i32_ty], [IntrNoMem]>;
def int_x86_avx512_mask_vfmadd_ps_128 :
GCCBuiltin<"__builtin_ia32_vfmaddps128_mask">,
def int_x86_avx512_mask_vfmadd_ps_128 : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v4f32_ty],
[llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty],
[IntrNoMem]>;
def int_x86_avx512_mask3_vfmadd_ps_128 :
GCCBuiltin<"__builtin_ia32_vfmaddps128_mask3">,
def int_x86_avx512_mask3_vfmadd_ps_128 : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v4f32_ty],
[llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty],
[IntrNoMem]>;
def int_x86_avx512_maskz_vfmadd_ps_128 :
GCCBuiltin<"__builtin_ia32_vfmaddps128_maskz">,
def int_x86_avx512_maskz_vfmadd_ps_128 : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v4f32_ty],
[llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty],
[IntrNoMem]>;
def int_x86_avx512_mask_vfmadd_ps_256 :
GCCBuiltin<"__builtin_ia32_vfmaddps256_mask">,
def int_x86_avx512_mask_vfmadd_ps_256 : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v8f32_ty],
[llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty, llvm_i8_ty],
[IntrNoMem]>;
def int_x86_avx512_mask3_vfmadd_ps_256 :
GCCBuiltin<"__builtin_ia32_vfmaddps256_mask3">,
def int_x86_avx512_mask3_vfmadd_ps_256 : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v8f32_ty],
[llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty, llvm_i8_ty],
[IntrNoMem]>;
def int_x86_avx512_maskz_vfmadd_ps_256 :
GCCBuiltin<"__builtin_ia32_vfmaddps256_maskz">,
def int_x86_avx512_maskz_vfmadd_ps_256 : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v8f32_ty],
[llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty, llvm_i8_ty],
[IntrNoMem]>;
def int_x86_avx512_mask_vfmadd_ps_512 :
GCCBuiltin<"__builtin_ia32_vfmaddps512_mask">,
def int_x86_avx512_vfmadd_ps_512 :
Intrinsic<[llvm_v16f32_ty],
[llvm_v16f32_ty, llvm_v16f32_ty, llvm_v16f32_ty, llvm_i32_ty],
[IntrNoMem]>;
def int_x86_avx512_mask_vfmadd_ps_512 : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v16f32_ty],
[llvm_v16f32_ty, llvm_v16f32_ty, llvm_v16f32_ty, llvm_i16_ty,
llvm_i32_ty], [IntrNoMem]>;
def int_x86_avx512_mask3_vfmadd_ps_512 :
GCCBuiltin<"__builtin_ia32_vfmaddps512_mask3">,
def int_x86_avx512_mask3_vfmadd_ps_512 : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v16f32_ty],
[llvm_v16f32_ty, llvm_v16f32_ty, llvm_v16f32_ty, llvm_i16_ty,
llvm_i32_ty], [IntrNoMem]>;
def int_x86_avx512_maskz_vfmadd_ps_512 :
GCCBuiltin<"__builtin_ia32_vfmaddps512_maskz">,
def int_x86_avx512_maskz_vfmadd_ps_512 : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v16f32_ty],
[llvm_v16f32_ty, llvm_v16f32_ty, llvm_v16f32_ty, llvm_i16_ty,
llvm_i32_ty], [IntrNoMem]>;
def int_x86_avx512_mask_vfmaddsub_pd_128 :
GCCBuiltin<"__builtin_ia32_vfmaddsubpd128_mask">,
def int_x86_avx512_mask_vfmaddsub_pd_128 : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v2f64_ty],
[llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty, llvm_i8_ty],
[IntrNoMem]>;
def int_x86_avx512_mask3_vfmaddsub_pd_128 :
GCCBuiltin<"__builtin_ia32_vfmaddsubpd128_mask3">,
def int_x86_avx512_mask3_vfmaddsub_pd_128 : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v2f64_ty],
[llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty, llvm_i8_ty],
[IntrNoMem]>;
def int_x86_avx512_maskz_vfmaddsub_pd_128 :
GCCBuiltin<"__builtin_ia32_vfmaddsubpd128_maskz">,
def int_x86_avx512_maskz_vfmaddsub_pd_128 : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v2f64_ty],
[llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty, llvm_i8_ty],
[IntrNoMem]>;
def int_x86_avx512_mask_vfmaddsub_pd_256 :
GCCBuiltin<"__builtin_ia32_vfmaddsubpd256_mask">,
def int_x86_avx512_mask_vfmaddsub_pd_256 : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v4f64_ty],
[llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty, llvm_i8_ty],
[IntrNoMem]>;
def int_x86_avx512_mask3_vfmaddsub_pd_256 :
GCCBuiltin<"__builtin_ia32_vfmaddsubpd256_mask3">,
def int_x86_avx512_mask3_vfmaddsub_pd_256 : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v4f64_ty],
[llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty, llvm_i8_ty],
[IntrNoMem]>;
def int_x86_avx512_maskz_vfmaddsub_pd_256 :
GCCBuiltin<"__builtin_ia32_vfmaddsubpd256_maskz">,
def int_x86_avx512_maskz_vfmaddsub_pd_256 : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v4f64_ty],
[llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty, llvm_i8_ty],
[IntrNoMem]>;
def int_x86_avx512_mask_vfmaddsub_pd_512 :
GCCBuiltin<"__builtin_ia32_vfmaddsubpd512_mask">,
def int_x86_avx512_vfmaddsub_pd_512 :
Intrinsic<[llvm_v8f64_ty],
[llvm_v8f64_ty, llvm_v8f64_ty, llvm_v8f64_ty, llvm_i32_ty],
[IntrNoMem]>;
def int_x86_avx512_mask_vfmaddsub_pd_512 : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v8f64_ty],
[llvm_v8f64_ty, llvm_v8f64_ty, llvm_v8f64_ty, llvm_i8_ty,
llvm_i32_ty], [IntrNoMem]>;
def int_x86_avx512_mask3_vfmaddsub_pd_512 :
GCCBuiltin<"__builtin_ia32_vfmaddsubpd512_mask3">,
def int_x86_avx512_mask3_vfmaddsub_pd_512 : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v8f64_ty],
[llvm_v8f64_ty, llvm_v8f64_ty, llvm_v8f64_ty, llvm_i8_ty,
llvm_i32_ty], [IntrNoMem]>;
def int_x86_avx512_maskz_vfmaddsub_pd_512 :
GCCBuiltin<"__builtin_ia32_vfmaddsubpd512_maskz">,
def int_x86_avx512_maskz_vfmaddsub_pd_512 : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v8f64_ty],
[llvm_v8f64_ty, llvm_v8f64_ty, llvm_v8f64_ty, llvm_i8_ty,
llvm_i32_ty], [IntrNoMem]>;
def int_x86_avx512_mask_vfmaddsub_ps_128 :
GCCBuiltin<"__builtin_ia32_vfmaddsubps128_mask">,
def int_x86_avx512_mask_vfmaddsub_ps_128 : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v4f32_ty],
[llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty],
[IntrNoMem]>;
def int_x86_avx512_mask3_vfmaddsub_ps_128 :
GCCBuiltin<"__builtin_ia32_vfmaddsubps128_mask3">,
def int_x86_avx512_mask3_vfmaddsub_ps_128 : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v4f32_ty],
[llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty],
[IntrNoMem]>;
def int_x86_avx512_maskz_vfmaddsub_ps_128 :
GCCBuiltin<"__builtin_ia32_vfmaddsubps128_maskz">,
def int_x86_avx512_maskz_vfmaddsub_ps_128 : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v4f32_ty],
[llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty],
[IntrNoMem]>;
def int_x86_avx512_mask_vfmaddsub_ps_256 :
GCCBuiltin<"__builtin_ia32_vfmaddsubps256_mask">,
def int_x86_avx512_mask_vfmaddsub_ps_256 : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v8f32_ty],
[llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty, llvm_i8_ty],
[IntrNoMem]>;
def int_x86_avx512_mask3_vfmaddsub_ps_256 :
GCCBuiltin<"__builtin_ia32_vfmaddsubps256_mask3">,
def int_x86_avx512_mask3_vfmaddsub_ps_256 : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v8f32_ty],
[llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty, llvm_i8_ty],
[IntrNoMem]>;
def int_x86_avx512_maskz_vfmaddsub_ps_256 :
GCCBuiltin<"__builtin_ia32_vfmaddsubps256_maskz">,
def int_x86_avx512_maskz_vfmaddsub_ps_256 : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v8f32_ty],
[llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty, llvm_i8_ty],
[IntrNoMem]>;
def int_x86_avx512_mask_vfmaddsub_ps_512 :
GCCBuiltin<"__builtin_ia32_vfmaddsubps512_mask">,
def int_x86_avx512_vfmaddsub_ps_512 :
Intrinsic<[llvm_v16f32_ty],
[llvm_v16f32_ty, llvm_v16f32_ty, llvm_v16f32_ty, llvm_i32_ty],
[IntrNoMem]>;
def int_x86_avx512_mask_vfmaddsub_ps_512 : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v16f32_ty],
[llvm_v16f32_ty, llvm_v16f32_ty, llvm_v16f32_ty, llvm_i16_ty,
llvm_i32_ty], [IntrNoMem]>;
def int_x86_avx512_mask3_vfmaddsub_ps_512 :
GCCBuiltin<"__builtin_ia32_vfmaddsubps512_mask3">,
def int_x86_avx512_mask3_vfmaddsub_ps_512 : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v16f32_ty],
[llvm_v16f32_ty, llvm_v16f32_ty, llvm_v16f32_ty, llvm_i16_ty,
llvm_i32_ty], [IntrNoMem]>;
def int_x86_avx512_maskz_vfmaddsub_ps_512 :
GCCBuiltin<"__builtin_ia32_vfmaddsubps512_maskz">,
def int_x86_avx512_maskz_vfmaddsub_ps_512 : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v16f32_ty],
[llvm_v16f32_ty, llvm_v16f32_ty, llvm_v16f32_ty, llvm_i16_ty,
llvm_i32_ty], [IntrNoMem]>;
@ -2239,110 +2221,92 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
[llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty,
llvm_i32_ty], [IntrNoMem]>;
def int_x86_avx512_mask3_vfmsub_pd_128 :
GCCBuiltin<"__builtin_ia32_vfmsubpd128_mask3">,
def int_x86_avx512_mask3_vfmsub_pd_128 : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v2f64_ty],
[llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty, llvm_i8_ty],
[IntrNoMem]>;
def int_x86_avx512_mask3_vfmsub_pd_256 :
GCCBuiltin<"__builtin_ia32_vfmsubpd256_mask3">,
def int_x86_avx512_mask3_vfmsub_pd_256 : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v4f64_ty],
[llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty, llvm_i8_ty],
[IntrNoMem]>;
def int_x86_avx512_mask3_vfmsub_pd_512 :
GCCBuiltin<"__builtin_ia32_vfmsubpd512_mask3">,
def int_x86_avx512_mask3_vfmsub_pd_512 : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v8f64_ty],
[llvm_v8f64_ty, llvm_v8f64_ty, llvm_v8f64_ty, llvm_i8_ty,
llvm_i32_ty], [IntrNoMem]>;
def int_x86_avx512_mask3_vfmsub_ps_128 :
GCCBuiltin<"__builtin_ia32_vfmsubps128_mask3">,
def int_x86_avx512_mask3_vfmsub_ps_128 : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v4f32_ty],
[llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty],
[IntrNoMem]>;
def int_x86_avx512_mask3_vfmsub_ps_256 :
GCCBuiltin<"__builtin_ia32_vfmsubps256_mask3">,
def int_x86_avx512_mask3_vfmsub_ps_256 : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v8f32_ty],
[llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty, llvm_i8_ty],
[IntrNoMem]>;
def int_x86_avx512_mask3_vfmsub_ps_512 :
GCCBuiltin<"__builtin_ia32_vfmsubps512_mask3">,
def int_x86_avx512_mask3_vfmsub_ps_512 : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v16f32_ty],
[llvm_v16f32_ty, llvm_v16f32_ty, llvm_v16f32_ty, llvm_i16_ty,
llvm_i32_ty], [IntrNoMem]>;
def int_x86_avx512_mask3_vfmsubadd_pd_128 :
GCCBuiltin<"__builtin_ia32_vfmsubaddpd128_mask3">,
def int_x86_avx512_mask3_vfmsubadd_pd_128 : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v2f64_ty],
[llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty, llvm_i8_ty],
[IntrNoMem]>;
def int_x86_avx512_mask3_vfmsubadd_pd_256 :
GCCBuiltin<"__builtin_ia32_vfmsubaddpd256_mask3">,
def int_x86_avx512_mask3_vfmsubadd_pd_256 : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v4f64_ty],
[llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty, llvm_i8_ty],
[IntrNoMem]>;
def int_x86_avx512_mask3_vfmsubadd_pd_512 :
GCCBuiltin<"__builtin_ia32_vfmsubaddpd512_mask3">,
def int_x86_avx512_mask3_vfmsubadd_pd_512 : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v8f64_ty],
[llvm_v8f64_ty, llvm_v8f64_ty, llvm_v8f64_ty, llvm_i8_ty,
llvm_i32_ty], [IntrNoMem]>;
def int_x86_avx512_mask3_vfmsubadd_ps_128 :
GCCBuiltin<"__builtin_ia32_vfmsubaddps128_mask3">,
def int_x86_avx512_mask3_vfmsubadd_ps_128 : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v4f32_ty],
[llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty],
[IntrNoMem]>;
def int_x86_avx512_mask3_vfmsubadd_ps_256 :
GCCBuiltin<"__builtin_ia32_vfmsubaddps256_mask3">,
def int_x86_avx512_mask3_vfmsubadd_ps_256 : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v8f32_ty],
[llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty, llvm_i8_ty],
[IntrNoMem]>;
def int_x86_avx512_mask3_vfmsubadd_ps_512 :
GCCBuiltin<"__builtin_ia32_vfmsubaddps512_mask3">,
def int_x86_avx512_mask3_vfmsubadd_ps_512 : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v16f32_ty],
[llvm_v16f32_ty, llvm_v16f32_ty, llvm_v16f32_ty, llvm_i16_ty,
llvm_i32_ty], [IntrNoMem]>;
def int_x86_avx512_mask_vfnmadd_pd_128 :
GCCBuiltin<"__builtin_ia32_vfnmaddpd128_mask">,
def int_x86_avx512_mask_vfnmadd_pd_128 : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v2f64_ty],
[llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty, llvm_i8_ty],
[IntrNoMem]>;
def int_x86_avx512_mask_vfnmadd_pd_256 :
GCCBuiltin<"__builtin_ia32_vfnmaddpd256_mask">,
def int_x86_avx512_mask_vfnmadd_pd_256 : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v4f64_ty],
[llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty, llvm_i8_ty],
[IntrNoMem]>;
def int_x86_avx512_mask_vfnmadd_pd_512 :
GCCBuiltin<"__builtin_ia32_vfnmaddpd512_mask">,
def int_x86_avx512_mask_vfnmadd_pd_512 : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v8f64_ty],
[llvm_v8f64_ty, llvm_v8f64_ty, llvm_v8f64_ty, llvm_i8_ty,
llvm_i32_ty], [IntrNoMem]>;
def int_x86_avx512_mask_vfnmadd_ps_128 :
GCCBuiltin<"__builtin_ia32_vfnmaddps128_mask">,
def int_x86_avx512_mask_vfnmadd_ps_128 : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v4f32_ty],
[llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty],
[IntrNoMem]>;
def int_x86_avx512_mask_vfnmadd_ps_256 :
GCCBuiltin<"__builtin_ia32_vfnmaddps256_mask">,
def int_x86_avx512_mask_vfnmadd_ps_256 : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v8f32_ty],
[llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty, llvm_i8_ty],
[IntrNoMem]>;
def int_x86_avx512_mask_vfnmadd_ps_512 :
GCCBuiltin<"__builtin_ia32_vfnmaddps512_mask">,
def int_x86_avx512_mask_vfnmadd_ps_512 : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v16f32_ty],
[llvm_v16f32_ty, llvm_v16f32_ty, llvm_v16f32_ty, llvm_i16_ty,
llvm_i32_ty], [IntrNoMem]>;
@ -2359,74 +2323,62 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
[llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty,
llvm_i32_ty], [IntrNoMem]>;
def int_x86_avx512_mask_vfnmsub_pd_128 :
GCCBuiltin<"__builtin_ia32_vfnmsubpd128_mask">,
def int_x86_avx512_mask_vfnmsub_pd_128 : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v2f64_ty],
[llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty, llvm_i8_ty],
[IntrNoMem]>;
def int_x86_avx512_mask3_vfnmsub_pd_128 :
GCCBuiltin<"__builtin_ia32_vfnmsubpd128_mask3">,
def int_x86_avx512_mask3_vfnmsub_pd_128 : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v2f64_ty],
[llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty, llvm_i8_ty],
[IntrNoMem]>;
def int_x86_avx512_mask_vfnmsub_pd_256 :
GCCBuiltin<"__builtin_ia32_vfnmsubpd256_mask">,
def int_x86_avx512_mask_vfnmsub_pd_256 : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v4f64_ty],
[llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty, llvm_i8_ty],
[IntrNoMem]>;
def int_x86_avx512_mask3_vfnmsub_pd_256 :
GCCBuiltin<"__builtin_ia32_vfnmsubpd256_mask3">,
def int_x86_avx512_mask3_vfnmsub_pd_256 : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v4f64_ty],
[llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty, llvm_i8_ty],
[IntrNoMem]>;
def int_x86_avx512_mask_vfnmsub_pd_512 :
GCCBuiltin<"__builtin_ia32_vfnmsubpd512_mask">,
def int_x86_avx512_mask_vfnmsub_pd_512 : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v8f64_ty],
[llvm_v8f64_ty, llvm_v8f64_ty, llvm_v8f64_ty, llvm_i8_ty,
llvm_i32_ty], [IntrNoMem]>;
def int_x86_avx512_mask3_vfnmsub_pd_512 :
GCCBuiltin<"__builtin_ia32_vfnmsubpd512_mask3">,
def int_x86_avx512_mask3_vfnmsub_pd_512 : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v8f64_ty],
[llvm_v8f64_ty, llvm_v8f64_ty, llvm_v8f64_ty, llvm_i8_ty,
llvm_i32_ty], [IntrNoMem]>;
def int_x86_avx512_mask_vfnmsub_ps_128 :
GCCBuiltin<"__builtin_ia32_vfnmsubps128_mask">,
def int_x86_avx512_mask_vfnmsub_ps_128 : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v4f32_ty],
[llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty],
[IntrNoMem]>;
def int_x86_avx512_mask3_vfnmsub_ps_128 :
GCCBuiltin<"__builtin_ia32_vfnmsubps128_mask3">,
def int_x86_avx512_mask3_vfnmsub_ps_128 : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v4f32_ty],
[llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty],
[IntrNoMem]>;
def int_x86_avx512_mask_vfnmsub_ps_256 :
GCCBuiltin<"__builtin_ia32_vfnmsubps256_mask">,
def int_x86_avx512_mask_vfnmsub_ps_256 : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v8f32_ty],
[llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty, llvm_i8_ty],
[IntrNoMem]>;
def int_x86_avx512_mask3_vfnmsub_ps_256 :
GCCBuiltin<"__builtin_ia32_vfnmsubps256_mask3">,
def int_x86_avx512_mask3_vfnmsub_ps_256 : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v8f32_ty],
[llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty, llvm_i8_ty],
[IntrNoMem]>;
def int_x86_avx512_mask_vfnmsub_ps_512 :
GCCBuiltin<"__builtin_ia32_vfnmsubps512_mask">,
def int_x86_avx512_mask_vfnmsub_ps_512 : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v16f32_ty],
[llvm_v16f32_ty, llvm_v16f32_ty, llvm_v16f32_ty, llvm_i16_ty,
llvm_i32_ty], [IntrNoMem]>;
def int_x86_avx512_mask3_vfnmsub_ps_512 :
GCCBuiltin<"__builtin_ia32_vfnmsubps512_mask3">,
def int_x86_avx512_mask3_vfnmsub_ps_512 : // FIXME: remove this intrinsic.
Intrinsic<[llvm_v16f32_ty],
[llvm_v16f32_ty, llvm_v16f32_ty, llvm_v16f32_ty, llvm_i16_ty,
llvm_i32_ty], [IntrNoMem]>;

View File

@ -20502,6 +20502,25 @@ SDValue X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
Src1, Src2, Src3),
Mask, PassThru, Subtarget, DAG);
}
case INTR_TYPE_3OP_RM: {
SDValue Src1 = Op.getOperand(1);
SDValue Src2 = Op.getOperand(2);
SDValue Src3 = Op.getOperand(3);
// We specify 2 possible opcodes for intrinsics with rounding modes.
// First, we check if the intrinsic may have non-default rounding mode,
// (IntrData->Opc1 != 0), then we check the rounding mode operand.
unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
if (IntrWithRoundingModeOpcode != 0) {
SDValue Rnd = Op.getOperand(4);
if (!isRoundModeCurDirection(Rnd)) {
return DAG.getNode(IntrWithRoundingModeOpcode,
dl, Op.getValueType(),
Src1, Src2, Src3, Rnd);
}
}
return DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2, Src3);
}
case VPERM_2OP : {
SDValue Src1 = Op.getOperand(1);
SDValue Src2 = Op.getOperand(2);
@ -30389,6 +30408,35 @@ static SDValue combineTargetShuffle(SDValue N, SelectionDAG &DAG,
return SDValue();
}
/// Checks if the shuffle mask takes subsequent elements
/// alternately from two vectors.
/// For example <0, 5, 2, 7> or <8, 1, 10, 3, 12, 5, 14, 7> are both correct.
///
/// \p ParitySrc must be initialized to {-1, -1} by the caller. On a true
/// return, ParitySrc[0] / ParitySrc[1] hold the source-vector index (0 or 1)
/// used by the even / odd mask positions respectively. Undef entries (M < 0)
/// are ignored. Returns false if an element is not taken from its own lane,
/// if either parity class is never sourced, or if both parities draw from the
/// same input.
static bool isAddSubOrSubAddMask(ArrayRef<int> Mask, int ParitySrc[2]) {
unsigned Size = Mask.size();
for (unsigned i = 0; i != Size; ++i) {
int M = Mask[i];
if (M < 0)
continue;
// Make sure we are using the matching element from the input.
if ((M % Size) != i)
return false;
// Make sure we use the same input for all elements of the same parity.
int Src = M / Size;
if (ParitySrc[i % 2] >= 0 && ParitySrc[i % 2] != Src)
return false;
ParitySrc[i % 2] = Src;
}
// Make sure each input is used.
if (ParitySrc[0] < 0 || ParitySrc[1] < 0 || ParitySrc[0] == ParitySrc[1])
return false;
return true;
}
/// Returns true iff the shuffle node \p N can be replaced with ADDSUB(SUBADD)
/// operation. If true is returned then the operands of ADDSUB(SUBADD) operation
/// are written to the parameters \p Opnd0 and \p Opnd1.
@ -30444,27 +30492,8 @@ static bool isAddSubOrSubAdd(SDNode *N, const X86Subtarget &Subtarget,
}
ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(N)->getMask();
int ParitySrc[2] = {-1, -1};
unsigned Size = Mask.size();
for (unsigned i = 0; i != Size; ++i) {
int M = Mask[i];
if (M < 0)
continue;
// Make sure we are using the matching element from the input.
if ((M % Size) != i)
return false;
// Make sure we use the same input for all elements of the same parity.
int Src = M / Size;
if (ParitySrc[i % 2] >= 0 && ParitySrc[i % 2] != Src)
return false;
ParitySrc[i % 2] = Src;
}
// Make sure each input is used.
if (ParitySrc[0] < 0 || ParitySrc[1] < 0 || ParitySrc[0] == ParitySrc[1])
if (!isAddSubOrSubAddMask(Mask, ParitySrc))
return false;
// It's a subadd if the vector in the even parity is an FADD.
@ -30476,11 +30505,56 @@ static bool isAddSubOrSubAdd(SDNode *N, const X86Subtarget &Subtarget,
return true;
}
/// Combine shuffle of two fma nodes into FMAddSub or FMSubAdd.
///
/// Matches a VECTOR_SHUFFLE whose two operands are an ISD::FMA and an
/// X86ISD::FMSUB sharing all three operands, where the mask picks lanes
/// alternately from the two nodes; folds the pair into a single
/// X86ISD::FMADDSUB or X86ISD::FMSUBADD node.
static SDValue combineShuffleToFMAddSub(SDNode *N,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
// We only handle target-independent shuffles.
// FIXME: It would be easy and harmless to use the target shuffle mask
// extraction tool to support more.
if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
return SDValue();
MVT VT = N->getSimpleValueType(0);
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
// Bail out unless some form of FMA is available and the type is legal.
if (!Subtarget.hasAnyFMA() || !TLI.isTypeLegal(VT))
return SDValue();
// We're trying to match (shuffle fma(a, b, c), X86Fmsub(a, b, c)).
// Both nodes must share all three operands and have no other uses.
SDValue Op0 = N->getOperand(0);
SDValue Op1 = N->getOperand(1);
SDValue FMAdd = Op0, FMSub = Op1;
if (FMSub.getOpcode() != X86ISD::FMSUB)
std::swap(FMAdd, FMSub);
if (FMAdd.getOpcode() != ISD::FMA || FMSub.getOpcode() != X86ISD::FMSUB ||
FMAdd.getOperand(0) != FMSub.getOperand(0) || !FMAdd.hasOneUse() ||
FMAdd.getOperand(1) != FMSub.getOperand(1) || !FMSub.hasOneUse() ||
FMAdd.getOperand(2) != FMSub.getOperand(2))
return SDValue();
// Check for correct shuffle mask.
ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(N)->getMask();
int ParitySrc[2] = {-1, -1};
if (!isAddSubOrSubAddMask(Mask, ParitySrc))
return SDValue();
// FMAddSub takes zeroth operand from FMSub node.
// It's SubAdd when the even-parity lanes come from the FMA node (even lanes
// add, odd lanes subtract); otherwise it's AddSub.
SDLoc DL(N);
bool IsSubAdd = ParitySrc[0] == 0 ? Op0 == FMAdd : Op1 == FMAdd;
unsigned Opcode = IsSubAdd ? X86ISD::FMSUBADD : X86ISD::FMADDSUB;
return DAG.getNode(Opcode, DL, VT, FMAdd.getOperand(0), FMAdd.getOperand(1),
FMAdd.getOperand(2));
}
/// Try to combine a shuffle into a target-specific add-sub or
/// mul-add-sub node.
static SDValue combineShuffleToAddSubOrFMAddSub(SDNode *N,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
if (SDValue V = combineShuffleToFMAddSub(N, Subtarget, DAG))
return V;
SDValue Opnd0, Opnd1;
bool IsSubAdd;
if (!isAddSubOrSubAdd(N, Subtarget, DAG, Opnd0, Opnd1, IsSubAdd))

View File

@ -23,6 +23,7 @@ enum IntrinsicType : uint16_t {
INTR_NO_TYPE,
GATHER, SCATTER, PREFETCH, RDSEED, RDRAND, RDPMC, RDTSC, XTEST, XGETBV, ADX, FPCLASS, FPCLASSS,
INTR_TYPE_1OP, INTR_TYPE_2OP, INTR_TYPE_3OP, INTR_TYPE_4OP,
INTR_TYPE_3OP_RM,
CMP_MASK, CMP_MASK_CC,CMP_MASK_SCALAR_CC, VSHIFT, COMI, COMI_RM,
CVTPD2PS, CVTPD2PS_MASK,
INTR_TYPE_1OP_MASK, INTR_TYPE_1OP_MASK_RM,
@ -1324,6 +1325,12 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86_INTRINSIC_DATA(avx512_vcvtss2si64, INTR_TYPE_2OP, X86ISD::CVTS2SI_RND, 0),
X86_INTRINSIC_DATA(avx512_vcvtss2usi32, INTR_TYPE_2OP, X86ISD::CVTS2UI_RND, 0),
X86_INTRINSIC_DATA(avx512_vcvtss2usi64, INTR_TYPE_2OP, X86ISD::CVTS2UI_RND, 0),
X86_INTRINSIC_DATA(avx512_vfmadd_pd_512, INTR_TYPE_3OP_RM, ISD::FMA, X86ISD::FMADD_RND),
X86_INTRINSIC_DATA(avx512_vfmadd_ps_512, INTR_TYPE_3OP_RM, ISD::FMA, X86ISD::FMADD_RND),
X86_INTRINSIC_DATA(avx512_vfmaddsub_pd_512, INTR_TYPE_3OP_RM, X86ISD::FMADDSUB,
X86ISD::FMADDSUB_RND),
X86_INTRINSIC_DATA(avx512_vfmaddsub_ps_512, INTR_TYPE_3OP_RM, X86ISD::FMADDSUB,
X86ISD::FMADDSUB_RND),
X86_INTRINSIC_DATA(avx512_vpdpbusd_128, INTR_TYPE_3OP, X86ISD::VPDPBUSD, 0),
X86_INTRINSIC_DATA(avx512_vpdpbusd_256, INTR_TYPE_3OP, X86ISD::VPDPBUSD, 0),

View File

@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx512f | FileCheck %s
declare <16 x float> @llvm.x86.avx512.mask.vfmadd.ps.512(<16 x float>, <16 x float>, <16 x float>, i16, i32)
declare <8 x double> @llvm.x86.avx512.mask.vfmadd.pd.512(<8 x double>, <8 x double>, <8 x double>, i8, i32)
declare <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float>, <16 x float>, <16 x float>, i32)
declare <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double>, <8 x double>, <8 x double>, i32)
define <16 x float> @test_x86_vfnmadd_ps_z(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) {
; CHECK-LABEL: test_x86_vfnmadd_ps_z:
@ -89,7 +89,7 @@ define <16 x float> @test_x86_vfmaddsubps_z(<16 x float> %a0, <16 x float> %a1,
; CHECK: ## %bb.0:
; CHECK-NEXT: vfmaddsub213ps %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.vfmaddsub.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 -1, i32 4) nounwind
%res = call <16 x float> @llvm.x86.avx512.vfmaddsub.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i32 4) nounwind
ret <16 x float> %res
}
@ -99,21 +99,23 @@ define <16 x float> @test_mask_fmaddsub_ps(<16 x float> %a, <16 x float> %b, <16
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfmaddsub132ps %zmm1, %zmm2, %zmm0 {%k1}
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.vfmaddsub.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> %c, i16 %mask, i32 4)
ret <16 x float> %res
%res = call <16 x float> @llvm.x86.avx512.vfmaddsub.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> %c, i32 4)
%bc = bitcast i16 %mask to <16 x i1>
%sel = select <16 x i1> %bc, <16 x float> %res, <16 x float> %a
ret <16 x float> %sel
}
declare <16 x float> @llvm.x86.avx512.mask.vfmaddsub.ps.512(<16 x float>, <16 x float>, <16 x float>, i16, i32) nounwind readnone
declare <16 x float> @llvm.x86.avx512.vfmaddsub.ps.512(<16 x float>, <16 x float>, <16 x float>, i32) nounwind readnone
define <8 x double> @test_x86_vfmaddsubpd_z(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
; CHECK-LABEL: test_x86_vfmaddsubpd_z:
; CHECK: ## %bb.0:
; CHECK-NEXT: vfmaddsub213pd %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <8 x double> @llvm.x86.avx512.mask.vfmaddsub.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 -1, i32 4) nounwind
%res = call <8 x double> @llvm.x86.avx512.vfmaddsub.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i32 4) nounwind
ret <8 x double> %res
}
declare <8 x double> @llvm.x86.avx512.mask.vfmaddsub.pd.512(<8 x double>, <8 x double>, <8 x double>, i8, i32) nounwind readnone
declare <8 x double> @llvm.x86.avx512.vfmaddsub.pd.512(<8 x double>, <8 x double>, <8 x double>, i32) nounwind readnone
define <8 x double> @test_mask_vfmaddsub_pd(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_vfmaddsub_pd:
@ -121,8 +123,10 @@ define <8 x double> @test_mask_vfmaddsub_pd(<8 x double> %a0, <8 x double> %a1,
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfmaddsub132pd %zmm1, %zmm2, %zmm0 {%k1}
; CHECK-NEXT: retq
%res = call <8 x double> @llvm.x86.avx512.mask.vfmaddsub.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask, i32 4) nounwind
ret <8 x double> %res
%res = call <8 x double> @llvm.x86.avx512.vfmaddsub.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i32 4) nounwind
%bc = bitcast i8 %mask to <8 x i1>
%sel = select <8 x i1> %bc, <8 x double> %res, <8 x double> %a0
ret <8 x double> %sel
}
define <8 x double>@test_int_x86_avx512_mask_vfmaddsub_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3){
@ -134,9 +138,11 @@ define <8 x double>@test_int_x86_avx512_mask_vfmaddsub_pd_512(<8 x double> %x0,
; CHECK-NEXT: vfmaddsub213pd {rn-sae}, %zmm2, %zmm0, %zmm1
; CHECK-NEXT: vaddpd %zmm1, %zmm3, %zmm0
; CHECK-NEXT: retq
%res = call <8 x double> @llvm.x86.avx512.mask.vfmaddsub.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3, i32 4)
%res1 = call <8 x double> @llvm.x86.avx512.mask.vfmaddsub.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 -1, i32 0)
%res2 = fadd <8 x double> %res, %res1
%res = call <8 x double> @llvm.x86.avx512.vfmaddsub.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i32 4)
%bc = bitcast i8 %x3 to <8 x i1>
%sel = select <8 x i1> %bc, <8 x double> %res, <8 x double> %x0
%res1 = call <8 x double> @llvm.x86.avx512.vfmaddsub.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i32 0)
%res2 = fadd <8 x double> %sel, %res1
ret <8 x double> %res2
}
@ -183,9 +189,11 @@ define <16 x float>@test_int_x86_avx512_mask_vfmaddsub_ps_512(<16 x float> %x0,
; CHECK-NEXT: vfmaddsub213ps {rn-sae}, %zmm2, %zmm0, %zmm1
; CHECK-NEXT: vaddps %zmm1, %zmm3, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.vfmaddsub.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3, i32 4)
%res1 = call <16 x float> @llvm.x86.avx512.mask.vfmaddsub.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 -1, i32 0)
%res2 = fadd <16 x float> %res, %res1
%res = call <16 x float> @llvm.x86.avx512.vfmaddsub.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i32 4)
%bc = bitcast i16 %x3 to <16 x i1>
%sel = select <16 x i1> %bc, <16 x float> %res, <16 x float> %x0
%res1 = call <16 x float> @llvm.x86.avx512.vfmaddsub.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i32 0)
%res2 = fadd <16 x float> %sel, %res1
ret <16 x float> %res2
}
@ -263,8 +271,10 @@ define <16 x float> @test_mask_round_vfmadd512_ps_rrb_rne(<16 x float> %a0, <16
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfmadd132ps {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1}
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask, i32 0) nounwind
ret <16 x float> %res
%res = call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i32 0) nounwind
%bc = bitcast i16 %mask to <16 x i1>
%sel = select <16 x i1> %bc, <16 x float> %res, <16 x float> %a0
ret <16 x float> %sel
}
define <16 x float> @test_mask_round_vfmadd512_ps_rrb_rtn(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask) {
@ -273,8 +283,10 @@ define <16 x float> @test_mask_round_vfmadd512_ps_rrb_rtn(<16 x float> %a0, <16
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfmadd132ps {rd-sae}, %zmm1, %zmm2, %zmm0 {%k1}
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask, i32 1) nounwind
ret <16 x float> %res
%res = call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i32 1) nounwind
%bc = bitcast i16 %mask to <16 x i1>
%sel = select <16 x i1> %bc, <16 x float> %res, <16 x float> %a0
ret <16 x float> %sel
}
define <16 x float> @test_mask_round_vfmadd512_ps_rrb_rtp(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask) {
@ -283,8 +295,10 @@ define <16 x float> @test_mask_round_vfmadd512_ps_rrb_rtp(<16 x float> %a0, <16
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfmadd132ps {ru-sae}, %zmm1, %zmm2, %zmm0 {%k1}
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask, i32 2) nounwind
ret <16 x float> %res
%res = call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i32 2) nounwind
%bc = bitcast i16 %mask to <16 x i1>
%sel = select <16 x i1> %bc, <16 x float> %res, <16 x float> %a0
ret <16 x float> %sel
}
define <16 x float> @test_mask_round_vfmadd512_ps_rrb_rtz(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask) {
@ -293,8 +307,10 @@ define <16 x float> @test_mask_round_vfmadd512_ps_rrb_rtz(<16 x float> %a0, <16
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfmadd132ps {rz-sae}, %zmm1, %zmm2, %zmm0 {%k1}
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask, i32 3) nounwind
ret <16 x float> %res
%res = call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i32 3) nounwind
%bc = bitcast i16 %mask to <16 x i1>
%sel = select <16 x i1> %bc, <16 x float> %res, <16 x float> %a0
ret <16 x float> %sel
}
define <16 x float> @test_mask_round_vfmadd512_ps_rrb_current(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask) {
@ -303,8 +319,10 @@ define <16 x float> @test_mask_round_vfmadd512_ps_rrb_current(<16 x float> %a0,
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfmadd132ps %zmm1, %zmm2, %zmm0 {%k1}
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask, i32 4) nounwind
ret <16 x float> %res
%res = call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i32 4) nounwind
%bc = bitcast i16 %mask to <16 x i1>
%sel = select <16 x i1> %bc, <16 x float> %res, <16 x float> %a0
ret <16 x float> %sel
}
define <16 x float> @test_mask_round_vfmadd512_ps_rrbz_rne(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) {
@ -312,7 +330,7 @@ define <16 x float> @test_mask_round_vfmadd512_ps_rrbz_rne(<16 x float> %a0, <16
; CHECK: ## %bb.0:
; CHECK-NEXT: vfmadd213ps {rn-sae}, %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 -1, i32 0) nounwind
%res = call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i32 0) nounwind
ret <16 x float> %res
}
@ -321,7 +339,7 @@ define <16 x float> @test_mask_round_vfmadd512_ps_rrbz_rtn(<16 x float> %a0, <16
; CHECK: ## %bb.0:
; CHECK-NEXT: vfmadd213ps {rd-sae}, %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 -1, i32 1) nounwind
%res = call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i32 1) nounwind
ret <16 x float> %res
}
@ -330,7 +348,7 @@ define <16 x float> @test_mask_round_vfmadd512_ps_rrbz_rtp(<16 x float> %a0, <16
; CHECK: ## %bb.0:
; CHECK-NEXT: vfmadd213ps {ru-sae}, %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 -1, i32 2) nounwind
%res = call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i32 2) nounwind
ret <16 x float> %res
}
@ -339,7 +357,7 @@ define <16 x float> @test_mask_round_vfmadd512_ps_rrbz_rtz(<16 x float> %a0, <16
; CHECK: ## %bb.0:
; CHECK-NEXT: vfmadd213ps {rz-sae}, %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 -1, i32 3) nounwind
%res = call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i32 3) nounwind
ret <16 x float> %res
}
@ -348,7 +366,7 @@ define <16 x float> @test_mask_round_vfmadd512_ps_rrbz_current(<16 x float> %a0,
; CHECK: ## %bb.0:
; CHECK-NEXT: vfmadd213ps %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 -1, i32 4) nounwind
%res = call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i32 4) nounwind
ret <16 x float> %res
}
@ -392,8 +410,10 @@ define <8 x double> @test_mask_round_vfmadd512_pd_rrb_rne(<8 x double> %a0, <8 x
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfmadd132pd {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1}
; CHECK-NEXT: retq
%res = call <8 x double> @llvm.x86.avx512.mask.vfmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask, i32 0) nounwind
ret <8 x double> %res
%res = call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i32 0) nounwind
%bc = bitcast i8 %mask to <8 x i1>
%sel = select <8 x i1> %bc, <8 x double> %res, <8 x double> %a0
ret <8 x double> %sel
}
define <8 x double> @test_mask_round_vfmadd512_pd_rrb_rtn(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask) {
@ -402,8 +422,10 @@ define <8 x double> @test_mask_round_vfmadd512_pd_rrb_rtn(<8 x double> %a0, <8 x
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfmadd132pd {rd-sae}, %zmm1, %zmm2, %zmm0 {%k1}
; CHECK-NEXT: retq
%res = call <8 x double> @llvm.x86.avx512.mask.vfmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask, i32 1) nounwind
ret <8 x double> %res
%res = call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i32 1) nounwind
%bc = bitcast i8 %mask to <8 x i1>
%sel = select <8 x i1> %bc, <8 x double> %res, <8 x double> %a0
ret <8 x double> %sel
}
define <8 x double> @test_mask_round_vfmadd512_pd_rrb_rtp(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask) {
@ -412,8 +434,10 @@ define <8 x double> @test_mask_round_vfmadd512_pd_rrb_rtp(<8 x double> %a0, <8 x
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfmadd132pd {ru-sae}, %zmm1, %zmm2, %zmm0 {%k1}
; CHECK-NEXT: retq
%res = call <8 x double> @llvm.x86.avx512.mask.vfmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask, i32 2) nounwind
ret <8 x double> %res
%res = call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i32 2) nounwind
%bc = bitcast i8 %mask to <8 x i1>
%sel = select <8 x i1> %bc, <8 x double> %res, <8 x double> %a0
ret <8 x double> %sel
}
define <8 x double> @test_mask_round_vfmadd512_pd_rrb_rtz(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask) {
@ -422,8 +446,10 @@ define <8 x double> @test_mask_round_vfmadd512_pd_rrb_rtz(<8 x double> %a0, <8 x
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfmadd132pd {rz-sae}, %zmm1, %zmm2, %zmm0 {%k1}
; CHECK-NEXT: retq
%res = call <8 x double> @llvm.x86.avx512.mask.vfmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask, i32 3) nounwind
ret <8 x double> %res
%res = call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i32 3) nounwind
%bc = bitcast i8 %mask to <8 x i1>
%sel = select <8 x i1> %bc, <8 x double> %res, <8 x double> %a0
ret <8 x double> %sel
}
define <8 x double> @test_mask_round_vfmadd512_pd_rrb_current(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask) {
@ -432,8 +458,10 @@ define <8 x double> @test_mask_round_vfmadd512_pd_rrb_current(<8 x double> %a0,
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfmadd132pd %zmm1, %zmm2, %zmm0 {%k1}
; CHECK-NEXT: retq
%res = call <8 x double> @llvm.x86.avx512.mask.vfmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask, i32 4) nounwind
ret <8 x double> %res
%res = call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i32 4) nounwind
%bc = bitcast i8 %mask to <8 x i1>
%sel = select <8 x i1> %bc, <8 x double> %res, <8 x double> %a0
ret <8 x double> %sel
}
define <8 x double> @test_mask_round_vfmadd512_pd_rrbz_rne(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
@ -441,7 +469,7 @@ define <8 x double> @test_mask_round_vfmadd512_pd_rrbz_rne(<8 x double> %a0, <8
; CHECK: ## %bb.0:
; CHECK-NEXT: vfmadd213pd {rn-sae}, %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <8 x double> @llvm.x86.avx512.mask.vfmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 -1, i32 0) nounwind
%res = call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i32 0) nounwind
ret <8 x double> %res
}
@ -450,7 +478,7 @@ define <8 x double> @test_mask_round_vfmadd512_pd_rrbz_rtn(<8 x double> %a0, <8
; CHECK: ## %bb.0:
; CHECK-NEXT: vfmadd213pd {rd-sae}, %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <8 x double> @llvm.x86.avx512.mask.vfmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 -1, i32 1) nounwind
%res = call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i32 1) nounwind
ret <8 x double> %res
}
@ -459,7 +487,7 @@ define <8 x double> @test_mask_round_vfmadd512_pd_rrbz_rtp(<8 x double> %a0, <8
; CHECK: ## %bb.0:
; CHECK-NEXT: vfmadd213pd {ru-sae}, %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <8 x double> @llvm.x86.avx512.mask.vfmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 -1, i32 2) nounwind
%res = call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i32 2) nounwind
ret <8 x double> %res
}
@ -468,7 +496,7 @@ define <8 x double> @test_mask_round_vfmadd512_pd_rrbz_rtz(<8 x double> %a0, <8
; CHECK: ## %bb.0:
; CHECK-NEXT: vfmadd213pd {rz-sae}, %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <8 x double> @llvm.x86.avx512.mask.vfmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 -1, i32 3) nounwind
%res = call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i32 3) nounwind
ret <8 x double> %res
}
@ -477,7 +505,7 @@ define <8 x double> @test_mask_round_vfmadd512_pd_rrbz_current(<8 x double> %a0,
; CHECK: ## %bb.0:
; CHECK-NEXT: vfmadd213pd %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <8 x double> @llvm.x86.avx512.mask.vfmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 -1, i32 4) nounwind
%res = call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i32 4) nounwind
ret <8 x double> %res
}
@ -490,9 +518,11 @@ define <8 x double>@test_int_x86_avx512_mask_vfmadd_pd_512(<8 x double> %x0, <8
; CHECK-NEXT: vfmadd213pd {rn-sae}, %zmm2, %zmm0, %zmm1
; CHECK-NEXT: vaddpd %zmm1, %zmm3, %zmm0
; CHECK-NEXT: retq
%res = call <8 x double> @llvm.x86.avx512.mask.vfmadd.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3, i32 4)
%res1 = call <8 x double> @llvm.x86.avx512.mask.vfmadd.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 -1, i32 0)
%res2 = fadd <8 x double> %res, %res1
%res = call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i32 4)
%bc = bitcast i8 %x3 to <8 x i1>
%sel = select <8 x i1> %bc, <8 x double> %res, <8 x double> %x0
%res1 = call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i32 0)
%res2 = fadd <8 x double> %sel, %res1
ret <8 x double> %res2
}
@ -539,9 +569,11 @@ define <16 x float>@test_int_x86_avx512_mask_vfmadd_ps_512(<16 x float> %x0, <16
; CHECK-NEXT: vfmadd213ps {rn-sae}, %zmm2, %zmm0, %zmm1
; CHECK-NEXT: vaddps %zmm1, %zmm3, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.vfmadd.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3, i32 4)
%res1 = call <16 x float> @llvm.x86.avx512.mask.vfmadd.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 -1, i32 0)
%res2 = fadd <16 x float> %res, %res1
%res = call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i32 4)
%bc = bitcast i16 %x3 to <16 x i1>
%sel = select <16 x i1> %bc, <16 x float> %res, <16 x float> %x0
%res1 = call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i32 0)
%res2 = fadd <16 x float> %sel, %res1
ret <16 x float> %res2
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -12,11 +12,11 @@ define <16 x float> @test1(<16 x float> %a, <16 x float> %b, <16 x float> %c) {
; CHECK-NEXT: retq
entry:
%sub.i = fsub <16 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %c
%0 = tail call <16 x float> @llvm.x86.avx512.mask.vfmadd.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> %sub.i, i16 -1, i32 4) #2
%0 = tail call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> %sub.i, i32 4) #2
ret <16 x float> %0
}
declare <16 x float> @llvm.x86.avx512.mask.vfmadd.ps.512(<16 x float>, <16 x float>, <16 x float>, i16, i32)
declare <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float>, <16 x float>, <16 x float>, i32)
declare <16 x float> @llvm.x86.avx512.mask.vfnmadd.ps.512(<16 x float>, <16 x float>, <16 x float>, i16, i32)
declare <16 x float> @llvm.x86.avx512.mask.vfnmsub.ps.512(<16 x float>, <16 x float>, <16 x float>, i16, i32)
@ -27,7 +27,7 @@ define <16 x float> @test2(<16 x float> %a, <16 x float> %b, <16 x float> %c) {
; CHECK-NEXT: vfnmsub213ps %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
entry:
%0 = tail call <16 x float> @llvm.x86.avx512.mask.vfmadd.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> %c, i16 -1, i32 4) #2
%0 = tail call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> %c, i32 4) #2
%sub.i = fsub <16 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %0
ret <16 x float> %sub.i
}
@ -61,7 +61,7 @@ define <16 x float> @test5(<16 x float> %a, <16 x float> %b, <16 x float> %c) {
; CHECK-NEXT: retq
entry:
%sub.i = fsub <16 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %c
%0 = tail call <16 x float> @llvm.x86.avx512.mask.vfmadd.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> %sub.i, i16 -1, i32 2) #2
%0 = tail call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> %sub.i, i32 2) #2
ret <16 x float> %0
}
@ -108,12 +108,12 @@ define <8 x double> @test9(<8 x double> %a, <8 x double> %b, <8 x double> %c) {
; CHECK-NEXT: vfnmsub213pd %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
entry:
%0 = tail call <8 x double> @llvm.x86.avx512.mask.vfmadd.pd.512(<8 x double> %a, <8 x double> %b, <8 x double> %c, i8 -1, i32 4) #2
%0 = tail call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %a, <8 x double> %b, <8 x double> %c, i32 4) #2
%sub.i = fsub <8 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %0
ret <8 x double> %sub.i
}
declare <8 x double> @llvm.x86.avx512.mask.vfmadd.pd.512(<8 x double> %a, <8 x double> %b, <8 x double> %c, i8, i32)
declare <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %a, <8 x double> %b, <8 x double> %c, i32)
define <2 x double> @test10(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
; CHECK-LABEL: test10:
@ -189,8 +189,10 @@ define <8 x double> @test12(<8 x double> %a, <8 x double> %b, <8 x double> %c, i
; KNL-NEXT: vpxorq {{.*}}(%rip){1to8}, %zmm0, %zmm0
; KNL-NEXT: retq
entry:
%0 = tail call <8 x double> @llvm.x86.avx512.mask.vfmadd.pd.512(<8 x double> %a, <8 x double> %b, <8 x double> %c, i8 %mask, i32 4) #2
%sub.i = fsub <8 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %0
%0 = tail call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %a, <8 x double> %b, <8 x double> %c, i32 4) #2
%bc = bitcast i8 %mask to <8 x i1>
%sel = select <8 x i1> %bc, <8 x double> %0, <8 x double> %a
%sub.i = fsub <8 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %sel
ret <8 x double> %sub.i
}
@ -256,10 +258,13 @@ define <16 x float> @test15(<16 x float> %a, <16 x float> %b, <16 x float> %c, i
; KNL-NEXT: vmovaps %zmm3, %zmm0
; KNL-NEXT: retq
entry:
%bc = bitcast i16 %mask to <16 x i1>
%sub.i = fsub <16 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %a
%0 = tail call <16 x float> @llvm.x86.avx512.mask.vfmadd.ps.512(<16 x float> %sub.i, <16 x float> %b, <16 x float> %c, i16 %mask, i32 2)
%1 = tail call <16 x float> @llvm.x86.avx512.mask.vfmadd.ps.512(<16 x float> %0, <16 x float> %sub.i, <16 x float> %c, i16 %mask, i32 1)
ret <16 x float> %1
%0 = tail call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %sub.i, <16 x float> %b, <16 x float> %c, i32 2)
%sel = select <16 x i1> %bc, <16 x float> %0, <16 x float> %sub.i
%1 = tail call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %sel, <16 x float> %sub.i, <16 x float> %c, i32 1)
%sel2 = select <16 x i1> %bc, <16 x float> %1, <16 x float> %sel
ret <16 x float> %sel2
}
define <16 x float> @test16(<16 x float> %a, <16 x float> %b, <16 x float> %c, i16 %mask) {
@ -275,10 +280,12 @@ define <16 x float> @test16(<16 x float> %a, <16 x float> %b, <16 x float> %c, i
; KNL-NEXT: vfmsubadd132ps {rd-sae}, %zmm1, %zmm2, %zmm0 {%k1}
; KNL-NEXT: retq
%sub.i = fsub <16 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %c
%res = call <16 x float> @llvm.x86.avx512.mask.vfmaddsub.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> %sub.i, i16 %mask, i32 1)
ret <16 x float> %res
%res = call <16 x float> @llvm.x86.avx512.vfmaddsub.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> %sub.i, i32 1)
%bc = bitcast i16 %mask to <16 x i1>
%sel = select <16 x i1> %bc, <16 x float> %res, <16 x float> %a
ret <16 x float> %sel
}
declare <16 x float> @llvm.x86.avx512.mask.vfmaddsub.ps.512(<16 x float>, <16 x float>, <16 x float>, i16, i32)
declare <16 x float> @llvm.x86.avx512.vfmaddsub.ps.512(<16 x float>, <16 x float>, <16 x float>, i32)
define <8 x double> @test17(<8 x double> %a, <8 x double> %b, <8 x double> %c, i8 %mask) {
; SKX-LABEL: test17:
@ -293,10 +300,12 @@ define <8 x double> @test17(<8 x double> %a, <8 x double> %b, <8 x double> %c, i
; KNL-NEXT: vfmsubadd132pd %zmm1, %zmm2, %zmm0 {%k1}
; KNL-NEXT: retq
%sub.i = fsub <8 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %c
%res = call <8 x double> @llvm.x86.avx512.mask.vfmaddsub.pd.512(<8 x double> %a, <8 x double> %b, <8 x double> %sub.i, i8 %mask, i32 4)
ret <8 x double> %res
%res = call <8 x double> @llvm.x86.avx512.vfmaddsub.pd.512(<8 x double> %a, <8 x double> %b, <8 x double> %sub.i, i32 4)
%bc = bitcast i8 %mask to <8 x i1>
%sel = select <8 x i1> %bc, <8 x double> %res, <8 x double> %a
ret <8 x double> %sel
}
declare <8 x double> @llvm.x86.avx512.mask.vfmaddsub.pd.512(<8 x double>, <8 x double>, <8 x double>, i8, i32)
declare <8 x double> @llvm.x86.avx512.vfmaddsub.pd.512(<8 x double>, <8 x double>, <8 x double>, i32)
define <4 x float> @test18(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 zeroext %mask) local_unnamed_addr #0 {
; SKX-LABEL: test18:
@ -438,6 +447,6 @@ define <16 x float> @test25(<16 x float> %a, <16 x float> %b, <16 x float> %c)
entry:
%sub.i = fsub <16 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %b
%sub.i.2 = fsub <16 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %c
%0 = tail call <16 x float> @llvm.x86.avx512.mask.vfmadd.ps.512(<16 x float> %a, <16 x float> %sub.i, <16 x float> %sub.i.2, i16 -1, i32 8) #2
%0 = tail call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %a, <16 x float> %sub.i, <16 x float> %sub.i.2, i32 8) #2
ret <16 x float> %0
}

View File

@ -0,0 +1,901 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+fma,-fma4 -show-mc-encoding | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-FMA
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,-fma4 -show-mc-encoding | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-AVX512VL
; RUN: llc < %s -mtriple=x86_64-pc-windows -mattr=+fma,-fma4 -show-mc-encoding | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-FMA-WIN
; NOTE: This should use IR equivalent to what is generated by clang/test/CodeGen/fma-builtins.c
define <4 x float> @test_mm_fmadd_ps(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
; CHECK-FMA-LABEL: test_mm_fmadd_ps:
; CHECK-FMA: # %bb.0: # %entry
; CHECK-FMA-NEXT: vfmadd213ps %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0xa8,0xc2]
; CHECK-FMA-NEXT: # xmm0 = (xmm1 * xmm0) + xmm2
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_mm_fmadd_ps:
; CHECK-AVX512VL: # %bb.0: # %entry
; CHECK-AVX512VL-NEXT: vfmadd213ps %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xa8,0xc2]
; CHECK-AVX512VL-NEXT: # xmm0 = (xmm1 * xmm0) + xmm2
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_mm_fmadd_ps:
; CHECK-FMA-WIN: # %bb.0: # %entry
; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %xmm1 # encoding: [0xc5,0xf8,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovaps (%rdx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfmadd213ps (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0x71,0xa8,0x00]
; CHECK-FMA-WIN-NEXT: # xmm0 = (xmm1 * xmm0) + mem
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
entry:
%0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c) #2
ret <4 x float> %0
}
; Double-precision analogue of test_mm_fmadd_ps: (a*b)+c selects one vfmadd213pd.
define <2 x double> @test_mm_fmadd_pd(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
; CHECK-FMA-LABEL: test_mm_fmadd_pd:
; CHECK-FMA: # %bb.0: # %entry
; CHECK-FMA-NEXT: vfmadd213pd %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0xa8,0xc2]
; CHECK-FMA-NEXT: # xmm0 = (xmm1 * xmm0) + xmm2
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_mm_fmadd_pd:
; CHECK-AVX512VL: # %bb.0: # %entry
; CHECK-AVX512VL-NEXT: vfmadd213pd %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xa8,0xc2]
; CHECK-AVX512VL-NEXT: # xmm0 = (xmm1 * xmm0) + xmm2
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_mm_fmadd_pd:
; CHECK-FMA-WIN: # %bb.0: # %entry
; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %xmm1 # encoding: [0xc5,0xf9,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovapd (%rdx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfmadd213pd (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0xf1,0xa8,0x00]
; CHECK-FMA-WIN-NEXT: # xmm0 = (xmm1 * xmm0) + mem
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
entry:
  %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %c) #2
  ret <2 x double> %0
}
; Scalar fmadd: extract lane 0, scalar llvm.fma.f32, reinsert -> vfmadd213ss (132 form with a folded load on Win64).
define <4 x float> @test_mm_fmadd_ss(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
; CHECK-FMA-LABEL: test_mm_fmadd_ss:
; CHECK-FMA: # %bb.0: # %entry
; CHECK-FMA-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0xa9,0xc2]
; CHECK-FMA-NEXT: # xmm0 = (xmm1 * xmm0) + xmm2
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_mm_fmadd_ss:
; CHECK-AVX512VL: # %bb.0: # %entry
; CHECK-AVX512VL-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xa9,0xc2]
; CHECK-AVX512VL-NEXT: # xmm0 = (xmm1 * xmm0) + xmm2
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_mm_fmadd_ss:
; CHECK-FMA-WIN: # %bb.0: # %entry
; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x01]
; CHECK-FMA-WIN-NEXT: vmovss (%r8), %xmm1 # encoding: [0xc4,0xc1,0x7a,0x10,0x08]
; CHECK-FMA-WIN-NEXT: # xmm1 = mem[0],zero,zero,zero
; CHECK-FMA-WIN-NEXT: vfmadd132ss (%rdx), %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0x99,0x02]
; CHECK-FMA-WIN-NEXT: # xmm0 = (xmm0 * mem) + xmm1
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
entry:
  %0 = extractelement <4 x float> %a, i64 0
  %1 = extractelement <4 x float> %b, i64 0
  %2 = extractelement <4 x float> %c, i64 0
  %3 = tail call float @llvm.fma.f32(float %0, float %1, float %2) #2
  %4 = insertelement <4 x float> %a, float %3, i64 0
  ret <4 x float> %4
}
; Scalar double fmadd: extract/fma/insert pattern must collapse to vfmadd213sd.
define <2 x double> @test_mm_fmadd_sd(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
; CHECK-FMA-LABEL: test_mm_fmadd_sd:
; CHECK-FMA: # %bb.0: # %entry
; CHECK-FMA-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0xa9,0xc2]
; CHECK-FMA-NEXT: # xmm0 = (xmm1 * xmm0) + xmm2
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_mm_fmadd_sd:
; CHECK-AVX512VL: # %bb.0: # %entry
; CHECK-AVX512VL-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xa9,0xc2]
; CHECK-AVX512VL-NEXT: # xmm0 = (xmm1 * xmm0) + xmm2
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_mm_fmadd_sd:
; CHECK-FMA-WIN: # %bb.0: # %entry
; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x01]
; CHECK-FMA-WIN-NEXT: vmovsd (%r8), %xmm1 # encoding: [0xc4,0xc1,0x7b,0x10,0x08]
; CHECK-FMA-WIN-NEXT: # xmm1 = mem[0],zero
; CHECK-FMA-WIN-NEXT: vfmadd132sd (%rdx), %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0x99,0x02]
; CHECK-FMA-WIN-NEXT: # xmm0 = (xmm0 * mem) + xmm1
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
entry:
  %0 = extractelement <2 x double> %a, i64 0
  %1 = extractelement <2 x double> %b, i64 0
  %2 = extractelement <2 x double> %c, i64 0
  %3 = tail call double @llvm.fma.f64(double %0, double %1, double %2) #2
  %4 = insertelement <2 x double> %a, double %3, i64 0
  ret <2 x double> %4
}
; (a*b)-c expressed as fma(a, b, -c) (fsub from -0.0) must fold the negation into vfmsub213ps.
define <4 x float> @test_mm_fmsub_ps(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
; CHECK-FMA-LABEL: test_mm_fmsub_ps:
; CHECK-FMA: # %bb.0: # %entry
; CHECK-FMA-NEXT: vfmsub213ps %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0xaa,0xc2]
; CHECK-FMA-NEXT: # xmm0 = (xmm1 * xmm0) - xmm2
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_mm_fmsub_ps:
; CHECK-AVX512VL: # %bb.0: # %entry
; CHECK-AVX512VL-NEXT: vfmsub213ps %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xaa,0xc2]
; CHECK-AVX512VL-NEXT: # xmm0 = (xmm1 * xmm0) - xmm2
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_mm_fmsub_ps:
; CHECK-FMA-WIN: # %bb.0: # %entry
; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %xmm1 # encoding: [0xc5,0xf8,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovaps (%rdx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfmsub213ps (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0x71,0xaa,0x00]
; CHECK-FMA-WIN-NEXT: # xmm0 = (xmm1 * xmm0) - mem
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
entry:
  %sub.i = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %c
  %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %sub.i) #2
  ret <4 x float> %0
}
; Double-precision fmsub: fma(a, b, -c) folds to vfmsub213pd.
define <2 x double> @test_mm_fmsub_pd(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
; CHECK-FMA-LABEL: test_mm_fmsub_pd:
; CHECK-FMA: # %bb.0: # %entry
; CHECK-FMA-NEXT: vfmsub213pd %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0xaa,0xc2]
; CHECK-FMA-NEXT: # xmm0 = (xmm1 * xmm0) - xmm2
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_mm_fmsub_pd:
; CHECK-AVX512VL: # %bb.0: # %entry
; CHECK-AVX512VL-NEXT: vfmsub213pd %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xaa,0xc2]
; CHECK-AVX512VL-NEXT: # xmm0 = (xmm1 * xmm0) - xmm2
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_mm_fmsub_pd:
; CHECK-FMA-WIN: # %bb.0: # %entry
; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %xmm1 # encoding: [0xc5,0xf9,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovapd (%rdx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfmsub213pd (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0xf1,0xaa,0x00]
; CHECK-FMA-WIN-NEXT: # xmm0 = (xmm1 * xmm0) - mem
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
entry:
  %sub.i = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %c
  %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %sub.i) #2
  ret <2 x double> %0
}
; Scalar fmsub: negation of the scalar addend (fsub from -0.0) folds into vfmsub213ss.
define <4 x float> @test_mm_fmsub_ss(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
; CHECK-FMA-LABEL: test_mm_fmsub_ss:
; CHECK-FMA: # %bb.0: # %entry
; CHECK-FMA-NEXT: vfmsub213ss %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0xab,0xc2]
; CHECK-FMA-NEXT: # xmm0 = (xmm1 * xmm0) - xmm2
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_mm_fmsub_ss:
; CHECK-AVX512VL: # %bb.0: # %entry
; CHECK-AVX512VL-NEXT: vfmsub213ss %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xab,0xc2]
; CHECK-AVX512VL-NEXT: # xmm0 = (xmm1 * xmm0) - xmm2
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_mm_fmsub_ss:
; CHECK-FMA-WIN: # %bb.0: # %entry
; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x01]
; CHECK-FMA-WIN-NEXT: vmovss (%r8), %xmm1 # encoding: [0xc4,0xc1,0x7a,0x10,0x08]
; CHECK-FMA-WIN-NEXT: # xmm1 = mem[0],zero,zero,zero
; CHECK-FMA-WIN-NEXT: vfmsub132ss (%rdx), %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0x9b,0x02]
; CHECK-FMA-WIN-NEXT: # xmm0 = (xmm0 * mem) - xmm1
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
entry:
  %0 = extractelement <4 x float> %a, i64 0
  %1 = extractelement <4 x float> %b, i64 0
  %.rhs.i = extractelement <4 x float> %c, i64 0
  %2 = fsub float -0.000000e+00, %.rhs.i
  %3 = tail call float @llvm.fma.f32(float %0, float %1, float %2) #2
  %4 = insertelement <4 x float> %a, float %3, i64 0
  ret <4 x float> %4
}
; Scalar double fmsub: extract / negate addend / fma / insert collapses to vfmsub213sd.
define <2 x double> @test_mm_fmsub_sd(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
; CHECK-FMA-LABEL: test_mm_fmsub_sd:
; CHECK-FMA: # %bb.0: # %entry
; CHECK-FMA-NEXT: vfmsub213sd %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0xab,0xc2]
; CHECK-FMA-NEXT: # xmm0 = (xmm1 * xmm0) - xmm2
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_mm_fmsub_sd:
; CHECK-AVX512VL: # %bb.0: # %entry
; CHECK-AVX512VL-NEXT: vfmsub213sd %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xab,0xc2]
; CHECK-AVX512VL-NEXT: # xmm0 = (xmm1 * xmm0) - xmm2
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_mm_fmsub_sd:
; CHECK-FMA-WIN: # %bb.0: # %entry
; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x01]
; CHECK-FMA-WIN-NEXT: vmovsd (%r8), %xmm1 # encoding: [0xc4,0xc1,0x7b,0x10,0x08]
; CHECK-FMA-WIN-NEXT: # xmm1 = mem[0],zero
; CHECK-FMA-WIN-NEXT: vfmsub132sd (%rdx), %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0x9b,0x02]
; CHECK-FMA-WIN-NEXT: # xmm0 = (xmm0 * mem) - xmm1
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
entry:
  %0 = extractelement <2 x double> %a, i64 0
  %1 = extractelement <2 x double> %b, i64 0
  %.rhs.i = extractelement <2 x double> %c, i64 0
  %2 = fsub double -0.000000e+00, %.rhs.i
  %3 = tail call double @llvm.fma.f64(double %0, double %1, double %2) #2
  %4 = insertelement <2 x double> %a, double %3, i64 0
  ret <2 x double> %4
}
; -(a*b)+c expressed as fma(-a, b, c) must fold the multiplicand negation into vfnmadd213ps.
define <4 x float> @test_mm_fnmadd_ps(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
; CHECK-FMA-LABEL: test_mm_fnmadd_ps:
; CHECK-FMA: # %bb.0: # %entry
; CHECK-FMA-NEXT: vfnmadd213ps %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0xac,0xc2]
; CHECK-FMA-NEXT: # xmm0 = -(xmm1 * xmm0) + xmm2
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_mm_fnmadd_ps:
; CHECK-AVX512VL: # %bb.0: # %entry
; CHECK-AVX512VL-NEXT: vfnmadd213ps %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xac,0xc2]
; CHECK-AVX512VL-NEXT: # xmm0 = -(xmm1 * xmm0) + xmm2
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_mm_fnmadd_ps:
; CHECK-FMA-WIN: # %bb.0: # %entry
; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %xmm1 # encoding: [0xc5,0xf8,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovaps (%rdx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfnmadd213ps (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0x71,0xac,0x00]
; CHECK-FMA-WIN-NEXT: # xmm0 = -(xmm1 * xmm0) + mem
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
entry:
  %sub.i = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %a
  %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %sub.i, <4 x float> %b, <4 x float> %c) #2
  ret <4 x float> %0
}
; Double-precision fnmadd: fma(-a, b, c) folds to vfnmadd213pd.
define <2 x double> @test_mm_fnmadd_pd(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
; CHECK-FMA-LABEL: test_mm_fnmadd_pd:
; CHECK-FMA: # %bb.0: # %entry
; CHECK-FMA-NEXT: vfnmadd213pd %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0xac,0xc2]
; CHECK-FMA-NEXT: # xmm0 = -(xmm1 * xmm0) + xmm2
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_mm_fnmadd_pd:
; CHECK-AVX512VL: # %bb.0: # %entry
; CHECK-AVX512VL-NEXT: vfnmadd213pd %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xac,0xc2]
; CHECK-AVX512VL-NEXT: # xmm0 = -(xmm1 * xmm0) + xmm2
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_mm_fnmadd_pd:
; CHECK-FMA-WIN: # %bb.0: # %entry
; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %xmm1 # encoding: [0xc5,0xf9,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovapd (%rdx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfnmadd213pd (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0xf1,0xac,0x00]
; CHECK-FMA-WIN-NEXT: # xmm0 = -(xmm1 * xmm0) + mem
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
entry:
  %sub.i = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %a
  %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %sub.i, <2 x double> %b, <2 x double> %c) #2
  ret <2 x double> %0
}
; Scalar fnmadd: negating one scalar multiplicand folds into vfnmadd213ss.
define <4 x float> @test_mm_fnmadd_ss(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
; CHECK-FMA-LABEL: test_mm_fnmadd_ss:
; CHECK-FMA: # %bb.0: # %entry
; CHECK-FMA-NEXT: vfnmadd213ss %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0xad,0xc2]
; CHECK-FMA-NEXT: # xmm0 = -(xmm1 * xmm0) + xmm2
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_mm_fnmadd_ss:
; CHECK-AVX512VL: # %bb.0: # %entry
; CHECK-AVX512VL-NEXT: vfnmadd213ss %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xad,0xc2]
; CHECK-AVX512VL-NEXT: # xmm0 = -(xmm1 * xmm0) + xmm2
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_mm_fnmadd_ss:
; CHECK-FMA-WIN: # %bb.0: # %entry
; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x01]
; CHECK-FMA-WIN-NEXT: vmovss (%r8), %xmm1 # encoding: [0xc4,0xc1,0x7a,0x10,0x08]
; CHECK-FMA-WIN-NEXT: # xmm1 = mem[0],zero,zero,zero
; CHECK-FMA-WIN-NEXT: vfnmadd132ss (%rdx), %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0x9d,0x02]
; CHECK-FMA-WIN-NEXT: # xmm0 = -(xmm0 * mem) + xmm1
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
entry:
  %0 = extractelement <4 x float> %a, i64 0
  %.rhs.i = extractelement <4 x float> %b, i64 0
  %1 = fsub float -0.000000e+00, %.rhs.i
  %2 = extractelement <4 x float> %c, i64 0
  %3 = tail call float @llvm.fma.f32(float %0, float %1, float %2) #2
  %4 = insertelement <4 x float> %a, float %3, i64 0
  ret <4 x float> %4
}
; Scalar double fnmadd: fma(a, -b, c) lane-0 pattern collapses to vfnmadd213sd.
define <2 x double> @test_mm_fnmadd_sd(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
; CHECK-FMA-LABEL: test_mm_fnmadd_sd:
; CHECK-FMA: # %bb.0: # %entry
; CHECK-FMA-NEXT: vfnmadd213sd %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0xad,0xc2]
; CHECK-FMA-NEXT: # xmm0 = -(xmm1 * xmm0) + xmm2
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_mm_fnmadd_sd:
; CHECK-AVX512VL: # %bb.0: # %entry
; CHECK-AVX512VL-NEXT: vfnmadd213sd %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xad,0xc2]
; CHECK-AVX512VL-NEXT: # xmm0 = -(xmm1 * xmm0) + xmm2
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_mm_fnmadd_sd:
; CHECK-FMA-WIN: # %bb.0: # %entry
; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x01]
; CHECK-FMA-WIN-NEXT: vmovsd (%r8), %xmm1 # encoding: [0xc4,0xc1,0x7b,0x10,0x08]
; CHECK-FMA-WIN-NEXT: # xmm1 = mem[0],zero
; CHECK-FMA-WIN-NEXT: vfnmadd132sd (%rdx), %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0x9d,0x02]
; CHECK-FMA-WIN-NEXT: # xmm0 = -(xmm0 * mem) + xmm1
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
entry:
  %0 = extractelement <2 x double> %a, i64 0
  %.rhs.i = extractelement <2 x double> %b, i64 0
  %1 = fsub double -0.000000e+00, %.rhs.i
  %2 = extractelement <2 x double> %c, i64 0
  %3 = tail call double @llvm.fma.f64(double %0, double %1, double %2) #2
  %4 = insertelement <2 x double> %a, double %3, i64 0
  ret <2 x double> %4
}
; -(a*b)-c: both negations (multiplicand and addend) must fold into one vfnmsub213ps.
define <4 x float> @test_mm_fnmsub_ps(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
; CHECK-FMA-LABEL: test_mm_fnmsub_ps:
; CHECK-FMA: # %bb.0: # %entry
; CHECK-FMA-NEXT: vfnmsub213ps %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0xae,0xc2]
; CHECK-FMA-NEXT: # xmm0 = -(xmm1 * xmm0) - xmm2
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_mm_fnmsub_ps:
; CHECK-AVX512VL: # %bb.0: # %entry
; CHECK-AVX512VL-NEXT: vfnmsub213ps %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xae,0xc2]
; CHECK-AVX512VL-NEXT: # xmm0 = -(xmm1 * xmm0) - xmm2
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_mm_fnmsub_ps:
; CHECK-FMA-WIN: # %bb.0: # %entry
; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %xmm1 # encoding: [0xc5,0xf8,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovaps (%rdx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfnmsub213ps (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0x71,0xae,0x00]
; CHECK-FMA-WIN-NEXT: # xmm0 = -(xmm1 * xmm0) - mem
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
entry:
  %sub.i = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %a
  %sub1.i = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %c
  %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %sub.i, <4 x float> %b, <4 x float> %sub1.i) #2
  ret <4 x float> %0
}
; Double-precision fnmsub: fma(-a, b, -c) folds to vfnmsub213pd.
define <2 x double> @test_mm_fnmsub_pd(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
; CHECK-FMA-LABEL: test_mm_fnmsub_pd:
; CHECK-FMA: # %bb.0: # %entry
; CHECK-FMA-NEXT: vfnmsub213pd %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0xae,0xc2]
; CHECK-FMA-NEXT: # xmm0 = -(xmm1 * xmm0) - xmm2
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_mm_fnmsub_pd:
; CHECK-AVX512VL: # %bb.0: # %entry
; CHECK-AVX512VL-NEXT: vfnmsub213pd %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xae,0xc2]
; CHECK-AVX512VL-NEXT: # xmm0 = -(xmm1 * xmm0) - xmm2
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_mm_fnmsub_pd:
; CHECK-FMA-WIN: # %bb.0: # %entry
; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %xmm1 # encoding: [0xc5,0xf9,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovapd (%rdx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfnmsub213pd (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0xf1,0xae,0x00]
; CHECK-FMA-WIN-NEXT: # xmm0 = -(xmm1 * xmm0) - mem
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
entry:
  %sub.i = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %a
  %sub1.i = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %c
  %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %sub.i, <2 x double> %b, <2 x double> %sub1.i) #2
  ret <2 x double> %0
}
; Scalar fnmsub: two scalar negations fold into a single vfnmsub213ss.
define <4 x float> @test_mm_fnmsub_ss(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
; CHECK-FMA-LABEL: test_mm_fnmsub_ss:
; CHECK-FMA: # %bb.0: # %entry
; CHECK-FMA-NEXT: vfnmsub213ss %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0xaf,0xc2]
; CHECK-FMA-NEXT: # xmm0 = -(xmm1 * xmm0) - xmm2
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_mm_fnmsub_ss:
; CHECK-AVX512VL: # %bb.0: # %entry
; CHECK-AVX512VL-NEXT: vfnmsub213ss %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xaf,0xc2]
; CHECK-AVX512VL-NEXT: # xmm0 = -(xmm1 * xmm0) - xmm2
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_mm_fnmsub_ss:
; CHECK-FMA-WIN: # %bb.0: # %entry
; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x01]
; CHECK-FMA-WIN-NEXT: vmovss (%r8), %xmm1 # encoding: [0xc4,0xc1,0x7a,0x10,0x08]
; CHECK-FMA-WIN-NEXT: # xmm1 = mem[0],zero,zero,zero
; CHECK-FMA-WIN-NEXT: vfnmsub132ss (%rdx), %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0x9f,0x02]
; CHECK-FMA-WIN-NEXT: # xmm0 = -(xmm0 * mem) - xmm1
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
entry:
  %0 = extractelement <4 x float> %a, i64 0
  %.rhs.i = extractelement <4 x float> %b, i64 0
  %1 = fsub float -0.000000e+00, %.rhs.i
  %.rhs2.i = extractelement <4 x float> %c, i64 0
  %2 = fsub float -0.000000e+00, %.rhs2.i
  %3 = tail call float @llvm.fma.f32(float %0, float %1, float %2) #2
  %4 = insertelement <4 x float> %a, float %3, i64 0
  ret <4 x float> %4
}
; Scalar double fnmsub: lane-0 fma(a, -b, -c) collapses to vfnmsub213sd.
define <2 x double> @test_mm_fnmsub_sd(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
; CHECK-FMA-LABEL: test_mm_fnmsub_sd:
; CHECK-FMA: # %bb.0: # %entry
; CHECK-FMA-NEXT: vfnmsub213sd %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0xaf,0xc2]
; CHECK-FMA-NEXT: # xmm0 = -(xmm1 * xmm0) - xmm2
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_mm_fnmsub_sd:
; CHECK-AVX512VL: # %bb.0: # %entry
; CHECK-AVX512VL-NEXT: vfnmsub213sd %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xaf,0xc2]
; CHECK-AVX512VL-NEXT: # xmm0 = -(xmm1 * xmm0) - xmm2
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_mm_fnmsub_sd:
; CHECK-FMA-WIN: # %bb.0: # %entry
; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x01]
; CHECK-FMA-WIN-NEXT: vmovsd (%r8), %xmm1 # encoding: [0xc4,0xc1,0x7b,0x10,0x08]
; CHECK-FMA-WIN-NEXT: # xmm1 = mem[0],zero
; CHECK-FMA-WIN-NEXT: vfnmsub132sd (%rdx), %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0x9f,0x02]
; CHECK-FMA-WIN-NEXT: # xmm0 = -(xmm0 * mem) - xmm1
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
entry:
  %0 = extractelement <2 x double> %a, i64 0
  %.rhs.i = extractelement <2 x double> %b, i64 0
  %1 = fsub double -0.000000e+00, %.rhs.i
  %.rhs2.i = extractelement <2 x double> %c, i64 0
  %2 = fsub double -0.000000e+00, %.rhs2.i
  %3 = tail call double @llvm.fma.f64(double %0, double %1, double %2) #2
  %4 = insertelement <2 x double> %a, double %3, i64 0
  ret <2 x double> %4
}
; fmaddsub: two FMAs (add and sub forms) blended by shufflevector (even lanes from sub, odd
; from add) must be recognized by the new fmaddsub folding and emit one vfmaddsub213ps.
define <4 x float> @test_mm_fmaddsub_ps(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
; CHECK-FMA-LABEL: test_mm_fmaddsub_ps:
; CHECK-FMA: # %bb.0: # %entry
; CHECK-FMA-NEXT: vfmaddsub213ps %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0xa6,0xc2]
; CHECK-FMA-NEXT: # xmm0 = (xmm1 * xmm0) +/- xmm2
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_mm_fmaddsub_ps:
; CHECK-AVX512VL: # %bb.0: # %entry
; CHECK-AVX512VL-NEXT: vfmaddsub213ps %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xa6,0xc2]
; CHECK-AVX512VL-NEXT: # xmm0 = (xmm1 * xmm0) +/- xmm2
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_mm_fmaddsub_ps:
; CHECK-FMA-WIN: # %bb.0: # %entry
; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %xmm1 # encoding: [0xc5,0xf8,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovaps (%rdx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfmaddsub213ps (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0x71,0xa6,0x00]
; CHECK-FMA-WIN-NEXT: # xmm0 = (xmm1 * xmm0) +/- mem
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
entry:
  %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c) #2
  %1 = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %c
  %2 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %1) #2
  %3 = shufflevector <4 x float> %2, <4 x float> %0, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
  ret <4 x float> %3
}
; Double-precision fmaddsub: FMA pair + <0,3> blend folds to one vfmaddsub213pd.
define <2 x double> @test_mm_fmaddsub_pd(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
; CHECK-FMA-LABEL: test_mm_fmaddsub_pd:
; CHECK-FMA: # %bb.0: # %entry
; CHECK-FMA-NEXT: vfmaddsub213pd %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0xa6,0xc2]
; CHECK-FMA-NEXT: # xmm0 = (xmm1 * xmm0) +/- xmm2
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_mm_fmaddsub_pd:
; CHECK-AVX512VL: # %bb.0: # %entry
; CHECK-AVX512VL-NEXT: vfmaddsub213pd %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xa6,0xc2]
; CHECK-AVX512VL-NEXT: # xmm0 = (xmm1 * xmm0) +/- xmm2
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_mm_fmaddsub_pd:
; CHECK-FMA-WIN: # %bb.0: # %entry
; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %xmm1 # encoding: [0xc5,0xf9,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovapd (%rdx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfmaddsub213pd (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0xf1,0xa6,0x00]
; CHECK-FMA-WIN-NEXT: # xmm0 = (xmm1 * xmm0) +/- mem
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
entry:
  %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %c) #2
  %1 = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %c
  %2 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %1) #2
  %3 = shufflevector <2 x double> %2, <2 x double> %0, <2 x i32> <i32 0, i32 3>
  ret <2 x double> %3
}
; fmsubadd: mirror of fmaddsub (even lanes from the add-form FMA, odd from the sub form)
; must be recognized by the new fmsubadd folding and emit one vfmsubadd213ps.
define <4 x float> @test_mm_fmsubadd_ps(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
; CHECK-FMA-LABEL: test_mm_fmsubadd_ps:
; CHECK-FMA: # %bb.0: # %entry
; CHECK-FMA-NEXT: vfmsubadd213ps %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0xa7,0xc2]
; CHECK-FMA-NEXT: # xmm0 = (xmm1 * xmm0) -/+ xmm2
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_mm_fmsubadd_ps:
; CHECK-AVX512VL: # %bb.0: # %entry
; CHECK-AVX512VL-NEXT: vfmsubadd213ps %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xa7,0xc2]
; CHECK-AVX512VL-NEXT: # xmm0 = (xmm1 * xmm0) -/+ xmm2
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_mm_fmsubadd_ps:
; CHECK-FMA-WIN: # %bb.0: # %entry
; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %xmm1 # encoding: [0xc5,0xf8,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovaps (%rdx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfmsubadd213ps (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0x71,0xa7,0x00]
; CHECK-FMA-WIN-NEXT: # xmm0 = (xmm1 * xmm0) -/+ mem
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
entry:
  %sub.i = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %c
  %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %sub.i) #2
  %1 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c) #2
  %2 = shufflevector <4 x float> %1, <4 x float> %0, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
  ret <4 x float> %2
}
; Double-precision fmsubadd: FMA pair + <0,3> blend folds to one vfmsubadd213pd.
define <2 x double> @test_mm_fmsubadd_pd(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
; CHECK-FMA-LABEL: test_mm_fmsubadd_pd:
; CHECK-FMA: # %bb.0: # %entry
; CHECK-FMA-NEXT: vfmsubadd213pd %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0xa7,0xc2]
; CHECK-FMA-NEXT: # xmm0 = (xmm1 * xmm0) -/+ xmm2
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_mm_fmsubadd_pd:
; CHECK-AVX512VL: # %bb.0: # %entry
; CHECK-AVX512VL-NEXT: vfmsubadd213pd %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xa7,0xc2]
; CHECK-AVX512VL-NEXT: # xmm0 = (xmm1 * xmm0) -/+ xmm2
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_mm_fmsubadd_pd:
; CHECK-FMA-WIN: # %bb.0: # %entry
; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %xmm1 # encoding: [0xc5,0xf9,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovapd (%rdx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfmsubadd213pd (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0xf1,0xa7,0x00]
; CHECK-FMA-WIN-NEXT: # xmm0 = (xmm1 * xmm0) -/+ mem
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
entry:
  %sub.i = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %c
  %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %sub.i) #2
  %1 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %c) #2
  %2 = shufflevector <2 x double> %1, <2 x double> %0, <2 x i32> <i32 0, i32 3>
  ret <2 x double> %2
}
; 256-bit fmadd: @llvm.fma.v8f32 selects one vfmadd213ps on ymm registers.
define <8 x float> @test_mm256_fmadd_ps(<8 x float> %a, <8 x float> %b, <8 x float> %c) {
; CHECK-FMA-LABEL: test_mm256_fmadd_ps:
; CHECK-FMA: # %bb.0: # %entry
; CHECK-FMA-NEXT: vfmadd213ps %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x75,0xa8,0xc2]
; CHECK-FMA-NEXT: # ymm0 = (ymm1 * ymm0) + ymm2
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_mm256_fmadd_ps:
; CHECK-AVX512VL: # %bb.0: # %entry
; CHECK-AVX512VL-NEXT: vfmadd213ps %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0xa8,0xc2]
; CHECK-AVX512VL-NEXT: # ymm0 = (ymm1 * ymm0) + ymm2
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_mm256_fmadd_ps:
; CHECK-FMA-WIN: # %bb.0: # %entry
; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %ymm1 # encoding: [0xc5,0xfc,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovaps (%rdx), %ymm0 # encoding: [0xc5,0xfc,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfmadd213ps (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0x75,0xa8,0x00]
; CHECK-FMA-WIN-NEXT: # ymm0 = (ymm1 * ymm0) + mem
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
entry:
  %0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c) #2
  ret <8 x float> %0
}
; 256-bit double fmadd: @llvm.fma.v4f64 selects one vfmadd213pd on ymm registers.
define <4 x double> @test_mm256_fmadd_pd(<4 x double> %a, <4 x double> %b, <4 x double> %c) {
; CHECK-FMA-LABEL: test_mm256_fmadd_pd:
; CHECK-FMA: # %bb.0: # %entry
; CHECK-FMA-NEXT: vfmadd213pd %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0xf5,0xa8,0xc2]
; CHECK-FMA-NEXT: # ymm0 = (ymm1 * ymm0) + ymm2
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_mm256_fmadd_pd:
; CHECK-AVX512VL: # %bb.0: # %entry
; CHECK-AVX512VL-NEXT: vfmadd213pd %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf5,0xa8,0xc2]
; CHECK-AVX512VL-NEXT: # ymm0 = (ymm1 * ymm0) + ymm2
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_mm256_fmadd_pd:
; CHECK-FMA-WIN: # %bb.0: # %entry
; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %ymm1 # encoding: [0xc5,0xfd,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovapd (%rdx), %ymm0 # encoding: [0xc5,0xfd,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfmadd213pd (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0xf5,0xa8,0x00]
; CHECK-FMA-WIN-NEXT: # ymm0 = (ymm1 * ymm0) + mem
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
entry:
  %0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %c) #2
  ret <4 x double> %0
}
; 256-bit fmsub: fma(a, b, -c) folds the negation into vfmsub213ps on ymm registers.
define <8 x float> @test_mm256_fmsub_ps(<8 x float> %a, <8 x float> %b, <8 x float> %c) {
; CHECK-FMA-LABEL: test_mm256_fmsub_ps:
; CHECK-FMA: # %bb.0: # %entry
; CHECK-FMA-NEXT: vfmsub213ps %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x75,0xaa,0xc2]
; CHECK-FMA-NEXT: # ymm0 = (ymm1 * ymm0) - ymm2
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_mm256_fmsub_ps:
; CHECK-AVX512VL: # %bb.0: # %entry
; CHECK-AVX512VL-NEXT: vfmsub213ps %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0xaa,0xc2]
; CHECK-AVX512VL-NEXT: # ymm0 = (ymm1 * ymm0) - ymm2
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_mm256_fmsub_ps:
; CHECK-FMA-WIN: # %bb.0: # %entry
; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %ymm1 # encoding: [0xc5,0xfc,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovaps (%rdx), %ymm0 # encoding: [0xc5,0xfc,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfmsub213ps (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0x75,0xaa,0x00]
; CHECK-FMA-WIN-NEXT: # ymm0 = (ymm1 * ymm0) - mem
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
entry:
  %sub.i = fsub <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %c
  %0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %sub.i) #2
  ret <8 x float> %0
}
; Double-precision variant of the fmsub pattern above: fma(a, b, -c)
; must be selected as one vfmsub213pd.
define <4 x double> @test_mm256_fmsub_pd(<4 x double> %a, <4 x double> %b, <4 x double> %c) {
; CHECK-FMA-LABEL: test_mm256_fmsub_pd:
; CHECK-FMA: # %bb.0: # %entry
; CHECK-FMA-NEXT: vfmsub213pd %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0xf5,0xaa,0xc2]
; CHECK-FMA-NEXT: # ymm0 = (ymm1 * ymm0) - ymm2
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_mm256_fmsub_pd:
; CHECK-AVX512VL: # %bb.0: # %entry
; CHECK-AVX512VL-NEXT: vfmsub213pd %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf5,0xaa,0xc2]
; CHECK-AVX512VL-NEXT: # ymm0 = (ymm1 * ymm0) - ymm2
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_mm256_fmsub_pd:
; CHECK-FMA-WIN: # %bb.0: # %entry
; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %ymm1 # encoding: [0xc5,0xfd,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovapd (%rdx), %ymm0 # encoding: [0xc5,0xfd,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfmsub213pd (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0xf5,0xaa,0x00]
; CHECK-FMA-WIN-NEXT: # ymm0 = (ymm1 * ymm0) - mem
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
entry:
  %sub.i = fsub <4 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %c
  %0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %sub.i) #2
  ret <4 x double> %0
}
; fnmadd(a,b,c) written as fma(-a, b, c): the first multiplicand is negated.
; Selection must fold the negation into a single vfnmadd213ps.
define <8 x float> @test_mm256_fnmadd_ps(<8 x float> %a, <8 x float> %b, <8 x float> %c) {
; CHECK-FMA-LABEL: test_mm256_fnmadd_ps:
; CHECK-FMA: # %bb.0: # %entry
; CHECK-FMA-NEXT: vfnmadd213ps %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x75,0xac,0xc2]
; CHECK-FMA-NEXT: # ymm0 = -(ymm1 * ymm0) + ymm2
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_mm256_fnmadd_ps:
; CHECK-AVX512VL: # %bb.0: # %entry
; CHECK-AVX512VL-NEXT: vfnmadd213ps %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0xac,0xc2]
; CHECK-AVX512VL-NEXT: # ymm0 = -(ymm1 * ymm0) + ymm2
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_mm256_fnmadd_ps:
; CHECK-FMA-WIN: # %bb.0: # %entry
; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %ymm1 # encoding: [0xc5,0xfc,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovaps (%rdx), %ymm0 # encoding: [0xc5,0xfc,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfnmadd213ps (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0x75,0xac,0x00]
; CHECK-FMA-WIN-NEXT: # ymm0 = -(ymm1 * ymm0) + mem
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
entry:
  %sub.i = fsub <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %a
  %0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %sub.i, <8 x float> %b, <8 x float> %c) #2
  ret <8 x float> %0
}
; Double-precision fnmadd: fma(-a, b, c) must become one vfnmadd213pd.
define <4 x double> @test_mm256_fnmadd_pd(<4 x double> %a, <4 x double> %b, <4 x double> %c) {
; CHECK-FMA-LABEL: test_mm256_fnmadd_pd:
; CHECK-FMA: # %bb.0: # %entry
; CHECK-FMA-NEXT: vfnmadd213pd %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0xf5,0xac,0xc2]
; CHECK-FMA-NEXT: # ymm0 = -(ymm1 * ymm0) + ymm2
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_mm256_fnmadd_pd:
; CHECK-AVX512VL: # %bb.0: # %entry
; CHECK-AVX512VL-NEXT: vfnmadd213pd %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf5,0xac,0xc2]
; CHECK-AVX512VL-NEXT: # ymm0 = -(ymm1 * ymm0) + ymm2
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_mm256_fnmadd_pd:
; CHECK-FMA-WIN: # %bb.0: # %entry
; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %ymm1 # encoding: [0xc5,0xfd,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovapd (%rdx), %ymm0 # encoding: [0xc5,0xfd,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfnmadd213pd (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0xf5,0xac,0x00]
; CHECK-FMA-WIN-NEXT: # ymm0 = -(ymm1 * ymm0) + mem
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
entry:
  %sub.i = fsub <4 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %a
  %0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %sub.i, <4 x double> %b, <4 x double> %c) #2
  ret <4 x double> %0
}
; fnmsub(a,b,c) written as fma(-a, b, -c): both a and c are negated.
; Both negations must fold into a single vfnmsub213ps.
define <8 x float> @test_mm256_fnmsub_ps(<8 x float> %a, <8 x float> %b, <8 x float> %c) {
; CHECK-FMA-LABEL: test_mm256_fnmsub_ps:
; CHECK-FMA: # %bb.0: # %entry
; CHECK-FMA-NEXT: vfnmsub213ps %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x75,0xae,0xc2]
; CHECK-FMA-NEXT: # ymm0 = -(ymm1 * ymm0) - ymm2
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_mm256_fnmsub_ps:
; CHECK-AVX512VL: # %bb.0: # %entry
; CHECK-AVX512VL-NEXT: vfnmsub213ps %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0xae,0xc2]
; CHECK-AVX512VL-NEXT: # ymm0 = -(ymm1 * ymm0) - ymm2
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_mm256_fnmsub_ps:
; CHECK-FMA-WIN: # %bb.0: # %entry
; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %ymm1 # encoding: [0xc5,0xfc,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovaps (%rdx), %ymm0 # encoding: [0xc5,0xfc,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfnmsub213ps (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0x75,0xae,0x00]
; CHECK-FMA-WIN-NEXT: # ymm0 = -(ymm1 * ymm0) - mem
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
entry:
  %sub.i = fsub <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %a
  %sub1.i = fsub <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %c
  %0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %sub.i, <8 x float> %b, <8 x float> %sub1.i) #2
  ret <8 x float> %0
}
; Double-precision fnmsub: fma(-a, b, -c) must become one vfnmsub213pd.
define <4 x double> @test_mm256_fnmsub_pd(<4 x double> %a, <4 x double> %b, <4 x double> %c) {
; CHECK-FMA-LABEL: test_mm256_fnmsub_pd:
; CHECK-FMA: # %bb.0: # %entry
; CHECK-FMA-NEXT: vfnmsub213pd %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0xf5,0xae,0xc2]
; CHECK-FMA-NEXT: # ymm0 = -(ymm1 * ymm0) - ymm2
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_mm256_fnmsub_pd:
; CHECK-AVX512VL: # %bb.0: # %entry
; CHECK-AVX512VL-NEXT: vfnmsub213pd %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf5,0xae,0xc2]
; CHECK-AVX512VL-NEXT: # ymm0 = -(ymm1 * ymm0) - ymm2
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_mm256_fnmsub_pd:
; CHECK-FMA-WIN: # %bb.0: # %entry
; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %ymm1 # encoding: [0xc5,0xfd,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovapd (%rdx), %ymm0 # encoding: [0xc5,0xfd,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfnmsub213pd (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0xf5,0xae,0x00]
; CHECK-FMA-WIN-NEXT: # ymm0 = -(ymm1 * ymm0) - mem
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
entry:
  %sub.i = fsub <4 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %a
  %sub1.i = fsub <4 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %c
  %0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %sub.i, <4 x double> %b, <4 x double> %sub1.i) #2
  ret <4 x double> %0
}
; fmaddsub pattern: two fma calls (add-form and sub-form with negated c)
; blended by shufflevector so even lanes take a*b-c and odd lanes a*b+c.
; The combiner must recognize this as a single vfmaddsub213ps.
define <8 x float> @test_mm256_fmaddsub_ps(<8 x float> %a, <8 x float> %b, <8 x float> %c) {
; CHECK-FMA-LABEL: test_mm256_fmaddsub_ps:
; CHECK-FMA: # %bb.0: # %entry
; CHECK-FMA-NEXT: vfmaddsub213ps %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x75,0xa6,0xc2]
; CHECK-FMA-NEXT: # ymm0 = (ymm1 * ymm0) +/- ymm2
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_mm256_fmaddsub_ps:
; CHECK-AVX512VL: # %bb.0: # %entry
; CHECK-AVX512VL-NEXT: vfmaddsub213ps %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0xa6,0xc2]
; CHECK-AVX512VL-NEXT: # ymm0 = (ymm1 * ymm0) +/- ymm2
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_mm256_fmaddsub_ps:
; CHECK-FMA-WIN: # %bb.0: # %entry
; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %ymm1 # encoding: [0xc5,0xfc,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovaps (%rdx), %ymm0 # encoding: [0xc5,0xfc,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfmaddsub213ps (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0x75,0xa6,0x00]
; CHECK-FMA-WIN-NEXT: # ymm0 = (ymm1 * ymm0) +/- mem
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
entry:
  %0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c) #2
  %1 = fsub <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %c
  %2 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %1) #2
  %3 = shufflevector <8 x float> %2, <8 x float> %0, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
  ret <8 x float> %3
}
; Double-precision fmaddsub: shuffle of sub-form (even lanes) and add-form
; (odd lanes) fma results must fold to one vfmaddsub213pd.
define <4 x double> @test_mm256_fmaddsub_pd(<4 x double> %a, <4 x double> %b, <4 x double> %c) {
; CHECK-FMA-LABEL: test_mm256_fmaddsub_pd:
; CHECK-FMA: # %bb.0: # %entry
; CHECK-FMA-NEXT: vfmaddsub213pd %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0xf5,0xa6,0xc2]
; CHECK-FMA-NEXT: # ymm0 = (ymm1 * ymm0) +/- ymm2
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_mm256_fmaddsub_pd:
; CHECK-AVX512VL: # %bb.0: # %entry
; CHECK-AVX512VL-NEXT: vfmaddsub213pd %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf5,0xa6,0xc2]
; CHECK-AVX512VL-NEXT: # ymm0 = (ymm1 * ymm0) +/- ymm2
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_mm256_fmaddsub_pd:
; CHECK-FMA-WIN: # %bb.0: # %entry
; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %ymm1 # encoding: [0xc5,0xfd,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovapd (%rdx), %ymm0 # encoding: [0xc5,0xfd,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfmaddsub213pd (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0xf5,0xa6,0x00]
; CHECK-FMA-WIN-NEXT: # ymm0 = (ymm1 * ymm0) +/- mem
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
entry:
  %0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %c) #2
  %1 = fsub <4 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %c
  %2 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %1) #2
  %3 = shufflevector <4 x double> %2, <4 x double> %0, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
  ret <4 x double> %3
}
; fmsubadd pattern: the shuffle takes even lanes from the add-form fma and
; odd lanes from the sub-form fma, so it must fold to one vfmsubadd213ps.
define <8 x float> @test_mm256_fmsubadd_ps(<8 x float> %a, <8 x float> %b, <8 x float> %c) {
; CHECK-FMA-LABEL: test_mm256_fmsubadd_ps:
; CHECK-FMA: # %bb.0: # %entry
; CHECK-FMA-NEXT: vfmsubadd213ps %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x75,0xa7,0xc2]
; CHECK-FMA-NEXT: # ymm0 = (ymm1 * ymm0) -/+ ymm2
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_mm256_fmsubadd_ps:
; CHECK-AVX512VL: # %bb.0: # %entry
; CHECK-AVX512VL-NEXT: vfmsubadd213ps %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0xa7,0xc2]
; CHECK-AVX512VL-NEXT: # ymm0 = (ymm1 * ymm0) -/+ ymm2
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_mm256_fmsubadd_ps:
; CHECK-FMA-WIN: # %bb.0: # %entry
; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %ymm1 # encoding: [0xc5,0xfc,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovaps (%rdx), %ymm0 # encoding: [0xc5,0xfc,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfmsubadd213ps (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0x75,0xa7,0x00]
; CHECK-FMA-WIN-NEXT: # ymm0 = (ymm1 * ymm0) -/+ mem
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
entry:
  %sub.i = fsub <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %c
  %0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %sub.i) #2
  %1 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c) #2
  %2 = shufflevector <8 x float> %1, <8 x float> %0, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
  ret <8 x float> %2
}
; Double-precision fmsubadd: add-form fma in even lanes, sub-form in odd
; lanes, folded to one vfmsubadd213pd.
define <4 x double> @test_mm256_fmsubadd_pd(<4 x double> %a, <4 x double> %b, <4 x double> %c) {
; CHECK-FMA-LABEL: test_mm256_fmsubadd_pd:
; CHECK-FMA: # %bb.0: # %entry
; CHECK-FMA-NEXT: vfmsubadd213pd %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0xf5,0xa7,0xc2]
; CHECK-FMA-NEXT: # ymm0 = (ymm1 * ymm0) -/+ ymm2
; CHECK-FMA-NEXT: retq # encoding: [0xc3]
;
; CHECK-AVX512VL-LABEL: test_mm256_fmsubadd_pd:
; CHECK-AVX512VL: # %bb.0: # %entry
; CHECK-AVX512VL-NEXT: vfmsubadd213pd %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf5,0xa7,0xc2]
; CHECK-AVX512VL-NEXT: # ymm0 = (ymm1 * ymm0) -/+ ymm2
; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
;
; CHECK-FMA-WIN-LABEL: test_mm256_fmsubadd_pd:
; CHECK-FMA-WIN: # %bb.0: # %entry
; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %ymm1 # encoding: [0xc5,0xfd,0x28,0x09]
; CHECK-FMA-WIN-NEXT: vmovapd (%rdx), %ymm0 # encoding: [0xc5,0xfd,0x28,0x02]
; CHECK-FMA-WIN-NEXT: vfmsubadd213pd (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0xf5,0xa7,0x00]
; CHECK-FMA-WIN-NEXT: # ymm0 = (ymm1 * ymm0) -/+ mem
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
entry:
  %sub.i = fsub <4 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %c
  %0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %sub.i) #2
  %1 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %c) #2
  %2 = shufflevector <4 x double> %1, <4 x double> %0, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
  ret <4 x double> %2
}
; External declarations for the generic llvm.fma intrinsic overloads used above.
declare <4 x float> @llvm.fma.v4f32(<4 x float>, <4 x float>, <4 x float>) #1
declare <2 x double> @llvm.fma.v2f64(<2 x double>, <2 x double>, <2 x double>) #1
declare float @llvm.fma.f32(float, float, float) #1
declare double @llvm.fma.f64(double, double, double) #1
declare <8 x float> @llvm.fma.v8f32(<8 x float>, <8 x float>, <8 x float>) #1
declare <4 x double> @llvm.fma.v4f64(<4 x double>, <4 x double>, <4 x double>) #1

; ===========================================================================
; End of previous test file. The section below is a separate test file added
; by this commit (429 lines): fast-isel codegen tests for FMA intrinsics.
; (The "View File" / "@ -0,0 +1,429 @@" lines here were web-diff artifacts.)
; ===========================================================================
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+fma,-fma4 | FileCheck %s --check-prefix=CHECK
; Plain packed fmadd: llvm.fma lowers directly to vfmadd213ps under fast-isel.
define <4 x float> @test_mm_fmadd_ps(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
; CHECK-LABEL: test_mm_fmadd_ps:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vfmadd213ps {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2
; CHECK-NEXT: retq
entry:
  %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c) #2
  ret <4 x float> %0
}
; Double-precision packed fmadd: lowers to vfmadd213pd.
define <2 x double> @test_mm_fmadd_pd(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
; CHECK-LABEL: test_mm_fmadd_pd:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vfmadd213pd {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2
; CHECK-NEXT: retq
entry:
  %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %c) #2
  ret <2 x double> %0
}
; Scalar fmadd: extract lane 0 of each operand, scalar llvm.fma, reinsert
; into %a. Must select the scalar form vfmadd213ss.
define <4 x float> @test_mm_fmadd_ss(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
; CHECK-LABEL: test_mm_fmadd_ss:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vfmadd213ss {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2
; CHECK-NEXT: retq
entry:
  %0 = extractelement <4 x float> %a, i64 0
  %1 = extractelement <4 x float> %b, i64 0
  %2 = extractelement <4 x float> %c, i64 0
  %3 = tail call float @llvm.fma.f32(float %0, float %1, float %2) #2
  %4 = insertelement <4 x float> %a, float %3, i64 0
  ret <4 x float> %4
}
; Scalar double fmadd via extract/fma/insert; selects vfmadd213sd.
define <2 x double> @test_mm_fmadd_sd(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
; CHECK-LABEL: test_mm_fmadd_sd:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vfmadd213sd {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2
; CHECK-NEXT: retq
entry:
  %0 = extractelement <2 x double> %a, i64 0
  %1 = extractelement <2 x double> %b, i64 0
  %2 = extractelement <2 x double> %c, i64 0
  %3 = tail call double @llvm.fma.f64(double %0, double %1, double %2) #2
  %4 = insertelement <2 x double> %a, double %3, i64 0
  ret <2 x double> %4
}
; Packed fmsub under fast-isel: the negation of c is NOT folded here; an
; explicit vxorps of the sign bits precedes a plain vfmadd213ps.
define <4 x float> @test_mm_fmsub_ps(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
; CHECK-LABEL: test_mm_fmsub_ps:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vxorps {{.*}}(%rip), %xmm2, %xmm2
; CHECK-NEXT: vfmadd213ps {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2
; CHECK-NEXT: retq
entry:
  %sub.i = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %c
  %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %sub.i) #2
  ret <4 x float> %0
}
; Double-precision packed fmsub under fast-isel: explicit vxorpd + vfmadd213pd.
define <2 x double> @test_mm_fmsub_pd(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
; CHECK-LABEL: test_mm_fmsub_pd:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vxorpd {{.*}}(%rip), %xmm2, %xmm2
; CHECK-NEXT: vfmadd213pd {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2
; CHECK-NEXT: retq
entry:
  %sub.i = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %c
  %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %sub.i) #2
  ret <2 x double> %0
}
; Scalar fmsub: the scalar fsub-negation of c does fold, selecting vfmsub213ss.
define <4 x float> @test_mm_fmsub_ss(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
; CHECK-LABEL: test_mm_fmsub_ss:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vfmsub213ss {{.*#+}} xmm0 = (xmm1 * xmm0) - xmm2
; CHECK-NEXT: retq
entry:
  %0 = extractelement <4 x float> %a, i64 0
  %1 = extractelement <4 x float> %b, i64 0
  %.rhs.i = extractelement <4 x float> %c, i64 0
  %2 = fsub float -0.000000e+00, %.rhs.i
  %3 = tail call float @llvm.fma.f32(float %0, float %1, float %2) #2
  %4 = insertelement <4 x float> %a, float %3, i64 0
  ret <4 x float> %4
}
; Scalar double fmsub: scalar negation folds into vfmsub213sd.
define <2 x double> @test_mm_fmsub_sd(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
; CHECK-LABEL: test_mm_fmsub_sd:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vfmsub213sd {{.*#+}} xmm0 = (xmm1 * xmm0) - xmm2
; CHECK-NEXT: retq
entry:
  %0 = extractelement <2 x double> %a, i64 0
  %1 = extractelement <2 x double> %b, i64 0
  %.rhs.i = extractelement <2 x double> %c, i64 0
  %2 = fsub double -0.000000e+00, %.rhs.i
  %3 = tail call double @llvm.fma.f64(double %0, double %1, double %2) #2
  %4 = insertelement <2 x double> %a, double %3, i64 0
  ret <2 x double> %4
}
; Packed fnmadd under fast-isel: negation of a stays as an explicit vxorps,
; followed by a plain vfmadd213ps.
define <4 x float> @test_mm_fnmadd_ps(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
; CHECK-LABEL: test_mm_fnmadd_ps:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vxorps {{.*}}(%rip), %xmm0, %xmm0
; CHECK-NEXT: vfmadd213ps {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2
; CHECK-NEXT: retq
entry:
  %sub.i = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %a
  %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %sub.i, <4 x float> %b, <4 x float> %c) #2
  ret <4 x float> %0
}
; Double-precision packed fnmadd under fast-isel: vxorpd + vfmadd213pd.
define <2 x double> @test_mm_fnmadd_pd(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
; CHECK-LABEL: test_mm_fnmadd_pd:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vxorpd {{.*}}(%rip), %xmm0, %xmm0
; CHECK-NEXT: vfmadd213pd {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2
; CHECK-NEXT: retq
entry:
  %sub.i = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %a
  %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %sub.i, <2 x double> %b, <2 x double> %c) #2
  ret <2 x double> %0
}
; Scalar fnmadd: scalar negation of b folds, selecting vfnmadd213ss.
define <4 x float> @test_mm_fnmadd_ss(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
; CHECK-LABEL: test_mm_fnmadd_ss:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vfnmadd213ss {{.*#+}} xmm0 = -(xmm1 * xmm0) + xmm2
; CHECK-NEXT: retq
entry:
  %0 = extractelement <4 x float> %a, i64 0
  %.rhs.i = extractelement <4 x float> %b, i64 0
  %1 = fsub float -0.000000e+00, %.rhs.i
  %2 = extractelement <4 x float> %c, i64 0
  %3 = tail call float @llvm.fma.f32(float %0, float %1, float %2) #2
  %4 = insertelement <4 x float> %a, float %3, i64 0
  ret <4 x float> %4
}
; Scalar double fnmadd: scalar negation folds into vfnmadd213sd.
define <2 x double> @test_mm_fnmadd_sd(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
; CHECK-LABEL: test_mm_fnmadd_sd:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vfnmadd213sd {{.*#+}} xmm0 = -(xmm1 * xmm0) + xmm2
; CHECK-NEXT: retq
entry:
  %0 = extractelement <2 x double> %a, i64 0
  %.rhs.i = extractelement <2 x double> %b, i64 0
  %1 = fsub double -0.000000e+00, %.rhs.i
  %2 = extractelement <2 x double> %c, i64 0
  %3 = tail call double @llvm.fma.f64(double %0, double %1, double %2) #2
  %4 = insertelement <2 x double> %a, double %3, i64 0
  ret <2 x double> %4
}
; Packed fnmsub under fast-isel: both negations stay as explicit xors of a
; shared sign-mask constant, followed by vfmadd231ps.
define <4 x float> @test_mm_fnmsub_ps(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
; CHECK-LABEL: test_mm_fnmsub_ps:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmovaps {{.*#+}} xmm3 = [-0.000000e+00,-0.000000e+00,-0.000000e+00,-0.000000e+00]
; CHECK-NEXT: vxorps %xmm3, %xmm0, %xmm4
; CHECK-NEXT: vxorps %xmm3, %xmm2, %xmm0
; CHECK-NEXT: vfmadd231ps {{.*#+}} xmm0 = (xmm1 * xmm4) + xmm0
; CHECK-NEXT: retq
entry:
  %sub.i = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %a
  %sub1.i = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %c
  %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %sub.i, <4 x float> %b, <4 x float> %sub1.i) #2
  ret <4 x float> %0
}
; Double-precision packed fnmsub under fast-isel: explicit xors + vfmadd231pd.
define <2 x double> @test_mm_fnmsub_pd(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
; CHECK-LABEL: test_mm_fnmsub_pd:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmovapd {{.*#+}} xmm3 = [-0.000000e+00,-0.000000e+00]
; CHECK-NEXT: vxorpd %xmm3, %xmm0, %xmm4
; CHECK-NEXT: vxorpd %xmm3, %xmm2, %xmm0
; CHECK-NEXT: vfmadd231pd {{.*#+}} xmm0 = (xmm1 * xmm4) + xmm0
; CHECK-NEXT: retq
entry:
  %sub.i = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %a
  %sub1.i = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %c
  %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %sub.i, <2 x double> %b, <2 x double> %sub1.i) #2
  ret <2 x double> %0
}
; Scalar fnmsub: both scalar negations fold, selecting vfnmsub213ss.
define <4 x float> @test_mm_fnmsub_ss(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
; CHECK-LABEL: test_mm_fnmsub_ss:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vfnmsub213ss {{.*#+}} xmm0 = -(xmm1 * xmm0) - xmm2
; CHECK-NEXT: retq
entry:
  %0 = extractelement <4 x float> %a, i64 0
  %.rhs.i = extractelement <4 x float> %b, i64 0
  %1 = fsub float -0.000000e+00, %.rhs.i
  %.rhs2.i = extractelement <4 x float> %c, i64 0
  %2 = fsub float -0.000000e+00, %.rhs2.i
  %3 = tail call float @llvm.fma.f32(float %0, float %1, float %2) #2
  %4 = insertelement <4 x float> %a, float %3, i64 0
  ret <4 x float> %4
}
; Scalar double fnmsub: both scalar negations fold into vfnmsub213sd.
define <2 x double> @test_mm_fnmsub_sd(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
; CHECK-LABEL: test_mm_fnmsub_sd:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vfnmsub213sd {{.*#+}} xmm0 = -(xmm1 * xmm0) - xmm2
; CHECK-NEXT: retq
entry:
  %0 = extractelement <2 x double> %a, i64 0
  %.rhs.i = extractelement <2 x double> %b, i64 0
  %1 = fsub double -0.000000e+00, %.rhs.i
  %.rhs2.i = extractelement <2 x double> %c, i64 0
  %2 = fsub double -0.000000e+00, %.rhs2.i
  %3 = tail call double @llvm.fma.f64(double %0, double %1, double %2) #2
  %4 = insertelement <2 x double> %a, double %3, i64 0
  ret <2 x double> %4
}
; fmaddsub blend (sub-form in even lanes, add-form in odd lanes) must fold
; to a single vfmaddsub213ps — exercises the new fmaddsub folding.
define <4 x float> @test_mm_fmaddsub_ps(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
; CHECK-LABEL: test_mm_fmaddsub_ps:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vfmaddsub213ps {{.*#+}} xmm0 = (xmm1 * xmm0) +/- xmm2
; CHECK-NEXT: retq
entry:
  %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c) #2
  %1 = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %c
  %2 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %1) #2
  %3 = shufflevector <4 x float> %2, <4 x float> %0, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
  ret <4 x float> %3
}
; Double-precision fmaddsub blend folds to one vfmaddsub213pd.
define <2 x double> @test_mm_fmaddsub_pd(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
; CHECK-LABEL: test_mm_fmaddsub_pd:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vfmaddsub213pd {{.*#+}} xmm0 = (xmm1 * xmm0) +/- xmm2
; CHECK-NEXT: retq
entry:
  %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %c) #2
  %1 = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %c
  %2 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %1) #2
  %3 = shufflevector <2 x double> %2, <2 x double> %0, <2 x i32> <i32 0, i32 3>
  ret <2 x double> %3
}
; fmsubadd blend (add-form in even lanes, sub-form in odd lanes) must fold
; to a single vfmsubadd213ps — exercises the new fmsubadd folding.
define <4 x float> @test_mm_fmsubadd_ps(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
; CHECK-LABEL: test_mm_fmsubadd_ps:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vfmsubadd213ps {{.*#+}} xmm0 = (xmm1 * xmm0) -/+ xmm2
; CHECK-NEXT: retq
entry:
  %sub.i = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %c
  %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %sub.i) #2
  %1 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c) #2
  %2 = shufflevector <4 x float> %1, <4 x float> %0, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
  ret <4 x float> %2
}
; Double-precision fmsubadd blend folds to one vfmsubadd213pd.
define <2 x double> @test_mm_fmsubadd_pd(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
; CHECK-LABEL: test_mm_fmsubadd_pd:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vfmsubadd213pd {{.*#+}} xmm0 = (xmm1 * xmm0) -/+ xmm2
; CHECK-NEXT: retq
entry:
  %sub.i = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %c
  %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %sub.i) #2
  %1 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %c) #2
  %2 = shufflevector <2 x double> %1, <2 x double> %0, <2 x i32> <i32 0, i32 3>
  ret <2 x double> %2
}
; 256-bit packed fmadd: llvm.fma lowers directly to vfmadd213ps (ymm).
define <8 x float> @test_mm256_fmadd_ps(<8 x float> %a, <8 x float> %b, <8 x float> %c) {
; CHECK-LABEL: test_mm256_fmadd_ps:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vfmadd213ps {{.*#+}} ymm0 = (ymm1 * ymm0) + ymm2
; CHECK-NEXT: retq
entry:
  %0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c) #2
  ret <8 x float> %0
}
; 256-bit double-precision fmadd: lowers to vfmadd213pd (ymm).
define <4 x double> @test_mm256_fmadd_pd(<4 x double> %a, <4 x double> %b, <4 x double> %c) {
; CHECK-LABEL: test_mm256_fmadd_pd:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vfmadd213pd {{.*#+}} ymm0 = (ymm1 * ymm0) + ymm2
; CHECK-NEXT: retq
entry:
  %0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %c) #2
  ret <4 x double> %0
}
; 256-bit fmsub under fast-isel: unfused — explicit vxorps then vfmadd213ps.
define <8 x float> @test_mm256_fmsub_ps(<8 x float> %a, <8 x float> %b, <8 x float> %c) {
; CHECK-LABEL: test_mm256_fmsub_ps:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vxorps {{.*}}(%rip), %ymm2, %ymm2
; CHECK-NEXT: vfmadd213ps {{.*#+}} ymm0 = (ymm1 * ymm0) + ymm2
; CHECK-NEXT: retq
entry:
  %sub.i = fsub <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %c
  %0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %sub.i) #2
  ret <8 x float> %0
}
; 256-bit double fmsub under fast-isel: explicit vxorpd then vfmadd213pd.
define <4 x double> @test_mm256_fmsub_pd(<4 x double> %a, <4 x double> %b, <4 x double> %c) {
; CHECK-LABEL: test_mm256_fmsub_pd:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vxorpd {{.*}}(%rip), %ymm2, %ymm2
; CHECK-NEXT: vfmadd213pd {{.*#+}} ymm0 = (ymm1 * ymm0) + ymm2
; CHECK-NEXT: retq
entry:
  %sub.i = fsub <4 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %c
  %0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %sub.i) #2
  ret <4 x double> %0
}
; 256-bit fnmadd under fast-isel: explicit vxorps of a, then vfmadd213ps.
define <8 x float> @test_mm256_fnmadd_ps(<8 x float> %a, <8 x float> %b, <8 x float> %c) {
; CHECK-LABEL: test_mm256_fnmadd_ps:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vxorps {{.*}}(%rip), %ymm0, %ymm0
; CHECK-NEXT: vfmadd213ps {{.*#+}} ymm0 = (ymm1 * ymm0) + ymm2
; CHECK-NEXT: retq
entry:
  %sub.i = fsub <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %a
  %0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %sub.i, <8 x float> %b, <8 x float> %c) #2
  ret <8 x float> %0
}
; 256-bit double fnmadd under fast-isel: explicit vxorpd then vfmadd213pd.
define <4 x double> @test_mm256_fnmadd_pd(<4 x double> %a, <4 x double> %b, <4 x double> %c) {
; CHECK-LABEL: test_mm256_fnmadd_pd:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vxorpd {{.*}}(%rip), %ymm0, %ymm0
; CHECK-NEXT: vfmadd213pd {{.*#+}} ymm0 = (ymm1 * ymm0) + ymm2
; CHECK-NEXT: retq
entry:
  %sub.i = fsub <4 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %a
  %0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %sub.i, <4 x double> %b, <4 x double> %c) #2
  ret <4 x double> %0
}
; 256-bit fnmsub under fast-isel: both negations remain explicit xors of a
; shared sign-mask constant, followed by vfmadd231ps.
define <8 x float> @test_mm256_fnmsub_ps(<8 x float> %a, <8 x float> %b, <8 x float> %c) {
; CHECK-LABEL: test_mm256_fnmsub_ps:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmovaps {{.*#+}} ymm3 = [-0.000000e+00,-0.000000e+00,-0.000000e+00,-0.000000e+00,-0.000000e+00,-0.000000e+00,-0.000000e+00,-0.000000e+00]
; CHECK-NEXT: vxorps %ymm3, %ymm0, %ymm4
; CHECK-NEXT: vxorps %ymm3, %ymm2, %ymm0
; CHECK-NEXT: vfmadd231ps {{.*#+}} ymm0 = (ymm1 * ymm4) + ymm0
; CHECK-NEXT: retq
entry:
  %sub.i = fsub <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %a
  %sub1.i = fsub <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %c
  %0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %sub.i, <8 x float> %b, <8 x float> %sub1.i) #2
  ret <8 x float> %0
}
; 256-bit double fnmsub under fast-isel: explicit xors + vfmadd231pd.
define <4 x double> @test_mm256_fnmsub_pd(<4 x double> %a, <4 x double> %b, <4 x double> %c) {
; CHECK-LABEL: test_mm256_fnmsub_pd:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmovapd {{.*#+}} ymm3 = [-0.000000e+00,-0.000000e+00,-0.000000e+00,-0.000000e+00]
; CHECK-NEXT: vxorpd %ymm3, %ymm0, %ymm4
; CHECK-NEXT: vxorpd %ymm3, %ymm2, %ymm0
; CHECK-NEXT: vfmadd231pd {{.*#+}} ymm0 = (ymm1 * ymm4) + ymm0
; CHECK-NEXT: retq
entry:
  %sub.i = fsub <4 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %a
  %sub1.i = fsub <4 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %c
  %0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %sub.i, <4 x double> %b, <4 x double> %sub1.i) #2
  ret <4 x double> %0
}
; 256-bit fmaddsub blend (sub-form even lanes, add-form odd lanes) folds to
; a single vfmaddsub213ps.
define <8 x float> @test_mm256_fmaddsub_ps(<8 x float> %a, <8 x float> %b, <8 x float> %c) {
; CHECK-LABEL: test_mm256_fmaddsub_ps:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vfmaddsub213ps {{.*#+}} ymm0 = (ymm1 * ymm0) +/- ymm2
; CHECK-NEXT: retq
entry:
  %0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c) #2
  %1 = fsub <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %c
  %2 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %1) #2
  %3 = shufflevector <8 x float> %2, <8 x float> %0, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
  ret <8 x float> %3
}
; 256-bit double fmaddsub blend folds to one vfmaddsub213pd.
define <4 x double> @test_mm256_fmaddsub_pd(<4 x double> %a, <4 x double> %b, <4 x double> %c) {
; CHECK-LABEL: test_mm256_fmaddsub_pd:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vfmaddsub213pd {{.*#+}} ymm0 = (ymm1 * ymm0) +/- ymm2
; CHECK-NEXT: retq
entry:
  %0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %c) #2
  %1 = fsub <4 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %c
  %2 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %1) #2
  %3 = shufflevector <4 x double> %2, <4 x double> %0, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
  ret <4 x double> %3
}
; 256-bit fmsubadd blend (add-form even lanes, sub-form odd lanes) folds to
; a single vfmsubadd213ps.
define <8 x float> @test_mm256_fmsubadd_ps(<8 x float> %a, <8 x float> %b, <8 x float> %c) {
; CHECK-LABEL: test_mm256_fmsubadd_ps:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vfmsubadd213ps {{.*#+}} ymm0 = (ymm1 * ymm0) -/+ ymm2
; CHECK-NEXT: retq
entry:
  %sub.i = fsub <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %c
  %0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %sub.i) #2
  %1 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c) #2
  %2 = shufflevector <8 x float> %1, <8 x float> %0, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
  ret <8 x float> %2
}
; 256-bit double fmsubadd blend folds to one vfmsubadd213pd.
define <4 x double> @test_mm256_fmsubadd_pd(<4 x double> %a, <4 x double> %b, <4 x double> %c) {
; CHECK-LABEL: test_mm256_fmsubadd_pd:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vfmsubadd213pd {{.*#+}} ymm0 = (ymm1 * ymm0) -/+ ymm2
; CHECK-NEXT: retq
entry:
  %sub.i = fsub <4 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %c
  %0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %sub.i) #2
  %1 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %c) #2
  %2 = shufflevector <4 x double> %1, <4 x double> %0, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
  ret <4 x double> %2
}
; External declarations for the generic llvm.fma intrinsic overloads used above.
declare <4 x float> @llvm.fma.v4f32(<4 x float>, <4 x float>, <4 x float>) #1
declare <2 x double> @llvm.fma.v2f64(<2 x double>, <2 x double>, <2 x double>) #1
declare float @llvm.fma.f32(float, float, float) #1
declare double @llvm.fma.f64(double, double, double) #1
declare <8 x float> @llvm.fma.v8f32(<8 x float>, <8 x float>, <8 x float>) #1
declare <4 x double> @llvm.fma.v4f64(<4 x double>, <4 x double>, <4 x double>) #1