; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512vl --show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl --show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64

; NOTE: This should use IR equivalent to what is generated by clang/test/CodeGen/avx512vl-builtins.c

define <2 x double> @test_mm_mask_fmadd_pd(<2 x double> %__A, i8 zeroext %__U, <2 x double> %__B, <2 x double> %__C) {
; X86-LABEL: test_mm_mask_fmadd_pd:
; X86: # %bb.0: # %entry
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT: vfmadd132pd %xmm1, %xmm2, %xmm0 {%k1} # encoding: [0x62,0xf2,0xed,0x09,0x98,0xc1]
; X86-NEXT: # xmm0 = (xmm0 * xmm1) + xmm2
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_mm_mask_fmadd_pd:
; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT: vfmadd132pd %xmm1, %xmm2, %xmm0 {%k1} # encoding: [0x62,0xf2,0xed,0x09,0x98,0xc1]
; X64-NEXT: # xmm0 = (xmm0 * xmm1) + xmm2
; X64-NEXT: retq # encoding: [0xc3]
entry:
  %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %__A, <2 x double> %__B, <2 x double> %__C) #9
  %1 = bitcast i8 %__U to <8 x i1>
  %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
  %2 = select <2 x i1> %extract.i, <2 x double> %0, <2 x double> %__A
  ret <2 x double> %2
}

define <2 x double> @test_mm_mask_fmsub_pd(<2 x double> %__A, i8 zeroext %__U, <2 x double> %__B, <2 x double> %__C) {
; X86-LABEL: test_mm_mask_fmsub_pd:
; X86: # %bb.0: # %entry
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT: vfmsub132pd %xmm1, %xmm2, %xmm0 {%k1} # encoding: [0x62,0xf2,0xed,0x09,0x9a,0xc1]
; X86-NEXT: # xmm0 = (xmm0 * xmm1) - xmm2
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_mm_mask_fmsub_pd:
; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT: vfmsub132pd %xmm1, %xmm2, %xmm0 {%k1} # encoding: [0x62,0xf2,0xed,0x09,0x9a,0xc1]
; X64-NEXT: # xmm0 = (xmm0 * xmm1) - xmm2
; X64-NEXT: retq # encoding: [0xc3]
entry:
  %sub.i = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %__C
  %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %__A, <2 x double> %__B, <2 x double> %sub.i) #9
  %1 = bitcast i8 %__U to <8 x i1>
  %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
  %2 = select <2 x i1> %extract.i, <2 x double> %0, <2 x double> %__A
  ret <2 x double> %2
}

define <2 x double> @test_mm_mask3_fmadd_pd(<2 x double> %__A, <2 x double> %__B, <2 x double> %__C, i8 zeroext %__U) {
; X86-LABEL: test_mm_mask3_fmadd_pd:
; X86: # %bb.0: # %entry
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT: vfmadd231pd %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x09,0xb8,0xd1]
; X86-NEXT: # xmm2 = (xmm0 * xmm1) + xmm2
; X86-NEXT: vmovapd %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xc2]
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_mm_mask3_fmadd_pd:
; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT: vfmadd231pd %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x09,0xb8,0xd1]
; X64-NEXT: # xmm2 = (xmm0 * xmm1) + xmm2
; X64-NEXT: vmovapd %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xc2]
; X64-NEXT: retq # encoding: [0xc3]
entry:
  %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %__A, <2 x double> %__B, <2 x double> %__C) #9
  %1 = bitcast i8 %__U to <8 x i1>
  %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
  %2 = select <2 x i1> %extract.i, <2 x double> %0, <2 x double> %__C
  ret <2 x double> %2
}

define <2 x double> @test_mm_mask3_fnmadd_pd(<2 x double> %__A, <2 x double> %__B, <2 x double> %__C, i8 zeroext %__U) {
; X86-LABEL: test_mm_mask3_fnmadd_pd:
; X86: # %bb.0: # %entry
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT: vfnmadd231pd %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x09,0xbc,0xd1]
; X86-NEXT: # xmm2 = -(xmm0 * xmm1) + xmm2
; X86-NEXT: vmovapd %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xc2]
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_mm_mask3_fnmadd_pd:
; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT: vfnmadd231pd %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x09,0xbc,0xd1]
; X64-NEXT: # xmm2 = -(xmm0 * xmm1) + xmm2
; X64-NEXT: vmovapd %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xc2]
; X64-NEXT: retq # encoding: [0xc3]
entry:
  %sub.i = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %__A
  %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %sub.i, <2 x double> %__B, <2 x double> %__C) #9
  %1 = bitcast i8 %__U to <8 x i1>
  %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
  %2 = select <2 x i1> %extract.i, <2 x double> %0, <2 x double> %__C
  ret <2 x double> %2
}

define <2 x double> @test_mm_maskz_fmadd_pd(i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B, <2 x double> %__C) {
; X86-LABEL: test_mm_maskz_fmadd_pd:
; X86: # %bb.0: # %entry
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT: vfmadd213pd %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0x89,0xa8,0xc2]
; X86-NEXT: # xmm0 = (xmm1 * xmm0) + xmm2
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_mm_maskz_fmadd_pd:
; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT: vfmadd213pd %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0x89,0xa8,0xc2]
; X64-NEXT: # xmm0 = (xmm1 * xmm0) + xmm2
; X64-NEXT: retq # encoding: [0xc3]
entry:
  %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %__A, <2 x double> %__B, <2 x double> %__C) #9
  %1 = bitcast i8 %__U to <8 x i1>
  %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
  %2 = select <2 x i1> %extract.i, <2 x double> %0, <2 x double> zeroinitializer
  ret <2 x double> %2
}

define <2 x double> @test_mm_maskz_fmsub_pd(i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B, <2 x double> %__C) {
; X86-LABEL: test_mm_maskz_fmsub_pd:
; X86: # %bb.0: # %entry
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT: vfmsub213pd %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0x89,0xaa,0xc2]
; X86-NEXT: # xmm0 = (xmm1 * xmm0) - xmm2
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_mm_maskz_fmsub_pd:
; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT: vfmsub213pd %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0x89,0xaa,0xc2]
; X64-NEXT: # xmm0 = (xmm1 * xmm0) - xmm2
; X64-NEXT: retq # encoding: [0xc3]
entry:
  %sub.i = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %__C
  %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %__A, <2 x double> %__B, <2 x double> %sub.i) #9
  %1 = bitcast i8 %__U to <8 x i1>
  %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
  %2 = select <2 x i1> %extract.i, <2 x double> %0, <2 x double> zeroinitializer
  ret <2 x double> %2
}

define <2 x double> @test_mm_maskz_fnmadd_pd(i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B, <2 x double> %__C) {
; X86-LABEL: test_mm_maskz_fnmadd_pd:
; X86: # %bb.0: # %entry
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT: vfnmadd213pd %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0x89,0xac,0xc2]
; X86-NEXT: # xmm0 = -(xmm1 * xmm0) + xmm2
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_mm_maskz_fnmadd_pd:
; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT: vfnmadd213pd %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0x89,0xac,0xc2]
; X64-NEXT: # xmm0 = -(xmm1 * xmm0) + xmm2
; X64-NEXT: retq # encoding: [0xc3]
entry:
  %sub.i = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %__A
  %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %sub.i, <2 x double> %__B, <2 x double> %__C) #9
  %1 = bitcast i8 %__U to <8 x i1>
  %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
  %2 = select <2 x i1> %extract.i, <2 x double> %0, <2 x double> zeroinitializer
  ret <2 x double> %2
}

define <2 x double> @test_mm_maskz_fnmsub_pd(i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B, <2 x double> %__C) {
; X86-LABEL: test_mm_maskz_fnmsub_pd:
; X86: # %bb.0: # %entry
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT: vfnmsub213pd %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0x89,0xae,0xc2]
; X86-NEXT: # xmm0 = -(xmm1 * xmm0) - xmm2
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_mm_maskz_fnmsub_pd:
; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT: vfnmsub213pd %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0x89,0xae,0xc2]
; X64-NEXT: # xmm0 = -(xmm1 * xmm0) - xmm2
; X64-NEXT: retq # encoding: [0xc3]
entry:
  %sub.i = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %__A
  %sub1.i = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %__C
  %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %sub.i, <2 x double> %__B, <2 x double> %sub1.i) #9
  %1 = bitcast i8 %__U to <8 x i1>
  %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
  %2 = select <2 x i1> %extract.i, <2 x double> %0, <2 x double> zeroinitializer
  ret <2 x double> %2
}

define <4 x double> @test_mm256_mask_fmadd_pd(<4 x double> %__A, i8 zeroext %__U, <4 x double> %__B, <4 x double> %__C) {
; X86-LABEL: test_mm256_mask_fmadd_pd:
; X86: # %bb.0: # %entry
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT: vfmadd132pd %ymm1, %ymm2, %ymm0 {%k1} # encoding: [0x62,0xf2,0xed,0x29,0x98,0xc1]
; X86-NEXT: # ymm0 = (ymm0 * ymm1) + ymm2
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_mm256_mask_fmadd_pd:
; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT: vfmadd132pd %ymm1, %ymm2, %ymm0 {%k1} # encoding: [0x62,0xf2,0xed,0x29,0x98,0xc1]
; X64-NEXT: # ymm0 = (ymm0 * ymm1) + ymm2
; X64-NEXT: retq # encoding: [0xc3]
entry:
  %0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %__A, <4 x double> %__B, <4 x double> %__C) #9
  %1 = bitcast i8 %__U to <8 x i1>
  %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %2 = select <4 x i1> %extract.i, <4 x double> %0, <4 x double> %__A
  ret <4 x double> %2
}

define <4 x double> @test_mm256_mask_fmsub_pd(<4 x double> %__A, i8 zeroext %__U, <4 x double> %__B, <4 x double> %__C) {
; X86-LABEL: test_mm256_mask_fmsub_pd:
; X86: # %bb.0: # %entry
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT: vfmsub132pd %ymm1, %ymm2, %ymm0 {%k1} # encoding: [0x62,0xf2,0xed,0x29,0x9a,0xc1]
; X86-NEXT: # ymm0 = (ymm0 * ymm1) - ymm2
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_mm256_mask_fmsub_pd:
; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT: vfmsub132pd %ymm1, %ymm2, %ymm0 {%k1} # encoding: [0x62,0xf2,0xed,0x29,0x9a,0xc1]
; X64-NEXT: # ymm0 = (ymm0 * ymm1) - ymm2
; X64-NEXT: retq # encoding: [0xc3]
entry:
  %sub.i = fsub <4 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__C
  %0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %__A, <4 x double> %__B, <4 x double> %sub.i) #9
  %1 = bitcast i8 %__U to <8 x i1>
  %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %2 = select <4 x i1> %extract.i, <4 x double> %0, <4 x double> %__A
  ret <4 x double> %2
}

define <4 x double> @test_mm256_mask3_fmadd_pd(<4 x double> %__A, <4 x double> %__B, <4 x double> %__C, i8 zeroext %__U) {
; X86-LABEL: test_mm256_mask3_fmadd_pd:
; X86: # %bb.0: # %entry
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT: vfmadd231pd %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x29,0xb8,0xd1]
; X86-NEXT: # ymm2 = (ymm0 * ymm1) + ymm2
; X86-NEXT: vmovapd %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xc2]
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_mm256_mask3_fmadd_pd:
; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT: vfmadd231pd %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x29,0xb8,0xd1]
; X64-NEXT: # ymm2 = (ymm0 * ymm1) + ymm2
; X64-NEXT: vmovapd %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xc2]
; X64-NEXT: retq # encoding: [0xc3]
entry:
  %0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %__A, <4 x double> %__B, <4 x double> %__C) #9
  %1 = bitcast i8 %__U to <8 x i1>
  %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %2 = select <4 x i1> %extract.i, <4 x double> %0, <4 x double> %__C
  ret <4 x double> %2
}

define <4 x double> @test_mm256_mask3_fnmadd_pd(<4 x double> %__A, <4 x double> %__B, <4 x double> %__C, i8 zeroext %__U) {
; X86-LABEL: test_mm256_mask3_fnmadd_pd:
; X86: # %bb.0: # %entry
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT: vfnmadd231pd %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x29,0xbc,0xd1]
; X86-NEXT: # ymm2 = -(ymm0 * ymm1) + ymm2
; X86-NEXT: vmovapd %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xc2]
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_mm256_mask3_fnmadd_pd:
; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT: vfnmadd231pd %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x29,0xbc,0xd1]
; X64-NEXT: # ymm2 = -(ymm0 * ymm1) + ymm2
; X64-NEXT: vmovapd %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xc2]
; X64-NEXT: retq # encoding: [0xc3]
entry:
  %sub.i = fsub <4 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__A
  %0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %sub.i, <4 x double> %__B, <4 x double> %__C) #9
  %1 = bitcast i8 %__U to <8 x i1>
  %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %2 = select <4 x i1> %extract.i, <4 x double> %0, <4 x double> %__C
  ret <4 x double> %2
}

define <4 x double> @test_mm256_maskz_fmadd_pd(i8 zeroext %__U, <4 x double> %__A, <4 x double> %__B, <4 x double> %__C) {
; X86-LABEL: test_mm256_maskz_fmadd_pd:
; X86: # %bb.0: # %entry
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT: vfmadd213pd %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0xa9,0xa8,0xc2]
; X86-NEXT: # ymm0 = (ymm1 * ymm0) + ymm2
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_mm256_maskz_fmadd_pd:
; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT: vfmadd213pd %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0xa9,0xa8,0xc2]
; X64-NEXT: # ymm0 = (ymm1 * ymm0) + ymm2
; X64-NEXT: retq # encoding: [0xc3]
entry:
  %0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %__A, <4 x double> %__B, <4 x double> %__C) #9
  %1 = bitcast i8 %__U to <8 x i1>
  %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %2 = select <4 x i1> %extract.i, <4 x double> %0, <4 x double> zeroinitializer
  ret <4 x double> %2
}

define <4 x double> @test_mm256_maskz_fmsub_pd(i8 zeroext %__U, <4 x double> %__A, <4 x double> %__B, <4 x double> %__C) {
; X86-LABEL: test_mm256_maskz_fmsub_pd:
; X86: # %bb.0: # %entry
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT: vfmsub213pd %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0xa9,0xaa,0xc2]
; X86-NEXT: # ymm0 = (ymm1 * ymm0) - ymm2
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_mm256_maskz_fmsub_pd:
; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT: vfmsub213pd %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0xa9,0xaa,0xc2]
; X64-NEXT: # ymm0 = (ymm1 * ymm0) - ymm2
; X64-NEXT: retq # encoding: [0xc3]
entry:
  %sub.i = fsub <4 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__C
  %0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %__A, <4 x double> %__B, <4 x double> %sub.i) #9
  %1 = bitcast i8 %__U to <8 x i1>
  %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %2 = select <4 x i1> %extract.i, <4 x double> %0, <4 x double> zeroinitializer
  ret <4 x double> %2
}

define <4 x double> @test_mm256_maskz_fnmadd_pd(i8 zeroext %__U, <4 x double> %__A, <4 x double> %__B, <4 x double> %__C) {
; X86-LABEL: test_mm256_maskz_fnmadd_pd:
; X86: # %bb.0: # %entry
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT: vfnmadd213pd %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0xa9,0xac,0xc2]
; X86-NEXT: # ymm0 = -(ymm1 * ymm0) + ymm2
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_mm256_maskz_fnmadd_pd:
; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT: vfnmadd213pd %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0xa9,0xac,0xc2]
; X64-NEXT: # ymm0 = -(ymm1 * ymm0) + ymm2
; X64-NEXT: retq # encoding: [0xc3]
entry:
  %sub.i = fsub <4 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__A
  %0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %sub.i, <4 x double> %__B, <4 x double> %__C) #9
  %1 = bitcast i8 %__U to <8 x i1>
  %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %2 = select <4 x i1> %extract.i, <4 x double> %0, <4 x double> zeroinitializer
  ret <4 x double> %2
}

define <4 x double> @test_mm256_maskz_fnmsub_pd(i8 zeroext %__U, <4 x double> %__A, <4 x double> %__B, <4 x double> %__C) {
; X86-LABEL: test_mm256_maskz_fnmsub_pd:
; X86: # %bb.0: # %entry
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT: vfnmsub213pd %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0xa9,0xae,0xc2]
; X86-NEXT: # ymm0 = -(ymm1 * ymm0) - ymm2
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_mm256_maskz_fnmsub_pd:
; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT: vfnmsub213pd %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0xa9,0xae,0xc2]
; X64-NEXT: # ymm0 = -(ymm1 * ymm0) - ymm2
; X64-NEXT: retq # encoding: [0xc3]
entry:
  %sub.i = fsub <4 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__A
  %sub1.i = fsub <4 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__C
  %0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %sub.i, <4 x double> %__B, <4 x double> %sub1.i) #9
  %1 = bitcast i8 %__U to <8 x i1>
  %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %2 = select <4 x i1> %extract.i, <4 x double> %0, <4 x double> zeroinitializer
  ret <4 x double> %2
}

define <4 x float> @test_mm_mask_fmadd_ps(<4 x float> %__A, i8 zeroext %__U, <4 x float> %__B, <4 x float> %__C) {
; X86-LABEL: test_mm_mask_fmadd_ps:
; X86: # %bb.0: # %entry
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT: vfmadd132ps %xmm1, %xmm2, %xmm0 {%k1} # encoding: [0x62,0xf2,0x6d,0x09,0x98,0xc1]
; X86-NEXT: # xmm0 = (xmm0 * xmm1) + xmm2
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_mm_mask_fmadd_ps:
; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT: vfmadd132ps %xmm1, %xmm2, %xmm0 {%k1} # encoding: [0x62,0xf2,0x6d,0x09,0x98,0xc1]
; X64-NEXT: # xmm0 = (xmm0 * xmm1) + xmm2
; X64-NEXT: retq # encoding: [0xc3]
entry:
  %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %__A, <4 x float> %__B, <4 x float> %__C) #9
  %1 = bitcast i8 %__U to <8 x i1>
  %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %2 = select <4 x i1> %extract.i, <4 x float> %0, <4 x float> %__A
  ret <4 x float> %2
}

define <4 x float> @test_mm_mask_fmsub_ps(<4 x float> %__A, i8 zeroext %__U, <4 x float> %__B, <4 x float> %__C) {
; X86-LABEL: test_mm_mask_fmsub_ps:
; X86: # %bb.0: # %entry
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT: vfmsub132ps %xmm1, %xmm2, %xmm0 {%k1} # encoding: [0x62,0xf2,0x6d,0x09,0x9a,0xc1]
; X86-NEXT: # xmm0 = (xmm0 * xmm1) - xmm2
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_mm_mask_fmsub_ps:
; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT: vfmsub132ps %xmm1, %xmm2, %xmm0 {%k1} # encoding: [0x62,0xf2,0x6d,0x09,0x9a,0xc1]
; X64-NEXT: # xmm0 = (xmm0 * xmm1) - xmm2
; X64-NEXT: retq # encoding: [0xc3]
entry:
  %sub.i = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
  %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %__A, <4 x float> %__B, <4 x float> %sub.i) #9
  %1 = bitcast i8 %__U to <8 x i1>
  %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %2 = select <4 x i1> %extract.i, <4 x float> %0, <4 x float> %__A
  ret <4 x float> %2
}

define <4 x float> @test_mm_mask3_fmadd_ps(<4 x float> %__A, <4 x float> %__B, <4 x float> %__C, i8 zeroext %__U) {
; X86-LABEL: test_mm_mask3_fmadd_ps:
; X86: # %bb.0: # %entry
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT: vfmadd231ps %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x09,0xb8,0xd1]
; X86-NEXT: # xmm2 = (xmm0 * xmm1) + xmm2
; X86-NEXT: vmovaps %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2]
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_mm_mask3_fmadd_ps:
; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT: vfmadd231ps %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x09,0xb8,0xd1]
; X64-NEXT: # xmm2 = (xmm0 * xmm1) + xmm2
; X64-NEXT: vmovaps %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2]
; X64-NEXT: retq # encoding: [0xc3]
entry:
  %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %__A, <4 x float> %__B, <4 x float> %__C) #9
  %1 = bitcast i8 %__U to <8 x i1>
  %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %2 = select <4 x i1> %extract.i, <4 x float> %0, <4 x float> %__C
  ret <4 x float> %2
}

define <4 x float> @test_mm_mask3_fnmadd_ps(<4 x float> %__A, <4 x float> %__B, <4 x float> %__C, i8 zeroext %__U) {
; X86-LABEL: test_mm_mask3_fnmadd_ps:
; X86: # %bb.0: # %entry
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT: vfnmadd231ps %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x09,0xbc,0xd1]
; X86-NEXT: # xmm2 = -(xmm0 * xmm1) + xmm2
; X86-NEXT: vmovaps %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2]
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_mm_mask3_fnmadd_ps:
; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT: vfnmadd231ps %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x09,0xbc,0xd1]
; X64-NEXT: # xmm2 = -(xmm0 * xmm1) + xmm2
; X64-NEXT: vmovaps %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2]
; X64-NEXT: retq # encoding: [0xc3]
entry:
  %sub.i = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__A
  %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %sub.i, <4 x float> %__B, <4 x float> %__C) #9
  %1 = bitcast i8 %__U to <8 x i1>
  %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %2 = select <4 x i1> %extract.i, <4 x float> %0, <4 x float> %__C
  ret <4 x float> %2
}

define <4 x float> @test_mm_maskz_fmadd_ps(i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B, <4 x float> %__C) {
; X86-LABEL: test_mm_maskz_fmadd_ps:
; X86: # %bb.0: # %entry
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT: vfmadd213ps %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x75,0x89,0xa8,0xc2]
; X86-NEXT: # xmm0 = (xmm1 * xmm0) + xmm2
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_mm_maskz_fmadd_ps:
; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT: vfmadd213ps %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x75,0x89,0xa8,0xc2]
; X64-NEXT: # xmm0 = (xmm1 * xmm0) + xmm2
; X64-NEXT: retq # encoding: [0xc3]
entry:
  %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %__A, <4 x float> %__B, <4 x float> %__C) #9
  %1 = bitcast i8 %__U to <8 x i1>
  %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %2 = select <4 x i1> %extract.i, <4 x float> %0, <4 x float> zeroinitializer
  ret <4 x float> %2
}

define <4 x float> @test_mm_maskz_fmsub_ps(i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B, <4 x float> %__C) {
; X86-LABEL: test_mm_maskz_fmsub_ps:
; X86: # %bb.0: # %entry
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT: vfmsub213ps %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x75,0x89,0xaa,0xc2]
; X86-NEXT: # xmm0 = (xmm1 * xmm0) - xmm2
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_mm_maskz_fmsub_ps:
; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT: vfmsub213ps %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x75,0x89,0xaa,0xc2]
; X64-NEXT: # xmm0 = (xmm1 * xmm0) - xmm2
; X64-NEXT: retq # encoding: [0xc3]
entry:
  %sub.i = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
  %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %__A, <4 x float> %__B, <4 x float> %sub.i) #9
  %1 = bitcast i8 %__U to <8 x i1>
  %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %2 = select <4 x i1> %extract.i, <4 x float> %0, <4 x float> zeroinitializer
  ret <4 x float> %2
}

define <4 x float> @test_mm_maskz_fnmadd_ps(i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B, <4 x float> %__C) {
; X86-LABEL: test_mm_maskz_fnmadd_ps:
; X86: # %bb.0: # %entry
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT: vfnmadd213ps %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x75,0x89,0xac,0xc2]
; X86-NEXT: # xmm0 = -(xmm1 * xmm0) + xmm2
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_mm_maskz_fnmadd_ps:
; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT: vfnmadd213ps %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x75,0x89,0xac,0xc2]
; X64-NEXT: # xmm0 = -(xmm1 * xmm0) + xmm2
; X64-NEXT: retq # encoding: [0xc3]
entry:
  %sub.i = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__A
  %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %sub.i, <4 x float> %__B, <4 x float> %__C) #9
  %1 = bitcast i8 %__U to <8 x i1>
  %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %2 = select <4 x i1> %extract.i, <4 x float> %0, <4 x float> zeroinitializer
  ret <4 x float> %2
}

define <4 x float> @test_mm_maskz_fnmsub_ps(i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B, <4 x float> %__C) {
; X86-LABEL: test_mm_maskz_fnmsub_ps:
; X86: # %bb.0: # %entry
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT: vfnmsub213ps %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x75,0x89,0xae,0xc2]
; X86-NEXT: # xmm0 = -(xmm1 * xmm0) - xmm2
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_mm_maskz_fnmsub_ps:
; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT: vfnmsub213ps %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x75,0x89,0xae,0xc2]
; X64-NEXT: # xmm0 = -(xmm1 * xmm0) - xmm2
; X64-NEXT: retq # encoding: [0xc3]
entry:
  %sub.i = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__A
  %sub1.i = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
  %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %sub.i, <4 x float> %__B, <4 x float> %sub1.i) #9
  %1 = bitcast i8 %__U to <8 x i1>
  %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %2 = select <4 x i1> %extract.i, <4 x float> %0, <4 x float> zeroinitializer
  ret <4 x float> %2
}

define <8 x float> @test_mm256_mask_fmadd_ps(<8 x float> %__A, i8 zeroext %__U, <8 x float> %__B, <8 x float> %__C) {
; X86-LABEL: test_mm256_mask_fmadd_ps:
; X86: # %bb.0: # %entry
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT: vfmadd132ps %ymm1, %ymm2, %ymm0 {%k1} # encoding: [0x62,0xf2,0x6d,0x29,0x98,0xc1]
; X86-NEXT: # ymm0 = (ymm0 * ymm1) + ymm2
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_mm256_mask_fmadd_ps:
; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT: vfmadd132ps %ymm1, %ymm2, %ymm0 {%k1} # encoding: [0x62,0xf2,0x6d,0x29,0x98,0xc1]
; X64-NEXT: # ymm0 = (ymm0 * ymm1) + ymm2
; X64-NEXT: retq # encoding: [0xc3]
entry:
  %0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %__A, <8 x float> %__B, <8 x float> %__C) #9
  %1 = bitcast i8 %__U to <8 x i1>
  %2 = select <8 x i1> %1, <8 x float> %0, <8 x float> %__A
  ret <8 x float> %2
}

define <8 x float> @test_mm256_mask_fmsub_ps(<8 x float> %__A, i8 zeroext %__U, <8 x float> %__B, <8 x float> %__C) {
; X86-LABEL: test_mm256_mask_fmsub_ps:
; X86: # %bb.0: # %entry
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT: vfmsub132ps %ymm1, %ymm2, %ymm0 {%k1} # encoding: [0x62,0xf2,0x6d,0x29,0x9a,0xc1]
; X86-NEXT: # ymm0 = (ymm0 * ymm1) - ymm2
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_mm256_mask_fmsub_ps:
; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT: vfmsub132ps %ymm1, %ymm2, %ymm0 {%k1} # encoding: [0x62,0xf2,0x6d,0x29,0x9a,0xc1]
; X64-NEXT: # ymm0 = (ymm0 * ymm1) - ymm2
; X64-NEXT: retq # encoding: [0xc3]
entry:
  %sub.i = fsub <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
  %0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %__A, <8 x float> %__B, <8 x float> %sub.i) #9
  %1 = bitcast i8 %__U to <8 x i1>
  %2 = select <8 x i1> %1, <8 x float> %0, <8 x float> %__A
  ret <8 x float> %2
}

define <8 x float> @test_mm256_mask3_fmadd_ps(<8 x float> %__A, <8 x float> %__B, <8 x float> %__C, i8 zeroext %__U) {
; X86-LABEL: test_mm256_mask3_fmadd_ps:
; X86: # %bb.0: # %entry
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT: vfmadd231ps %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x29,0xb8,0xd1]
; X86-NEXT: # ymm2 = (ymm0 * ymm1) + ymm2
; X86-NEXT: vmovaps %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc2]
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_mm256_mask3_fmadd_ps:
; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT: vfmadd231ps %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x29,0xb8,0xd1]
; X64-NEXT: # ymm2 = (ymm0 * ymm1) + ymm2
; X64-NEXT: vmovaps %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc2]
; X64-NEXT: retq # encoding: [0xc3]
entry:
  %0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %__A, <8 x float> %__B, <8 x float> %__C) #9
  %1 = bitcast i8 %__U to <8 x i1>
  %2 = select <8 x i1> %1, <8 x float> %0, <8 x float> %__C
  ret <8 x float> %2
}

define <8 x float> @test_mm256_mask3_fnmadd_ps(<8 x float> %__A, <8 x float> %__B, <8 x float> %__C, i8 zeroext %__U) {
; X86-LABEL: test_mm256_mask3_fnmadd_ps:
; X86: # %bb.0: # %entry
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT: vfnmadd231ps %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x29,0xbc,0xd1]
; X86-NEXT: # ymm2 = -(ymm0 * ymm1) + ymm2
; X86-NEXT: vmovaps %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc2]
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_mm256_mask3_fnmadd_ps:
; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT: vfnmadd231ps %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x29,0xbc,0xd1]
; X64-NEXT: # ymm2 = -(ymm0 * ymm1) + ymm2
; X64-NEXT: vmovaps %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc2]
; X64-NEXT: retq # encoding: [0xc3]
entry:
  %sub.i = fsub <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__A
  %0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %sub.i, <8 x float> %__B, <8 x float> %__C) #9
  %1 = bitcast i8 %__U to <8 x i1>
  %2 = select <8 x i1> %1, <8 x float> %0, <8 x float> %__C
  ret <8 x float> %2
}

define <8 x float> @test_mm256_maskz_fmadd_ps(i8 zeroext %__U, <8 x float> %__A, <8 x float> %__B, <8 x float> %__C) {
; X86-LABEL: test_mm256_maskz_fmadd_ps:
; X86: # %bb.0: # %entry
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT: vfmadd213ps %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0x75,0xa9,0xa8,0xc2]
; X86-NEXT: # ymm0 = (ymm1 * ymm0) + ymm2
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_mm256_maskz_fmadd_ps:
; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT: vfmadd213ps %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0x75,0xa9,0xa8,0xc2]
; X64-NEXT: # ymm0 = (ymm1 * ymm0) + ymm2
; X64-NEXT: retq # encoding: [0xc3]
entry:
  %0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %__A, <8 x float> %__B, <8 x float> %__C) #9
  %1 = bitcast i8 %__U to <8 x i1>
  %2 = select <8 x i1> %1, <8 x float> %0, <8 x float> zeroinitializer
  ret <8 x float> %2
}

define <8 x float> @test_mm256_maskz_fmsub_ps(i8 zeroext %__U, <8 x float> %__A, <8 x float> %__B, <8 x float> %__C) {
; X86-LABEL: test_mm256_maskz_fmsub_ps:
; X86: # %bb.0: # %entry
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT: vfmsub213ps %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0x75,0xa9,0xaa,0xc2]
; X86-NEXT: # ymm0 = (ymm1 * ymm0) - ymm2
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_mm256_maskz_fmsub_ps:
; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT: vfmsub213ps %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0x75,0xa9,0xaa,0xc2]
; X64-NEXT: # ymm0 = (ymm1 * ymm0) - ymm2
; X64-NEXT: retq # encoding: [0xc3]
entry:
  %sub.i = fsub <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
  %0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %__A, <8 x float> %__B, <8 x float> %sub.i) #9
  %1 = bitcast i8 %__U to <8 x i1>
  %2 = select <8 x i1> %1, <8 x float> %0, <8 x float> zeroinitializer
  ret <8 x float> %2
}

define <8 x float> @test_mm256_maskz_fnmadd_ps(i8 zeroext %__U, <8 x float> %__A, <8 x float> %__B, <8 x float> %__C) {
; X86-LABEL: test_mm256_maskz_fnmadd_ps:
; X86: # %bb.0: # %entry
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT: vfnmadd213ps %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0x75,0xa9,0xac,0xc2]
; X86-NEXT: # ymm0 = -(ymm1 * ymm0) + ymm2
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_mm256_maskz_fnmadd_ps:
; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT: vfnmadd213ps %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0x75,0xa9,0xac,0xc2]
; X64-NEXT: # ymm0 = -(ymm1 * ymm0) + ymm2
; X64-NEXT: retq # encoding: [0xc3]
entry:
  %sub.i = fsub <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__A
  %0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %sub.i, <8 x float> %__B, <8 x float> %__C) #9
  %1 = bitcast i8 %__U to <8 x i1>
  %2 = select <8 x i1> %1, <8 x float> %0, <8 x float> zeroinitializer
  ret <8 x float> %2
}

define <8 x float> @test_mm256_maskz_fnmsub_ps(i8 zeroext %__U, <8 x float> %__A, <8 x float> %__B, <8 x float> %__C) {
; X86-LABEL: test_mm256_maskz_fnmsub_ps:
; X86: # %bb.0: # %entry
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT: vfnmsub213ps %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0x75,0xa9,0xae,0xc2]
; X86-NEXT: # ymm0 = -(ymm1 * ymm0) - ymm2
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_mm256_maskz_fnmsub_ps:
; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT: vfnmsub213ps %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0x75,0xa9,0xae,0xc2]
; X64-NEXT: # ymm0 = -(ymm1 * ymm0) - ymm2
; X64-NEXT: retq # encoding: [0xc3]
entry:
  %sub.i = fsub <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__A
  %sub1.i = fsub <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
  %0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %sub.i, <8 x float> %__B, <8 x float> %sub1.i) #9
  %1 = bitcast i8 %__U to <8 x i1>
  %2 = select <8 x i1> %1, <8 x float> %0, <8 x float> zeroinitializer
  ret <8 x float> %2
}

define <2 x double> @test_mm_mask_fmaddsub_pd(<2 x double> %__A, i8 zeroext %__U, <2 x double> %__B, <2 x double> %__C) {
; X86-LABEL: test_mm_mask_fmaddsub_pd:
; X86: # %bb.0: # %entry
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT: vfmaddsub132pd %xmm1, %xmm2, %xmm0 {%k1} # encoding: [0x62,0xf2,0xed,0x09,0x96,0xc1]
; X86-NEXT: # xmm0 = (xmm0 * xmm1) +/- xmm2
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_mm_mask_fmaddsub_pd:
; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT: vfmaddsub132pd %xmm1, %xmm2, %xmm0 {%k1} # encoding: [0x62,0xf2,0xed,0x09,0x96,0xc1]
; X64-NEXT: # xmm0 = (xmm0 * xmm1) +/- xmm2
; X64-NEXT: retq # encoding: [0xc3]
entry:
  %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %__A, <2 x double> %__B, <2 x double> %__C) #9
  %1 = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %__C
  %2 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %__A, <2 x double> %__B, <2 x double> %1) #9
  %3 = shufflevector <2 x double> %2, <2 x double> %0, <2 x i32> <i32 0, i32 3>
  %4 = bitcast i8 %__U to <8 x i1>
  %extract.i = shufflevector <8 x i1> %4, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
  %5 = select <2 x i1> %extract.i, <2 x double> %3, <2 x double> %__A
  ret <2 x double> %5
}

define <2 x double> @test_mm_mask_fmsubadd_pd(<2 x double> %__A, i8 zeroext %__U, <2 x double> %__B, <2 x double> %__C) {
; X86-LABEL: test_mm_mask_fmsubadd_pd:
; X86: # %bb.0: # %entry
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT: vfmsubadd132pd %xmm1, %xmm2, %xmm0 {%k1} # encoding: [0x62,0xf2,0xed,0x09,0x97,0xc1]
; X86-NEXT: # xmm0 = (xmm0 * xmm1) -/+ xmm2
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_mm_mask_fmsubadd_pd:
; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT: vfmsubadd132pd %xmm1, %xmm2, %xmm0 {%k1} # encoding: [0x62,0xf2,0xed,0x09,0x97,0xc1]
; X64-NEXT: # xmm0 = (xmm0 * xmm1) -/+ xmm2
; X64-NEXT: retq # encoding: [0xc3]
entry:
  %sub.i = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %__C
  %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %__A, <2 x double> %__B, <2 x double> %sub.i) #9
  %1 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %__A, <2 x double> %__B, <2 x double> %__C) #9
  %2 = shufflevector <2 x double> %1, <2 x double> %0, <2 x i32> <i32 0, i32 3>
  %3 = bitcast i8 %__U to <8 x i1>
  %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
  %4 = select <2 x i1> %extract.i, <2 x double> %2, <2 x double> %__A
  ret <2 x double> %4
}

define <2 x double> @test_mm_mask3_fmaddsub_pd(<2 x double> %__A, <2 x double> %__B, <2 x double> %__C, i8 zeroext %__U) {
; X86-LABEL: test_mm_mask3_fmaddsub_pd:
; X86: # %bb.0: # %entry
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT: vfmaddsub231pd %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x09,0xb6,0xd1]
; X86-NEXT: # xmm2 = (xmm0 * xmm1) +/- xmm2
; X86-NEXT: vmovapd %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xc2]
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_mm_mask3_fmaddsub_pd:
; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT: vfmaddsub231pd %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x09,0xb6,0xd1]
; X64-NEXT: # xmm2 = (xmm0 * xmm1) +/- xmm2
; X64-NEXT: vmovapd %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xc2]
; X64-NEXT: retq # encoding: [0xc3]
entry:
  %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %__A, <2 x double> %__B, <2 x double> %__C) #9
  %1 = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %__C
  %2 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %__A, <2 x double> %__B, <2 x double> %1) #9
  %3 = shufflevector <2 x double> %2, <2 x double> %0, <2 x i32> <i32 0, i32 3>
  %4 = bitcast i8 %__U to <8 x i1>
  %extract.i = shufflevector <8 x i1> %4, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
  %5 = select <2 x i1> %extract.i, <2 x double> %3, <2 x double> %__C
  ret <2 x double> %5
}

define <2 x double> @test_mm_maskz_fmaddsub_pd(i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B, <2 x double> %__C) {
; X86-LABEL: test_mm_maskz_fmaddsub_pd:
; X86: # %bb.0: # %entry
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT: vfmaddsub213pd %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0x89,0xa6,0xc2]
; X86-NEXT: # xmm0 = (xmm1 * xmm0) +/- xmm2
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_mm_maskz_fmaddsub_pd:
; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT: vfmaddsub213pd %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0x89,0xa6,0xc2]
; X64-NEXT: # xmm0 = (xmm1 * xmm0) +/- xmm2
; X64-NEXT: retq # encoding: [0xc3]
entry:
  %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %__A, <2 x double> %__B, <2 x double> %__C) #9
  %1 = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %__C
  %2 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %__A, <2 x double> %__B, <2 x double> %1) #9
  %3 = shufflevector <2 x double> %2, <2 x double> %0, <2 x i32> <i32 0, i32 3>
  %4 = bitcast i8 %__U to <8 x i1>
  %extract.i = shufflevector <8 x i1> %4, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
  %5 = select <2 x i1> %extract.i, <2 x double> %3, <2 x double> zeroinitializer
  ret <2 x double> %5
}

define <2 x double> @test_mm_maskz_fmsubadd_pd(i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B, <2 x double> %__C) {
; X86-LABEL: test_mm_maskz_fmsubadd_pd:
; X86: # %bb.0: # %entry
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT: vfmsubadd213pd %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0x89,0xa7,0xc2]
; X86-NEXT: # xmm0 = (xmm1 * xmm0) -/+ xmm2
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_mm_maskz_fmsubadd_pd:
; X64: # %bb.0: # %entry
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT: vfmsubadd213pd %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0x89,0xa7,0xc2]
; X64-NEXT: # xmm0 = (xmm1 * xmm0) -/+ xmm2
; X64-NEXT: retq # encoding: [0xc3]
entry:
  %sub.i = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %__C
  %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %__A, <2 x double> %__B, <2 x double> %sub.i) #9
  %1 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %__A, <2 x double> %__B, <2 x double> %__C) #9
  %2 = shufflevector <2 x double> %1, <2 x double> %0, <2 x i32> <i32 0, i32 3>
  %3 = bitcast i8 %__U to <8 x i1>
  %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
  %4 = select <2 x i1> %extract.i, <2 x double> %2, <2 x double> zeroinitializer
  ret <2 x double> %4
}

define <4 x double> @test_mm256_mask_fmaddsub_pd(<4 x double> %__A, i8 zeroext %__U, <4 x double> %__B, <4 x double> %__C) {
|
2018-06-03 22:56:04 +08:00
|
|
|
; X86-LABEL: test_mm256_mask_fmaddsub_pd:
|
|
|
|
; X86: # %bb.0: # %entry
|
|
|
|
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
|
|
|
|
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
|
|
|
|
; X86-NEXT: vfmaddsub132pd %ymm1, %ymm2, %ymm0 {%k1} # encoding: [0x62,0xf2,0xed,0x29,0x96,0xc1]
|
|
|
|
; X86-NEXT: # ymm0 = (ymm0 * ymm1) +/- ymm2
|
|
|
|
; X86-NEXT: retl # encoding: [0xc3]
|
|
|
|
;
|
|
|
|
; X64-LABEL: test_mm256_mask_fmaddsub_pd:
|
|
|
|
; X64: # %bb.0: # %entry
|
|
|
|
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
|
|
|
|
; X64-NEXT: vfmaddsub132pd %ymm1, %ymm2, %ymm0 {%k1} # encoding: [0x62,0xf2,0xed,0x29,0x96,0xc1]
|
|
|
|
; X64-NEXT: # ymm0 = (ymm0 * ymm1) +/- ymm2
|
|
|
|
; X64-NEXT: retq # encoding: [0xc3]
|
2018-05-30 23:25:16 +08:00
|
|
|
entry:
|
|
|
|
%0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %__A, <4 x double> %__B, <4 x double> %__C) #9
|
|
|
|
%1 = fsub <4 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__C
|
|
|
|
%2 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %__A, <4 x double> %__B, <4 x double> %1) #9
|
|
|
|
%3 = shufflevector <4 x double> %2, <4 x double> %0, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
|
|
|
|
%4 = bitcast i8 %__U to <8 x i1>
|
|
|
|
%extract.i = shufflevector <8 x i1> %4, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
|
|
|
|
%5 = select <4 x i1> %extract.i, <4 x double> %3, <4 x double> %__A
|
|
|
|
ret <4 x double> %5
|
|
|
|
}
|
|
|
|
|
|
|
|
define <4 x double> @test_mm256_mask_fmsubadd_pd(<4 x double> %__A, i8 zeroext %__U, <4 x double> %__B, <4 x double> %__C) {
|
2018-06-03 22:56:04 +08:00
|
|
|
; X86-LABEL: test_mm256_mask_fmsubadd_pd:
|
|
|
|
; X86: # %bb.0: # %entry
|
|
|
|
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
|
|
|
|
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
|
|
|
|
; X86-NEXT: vfmsubadd132pd %ymm1, %ymm2, %ymm0 {%k1} # encoding: [0x62,0xf2,0xed,0x29,0x97,0xc1]
|
|
|
|
; X86-NEXT: # ymm0 = (ymm0 * ymm1) -/+ ymm2
|
|
|
|
; X86-NEXT: retl # encoding: [0xc3]
|
|
|
|
;
|
|
|
|
; X64-LABEL: test_mm256_mask_fmsubadd_pd:
|
|
|
|
; X64: # %bb.0: # %entry
|
|
|
|
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
|
|
|
|
; X64-NEXT: vfmsubadd132pd %ymm1, %ymm2, %ymm0 {%k1} # encoding: [0x62,0xf2,0xed,0x29,0x97,0xc1]
|
|
|
|
; X64-NEXT: # ymm0 = (ymm0 * ymm1) -/+ ymm2
|
|
|
|
; X64-NEXT: retq # encoding: [0xc3]
|
2018-05-30 23:25:16 +08:00
|
|
|
entry:
|
|
|
|
%sub.i = fsub <4 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__C
|
|
|
|
%0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %__A, <4 x double> %__B, <4 x double> %sub.i) #9
|
|
|
|
%1 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %__A, <4 x double> %__B, <4 x double> %__C) #9
|
|
|
|
%2 = shufflevector <4 x double> %1, <4 x double> %0, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
|
|
|
|
%3 = bitcast i8 %__U to <8 x i1>
|
|
|
|
%extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
|
|
|
|
%4 = select <4 x i1> %extract.i, <4 x double> %2, <4 x double> %__A
|
|
|
|
ret <4 x double> %4
|
|
|
|
}
|
|
|
|
|
|
|
|
define <4 x double> @test_mm256_mask3_fmaddsub_pd(<4 x double> %__A, <4 x double> %__B, <4 x double> %__C, i8 zeroext %__U) {
|
2018-06-03 22:56:04 +08:00
|
|
|
; X86-LABEL: test_mm256_mask3_fmaddsub_pd:
|
|
|
|
; X86: # %bb.0: # %entry
|
|
|
|
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
|
|
|
|
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
|
|
|
|
; X86-NEXT: vfmaddsub231pd %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x29,0xb6,0xd1]
|
|
|
|
; X86-NEXT: # ymm2 = (ymm0 * ymm1) +/- ymm2
|
|
|
|
; X86-NEXT: vmovapd %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xc2]
|
|
|
|
; X86-NEXT: retl # encoding: [0xc3]
|
|
|
|
;
|
|
|
|
; X64-LABEL: test_mm256_mask3_fmaddsub_pd:
|
|
|
|
; X64: # %bb.0: # %entry
|
|
|
|
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
|
|
|
|
; X64-NEXT: vfmaddsub231pd %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x29,0xb6,0xd1]
|
|
|
|
; X64-NEXT: # ymm2 = (ymm0 * ymm1) +/- ymm2
|
|
|
|
; X64-NEXT: vmovapd %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xc2]
|
|
|
|
; X64-NEXT: retq # encoding: [0xc3]
|
2018-05-30 23:25:16 +08:00
|
|
|
entry:
|
|
|
|
%0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %__A, <4 x double> %__B, <4 x double> %__C) #9
|
|
|
|
%1 = fsub <4 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__C
|
|
|
|
%2 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %__A, <4 x double> %__B, <4 x double> %1) #9
|
|
|
|
%3 = shufflevector <4 x double> %2, <4 x double> %0, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
|
|
|
|
%4 = bitcast i8 %__U to <8 x i1>
|
|
|
|
%extract.i = shufflevector <8 x i1> %4, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
|
|
|
|
%5 = select <4 x i1> %extract.i, <4 x double> %3, <4 x double> %__C
|
|
|
|
ret <4 x double> %5
|
|
|
|
}
|
|
|
|
|
|
|
|
define <4 x double> @test_mm256_maskz_fmaddsub_pd(i8 zeroext %__U, <4 x double> %__A, <4 x double> %__B, <4 x double> %__C) {
|
2018-06-03 22:56:04 +08:00
|
|
|
; X86-LABEL: test_mm256_maskz_fmaddsub_pd:
|
|
|
|
; X86: # %bb.0: # %entry
|
|
|
|
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
|
|
|
|
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
|
|
|
|
; X86-NEXT: vfmaddsub213pd %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0xa9,0xa6,0xc2]
|
|
|
|
; X86-NEXT: # ymm0 = (ymm1 * ymm0) +/- ymm2
|
|
|
|
; X86-NEXT: retl # encoding: [0xc3]
|
|
|
|
;
|
|
|
|
; X64-LABEL: test_mm256_maskz_fmaddsub_pd:
|
|
|
|
; X64: # %bb.0: # %entry
|
|
|
|
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
|
|
|
|
; X64-NEXT: vfmaddsub213pd %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0xa9,0xa6,0xc2]
|
|
|
|
; X64-NEXT: # ymm0 = (ymm1 * ymm0) +/- ymm2
|
|
|
|
; X64-NEXT: retq # encoding: [0xc3]
|
2018-05-30 23:25:16 +08:00
|
|
|
entry:
|
|
|
|
%0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %__A, <4 x double> %__B, <4 x double> %__C) #9
|
|
|
|
%1 = fsub <4 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__C
|
|
|
|
%2 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %__A, <4 x double> %__B, <4 x double> %1) #9
|
|
|
|
%3 = shufflevector <4 x double> %2, <4 x double> %0, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
|
|
|
|
%4 = bitcast i8 %__U to <8 x i1>
|
|
|
|
%extract.i = shufflevector <8 x i1> %4, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
|
|
|
|
%5 = select <4 x i1> %extract.i, <4 x double> %3, <4 x double> zeroinitializer
|
|
|
|
ret <4 x double> %5
|
|
|
|
}
|
|
|
|
|
|
|
|
define <4 x double> @test_mm256_maskz_fmsubadd_pd(i8 zeroext %__U, <4 x double> %__A, <4 x double> %__B, <4 x double> %__C) {
|
2018-06-03 22:56:04 +08:00
|
|
|
; X86-LABEL: test_mm256_maskz_fmsubadd_pd:
|
|
|
|
; X86: # %bb.0: # %entry
|
|
|
|
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
|
|
|
|
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
|
|
|
|
; X86-NEXT: vfmsubadd213pd %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0xa9,0xa7,0xc2]
|
|
|
|
; X86-NEXT: # ymm0 = (ymm1 * ymm0) -/+ ymm2
|
|
|
|
; X86-NEXT: retl # encoding: [0xc3]
|
|
|
|
;
|
|
|
|
; X64-LABEL: test_mm256_maskz_fmsubadd_pd:
|
|
|
|
; X64: # %bb.0: # %entry
|
|
|
|
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
|
|
|
|
; X64-NEXT: vfmsubadd213pd %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0xa9,0xa7,0xc2]
|
|
|
|
; X64-NEXT: # ymm0 = (ymm1 * ymm0) -/+ ymm2
|
|
|
|
; X64-NEXT: retq # encoding: [0xc3]
|
2018-05-30 23:25:16 +08:00
|
|
|
entry:
|
|
|
|
%sub.i = fsub <4 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__C
|
|
|
|
%0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %__A, <4 x double> %__B, <4 x double> %sub.i) #9
|
|
|
|
%1 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %__A, <4 x double> %__B, <4 x double> %__C) #9
|
|
|
|
%2 = shufflevector <4 x double> %1, <4 x double> %0, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
|
|
|
|
%3 = bitcast i8 %__U to <8 x i1>
|
|
|
|
%extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
|
|
|
|
%4 = select <4 x i1> %extract.i, <4 x double> %2, <4 x double> zeroinitializer
|
|
|
|
ret <4 x double> %4
|
|
|
|
}
|
|
|
|
|
|
|
|
define <4 x float> @test_mm_mask_fmaddsub_ps(<4 x float> %__A, i8 zeroext %__U, <4 x float> %__B, <4 x float> %__C) {
|
2018-06-03 22:56:04 +08:00
|
|
|
; X86-LABEL: test_mm_mask_fmaddsub_ps:
|
|
|
|
; X86: # %bb.0: # %entry
|
|
|
|
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
|
|
|
|
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
|
|
|
|
; X86-NEXT: vfmaddsub132ps %xmm1, %xmm2, %xmm0 {%k1} # encoding: [0x62,0xf2,0x6d,0x09,0x96,0xc1]
|
|
|
|
; X86-NEXT: # xmm0 = (xmm0 * xmm1) +/- xmm2
|
|
|
|
; X86-NEXT: retl # encoding: [0xc3]
|
|
|
|
;
|
|
|
|
; X64-LABEL: test_mm_mask_fmaddsub_ps:
|
|
|
|
; X64: # %bb.0: # %entry
|
|
|
|
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
|
|
|
|
; X64-NEXT: vfmaddsub132ps %xmm1, %xmm2, %xmm0 {%k1} # encoding: [0x62,0xf2,0x6d,0x09,0x96,0xc1]
|
|
|
|
; X64-NEXT: # xmm0 = (xmm0 * xmm1) +/- xmm2
|
|
|
|
; X64-NEXT: retq # encoding: [0xc3]
|
2018-05-30 23:25:16 +08:00
|
|
|
entry:
|
|
|
|
%0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %__A, <4 x float> %__B, <4 x float> %__C) #9
|
|
|
|
%1 = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
|
|
|
|
%2 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %__A, <4 x float> %__B, <4 x float> %1) #9
|
|
|
|
%3 = shufflevector <4 x float> %2, <4 x float> %0, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
|
|
|
|
%4 = bitcast i8 %__U to <8 x i1>
|
|
|
|
%extract.i = shufflevector <8 x i1> %4, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
|
|
|
|
%5 = select <4 x i1> %extract.i, <4 x float> %3, <4 x float> %__A
|
|
|
|
ret <4 x float> %5
|
|
|
|
}
|
|
|
|
|
|
|
|
define <4 x float> @test_mm_mask_fmsubadd_ps(<4 x float> %__A, i8 zeroext %__U, <4 x float> %__B, <4 x float> %__C) {
|
2018-06-03 22:56:04 +08:00
|
|
|
; X86-LABEL: test_mm_mask_fmsubadd_ps:
|
|
|
|
; X86: # %bb.0: # %entry
|
|
|
|
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
|
|
|
|
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
|
|
|
|
; X86-NEXT: vfmsubadd132ps %xmm1, %xmm2, %xmm0 {%k1} # encoding: [0x62,0xf2,0x6d,0x09,0x97,0xc1]
|
|
|
|
; X86-NEXT: # xmm0 = (xmm0 * xmm1) -/+ xmm2
|
|
|
|
; X86-NEXT: retl # encoding: [0xc3]
|
|
|
|
;
|
|
|
|
; X64-LABEL: test_mm_mask_fmsubadd_ps:
|
|
|
|
; X64: # %bb.0: # %entry
|
|
|
|
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
|
|
|
|
; X64-NEXT: vfmsubadd132ps %xmm1, %xmm2, %xmm0 {%k1} # encoding: [0x62,0xf2,0x6d,0x09,0x97,0xc1]
|
|
|
|
; X64-NEXT: # xmm0 = (xmm0 * xmm1) -/+ xmm2
|
|
|
|
; X64-NEXT: retq # encoding: [0xc3]
|
2018-05-30 23:25:16 +08:00
|
|
|
entry:
|
|
|
|
%sub.i = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
|
|
|
|
%0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %__A, <4 x float> %__B, <4 x float> %sub.i) #9
|
|
|
|
%1 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %__A, <4 x float> %__B, <4 x float> %__C) #9
|
|
|
|
%2 = shufflevector <4 x float> %1, <4 x float> %0, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
|
|
|
|
%3 = bitcast i8 %__U to <8 x i1>
|
|
|
|
%extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
|
|
|
|
%4 = select <4 x i1> %extract.i, <4 x float> %2, <4 x float> %__A
|
|
|
|
ret <4 x float> %4
|
|
|
|
}
|
|
|
|
|
|
|
|
define <4 x float> @test_mm_mask3_fmaddsub_ps(<4 x float> %__A, <4 x float> %__B, <4 x float> %__C, i8 zeroext %__U) {
|
2018-06-03 22:56:04 +08:00
|
|
|
; X86-LABEL: test_mm_mask3_fmaddsub_ps:
|
|
|
|
; X86: # %bb.0: # %entry
|
|
|
|
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
|
|
|
|
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
|
|
|
|
; X86-NEXT: vfmaddsub231ps %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x09,0xb6,0xd1]
|
|
|
|
; X86-NEXT: # xmm2 = (xmm0 * xmm1) +/- xmm2
|
|
|
|
; X86-NEXT: vmovaps %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2]
|
|
|
|
; X86-NEXT: retl # encoding: [0xc3]
|
|
|
|
;
|
|
|
|
; X64-LABEL: test_mm_mask3_fmaddsub_ps:
|
|
|
|
; X64: # %bb.0: # %entry
|
|
|
|
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
|
|
|
|
; X64-NEXT: vfmaddsub231ps %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x09,0xb6,0xd1]
|
|
|
|
; X64-NEXT: # xmm2 = (xmm0 * xmm1) +/- xmm2
|
|
|
|
; X64-NEXT: vmovaps %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2]
|
|
|
|
; X64-NEXT: retq # encoding: [0xc3]
|
2018-05-30 23:25:16 +08:00
|
|
|
entry:
|
|
|
|
%0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %__A, <4 x float> %__B, <4 x float> %__C) #9
|
|
|
|
%1 = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
|
|
|
|
%2 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %__A, <4 x float> %__B, <4 x float> %1) #9
|
|
|
|
%3 = shufflevector <4 x float> %2, <4 x float> %0, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
|
|
|
|
%4 = bitcast i8 %__U to <8 x i1>
|
|
|
|
%extract.i = shufflevector <8 x i1> %4, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
|
|
|
|
%5 = select <4 x i1> %extract.i, <4 x float> %3, <4 x float> %__C
|
|
|
|
ret <4 x float> %5
|
|
|
|
}
|
|
|
|
|
|
|
|
define <4 x float> @test_mm_maskz_fmaddsub_ps(i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B, <4 x float> %__C) {
|
2018-06-03 22:56:04 +08:00
|
|
|
; X86-LABEL: test_mm_maskz_fmaddsub_ps:
|
|
|
|
; X86: # %bb.0: # %entry
|
|
|
|
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
|
|
|
|
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
|
|
|
|
; X86-NEXT: vfmaddsub213ps %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x75,0x89,0xa6,0xc2]
|
|
|
|
; X86-NEXT: # xmm0 = (xmm1 * xmm0) +/- xmm2
|
|
|
|
; X86-NEXT: retl # encoding: [0xc3]
|
|
|
|
;
|
|
|
|
; X64-LABEL: test_mm_maskz_fmaddsub_ps:
|
|
|
|
; X64: # %bb.0: # %entry
|
|
|
|
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
|
|
|
|
; X64-NEXT: vfmaddsub213ps %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x75,0x89,0xa6,0xc2]
|
|
|
|
; X64-NEXT: # xmm0 = (xmm1 * xmm0) +/- xmm2
|
|
|
|
; X64-NEXT: retq # encoding: [0xc3]
|
2018-05-30 23:25:16 +08:00
|
|
|
entry:
|
|
|
|
%0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %__A, <4 x float> %__B, <4 x float> %__C) #9
|
|
|
|
%1 = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
|
|
|
|
%2 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %__A, <4 x float> %__B, <4 x float> %1) #9
|
|
|
|
%3 = shufflevector <4 x float> %2, <4 x float> %0, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
|
|
|
|
%4 = bitcast i8 %__U to <8 x i1>
|
|
|
|
%extract.i = shufflevector <8 x i1> %4, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
|
|
|
|
%5 = select <4 x i1> %extract.i, <4 x float> %3, <4 x float> zeroinitializer
|
|
|
|
ret <4 x float> %5
|
|
|
|
}
|
|
|
|
|
|
|
|
define <4 x float> @test_mm_maskz_fmsubadd_ps(i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B, <4 x float> %__C) {
|
2018-06-03 22:56:04 +08:00
|
|
|
; X86-LABEL: test_mm_maskz_fmsubadd_ps:
|
|
|
|
; X86: # %bb.0: # %entry
|
|
|
|
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
|
|
|
|
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
|
|
|
|
; X86-NEXT: vfmsubadd213ps %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x75,0x89,0xa7,0xc2]
|
|
|
|
; X86-NEXT: # xmm0 = (xmm1 * xmm0) -/+ xmm2
|
|
|
|
; X86-NEXT: retl # encoding: [0xc3]
|
|
|
|
;
|
|
|
|
; X64-LABEL: test_mm_maskz_fmsubadd_ps:
|
|
|
|
; X64: # %bb.0: # %entry
|
|
|
|
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
|
|
|
|
; X64-NEXT: vfmsubadd213ps %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x75,0x89,0xa7,0xc2]
|
|
|
|
; X64-NEXT: # xmm0 = (xmm1 * xmm0) -/+ xmm2
|
|
|
|
; X64-NEXT: retq # encoding: [0xc3]
|
2018-05-30 23:25:16 +08:00
|
|
|
entry:
|
|
|
|
%sub.i = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
|
|
|
|
%0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %__A, <4 x float> %__B, <4 x float> %sub.i) #9
|
|
|
|
%1 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %__A, <4 x float> %__B, <4 x float> %__C) #9
|
|
|
|
%2 = shufflevector <4 x float> %1, <4 x float> %0, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
|
|
|
|
%3 = bitcast i8 %__U to <8 x i1>
|
|
|
|
%extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
|
|
|
|
%4 = select <4 x i1> %extract.i, <4 x float> %2, <4 x float> zeroinitializer
|
|
|
|
ret <4 x float> %4
|
|
|
|
}
|
|
|
|
|
|
|
|
define <8 x float> @test_mm256_mask_fmaddsub_ps(<8 x float> %__A, i8 zeroext %__U, <8 x float> %__B, <8 x float> %__C) {
|
2018-06-03 22:56:04 +08:00
|
|
|
; X86-LABEL: test_mm256_mask_fmaddsub_ps:
|
|
|
|
; X86: # %bb.0: # %entry
|
|
|
|
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
|
|
|
|
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
|
|
|
|
; X86-NEXT: vfmaddsub132ps %ymm1, %ymm2, %ymm0 {%k1} # encoding: [0x62,0xf2,0x6d,0x29,0x96,0xc1]
|
|
|
|
; X86-NEXT: # ymm0 = (ymm0 * ymm1) +/- ymm2
|
|
|
|
; X86-NEXT: retl # encoding: [0xc3]
|
|
|
|
;
|
|
|
|
; X64-LABEL: test_mm256_mask_fmaddsub_ps:
|
|
|
|
; X64: # %bb.0: # %entry
|
|
|
|
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
|
|
|
|
; X64-NEXT: vfmaddsub132ps %ymm1, %ymm2, %ymm0 {%k1} # encoding: [0x62,0xf2,0x6d,0x29,0x96,0xc1]
|
|
|
|
; X64-NEXT: # ymm0 = (ymm0 * ymm1) +/- ymm2
|
|
|
|
; X64-NEXT: retq # encoding: [0xc3]
|
2018-05-30 23:25:16 +08:00
|
|
|
entry:
|
|
|
|
%0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %__A, <8 x float> %__B, <8 x float> %__C) #9
|
|
|
|
%1 = fsub <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
|
|
|
|
%2 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %__A, <8 x float> %__B, <8 x float> %1) #9
|
|
|
|
%3 = shufflevector <8 x float> %2, <8 x float> %0, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
|
|
|
|
%4 = bitcast i8 %__U to <8 x i1>
|
|
|
|
%5 = select <8 x i1> %4, <8 x float> %3, <8 x float> %__A
|
|
|
|
ret <8 x float> %5
|
|
|
|
}
|
|
|
|
|
|
|
|
define <8 x float> @test_mm256_mask_fmsubadd_ps(<8 x float> %__A, i8 zeroext %__U, <8 x float> %__B, <8 x float> %__C) {
|
2018-06-03 22:56:04 +08:00
|
|
|
; X86-LABEL: test_mm256_mask_fmsubadd_ps:
|
|
|
|
; X86: # %bb.0: # %entry
|
|
|
|
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
|
|
|
|
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
|
|
|
|
; X86-NEXT: vfmsubadd132ps %ymm1, %ymm2, %ymm0 {%k1} # encoding: [0x62,0xf2,0x6d,0x29,0x97,0xc1]
|
|
|
|
; X86-NEXT: # ymm0 = (ymm0 * ymm1) -/+ ymm2
|
|
|
|
; X86-NEXT: retl # encoding: [0xc3]
|
|
|
|
;
|
|
|
|
; X64-LABEL: test_mm256_mask_fmsubadd_ps:
|
|
|
|
; X64: # %bb.0: # %entry
|
|
|
|
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
|
|
|
|
; X64-NEXT: vfmsubadd132ps %ymm1, %ymm2, %ymm0 {%k1} # encoding: [0x62,0xf2,0x6d,0x29,0x97,0xc1]
|
|
|
|
; X64-NEXT: # ymm0 = (ymm0 * ymm1) -/+ ymm2
|
|
|
|
; X64-NEXT: retq # encoding: [0xc3]
|
2018-05-30 23:25:16 +08:00
|
|
|
entry:
|
|
|
|
%sub.i = fsub <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
|
|
|
|
%0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %__A, <8 x float> %__B, <8 x float> %sub.i) #9
|
|
|
|
%1 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %__A, <8 x float> %__B, <8 x float> %__C) #9
|
|
|
|
%2 = shufflevector <8 x float> %1, <8 x float> %0, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
|
|
|
|
%3 = bitcast i8 %__U to <8 x i1>
|
|
|
|
%4 = select <8 x i1> %3, <8 x float> %2, <8 x float> %__A
|
|
|
|
ret <8 x float> %4
|
|
|
|
}
|
|
|
|
|
|
|
|
define <8 x float> @test_mm256_mask3_fmaddsub_ps(<8 x float> %__A, <8 x float> %__B, <8 x float> %__C, i8 zeroext %__U) {
|
2018-06-03 22:56:04 +08:00
|
|
|
; X86-LABEL: test_mm256_mask3_fmaddsub_ps:
|
|
|
|
; X86: # %bb.0: # %entry
|
|
|
|
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
|
|
|
|
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
|
|
|
|
; X86-NEXT: vfmaddsub231ps %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x29,0xb6,0xd1]
|
|
|
|
; X86-NEXT: # ymm2 = (ymm0 * ymm1) +/- ymm2
|
|
|
|
; X86-NEXT: vmovaps %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc2]
|
|
|
|
; X86-NEXT: retl # encoding: [0xc3]
|
|
|
|
;
|
|
|
|
; X64-LABEL: test_mm256_mask3_fmaddsub_ps:
|
|
|
|
; X64: # %bb.0: # %entry
|
|
|
|
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
|
|
|
|
; X64-NEXT: vfmaddsub231ps %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x29,0xb6,0xd1]
|
|
|
|
; X64-NEXT: # ymm2 = (ymm0 * ymm1) +/- ymm2
|
|
|
|
; X64-NEXT: vmovaps %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc2]
|
|
|
|
; X64-NEXT: retq # encoding: [0xc3]
|
2018-05-30 23:25:16 +08:00
|
|
|
entry:
|
|
|
|
%0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %__A, <8 x float> %__B, <8 x float> %__C) #9
|
|
|
|
%1 = fsub <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
|
|
|
|
%2 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %__A, <8 x float> %__B, <8 x float> %1) #9
|
|
|
|
%3 = shufflevector <8 x float> %2, <8 x float> %0, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
|
|
|
|
%4 = bitcast i8 %__U to <8 x i1>
|
|
|
|
%5 = select <8 x i1> %4, <8 x float> %3, <8 x float> %__C
|
|
|
|
ret <8 x float> %5
|
|
|
|
}
|
|
|
|
|
|
|
|
define <8 x float> @test_mm256_maskz_fmaddsub_ps(i8 zeroext %__U, <8 x float> %__A, <8 x float> %__B, <8 x float> %__C) {
|
2018-06-03 22:56:04 +08:00
|
|
|
; X86-LABEL: test_mm256_maskz_fmaddsub_ps:
|
|
|
|
; X86: # %bb.0: # %entry
|
|
|
|
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
|
|
|
|
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
|
|
|
|
; X86-NEXT: vfmaddsub213ps %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0x75,0xa9,0xa6,0xc2]
|
|
|
|
; X86-NEXT: # ymm0 = (ymm1 * ymm0) +/- ymm2
|
|
|
|
; X86-NEXT: retl # encoding: [0xc3]
|
|
|
|
;
|
|
|
|
; X64-LABEL: test_mm256_maskz_fmaddsub_ps:
|
|
|
|
; X64: # %bb.0: # %entry
|
|
|
|
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
|
|
|
|
; X64-NEXT: vfmaddsub213ps %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0x75,0xa9,0xa6,0xc2]
|
|
|
|
; X64-NEXT: # ymm0 = (ymm1 * ymm0) +/- ymm2
|
|
|
|
; X64-NEXT: retq # encoding: [0xc3]
|
2018-05-30 23:25:16 +08:00
|
|
|
entry:
|
|
|
|
%0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %__A, <8 x float> %__B, <8 x float> %__C) #9
|
|
|
|
%1 = fsub <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
|
|
|
|
%2 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %__A, <8 x float> %__B, <8 x float> %1) #9
|
|
|
|
%3 = shufflevector <8 x float> %2, <8 x float> %0, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
|
|
|
|
%4 = bitcast i8 %__U to <8 x i1>
|
|
|
|
%5 = select <8 x i1> %4, <8 x float> %3, <8 x float> zeroinitializer
|
|
|
|
ret <8 x float> %5
|
|
|
|
}
|
|
|
|
|
|
|
|
define <8 x float> @test_mm256_maskz_fmsubadd_ps(i8 zeroext %__U, <8 x float> %__A, <8 x float> %__B, <8 x float> %__C) {
|
2018-06-03 22:56:04 +08:00
|
|
|
; X86-LABEL: test_mm256_maskz_fmsubadd_ps:
|
|
|
|
; X86: # %bb.0: # %entry
|
|
|
|
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
|
|
|
|
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
|
|
|
|
; X86-NEXT: vfmsubadd213ps %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0x75,0xa9,0xa7,0xc2]
|
|
|
|
; X86-NEXT: # ymm0 = (ymm1 * ymm0) -/+ ymm2
|
|
|
|
; X86-NEXT: retl # encoding: [0xc3]
|
|
|
|
;
|
|
|
|
; X64-LABEL: test_mm256_maskz_fmsubadd_ps:
|
|
|
|
; X64: # %bb.0: # %entry
|
|
|
|
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
|
|
|
|
; X64-NEXT: vfmsubadd213ps %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0x75,0xa9,0xa7,0xc2]
|
|
|
|
; X64-NEXT: # ymm0 = (ymm1 * ymm0) -/+ ymm2
|
|
|
|
; X64-NEXT: retq # encoding: [0xc3]
|
2018-05-30 23:25:16 +08:00
|
|
|
entry:
|
|
|
|
%sub.i = fsub <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
|
|
|
|
%0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %__A, <8 x float> %__B, <8 x float> %sub.i) #9
|
|
|
|
%1 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %__A, <8 x float> %__B, <8 x float> %__C) #9
|
|
|
|
%2 = shufflevector <8 x float> %1, <8 x float> %0, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
|
|
|
|
%3 = bitcast i8 %__U to <8 x i1>
|
|
|
|
%4 = select <8 x i1> %3, <8 x float> %2, <8 x float> zeroinitializer
|
|
|
|
ret <8 x float> %4
|
|
|
|
}
|
|
|
|
|
|
|
|
define <2 x double> @test_mm_mask3_fmsub_pd(<2 x double> %__A, <2 x double> %__B, <2 x double> %__C, i8 zeroext %__U) {
|
2018-06-03 22:56:04 +08:00
|
|
|
; X86-LABEL: test_mm_mask3_fmsub_pd:
|
|
|
|
; X86: # %bb.0: # %entry
|
|
|
|
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
|
|
|
|
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
|
|
|
|
; X86-NEXT: vfmsub231pd %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x09,0xba,0xd1]
|
|
|
|
; X86-NEXT: # xmm2 = (xmm0 * xmm1) - xmm2
|
|
|
|
; X86-NEXT: vmovapd %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xc2]
|
|
|
|
; X86-NEXT: retl # encoding: [0xc3]
|
|
|
|
;
|
|
|
|
; X64-LABEL: test_mm_mask3_fmsub_pd:
|
|
|
|
; X64: # %bb.0: # %entry
|
|
|
|
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
|
|
|
|
; X64-NEXT: vfmsub231pd %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x09,0xba,0xd1]
|
|
|
|
; X64-NEXT: # xmm2 = (xmm0 * xmm1) - xmm2
|
|
|
|
; X64-NEXT: vmovapd %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xc2]
|
|
|
|
; X64-NEXT: retq # encoding: [0xc3]
|
2018-05-30 23:25:16 +08:00
|
|
|
entry:
|
|
|
|
%sub.i = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %__C
|
|
|
|
%0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %__A, <2 x double> %__B, <2 x double> %sub.i) #9
|
|
|
|
%1 = bitcast i8 %__U to <8 x i1>
|
|
|
|
%extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
|
|
|
|
%2 = select <2 x i1> %extract.i, <2 x double> %0, <2 x double> %__C
|
|
|
|
ret <2 x double> %2
|
|
|
|
}
|
|
|
|
|
|
|
|
define <4 x double> @test_mm256_mask3_fmsub_pd(<4 x double> %__A, <4 x double> %__B, <4 x double> %__C, i8 zeroext %__U) {
|
2018-06-03 22:56:04 +08:00
|
|
|
; X86-LABEL: test_mm256_mask3_fmsub_pd:
|
|
|
|
; X86: # %bb.0: # %entry
|
|
|
|
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
|
|
|
|
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
|
|
|
|
; X86-NEXT: vfmsub231pd %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x29,0xba,0xd1]
|
|
|
|
; X86-NEXT: # ymm2 = (ymm0 * ymm1) - ymm2
|
|
|
|
; X86-NEXT: vmovapd %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xc2]
|
|
|
|
; X86-NEXT: retl # encoding: [0xc3]
|
|
|
|
;
|
|
|
|
; X64-LABEL: test_mm256_mask3_fmsub_pd:
|
|
|
|
; X64: # %bb.0: # %entry
|
|
|
|
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
|
|
|
|
; X64-NEXT: vfmsub231pd %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x29,0xba,0xd1]
|
|
|
|
; X64-NEXT: # ymm2 = (ymm0 * ymm1) - ymm2
|
|
|
|
; X64-NEXT: vmovapd %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xc2]
|
|
|
|
; X64-NEXT: retq # encoding: [0xc3]
|
2018-05-30 23:25:16 +08:00
|
|
|
entry:
|
|
|
|
%sub.i = fsub <4 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__C
|
|
|
|
%0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %__A, <4 x double> %__B, <4 x double> %sub.i) #9
|
|
|
|
%1 = bitcast i8 %__U to <8 x i1>
|
|
|
|
%extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
|
|
|
|
%2 = select <4 x i1> %extract.i, <4 x double> %0, <4 x double> %__C
|
|
|
|
ret <4 x double> %2
|
|
|
|
}
|
|
|
|
|
|
|
|
define <4 x float> @test_mm_mask3_fmsub_ps(<4 x float> %__A, <4 x float> %__B, <4 x float> %__C, i8 zeroext %__U) {
|
2018-06-03 22:56:04 +08:00
|
|
|
; X86-LABEL: test_mm_mask3_fmsub_ps:
|
|
|
|
; X86: # %bb.0: # %entry
|
|
|
|
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
|
|
|
|
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
|
|
|
|
; X86-NEXT: vfmsub231ps %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x09,0xba,0xd1]
|
|
|
|
; X86-NEXT: # xmm2 = (xmm0 * xmm1) - xmm2
|
|
|
|
; X86-NEXT: vmovaps %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2]
|
|
|
|
; X86-NEXT: retl # encoding: [0xc3]
|
|
|
|
;
|
|
|
|
; X64-LABEL: test_mm_mask3_fmsub_ps:
|
|
|
|
; X64: # %bb.0: # %entry
|
|
|
|
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
|
|
|
|
; X64-NEXT: vfmsub231ps %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x09,0xba,0xd1]
|
|
|
|
; X64-NEXT: # xmm2 = (xmm0 * xmm1) - xmm2
|
|
|
|
; X64-NEXT: vmovaps %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2]
|
|
|
|
; X64-NEXT: retq # encoding: [0xc3]
|
2018-05-30 23:25:16 +08:00
|
|
|
entry:
|
|
|
|
%sub.i = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
|
|
|
|
%0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %__A, <4 x float> %__B, <4 x float> %sub.i) #9
|
|
|
|
%1 = bitcast i8 %__U to <8 x i1>
|
|
|
|
%extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
|
|
|
|
%2 = select <4 x i1> %extract.i, <4 x float> %0, <4 x float> %__C
|
|
|
|
ret <4 x float> %2
|
|
|
|
}
|
|
|
|
|
|
|
|
define <8 x float> @test_mm256_mask3_fmsub_ps(<8 x float> %__A, <8 x float> %__B, <8 x float> %__C, i8 zeroext %__U) {
|
2018-06-03 22:56:04 +08:00
|
|
|
; X86-LABEL: test_mm256_mask3_fmsub_ps:
|
|
|
|
; X86: # %bb.0: # %entry
|
|
|
|
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
|
|
|
|
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
|
|
|
|
; X86-NEXT: vfmsub231ps %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x29,0xba,0xd1]
|
|
|
|
; X86-NEXT: # ymm2 = (ymm0 * ymm1) - ymm2
|
|
|
|
; X86-NEXT: vmovaps %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc2]
|
|
|
|
; X86-NEXT: retl # encoding: [0xc3]
|
|
|
|
;
|
|
|
|
; X64-LABEL: test_mm256_mask3_fmsub_ps:
|
|
|
|
; X64: # %bb.0: # %entry
|
|
|
|
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
|
|
|
|
; X64-NEXT: vfmsub231ps %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x29,0xba,0xd1]
|
|
|
|
; X64-NEXT: # ymm2 = (ymm0 * ymm1) - ymm2
|
|
|
|
; X64-NEXT: vmovaps %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc2]
|
|
|
|
; X64-NEXT: retq # encoding: [0xc3]
|
2018-05-30 23:25:16 +08:00
|
|
|
entry:
|
|
|
|
%sub.i = fsub <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
|
|
|
|
%0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %__A, <8 x float> %__B, <8 x float> %sub.i) #9
|
|
|
|
%1 = bitcast i8 %__U to <8 x i1>
|
|
|
|
%2 = select <8 x i1> %1, <8 x float> %0, <8 x float> %__C
|
|
|
|
ret <8 x float> %2
|
|
|
|
}
|
|
|
|
|
|
|
|
define <2 x double> @test_mm_mask3_fmsubadd_pd(<2 x double> %__A, <2 x double> %__B, <2 x double> %__C, i8 zeroext %__U) {
|
2018-06-03 22:56:04 +08:00
|
|
|
; X86-LABEL: test_mm_mask3_fmsubadd_pd:
|
|
|
|
; X86: # %bb.0: # %entry
|
|
|
|
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
|
|
|
|
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
|
|
|
|
; X86-NEXT: vfmsubadd231pd %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x09,0xb7,0xd1]
|
|
|
|
; X86-NEXT: # xmm2 = (xmm0 * xmm1) -/+ xmm2
|
|
|
|
; X86-NEXT: vmovapd %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xc2]
|
|
|
|
; X86-NEXT: retl # encoding: [0xc3]
|
|
|
|
;
|
|
|
|
; X64-LABEL: test_mm_mask3_fmsubadd_pd:
|
|
|
|
; X64: # %bb.0: # %entry
|
|
|
|
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
|
|
|
|
; X64-NEXT: vfmsubadd231pd %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x09,0xb7,0xd1]
|
|
|
|
; X64-NEXT: # xmm2 = (xmm0 * xmm1) -/+ xmm2
|
|
|
|
; X64-NEXT: vmovapd %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xc2]
|
|
|
|
; X64-NEXT: retq # encoding: [0xc3]
|
2018-05-30 23:25:16 +08:00
|
|
|
entry:
|
|
|
|
%sub.i = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %__C
|
|
|
|
%0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %__A, <2 x double> %__B, <2 x double> %sub.i) #9
|
|
|
|
%1 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %__A, <2 x double> %__B, <2 x double> %__C) #9
|
|
|
|
%2 = shufflevector <2 x double> %1, <2 x double> %0, <2 x i32> <i32 0, i32 3>
|
|
|
|
%3 = bitcast i8 %__U to <8 x i1>
|
|
|
|
%extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
|
|
|
|
%4 = select <2 x i1> %extract.i, <2 x double> %2, <2 x double> %__C
|
|
|
|
ret <2 x double> %4
|
|
|
|
}
|
|
|
|
|
|
|
|
define <4 x double> @test_mm256_mask3_fmsubadd_pd(<4 x double> %__A, <4 x double> %__B, <4 x double> %__C, i8 zeroext %__U) {
|
2018-06-03 22:56:04 +08:00
|
|
|
; X86-LABEL: test_mm256_mask3_fmsubadd_pd:
|
|
|
|
; X86: # %bb.0: # %entry
|
|
|
|
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
|
|
|
|
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
|
|
|
|
; X86-NEXT: vfmsubadd231pd %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x29,0xb7,0xd1]
|
|
|
|
; X86-NEXT: # ymm2 = (ymm0 * ymm1) -/+ ymm2
|
|
|
|
; X86-NEXT: vmovapd %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xc2]
|
|
|
|
; X86-NEXT: retl # encoding: [0xc3]
|
|
|
|
;
|
|
|
|
; X64-LABEL: test_mm256_mask3_fmsubadd_pd:
|
|
|
|
; X64: # %bb.0: # %entry
|
|
|
|
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
|
|
|
|
; X64-NEXT: vfmsubadd231pd %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x29,0xb7,0xd1]
|
|
|
|
; X64-NEXT: # ymm2 = (ymm0 * ymm1) -/+ ymm2
|
|
|
|
; X64-NEXT: vmovapd %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xc2]
|
|
|
|
; X64-NEXT: retq # encoding: [0xc3]
|
2018-05-30 23:25:16 +08:00
|
|
|
entry:
|
|
|
|
%sub.i = fsub <4 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__C
|
|
|
|
%0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %__A, <4 x double> %__B, <4 x double> %sub.i) #9
|
|
|
|
%1 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %__A, <4 x double> %__B, <4 x double> %__C) #9
|
|
|
|
%2 = shufflevector <4 x double> %1, <4 x double> %0, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
|
|
|
|
%3 = bitcast i8 %__U to <8 x i1>
|
|
|
|
%extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
|
|
|
|
%4 = select <4 x i1> %extract.i, <4 x double> %2, <4 x double> %__C
|
|
|
|
ret <4 x double> %4
|
|
|
|
}
|
|
|
|
|
|
|
|
define <4 x float> @test_mm_mask3_fmsubadd_ps(<4 x float> %__A, <4 x float> %__B, <4 x float> %__C, i8 zeroext %__U) {
|
2018-06-03 22:56:04 +08:00
|
|
|
; X86-LABEL: test_mm_mask3_fmsubadd_ps:
|
|
|
|
; X86: # %bb.0: # %entry
|
|
|
|
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
|
|
|
|
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
|
|
|
|
; X86-NEXT: vfmsubadd231ps %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x09,0xb7,0xd1]
|
|
|
|
; X86-NEXT: # xmm2 = (xmm0 * xmm1) -/+ xmm2
|
|
|
|
; X86-NEXT: vmovaps %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2]
|
|
|
|
; X86-NEXT: retl # encoding: [0xc3]
|
|
|
|
;
|
|
|
|
; X64-LABEL: test_mm_mask3_fmsubadd_ps:
|
|
|
|
; X64: # %bb.0: # %entry
|
|
|
|
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
|
|
|
|
; X64-NEXT: vfmsubadd231ps %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x09,0xb7,0xd1]
|
|
|
|
; X64-NEXT: # xmm2 = (xmm0 * xmm1) -/+ xmm2
|
|
|
|
; X64-NEXT: vmovaps %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2]
|
|
|
|
; X64-NEXT: retq # encoding: [0xc3]
|
2018-05-30 23:25:16 +08:00
|
|
|
entry:
|
|
|
|
%sub.i = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
|
|
|
|
%0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %__A, <4 x float> %__B, <4 x float> %sub.i) #9
|
|
|
|
%1 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %__A, <4 x float> %__B, <4 x float> %__C) #9
|
|
|
|
%2 = shufflevector <4 x float> %1, <4 x float> %0, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
|
|
|
|
%3 = bitcast i8 %__U to <8 x i1>
|
|
|
|
%extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
|
|
|
|
%4 = select <4 x i1> %extract.i, <4 x float> %2, <4 x float> %__C
|
|
|
|
ret <4 x float> %4
|
|
|
|
}
|
|
|
|
|
|
|
|
define <8 x float> @test_mm256_mask3_fmsubadd_ps(<8 x float> %__A, <8 x float> %__B, <8 x float> %__C, i8 zeroext %__U) {
|
2018-06-03 22:56:04 +08:00
|
|
|
; X86-LABEL: test_mm256_mask3_fmsubadd_ps:
|
|
|
|
; X86: # %bb.0: # %entry
|
|
|
|
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
|
|
|
|
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
|
|
|
|
; X86-NEXT: vfmsubadd231ps %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x29,0xb7,0xd1]
|
|
|
|
; X86-NEXT: # ymm2 = (ymm0 * ymm1) -/+ ymm2
|
|
|
|
; X86-NEXT: vmovaps %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc2]
|
|
|
|
; X86-NEXT: retl # encoding: [0xc3]
|
|
|
|
;
|
|
|
|
; X64-LABEL: test_mm256_mask3_fmsubadd_ps:
|
|
|
|
; X64: # %bb.0: # %entry
|
|
|
|
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
|
|
|
|
; X64-NEXT: vfmsubadd231ps %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x29,0xb7,0xd1]
|
|
|
|
; X64-NEXT: # ymm2 = (ymm0 * ymm1) -/+ ymm2
|
|
|
|
; X64-NEXT: vmovaps %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc2]
|
|
|
|
; X64-NEXT: retq # encoding: [0xc3]
|
2018-05-30 23:25:16 +08:00
|
|
|
entry:
|
|
|
|
%sub.i = fsub <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
|
|
|
|
%0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %__A, <8 x float> %__B, <8 x float> %sub.i) #9
|
|
|
|
%1 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %__A, <8 x float> %__B, <8 x float> %__C) #9
|
|
|
|
%2 = shufflevector <8 x float> %1, <8 x float> %0, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
|
|
|
|
%3 = bitcast i8 %__U to <8 x i1>
|
|
|
|
%4 = select <8 x i1> %3, <8 x float> %2, <8 x float> %__C
|
|
|
|
ret <8 x float> %4
|
|
|
|
}
|
|
|
|
|
|
|
|
define <2 x double> @test_mm_mask_fnmadd_pd(<2 x double> %__A, i8 zeroext %__U, <2 x double> %__B, <2 x double> %__C) {
|
2018-06-03 22:56:04 +08:00
|
|
|
; X86-LABEL: test_mm_mask_fnmadd_pd:
|
|
|
|
; X86: # %bb.0: # %entry
|
|
|
|
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
|
|
|
|
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
|
|
|
|
; X86-NEXT: vfnmadd132pd %xmm1, %xmm2, %xmm0 {%k1} # encoding: [0x62,0xf2,0xed,0x09,0x9c,0xc1]
|
|
|
|
; X86-NEXT: # xmm0 = -(xmm0 * xmm1) + xmm2
|
|
|
|
; X86-NEXT: retl # encoding: [0xc3]
|
|
|
|
;
|
|
|
|
; X64-LABEL: test_mm_mask_fnmadd_pd:
|
|
|
|
; X64: # %bb.0: # %entry
|
|
|
|
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
|
|
|
|
; X64-NEXT: vfnmadd132pd %xmm1, %xmm2, %xmm0 {%k1} # encoding: [0x62,0xf2,0xed,0x09,0x9c,0xc1]
|
|
|
|
; X64-NEXT: # xmm0 = -(xmm0 * xmm1) + xmm2
|
|
|
|
; X64-NEXT: retq # encoding: [0xc3]
|
2018-05-30 23:25:16 +08:00
|
|
|
entry:
|
|
|
|
%sub.i = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %__B
|
|
|
|
%0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %__A, <2 x double> %sub.i, <2 x double> %__C) #9
|
|
|
|
%1 = bitcast i8 %__U to <8 x i1>
|
|
|
|
%extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
|
|
|
|
%2 = select <2 x i1> %extract.i, <2 x double> %0, <2 x double> %__A
|
|
|
|
ret <2 x double> %2
|
|
|
|
}
|
|
|
|
|
|
|
|
define <4 x double> @test_mm256_mask_fnmadd_pd(<4 x double> %__A, i8 zeroext %__U, <4 x double> %__B, <4 x double> %__C) {
|
2018-06-03 22:56:04 +08:00
|
|
|
; X86-LABEL: test_mm256_mask_fnmadd_pd:
|
|
|
|
; X86: # %bb.0: # %entry
|
|
|
|
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
|
|
|
|
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
|
|
|
|
; X86-NEXT: vfnmadd132pd %ymm1, %ymm2, %ymm0 {%k1} # encoding: [0x62,0xf2,0xed,0x29,0x9c,0xc1]
|
|
|
|
; X86-NEXT: # ymm0 = -(ymm0 * ymm1) + ymm2
|
|
|
|
; X86-NEXT: retl # encoding: [0xc3]
|
|
|
|
;
|
|
|
|
; X64-LABEL: test_mm256_mask_fnmadd_pd:
|
|
|
|
; X64: # %bb.0: # %entry
|
|
|
|
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
|
|
|
|
; X64-NEXT: vfnmadd132pd %ymm1, %ymm2, %ymm0 {%k1} # encoding: [0x62,0xf2,0xed,0x29,0x9c,0xc1]
|
|
|
|
; X64-NEXT: # ymm0 = -(ymm0 * ymm1) + ymm2
|
|
|
|
; X64-NEXT: retq # encoding: [0xc3]
|
2018-05-30 23:25:16 +08:00
|
|
|
entry:
|
|
|
|
%sub.i = fsub <4 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__B
|
|
|
|
%0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %__A, <4 x double> %sub.i, <4 x double> %__C) #9
|
|
|
|
%1 = bitcast i8 %__U to <8 x i1>
|
|
|
|
%extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
|
|
|
|
%2 = select <4 x i1> %extract.i, <4 x double> %0, <4 x double> %__A
|
|
|
|
ret <4 x double> %2
|
|
|
|
}
|
|
|
|
|
|
|
|
define <4 x float> @test_mm_mask_fnmadd_ps(<4 x float> %__A, i8 zeroext %__U, <4 x float> %__B, <4 x float> %__C) {
|
2018-06-03 22:56:04 +08:00
|
|
|
; X86-LABEL: test_mm_mask_fnmadd_ps:
|
|
|
|
; X86: # %bb.0: # %entry
|
|
|
|
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
|
|
|
|
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
|
|
|
|
; X86-NEXT: vfnmadd132ps %xmm1, %xmm2, %xmm0 {%k1} # encoding: [0x62,0xf2,0x6d,0x09,0x9c,0xc1]
|
|
|
|
; X86-NEXT: # xmm0 = -(xmm0 * xmm1) + xmm2
|
|
|
|
; X86-NEXT: retl # encoding: [0xc3]
|
|
|
|
;
|
|
|
|
; X64-LABEL: test_mm_mask_fnmadd_ps:
|
|
|
|
; X64: # %bb.0: # %entry
|
|
|
|
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
|
|
|
|
; X64-NEXT: vfnmadd132ps %xmm1, %xmm2, %xmm0 {%k1} # encoding: [0x62,0xf2,0x6d,0x09,0x9c,0xc1]
|
|
|
|
; X64-NEXT: # xmm0 = -(xmm0 * xmm1) + xmm2
|
|
|
|
; X64-NEXT: retq # encoding: [0xc3]
|
2018-05-30 23:25:16 +08:00
|
|
|
entry:
|
|
|
|
%sub.i = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__B
|
|
|
|
%0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %__A, <4 x float> %sub.i, <4 x float> %__C) #9
|
|
|
|
%1 = bitcast i8 %__U to <8 x i1>
|
|
|
|
%extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
|
|
|
|
%2 = select <4 x i1> %extract.i, <4 x float> %0, <4 x float> %__A
|
|
|
|
ret <4 x float> %2
|
|
|
|
}
|
|
|
|
|
|
|
|
define <8 x float> @test_mm256_mask_fnmadd_ps(<8 x float> %__A, i8 zeroext %__U, <8 x float> %__B, <8 x float> %__C) {
|
2018-06-03 22:56:04 +08:00
|
|
|
; X86-LABEL: test_mm256_mask_fnmadd_ps:
|
|
|
|
; X86: # %bb.0: # %entry
|
|
|
|
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
|
|
|
|
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
|
|
|
|
; X86-NEXT: vfnmadd132ps %ymm1, %ymm2, %ymm0 {%k1} # encoding: [0x62,0xf2,0x6d,0x29,0x9c,0xc1]
|
|
|
|
; X86-NEXT: # ymm0 = -(ymm0 * ymm1) + ymm2
|
|
|
|
; X86-NEXT: retl # encoding: [0xc3]
|
|
|
|
;
|
|
|
|
; X64-LABEL: test_mm256_mask_fnmadd_ps:
|
|
|
|
; X64: # %bb.0: # %entry
|
|
|
|
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
|
|
|
|
; X64-NEXT: vfnmadd132ps %ymm1, %ymm2, %ymm0 {%k1} # encoding: [0x62,0xf2,0x6d,0x29,0x9c,0xc1]
|
|
|
|
; X64-NEXT: # ymm0 = -(ymm0 * ymm1) + ymm2
|
|
|
|
; X64-NEXT: retq # encoding: [0xc3]
|
2018-05-30 23:25:16 +08:00
|
|
|
entry:
|
|
|
|
%sub.i = fsub <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__B
|
|
|
|
%0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %__A, <8 x float> %sub.i, <8 x float> %__C) #9
|
|
|
|
%1 = bitcast i8 %__U to <8 x i1>
|
|
|
|
%2 = select <8 x i1> %1, <8 x float> %0, <8 x float> %__A
|
|
|
|
ret <8 x float> %2
|
|
|
|
}
|
|
|
|
|
|
|
|
define <2 x double> @test_mm_mask_fnmsub_pd(<2 x double> %__A, i8 zeroext %__U, <2 x double> %__B, <2 x double> %__C) {
|
2018-06-03 22:56:04 +08:00
|
|
|
; X86-LABEL: test_mm_mask_fnmsub_pd:
|
|
|
|
; X86: # %bb.0: # %entry
|
|
|
|
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
|
|
|
|
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
|
|
|
|
; X86-NEXT: vfnmsub132pd %xmm1, %xmm2, %xmm0 {%k1} # encoding: [0x62,0xf2,0xed,0x09,0x9e,0xc1]
|
|
|
|
; X86-NEXT: # xmm0 = -(xmm0 * xmm1) - xmm2
|
|
|
|
; X86-NEXT: retl # encoding: [0xc3]
|
|
|
|
;
|
|
|
|
; X64-LABEL: test_mm_mask_fnmsub_pd:
|
|
|
|
; X64: # %bb.0: # %entry
|
|
|
|
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
|
|
|
|
; X64-NEXT: vfnmsub132pd %xmm1, %xmm2, %xmm0 {%k1} # encoding: [0x62,0xf2,0xed,0x09,0x9e,0xc1]
|
|
|
|
; X64-NEXT: # xmm0 = -(xmm0 * xmm1) - xmm2
|
|
|
|
; X64-NEXT: retq # encoding: [0xc3]
|
2018-05-30 23:25:16 +08:00
|
|
|
entry:
|
|
|
|
%sub.i = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %__B
|
|
|
|
%sub1.i = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %__C
|
|
|
|
%0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %__A, <2 x double> %sub.i, <2 x double> %sub1.i) #9
|
|
|
|
%1 = bitcast i8 %__U to <8 x i1>
|
|
|
|
%extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
|
|
|
|
%2 = select <2 x i1> %extract.i, <2 x double> %0, <2 x double> %__A
|
|
|
|
ret <2 x double> %2
|
|
|
|
}
|
|
|
|
|
|
|
|
define <2 x double> @test_mm_mask3_fnmsub_pd(<2 x double> %__A, <2 x double> %__B, <2 x double> %__C, i8 zeroext %__U) {
|
2018-06-03 22:56:04 +08:00
|
|
|
; X86-LABEL: test_mm_mask3_fnmsub_pd:
|
|
|
|
; X86: # %bb.0: # %entry
|
|
|
|
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
|
|
|
|
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
|
|
|
|
; X86-NEXT: vfnmsub231pd %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x09,0xbe,0xd1]
|
|
|
|
; X86-NEXT: # xmm2 = -(xmm0 * xmm1) - xmm2
|
|
|
|
; X86-NEXT: vmovapd %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xc2]
|
|
|
|
; X86-NEXT: retl # encoding: [0xc3]
|
|
|
|
;
|
|
|
|
; X64-LABEL: test_mm_mask3_fnmsub_pd:
|
|
|
|
; X64: # %bb.0: # %entry
|
|
|
|
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
|
|
|
|
; X64-NEXT: vfnmsub231pd %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x09,0xbe,0xd1]
|
|
|
|
; X64-NEXT: # xmm2 = -(xmm0 * xmm1) - xmm2
|
|
|
|
; X64-NEXT: vmovapd %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xc2]
|
|
|
|
; X64-NEXT: retq # encoding: [0xc3]
|
2018-05-30 23:25:16 +08:00
|
|
|
entry:
|
|
|
|
%sub.i = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %__B
|
|
|
|
%sub1.i = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %__C
|
|
|
|
%0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %__A, <2 x double> %sub.i, <2 x double> %sub1.i) #9
|
|
|
|
%1 = bitcast i8 %__U to <8 x i1>
|
|
|
|
%extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
|
|
|
|
%2 = select <2 x i1> %extract.i, <2 x double> %0, <2 x double> %__C
|
|
|
|
ret <2 x double> %2
|
|
|
|
}
|
|
|
|
|
|
|
|
define <4 x double> @test_mm256_mask_fnmsub_pd(<4 x double> %__A, i8 zeroext %__U, <4 x double> %__B, <4 x double> %__C) {
|
2018-06-03 22:56:04 +08:00
|
|
|
; X86-LABEL: test_mm256_mask_fnmsub_pd:
|
|
|
|
; X86: # %bb.0: # %entry
|
|
|
|
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
|
|
|
|
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
|
|
|
|
; X86-NEXT: vfnmsub132pd %ymm1, %ymm2, %ymm0 {%k1} # encoding: [0x62,0xf2,0xed,0x29,0x9e,0xc1]
|
|
|
|
; X86-NEXT: # ymm0 = -(ymm0 * ymm1) - ymm2
|
|
|
|
; X86-NEXT: retl # encoding: [0xc3]
|
|
|
|
;
|
|
|
|
; X64-LABEL: test_mm256_mask_fnmsub_pd:
|
|
|
|
; X64: # %bb.0: # %entry
|
|
|
|
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
|
|
|
|
; X64-NEXT: vfnmsub132pd %ymm1, %ymm2, %ymm0 {%k1} # encoding: [0x62,0xf2,0xed,0x29,0x9e,0xc1]
|
|
|
|
; X64-NEXT: # ymm0 = -(ymm0 * ymm1) - ymm2
|
|
|
|
; X64-NEXT: retq # encoding: [0xc3]
|
2018-05-30 23:25:16 +08:00
|
|
|
entry:
|
|
|
|
%sub.i = fsub <4 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__B
|
|
|
|
%sub1.i = fsub <4 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__C
|
|
|
|
%0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %__A, <4 x double> %sub.i, <4 x double> %sub1.i) #9
|
|
|
|
%1 = bitcast i8 %__U to <8 x i1>
|
|
|
|
%extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
|
|
|
|
%2 = select <4 x i1> %extract.i, <4 x double> %0, <4 x double> %__A
|
|
|
|
ret <4 x double> %2
|
|
|
|
}
|
|
|
|
|
|
|
|
define <4 x double> @test_mm256_mask3_fnmsub_pd(<4 x double> %__A, <4 x double> %__B, <4 x double> %__C, i8 zeroext %__U) {
|
2018-06-03 22:56:04 +08:00
|
|
|
; X86-LABEL: test_mm256_mask3_fnmsub_pd:
|
|
|
|
; X86: # %bb.0: # %entry
|
|
|
|
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
|
|
|
|
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
|
|
|
|
; X86-NEXT: vfnmsub231pd %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x29,0xbe,0xd1]
|
|
|
|
; X86-NEXT: # ymm2 = -(ymm0 * ymm1) - ymm2
|
|
|
|
; X86-NEXT: vmovapd %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xc2]
|
|
|
|
; X86-NEXT: retl # encoding: [0xc3]
|
|
|
|
;
|
|
|
|
; X64-LABEL: test_mm256_mask3_fnmsub_pd:
|
|
|
|
; X64: # %bb.0: # %entry
|
|
|
|
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
|
|
|
|
; X64-NEXT: vfnmsub231pd %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x29,0xbe,0xd1]
|
|
|
|
; X64-NEXT: # ymm2 = -(ymm0 * ymm1) - ymm2
|
|
|
|
; X64-NEXT: vmovapd %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xc2]
|
|
|
|
; X64-NEXT: retq # encoding: [0xc3]
|
2018-05-30 23:25:16 +08:00
|
|
|
entry:
|
|
|
|
%sub.i = fsub <4 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__B
|
|
|
|
%sub1.i = fsub <4 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__C
|
|
|
|
%0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %__A, <4 x double> %sub.i, <4 x double> %sub1.i) #9
|
|
|
|
%1 = bitcast i8 %__U to <8 x i1>
|
|
|
|
%extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
|
|
|
|
%2 = select <4 x i1> %extract.i, <4 x double> %0, <4 x double> %__C
|
|
|
|
ret <4 x double> %2
|
|
|
|
}
|
|
|
|
|
|
|
|
define <4 x float> @test_mm_mask_fnmsub_ps(<4 x float> %__A, i8 zeroext %__U, <4 x float> %__B, <4 x float> %__C) {
|
2018-06-03 22:56:04 +08:00
|
|
|
; X86-LABEL: test_mm_mask_fnmsub_ps:
|
|
|
|
; X86: # %bb.0: # %entry
|
|
|
|
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
|
|
|
|
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
|
|
|
|
; X86-NEXT: vfnmsub132ps %xmm1, %xmm2, %xmm0 {%k1} # encoding: [0x62,0xf2,0x6d,0x09,0x9e,0xc1]
|
|
|
|
; X86-NEXT: # xmm0 = -(xmm0 * xmm1) - xmm2
|
|
|
|
; X86-NEXT: retl # encoding: [0xc3]
|
|
|
|
;
|
|
|
|
; X64-LABEL: test_mm_mask_fnmsub_ps:
|
|
|
|
; X64: # %bb.0: # %entry
|
|
|
|
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
|
|
|
|
; X64-NEXT: vfnmsub132ps %xmm1, %xmm2, %xmm0 {%k1} # encoding: [0x62,0xf2,0x6d,0x09,0x9e,0xc1]
|
|
|
|
; X64-NEXT: # xmm0 = -(xmm0 * xmm1) - xmm2
|
|
|
|
; X64-NEXT: retq # encoding: [0xc3]
|
2018-05-30 23:25:16 +08:00
|
|
|
entry:
|
|
|
|
%sub.i = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__B
|
|
|
|
%sub1.i = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
|
|
|
|
%0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %__A, <4 x float> %sub.i, <4 x float> %sub1.i) #9
|
|
|
|
%1 = bitcast i8 %__U to <8 x i1>
|
|
|
|
%extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
|
|
|
|
%2 = select <4 x i1> %extract.i, <4 x float> %0, <4 x float> %__A
|
|
|
|
ret <4 x float> %2
|
|
|
|
}
|
|
|
|
|
|
|
|
define <4 x float> @test_mm_mask3_fnmsub_ps(<4 x float> %__A, <4 x float> %__B, <4 x float> %__C, i8 zeroext %__U) {
|
2018-06-03 22:56:04 +08:00
|
|
|
; X86-LABEL: test_mm_mask3_fnmsub_ps:
|
|
|
|
; X86: # %bb.0: # %entry
|
|
|
|
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
|
|
|
|
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
|
|
|
|
; X86-NEXT: vfnmsub231ps %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x09,0xbe,0xd1]
|
|
|
|
; X86-NEXT: # xmm2 = -(xmm0 * xmm1) - xmm2
|
|
|
|
; X86-NEXT: vmovaps %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2]
|
|
|
|
; X86-NEXT: retl # encoding: [0xc3]
|
|
|
|
;
|
|
|
|
; X64-LABEL: test_mm_mask3_fnmsub_ps:
|
|
|
|
; X64: # %bb.0: # %entry
|
|
|
|
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
|
|
|
|
; X64-NEXT: vfnmsub231ps %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x09,0xbe,0xd1]
|
|
|
|
; X64-NEXT: # xmm2 = -(xmm0 * xmm1) - xmm2
|
|
|
|
; X64-NEXT: vmovaps %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2]
|
|
|
|
; X64-NEXT: retq # encoding: [0xc3]
|
2018-05-30 23:25:16 +08:00
|
|
|
entry:
|
|
|
|
%sub.i = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__B
|
|
|
|
%sub1.i = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
|
|
|
|
%0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %__A, <4 x float> %sub.i, <4 x float> %sub1.i) #9
|
|
|
|
%1 = bitcast i8 %__U to <8 x i1>
|
|
|
|
%extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
|
|
|
|
%2 = select <4 x i1> %extract.i, <4 x float> %0, <4 x float> %__C
|
|
|
|
ret <4 x float> %2
|
|
|
|
}
|
|
|
|
|
|
|
|
define <8 x float> @test_mm256_mask_fnmsub_ps(<8 x float> %__A, i8 zeroext %__U, <8 x float> %__B, <8 x float> %__C) {
; X86-LABEL: test_mm256_mask_fnmsub_ps:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT:    vfnmsub132ps %ymm1, %ymm2, %ymm0 {%k1} # encoding: [0x62,0xf2,0x6d,0x29,0x9e,0xc1]
; X86-NEXT:    # ymm0 = -(ymm0 * ymm1) - ymm2
; X86-NEXT:    retl # encoding: [0xc3]
;
; X64-LABEL: test_mm256_mask_fnmsub_ps:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT:    vfnmsub132ps %ymm1, %ymm2, %ymm0 {%k1} # encoding: [0x62,0xf2,0x6d,0x29,0x9e,0xc1]
; X64-NEXT:    # ymm0 = -(ymm0 * ymm1) - ymm2
; X64-NEXT:    retq # encoding: [0xc3]
entry:
  %sub.i = fsub <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__B
  %sub1.i = fsub <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
  %0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %__A, <8 x float> %sub.i, <8 x float> %sub1.i) #9
  %1 = bitcast i8 %__U to <8 x i1>
  %2 = select <8 x i1> %1, <8 x float> %0, <8 x float> %__A
  ret <8 x float> %2
}

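; As with the 128-bit mask3 form, the select below merges with %__C, so the
; result lands in %ymm2 (vfnmsub231ps) and is moved to %ymm0 for the return.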
define <8 x float> @test_mm256_mask3_fnmsub_ps(<8 x float> %__A, <8 x float> %__B, <8 x float> %__C, i8 zeroext %__U) {
; X86-LABEL: test_mm256_mask3_fnmsub_ps:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT:    vfnmsub231ps %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x29,0xbe,0xd1]
; X86-NEXT:    # ymm2 = -(ymm0 * ymm1) - ymm2
; X86-NEXT:    vmovaps %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc2]
; X86-NEXT:    retl # encoding: [0xc3]
;
; X64-LABEL: test_mm256_mask3_fnmsub_ps:
; X64:       # %bb.0: # %entry
; X64-NEXT:    kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT:    vfnmsub231ps %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x29,0xbe,0xd1]
; X64-NEXT:    # ymm2 = -(ymm0 * ymm1) - ymm2
; X64-NEXT:    vmovaps %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc2]
; X64-NEXT:    retq # encoding: [0xc3]
entry:
  %sub.i = fsub <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__B
  %sub1.i = fsub <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
  %0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %__A, <8 x float> %sub.i, <8 x float> %sub1.i) #9
  %1 = bitcast i8 %__U to <8 x i1>
  %2 = select <8 x i1> %1, <8 x float> %0, <8 x float> %__C
  ret <8 x float> %2
}

declare <2 x double> @llvm.fma.v2f64(<2 x double>, <2 x double>, <2 x double>) #8
declare <4 x double> @llvm.fma.v4f64(<4 x double>, <4 x double>, <4 x double>) #8
declare <4 x float> @llvm.fma.v4f32(<4 x float>, <4 x float>, <4 x float>) #8
declare <8 x float> @llvm.fma.v8f32(<8 x float>, <8 x float>, <8 x float>) #8