Revert "[NFC][CodeGen] Add unary FNeg tests to X86/avx512-intrinsics-fast-isel.ll"
This reverts commit aeb89f8b33.
llvm-svn: 363300
parent 8bcc9bb595
commit 675be5db46
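For context, a minimal illustrative sketch of the two IR spellings involved (not part of this commit; the function names below are hypothetical): the reverted *_unary_fneg tests negate an operand with the dedicated unary fneg instruction, while the tests that remain express the same negation as an fsub from negative zero.

define <8 x double> @fneg_unary_sketch(<8 x double> %x) {
  ; unary form exercised by the reverted tests
  %neg = fneg <8 x double> %x
  ret <8 x double> %neg
}

define <8 x double> @fneg_binary_sketch(<8 x double> %x) {
  ; pre-fneg idiom: element-wise subtraction from -0.0
  %neg = fsub <8 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %x
  ret <8 x double> %neg
}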
@@ -4705,27 +4705,6 @@ entry:
 ret <8 x double> %2
 }
 
-define <8 x double> @test_mm512_maskz_fmsubadd_round_pd_unary_fneg(i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B, <8 x double> %__C) {
-; X86-LABEL: test_mm512_maskz_fmsubadd_round_pd_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfmsubadd213pd {rn-sae}, %zmm2, %zmm1, %zmm0 {%k1} {z}
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm512_maskz_fmsubadd_round_pd_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfmsubadd213pd {rn-sae}, %zmm2, %zmm1, %zmm0 {%k1} {z}
-; X64-NEXT: retq
-entry:
-%neg = fneg <8 x double> %__C
-%0 = tail call <8 x double> @llvm.x86.avx512.vfmaddsub.pd.512(<8 x double> %__A, <8 x double> %__B, <8 x double> %neg, i32 8)
-%1 = bitcast i8 %__U to <8 x i1>
-%2 = select <8 x i1> %1, <8 x double> %0, <8 x double> zeroinitializer
-ret <8 x double> %2
-}
-
 define <8 x double> @test_mm512_fmaddsub_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) {
 ; CHECK-LABEL: test_mm512_fmaddsub_pd:
 ; CHECK: # %bb.0: # %entry
@@ -4739,19 +4718,6 @@ entry:
 ret <8 x double> %3
 }
 
-define <8 x double> @test_mm512_fmaddsub_pd_unary_fneg(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) {
-; CHECK-LABEL: test_mm512_fmaddsub_pd_unary_fneg:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vfmaddsub213pd {{.*#+}} zmm0 = (zmm1 * zmm0) +/- zmm2
-; CHECK-NEXT: ret{{[l|q]}}
-entry:
-%0 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) #10
-%1 = fneg <8 x double> %__C
-%2 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %1) #10
-%3 = shufflevector <8 x double> %2, <8 x double> %0, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
-ret <8 x double> %3
-}
-
 define <8 x double> @test_mm512_mask_fmaddsub_pd(<8 x double> %__A, i8 zeroext %__U, <8 x double> %__B, <8 x double> %__C) {
 ; X86-LABEL: test_mm512_mask_fmaddsub_pd:
 ; X86: # %bb.0: # %entry
@@ -4775,29 +4741,6 @@ entry:
 ret <8 x double> %5
 }
 
-define <8 x double> @test_mm512_mask_fmaddsub_pd_unary_fneg(<8 x double> %__A, i8 zeroext %__U, <8 x double> %__B, <8 x double> %__C) {
-; X86-LABEL: test_mm512_mask_fmaddsub_pd_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfmaddsub132pd {{.*#+}} zmm0 = (zmm0 * zmm1) +/- zmm2
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm512_mask_fmaddsub_pd_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfmaddsub132pd {{.*#+}} zmm0 = (zmm0 * zmm1) +/- zmm2
-; X64-NEXT: retq
-entry:
-%0 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) #10
-%1 = fneg <8 x double> %__C
-%2 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %1) #10
-%3 = shufflevector <8 x double> %2, <8 x double> %0, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
-%4 = bitcast i8 %__U to <8 x i1>
-%5 = select <8 x i1> %4, <8 x double> %3, <8 x double> %__A
-ret <8 x double> %5
-}
-
 define <8 x double> @test_mm512_mask3_fmaddsub_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i8 zeroext %__U) {
 ; X86-LABEL: test_mm512_mask3_fmaddsub_pd:
 ; X86: # %bb.0: # %entry
@@ -4823,31 +4766,6 @@ entry:
 ret <8 x double> %5
 }
 
-define <8 x double> @test_mm512_mask3_fmaddsub_pd_unary_fneg(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i8 zeroext %__U) {
-; X86-LABEL: test_mm512_mask3_fmaddsub_pd_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfmaddsub231pd {{.*#+}} zmm2 = (zmm0 * zmm1) +/- zmm2
-; X86-NEXT: vmovapd %zmm2, %zmm0
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm512_mask3_fmaddsub_pd_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfmaddsub231pd {{.*#+}} zmm2 = (zmm0 * zmm1) +/- zmm2
-; X64-NEXT: vmovapd %zmm2, %zmm0
-; X64-NEXT: retq
-entry:
-%0 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) #10
-%1 = fneg <8 x double> %__C
-%2 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %1) #10
-%3 = shufflevector <8 x double> %2, <8 x double> %0, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
-%4 = bitcast i8 %__U to <8 x i1>
-%5 = select <8 x i1> %4, <8 x double> %3, <8 x double> %__C
-ret <8 x double> %5
-}
-
 define <8 x double> @test_mm512_maskz_fmaddsub_pd(i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B, <8 x double> %__C) {
 ; X86-LABEL: test_mm512_maskz_fmaddsub_pd:
 ; X86: # %bb.0: # %entry
@@ -4871,29 +4789,6 @@ entry:
 ret <8 x double> %5
 }
 
-define <8 x double> @test_mm512_maskz_fmaddsub_pd_unary_fneg(i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B, <8 x double> %__C) {
-; X86-LABEL: test_mm512_maskz_fmaddsub_pd_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfmaddsub213pd {{.*#+}} zmm0 = (zmm1 * zmm0) +/- zmm2
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm512_maskz_fmaddsub_pd_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfmaddsub213pd {{.*#+}} zmm0 = (zmm1 * zmm0) +/- zmm2
-; X64-NEXT: retq
-entry:
-%0 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) #10
-%1 = fneg <8 x double> %__C
-%2 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %1) #10
-%3 = shufflevector <8 x double> %2, <8 x double> %0, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
-%4 = bitcast i8 %__U to <8 x i1>
-%5 = select <8 x i1> %4, <8 x double> %3, <8 x double> zeroinitializer
-ret <8 x double> %5
-}
-
 define <8 x double> @test_mm512_fmsubadd_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) {
 ; CHECK-LABEL: test_mm512_fmsubadd_pd:
 ; CHECK: # %bb.0: # %entry
@@ -4907,19 +4802,6 @@ entry:
 ret <8 x double> %2
 }
 
-define <8 x double> @test_mm512_fmsubadd_pd_unary_fneg(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) {
-; CHECK-LABEL: test_mm512_fmsubadd_pd_unary_fneg:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vfmsubadd213pd {{.*#+}} zmm0 = (zmm1 * zmm0) -/+ zmm2
-; CHECK-NEXT: ret{{[l|q]}}
-entry:
-%neg.i = fneg <8 x double> %__C
-%0 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %neg.i) #10
-%1 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) #10
-%2 = shufflevector <8 x double> %1, <8 x double> %0, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
-ret <8 x double> %2
-}
-
 define <8 x double> @test_mm512_mask_fmsubadd_pd(<8 x double> %__A, i8 zeroext %__U, <8 x double> %__B, <8 x double> %__C) {
 ; X86-LABEL: test_mm512_mask_fmsubadd_pd:
 ; X86: # %bb.0: # %entry
@@ -4943,29 +4825,6 @@ entry:
 ret <8 x double> %4
 }
 
-define <8 x double> @test_mm512_mask_fmsubadd_pd_unary_fneg(<8 x double> %__A, i8 zeroext %__U, <8 x double> %__B, <8 x double> %__C) {
-; X86-LABEL: test_mm512_mask_fmsubadd_pd_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfmsubadd132pd {{.*#+}} zmm0 = (zmm0 * zmm1) -/+ zmm2
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm512_mask_fmsubadd_pd_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfmsubadd132pd {{.*#+}} zmm0 = (zmm0 * zmm1) -/+ zmm2
-; X64-NEXT: retq
-entry:
-%neg.i = fneg <8 x double> %__C
-%0 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %neg.i) #10
-%1 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) #10
-%2 = shufflevector <8 x double> %1, <8 x double> %0, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
-%3 = bitcast i8 %__U to <8 x i1>
-%4 = select <8 x i1> %3, <8 x double> %2, <8 x double> %__A
-ret <8 x double> %4
-}
-
 define <8 x double> @test_mm512_maskz_fmsubadd_pd(i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B, <8 x double> %__C) {
 ; X86-LABEL: test_mm512_maskz_fmsubadd_pd:
 ; X86: # %bb.0: # %entry
@@ -4989,29 +4848,6 @@ entry:
 ret <8 x double> %4
 }
 
-define <8 x double> @test_mm512_maskz_fmsubadd_pd_unary_fneg(i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B, <8 x double> %__C) {
-; X86-LABEL: test_mm512_maskz_fmsubadd_pd_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfmsubadd213pd {{.*#+}} zmm0 = (zmm1 * zmm0) -/+ zmm2
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm512_maskz_fmsubadd_pd_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfmsubadd213pd {{.*#+}} zmm0 = (zmm1 * zmm0) -/+ zmm2
-; X64-NEXT: retq
-entry:
-%neg.i = fneg <8 x double> %__C
-%0 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %neg.i) #10
-%1 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) #10
-%2 = shufflevector <8 x double> %1, <8 x double> %0, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
-%3 = bitcast i8 %__U to <8 x i1>
-%4 = select <8 x i1> %3, <8 x double> %2, <8 x double> zeroinitializer
-ret <8 x double> %4
-}
-
 define <16 x float> @test_mm512_fmaddsub_round_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) {
 ; CHECK-LABEL: test_mm512_fmaddsub_round_ps:
 ; CHECK: # %bb.0: # %entry
@@ -5104,24 +4940,6 @@ entry:
 ret <16 x float> %0
 }
 
-define <16 x float> @test_mm512_fmsubadd_round_ps_unary_fneg(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) {
-; X86-LABEL: test_mm512_fmsubadd_round_ps_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: vpxord {{\.LCPI.*}}{1to16}, %zmm2, %zmm2
-; X86-NEXT: vfmaddsub213ps {rn-sae}, %zmm2, %zmm1, %zmm0
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm512_fmsubadd_round_ps_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: vpxord {{.*}}(%rip){1to16}, %zmm2, %zmm2
-; X64-NEXT: vfmaddsub213ps {rn-sae}, %zmm2, %zmm1, %zmm0
-; X64-NEXT: retq
-entry:
-%neg = fneg <16 x float> %__C
-%0 = tail call <16 x float> @llvm.x86.avx512.vfmaddsub.ps.512(<16 x float> %__A, <16 x float> %__B, <16 x float> %neg, i32 8)
-ret <16 x float> %0
-}
-
 define <16 x float> @test_mm512_mask_fmsubadd_round_ps(<16 x float> %__A, i16 zeroext %__U, <16 x float> %__B, <16 x float> %__C) {
 ; X86-LABEL: test_mm512_mask_fmsubadd_round_ps:
 ; X86: # %bb.0: # %entry
@@ -5143,27 +4961,6 @@ entry:
 ret <16 x float> %2
 }
 
-define <16 x float> @test_mm512_mask_fmsubadd_round_ps_unary_fneg(<16 x float> %__A, i16 zeroext %__U, <16 x float> %__B, <16 x float> %__C) {
-; X86-LABEL: test_mm512_mask_fmsubadd_round_ps_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfmsubadd132ps {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1}
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm512_mask_fmsubadd_round_ps_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfmsubadd132ps {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1}
-; X64-NEXT: retq
-entry:
-%neg = fneg <16 x float> %__C
-%0 = tail call <16 x float> @llvm.x86.avx512.vfmaddsub.ps.512(<16 x float> %__A, <16 x float> %__B, <16 x float> %neg, i32 8)
-%1 = bitcast i16 %__U to <16 x i1>
-%2 = select <16 x i1> %1, <16 x float> %0, <16 x float> %__A
-ret <16 x float> %2
-}
-
 define <16 x float> @test_mm512_maskz_fmsubadd_round_ps(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B, <16 x float> %__C) {
 ; X86-LABEL: test_mm512_maskz_fmsubadd_round_ps:
 ; X86: # %bb.0: # %entry
@@ -5185,27 +4982,6 @@ entry:
 ret <16 x float> %2
 }
 
-define <16 x float> @test_mm512_maskz_fmsubadd_round_ps_unary_fneg(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B, <16 x float> %__C) {
-; X86-LABEL: test_mm512_maskz_fmsubadd_round_ps_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfmsubadd213ps {rn-sae}, %zmm2, %zmm1, %zmm0 {%k1} {z}
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm512_maskz_fmsubadd_round_ps_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfmsubadd213ps {rn-sae}, %zmm2, %zmm1, %zmm0 {%k1} {z}
-; X64-NEXT: retq
-entry:
-%neg = fneg <16 x float> %__C
-%0 = tail call <16 x float> @llvm.x86.avx512.vfmaddsub.ps.512(<16 x float> %__A, <16 x float> %__B, <16 x float> %neg, i32 8)
-%1 = bitcast i16 %__U to <16 x i1>
-%2 = select <16 x i1> %1, <16 x float> %0, <16 x float> zeroinitializer
-ret <16 x float> %2
-}
-
 define <16 x float> @test_mm512_fmaddsub_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) {
 ; CHECK-LABEL: test_mm512_fmaddsub_ps:
 ; CHECK: # %bb.0: # %entry
@@ -5219,19 +4995,6 @@ entry:
 ret <16 x float> %3
 }
 
-define <16 x float> @test_mm512_fmaddsub_ps_unary_fneg(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) {
-; CHECK-LABEL: test_mm512_fmaddsub_ps_unary_fneg:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vfmaddsub213ps {{.*#+}} zmm0 = (zmm1 * zmm0) +/- zmm2
-; CHECK-NEXT: ret{{[l|q]}}
-entry:
-%0 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) #10
-%1 = fneg <16 x float> %__C
-%2 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %1) #10
-%3 = shufflevector <16 x float> %2, <16 x float> %0, <16 x i32> <i32 0, i32 17, i32 2, i32 19, i32 4, i32 21, i32 6, i32 23, i32 8, i32 25, i32 10, i32 27, i32 12, i32 29, i32 14, i32 31>
-ret <16 x float> %3
-}
-
 define <16 x float> @test_mm512_mask_fmaddsub_ps(<16 x float> %__A, i16 zeroext %__U, <16 x float> %__B, <16 x float> %__C) {
 ; X86-LABEL: test_mm512_mask_fmaddsub_ps:
 ; X86: # %bb.0: # %entry
@@ -5255,29 +5018,6 @@ entry:
 ret <16 x float> %5
 }
 
-define <16 x float> @test_mm512_mask_fmaddsub_ps_unary_fneg(<16 x float> %__A, i16 zeroext %__U, <16 x float> %__B, <16 x float> %__C) {
-; X86-LABEL: test_mm512_mask_fmaddsub_ps_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfmaddsub132ps {{.*#+}} zmm0 = (zmm0 * zmm1) +/- zmm2
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm512_mask_fmaddsub_ps_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfmaddsub132ps {{.*#+}} zmm0 = (zmm0 * zmm1) +/- zmm2
-; X64-NEXT: retq
-entry:
-%0 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) #10
-%1 = fneg <16 x float> %__C
-%2 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %1) #10
-%3 = shufflevector <16 x float> %2, <16 x float> %0, <16 x i32> <i32 0, i32 17, i32 2, i32 19, i32 4, i32 21, i32 6, i32 23, i32 8, i32 25, i32 10, i32 27, i32 12, i32 29, i32 14, i32 31>
-%4 = bitcast i16 %__U to <16 x i1>
-%5 = select <16 x i1> %4, <16 x float> %3, <16 x float> %__A
-ret <16 x float> %5
-}
-
 define <16 x float> @test_mm512_mask3_fmaddsub_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i16 zeroext %__U) {
 ; X86-LABEL: test_mm512_mask3_fmaddsub_ps:
 ; X86: # %bb.0: # %entry
@@ -5303,31 +5043,6 @@ entry:
 ret <16 x float> %5
 }
 
-define <16 x float> @test_mm512_mask3_fmaddsub_ps_unary_fneg(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i16 zeroext %__U) {
-; X86-LABEL: test_mm512_mask3_fmaddsub_ps_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfmaddsub231ps {{.*#+}} zmm2 = (zmm0 * zmm1) +/- zmm2
-; X86-NEXT: vmovaps %zmm2, %zmm0
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm512_mask3_fmaddsub_ps_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfmaddsub231ps {{.*#+}} zmm2 = (zmm0 * zmm1) +/- zmm2
-; X64-NEXT: vmovaps %zmm2, %zmm0
-; X64-NEXT: retq
-entry:
-%0 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) #10
-%1 = fneg <16 x float> %__C
-%2 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %1) #10
-%3 = shufflevector <16 x float> %2, <16 x float> %0, <16 x i32> <i32 0, i32 17, i32 2, i32 19, i32 4, i32 21, i32 6, i32 23, i32 8, i32 25, i32 10, i32 27, i32 12, i32 29, i32 14, i32 31>
-%4 = bitcast i16 %__U to <16 x i1>
-%5 = select <16 x i1> %4, <16 x float> %3, <16 x float> %__C
-ret <16 x float> %5
-}
-
 define <16 x float> @test_mm512_maskz_fmaddsub_ps(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B, <16 x float> %__C) {
 ; X86-LABEL: test_mm512_maskz_fmaddsub_ps:
 ; X86: # %bb.0: # %entry
@@ -5351,29 +5066,6 @@ entry:
 ret <16 x float> %5
 }
 
-define <16 x float> @test_mm512_maskz_fmaddsub_ps_unary_fneg(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B, <16 x float> %__C) {
-; X86-LABEL: test_mm512_maskz_fmaddsub_ps_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfmaddsub213ps {{.*#+}} zmm0 = (zmm1 * zmm0) +/- zmm2
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm512_maskz_fmaddsub_ps_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfmaddsub213ps {{.*#+}} zmm0 = (zmm1 * zmm0) +/- zmm2
-; X64-NEXT: retq
-entry:
-%0 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) #10
-%1 = fneg <16 x float> %__C
-%2 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %1) #10
-%3 = shufflevector <16 x float> %2, <16 x float> %0, <16 x i32> <i32 0, i32 17, i32 2, i32 19, i32 4, i32 21, i32 6, i32 23, i32 8, i32 25, i32 10, i32 27, i32 12, i32 29, i32 14, i32 31>
-%4 = bitcast i16 %__U to <16 x i1>
-%5 = select <16 x i1> %4, <16 x float> %3, <16 x float> zeroinitializer
-ret <16 x float> %5
-}
-
 define <16 x float> @test_mm512_fmsubadd_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) {
 ; CHECK-LABEL: test_mm512_fmsubadd_ps:
 ; CHECK: # %bb.0: # %entry
@@ -5387,19 +5079,6 @@ entry:
 ret <16 x float> %2
 }
 
-define <16 x float> @test_mm512_fmsubadd_ps_unary_fneg(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) {
-; CHECK-LABEL: test_mm512_fmsubadd_ps_unary_fneg:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vfmsubadd213ps {{.*#+}} zmm0 = (zmm1 * zmm0) -/+ zmm2
-; CHECK-NEXT: ret{{[l|q]}}
-entry:
-%neg.i = fneg <16 x float> %__C
-%0 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %neg.i) #10
-%1 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) #10
-%2 = shufflevector <16 x float> %1, <16 x float> %0, <16 x i32> <i32 0, i32 17, i32 2, i32 19, i32 4, i32 21, i32 6, i32 23, i32 8, i32 25, i32 10, i32 27, i32 12, i32 29, i32 14, i32 31>
-ret <16 x float> %2
-}
-
 define <16 x float> @test_mm512_mask_fmsubadd_ps(<16 x float> %__A, i16 zeroext %__U, <16 x float> %__B, <16 x float> %__C) {
 ; X86-LABEL: test_mm512_mask_fmsubadd_ps:
 ; X86: # %bb.0: # %entry
@@ -5423,29 +5102,6 @@ entry:
 ret <16 x float> %4
 }
 
-define <16 x float> @test_mm512_mask_fmsubadd_ps_unary_fneg(<16 x float> %__A, i16 zeroext %__U, <16 x float> %__B, <16 x float> %__C) {
-; X86-LABEL: test_mm512_mask_fmsubadd_ps_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfmsubadd132ps {{.*#+}} zmm0 = (zmm0 * zmm1) -/+ zmm2
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm512_mask_fmsubadd_ps_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfmsubadd132ps {{.*#+}} zmm0 = (zmm0 * zmm1) -/+ zmm2
-; X64-NEXT: retq
-entry:
-%neg.i = fneg <16 x float> %__C
-%0 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %neg.i) #10
-%1 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) #10
-%2 = shufflevector <16 x float> %1, <16 x float> %0, <16 x i32> <i32 0, i32 17, i32 2, i32 19, i32 4, i32 21, i32 6, i32 23, i32 8, i32 25, i32 10, i32 27, i32 12, i32 29, i32 14, i32 31>
-%3 = bitcast i16 %__U to <16 x i1>
-%4 = select <16 x i1> %3, <16 x float> %2, <16 x float> %__A
-ret <16 x float> %4
-}
-
 define <16 x float> @test_mm512_maskz_fmsubadd_ps(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B, <16 x float> %__C) {
 ; X86-LABEL: test_mm512_maskz_fmsubadd_ps:
 ; X86: # %bb.0: # %entry
@@ -5469,29 +5125,6 @@ entry:
 ret <16 x float> %4
 }
 
-define <16 x float> @test_mm512_maskz_fmsubadd_ps_unary_fneg(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B, <16 x float> %__C) {
-; X86-LABEL: test_mm512_maskz_fmsubadd_ps_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfmsubadd213ps {{.*#+}} zmm0 = (zmm1 * zmm0) -/+ zmm2
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm512_maskz_fmsubadd_ps_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfmsubadd213ps {{.*#+}} zmm0 = (zmm1 * zmm0) -/+ zmm2
-; X64-NEXT: retq
-entry:
-%neg.i = fneg <16 x float> %__C
-%0 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %neg.i) #10
-%1 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) #10
-%2 = shufflevector <16 x float> %1, <16 x float> %0, <16 x i32> <i32 0, i32 17, i32 2, i32 19, i32 4, i32 21, i32 6, i32 23, i32 8, i32 25, i32 10, i32 27, i32 12, i32 29, i32 14, i32 31>
-%3 = bitcast i16 %__U to <16 x i1>
-%4 = select <16 x i1> %3, <16 x float> %2, <16 x float> zeroinitializer
-ret <16 x float> %4
-}
-
 define <8 x double> @test_mm512_mask3_fmsub_round_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i8 zeroext %__U) {
 ; X86-LABEL: test_mm512_mask3_fmsub_round_pd:
 ; X86: # %bb.0: # %entry
@@ -5515,29 +5148,6 @@ entry:
 ret <8 x double> %2
 }
 
-define <8 x double> @test_mm512_mask3_fmsub_round_pd_unary_fneg(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i8 zeroext %__U) {
-; X86-LABEL: test_mm512_mask3_fmsub_round_pd_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfmsub231pd {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1}
-; X86-NEXT: vmovapd %zmm2, %zmm0
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm512_mask3_fmsub_round_pd_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfmsub231pd {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1}
-; X64-NEXT: vmovapd %zmm2, %zmm0
-; X64-NEXT: retq
-entry:
-%neg = fneg <8 x double> %__C
-%0 = tail call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %__A, <8 x double> %__B, <8 x double> %neg, i32 8)
-%1 = bitcast i8 %__U to <8 x i1>
-%2 = select <8 x i1> %1, <8 x double> %0, <8 x double> %__C
-ret <8 x double> %2
-}
-
 define <8 x double> @test_mm512_mask3_fmsub_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i8 zeroext %__U) {
 ; X86-LABEL: test_mm512_mask3_fmsub_pd:
 ; X86: # %bb.0: # %entry
@@ -5561,29 +5171,6 @@ entry:
 ret <8 x double> %2
 }
 
-define <8 x double> @test_mm512_mask3_fmsub_pd_unary_fneg(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i8 zeroext %__U) {
-; X86-LABEL: test_mm512_mask3_fmsub_pd_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfmsub231pd {{.*#+}} zmm2 = (zmm0 * zmm1) - zmm2
-; X86-NEXT: vmovapd %zmm2, %zmm0
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm512_mask3_fmsub_pd_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfmsub231pd {{.*#+}} zmm2 = (zmm0 * zmm1) - zmm2
-; X64-NEXT: vmovapd %zmm2, %zmm0
-; X64-NEXT: retq
-entry:
-%neg.i = fneg <8 x double> %__C
-%0 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %neg.i) #10
-%1 = bitcast i8 %__U to <8 x i1>
-%2 = select <8 x i1> %1, <8 x double> %0, <8 x double> %__C
-ret <8 x double> %2
-}
-
 define <16 x float> @test_mm512_mask3_fmsub_round_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i16 zeroext %__U) {
 ; X86-LABEL: test_mm512_mask3_fmsub_round_ps:
 ; X86: # %bb.0: # %entry
@@ -5607,29 +5194,6 @@ entry:
 ret <16 x float> %2
 }
 
-define <16 x float> @test_mm512_mask3_fmsub_round_ps_unary_fneg(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i16 zeroext %__U) {
-; X86-LABEL: test_mm512_mask3_fmsub_round_ps_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfmsub231ps {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1}
-; X86-NEXT: vmovaps %zmm2, %zmm0
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm512_mask3_fmsub_round_ps_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfmsub231ps {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1}
-; X64-NEXT: vmovaps %zmm2, %zmm0
-; X64-NEXT: retq
-entry:
-%neg = fneg <16 x float> %__C
-%0 = tail call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %__A, <16 x float> %__B, <16 x float> %neg, i32 8)
-%1 = bitcast i16 %__U to <16 x i1>
-%2 = select <16 x i1> %1, <16 x float> %0, <16 x float> %__C
-ret <16 x float> %2
-}
-
 define <16 x float> @test_mm512_mask3_fmsub_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i16 zeroext %__U) {
 ; X86-LABEL: test_mm512_mask3_fmsub_ps:
 ; X86: # %bb.0: # %entry
@@ -5653,29 +5217,6 @@ entry:
 ret <16 x float> %2
 }
 
-define <16 x float> @test_mm512_mask3_fmsub_ps_unary_fneg(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i16 zeroext %__U) {
-; X86-LABEL: test_mm512_mask3_fmsub_ps_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfmsub231ps {{.*#+}} zmm2 = (zmm0 * zmm1) - zmm2
-; X86-NEXT: vmovaps %zmm2, %zmm0
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm512_mask3_fmsub_ps_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfmsub231ps {{.*#+}} zmm2 = (zmm0 * zmm1) - zmm2
-; X64-NEXT: vmovaps %zmm2, %zmm0
-; X64-NEXT: retq
-entry:
-%neg.i = fneg <16 x float> %__C
-%0 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %neg.i) #10
-%1 = bitcast i16 %__U to <16 x i1>
-%2 = select <16 x i1> %1, <16 x float> %0, <16 x float> %__C
-ret <16 x float> %2
-}
-
 define <8 x double> @test_mm512_mask3_fmsubadd_round_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i8 zeroext %__U) {
 ; X86-LABEL: test_mm512_mask3_fmsubadd_round_pd:
 ; X86: # %bb.0: # %entry
@@ -5699,29 +5240,6 @@ entry:
 ret <8 x double> %2
 }
 
-define <8 x double> @test_mm512_mask3_fmsubadd_round_pd_unary_fneg(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i8 zeroext %__U) {
-; X86-LABEL: test_mm512_mask3_fmsubadd_round_pd_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfmsubadd231pd {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1}
-; X86-NEXT: vmovapd %zmm2, %zmm0
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm512_mask3_fmsubadd_round_pd_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfmsubadd231pd {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1}
-; X64-NEXT: vmovapd %zmm2, %zmm0
-; X64-NEXT: retq
-entry:
-%neg = fneg <8 x double> %__C
-%0 = tail call <8 x double> @llvm.x86.avx512.vfmaddsub.pd.512(<8 x double> %__A, <8 x double> %__B, <8 x double> %neg, i32 8)
-%1 = bitcast i8 %__U to <8 x i1>
-%2 = select <8 x i1> %1, <8 x double> %0, <8 x double> %__C
-ret <8 x double> %2
-}
-
 define <8 x double> @test_mm512_mask3_fmsubadd_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i8 zeroext %__U) {
 ; X86-LABEL: test_mm512_mask3_fmsubadd_pd:
 ; X86: # %bb.0: # %entry
@@ -5747,31 +5265,6 @@ entry:
 ret <8 x double> %4
 }
 
-define <8 x double> @test_mm512_mask3_fmsubadd_pd_unary_fneg(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i8 zeroext %__U) {
-; X86-LABEL: test_mm512_mask3_fmsubadd_pd_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfmsubadd231pd {{.*#+}} zmm2 = (zmm0 * zmm1) -/+ zmm2
-; X86-NEXT: vmovapd %zmm2, %zmm0
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm512_mask3_fmsubadd_pd_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfmsubadd231pd {{.*#+}} zmm2 = (zmm0 * zmm1) -/+ zmm2
-; X64-NEXT: vmovapd %zmm2, %zmm0
-; X64-NEXT: retq
-entry:
-%neg.i = fneg <8 x double> %__C
-%0 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %neg.i) #10
-%1 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) #10
-%2 = shufflevector <8 x double> %1, <8 x double> %0, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
-%3 = bitcast i8 %__U to <8 x i1>
-%4 = select <8 x i1> %3, <8 x double> %2, <8 x double> %__C
-ret <8 x double> %4
-}
-
 define <16 x float> @test_mm512_mask3_fmsubadd_round_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i16 zeroext %__U) {
 ; X86-LABEL: test_mm512_mask3_fmsubadd_round_ps:
 ; X86: # %bb.0: # %entry
@@ -5795,29 +5288,6 @@ entry:
 ret <16 x float> %2
 }
 
-define <16 x float> @test_mm512_mask3_fmsubadd_round_ps_unary_fneg(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i16 zeroext %__U) {
-; X86-LABEL: test_mm512_mask3_fmsubadd_round_ps_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfmsubadd231ps {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1}
-; X86-NEXT: vmovaps %zmm2, %zmm0
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm512_mask3_fmsubadd_round_ps_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfmsubadd231ps {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1}
-; X64-NEXT: vmovaps %zmm2, %zmm0
-; X64-NEXT: retq
-entry:
-%neg = fneg <16 x float> %__C
-%0 = tail call <16 x float> @llvm.x86.avx512.vfmaddsub.ps.512(<16 x float> %__A, <16 x float> %__B, <16 x float> %neg, i32 8)
-%1 = bitcast i16 %__U to <16 x i1>
-%2 = select <16 x i1> %1, <16 x float> %0, <16 x float> %__C
-ret <16 x float> %2
-}
-
 define <16 x float> @test_mm512_mask3_fmsubadd_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i16 zeroext %__U) {
 ; X86-LABEL: test_mm512_mask3_fmsubadd_ps:
 ; X86: # %bb.0: # %entry
@@ -5843,31 +5313,6 @@ entry:
 ret <16 x float> %4
 }
 
-define <16 x float> @test_mm512_mask3_fmsubadd_ps_unary_fneg(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i16 zeroext %__U) {
-; X86-LABEL: test_mm512_mask3_fmsubadd_ps_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfmsubadd231ps {{.*#+}} zmm2 = (zmm0 * zmm1) -/+ zmm2
-; X86-NEXT: vmovaps %zmm2, %zmm0
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm512_mask3_fmsubadd_ps_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfmsubadd231ps {{.*#+}} zmm2 = (zmm0 * zmm1) -/+ zmm2
-; X64-NEXT: vmovaps %zmm2, %zmm0
-; X64-NEXT: retq
-entry:
-%neg.i = fneg <16 x float> %__C
-%0 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %neg.i) #10
-%1 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) #10
-%2 = shufflevector <16 x float> %1, <16 x float> %0, <16 x i32> <i32 0, i32 17, i32 2, i32 19, i32 4, i32 21, i32 6, i32 23, i32 8, i32 25, i32 10, i32 27, i32 12, i32 29, i32 14, i32 31>
-%3 = bitcast i16 %__U to <16 x i1>
-%4 = select <16 x i1> %3, <16 x float> %2, <16 x float> %__C
-ret <16 x float> %4
-}
-
 define <8 x double> @test_mm512_mask_fnmadd_round_pd(<8 x double> %__A, i8 zeroext %__U, <8 x double> %__B, <8 x double> %__C) {
 ; X86-LABEL: test_mm512_mask_fnmadd_round_pd:
 ; X86: # %bb.0: # %entry
@@ -5889,27 +5334,6 @@ entry:
 ret <8 x double> %2
 }
 
-define <8 x double> @test_mm512_mask_fnmadd_round_pd_unary_fneg(<8 x double> %__A, i8 zeroext %__U, <8 x double> %__B, <8 x double> %__C) {
-; X86-LABEL: test_mm512_mask_fnmadd_round_pd_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfnmadd132pd {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1}
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm512_mask_fnmadd_round_pd_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfnmadd132pd {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1}
-; X64-NEXT: retq
-entry:
-%neg = fneg <8 x double> %__A
-%0 = tail call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %neg, <8 x double> %__B, <8 x double> %__C, i32 8)
-%1 = bitcast i8 %__U to <8 x i1>
-%2 = select <8 x i1> %1, <8 x double> %0, <8 x double> %__A
-ret <8 x double> %2
-}
-
 define <8 x double> @test_mm512_mask_fnmadd_pd(<8 x double> %__A, i8 zeroext %__U, <8 x double> %__B, <8 x double> %__C) {
 ; X86-LABEL: test_mm512_mask_fnmadd_pd:
 ; X86: # %bb.0: # %entry
@@ -5931,27 +5355,6 @@ entry:
 ret <8 x double> %2
 }
 
-define <8 x double> @test_mm512_mask_fnmadd_pd_unary_fneg(<8 x double> %__A, i8 zeroext %__U, <8 x double> %__B, <8 x double> %__C) {
-; X86-LABEL: test_mm512_mask_fnmadd_pd_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfnmadd132pd {{.*#+}} zmm0 = -(zmm0 * zmm1) + zmm2
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm512_mask_fnmadd_pd_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfnmadd132pd {{.*#+}} zmm0 = -(zmm0 * zmm1) + zmm2
-; X64-NEXT: retq
-entry:
-%neg.i = fneg <8 x double> %__A
-%0 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %neg.i, <8 x double> %__B, <8 x double> %__C) #10
-%1 = bitcast i8 %__U to <8 x i1>
-%2 = select <8 x i1> %1, <8 x double> %0, <8 x double> %__A
-ret <8 x double> %2
-}
-
 define <16 x float> @test_mm512_mask_fnmadd_round_ps(<16 x float> %__A, i16 zeroext %__U, <16 x float> %__B, <16 x float> %__C) {
 ; X86-LABEL: test_mm512_mask_fnmadd_round_ps:
 ; X86: # %bb.0: # %entry
@@ -5973,27 +5376,6 @@ entry:
 ret <16 x float> %2
 }
 
-define <16 x float> @test_mm512_mask_fnmadd_round_ps_unary_fneg(<16 x float> %__A, i16 zeroext %__U, <16 x float> %__B, <16 x float> %__C) {
-; X86-LABEL: test_mm512_mask_fnmadd_round_ps_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfnmadd132ps {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1}
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm512_mask_fnmadd_round_ps_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfnmadd132ps {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1}
-; X64-NEXT: retq
-entry:
-%neg = fneg <16 x float> %__A
-%0 = tail call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %neg, <16 x float> %__B, <16 x float> %__C, i32 8)
-%1 = bitcast i16 %__U to <16 x i1>
-%2 = select <16 x i1> %1, <16 x float> %0, <16 x float> %__A
-ret <16 x float> %2
-}
-
 define <16 x float> @test_mm512_mask_fnmadd_ps(<16 x float> %__A, i16 zeroext %__U, <16 x float> %__B, <16 x float> %__C) {
 ; X86-LABEL: test_mm512_mask_fnmadd_ps:
 ; X86: # %bb.0: # %entry
@@ -6015,27 +5397,6 @@ entry:
 ret <16 x float> %2
 }
 
-define <16 x float> @test_mm512_mask_fnmadd_ps_unary_fneg(<16 x float> %__A, i16 zeroext %__U, <16 x float> %__B, <16 x float> %__C) {
-; X86-LABEL: test_mm512_mask_fnmadd_ps_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfnmadd132ps {{.*#+}} zmm0 = -(zmm0 * zmm1) + zmm2
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm512_mask_fnmadd_ps_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfnmadd132ps {{.*#+}} zmm0 = -(zmm0 * zmm1) + zmm2
-; X64-NEXT: retq
-entry:
-%neg.i = fneg <16 x float> %__A
-%0 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %neg.i, <16 x float> %__B, <16 x float> %__C) #10
-%1 = bitcast i16 %__U to <16 x i1>
-%2 = select <16 x i1> %1, <16 x float> %0, <16 x float> %__A
-ret <16 x float> %2
-}
-
 define <8 x double> @test_mm512_mask_fnmsub_round_pd(<8 x double> %__A, i8 zeroext %__U, <8 x double> %__B, <8 x double> %__C) {
 ; X86-LABEL: test_mm512_mask_fnmsub_round_pd:
 ; X86: # %bb.0: # %entry
@@ -6058,28 +5419,6 @@ entry:
 ret <8 x double> %2
 }
 
-define <8 x double> @test_mm512_mask_fnmsub_round_pd_unary_fneg(<8 x double> %__A, i8 zeroext %__U, <8 x double> %__B, <8 x double> %__C) {
-; X86-LABEL: test_mm512_mask_fnmsub_round_pd_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfnmsub132pd {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1}
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm512_mask_fnmsub_round_pd_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfnmsub132pd {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1}
-; X64-NEXT: retq
-entry:
-%neg = fneg <8 x double> %__B
-%neg1 = fneg <8 x double> %__C
-%0 = tail call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %__A, <8 x double> %neg, <8 x double> %neg1, i32 8)
-%1 = bitcast i8 %__U to <8 x i1>
-%2 = select <8 x i1> %1, <8 x double> %0, <8 x double> %__A
-ret <8 x double> %2
-}
-
 define <8 x double> @test_mm512_mask3_fnmsub_round_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i8 zeroext %__U) {
 ; X86-LABEL: test_mm512_mask3_fnmsub_round_pd:
 ; X86: # %bb.0: # %entry
@@ -6104,30 +5443,6 @@ entry:
 ret <8 x double> %2
 }
 
-define <8 x double> @test_mm512_mask3_fnmsub_round_pd_unary_fneg(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i8 zeroext %__U) {
-; X86-LABEL: test_mm512_mask3_fnmsub_round_pd_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfnmsub231pd {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1}
-; X86-NEXT: vmovapd %zmm2, %zmm0
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm512_mask3_fnmsub_round_pd_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfnmsub231pd {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1}
-; X64-NEXT: vmovapd %zmm2, %zmm0
-; X64-NEXT: retq
-entry:
-%neg = fneg <8 x double> %__B
-%neg1 = fneg <8 x double> %__C
-%0 = tail call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %__A, <8 x double> %neg, <8 x double> %neg1, i32 8)
-%1 = bitcast i8 %__U to <8 x i1>
-%2 = select <8 x i1> %1, <8 x double> %0, <8 x double> %__C
-ret <8 x double> %2
-}
-
 define <8 x double> @test_mm512_mask_fnmsub_pd(<8 x double> %__A, i8 zeroext %__U, <8 x double> %__B, <8 x double> %__C) {
 ; X86-LABEL: test_mm512_mask_fnmsub_pd:
 ; X86: # %bb.0: # %entry
@@ -6150,28 +5465,6 @@ entry:
 ret <8 x double> %2
 }
 
-define <8 x double> @test_mm512_mask_fnmsub_pd_unary_fneg(<8 x double> %__A, i8 zeroext %__U, <8 x double> %__B, <8 x double> %__C) {
-; X86-LABEL: test_mm512_mask_fnmsub_pd_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfnmsub132pd {{.*#+}} zmm0 = -(zmm0 * zmm1) - zmm2
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm512_mask_fnmsub_pd_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfnmsub132pd {{.*#+}} zmm0 = -(zmm0 * zmm1) - zmm2
-; X64-NEXT: retq
-entry:
-%neg.i = fneg <8 x double> %__B
-%neg2.i = fneg <8 x double> %__C
-%0 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %neg.i, <8 x double> %neg2.i) #10
-%1 = bitcast i8 %__U to <8 x i1>
-%2 = select <8 x i1> %1, <8 x double> %0, <8 x double> %__A
-ret <8 x double> %2
-}
-
 define <8 x double> @test_mm512_mask3_fnmsub_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i8 zeroext %__U) {
 ; X86-LABEL: test_mm512_mask3_fnmsub_pd:
 ; X86: # %bb.0: # %entry
@@ -6196,30 +5489,6 @@ entry:
 ret <8 x double> %2
 }
 
-define <8 x double> @test_mm512_mask3_fnmsub_pd_unary_fneg(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i8 zeroext %__U) {
-; X86-LABEL: test_mm512_mask3_fnmsub_pd_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfnmsub231pd {{.*#+}} zmm2 = -(zmm0 * zmm1) - zmm2
-; X86-NEXT: vmovapd %zmm2, %zmm0
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm512_mask3_fnmsub_pd_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfnmsub231pd {{.*#+}} zmm2 = -(zmm0 * zmm1) - zmm2
-; X64-NEXT: vmovapd %zmm2, %zmm0
-; X64-NEXT: retq
-entry:
-%neg.i = fneg <8 x double> %__B
-%neg2.i = fneg <8 x double> %__C
-%0 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %neg.i, <8 x double> %neg2.i) #10
-%1 = bitcast i8 %__U to <8 x i1>
-%2 = select <8 x i1> %1, <8 x double> %0, <8 x double> %__C
-ret <8 x double> %2
-}
-
 define <16 x float> @test_mm512_mask_fnmsub_round_ps(<16 x float> %__A, i16 zeroext %__U, <16 x float> %__B, <16 x float> %__C) {
 ; X86-LABEL: test_mm512_mask_fnmsub_round_ps:
 ; X86: # %bb.0: # %entry
@@ -6266,30 +5535,6 @@ entry:
 ret <16 x float> %2
 }
 
-define <16 x float> @test_mm512_mask3_fnmsub_round_ps_unary_fneg(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i16 zeroext %__U) {
-; X86-LABEL: test_mm512_mask3_fnmsub_round_ps_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfnmsub231ps {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1}
-; X86-NEXT: vmovaps %zmm2, %zmm0
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm512_mask3_fnmsub_round_ps_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfnmsub231ps {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1}
-; X64-NEXT: vmovaps %zmm2, %zmm0
-; X64-NEXT: retq
-entry:
-%neg = fneg <16 x float> %__B
-%neg1 = fneg <16 x float> %__C
-%0 = tail call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %__A, <16 x float> %neg, <16 x float> %neg1, i32 8)
-%1 = bitcast i16 %__U to <16 x i1>
-%2 = select <16 x i1> %1, <16 x float> %0, <16 x float> %__C
-ret <16 x float> %2
-}
-
 define <16 x float> @test_mm512_mask_fnmsub_ps(<16 x float> %__A, i16 zeroext %__U, <16 x float> %__B, <16 x float> %__C) {
 ; X86-LABEL: test_mm512_mask_fnmsub_ps:
 ; X86: # %bb.0: # %entry