Revert "[NFC][CodeGen] Add unary FNeg tests to X86/avx512vl-intrinsics-fast-isel.ll X86/combine-fabs.ll"
This reverts commit 6fe46ec25d.
llvm-svn: 363304
parent 21a29a9e65
commit 4890457196
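
Note (editor's illustration, not part of the reverted patch): the tests removed below exercise LLVM IR's unary fneg instruction, which writes floating-point negation directly rather than through the older binary idiom of subtracting from negative zero. A minimal sketch of the two equivalent forms, using hypothetical function names:

; binary idiom: negate by subtracting from -0.0
define <2 x double> @neg_binary(<2 x double> %x) {
  %r = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %x
  ret <2 x double> %r
}

; unary fneg, the form exercised by the *_unary_fneg tests below
define <2 x double> @neg_unary(<2 x double> %x) {
  %r = fneg <2 x double> %x
  ret <2 x double> %r
}
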
@@ -4282,28 +4282,6 @@ entry:
 ret <2 x double> %2
 }
 
-define <2 x double> @test_mm_mask_fmsub_pd_unary_fneg(<2 x double> %__A, i8 zeroext %__U, <2 x double> %__B, <2 x double> %__C) {
-; X86-LABEL: test_mm_mask_fmsub_pd_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfmsub132pd {{.*#+}} xmm0 = (xmm0 * xmm1) - xmm2
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm_mask_fmsub_pd_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfmsub132pd {{.*#+}} xmm0 = (xmm0 * xmm1) - xmm2
-; X64-NEXT: retq
-entry:
-%neg.i = fneg <2 x double> %__C
-%0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %__A, <2 x double> %__B, <2 x double> %neg.i) #9
-%1 = bitcast i8 %__U to <8 x i1>
-%extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
-%2 = select <2 x i1> %extract.i, <2 x double> %0, <2 x double> %__A
-ret <2 x double> %2
-}
-
 define <2 x double> @test_mm_mask3_fmadd_pd(<2 x double> %__A, <2 x double> %__B, <2 x double> %__C, i8 zeroext %__U) {
 ; X86-LABEL: test_mm_mask3_fmadd_pd:
 ; X86: # %bb.0: # %entry
@@ -4351,30 +4329,6 @@ entry:
 ret <2 x double> %2
 }
 
-define <2 x double> @test_mm_mask3_fnmadd_pd_unary_fneg(<2 x double> %__A, <2 x double> %__B, <2 x double> %__C, i8 zeroext %__U) {
-; X86-LABEL: test_mm_mask3_fnmadd_pd_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfnmadd231pd {{.*#+}} xmm2 = -(xmm0 * xmm1) + xmm2
-; X86-NEXT: vmovapd %xmm2, %xmm0
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm_mask3_fnmadd_pd_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfnmadd231pd {{.*#+}} xmm2 = -(xmm0 * xmm1) + xmm2
-; X64-NEXT: vmovapd %xmm2, %xmm0
-; X64-NEXT: retq
-entry:
-%neg.i = fneg <2 x double> %__A
-%0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %neg.i, <2 x double> %__B, <2 x double> %__C) #9
-%1 = bitcast i8 %__U to <8 x i1>
-%extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
-%2 = select <2 x i1> %extract.i, <2 x double> %0, <2 x double> %__C
-ret <2 x double> %2
-}
-
 define <2 x double> @test_mm_maskz_fmadd_pd(i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B, <2 x double> %__C) {
 ; X86-LABEL: test_mm_maskz_fmadd_pd:
 ; X86: # %bb.0: # %entry
@@ -4418,28 +4372,6 @@ entry:
 ret <2 x double> %2
 }
 
-define <2 x double> @test_mm_maskz_fmsub_pd_unary_fneg(i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B, <2 x double> %__C) {
-; X86-LABEL: test_mm_maskz_fmsub_pd_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfmsub213pd {{.*#+}} xmm0 = (xmm1 * xmm0) - xmm2
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm_maskz_fmsub_pd_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfmsub213pd {{.*#+}} xmm0 = (xmm1 * xmm0) - xmm2
-; X64-NEXT: retq
-entry:
-%neg.i = fneg <2 x double> %__C
-%0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %__A, <2 x double> %__B, <2 x double> %neg.i) #9
-%1 = bitcast i8 %__U to <8 x i1>
-%extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
-%2 = select <2 x i1> %extract.i, <2 x double> %0, <2 x double> zeroinitializer
-ret <2 x double> %2
-}
-
 define <2 x double> @test_mm_maskz_fnmadd_pd(i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B, <2 x double> %__C) {
 ; X86-LABEL: test_mm_maskz_fnmadd_pd:
 ; X86: # %bb.0: # %entry
@@ -4462,28 +4394,6 @@ entry:
 ret <2 x double> %2
 }
 
-define <2 x double> @test_mm_maskz_fnmadd_pd_unary_fneg(i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B, <2 x double> %__C) {
-; X86-LABEL: test_mm_maskz_fnmadd_pd_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfnmadd213pd {{.*#+}} xmm0 = -(xmm1 * xmm0) + xmm2
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm_maskz_fnmadd_pd_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfnmadd213pd {{.*#+}} xmm0 = -(xmm1 * xmm0) + xmm2
-; X64-NEXT: retq
-entry:
-%neg.i = fneg <2 x double> %__A
-%0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %neg.i, <2 x double> %__B, <2 x double> %__C) #9
-%1 = bitcast i8 %__U to <8 x i1>
-%extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
-%2 = select <2 x i1> %extract.i, <2 x double> %0, <2 x double> zeroinitializer
-ret <2 x double> %2
-}
-
 define <2 x double> @test_mm_maskz_fnmsub_pd(i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B, <2 x double> %__C) {
 ; X86-LABEL: test_mm_maskz_fnmsub_pd:
 ; X86: # %bb.0: # %entry
@@ -4507,29 +4417,6 @@ entry:
 ret <2 x double> %2
 }
 
-define <2 x double> @test_mm_maskz_fnmsub_pd_unary_fneg(i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B, <2 x double> %__C) {
-; X86-LABEL: test_mm_maskz_fnmsub_pd_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfnmsub213pd {{.*#+}} xmm0 = -(xmm1 * xmm0) - xmm2
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm_maskz_fnmsub_pd_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfnmsub213pd {{.*#+}} xmm0 = -(xmm1 * xmm0) - xmm2
-; X64-NEXT: retq
-entry:
-%neg.i = fneg <2 x double> %__A
-%neg1.i = fneg <2 x double> %__C
-%0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %neg.i, <2 x double> %__B, <2 x double> %neg1.i) #9
-%1 = bitcast i8 %__U to <8 x i1>
-%extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
-%2 = select <2 x i1> %extract.i, <2 x double> %0, <2 x double> zeroinitializer
-ret <2 x double> %2
-}
-
 define <4 x double> @test_mm256_mask_fmadd_pd(<4 x double> %__A, i8 zeroext %__U, <4 x double> %__B, <4 x double> %__C) {
 ; X86-LABEL: test_mm256_mask_fmadd_pd:
 ; X86: # %bb.0: # %entry
@@ -4573,28 +4460,6 @@ entry:
 ret <4 x double> %2
 }
 
-define <4 x double> @test_mm256_mask_fmsub_pd_unary_fneg(<4 x double> %__A, i8 zeroext %__U, <4 x double> %__B, <4 x double> %__C) {
-; X86-LABEL: test_mm256_mask_fmsub_pd_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfmsub132pd {{.*#+}} ymm0 = (ymm0 * ymm1) - ymm2
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm256_mask_fmsub_pd_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfmsub132pd {{.*#+}} ymm0 = (ymm0 * ymm1) - ymm2
-; X64-NEXT: retq
-entry:
-%neg.i = fneg <4 x double> %__C
-%0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %__A, <4 x double> %__B, <4 x double> %neg.i) #9
-%1 = bitcast i8 %__U to <8 x i1>
-%extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-%2 = select <4 x i1> %extract.i, <4 x double> %0, <4 x double> %__A
-ret <4 x double> %2
-}
-
 define <4 x double> @test_mm256_mask3_fmadd_pd(<4 x double> %__A, <4 x double> %__B, <4 x double> %__C, i8 zeroext %__U) {
 ; X86-LABEL: test_mm256_mask3_fmadd_pd:
 ; X86: # %bb.0: # %entry
@@ -4642,30 +4507,6 @@ entry:
 ret <4 x double> %2
 }
 
-define <4 x double> @test_mm256_mask3_fnmadd_pd_unary_fneg(<4 x double> %__A, <4 x double> %__B, <4 x double> %__C, i8 zeroext %__U) {
-; X86-LABEL: test_mm256_mask3_fnmadd_pd_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfnmadd231pd {{.*#+}} ymm2 = -(ymm0 * ymm1) + ymm2
-; X86-NEXT: vmovapd %ymm2, %ymm0
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm256_mask3_fnmadd_pd_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfnmadd231pd {{.*#+}} ymm2 = -(ymm0 * ymm1) + ymm2
-; X64-NEXT: vmovapd %ymm2, %ymm0
-; X64-NEXT: retq
-entry:
-%neg.i = fneg <4 x double> %__A
-%0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %neg.i, <4 x double> %__B, <4 x double> %__C) #9
-%1 = bitcast i8 %__U to <8 x i1>
-%extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-%2 = select <4 x i1> %extract.i, <4 x double> %0, <4 x double> %__C
-ret <4 x double> %2
-}
-
 define <4 x double> @test_mm256_maskz_fmadd_pd(i8 zeroext %__U, <4 x double> %__A, <4 x double> %__B, <4 x double> %__C) {
 ; X86-LABEL: test_mm256_maskz_fmadd_pd:
 ; X86: # %bb.0: # %entry
@@ -4709,28 +4550,6 @@ entry:
 ret <4 x double> %2
 }
 
-define <4 x double> @test_mm256_maskz_fmsub_pd_unary_fneg(i8 zeroext %__U, <4 x double> %__A, <4 x double> %__B, <4 x double> %__C) {
-; X86-LABEL: test_mm256_maskz_fmsub_pd_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfmsub213pd {{.*#+}} ymm0 = (ymm1 * ymm0) - ymm2
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm256_maskz_fmsub_pd_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfmsub213pd {{.*#+}} ymm0 = (ymm1 * ymm0) - ymm2
-; X64-NEXT: retq
-entry:
-%neg.i = fneg <4 x double> %__C
-%0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %__A, <4 x double> %__B, <4 x double> %neg.i) #9
-%1 = bitcast i8 %__U to <8 x i1>
-%extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-%2 = select <4 x i1> %extract.i, <4 x double> %0, <4 x double> zeroinitializer
-ret <4 x double> %2
-}
-
 define <4 x double> @test_mm256_maskz_fnmadd_pd(i8 zeroext %__U, <4 x double> %__A, <4 x double> %__B, <4 x double> %__C) {
 ; X86-LABEL: test_mm256_maskz_fnmadd_pd:
 ; X86: # %bb.0: # %entry
@@ -4753,28 +4572,6 @@ entry:
 ret <4 x double> %2
 }
 
-define <4 x double> @test_mm256_maskz_fnmadd_pd_unary_fneg(i8 zeroext %__U, <4 x double> %__A, <4 x double> %__B, <4 x double> %__C) {
-; X86-LABEL: test_mm256_maskz_fnmadd_pd_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfnmadd213pd {{.*#+}} ymm0 = -(ymm1 * ymm0) + ymm2
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm256_maskz_fnmadd_pd_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfnmadd213pd {{.*#+}} ymm0 = -(ymm1 * ymm0) + ymm2
-; X64-NEXT: retq
-entry:
-%neg.i = fneg <4 x double> %__A
-%0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %neg.i, <4 x double> %__B, <4 x double> %__C) #9
-%1 = bitcast i8 %__U to <8 x i1>
-%extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-%2 = select <4 x i1> %extract.i, <4 x double> %0, <4 x double> zeroinitializer
-ret <4 x double> %2
-}
-
 define <4 x double> @test_mm256_maskz_fnmsub_pd(i8 zeroext %__U, <4 x double> %__A, <4 x double> %__B, <4 x double> %__C) {
 ; X86-LABEL: test_mm256_maskz_fnmsub_pd:
 ; X86: # %bb.0: # %entry
@@ -4798,29 +4595,6 @@ entry:
 ret <4 x double> %2
 }
 
-define <4 x double> @test_mm256_maskz_fnmsub_pd_unary_fneg(i8 zeroext %__U, <4 x double> %__A, <4 x double> %__B, <4 x double> %__C) {
-; X86-LABEL: test_mm256_maskz_fnmsub_pd_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfnmsub213pd {{.*#+}} ymm0 = -(ymm1 * ymm0) - ymm2
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm256_maskz_fnmsub_pd_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfnmsub213pd {{.*#+}} ymm0 = -(ymm1 * ymm0) - ymm2
-; X64-NEXT: retq
-entry:
-%neg.i = fneg <4 x double> %__A
-%neg1.i = fneg <4 x double> %__C
-%0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %neg.i, <4 x double> %__B, <4 x double> %neg1.i) #9
-%1 = bitcast i8 %__U to <8 x i1>
-%extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-%2 = select <4 x i1> %extract.i, <4 x double> %0, <4 x double> zeroinitializer
-ret <4 x double> %2
-}
-
 define <4 x float> @test_mm_mask_fmadd_ps(<4 x float> %__A, i8 zeroext %__U, <4 x float> %__B, <4 x float> %__C) {
 ; X86-LABEL: test_mm_mask_fmadd_ps:
 ; X86: # %bb.0: # %entry
@@ -4864,28 +4638,6 @@ entry:
 ret <4 x float> %2
 }
 
-define <4 x float> @test_mm_mask_fmsub_ps_unary_fneg(<4 x float> %__A, i8 zeroext %__U, <4 x float> %__B, <4 x float> %__C) {
-; X86-LABEL: test_mm_mask_fmsub_ps_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfmsub132ps {{.*#+}} xmm0 = (xmm0 * xmm1) - xmm2
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm_mask_fmsub_ps_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfmsub132ps {{.*#+}} xmm0 = (xmm0 * xmm1) - xmm2
-; X64-NEXT: retq
-entry:
-%neg.i = fneg <4 x float> %__C
-%0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %__A, <4 x float> %__B, <4 x float> %neg.i) #9
-%1 = bitcast i8 %__U to <8 x i1>
-%extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-%2 = select <4 x i1> %extract.i, <4 x float> %0, <4 x float> %__A
-ret <4 x float> %2
-}
-
 define <4 x float> @test_mm_mask3_fmadd_ps(<4 x float> %__A, <4 x float> %__B, <4 x float> %__C, i8 zeroext %__U) {
 ; X86-LABEL: test_mm_mask3_fmadd_ps:
 ; X86: # %bb.0: # %entry
@@ -4933,30 +4685,6 @@ entry:
 ret <4 x float> %2
 }
 
-define <4 x float> @test_mm_mask3_fnmadd_ps_unary_fneg(<4 x float> %__A, <4 x float> %__B, <4 x float> %__C, i8 zeroext %__U) {
-; X86-LABEL: test_mm_mask3_fnmadd_ps_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfnmadd231ps {{.*#+}} xmm2 = -(xmm0 * xmm1) + xmm2
-; X86-NEXT: vmovaps %xmm2, %xmm0
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm_mask3_fnmadd_ps_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfnmadd231ps {{.*#+}} xmm2 = -(xmm0 * xmm1) + xmm2
-; X64-NEXT: vmovaps %xmm2, %xmm0
-; X64-NEXT: retq
-entry:
-%neg.i = fneg <4 x float> %__A
-%0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %neg.i, <4 x float> %__B, <4 x float> %__C) #9
-%1 = bitcast i8 %__U to <8 x i1>
-%extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-%2 = select <4 x i1> %extract.i, <4 x float> %0, <4 x float> %__C
-ret <4 x float> %2
-}
-
 define <4 x float> @test_mm_maskz_fmadd_ps(i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B, <4 x float> %__C) {
 ; X86-LABEL: test_mm_maskz_fmadd_ps:
 ; X86: # %bb.0: # %entry
@@ -5000,28 +4728,6 @@ entry:
 ret <4 x float> %2
 }
 
-define <4 x float> @test_mm_maskz_fmsub_ps_unary_fneg(i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B, <4 x float> %__C) {
-; X86-LABEL: test_mm_maskz_fmsub_ps_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfmsub213ps {{.*#+}} xmm0 = (xmm1 * xmm0) - xmm2
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm_maskz_fmsub_ps_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfmsub213ps {{.*#+}} xmm0 = (xmm1 * xmm0) - xmm2
-; X64-NEXT: retq
-entry:
-%neg.i = fneg <4 x float> %__C
-%0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %__A, <4 x float> %__B, <4 x float> %neg.i) #9
-%1 = bitcast i8 %__U to <8 x i1>
-%extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-%2 = select <4 x i1> %extract.i, <4 x float> %0, <4 x float> zeroinitializer
-ret <4 x float> %2
-}
-
 define <4 x float> @test_mm_maskz_fnmadd_ps(i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B, <4 x float> %__C) {
 ; X86-LABEL: test_mm_maskz_fnmadd_ps:
 ; X86: # %bb.0: # %entry
@@ -5044,28 +4750,6 @@ entry:
 ret <4 x float> %2
 }
 
-define <4 x float> @test_mm_maskz_fnmadd_ps_unary_fneg(i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B, <4 x float> %__C) {
-; X86-LABEL: test_mm_maskz_fnmadd_ps_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfnmadd213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) + xmm2
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm_maskz_fnmadd_ps_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfnmadd213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) + xmm2
-; X64-NEXT: retq
-entry:
-%neg.i = fneg <4 x float> %__A
-%0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %neg.i, <4 x float> %__B, <4 x float> %__C) #9
-%1 = bitcast i8 %__U to <8 x i1>
-%extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-%2 = select <4 x i1> %extract.i, <4 x float> %0, <4 x float> zeroinitializer
-ret <4 x float> %2
-}
-
 define <4 x float> @test_mm_maskz_fnmsub_ps(i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B, <4 x float> %__C) {
 ; X86-LABEL: test_mm_maskz_fnmsub_ps:
 ; X86: # %bb.0: # %entry
@@ -5089,29 +4773,6 @@ entry:
 ret <4 x float> %2
 }
 
-define <4 x float> @test_mm_maskz_fnmsub_ps_unary_fneg(i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B, <4 x float> %__C) {
-; X86-LABEL: test_mm_maskz_fnmsub_ps_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfnmsub213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) - xmm2
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm_maskz_fnmsub_ps_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfnmsub213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) - xmm2
-; X64-NEXT: retq
-entry:
-%neg.i = fneg <4 x float> %__A
-%neg1.i = fneg <4 x float> %__C
-%0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %neg.i, <4 x float> %__B, <4 x float> %neg1.i) #9
-%1 = bitcast i8 %__U to <8 x i1>
-%extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-%2 = select <4 x i1> %extract.i, <4 x float> %0, <4 x float> zeroinitializer
-ret <4 x float> %2
-}
-
 define <8 x float> @test_mm256_mask_fmadd_ps(<8 x float> %__A, i8 zeroext %__U, <8 x float> %__B, <8 x float> %__C) {
 ; X86-LABEL: test_mm256_mask_fmadd_ps:
 ; X86: # %bb.0: # %entry
@@ -5153,27 +4814,6 @@ entry:
 ret <8 x float> %2
 }
 
-define <8 x float> @test_mm256_mask_fmsub_ps_unary_fneg(<8 x float> %__A, i8 zeroext %__U, <8 x float> %__B, <8 x float> %__C) {
-; X86-LABEL: test_mm256_mask_fmsub_ps_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfmsub132ps {{.*#+}} ymm0 = (ymm0 * ymm1) - ymm2
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm256_mask_fmsub_ps_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfmsub132ps {{.*#+}} ymm0 = (ymm0 * ymm1) - ymm2
-; X64-NEXT: retq
-entry:
-%neg.i = fneg <8 x float> %__C
-%0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %__A, <8 x float> %__B, <8 x float> %neg.i) #9
-%1 = bitcast i8 %__U to <8 x i1>
-%2 = select <8 x i1> %1, <8 x float> %0, <8 x float> %__A
-ret <8 x float> %2
-}
-
 define <8 x float> @test_mm256_mask3_fmadd_ps(<8 x float> %__A, <8 x float> %__B, <8 x float> %__C, i8 zeroext %__U) {
 ; X86-LABEL: test_mm256_mask3_fmadd_ps:
 ; X86: # %bb.0: # %entry
@@ -5907,32 +5547,6 @@ entry:
 ret <2 x double> %4
 }
 
-define <2 x double> @test_mm_mask3_fmsubadd_pd_unary_fneg(<2 x double> %__A, <2 x double> %__B, <2 x double> %__C, i8 zeroext %__U) {
-; X86-LABEL: test_mm_mask3_fmsubadd_pd_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfmsubadd231pd {{.*#+}} xmm2 = (xmm0 * xmm1) -/+ xmm2
-; X86-NEXT: vmovapd %xmm2, %xmm0
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm_mask3_fmsubadd_pd_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfmsubadd231pd {{.*#+}} xmm2 = (xmm0 * xmm1) -/+ xmm2
-; X64-NEXT: vmovapd %xmm2, %xmm0
-; X64-NEXT: retq
-entry:
-%neg.i = fneg <2 x double> %__C
-%0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %__A, <2 x double> %__B, <2 x double> %neg.i) #9
-%1 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %__A, <2 x double> %__B, <2 x double> %__C) #9
-%2 = shufflevector <2 x double> %1, <2 x double> %0, <2 x i32> <i32 0, i32 3>
-%3 = bitcast i8 %__U to <8 x i1>
-%extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
-%4 = select <2 x i1> %extract.i, <2 x double> %2, <2 x double> %__C
-ret <2 x double> %4
-}
-
 define <4 x double> @test_mm256_mask3_fmsubadd_pd(<4 x double> %__A, <4 x double> %__B, <4 x double> %__C, i8 zeroext %__U) {
 ; X86-LABEL: test_mm256_mask3_fmsubadd_pd:
 ; X86: # %bb.0: # %entry
@@ -5959,32 +5573,6 @@ entry:
 ret <4 x double> %4
 }
 
-define <4 x double> @test_mm256_mask3_fmsubadd_pd_unary_fneg(<4 x double> %__A, <4 x double> %__B, <4 x double> %__C, i8 zeroext %__U) {
-; X86-LABEL: test_mm256_mask3_fmsubadd_pd_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfmsubadd231pd {{.*#+}} ymm2 = (ymm0 * ymm1) -/+ ymm2
-; X86-NEXT: vmovapd %ymm2, %ymm0
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm256_mask3_fmsubadd_pd_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfmsubadd231pd {{.*#+}} ymm2 = (ymm0 * ymm1) -/+ ymm2
-; X64-NEXT: vmovapd %ymm2, %ymm0
-; X64-NEXT: retq
-entry:
-%neg.i = fneg <4 x double> %__C
-%0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %__A, <4 x double> %__B, <4 x double> %neg.i) #9
-%1 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %__A, <4 x double> %__B, <4 x double> %__C) #9
-%2 = shufflevector <4 x double> %1, <4 x double> %0, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
-%3 = bitcast i8 %__U to <8 x i1>
-%extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-%4 = select <4 x i1> %extract.i, <4 x double> %2, <4 x double> %__C
-ret <4 x double> %4
-}
-
 define <4 x float> @test_mm_mask3_fmsubadd_ps(<4 x float> %__A, <4 x float> %__B, <4 x float> %__C, i8 zeroext %__U) {
 ; X86-LABEL: test_mm_mask3_fmsubadd_ps:
 ; X86: # %bb.0: # %entry
@@ -6011,32 +5599,6 @@ entry:
 ret <4 x float> %4
 }
 
-define <4 x float> @test_mm_mask3_fmsubadd_ps_unary_fneg(<4 x float> %__A, <4 x float> %__B, <4 x float> %__C, i8 zeroext %__U) {
-; X86-LABEL: test_mm_mask3_fmsubadd_ps_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfmsubadd231ps {{.*#+}} xmm2 = (xmm0 * xmm1) -/+ xmm2
-; X86-NEXT: vmovaps %xmm2, %xmm0
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm_mask3_fmsubadd_ps_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfmsubadd231ps {{.*#+}} xmm2 = (xmm0 * xmm1) -/+ xmm2
-; X64-NEXT: vmovaps %xmm2, %xmm0
-; X64-NEXT: retq
-entry:
-%neg.i = fneg <4 x float> %__C
-%0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %__A, <4 x float> %__B, <4 x float> %neg.i) #9
-%1 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %__A, <4 x float> %__B, <4 x float> %__C) #9
-%2 = shufflevector <4 x float> %1, <4 x float> %0, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
-%3 = bitcast i8 %__U to <8 x i1>
-%extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-%4 = select <4 x i1> %extract.i, <4 x float> %2, <4 x float> %__C
-ret <4 x float> %4
-}
-
 define <8 x float> @test_mm256_mask3_fmsubadd_ps(<8 x float> %__A, <8 x float> %__B, <8 x float> %__C, i8 zeroext %__U) {
 ; X86-LABEL: test_mm256_mask3_fmsubadd_ps:
 ; X86: # %bb.0: # %entry
@@ -6062,31 +5624,6 @@ entry:
 ret <8 x float> %4
 }
 
-define <8 x float> @test_mm256_mask3_fmsubadd_ps_unary_fneg(<8 x float> %__A, <8 x float> %__B, <8 x float> %__C, i8 zeroext %__U) {
-; X86-LABEL: test_mm256_mask3_fmsubadd_ps_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfmsubadd231ps {{.*#+}} ymm2 = (ymm0 * ymm1) -/+ ymm2
-; X86-NEXT: vmovaps %ymm2, %ymm0
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm256_mask3_fmsubadd_ps_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfmsubadd231ps {{.*#+}} ymm2 = (ymm0 * ymm1) -/+ ymm2
-; X64-NEXT: vmovaps %ymm2, %ymm0
-; X64-NEXT: retq
-entry:
-%neg.i = fneg <8 x float> %__C
-%0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %__A, <8 x float> %__B, <8 x float> %neg.i) #9
-%1 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %__A, <8 x float> %__B, <8 x float> %__C) #9
-%2 = shufflevector <8 x float> %1, <8 x float> %0, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
-%3 = bitcast i8 %__U to <8 x i1>
-%4 = select <8 x i1> %3, <8 x float> %2, <8 x float> %__C
-ret <8 x float> %4
-}
-
 define <2 x double> @test_mm_mask_fnmadd_pd(<2 x double> %__A, i8 zeroext %__U, <2 x double> %__B, <2 x double> %__C) {
 ; X86-LABEL: test_mm_mask_fnmadd_pd:
 ; X86: # %bb.0: # %entry
@@ -6109,28 +5646,6 @@ entry:
 ret <2 x double> %2
 }
 
-define <2 x double> @test_mm_mask_fnmadd_pd_unary_fneg(<2 x double> %__A, i8 zeroext %__U, <2 x double> %__B, <2 x double> %__C) {
-; X86-LABEL: test_mm_mask_fnmadd_pd_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfnmadd132pd {{.*#+}} xmm0 = -(xmm0 * xmm1) + xmm2
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm_mask_fnmadd_pd_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfnmadd132pd {{.*#+}} xmm0 = -(xmm0 * xmm1) + xmm2
-; X64-NEXT: retq
-entry:
-%neg.i = fneg <2 x double> %__B
-%0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %__A, <2 x double> %neg.i, <2 x double> %__C) #9
-%1 = bitcast i8 %__U to <8 x i1>
-%extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
-%2 = select <2 x i1> %extract.i, <2 x double> %0, <2 x double> %__A
-ret <2 x double> %2
-}
-
 define <4 x double> @test_mm256_mask_fnmadd_pd(<4 x double> %__A, i8 zeroext %__U, <4 x double> %__B, <4 x double> %__C) {
 ; X86-LABEL: test_mm256_mask_fnmadd_pd:
 ; X86: # %bb.0: # %entry
@@ -6153,28 +5668,6 @@ entry:
 ret <4 x double> %2
 }
 
-define <4 x double> @test_mm256_mask_fnmadd_pd_unary_fneg(<4 x double> %__A, i8 zeroext %__U, <4 x double> %__B, <4 x double> %__C) {
-; X86-LABEL: test_mm256_mask_fnmadd_pd_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfnmadd132pd {{.*#+}} ymm0 = -(ymm0 * ymm1) + ymm2
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm256_mask_fnmadd_pd_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfnmadd132pd {{.*#+}} ymm0 = -(ymm0 * ymm1) + ymm2
-; X64-NEXT: retq
-entry:
-%neg.i = fneg <4 x double> %__B
-%0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %__A, <4 x double> %neg.i, <4 x double> %__C) #9
-%1 = bitcast i8 %__U to <8 x i1>
-%extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-%2 = select <4 x i1> %extract.i, <4 x double> %0, <4 x double> %__A
-ret <4 x double> %2
-}
-
 define <4 x float> @test_mm_mask_fnmadd_ps(<4 x float> %__A, i8 zeroext %__U, <4 x float> %__B, <4 x float> %__C) {
 ; X86-LABEL: test_mm_mask_fnmadd_ps:
 ; X86: # %bb.0: # %entry
@@ -6197,28 +5690,6 @@ entry:
 ret <4 x float> %2
 }
 
-define <4 x float> @test_mm_mask_fnmadd_ps_unary_fneg(<4 x float> %__A, i8 zeroext %__U, <4 x float> %__B, <4 x float> %__C) {
-; X86-LABEL: test_mm_mask_fnmadd_ps_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfnmadd132ps {{.*#+}} xmm0 = -(xmm0 * xmm1) + xmm2
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm_mask_fnmadd_ps_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfnmadd132ps {{.*#+}} xmm0 = -(xmm0 * xmm1) + xmm2
-; X64-NEXT: retq
-entry:
-%neg.i = fneg <4 x float> %__B
-%0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %__A, <4 x float> %neg.i, <4 x float> %__C) #9
-%1 = bitcast i8 %__U to <8 x i1>
-%extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-%2 = select <4 x i1> %extract.i, <4 x float> %0, <4 x float> %__A
-ret <4 x float> %2
-}
-
 define <8 x float> @test_mm256_mask_fnmadd_ps(<8 x float> %__A, i8 zeroext %__U, <8 x float> %__B, <8 x float> %__C) {
 ; X86-LABEL: test_mm256_mask_fnmadd_ps:
 ; X86: # %bb.0: # %entry
@@ -6240,27 +5711,6 @@ entry:
 ret <8 x float> %2
 }
 
-define <8 x float> @test_mm256_mask_fnmadd_ps_unary_fneg(<8 x float> %__A, i8 zeroext %__U, <8 x float> %__B, <8 x float> %__C) {
-; X86-LABEL: test_mm256_mask_fnmadd_ps_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfnmadd132ps {{.*#+}} ymm0 = -(ymm0 * ymm1) + ymm2
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm256_mask_fnmadd_ps_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfnmadd132ps {{.*#+}} ymm0 = -(ymm0 * ymm1) + ymm2
-; X64-NEXT: retq
-entry:
-%neg.i = fneg <8 x float> %__B
-%0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %__A, <8 x float> %neg.i, <8 x float> %__C) #9
-%1 = bitcast i8 %__U to <8 x i1>
-%2 = select <8 x i1> %1, <8 x float> %0, <8 x float> %__A
-ret <8 x float> %2
-}
-
 define <2 x double> @test_mm_mask_fnmsub_pd(<2 x double> %__A, i8 zeroext %__U, <2 x double> %__B, <2 x double> %__C) {
 ; X86-LABEL: test_mm_mask_fnmsub_pd:
 ; X86: # %bb.0: # %entry
@@ -6284,29 +5734,6 @@ entry:
 ret <2 x double> %2
 }
 
-define <2 x double> @test_mm_mask_fnmsub_pd_unary_fneg(<2 x double> %__A, i8 zeroext %__U, <2 x double> %__B, <2 x double> %__C) {
-; X86-LABEL: test_mm_mask_fnmsub_pd_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfnmsub132pd {{.*#+}} xmm0 = -(xmm0 * xmm1) - xmm2
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm_mask_fnmsub_pd_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfnmsub132pd {{.*#+}} xmm0 = -(xmm0 * xmm1) - xmm2
-; X64-NEXT: retq
-entry:
-%neg.i = fneg <2 x double> %__B
-%neg1.i = fneg <2 x double> %__C
-%0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %__A, <2 x double> %neg.i, <2 x double> %neg1.i) #9
-%1 = bitcast i8 %__U to <8 x i1>
-%extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
-%2 = select <2 x i1> %extract.i, <2 x double> %0, <2 x double> %__A
-ret <2 x double> %2
-}
-
 define <2 x double> @test_mm_mask3_fnmsub_pd(<2 x double> %__A, <2 x double> %__B, <2 x double> %__C, i8 zeroext %__U) {
 ; X86-LABEL: test_mm_mask3_fnmsub_pd:
 ; X86: # %bb.0: # %entry
@@ -6332,31 +5759,6 @@ entry:
 ret <2 x double> %2
 }
 
-define <2 x double> @test_mm_mask3_fnmsub_pd_unary_fneg(<2 x double> %__A, <2 x double> %__B, <2 x double> %__C, i8 zeroext %__U) {
-; X86-LABEL: test_mm_mask3_fnmsub_pd_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfnmsub231pd {{.*#+}} xmm2 = -(xmm0 * xmm1) - xmm2
-; X86-NEXT: vmovapd %xmm2, %xmm0
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm_mask3_fnmsub_pd_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfnmsub231pd {{.*#+}} xmm2 = -(xmm0 * xmm1) - xmm2
-; X64-NEXT: vmovapd %xmm2, %xmm0
-; X64-NEXT: retq
-entry:
-%neg.i = fneg <2 x double> %__B
-%neg1.i = fneg <2 x double> %__C
-%0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %__A, <2 x double> %neg.i, <2 x double> %neg1.i) #9
-%1 = bitcast i8 %__U to <8 x i1>
-%extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
-%2 = select <2 x i1> %extract.i, <2 x double> %0, <2 x double> %__C
-ret <2 x double> %2
-}
-
 define <4 x double> @test_mm256_mask_fnmsub_pd(<4 x double> %__A, i8 zeroext %__U, <4 x double> %__B, <4 x double> %__C) {
 ; X86-LABEL: test_mm256_mask_fnmsub_pd:
 ; X86: # %bb.0: # %entry
@@ -6380,29 +5782,6 @@ entry:
 ret <4 x double> %2
 }
 
-define <4 x double> @test_mm256_mask_fnmsub_pd_unary_fneg(<4 x double> %__A, i8 zeroext %__U, <4 x double> %__B, <4 x double> %__C) {
-; X86-LABEL: test_mm256_mask_fnmsub_pd_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfnmsub132pd {{.*#+}} ymm0 = -(ymm0 * ymm1) - ymm2
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm256_mask_fnmsub_pd_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfnmsub132pd {{.*#+}} ymm0 = -(ymm0 * ymm1) - ymm2
-; X64-NEXT: retq
-entry:
-%neg.i = fneg <4 x double> %__B
-%neg1.i = fneg <4 x double> %__C
-%0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %__A, <4 x double> %neg.i, <4 x double> %neg1.i) #9
-%1 = bitcast i8 %__U to <8 x i1>
-%extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-%2 = select <4 x i1> %extract.i, <4 x double> %0, <4 x double> %__A
-ret <4 x double> %2
-}
-
 define <4 x double> @test_mm256_mask3_fnmsub_pd(<4 x double> %__A, <4 x double> %__B, <4 x double> %__C, i8 zeroext %__U) {
 ; X86-LABEL: test_mm256_mask3_fnmsub_pd:
 ; X86: # %bb.0: # %entry
@@ -6428,31 +5807,6 @@ entry:
 ret <4 x double> %2
 }
 
-define <4 x double> @test_mm256_mask3_fnmsub_pd_unary_fneg(<4 x double> %__A, <4 x double> %__B, <4 x double> %__C, i8 zeroext %__U) {
-; X86-LABEL: test_mm256_mask3_fnmsub_pd_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfnmsub231pd {{.*#+}} ymm2 = -(ymm0 * ymm1) - ymm2
-; X86-NEXT: vmovapd %ymm2, %ymm0
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm256_mask3_fnmsub_pd_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfnmsub231pd {{.*#+}} ymm2 = -(ymm0 * ymm1) - ymm2
-; X64-NEXT: vmovapd %ymm2, %ymm0
-; X64-NEXT: retq
-entry:
-%neg.i = fneg <4 x double> %__B
-%neg1.i = fneg <4 x double> %__C
-%0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %__A, <4 x double> %neg.i, <4 x double> %neg1.i) #9
-%1 = bitcast i8 %__U to <8 x i1>
-%extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-%2 = select <4 x i1> %extract.i, <4 x double> %0, <4 x double> %__C
-ret <4 x double> %2
-}
-
 define <4 x float> @test_mm_mask_fnmsub_ps(<4 x float> %__A, i8 zeroext %__U, <4 x float> %__B, <4 x float> %__C) {
 ; X86-LABEL: test_mm_mask_fnmsub_ps:
 ; X86: # %bb.0: # %entry
@@ -6476,29 +5830,6 @@ entry:
 ret <4 x float> %2
 }
 
-define <4 x float> @test_mm_mask_fnmsub_ps_unary_fneg(<4 x float> %__A, i8 zeroext %__U, <4 x float> %__B, <4 x float> %__C) {
-; X86-LABEL: test_mm_mask_fnmsub_ps_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfnmsub132ps {{.*#+}} xmm0 = -(xmm0 * xmm1) - xmm2
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm_mask_fnmsub_ps_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfnmsub132ps {{.*#+}} xmm0 = -(xmm0 * xmm1) - xmm2
-; X64-NEXT: retq
-entry:
-%neg.i = fneg <4 x float> %__B
-%neg1.i = fneg <4 x float> %__C
-%0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %__A, <4 x float> %neg.i, <4 x float> %neg1.i) #9
-%1 = bitcast i8 %__U to <8 x i1>
-%extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-%2 = select <4 x i1> %extract.i, <4 x float> %0, <4 x float> %__A
-ret <4 x float> %2
-}
-
 define <4 x float> @test_mm_mask3_fnmsub_ps(<4 x float> %__A, <4 x float> %__B, <4 x float> %__C, i8 zeroext %__U) {
 ; X86-LABEL: test_mm_mask3_fnmsub_ps:
 ; X86: # %bb.0: # %entry
@@ -85,22 +85,6 @@ define float @combine_fabs_fneg(float %a) {
 ret float %2
 }
 
-define float @combine_fabs_unary_fneg(float %a) {
-; SSE-LABEL: combine_fabs_unary_fneg:
-; SSE: # %bb.0:
-; SSE-NEXT: andps {{.*}}(%rip), %xmm0
-; SSE-NEXT: retq
-;
-; AVX-LABEL: combine_fabs_unary_fneg:
-; AVX: # %bb.0:
-; AVX-NEXT: vbroadcastss {{.*}}(%rip), %xmm1
-; AVX-NEXT: vandps %xmm1, %xmm0, %xmm0
-; AVX-NEXT: retq
-%1 = fneg float %a
-%2 = call float @llvm.fabs.f32(float %1)
-ret float %2
-}
-
 define <4 x float> @combine_vec_fabs_fneg(<4 x float> %a) {
 ; SSE-LABEL: combine_vec_fabs_fneg:
 ; SSE: # %bb.0:
@@ -117,22 +101,6 @@ define <4 x float> @combine_vec_fabs_fneg(<4 x float> %a) {
 ret <4 x float> %2
 }
 
-define <4 x float> @combine_vec_fabs_unary_fneg(<4 x float> %a) {
-; SSE-LABEL: combine_vec_fabs_unary_fneg:
-; SSE: # %bb.0:
-; SSE-NEXT: andps {{.*}}(%rip), %xmm0
-; SSE-NEXT: retq
-;
-; AVX-LABEL: combine_vec_fabs_unary_fneg:
-; AVX: # %bb.0:
-; AVX-NEXT: vbroadcastss {{.*}}(%rip), %xmm1
-; AVX-NEXT: vandps %xmm1, %xmm0, %xmm0
-; AVX-NEXT: retq
-%1 = fneg <4 x float> %a
-%2 = call <4 x float> @llvm.fabs.v4f32(<4 x float> %1)
-ret <4 x float> %2
-}
-
 ; fabs(fcopysign(x, y)) -> fabs(x)
 define float @combine_fabs_fcopysign(float %a, float %b) {
 ; SSE-LABEL: combine_fabs_fcopysign:
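
Note (editor's illustration, not part of the reverted patch): the combine-fabs.ll tests above rely on the identity |-x| == |x|, so fabs of a negated value is expected to fold to a single sign-bit clear (one andps), exactly as in the binary-fneg variants that remain in the file. A minimal sketch of the pattern, with a hypothetical function name:

declare float @llvm.fabs.f32(float)

define float @fabs_of_unary_fneg(float %a) {
  %neg = fneg float %a
  %abs = call float @llvm.fabs.f32(float %neg) ; expected to fold to fabs(%a)
  ret float %abs
}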