; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX --check-prefix=AVX512F

; 'signum' test cases (PR13248)

;
; generic implementation for 128-bit vectors
;

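; A rough sketch of the pattern under test: each lane computes
;   signum(x) = (x < 0 ? -1.0 : 0.0) - (x > 0 ? -1.0 : 0.0)
; The fcmp results sign-extend to -1, so sitofp turns true lanes into -1.0; on
; AVX this should lower to two vcmpltps (all-ones integer masks), two
; vcvtdq2ps conversions, and a single vsubps, as the checks below show.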
define void @signum32a(<4 x float>*) {
; AVX-LABEL: signum32a:
; AVX:       # %bb.0: # %entry
; AVX-NEXT:    vmovaps (%rdi), %xmm0
; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vcmpltps %xmm1, %xmm0, %xmm2
; AVX-NEXT:    vcvtdq2ps %xmm2, %xmm2
; AVX-NEXT:    vcmpltps %xmm0, %xmm1, %xmm0
; AVX-NEXT:    vcvtdq2ps %xmm0, %xmm0
; AVX-NEXT:    vsubps %xmm0, %xmm2, %xmm0
; AVX-NEXT:    vmovaps %xmm0, (%rdi)
; AVX-NEXT:    retq
entry:
  %1 = load <4 x float>, <4 x float>* %0
  %2 = fcmp olt <4 x float> %1, zeroinitializer
  %3 = sitofp <4 x i1> %2 to <4 x float>
  %4 = fcmp ogt <4 x float> %1, zeroinitializer
  %5 = sitofp <4 x i1> %4 to <4 x float>
  %6 = fsub <4 x float> %3, %5
  store <4 x float> %6, <4 x float>* %0
  ret void
}

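; For <2 x double> the compare masks are 64-bit lanes, but vcvtdq2pd only
; consumes 32-bit elements, so the expected lowering first gathers the even
; dwords of each mask (the vpermilps [0,2,2,3] below) before converting.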
define void @signum64a(<2 x double>*) {
; AVX-LABEL: signum64a:
; AVX:       # %bb.0: # %entry
; AVX-NEXT:    vmovapd (%rdi), %xmm0
; AVX-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vcmpltpd %xmm1, %xmm0, %xmm2
; AVX-NEXT:    vpermilps {{.*#+}} xmm2 = xmm2[0,2,2,3]
; AVX-NEXT:    vcvtdq2pd %xmm2, %xmm2
; AVX-NEXT:    vcmpltpd %xmm0, %xmm1, %xmm0
; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX-NEXT:    vcvtdq2pd %xmm0, %xmm0
; AVX-NEXT:    vsubpd %xmm0, %xmm2, %xmm0
; AVX-NEXT:    vmovapd %xmm0, (%rdi)
; AVX-NEXT:    retq
entry:
  %1 = load <2 x double>, <2 x double>* %0
  %2 = fcmp olt <2 x double> %1, zeroinitializer
  %3 = sitofp <2 x i1> %2 to <2 x double>
  %4 = fcmp ogt <2 x double> %1, zeroinitializer
  %5 = sitofp <2 x i1> %4 to <2 x double>
  %6 = fsub <2 x double> %3, %5
  store <2 x double> %6, <2 x double>* %0
  ret void
}

;
; generic implementation for 256-bit vectors
;

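; Same pattern widened to 256-bit vectors: for float the compares and
; conversions stay entirely in ymm registers, with a vzeroupper expected
; before returning.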
define void @signum32b(<8 x float>*) {
; AVX-LABEL: signum32b:
; AVX:       # %bb.0: # %entry
; AVX-NEXT:    vmovaps (%rdi), %ymm0
; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vcmpltps %ymm1, %ymm0, %ymm2
; AVX-NEXT:    vcvtdq2ps %ymm2, %ymm2
; AVX-NEXT:    vcmpltps %ymm0, %ymm1, %ymm0
; AVX-NEXT:    vcvtdq2ps %ymm0, %ymm0
; AVX-NEXT:    vsubps %ymm0, %ymm2, %ymm0
; AVX-NEXT:    vmovaps %ymm0, (%rdi)
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
entry:
  %1 = load <8 x float>, <8 x float>* %0
  %2 = fcmp olt <8 x float> %1, zeroinitializer
  %3 = sitofp <8 x i1> %2 to <8 x float>
  %4 = fcmp ogt <8 x float> %1, zeroinitializer
  %5 = sitofp <8 x i1> %4 to <8 x float>
  %6 = fsub <8 x float> %3, %5
  store <8 x float> %6, <8 x float>* %0
  ret void
}

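; For <4 x double> the 64-bit compare masks have to be narrowed to <4 x i32>
; before vcvtdq2pd, and the targets diverge: AVX1/AVX2 are expected to use
; vextractf128 + vpackssdw, while AVX512F can truncate with vpmovqd.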
define void @signum64b(<4 x double>*) {
; AVX1-LABEL: signum64b:
; AVX1:       # %bb.0: # %entry
; AVX1-NEXT:    vmovapd (%rdi), %ymm0
; AVX1-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
; AVX1-NEXT:    vcmpltpd %ymm1, %ymm0, %ymm2
; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm3
; AVX1-NEXT:    vpackssdw %xmm3, %xmm2, %xmm2
; AVX1-NEXT:    vcvtdq2pd %xmm2, %ymm2
; AVX1-NEXT:    vcmpltpd %ymm0, %ymm1, %ymm0
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vcvtdq2pd %xmm0, %ymm0
; AVX1-NEXT:    vsubpd %ymm0, %ymm2, %ymm0
; AVX1-NEXT:    vmovapd %ymm0, (%rdi)
; AVX1-NEXT:    vzeroupper
; AVX1-NEXT:    retq
;
; AVX2-LABEL: signum64b:
; AVX2:       # %bb.0: # %entry
; AVX2-NEXT:    vmovapd (%rdi), %ymm0
; AVX2-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
; AVX2-NEXT:    vcmpltpd %ymm1, %ymm0, %ymm2
; AVX2-NEXT:    vextractf128 $1, %ymm2, %xmm3
; AVX2-NEXT:    vpackssdw %xmm3, %xmm2, %xmm2
; AVX2-NEXT:    vcvtdq2pd %xmm2, %ymm2
; AVX2-NEXT:    vcmpltpd %ymm0, %ymm1, %ymm0
; AVX2-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX2-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vcvtdq2pd %xmm0, %ymm0
; AVX2-NEXT:    vsubpd %ymm0, %ymm2, %ymm0
; AVX2-NEXT:    vmovapd %ymm0, (%rdi)
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    retq
;
; AVX512F-LABEL: signum64b:
; AVX512F:       # %bb.0: # %entry
; AVX512F-NEXT:    vmovapd (%rdi), %ymm0
; AVX512F-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
; AVX512F-NEXT:    vcmpltpd %ymm1, %ymm0, %ymm2
; AVX512F-NEXT:    vpmovqd %zmm2, %ymm2
; AVX512F-NEXT:    vcvtdq2pd %xmm2, %ymm2
; AVX512F-NEXT:    vcmpltpd %ymm0, %ymm1, %ymm0
; AVX512F-NEXT:    vpmovqd %zmm0, %ymm0
; AVX512F-NEXT:    vcvtdq2pd %xmm0, %ymm0
; AVX512F-NEXT:    vsubpd %ymm0, %ymm2, %ymm0
; AVX512F-NEXT:    vmovapd %ymm0, (%rdi)
; AVX512F-NEXT:    vzeroupper
; AVX512F-NEXT:    retq
entry:
  %1 = load <4 x double>, <4 x double>* %0
  %2 = fcmp olt <4 x double> %1, zeroinitializer
  %3 = sitofp <4 x i1> %2 to <4 x double>
  %4 = fcmp ogt <4 x double> %1, zeroinitializer
  %5 = sitofp <4 x i1> %4 to <4 x double>
  %6 = fsub <4 x double> %3, %5
  store <4 x double> %6, <4 x double>* %0
  ret void
}

;
; implementation using AVX intrinsics for 256-bit vectors
;

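; These variants build the masks directly with the AVX compare intrinsics
; (predicate 1, i.e. LT_OS) and bitcast them to <8 x i32> before sitofp; the
; float version is expected to produce the same code as signum32b above.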
define void @signum32c(<8 x float>*) {
; AVX-LABEL: signum32c:
; AVX:       # %bb.0: # %entry
; AVX-NEXT:    vmovaps (%rdi), %ymm0
; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vcmpltps %ymm1, %ymm0, %ymm2
; AVX-NEXT:    vcvtdq2ps %ymm2, %ymm2
; AVX-NEXT:    vcmpltps %ymm0, %ymm1, %ymm0
; AVX-NEXT:    vcvtdq2ps %ymm0, %ymm0
; AVX-NEXT:    vsubps %ymm0, %ymm2, %ymm0
; AVX-NEXT:    vmovaps %ymm0, (%rdi)
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
entry:
  %1 = load <8 x float>, <8 x float>* %0
  %2 = tail call <8 x float> @llvm.x86.avx.cmp.ps.256(<8 x float> %1, <8 x float> zeroinitializer, i8 1)
  %3 = bitcast <8 x float> %2 to <8 x i32>
  %4 = sitofp <8 x i32> %3 to <8 x float>
  %5 = tail call <8 x float> @llvm.x86.avx.cmp.ps.256(<8 x float> zeroinitializer, <8 x float> %1, i8 1)
  %6 = bitcast <8 x float> %5 to <8 x i32>
  %7 = sitofp <8 x i32> %6 to <8 x float>
  %8 = fsub <8 x float> %4, %7
  store <8 x float> %8, <8 x float>* %0
  ret void
}

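; The double variant subtracts the masks in the integer domain and then uses a
; shufflevector to keep only the low dword of each 64-bit lane, so a single
; vcvtdq2pd suffices; AVX1 splits the 256-bit vpsubd into two 128-bit halves.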
define void @signum64c(<4 x double>*) {
; AVX1-LABEL: signum64c:
; AVX1:       # %bb.0: # %entry
; AVX1-NEXT:    vmovapd (%rdi), %ymm0
; AVX1-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
; AVX1-NEXT:    vcmpltpd %ymm1, %ymm0, %ymm2
; AVX1-NEXT:    vcmpltpd %ymm0, %ymm1, %ymm0
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm3
; AVX1-NEXT:    vpsubd %xmm1, %xmm3, %xmm1
; AVX1-NEXT:    vpsubd %xmm0, %xmm2, %xmm0
; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; AVX1-NEXT:    vcvtdq2pd %xmm0, %ymm0
; AVX1-NEXT:    vmovaps %ymm0, (%rdi)
; AVX1-NEXT:    vzeroupper
; AVX1-NEXT:    retq
;
; AVX2-LABEL: signum64c:
; AVX2:       # %bb.0: # %entry
; AVX2-NEXT:    vmovapd (%rdi), %ymm0
; AVX2-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
; AVX2-NEXT:    vcmpltpd %ymm1, %ymm0, %ymm2
; AVX2-NEXT:    vcmpltpd %ymm0, %ymm1, %ymm0
; AVX2-NEXT:    vpsubd %ymm0, %ymm2, %ymm0
; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; AVX2-NEXT:    vcvtdq2pd %xmm0, %ymm0
; AVX2-NEXT:    vmovaps %ymm0, (%rdi)
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    retq
;
; AVX512F-LABEL: signum64c:
; AVX512F:       # %bb.0: # %entry
; AVX512F-NEXT:    vmovapd (%rdi), %ymm0
; AVX512F-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
; AVX512F-NEXT:    vcmpltpd %ymm1, %ymm0, %ymm2
; AVX512F-NEXT:    vcmpltpd %ymm0, %ymm1, %ymm0
; AVX512F-NEXT:    vpsubd %ymm0, %ymm2, %ymm0
; AVX512F-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX512F-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; AVX512F-NEXT:    vcvtdq2pd %xmm0, %ymm0
; AVX512F-NEXT:    vmovaps %ymm0, (%rdi)
; AVX512F-NEXT:    vzeroupper
; AVX512F-NEXT:    retq
entry:
  %x = load <4 x double>, <4 x double>* %0
  %xgt = tail call <4 x double> @llvm.x86.avx.cmp.pd.256(<4 x double> %x, <4 x double> zeroinitializer, i8 1)
  %igt = bitcast <4 x double> %xgt to <8 x i32>
  %xlt = tail call <4 x double> @llvm.x86.avx.cmp.pd.256(<4 x double> zeroinitializer, <4 x double> %x, i8 1)
  %ilt = bitcast <4 x double> %xlt to <8 x i32>
  ; it is important to use %isign twice as the shuffle source in order to make LLVM use a shuffle operation
  %isign = sub <8 x i32> %igt, %ilt
  %ssign = shufflevector <8 x i32> %isign, <8 x i32> %isign, <4 x i32> <i32 0, i32 2, i32 12, i32 14>
  %sign = tail call <4 x double> @llvm.x86.avx.cvtdq2.pd.256(<4 x i32> %ssign)
  store <4 x double> %sign, <4 x double>* %0
  ret void
}

declare <8 x float> @llvm.x86.avx.cmp.ps.256(<8 x float>, <8 x float>, i8) nounwind readnone
declare <4 x double> @llvm.x86.avx.cmp.pd.256(<4 x double>, <4 x double>, i8) nounwind readnone
declare <4 x double> @llvm.x86.avx.cvtdq2.pd.256(<4 x i32>) nounwind readnone