llvm-project/llvm/test/CodeGen/X86/vector-compare-results.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=SSE --check-prefix=SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.2 | FileCheck %s --check-prefix=SSE --check-prefix=SSE42
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX --check-prefix=AVX512 --check-prefix=AVX512F
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512dq | FileCheck %s --check-prefix=AVX --check-prefix=AVX512 --check-prefix=AVX512DQ
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw | FileCheck %s --check-prefix=AVX --check-prefix=AVX512 --check-prefix=AVX512BW
;
; 128-bit vector comparisons
;
define <2 x i1> @test_cmp_v2f64(<2 x double> %a0, <2 x double> %a1) nounwind {
; SSE-LABEL: test_cmp_v2f64:
; SSE: # %bb.0:
; SSE-NEXT: cmpltpd %xmm0, %xmm1
; SSE-NEXT: movapd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_cmp_v2f64:
; AVX: # %bb.0:
; AVX-NEXT: vcmpltpd %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
%1 = fcmp ogt <2 x double> %a0, %a1
ret <2 x i1> %1
}
define <4 x i1> @test_cmp_v4f32(<4 x float> %a0, <4 x float> %a1) nounwind {
; SSE-LABEL: test_cmp_v4f32:
; SSE: # %bb.0:
; SSE-NEXT: cmpltps %xmm0, %xmm1
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_cmp_v4f32:
; AVX: # %bb.0:
; AVX-NEXT: vcmpltps %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
%1 = fcmp ogt <4 x float> %a0, %a1
ret <4 x i1> %1
}
define <2 x i1> @test_cmp_v2i64(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; SSE2-LABEL: test_cmp_v2i64:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648]
; SSE2-NEXT: pxor %xmm2, %xmm1
; SSE2-NEXT: pxor %xmm2, %xmm0
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: pcmpgtd %xmm1, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE2-NEXT: pand %xmm3, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
; SSE2-NEXT: por %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE42-LABEL: test_cmp_v2i64:
; SSE42: # %bb.0:
; SSE42-NEXT: pcmpgtq %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: test_cmp_v2i64:
; AVX: # %bb.0:
; AVX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = icmp sgt <2 x i64> %a0, %a1
ret <2 x i1> %1
}
define <4 x i1> @test_cmp_v4i32(<4 x i32> %a0, <4 x i32> %a1) nounwind {
; SSE-LABEL: test_cmp_v4i32:
; SSE: # %bb.0:
; SSE-NEXT: pcmpgtd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_cmp_v4i32:
; AVX: # %bb.0:
; AVX-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = icmp sgt <4 x i32> %a0, %a1
ret <4 x i1> %1
}
define <8 x i1> @test_cmp_v8i16(<8 x i16> %a0, <8 x i16> %a1) nounwind {
; SSE-LABEL: test_cmp_v8i16:
; SSE: # %bb.0:
; SSE-NEXT: pcmpgtw %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_cmp_v8i16:
; AVX: # %bb.0:
; AVX-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = icmp sgt <8 x i16> %a0, %a1
ret <8 x i1> %1
}
define <16 x i1> @test_cmp_v16i8(<16 x i8> %a0, <16 x i8> %a1) nounwind {
; SSE-LABEL: test_cmp_v16i8:
; SSE: # %bb.0:
; SSE-NEXT: pcmpgtb %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_cmp_v16i8:
; AVX: # %bb.0:
; AVX-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = icmp sgt <16 x i8> %a0, %a1
ret <16 x i1> %1
}
;
; 256-bit vector comparisons
;
define <4 x i1> @test_cmp_v4f64(<4 x double> %a0, <4 x double> %a1) nounwind {
; SSE-LABEL: test_cmp_v4f64:
; SSE: # %bb.0:
; SSE-NEXT: cmpltpd %xmm1, %xmm3
; SSE-NEXT: cmpltpd %xmm0, %xmm2
; SSE-NEXT: packssdw %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: test_cmp_v4f64:
; AVX1: # %bb.0:
; AVX1-NEXT: vcmpltpd %ymm0, %ymm1, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v4f64:
; AVX2: # %bb.0:
; AVX2-NEXT: vcmpltpd %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_cmp_v4f64:
; AVX512: # %bb.0:
; AVX512-NEXT: vcmpltpd %ymm0, %ymm1, %ymm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = fcmp ogt <4 x double> %a0, %a1
ret <4 x i1> %1
}
define <8 x i1> @test_cmp_v8f32(<8 x float> %a0, <8 x float> %a1) nounwind {
; SSE-LABEL: test_cmp_v8f32:
; SSE: # %bb.0:
; SSE-NEXT: cmpltps %xmm1, %xmm3
; SSE-NEXT: cmpltps %xmm0, %xmm2
; SSE-NEXT: packssdw %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: test_cmp_v8f32:
; AVX1: # %bb.0:
; AVX1-NEXT: vcmpltps %ymm0, %ymm1, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v8f32:
; AVX2: # %bb.0:
; AVX2-NEXT: vcmpltps %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_cmp_v8f32:
; AVX512: # %bb.0:
; AVX512-NEXT: vcmpltps %ymm0, %ymm1, %ymm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = fcmp ogt <8 x float> %a0, %a1
ret <8 x i1> %1
}
define <4 x i1> @test_cmp_v4i64(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; SSE2-LABEL: test_cmp_v4i64:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,2147483648]
; SSE2-NEXT: pxor %xmm4, %xmm3
; SSE2-NEXT: pxor %xmm4, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm5
; SSE2-NEXT: pcmpgtd %xmm3, %xmm5
; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm3, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT: pand %xmm6, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm5[1,1,3,3]
; SSE2-NEXT: por %xmm1, %xmm3
; SSE2-NEXT: pxor %xmm4, %xmm2
; SSE2-NEXT: pxor %xmm4, %xmm0
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: pcmpgtd %xmm2, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm2, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT: pand %xmm4, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
; SSE2-NEXT: por %xmm2, %xmm0
; SSE2-NEXT: packssdw %xmm3, %xmm0
; SSE2-NEXT: retq
;
; SSE42-LABEL: test_cmp_v4i64:
; SSE42: # %bb.0:
; SSE42-NEXT: pcmpgtq %xmm3, %xmm1
; SSE42-NEXT: pcmpgtq %xmm2, %xmm0
; SSE42-NEXT: packssdw %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX1-LABEL: test_cmp_v4i64:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpackssdw %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v4i64:
; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_cmp_v4i64:
; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = icmp sgt <4 x i64> %a0, %a1
ret <4 x i1> %1
}
define <8 x i1> @test_cmp_v8i32(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; SSE-LABEL: test_cmp_v8i32:
; SSE: # %bb.0:
; SSE-NEXT: pcmpgtd %xmm3, %xmm1
; SSE-NEXT: pcmpgtd %xmm2, %xmm0
; SSE-NEXT: packssdw %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: test_cmp_v8i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpgtd %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpackssdw %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v8i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_cmp_v8i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = icmp sgt <8 x i32> %a0, %a1
ret <8 x i1> %1
}
define <16 x i1> @test_cmp_v16i16(<16 x i16> %a0, <16 x i16> %a1) nounwind {
; SSE-LABEL: test_cmp_v16i16:
; SSE: # %bb.0:
; SSE-NEXT: pcmpgtw %xmm3, %xmm1
; SSE-NEXT: pcmpgtw %xmm2, %xmm0
; SSE-NEXT: packsswb %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: test_cmp_v16i16:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpgtw %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpacksswb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v16i16:
; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_cmp_v16i16:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512DQ-LABEL: test_cmp_v16i16:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0
; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: test_cmp_v16i16:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%1 = icmp sgt <16 x i16> %a0, %a1
ret <16 x i1> %1
}
define <32 x i1> @test_cmp_v32i8(<32 x i8> %a0, <32 x i8> %a1) nounwind {
; SSE-LABEL: test_cmp_v32i8:
; SSE: # %bb.0:
; SSE-NEXT: movq %rdi, %rax
; SSE-NEXT: pcmpgtb %xmm2, %xmm0
; SSE-NEXT: pmovmskb %xmm0, %ecx
; SSE-NEXT: pcmpgtb %xmm3, %xmm1
; SSE-NEXT: pmovmskb %xmm1, %edx
; SSE-NEXT: shll $16, %edx
; SSE-NEXT: orl %ecx, %edx
; SSE-NEXT: movl %edx, (%rdi)
; SSE-NEXT: retq
;
; AVX1-LABEL: test_cmp_v32i8:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpgtb %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v32i8:
; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_cmp_v32i8:
; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%1 = icmp sgt <32 x i8> %a0, %a1
ret <32 x i1> %1
}
;
; 512-bit vector comparisons
;
define <8 x i1> @test_cmp_v8f64(<8 x double> %a0, <8 x double> %a1) nounwind {
; SSE-LABEL: test_cmp_v8f64:
; SSE: # %bb.0:
; SSE-NEXT: cmpltpd %xmm3, %xmm7
; SSE-NEXT: cmpltpd %xmm2, %xmm6
; SSE-NEXT: packssdw %xmm7, %xmm6
; SSE-NEXT: cmpltpd %xmm1, %xmm5
; SSE-NEXT: cmpltpd %xmm0, %xmm4
; SSE-NEXT: packssdw %xmm5, %xmm4
; SSE-NEXT: packssdw %xmm6, %xmm4
; SSE-NEXT: movdqa %xmm4, %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: test_cmp_v8f64:
; AVX1: # %bb.0:
; AVX1-NEXT: vcmpltpd %ymm1, %ymm3, %ymm1
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpackssdw %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vcmpltpd %ymm0, %ymm2, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpackssdw %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v8f64:
; AVX2: # %bb.0:
; AVX2-NEXT: vcmpltpd %ymm1, %ymm3, %ymm1
; AVX2-NEXT: vcmpltpd %ymm0, %ymm2, %ymm0
; AVX2-NEXT: vpackssdw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_cmp_v8f64:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vcmpltpd %zmm0, %zmm1, %k1
; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512DQ-LABEL: test_cmp_v8f64:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vcmpltpd %zmm0, %zmm1, %k0
; AVX512DQ-NEXT: vpmovm2d %k0, %zmm0
; AVX512DQ-NEXT: vpmovdw %zmm0, %ymm0
; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: test_cmp_v8f64:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vcmpltpd %zmm0, %zmm1, %k0
; AVX512BW-NEXT: vpmovm2w %k0, %zmm0
; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%1 = fcmp ogt <8 x double> %a0, %a1
ret <8 x i1> %1
}
define <16 x i1> @test_cmp_v16f32(<16 x float> %a0, <16 x float> %a1) nounwind {
; SSE-LABEL: test_cmp_v16f32:
; SSE: # %bb.0:
; SSE-NEXT: cmpltps %xmm3, %xmm7
; SSE-NEXT: cmpltps %xmm2, %xmm6
; SSE-NEXT: packssdw %xmm7, %xmm6
; SSE-NEXT: cmpltps %xmm1, %xmm5
; SSE-NEXT: cmpltps %xmm0, %xmm4
; SSE-NEXT: packssdw %xmm5, %xmm4
; SSE-NEXT: packsswb %xmm6, %xmm4
; SSE-NEXT: movdqa %xmm4, %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: test_cmp_v16f32:
; AVX1: # %bb.0:
; AVX1-NEXT: vcmpltps %ymm1, %ymm3, %ymm1
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpackssdw %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vcmpltps %ymm0, %ymm2, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpackssdw %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v16f32:
; AVX2: # %bb.0:
; AVX2-NEXT: vcmpltps %ymm1, %ymm3, %ymm1
; AVX2-NEXT: vcmpltps %ymm0, %ymm2, %ymm0
; AVX2-NEXT: vpackssdw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_cmp_v16f32:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vcmpltps %zmm0, %zmm1, %k1
; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512DQ-LABEL: test_cmp_v16f32:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vcmpltps %zmm0, %zmm1, %k0
; AVX512DQ-NEXT: vpmovm2d %k0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: test_cmp_v16f32:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vcmpltps %zmm0, %zmm1, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%1 = fcmp ogt <16 x float> %a0, %a1
ret <16 x i1> %1
}
define <8 x i1> @test_cmp_v8i64(<8 x i64> %a0, <8 x i64> %a1) nounwind {
; SSE2-LABEL: test_cmp_v8i64:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [2147483648,2147483648]
; SSE2-NEXT: pxor %xmm8, %xmm7
; SSE2-NEXT: pxor %xmm8, %xmm3
; SSE2-NEXT: movdqa %xmm3, %xmm9
; SSE2-NEXT: pcmpgtd %xmm7, %xmm9
; SSE2-NEXT: pshufd {{.*#+}} xmm10 = xmm9[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm7, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; SSE2-NEXT: pand %xmm10, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm9[1,1,3,3]
; SSE2-NEXT: por %xmm3, %xmm7
; SSE2-NEXT: pxor %xmm8, %xmm6
; SSE2-NEXT: pxor %xmm8, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm3
; SSE2-NEXT: pcmpgtd %xmm6, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm9 = xmm3[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm6, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm2[1,1,3,3]
; SSE2-NEXT: pand %xmm9, %xmm6
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[1,1,3,3]
; SSE2-NEXT: por %xmm6, %xmm2
; SSE2-NEXT: packssdw %xmm7, %xmm2
; SSE2-NEXT: pxor %xmm8, %xmm5
; SSE2-NEXT: pxor %xmm8, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm3
; SSE2-NEXT: pcmpgtd %xmm5, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm3[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm5, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT: pand %xmm6, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; SSE2-NEXT: por %xmm1, %xmm3
; SSE2-NEXT: pxor %xmm8, %xmm4
; SSE2-NEXT: pxor %xmm8, %xmm0
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: pcmpgtd %xmm4, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm1[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm4, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
; SSE2-NEXT: pand %xmm5, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
; SSE2-NEXT: por %xmm4, %xmm0
; SSE2-NEXT: packssdw %xmm3, %xmm0
; SSE2-NEXT: packssdw %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE42-LABEL: test_cmp_v8i64:
; SSE42: # %bb.0:
; SSE42-NEXT: pcmpgtq %xmm7, %xmm3
; SSE42-NEXT: pcmpgtq %xmm6, %xmm2
; SSE42-NEXT: packssdw %xmm3, %xmm2
; SSE42-NEXT: pcmpgtq %xmm5, %xmm1
; SSE42-NEXT: pcmpgtq %xmm4, %xmm0
; SSE42-NEXT: packssdw %xmm1, %xmm0
; SSE42-NEXT: packssdw %xmm2, %xmm0
; SSE42-NEXT: retq
;
; AVX1-LABEL: test_cmp_v8i64:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
; AVX1-NEXT: vpcmpgtq %xmm4, %xmm5, %xmm4
; AVX1-NEXT: vpcmpgtq %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpackssdw %xmm4, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
; AVX1-NEXT: vpcmpgtq %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpackssdw %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v8i64:
; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtq %ymm3, %ymm1, %ymm1
; AVX2-NEXT: vpcmpgtq %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpackssdw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_cmp_v8i64:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpcmpgtq %zmm1, %zmm0, %k1
; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512DQ-LABEL: test_cmp_v8i64:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; AVX512DQ-NEXT: vpmovm2d %k0, %zmm0
; AVX512DQ-NEXT: vpmovdw %zmm0, %ymm0
; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: test_cmp_v8i64:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; AVX512BW-NEXT: vpmovm2w %k0, %zmm0
; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%1 = icmp sgt <8 x i64> %a0, %a1
ret <8 x i1> %1
}
define <16 x i1> @test_cmp_v16i32(<16 x i32> %a0, <16 x i32> %a1) nounwind {
; SSE-LABEL: test_cmp_v16i32:
; SSE: # %bb.0:
; SSE-NEXT: pcmpgtd %xmm7, %xmm3
; SSE-NEXT: pcmpgtd %xmm6, %xmm2
; SSE-NEXT: packssdw %xmm3, %xmm2
; SSE-NEXT: pcmpgtd %xmm5, %xmm1
; SSE-NEXT: pcmpgtd %xmm4, %xmm0
; SSE-NEXT: packssdw %xmm1, %xmm0
; SSE-NEXT: packsswb %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: test_cmp_v16i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
; AVX1-NEXT: vpcmpgtd %xmm4, %xmm5, %xmm4
; AVX1-NEXT: vpcmpgtd %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpackssdw %xmm4, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
; AVX1-NEXT: vpcmpgtd %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpcmpgtd %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpackssdw %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v16i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtd %ymm3, %ymm1, %ymm1
; AVX2-NEXT: vpcmpgtd %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpackssdw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_cmp_v16i32:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpcmpgtd %zmm1, %zmm0, %k1
; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512DQ-LABEL: test_cmp_v16i32:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; AVX512DQ-NEXT: vpmovm2d %k0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: test_cmp_v16i32:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%1 = icmp sgt <16 x i32> %a0, %a1
ret <16 x i1> %1
}
define <32 x i1> @test_cmp_v32i16(<32 x i16> %a0, <32 x i16> %a1) nounwind {
; SSE-LABEL: test_cmp_v32i16:
; SSE: # %bb.0:
; SSE-NEXT: movq %rdi, %rax
; SSE-NEXT: pcmpgtw %xmm5, %xmm1
; SSE-NEXT: pcmpgtw %xmm4, %xmm0
; SSE-NEXT: packsswb %xmm1, %xmm0
; SSE-NEXT: pmovmskb %xmm0, %ecx
; SSE-NEXT: pcmpgtw %xmm7, %xmm3
; SSE-NEXT: pcmpgtw %xmm6, %xmm2
; SSE-NEXT: packsswb %xmm3, %xmm2
; SSE-NEXT: pmovmskb %xmm2, %edx
; SSE-NEXT: shll $16, %edx
; SSE-NEXT: orl %ecx, %edx
; SSE-NEXT: movl %edx, (%rdi)
; SSE-NEXT: retq
;
; AVX1-LABEL: test_cmp_v32i16:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
; AVX1-NEXT: vpcmpgtw %xmm4, %xmm5, %xmm4
; AVX1-NEXT: vpcmpgtw %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpacksswb %xmm4, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
; AVX1-NEXT: vpcmpgtw %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpcmpgtw %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpacksswb %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v32i16:
; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtw %ymm3, %ymm1, %ymm1
; AVX2-NEXT: vpcmpgtw %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpacksswb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_cmp_v32i16:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vextracti64x4 $1, %zmm1, %ymm2
; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; AVX512F-NEXT: vpcmpgtw %ymm2, %ymm3, %ymm2
; AVX512F-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
; AVX512F-NEXT: vpmovdb %zmm1, %xmm1
; AVX512F-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX512F-NEXT: retq
;
; AVX512DQ-LABEL: test_cmp_v32i16:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vextracti64x4 $1, %zmm1, %ymm2
; AVX512DQ-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; AVX512DQ-NEXT: vpcmpgtw %ymm2, %ymm3, %ymm2
; AVX512DQ-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0
; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
; AVX512DQ-NEXT: vpmovdb %zmm1, %xmm1
; AVX512DQ-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: test_cmp_v32i16:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtw %zmm1, %zmm0, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512BW-NEXT: retq
%1 = icmp sgt <32 x i16> %a0, %a1
ret <32 x i1> %1
}
define <64 x i1> @test_cmp_v64i8(<64 x i8> %a0, <64 x i8> %a1) nounwind {
; SSE-LABEL: test_cmp_v64i8:
; SSE: # %bb.0:
; SSE-NEXT: movq %rdi, %rax
; SSE-NEXT: pcmpgtb %xmm4, %xmm0
; SSE-NEXT: pmovmskb %xmm0, %ecx
; SSE-NEXT: pcmpgtb %xmm5, %xmm1
; SSE-NEXT: pmovmskb %xmm1, %edx
; SSE-NEXT: shll $16, %edx
; SSE-NEXT: orl %ecx, %edx
; SSE-NEXT: pcmpgtb %xmm6, %xmm2
; SSE-NEXT: pmovmskb %xmm2, %ecx
; SSE-NEXT: pcmpgtb %xmm7, %xmm3
; SSE-NEXT: pmovmskb %xmm3, %esi
; SSE-NEXT: shll $16, %esi
; SSE-NEXT: orl %ecx, %esi
; SSE-NEXT: shlq $32, %rsi
; SSE-NEXT: orq %rdx, %rsi
; SSE-NEXT: movq %rsi, (%rdi)
; SSE-NEXT: retq
;
; AVX1-LABEL: test_cmp_v64i8:
; AVX1: # %bb.0:
; AVX1-NEXT: movq %rdi, %rax
; AVX1-NEXT: vpcmpgtb %xmm2, %xmm0, %xmm4
; AVX1-NEXT: vpmovmskb %xmm4, %ecx
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpcmpgtb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpmovmskb %xmm0, %edx
; AVX1-NEXT: shll $16, %edx
; AVX1-NEXT: orl %ecx, %edx
; AVX1-NEXT: vpcmpgtb %xmm3, %xmm1, %xmm0
; AVX1-NEXT: vpmovmskb %xmm0, %ecx
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm0
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-NEXT: vpcmpgtb %xmm0, %xmm1, %xmm0
; AVX1-NEXT: vpmovmskb %xmm0, %esi
; AVX1-NEXT: shll $16, %esi
; AVX1-NEXT: orl %ecx, %esi
; AVX1-NEXT: shlq $32, %rsi
; AVX1-NEXT: orq %rdx, %rsi
; AVX1-NEXT: movq %rsi, (%rdi)
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v64i8:
; AVX2: # %bb.0:
; AVX2-NEXT: movq %rdi, %rax
; AVX2-NEXT: vpcmpgtb %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpmovmskb %ymm0, %ecx
; AVX2-NEXT: vpcmpgtb %ymm3, %ymm1, %ymm0
; AVX2-NEXT: vpmovmskb %ymm0, %edx
; AVX2-NEXT: shlq $32, %rdx
; AVX2-NEXT: orq %rcx, %rdx
; AVX2-NEXT: movq %rdx, (%rdi)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_cmp_v64i8:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vextracti64x4 $1, %zmm1, %ymm2
; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; AVX512F-NEXT: vpcmpgtb %ymm2, %ymm3, %ymm2
; AVX512F-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512F-NEXT: vextracti128 $1, %ymm2, %xmm3
; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512F-NEXT: # kill: def $xmm2 killed $xmm2 killed $ymm2
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512DQ-LABEL: test_cmp_v64i8:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vextracti64x4 $1, %zmm1, %ymm2
; AVX512DQ-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; AVX512DQ-NEXT: vpcmpgtb %ymm2, %ymm3, %ymm2
; AVX512DQ-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm0
; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512DQ-NEXT: vextracti128 $1, %ymm2, %xmm3
; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512DQ-NEXT: # kill: def $xmm2 killed $xmm2 killed $ymm2
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: test_cmp_v64i8:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtb %zmm1, %zmm0, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
; AVX512BW-NEXT: retq
%1 = icmp sgt <64 x i8> %a0, %a1
ret <64 x i1> %1
}
;
; 1024-bit vector comparisons
;
define <16 x i1> @test_cmp_v16f64(<16 x double> %a0, <16 x double> %a1) nounwind {
; SSE-LABEL: test_cmp_v16f64:
; SSE: # %bb.0:
; SSE-NEXT: movapd %xmm0, %xmm8
; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm0
; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm11
; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm10
; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm12
; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm9
; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm13
; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm14
; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm15
; SSE-NEXT: cmpltpd %xmm7, %xmm15
; SSE-NEXT: cmpltpd %xmm6, %xmm14
; SSE-NEXT: packssdw %xmm15, %xmm14
; SSE-NEXT: cmpltpd %xmm5, %xmm13
; SSE-NEXT: cmpltpd %xmm4, %xmm9
; SSE-NEXT: packssdw %xmm13, %xmm9
; SSE-NEXT: packssdw %xmm14, %xmm9
; SSE-NEXT: cmpltpd %xmm3, %xmm12
; SSE-NEXT: cmpltpd %xmm2, %xmm10
; SSE-NEXT: packssdw %xmm12, %xmm10
; SSE-NEXT: cmpltpd %xmm1, %xmm11
; SSE-NEXT: cmpltpd %xmm8, %xmm0
; SSE-NEXT: packssdw %xmm11, %xmm0
; SSE-NEXT: packssdw %xmm10, %xmm0
; SSE-NEXT: packsswb %xmm9, %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: test_cmp_v16f64:
; AVX1: # %bb.0:
; AVX1-NEXT: vcmpltpd %ymm3, %ymm7, %ymm3
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm7
; AVX1-NEXT: vpackssdw %xmm7, %xmm3, %xmm3
; AVX1-NEXT: vcmpltpd %ymm2, %ymm6, %ymm2
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm6
; AVX1-NEXT: vpackssdw %xmm6, %xmm2, %xmm2
; AVX1-NEXT: vpackssdw %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vcmpltpd %ymm1, %ymm5, %ymm1
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpackssdw %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vcmpltpd %ymm0, %ymm4, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpackssdw %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpacksswb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v16f64:
; AVX2: # %bb.0:
; AVX2-NEXT: vcmpltpd %ymm3, %ymm7, %ymm3
; AVX2-NEXT: vcmpltpd %ymm2, %ymm6, %ymm2
; AVX2-NEXT: vpackssdw %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,1,3]
; AVX2-NEXT: vcmpltpd %ymm1, %ymm5, %ymm1
; AVX2-NEXT: vcmpltpd %ymm0, %ymm4, %ymm0
; AVX2-NEXT: vpackssdw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT: vpackssdw %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_cmp_v16f64:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vcmpltpd %zmm0, %zmm2, %k0
; AVX512F-NEXT: vcmpltpd %zmm1, %zmm3, %k1
; AVX512F-NEXT: kunpckbw %k0, %k1, %k1
; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512DQ-LABEL: test_cmp_v16f64:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vcmpltpd %zmm0, %zmm2, %k0
; AVX512DQ-NEXT: vcmpltpd %zmm1, %zmm3, %k1
; AVX512DQ-NEXT: kunpckbw %k0, %k1, %k0
; AVX512DQ-NEXT: vpmovm2d %k0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: test_cmp_v16f64:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vcmpltpd %zmm0, %zmm2, %k0
; AVX512BW-NEXT: vcmpltpd %zmm1, %zmm3, %k1
; AVX512BW-NEXT: kunpckbw %k0, %k1, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%1 = fcmp ogt <16 x double> %a0, %a1
ret <16 x i1> %1
}
define <32 x i1> @test_cmp_v32f32(<32 x float> %a0, <32 x float> %a1) nounwind {
; SSE-LABEL: test_cmp_v32f32:
; SSE: # %bb.0:
; SSE-NEXT: movq %rdi, %rax
; SSE-NEXT: movaps {{[0-9]+}}(%rsp), %xmm8
; SSE-NEXT: movaps {{[0-9]+}}(%rsp), %xmm10
; SSE-NEXT: movaps {{[0-9]+}}(%rsp), %xmm9
; SSE-NEXT: movaps {{[0-9]+}}(%rsp), %xmm11
; SSE-NEXT: movaps {{[0-9]+}}(%rsp), %xmm12
; SSE-NEXT: movaps {{[0-9]+}}(%rsp), %xmm13
; SSE-NEXT: movaps {{[0-9]+}}(%rsp), %xmm14
; SSE-NEXT: movaps {{[0-9]+}}(%rsp), %xmm15
; SSE-NEXT: cmpltps %xmm3, %xmm15
; SSE-NEXT: cmpltps %xmm2, %xmm14
; SSE-NEXT: packssdw %xmm15, %xmm14
; SSE-NEXT: cmpltps %xmm1, %xmm13
; SSE-NEXT: cmpltps %xmm0, %xmm12
; SSE-NEXT: packssdw %xmm13, %xmm12
; SSE-NEXT: packsswb %xmm14, %xmm12
; SSE-NEXT: pmovmskb %xmm12, %ecx
; SSE-NEXT: cmpltps %xmm7, %xmm11
; SSE-NEXT: cmpltps %xmm6, %xmm9
; SSE-NEXT: packssdw %xmm11, %xmm9
; SSE-NEXT: cmpltps %xmm5, %xmm10
; SSE-NEXT: cmpltps %xmm4, %xmm8
; SSE-NEXT: packssdw %xmm10, %xmm8
; SSE-NEXT: packsswb %xmm9, %xmm8
; SSE-NEXT: pmovmskb %xmm8, %edx
; SSE-NEXT: shll $16, %edx
; SSE-NEXT: orl %ecx, %edx
; SSE-NEXT: movl %edx, (%rdi)
; SSE-NEXT: retq
;
; AVX1-LABEL: test_cmp_v32f32:
; AVX1: # %bb.0:
; AVX1-NEXT: vcmpltps %ymm3, %ymm7, %ymm3
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm7
; AVX1-NEXT: vpackssdw %xmm7, %xmm3, %xmm3
; AVX1-NEXT: vcmpltps %ymm2, %ymm6, %ymm2
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm6
; AVX1-NEXT: vpackssdw %xmm6, %xmm2, %xmm2
; AVX1-NEXT: vpacksswb %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vcmpltps %ymm1, %ymm5, %ymm1
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpackssdw %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vcmpltps %ymm0, %ymm4, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpackssdw %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v32f32:
; AVX2: # %bb.0:
; AVX2-NEXT: vcmpltps %ymm3, %ymm7, %ymm3
; AVX2-NEXT: vcmpltps %ymm2, %ymm6, %ymm2
; AVX2-NEXT: vpackssdw %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,1,3]
; AVX2-NEXT: vcmpltps %ymm1, %ymm5, %ymm1
; AVX2-NEXT: vcmpltps %ymm0, %ymm4, %ymm0
; AVX2-NEXT: vpackssdw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT: vpacksswb %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_cmp_v32f32:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vcmpltps %zmm1, %zmm3, %k1
; AVX512F-NEXT: vcmpltps %zmm0, %zmm2, %k2
; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k2} {z}
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
; AVX512F-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
; AVX512F-NEXT: vpmovdb %zmm1, %xmm1
; AVX512F-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX512F-NEXT: retq
;
; AVX512DQ-LABEL: test_cmp_v32f32:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vcmpltps %zmm1, %zmm3, %k0
; AVX512DQ-NEXT: vcmpltps %zmm0, %zmm2, %k1
; AVX512DQ-NEXT: vpmovm2d %k1, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
; AVX512DQ-NEXT: vpmovm2d %k0, %zmm1
; AVX512DQ-NEXT: vpmovdb %zmm1, %xmm1
; AVX512DQ-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: test_cmp_v32f32:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vcmpltps %zmm0, %zmm2, %k0
; AVX512BW-NEXT: vcmpltps %zmm1, %zmm3, %k1
; AVX512BW-NEXT: kunpckwd %k0, %k1, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512BW-NEXT: retq
%1 = fcmp ogt <32 x float> %a0, %a1
ret <32 x i1> %1
}
define <16 x i1> @test_cmp_v16i64(<16 x i64> %a0, <16 x i64> %a1) nounwind {
; SSE2-LABEL: test_cmp_v16i64:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [2147483648,2147483648]
; SSE2-NEXT: pxor %xmm8, %xmm7
; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm9
; SSE2-NEXT: pxor %xmm8, %xmm9
; SSE2-NEXT: movdqa %xmm7, %xmm10
; SSE2-NEXT: pcmpgtd %xmm9, %xmm10
; SSE2-NEXT: pshufd {{.*#+}} xmm11 = xmm10[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm7, %xmm9
; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm9[1,1,3,3]
; SSE2-NEXT: pand %xmm11, %xmm7
; SSE2-NEXT: pshufd {{.*#+}} xmm9 = xmm10[1,1,3,3]
; SSE2-NEXT: por %xmm7, %xmm9
; SSE2-NEXT: pxor %xmm8, %xmm6
; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm7
; SSE2-NEXT: pxor %xmm8, %xmm7
; SSE2-NEXT: movdqa %xmm6, %xmm10
; SSE2-NEXT: pcmpgtd %xmm7, %xmm10
; SSE2-NEXT: pshufd {{.*#+}} xmm11 = xmm10[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm6, %xmm7
; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm7[1,1,3,3]
; SSE2-NEXT: pand %xmm11, %xmm7
; SSE2-NEXT: pshufd {{.*#+}} xmm10 = xmm10[1,1,3,3]
; SSE2-NEXT: por %xmm7, %xmm10
; SSE2-NEXT: packssdw %xmm9, %xmm10
; SSE2-NEXT: pxor %xmm8, %xmm5
; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm7
; SSE2-NEXT: pxor %xmm8, %xmm7
; SSE2-NEXT: movdqa %xmm5, %xmm6
; SSE2-NEXT: pcmpgtd %xmm7, %xmm6
; SSE2-NEXT: pshufd {{.*#+}} xmm9 = xmm6[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm5, %xmm7
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm7[1,1,3,3]
; SSE2-NEXT: pand %xmm9, %xmm5
; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
; SSE2-NEXT: por %xmm5, %xmm6
; SSE2-NEXT: pxor %xmm8, %xmm4
; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm5
; SSE2-NEXT: pxor %xmm8, %xmm5
; SSE2-NEXT: movdqa %xmm4, %xmm7
; SSE2-NEXT: pcmpgtd %xmm5, %xmm7
; SSE2-NEXT: pshufd {{.*#+}} xmm9 = xmm7[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm4, %xmm5
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
; SSE2-NEXT: pand %xmm9, %xmm5
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm7[1,1,3,3]
; SSE2-NEXT: por %xmm5, %xmm4
; SSE2-NEXT: packssdw %xmm6, %xmm4
; SSE2-NEXT: packssdw %xmm10, %xmm4
; SSE2-NEXT: pxor %xmm8, %xmm3
; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm5
; SSE2-NEXT: pxor %xmm8, %xmm5
; SSE2-NEXT: movdqa %xmm3, %xmm6
; SSE2-NEXT: pcmpgtd %xmm5, %xmm6
; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm3, %xmm5
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm5[1,1,3,3]
; SSE2-NEXT: pand %xmm7, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm6[1,1,3,3]
; SSE2-NEXT: por %xmm3, %xmm5
; SSE2-NEXT: pxor %xmm8, %xmm2
; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm3
; SSE2-NEXT: pxor %xmm8, %xmm3
; SSE2-NEXT: movdqa %xmm2, %xmm6
; SSE2-NEXT: pcmpgtd %xmm3, %xmm6
; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm2, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; SSE2-NEXT: pand %xmm7, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm6[1,1,3,3]
; SSE2-NEXT: por %xmm3, %xmm2
; SSE2-NEXT: packssdw %xmm5, %xmm2
; SSE2-NEXT: pxor %xmm8, %xmm1
; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm3
; SSE2-NEXT: pxor %xmm8, %xmm3
; SSE2-NEXT: movdqa %xmm1, %xmm5
; SSE2-NEXT: pcmpgtd %xmm3, %xmm5
; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm1, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,1,3,3]
; SSE2-NEXT: pand %xmm6, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm5[1,1,3,3]
; SSE2-NEXT: por %xmm1, %xmm3
; SSE2-NEXT: pxor %xmm8, %xmm0
; SSE2-NEXT: pxor {{[0-9]+}}(%rsp), %xmm8
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: pcmpgtd %xmm8, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm1[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm0, %xmm8
; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm8[1,1,3,3]
; SSE2-NEXT: pand %xmm5, %xmm6
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
; SSE2-NEXT: por %xmm6, %xmm0
; SSE2-NEXT: packssdw %xmm3, %xmm0
; SSE2-NEXT: packssdw %xmm2, %xmm0
; SSE2-NEXT: packsswb %xmm4, %xmm0
; SSE2-NEXT: retq
;
; SSE42-LABEL: test_cmp_v16i64:
; SSE42: # %bb.0:
; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm7
; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm6
; SSE42-NEXT: packssdw %xmm7, %xmm6
; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm5
; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm4
; SSE42-NEXT: packssdw %xmm5, %xmm4
; SSE42-NEXT: packssdw %xmm6, %xmm4
; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm3
; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm2
; SSE42-NEXT: packssdw %xmm3, %xmm2
; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm1
; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm0
; SSE42-NEXT: packssdw %xmm1, %xmm0
; SSE42-NEXT: packssdw %xmm2, %xmm0
; SSE42-NEXT: packsswb %xmm4, %xmm0
; SSE42-NEXT: retq
;
; AVX1-LABEL: test_cmp_v16i64:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm7, %xmm8
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm9
; AVX1-NEXT: vpcmpgtq %xmm8, %xmm9, %xmm8
; AVX1-NEXT: vpcmpgtq %xmm7, %xmm3, %xmm3
; AVX1-NEXT: vpackssdw %xmm8, %xmm3, %xmm8
; AVX1-NEXT: vextractf128 $1, %ymm6, %xmm7
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm7, %xmm3, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm6, %xmm2, %xmm2
; AVX1-NEXT: vpackssdw %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpackssdw %xmm8, %xmm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm6
; AVX1-NEXT: vpcmpgtq %xmm3, %xmm6, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm5, %xmm1, %xmm1
; AVX1-NEXT: vpackssdw %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpcmpgtq %xmm3, %xmm5, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vpackssdw %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpacksswb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v16i64:
; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtq %ymm7, %ymm3, %ymm3
; AVX2-NEXT: vpcmpgtq %ymm6, %ymm2, %ymm2
; AVX2-NEXT: vpackssdw %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,1,3]
; AVX2-NEXT: vpcmpgtq %ymm5, %ymm1, %ymm1
; AVX2-NEXT: vpcmpgtq %ymm4, %ymm0, %ymm0
; AVX2-NEXT: vpackssdw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT: vpackssdw %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_cmp_v16i64:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpcmpgtq %zmm2, %zmm0, %k0
; AVX512F-NEXT: vpcmpgtq %zmm3, %zmm1, %k1
; AVX512F-NEXT: kunpckbw %k0, %k1, %k1
; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512DQ-LABEL: test_cmp_v16i64:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpcmpgtq %zmm2, %zmm0, %k0
; AVX512DQ-NEXT: vpcmpgtq %zmm3, %zmm1, %k1
; AVX512DQ-NEXT: kunpckbw %k0, %k1, %k0
; AVX512DQ-NEXT: vpmovm2d %k0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: test_cmp_v16i64:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtq %zmm2, %zmm0, %k0
; AVX512BW-NEXT: vpcmpgtq %zmm3, %zmm1, %k1
; AVX512BW-NEXT: kunpckbw %k0, %k1, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%1 = icmp sgt <16 x i64> %a0, %a1
ret <16 x i1> %1
}
define <32 x i1> @test_cmp_v32i32(<32 x i32> %a0, <32 x i32> %a1) nounwind {
; SSE-LABEL: test_cmp_v32i32:
; SSE: # %bb.0:
; SSE-NEXT: movq %rdi, %rax
; SSE-NEXT: pcmpgtd {{[0-9]+}}(%rsp), %xmm3
; SSE-NEXT: pcmpgtd {{[0-9]+}}(%rsp), %xmm2
; SSE-NEXT: packssdw %xmm3, %xmm2
; SSE-NEXT: pcmpgtd {{[0-9]+}}(%rsp), %xmm1
; SSE-NEXT: pcmpgtd {{[0-9]+}}(%rsp), %xmm0
; SSE-NEXT: packssdw %xmm1, %xmm0
; SSE-NEXT: packsswb %xmm2, %xmm0
; SSE-NEXT: pmovmskb %xmm0, %ecx
; SSE-NEXT: pcmpgtd {{[0-9]+}}(%rsp), %xmm7
; SSE-NEXT: pcmpgtd {{[0-9]+}}(%rsp), %xmm6
; SSE-NEXT: packssdw %xmm7, %xmm6
; SSE-NEXT: pcmpgtd {{[0-9]+}}(%rsp), %xmm5
; SSE-NEXT: pcmpgtd {{[0-9]+}}(%rsp), %xmm4
; SSE-NEXT: packssdw %xmm5, %xmm4
; SSE-NEXT: packsswb %xmm6, %xmm4
; SSE-NEXT: pmovmskb %xmm4, %edx
; SSE-NEXT: shll $16, %edx
; SSE-NEXT: orl %ecx, %edx
; SSE-NEXT: movl %edx, (%rdi)
; SSE-NEXT: retq
;
; AVX1-LABEL: test_cmp_v32i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm7, %xmm8
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm9
; AVX1-NEXT: vpcmpgtd %xmm8, %xmm9, %xmm8
; AVX1-NEXT: vpcmpgtd %xmm7, %xmm3, %xmm3
; AVX1-NEXT: vpackssdw %xmm8, %xmm3, %xmm8
; AVX1-NEXT: vextractf128 $1, %ymm6, %xmm7
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
; AVX1-NEXT: vpcmpgtd %xmm7, %xmm3, %xmm3
; AVX1-NEXT: vpcmpgtd %xmm6, %xmm2, %xmm2
; AVX1-NEXT: vpackssdw %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpacksswb %xmm8, %xmm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm6
; AVX1-NEXT: vpcmpgtd %xmm3, %xmm6, %xmm3
; AVX1-NEXT: vpcmpgtd %xmm5, %xmm1, %xmm1
; AVX1-NEXT: vpackssdw %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpcmpgtd %xmm3, %xmm5, %xmm3
; AVX1-NEXT: vpcmpgtd %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vpackssdw %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v32i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtd %ymm7, %ymm3, %ymm3
; AVX2-NEXT: vpcmpgtd %ymm6, %ymm2, %ymm2
; AVX2-NEXT: vpackssdw %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,1,3]
; AVX2-NEXT: vpcmpgtd %ymm5, %ymm1, %ymm1
; AVX2-NEXT: vpcmpgtd %ymm4, %ymm0, %ymm0
; AVX2-NEXT: vpackssdw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT: vpacksswb %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_cmp_v32i32:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpcmpgtd %zmm3, %zmm1, %k1
; AVX512F-NEXT: vpcmpgtd %zmm2, %zmm0, %k2
; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k2} {z}
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
; AVX512F-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
; AVX512F-NEXT: vpmovdb %zmm1, %xmm1
; AVX512F-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX512F-NEXT: retq
;
; AVX512DQ-LABEL: test_cmp_v32i32:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpcmpgtd %zmm3, %zmm1, %k0
; AVX512DQ-NEXT: vpcmpgtd %zmm2, %zmm0, %k1
; AVX512DQ-NEXT: vpmovm2d %k1, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
; AVX512DQ-NEXT: vpmovm2d %k0, %zmm1
; AVX512DQ-NEXT: vpmovdb %zmm1, %xmm1
; AVX512DQ-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: test_cmp_v32i32:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtd %zmm2, %zmm0, %k0
; AVX512BW-NEXT: vpcmpgtd %zmm3, %zmm1, %k1
; AVX512BW-NEXT: kunpckwd %k0, %k1, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512BW-NEXT: retq
%1 = icmp sgt <32 x i32> %a0, %a1
ret <32 x i1> %1
}
define <64 x i1> @test_cmp_v64i16(<64 x i16> %a0, <64 x i16> %a1) nounwind {
; SSE-LABEL: test_cmp_v64i16:
; SSE: # %bb.0:
; SSE-NEXT: movq %rdi, %rax
; SSE-NEXT: pcmpgtw {{[0-9]+}}(%rsp), %xmm1
; SSE-NEXT: pcmpgtw {{[0-9]+}}(%rsp), %xmm0
; SSE-NEXT: packsswb %xmm1, %xmm0
; SSE-NEXT: pmovmskb %xmm0, %ecx
; SSE-NEXT: pcmpgtw {{[0-9]+}}(%rsp), %xmm3
; SSE-NEXT: pcmpgtw {{[0-9]+}}(%rsp), %xmm2
; SSE-NEXT: packsswb %xmm3, %xmm2
; SSE-NEXT: pmovmskb %xmm2, %edx
; SSE-NEXT: shll $16, %edx
; SSE-NEXT: orl %ecx, %edx
; SSE-NEXT: pcmpgtw {{[0-9]+}}(%rsp), %xmm5
; SSE-NEXT: pcmpgtw {{[0-9]+}}(%rsp), %xmm4
; SSE-NEXT: packsswb %xmm5, %xmm4
; SSE-NEXT: pmovmskb %xmm4, %ecx
; SSE-NEXT: pcmpgtw {{[0-9]+}}(%rsp), %xmm7
; SSE-NEXT: pcmpgtw {{[0-9]+}}(%rsp), %xmm6
; SSE-NEXT: packsswb %xmm7, %xmm6
; SSE-NEXT: pmovmskb %xmm6, %esi
; SSE-NEXT: shll $16, %esi
; SSE-NEXT: orl %ecx, %esi
; SSE-NEXT: shlq $32, %rsi
; SSE-NEXT: orq %rdx, %rsi
; SSE-NEXT: movq %rsi, (%rdi)
; SSE-NEXT: retq
;
; AVX1-LABEL: test_cmp_v64i16:
; AVX1: # %bb.0:
; AVX1-NEXT: movq %rdi, %rax
; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm8
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm9
; AVX1-NEXT: vpcmpgtw %xmm8, %xmm9, %xmm8
; AVX1-NEXT: vpcmpgtw %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vpacksswb %xmm8, %xmm0, %xmm0
; AVX1-NEXT: vpmovmskb %xmm0, %ecx
; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm0
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
; AVX1-NEXT: vpcmpgtw %xmm0, %xmm4, %xmm0
; AVX1-NEXT: vpcmpgtw %xmm5, %xmm1, %xmm1
; AVX1-NEXT: vpacksswb %xmm0, %xmm1, %xmm0
; AVX1-NEXT: vpmovmskb %xmm0, %edx
; AVX1-NEXT: shll $16, %edx
; AVX1-NEXT: orl %ecx, %edx
; AVX1-NEXT: vextractf128 $1, %ymm6, %xmm0
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm1
; AVX1-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm0
; AVX1-NEXT: vpcmpgtw %xmm6, %xmm2, %xmm1
; AVX1-NEXT: vpacksswb %xmm0, %xmm1, %xmm0
; AVX1-NEXT: vpmovmskb %xmm0, %ecx
; AVX1-NEXT: vextractf128 $1, %ymm7, %xmm0
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm1
; AVX1-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm0
; AVX1-NEXT: vpcmpgtw %xmm7, %xmm3, %xmm1
; AVX1-NEXT: vpacksswb %xmm0, %xmm1, %xmm0
; AVX1-NEXT: vpmovmskb %xmm0, %esi
; AVX1-NEXT: shll $16, %esi
; AVX1-NEXT: orl %ecx, %esi
; AVX1-NEXT: shlq $32, %rsi
; AVX1-NEXT: orq %rdx, %rsi
; AVX1-NEXT: movq %rsi, (%rdi)
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v64i16:
; AVX2: # %bb.0:
; AVX2-NEXT: movq %rdi, %rax
; AVX2-NEXT: vpcmpgtw %ymm5, %ymm1, %ymm1
; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm5
; AVX2-NEXT: vpacksswb %xmm5, %xmm1, %xmm1
; AVX2-NEXT: vpcmpgtw %ymm4, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm4
; AVX2-NEXT: vpacksswb %xmm4, %xmm0, %xmm0
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: vpmovmskb %ymm0, %ecx
; AVX2-NEXT: vpcmpgtw %ymm7, %ymm3, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpcmpgtw %ymm6, %ymm2, %ymm1
; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX2-NEXT: vpacksswb %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
; AVX2-NEXT: vpmovmskb %ymm0, %edx
; AVX2-NEXT: shlq $32, %rdx
; AVX2-NEXT: orq %rcx, %rdx
; AVX2-NEXT: movq %rdx, (%rdi)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_cmp_v64i16:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpcmpgtw %ymm7, %ymm3, %ymm3
; AVX512F-NEXT: vpcmpgtw %ymm6, %ymm2, %ymm2
; AVX512F-NEXT: vpcmpgtw %ymm5, %ymm1, %ymm1
; AVX512F-NEXT: vpcmpgtw %ymm4, %ymm0, %ymm0
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
; AVX512F-NEXT: vpmovdb %zmm1, %xmm1
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
; AVX512F-NEXT: vpmovdb %zmm2, %xmm2
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm3 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero,ymm3[8],zero,ymm3[9],zero,ymm3[10],zero,ymm3[11],zero,ymm3[12],zero,ymm3[13],zero,ymm3[14],zero,ymm3[15],zero
; AVX512F-NEXT: vpmovdb %zmm3, %xmm3
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512DQ-LABEL: test_cmp_v64i16:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpcmpgtw %ymm7, %ymm3, %ymm3
; AVX512DQ-NEXT: vpcmpgtw %ymm6, %ymm2, %ymm2
; AVX512DQ-NEXT: vpcmpgtw %ymm5, %ymm1, %ymm1
; AVX512DQ-NEXT: vpcmpgtw %ymm4, %ymm0, %ymm0
; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
; AVX512DQ-NEXT: vpmovdb %zmm1, %xmm1
; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
; AVX512DQ-NEXT: vpmovdb %zmm2, %xmm2
; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} zmm3 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero,ymm3[8],zero,ymm3[9],zero,ymm3[10],zero,ymm3[11],zero,ymm3[12],zero,ymm3[13],zero,ymm3[14],zero,ymm3[15],zero
; AVX512DQ-NEXT: vpmovdb %zmm3, %xmm3
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: test_cmp_v64i16:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtw %zmm2, %zmm0, %k0
; AVX512BW-NEXT: vpcmpgtw %zmm3, %zmm1, %k1
; AVX512BW-NEXT: kunpckdq %k0, %k1, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
; AVX512BW-NEXT: retq
%1 = icmp sgt <64 x i16> %a0, %a1
ret <64 x i1> %1
}
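; On targets without AVX512BW the <128 x i1> result below is returned through
; the pointer in %rdi: SSE/AVX1/AVX2 assemble it from pmovmskb masks joined
; with shifts and ors, while AVX512F/AVX512DQ sign-extend each 16-byte compare
; chunk to dwords and store 16-bit k-masks (vptestmd / vpmovd2m) two bytes at
; a time. AVX512BW needs only two 512-bit vpcmpgtb compares, materialized
; in-register with vpmovm2b.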
define <128 x i1> @test_cmp_v128i8(<128 x i8> %a0, <128 x i8> %a1) nounwind {
; SSE-LABEL: test_cmp_v128i8:
; SSE: # %bb.0:
; SSE-NEXT: movq %rdi, %rax
; SSE-NEXT: pcmpgtb {{[0-9]+}}(%rsp), %xmm0
; SSE-NEXT: pmovmskb %xmm0, %ecx
; SSE-NEXT: pcmpgtb {{[0-9]+}}(%rsp), %xmm1
; SSE-NEXT: pmovmskb %xmm1, %edx
; SSE-NEXT: shll $16, %edx
; SSE-NEXT: orl %ecx, %edx
; SSE-NEXT: pcmpgtb {{[0-9]+}}(%rsp), %xmm2
; SSE-NEXT: pmovmskb %xmm2, %esi
; SSE-NEXT: pcmpgtb {{[0-9]+}}(%rsp), %xmm3
; SSE-NEXT: pmovmskb %xmm3, %ecx
; SSE-NEXT: shll $16, %ecx
; SSE-NEXT: orl %esi, %ecx
; SSE-NEXT: shlq $32, %rcx
; SSE-NEXT: orq %rdx, %rcx
; SSE-NEXT: pcmpgtb {{[0-9]+}}(%rsp), %xmm4
; SSE-NEXT: pmovmskb %xmm4, %edx
; SSE-NEXT: pcmpgtb {{[0-9]+}}(%rsp), %xmm5
; SSE-NEXT: pmovmskb %xmm5, %esi
; SSE-NEXT: shll $16, %esi
; SSE-NEXT: orl %edx, %esi
; SSE-NEXT: pcmpgtb {{[0-9]+}}(%rsp), %xmm6
; SSE-NEXT: pmovmskb %xmm6, %edx
; SSE-NEXT: pcmpgtb {{[0-9]+}}(%rsp), %xmm7
; SSE-NEXT: pmovmskb %xmm7, %edi
; SSE-NEXT: shll $16, %edi
; SSE-NEXT: orl %edx, %edi
; SSE-NEXT: shlq $32, %rdi
; SSE-NEXT: orq %rsi, %rdi
; SSE-NEXT: movq %rdi, 8(%rax)
; SSE-NEXT: movq %rcx, (%rax)
; SSE-NEXT: retq
;
; AVX1-LABEL: test_cmp_v128i8:
; AVX1: # %bb.0:
; AVX1-NEXT: movq %rdi, %rax
; AVX1-NEXT: vpcmpgtb %xmm4, %xmm0, %xmm8
; AVX1-NEXT: vpmovmskb %xmm8, %ecx
; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpcmpgtb %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vpmovmskb %xmm0, %edx
; AVX1-NEXT: shll $16, %edx
; AVX1-NEXT: orl %ecx, %edx
; AVX1-NEXT: vpcmpgtb %xmm5, %xmm1, %xmm0
; AVX1-NEXT: vpmovmskb %xmm0, %esi
; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm0
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-NEXT: vpcmpgtb %xmm0, %xmm1, %xmm0
; AVX1-NEXT: vpmovmskb %xmm0, %ecx
; AVX1-NEXT: shll $16, %ecx
; AVX1-NEXT: orl %esi, %ecx
; AVX1-NEXT: shlq $32, %rcx
; AVX1-NEXT: orq %rdx, %rcx
; AVX1-NEXT: vpcmpgtb %xmm6, %xmm2, %xmm0
; AVX1-NEXT: vpmovmskb %xmm0, %edx
; AVX1-NEXT: vextractf128 $1, %ymm6, %xmm0
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm1
; AVX1-NEXT: vpcmpgtb %xmm0, %xmm1, %xmm0
; AVX1-NEXT: vpmovmskb %xmm0, %esi
; AVX1-NEXT: shll $16, %esi
; AVX1-NEXT: orl %edx, %esi
; AVX1-NEXT: vpcmpgtb %xmm7, %xmm3, %xmm0
; AVX1-NEXT: vpmovmskb %xmm0, %edx
; AVX1-NEXT: vextractf128 $1, %ymm7, %xmm0
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm1
; AVX1-NEXT: vpcmpgtb %xmm0, %xmm1, %xmm0
; AVX1-NEXT: vpmovmskb %xmm0, %edi
; AVX1-NEXT: shll $16, %edi
; AVX1-NEXT: orl %edx, %edi
; AVX1-NEXT: shlq $32, %rdi
; AVX1-NEXT: orq %rsi, %rdi
; AVX1-NEXT: movq %rdi, 8(%rax)
; AVX1-NEXT: movq %rcx, (%rax)
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v128i8:
; AVX2: # %bb.0:
; AVX2-NEXT: movq %rdi, %rax
; AVX2-NEXT: vpcmpgtb %ymm4, %ymm0, %ymm0
; AVX2-NEXT: vpmovmskb %ymm0, %ecx
; AVX2-NEXT: vpcmpgtb %ymm5, %ymm1, %ymm0
; AVX2-NEXT: vpmovmskb %ymm0, %edx
; AVX2-NEXT: shlq $32, %rdx
; AVX2-NEXT: orq %rcx, %rdx
; AVX2-NEXT: vpcmpgtb %ymm6, %ymm2, %ymm0
; AVX2-NEXT: vpmovmskb %ymm0, %ecx
; AVX2-NEXT: vpcmpgtb %ymm7, %ymm3, %ymm0
; AVX2-NEXT: vpmovmskb %ymm0, %esi
; AVX2-NEXT: shlq $32, %rsi
; AVX2-NEXT: orq %rcx, %rsi
; AVX2-NEXT: movq %rsi, 8(%rdi)
; AVX2-NEXT: movq %rdx, (%rdi)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_cmp_v128i8:
; AVX512F: # %bb.0:
; AVX512F-NEXT: movq %rdi, %rax
; AVX512F-NEXT: vpcmpgtb %ymm4, %ymm0, %ymm0
; AVX512F-NEXT: vpmovsxbd %xmm0, %zmm4
; AVX512F-NEXT: vptestmd %zmm4, %zmm4, %k0
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX512F-NEXT: vpmovsxbd %xmm0, %zmm0
; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k1
; AVX512F-NEXT: vpcmpgtb %ymm5, %ymm1, %ymm0
; AVX512F-NEXT: vpmovsxbd %xmm0, %zmm1
; AVX512F-NEXT: vptestmd %zmm1, %zmm1, %k2
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX512F-NEXT: vpmovsxbd %xmm0, %zmm0
; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k3
; AVX512F-NEXT: vpcmpgtb %ymm6, %ymm2, %ymm0
; AVX512F-NEXT: vpmovsxbd %xmm0, %zmm1
; AVX512F-NEXT: vptestmd %zmm1, %zmm1, %k4
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX512F-NEXT: vpmovsxbd %xmm0, %zmm0
; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k5
; AVX512F-NEXT: vpcmpgtb %ymm7, %ymm3, %ymm0
; AVX512F-NEXT: vpmovsxbd %xmm0, %zmm1
; AVX512F-NEXT: vptestmd %zmm1, %zmm1, %k6
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX512F-NEXT: vpmovsxbd %xmm0, %zmm0
; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k7
; AVX512F-NEXT: kmovw %k7, 14(%rdi)
; AVX512F-NEXT: kmovw %k6, 12(%rdi)
; AVX512F-NEXT: kmovw %k5, 10(%rdi)
; AVX512F-NEXT: kmovw %k4, 8(%rdi)
; AVX512F-NEXT: kmovw %k3, 6(%rdi)
; AVX512F-NEXT: kmovw %k2, 4(%rdi)
; AVX512F-NEXT: kmovw %k1, 2(%rdi)
; AVX512F-NEXT: kmovw %k0, (%rdi)
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512DQ-LABEL: test_cmp_v128i8:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: movq %rdi, %rax
; AVX512DQ-NEXT: vpcmpgtb %ymm4, %ymm0, %ymm0
; AVX512DQ-NEXT: vpmovsxbd %xmm0, %zmm4
; AVX512DQ-NEXT: vpmovd2m %zmm4, %k0
; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX512DQ-NEXT: vpmovsxbd %xmm0, %zmm0
; AVX512DQ-NEXT: vpmovd2m %zmm0, %k1
; AVX512DQ-NEXT: vpcmpgtb %ymm5, %ymm1, %ymm0
; AVX512DQ-NEXT: vpmovsxbd %xmm0, %zmm1
; AVX512DQ-NEXT: vpmovd2m %zmm1, %k2
; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX512DQ-NEXT: vpmovsxbd %xmm0, %zmm0
; AVX512DQ-NEXT: vpmovd2m %zmm0, %k3
; AVX512DQ-NEXT: vpcmpgtb %ymm6, %ymm2, %ymm0
; AVX512DQ-NEXT: vpmovsxbd %xmm0, %zmm1
; AVX512DQ-NEXT: vpmovd2m %zmm1, %k4
; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX512DQ-NEXT: vpmovsxbd %xmm0, %zmm0
; AVX512DQ-NEXT: vpmovd2m %zmm0, %k5
; AVX512DQ-NEXT: vpcmpgtb %ymm7, %ymm3, %ymm0
; AVX512DQ-NEXT: vpmovsxbd %xmm0, %zmm1
; AVX512DQ-NEXT: vpmovd2m %zmm1, %k6
; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX512DQ-NEXT: vpmovsxbd %xmm0, %zmm0
; AVX512DQ-NEXT: vpmovd2m %zmm0, %k7
; AVX512DQ-NEXT: kmovw %k7, 14(%rdi)
; AVX512DQ-NEXT: kmovw %k6, 12(%rdi)
; AVX512DQ-NEXT: kmovw %k5, 10(%rdi)
; AVX512DQ-NEXT: kmovw %k4, 8(%rdi)
; AVX512DQ-NEXT: kmovw %k3, 6(%rdi)
; AVX512DQ-NEXT: kmovw %k2, 4(%rdi)
; AVX512DQ-NEXT: kmovw %k1, 2(%rdi)
; AVX512DQ-NEXT: kmovw %k0, (%rdi)
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: test_cmp_v128i8:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtb %zmm3, %zmm1, %k0
; AVX512BW-NEXT: vpcmpgtb %zmm2, %zmm0, %k1
; AVX512BW-NEXT: vpmovm2b %k1, %zmm0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm1
; AVX512BW-NEXT: retq
%1 = icmp sgt <128 x i8> %a0, %a1
ret <128 x i1> %1
}
;
; 2048-bit vector comparisons
;
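; fcmp ogt is emitted as (v)cmpltpd with the operands swapped. SSE narrows the
; 32 results with packssdw/packsswb and stores a pmovmskb mask through the
; pointer in %rdi; AVX1/AVX2 return the bools packed to bytes in ymm0, and
; AVX512 concatenates 8-bit k-masks with kunpckbw (plus kunpckwd on AVX512BW).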
define <32 x i1> @test_cmp_v32f64(<32 x double> %a0, <32 x double> %a1) nounwind {
; SSE-LABEL: test_cmp_v32f64:
; SSE: # %bb.0:
; SSE-NEXT: movq %rdi, %rax
; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm8
; SSE-NEXT: cmpltpd %xmm7, %xmm8
; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm7
; SSE-NEXT: cmpltpd %xmm6, %xmm7
; SSE-NEXT: packssdw %xmm8, %xmm7
; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm6
; SSE-NEXT: cmpltpd %xmm5, %xmm6
; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm5
; SSE-NEXT: cmpltpd %xmm4, %xmm5
; SSE-NEXT: packssdw %xmm6, %xmm5
; SSE-NEXT: packssdw %xmm7, %xmm5
; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm4
; SSE-NEXT: cmpltpd %xmm3, %xmm4
; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm3
; SSE-NEXT: cmpltpd %xmm2, %xmm3
; SSE-NEXT: packssdw %xmm4, %xmm3
; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm2
; SSE-NEXT: cmpltpd %xmm1, %xmm2
; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm1
; SSE-NEXT: cmpltpd %xmm0, %xmm1
; SSE-NEXT: packssdw %xmm2, %xmm1
; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm0
; SSE-NEXT: packssdw %xmm3, %xmm1
; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm2
; SSE-NEXT: packsswb %xmm5, %xmm1
; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm3
; SSE-NEXT: pmovmskb %xmm1, %ecx
; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm1
; SSE-NEXT: cmpltpd {{[0-9]+}}(%rsp), %xmm1
; SSE-NEXT: cmpltpd {{[0-9]+}}(%rsp), %xmm3
; SSE-NEXT: packssdw %xmm1, %xmm3
; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm1
; SSE-NEXT: cmpltpd {{[0-9]+}}(%rsp), %xmm1
; SSE-NEXT: cmpltpd {{[0-9]+}}(%rsp), %xmm2
; SSE-NEXT: packssdw %xmm1, %xmm2
; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm1
; SSE-NEXT: packssdw %xmm3, %xmm2
; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm3
; SSE-NEXT: cmpltpd {{[0-9]+}}(%rsp), %xmm3
; SSE-NEXT: cmpltpd {{[0-9]+}}(%rsp), %xmm1
; SSE-NEXT: packssdw %xmm3, %xmm1
; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm3
; SSE-NEXT: cmpltpd {{[0-9]+}}(%rsp), %xmm3
; SSE-NEXT: cmpltpd {{[0-9]+}}(%rsp), %xmm0
; SSE-NEXT: packssdw %xmm3, %xmm0
; SSE-NEXT: packssdw %xmm1, %xmm0
; SSE-NEXT: packsswb %xmm2, %xmm0
; SSE-NEXT: pmovmskb %xmm0, %edx
; SSE-NEXT: shll $16, %edx
; SSE-NEXT: orl %ecx, %edx
; SSE-NEXT: movl %edx, (%rdi)
; SSE-NEXT: retq
;
; AVX1-LABEL: test_cmp_v32f64:
; AVX1: # %bb.0:
; AVX1-NEXT: pushq %rbp
; AVX1-NEXT: movq %rsp, %rbp
; AVX1-NEXT: andq $-32, %rsp
; AVX1-NEXT: subq $32, %rsp
; AVX1-NEXT: vmovapd 16(%rbp), %ymm8
; AVX1-NEXT: vmovapd 48(%rbp), %ymm9
; AVX1-NEXT: vmovapd 80(%rbp), %ymm10
; AVX1-NEXT: vmovapd 112(%rbp), %ymm11
; AVX1-NEXT: vmovapd 144(%rbp), %ymm12
; AVX1-NEXT: vmovapd 176(%rbp), %ymm13
; AVX1-NEXT: vmovapd 208(%rbp), %ymm14
; AVX1-NEXT: vmovapd 240(%rbp), %ymm15
; AVX1-NEXT: vcmpltpd %ymm7, %ymm15, %ymm15
; AVX1-NEXT: vextractf128 $1, %ymm15, %xmm7
; AVX1-NEXT: vpackssdw %xmm7, %xmm15, %xmm15
; AVX1-NEXT: vcmpltpd %ymm6, %ymm14, %ymm6
; AVX1-NEXT: vextractf128 $1, %ymm6, %xmm7
; AVX1-NEXT: vpackssdw %xmm7, %xmm6, %xmm6
; AVX1-NEXT: vpackssdw %xmm15, %xmm6, %xmm6
; AVX1-NEXT: vcmpltpd %ymm5, %ymm13, %ymm5
; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm7
; AVX1-NEXT: vpackssdw %xmm7, %xmm5, %xmm5
; AVX1-NEXT: vcmpltpd %ymm4, %ymm12, %ymm4
; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm7
; AVX1-NEXT: vpackssdw %xmm7, %xmm4, %xmm4
; AVX1-NEXT: vpackssdw %xmm5, %xmm4, %xmm4
; AVX1-NEXT: vpacksswb %xmm6, %xmm4, %xmm4
; AVX1-NEXT: vcmpltpd %ymm3, %ymm11, %ymm3
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm5
; AVX1-NEXT: vpackssdw %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vcmpltpd %ymm2, %ymm10, %ymm2
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm5
; AVX1-NEXT: vpackssdw %xmm5, %xmm2, %xmm2
; AVX1-NEXT: vpackssdw %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vcmpltpd %ymm1, %ymm9, %ymm1
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpackssdw %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vcmpltpd %ymm0, %ymm8, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpackssdw %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpacksswb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
; AVX1-NEXT: movq %rbp, %rsp
; AVX1-NEXT: popq %rbp
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v32f64:
; AVX2: # %bb.0:
; AVX2-NEXT: pushq %rbp
; AVX2-NEXT: movq %rsp, %rbp
; AVX2-NEXT: andq $-32, %rsp
; AVX2-NEXT: subq $32, %rsp
; AVX2-NEXT: vmovapd 16(%rbp), %ymm8
; AVX2-NEXT: vmovapd 48(%rbp), %ymm9
; AVX2-NEXT: vmovapd 80(%rbp), %ymm10
; AVX2-NEXT: vmovapd 112(%rbp), %ymm11
; AVX2-NEXT: vmovapd 144(%rbp), %ymm12
; AVX2-NEXT: vmovapd 176(%rbp), %ymm13
; AVX2-NEXT: vmovapd 208(%rbp), %ymm14
; AVX2-NEXT: vmovapd 240(%rbp), %ymm15
; AVX2-NEXT: vcmpltpd %ymm7, %ymm15, %ymm7
; AVX2-NEXT: vcmpltpd %ymm6, %ymm14, %ymm6
; AVX2-NEXT: vpackssdw %ymm7, %ymm6, %ymm6
; AVX2-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,2,1,3]
; AVX2-NEXT: vcmpltpd %ymm5, %ymm13, %ymm5
; AVX2-NEXT: vcmpltpd %ymm4, %ymm12, %ymm4
; AVX2-NEXT: vpackssdw %ymm5, %ymm4, %ymm4
; AVX2-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,2,1,3]
; AVX2-NEXT: vpackssdw %ymm6, %ymm4, %ymm4
; AVX2-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,2,1,3]
; AVX2-NEXT: vcmpltpd %ymm3, %ymm11, %ymm3
; AVX2-NEXT: vcmpltpd %ymm2, %ymm10, %ymm2
; AVX2-NEXT: vpackssdw %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,1,3]
; AVX2-NEXT: vcmpltpd %ymm1, %ymm9, %ymm1
; AVX2-NEXT: vcmpltpd %ymm0, %ymm8, %ymm0
; AVX2-NEXT: vpackssdw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT: vpackssdw %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT: vpacksswb %ymm4, %ymm0, %ymm0
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT: movq %rbp, %rsp
; AVX2-NEXT: popq %rbp
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_cmp_v32f64:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vcmpltpd %zmm2, %zmm6, %k0
; AVX512F-NEXT: vcmpltpd %zmm3, %zmm7, %k1
; AVX512F-NEXT: kunpckbw %k0, %k1, %k1
; AVX512F-NEXT: vcmpltpd %zmm0, %zmm4, %k0
; AVX512F-NEXT: vcmpltpd %zmm1, %zmm5, %k2
; AVX512F-NEXT: kunpckbw %k0, %k2, %k2
; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k2} {z}
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
; AVX512F-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
; AVX512F-NEXT: vpmovdb %zmm1, %xmm1
; AVX512F-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX512F-NEXT: retq
;
; AVX512DQ-LABEL: test_cmp_v32f64:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vcmpltpd %zmm2, %zmm6, %k0
; AVX512DQ-NEXT: vcmpltpd %zmm3, %zmm7, %k1
; AVX512DQ-NEXT: kunpckbw %k0, %k1, %k0
; AVX512DQ-NEXT: vcmpltpd %zmm0, %zmm4, %k1
; AVX512DQ-NEXT: vcmpltpd %zmm1, %zmm5, %k2
; AVX512DQ-NEXT: kunpckbw %k1, %k2, %k1
; AVX512DQ-NEXT: vpmovm2d %k1, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
; AVX512DQ-NEXT: vpmovm2d %k0, %zmm1
; AVX512DQ-NEXT: vpmovdb %zmm1, %xmm1
; AVX512DQ-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: test_cmp_v32f64:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vcmpltpd %zmm0, %zmm4, %k0
; AVX512BW-NEXT: vcmpltpd %zmm1, %zmm5, %k1
; AVX512BW-NEXT: kunpckbw %k0, %k1, %k0
; AVX512BW-NEXT: vcmpltpd %zmm2, %zmm6, %k1
; AVX512BW-NEXT: vcmpltpd %zmm3, %zmm7, %k2
; AVX512BW-NEXT: kunpckbw %k1, %k2, %k1
; AVX512BW-NEXT: kunpckwd %k0, %k1, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512BW-NEXT: retq
%1 = fcmp ogt <32 x double> %a0, %a1
ret <32 x i1> %1
}
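; Only SSE4.2 and later have pcmpgtq, so the SSE2 path emulates each signed
; 64-bit compare: pxor flips the sign bit of the low dwords (making their
; signed pcmpgtd act unsigned) and the lanes are recombined as
; hi_gt | (hi_eq & lo_gt) via pcmpgtd/pcmpeqd/pshufd. SSE4.2 and AVX use
; pcmpgtq directly; AVX512 compares straight into k-registers and joins them
; with kunpckbw (and kunpckwd on AVX512BW).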
define <32 x i1> @test_cmp_v32i64(<32 x i64> %a0, <32 x i64> %a1) nounwind {
; SSE2-LABEL: test_cmp_v32i64:
; SSE2: # %bb.0:
; SSE2-NEXT: movq %rdi, %rax
; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [2147483648,2147483648]
; SSE2-NEXT: pxor %xmm8, %xmm7
; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm9
; SSE2-NEXT: pxor %xmm8, %xmm9
; SSE2-NEXT: movdqa %xmm7, %xmm10
; SSE2-NEXT: pcmpgtd %xmm9, %xmm10
; SSE2-NEXT: pshufd {{.*#+}} xmm11 = xmm10[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm7, %xmm9
; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm9[1,1,3,3]
; SSE2-NEXT: pand %xmm11, %xmm7
; SSE2-NEXT: pshufd {{.*#+}} xmm9 = xmm10[1,1,3,3]
; SSE2-NEXT: por %xmm7, %xmm9
; SSE2-NEXT: pxor %xmm8, %xmm6
; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm7
; SSE2-NEXT: pxor %xmm8, %xmm7
; SSE2-NEXT: movdqa %xmm6, %xmm10
; SSE2-NEXT: pcmpgtd %xmm7, %xmm10
; SSE2-NEXT: pshufd {{.*#+}} xmm11 = xmm10[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm6, %xmm7
; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm7[1,1,3,3]
; SSE2-NEXT: pand %xmm11, %xmm7
; SSE2-NEXT: pshufd {{.*#+}} xmm10 = xmm10[1,1,3,3]
; SSE2-NEXT: por %xmm7, %xmm10
; SSE2-NEXT: packssdw %xmm9, %xmm10
; SSE2-NEXT: pxor %xmm8, %xmm5
; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm7
; SSE2-NEXT: pxor %xmm8, %xmm7
; SSE2-NEXT: movdqa %xmm5, %xmm6
; SSE2-NEXT: pcmpgtd %xmm7, %xmm6
; SSE2-NEXT: pshufd {{.*#+}} xmm9 = xmm6[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm5, %xmm7
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm7[1,1,3,3]
; SSE2-NEXT: pand %xmm9, %xmm5
; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
; SSE2-NEXT: por %xmm5, %xmm6
; SSE2-NEXT: pxor %xmm8, %xmm4
; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm5
; SSE2-NEXT: pxor %xmm8, %xmm5
; SSE2-NEXT: movdqa %xmm4, %xmm7
; SSE2-NEXT: pcmpgtd %xmm5, %xmm7
; SSE2-NEXT: pshufd {{.*#+}} xmm9 = xmm7[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm4, %xmm5
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
; SSE2-NEXT: pand %xmm9, %xmm5
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm7[1,1,3,3]
; SSE2-NEXT: por %xmm5, %xmm4
; SSE2-NEXT: packssdw %xmm6, %xmm4
; SSE2-NEXT: packssdw %xmm10, %xmm4
; SSE2-NEXT: pxor %xmm8, %xmm3
; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm5
; SSE2-NEXT: pxor %xmm8, %xmm5
; SSE2-NEXT: movdqa %xmm3, %xmm6
; SSE2-NEXT: pcmpgtd %xmm5, %xmm6
; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm3, %xmm5
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm5[1,1,3,3]
; SSE2-NEXT: pand %xmm7, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm6[1,1,3,3]
; SSE2-NEXT: por %xmm3, %xmm5
; SSE2-NEXT: pxor %xmm8, %xmm2
; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm3
; SSE2-NEXT: pxor %xmm8, %xmm3
; SSE2-NEXT: movdqa %xmm2, %xmm6
; SSE2-NEXT: pcmpgtd %xmm3, %xmm6
; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm2, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; SSE2-NEXT: pand %xmm7, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm6[1,1,3,3]
; SSE2-NEXT: por %xmm3, %xmm2
; SSE2-NEXT: packssdw %xmm5, %xmm2
; SSE2-NEXT: pxor %xmm8, %xmm1
; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm3
; SSE2-NEXT: pxor %xmm8, %xmm3
; SSE2-NEXT: movdqa %xmm1, %xmm5
; SSE2-NEXT: pcmpgtd %xmm3, %xmm5
; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm1, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,1,3,3]
; SSE2-NEXT: pand %xmm6, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm5[1,1,3,3]
; SSE2-NEXT: por %xmm1, %xmm3
; SSE2-NEXT: pxor %xmm8, %xmm0
; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm1
; SSE2-NEXT: pxor %xmm8, %xmm1
; SSE2-NEXT: movdqa %xmm0, %xmm5
; SSE2-NEXT: pcmpgtd %xmm1, %xmm5
; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm0, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
; SSE2-NEXT: pand %xmm6, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm5[1,1,3,3]
; SSE2-NEXT: por %xmm0, %xmm1
; SSE2-NEXT: packssdw %xmm3, %xmm1
; SSE2-NEXT: packssdw %xmm2, %xmm1
; SSE2-NEXT: packsswb %xmm4, %xmm1
; SSE2-NEXT: pmovmskb %xmm1, %ecx
; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm0
; SSE2-NEXT: pxor %xmm8, %xmm0
; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm1
; SSE2-NEXT: pxor %xmm8, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: pcmpgtd %xmm0, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm0, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
; SSE2-NEXT: pand %xmm3, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE2-NEXT: por %xmm0, %xmm2
; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm0
; SSE2-NEXT: pxor %xmm8, %xmm0
; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm1
; SSE2-NEXT: pxor %xmm8, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm3
; SSE2-NEXT: pcmpgtd %xmm0, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm0, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
; SSE2-NEXT: pand %xmm4, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,1,3,3]
; SSE2-NEXT: por %xmm0, %xmm1
; SSE2-NEXT: packssdw %xmm2, %xmm1
; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm0
; SSE2-NEXT: pxor %xmm8, %xmm0
; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm2
; SSE2-NEXT: pxor %xmm8, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm3
; SSE2-NEXT: pcmpgtd %xmm0, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm0, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
; SSE2-NEXT: pand %xmm4, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[1,1,3,3]
; SSE2-NEXT: por %xmm0, %xmm2
; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm0
; SSE2-NEXT: pxor %xmm8, %xmm0
; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm3
; SSE2-NEXT: pxor %xmm8, %xmm3
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: pcmpgtd %xmm0, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm0, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; SSE2-NEXT: pand %xmm5, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
; SSE2-NEXT: por %xmm3, %xmm0
; SSE2-NEXT: packssdw %xmm2, %xmm0
; SSE2-NEXT: packssdw %xmm1, %xmm0
; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm1
; SSE2-NEXT: pxor %xmm8, %xmm1
; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm2
; SSE2-NEXT: pxor %xmm8, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm3
; SSE2-NEXT: pcmpgtd %xmm1, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm1, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
; SSE2-NEXT: pand %xmm4, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[1,1,3,3]
; SSE2-NEXT: por %xmm1, %xmm2
; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm1
; SSE2-NEXT: pxor %xmm8, %xmm1
; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm3
; SSE2-NEXT: pxor %xmm8, %xmm3
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: pcmpgtd %xmm1, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm1, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; SSE2-NEXT: pand %xmm5, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm4[1,1,3,3]
; SSE2-NEXT: por %xmm3, %xmm1
; SSE2-NEXT: packssdw %xmm2, %xmm1
; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm2
; SSE2-NEXT: pxor %xmm8, %xmm2
; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm3
; SSE2-NEXT: pxor %xmm8, %xmm3
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: pcmpgtd %xmm2, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm2, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[1,1,3,3]
; SSE2-NEXT: pand %xmm5, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
; SSE2-NEXT: por %xmm2, %xmm3
; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm2
; SSE2-NEXT: pxor %xmm8, %xmm2
; SSE2-NEXT: pxor {{[0-9]+}}(%rsp), %xmm8
; SSE2-NEXT: movdqa %xmm8, %xmm4
; SSE2-NEXT: pcmpgtd %xmm2, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm2, %xmm8
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm8[1,1,3,3]
; SSE2-NEXT: pand %xmm5, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
; SSE2-NEXT: por %xmm2, %xmm4
; SSE2-NEXT: packssdw %xmm3, %xmm4
; SSE2-NEXT: packssdw %xmm1, %xmm4
; SSE2-NEXT: packsswb %xmm0, %xmm4
; SSE2-NEXT: pmovmskb %xmm4, %edx
; SSE2-NEXT: shll $16, %edx
; SSE2-NEXT: orl %ecx, %edx
; SSE2-NEXT: movl %edx, (%rdi)
; SSE2-NEXT: retq
;
; SSE42-LABEL: test_cmp_v32i64:
; SSE42: # %bb.0:
; SSE42-NEXT: movq %rdi, %rax
; SSE42-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8
; SSE42-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm11
; SSE42-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm10
; SSE42-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm12
; SSE42-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm9
; SSE42-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm14
; SSE42-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm13
; SSE42-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm15
; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm7
; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm6
; SSE42-NEXT: packssdw %xmm7, %xmm6
; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm5
; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm4
; SSE42-NEXT: packssdw %xmm5, %xmm4
; SSE42-NEXT: packssdw %xmm6, %xmm4
; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm3
; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm2
; SSE42-NEXT: packssdw %xmm3, %xmm2
; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm1
; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm0
; SSE42-NEXT: packssdw %xmm1, %xmm0
; SSE42-NEXT: packssdw %xmm2, %xmm0
; SSE42-NEXT: packsswb %xmm4, %xmm0
; SSE42-NEXT: pmovmskb %xmm0, %ecx
; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm15
; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm13
; SSE42-NEXT: packssdw %xmm15, %xmm13
; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm14
; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm9
; SSE42-NEXT: packssdw %xmm14, %xmm9
; SSE42-NEXT: packssdw %xmm13, %xmm9
; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm12
; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm10
; SSE42-NEXT: packssdw %xmm12, %xmm10
; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm11
; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm8
; SSE42-NEXT: packssdw %xmm11, %xmm8
; SSE42-NEXT: packssdw %xmm10, %xmm8
; SSE42-NEXT: packsswb %xmm9, %xmm8
; SSE42-NEXT: pmovmskb %xmm8, %edx
; SSE42-NEXT: shll $16, %edx
; SSE42-NEXT: orl %ecx, %edx
; SSE42-NEXT: movl %edx, (%rdi)
; SSE42-NEXT: retq
;
; AVX1-LABEL: test_cmp_v32i64:
; AVX1: # %bb.0:
; AVX1-NEXT: pushq %rbp
; AVX1-NEXT: movq %rsp, %rbp
; AVX1-NEXT: andq $-32, %rsp
; AVX1-NEXT: subq $32, %rsp
; AVX1-NEXT: vextractf128 $1, %ymm7, %xmm8
; AVX1-NEXT: vpcmpgtq 256(%rbp), %xmm8, %xmm8
; AVX1-NEXT: vpcmpgtq 240(%rbp), %xmm7, %xmm7
; AVX1-NEXT: vpackssdw %xmm8, %xmm7, %xmm8
; AVX1-NEXT: vextractf128 $1, %ymm6, %xmm7
; AVX1-NEXT: vpcmpgtq 224(%rbp), %xmm7, %xmm7
; AVX1-NEXT: vpcmpgtq 208(%rbp), %xmm6, %xmm6
; AVX1-NEXT: vpackssdw %xmm7, %xmm6, %xmm6
; AVX1-NEXT: vpackssdw %xmm8, %xmm6, %xmm6
; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm7
; AVX1-NEXT: vpcmpgtq 192(%rbp), %xmm7, %xmm7
; AVX1-NEXT: vpcmpgtq 176(%rbp), %xmm5, %xmm5
; AVX1-NEXT: vpackssdw %xmm7, %xmm5, %xmm5
; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm7
; AVX1-NEXT: vpcmpgtq 160(%rbp), %xmm7, %xmm7
; AVX1-NEXT: vpcmpgtq 144(%rbp), %xmm4, %xmm4
; AVX1-NEXT: vpackssdw %xmm7, %xmm4, %xmm4
; AVX1-NEXT: vpackssdw %xmm5, %xmm4, %xmm4
; AVX1-NEXT: vpacksswb %xmm6, %xmm4, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm5
; AVX1-NEXT: vpcmpgtq 128(%rbp), %xmm5, %xmm5
; AVX1-NEXT: vpcmpgtq 112(%rbp), %xmm3, %xmm3
; AVX1-NEXT: vpackssdw %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm5
; AVX1-NEXT: vpcmpgtq 96(%rbp), %xmm5, %xmm5
; AVX1-NEXT: vpcmpgtq 80(%rbp), %xmm2, %xmm2
; AVX1-NEXT: vpackssdw %xmm5, %xmm2, %xmm2
; AVX1-NEXT: vpackssdw %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpcmpgtq 64(%rbp), %xmm3, %xmm3
; AVX1-NEXT: vpcmpgtq 48(%rbp), %xmm1, %xmm1
; AVX1-NEXT: vpackssdw %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpgtq 32(%rbp), %xmm3, %xmm3
; AVX1-NEXT: vpcmpgtq 16(%rbp), %xmm0, %xmm0
; AVX1-NEXT: vpackssdw %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpacksswb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
; AVX1-NEXT: movq %rbp, %rsp
; AVX1-NEXT: popq %rbp
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v32i64:
; AVX2: # %bb.0:
; AVX2-NEXT: pushq %rbp
; AVX2-NEXT: movq %rsp, %rbp
; AVX2-NEXT: andq $-32, %rsp
; AVX2-NEXT: subq $32, %rsp
; AVX2-NEXT: vpcmpgtq 240(%rbp), %ymm7, %ymm7
; AVX2-NEXT: vpcmpgtq 208(%rbp), %ymm6, %ymm6
; AVX2-NEXT: vpackssdw %ymm7, %ymm6, %ymm6
; AVX2-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,2,1,3]
; AVX2-NEXT: vpcmpgtq 176(%rbp), %ymm5, %ymm5
; AVX2-NEXT: vpcmpgtq 144(%rbp), %ymm4, %ymm4
; AVX2-NEXT: vpackssdw %ymm5, %ymm4, %ymm4
; AVX2-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,2,1,3]
; AVX2-NEXT: vpackssdw %ymm6, %ymm4, %ymm4
; AVX2-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,2,1,3]
; AVX2-NEXT: vpcmpgtq 112(%rbp), %ymm3, %ymm3
; AVX2-NEXT: vpcmpgtq 80(%rbp), %ymm2, %ymm2
; AVX2-NEXT: vpackssdw %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,1,3]
; AVX2-NEXT: vpcmpgtq 48(%rbp), %ymm1, %ymm1
; AVX2-NEXT: vpcmpgtq 16(%rbp), %ymm0, %ymm0
; AVX2-NEXT: vpackssdw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT: vpackssdw %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT: vpacksswb %ymm4, %ymm0, %ymm0
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT: movq %rbp, %rsp
; AVX2-NEXT: popq %rbp
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_cmp_v32i64:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpcmpgtq %zmm6, %zmm2, %k0
; AVX512F-NEXT: vpcmpgtq %zmm7, %zmm3, %k1
; AVX512F-NEXT: kunpckbw %k0, %k1, %k1
; AVX512F-NEXT: vpcmpgtq %zmm4, %zmm0, %k0
; AVX512F-NEXT: vpcmpgtq %zmm5, %zmm1, %k2
; AVX512F-NEXT: kunpckbw %k0, %k2, %k2
; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k2} {z}
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
; AVX512F-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
; AVX512F-NEXT: vpmovdb %zmm1, %xmm1
; AVX512F-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX512F-NEXT: retq
;
; AVX512DQ-LABEL: test_cmp_v32i64:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpcmpgtq %zmm6, %zmm2, %k0
; AVX512DQ-NEXT: vpcmpgtq %zmm7, %zmm3, %k1
; AVX512DQ-NEXT: kunpckbw %k0, %k1, %k0
; AVX512DQ-NEXT: vpcmpgtq %zmm4, %zmm0, %k1
; AVX512DQ-NEXT: vpcmpgtq %zmm5, %zmm1, %k2
; AVX512DQ-NEXT: kunpckbw %k1, %k2, %k1
; AVX512DQ-NEXT: vpmovm2d %k1, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
; AVX512DQ-NEXT: vpmovm2d %k0, %zmm1
; AVX512DQ-NEXT: vpmovdb %zmm1, %xmm1
; AVX512DQ-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: test_cmp_v32i64:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtq %zmm4, %zmm0, %k0
; AVX512BW-NEXT: vpcmpgtq %zmm5, %zmm1, %k1
; AVX512BW-NEXT: kunpckbw %k0, %k1, %k0
; AVX512BW-NEXT: vpcmpgtq %zmm6, %zmm2, %k1
; AVX512BW-NEXT: vpcmpgtq %zmm7, %zmm3, %k2
; AVX512BW-NEXT: kunpckbw %k1, %k2, %k1
; AVX512BW-NEXT: kunpckwd %k0, %k1, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512BW-NEXT: retq
%1 = icmp sgt <32 x i64> %a0, %a1
ret <32 x i1> %1
}