; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE41
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512VL
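; Each function below reduces its input vector to the minimum unsigned element
; via the llvm.experimental.vector.reduce.umin.* intrinsics; the RUN lines above
; exercise the SSE2, SSE4.1, AVX, AVX2 and AVX-512 lowerings of that reduction.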
;
; vXi64
;
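; There is no unsigned 64-bit minimum instruction before AVX-512 (vpminuq), so
; the older lowerings below flip the operands' sign bits (pxor with
; 9223372036854775808, or with 9223372039002259456 when the 64-bit compare is
; itself built out of pcmpgtd/pcmpeqd) and then do a signed compare followed by
; a select - pand/pandn/por on SSE2, (v)blendvpd on SSE4.1 and AVX.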
define i64 @test_v2i64(<2 x i64> %a0) {
; SSE2-LABEL: test_v2i64:
; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [9223372039002259456,9223372039002259456]
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: pxor %xmm2, %xmm3
; SSE2-NEXT: pxor %xmm1, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm4
; SSE2-NEXT: pcmpgtd %xmm3, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm3, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE2-NEXT: pand %xmm5, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
; SSE2-NEXT: por %xmm2, %xmm3
; SSE2-NEXT: pand %xmm3, %xmm0
; SSE2-NEXT: pandn %xmm1, %xmm3
; SSE2-NEXT: por %xmm0, %xmm3
; SSE2-NEXT: movq %xmm3, %rax
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v2i64:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [9223372039002259456,9223372039002259456]
; SSE41-NEXT: pxor %xmm3, %xmm0
; SSE41-NEXT: pxor %xmm2, %xmm3
; SSE41-NEXT: movdqa %xmm3, %xmm4
; SSE41-NEXT: pcmpeqd %xmm0, %xmm4
; SSE41-NEXT: pcmpgtd %xmm0, %xmm3
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,0,2,2]
; SSE41-NEXT: pand %xmm4, %xmm0
; SSE41-NEXT: por %xmm3, %xmm0
; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm2
; SSE41-NEXT: movq %xmm2, %rax
; SSE41-NEXT: retq
;
; AVX-LABEL: test_v2i64:
; AVX: # %bb.0:
; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX-NEXT: vpxor %xmm2, %xmm0, %xmm3
; AVX-NEXT: vpxor %xmm2, %xmm1, %xmm2
; AVX-NEXT: vpcmpgtq %xmm3, %xmm2, %xmm2
; AVX-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
; AVX-NEXT: vmovq %xmm0, %rax
; AVX-NEXT: retq
;
; AVX512BW-LABEL: test_v2i64:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512BW-NEXT: vpminuq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vmovq %xmm0, %rax
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: test_v2i64:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512VL-NEXT: vpminuq %xmm1, %xmm0, %xmm0
; AVX512VL-NEXT: vmovq %xmm0, %rax
; AVX512VL-NEXT: retq
%1 = call i64 @llvm.experimental.vector.reduce.umin.i64.v2i64(<2 x i64> %a0)
ret i64 %1
}
define i64 @test_v4i64(<4 x i64> %a0) {
; SSE2-LABEL: test_v4i64:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [9223372039002259456,9223372039002259456]
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: pxor %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: pxor %xmm2, %xmm4
; SSE2-NEXT: movdqa %xmm4, %xmm5
; SSE2-NEXT: pcmpgtd %xmm3, %xmm5
; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm3, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
; SSE2-NEXT: pand %xmm6, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm5[1,1,3,3]
; SSE2-NEXT: por %xmm3, %xmm4
; SSE2-NEXT: pand %xmm4, %xmm0
; SSE2-NEXT: pandn %xmm1, %xmm4
; SSE2-NEXT: por %xmm0, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm4[2,3,0,1]
; SSE2-NEXT: movdqa %xmm4, %xmm1
; SSE2-NEXT: pxor %xmm2, %xmm1
; SSE2-NEXT: pxor %xmm0, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm3
; SSE2-NEXT: pcmpgtd %xmm1, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm3[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm1, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
; SSE2-NEXT: pand %xmm5, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[1,1,3,3]
; SSE2-NEXT: por %xmm1, %xmm2
; SSE2-NEXT: pand %xmm2, %xmm4
; SSE2-NEXT: pandn %xmm0, %xmm2
; SSE2-NEXT: por %xmm4, %xmm2
; SSE2-NEXT: movq %xmm2, %rax
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v4i64:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [9223372039002259456,9223372039002259456]
; SSE41-NEXT: pxor %xmm3, %xmm0
; SSE41-NEXT: movdqa %xmm1, %xmm4
; SSE41-NEXT: pxor %xmm3, %xmm4
; SSE41-NEXT: movdqa %xmm4, %xmm5
; SSE41-NEXT: pcmpeqd %xmm0, %xmm5
; SSE41-NEXT: pcmpgtd %xmm0, %xmm4
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm4[0,0,2,2]
; SSE41-NEXT: pand %xmm5, %xmm0
; SSE41-NEXT: por %xmm4, %xmm0
; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm1
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: pxor %xmm3, %xmm0
; SSE41-NEXT: pxor %xmm2, %xmm3
; SSE41-NEXT: movdqa %xmm3, %xmm4
; SSE41-NEXT: pcmpeqd %xmm0, %xmm4
; SSE41-NEXT: pcmpgtd %xmm0, %xmm3
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,0,2,2]
; SSE41-NEXT: pand %xmm4, %xmm0
; SSE41-NEXT: por %xmm3, %xmm0
; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm2
; SSE41-NEXT: movq %xmm2, %rax
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v4i64:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm3
; AVX1-NEXT: vpxor %xmm2, %xmm1, %xmm4
; AVX1-NEXT: vpcmpgtq %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm4, %xmm0, %xmm4
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT: vblendvpd %ymm3, %ymm0, %ymm1, %ymm0
; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX1-NEXT: vxorpd %xmm2, %xmm0, %xmm3
; AVX1-NEXT: vxorpd %xmm2, %xmm1, %xmm2
; AVX1-NEXT: vpcmpgtq %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; AVX1-NEXT: vmovq %xmm0, %rax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v4i64:
; AVX2: # %bb.0:
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm3
; AVX2-NEXT: vpxor %ymm2, %ymm1, %ymm4
; AVX2-NEXT: vpcmpgtq %ymm3, %ymm4, %ymm3
; AVX2-NEXT: vblendvpd %ymm3, %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX2-NEXT: vxorpd %ymm2, %ymm0, %ymm3
; AVX2-NEXT: vxorpd %ymm2, %ymm1, %ymm2
; AVX2-NEXT: vpcmpgtq %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vmovq %xmm0, %rax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test_v4i64:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512BW-NEXT: vpminuq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512BW-NEXT: vpminuq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vmovq %xmm0, %rax
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: test_v4i64:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512VL-NEXT: vpminuq %ymm1, %ymm0, %ymm0
; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512VL-NEXT: vpminuq %ymm1, %ymm0, %ymm0
; AVX512VL-NEXT: vmovq %xmm0, %rax
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
%1 = call i64 @llvm.experimental.vector.reduce.umin.i64.v4i64(<4 x i64> %a0)
ret i64 %1
}
define i64 @test_v8i64(<8 x i64> %a0) {
|
|
|
|
; SSE2-LABEL: test_v8i64:
|
|
|
|
; SSE2: # %bb.0:
|
2018-10-10 03:05:50 +08:00
|
|
|
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [9223372039002259456,9223372039002259456]
|
2018-04-06 01:25:40 +08:00
|
|
|
; SSE2-NEXT: movdqa %xmm1, %xmm5
|
|
|
|
; SSE2-NEXT: pxor %xmm4, %xmm5
|
|
|
|
; SSE2-NEXT: movdqa %xmm3, %xmm6
|
|
|
|
; SSE2-NEXT: pxor %xmm4, %xmm6
|
|
|
|
; SSE2-NEXT: movdqa %xmm6, %xmm7
|
|
|
|
; SSE2-NEXT: pcmpgtd %xmm5, %xmm7
|
|
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm7[0,0,2,2]
|
|
|
|
; SSE2-NEXT: pcmpeqd %xmm5, %xmm6
|
|
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
|
|
|
|
; SSE2-NEXT: pand %xmm8, %xmm6
|
|
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm7[1,1,3,3]
|
|
|
|
; SSE2-NEXT: por %xmm6, %xmm5
|
|
|
|
; SSE2-NEXT: pand %xmm5, %xmm1
|
|
|
|
; SSE2-NEXT: pandn %xmm3, %xmm5
|
|
|
|
; SSE2-NEXT: por %xmm1, %xmm5
|
|
|
|
; SSE2-NEXT: movdqa %xmm0, %xmm1
|
|
|
|
; SSE2-NEXT: pxor %xmm4, %xmm1
|
|
|
|
; SSE2-NEXT: movdqa %xmm2, %xmm3
|
|
|
|
; SSE2-NEXT: pxor %xmm4, %xmm3
|
|
|
|
; SSE2-NEXT: movdqa %xmm3, %xmm6
|
|
|
|
; SSE2-NEXT: pcmpgtd %xmm1, %xmm6
|
|
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
|
|
|
|
; SSE2-NEXT: pcmpeqd %xmm1, %xmm3
|
|
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,1,3,3]
|
|
|
|
; SSE2-NEXT: pand %xmm7, %xmm1
|
|
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm6[1,1,3,3]
|
|
|
|
; SSE2-NEXT: por %xmm1, %xmm3
|
|
|
|
; SSE2-NEXT: pand %xmm3, %xmm0
|
|
|
|
; SSE2-NEXT: pandn %xmm2, %xmm3
|
|
|
|
; SSE2-NEXT: por %xmm0, %xmm3
|
|
|
|
; SSE2-NEXT: movdqa %xmm3, %xmm0
|
|
|
|
; SSE2-NEXT: pxor %xmm4, %xmm0
|
|
|
|
; SSE2-NEXT: movdqa %xmm5, %xmm1
|
|
|
|
; SSE2-NEXT: pxor %xmm4, %xmm1
|
|
|
|
; SSE2-NEXT: movdqa %xmm1, %xmm2
|
|
|
|
; SSE2-NEXT: pcmpgtd %xmm0, %xmm2
|
|
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm2[0,0,2,2]
|
|
|
|
; SSE2-NEXT: pcmpeqd %xmm0, %xmm1
|
|
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
|
|
|
|
; SSE2-NEXT: pand %xmm6, %xmm0
|
|
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
|
|
|
|
; SSE2-NEXT: por %xmm0, %xmm1
|
|
|
|
; SSE2-NEXT: pand %xmm1, %xmm3
|
|
|
|
; SSE2-NEXT: pandn %xmm5, %xmm1
|
|
|
|
; SSE2-NEXT: por %xmm3, %xmm1
|
|
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
|
|
|
|
; SSE2-NEXT: movdqa %xmm1, %xmm2
|
|
|
|
; SSE2-NEXT: pxor %xmm4, %xmm2
|
|
|
|
; SSE2-NEXT: pxor %xmm0, %xmm4
|
|
|
|
; SSE2-NEXT: movdqa %xmm4, %xmm3
|
|
|
|
; SSE2-NEXT: pcmpgtd %xmm2, %xmm3
|
|
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm3[0,0,2,2]
|
|
|
|
; SSE2-NEXT: pcmpeqd %xmm2, %xmm4
|
|
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm4[1,1,3,3]
|
|
|
|
; SSE2-NEXT: pand %xmm5, %xmm2
|
|
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
|
|
|
|
; SSE2-NEXT: por %xmm2, %xmm3
|
|
|
|
; SSE2-NEXT: pand %xmm3, %xmm1
|
|
|
|
; SSE2-NEXT: pandn %xmm0, %xmm3
|
|
|
|
; SSE2-NEXT: por %xmm1, %xmm3
|
|
|
|
; SSE2-NEXT: movq %xmm3, %rax
|
|
|
|
; SSE2-NEXT: retq
|
|
|
|
;
|
|
|
|
; SSE41-LABEL: test_v8i64:
|
|
|
|
; SSE41: # %bb.0:
|
2019-03-26 23:08:14 +08:00
|
|
|
; SSE41-NEXT: movdqa %xmm0, %xmm4
|
2018-10-10 03:05:50 +08:00
|
|
|
; SSE41-NEXT: movdqa {{.*#+}} xmm5 = [9223372039002259456,9223372039002259456]
|
2018-04-06 01:25:40 +08:00
|
|
|
; SSE41-NEXT: movdqa %xmm1, %xmm0
|
|
|
|
; SSE41-NEXT: pxor %xmm5, %xmm0
|
|
|
|
; SSE41-NEXT: movdqa %xmm3, %xmm6
|
|
|
|
; SSE41-NEXT: pxor %xmm5, %xmm6
|
|
|
|
; SSE41-NEXT: movdqa %xmm6, %xmm7
|
2019-03-26 23:08:14 +08:00
|
|
|
; SSE41-NEXT: pcmpeqd %xmm0, %xmm7
|
|
|
|
; SSE41-NEXT: pcmpgtd %xmm0, %xmm6
|
|
|
|
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm6[0,0,2,2]
|
|
|
|
; SSE41-NEXT: pand %xmm7, %xmm0
|
|
|
|
; SSE41-NEXT: por %xmm6, %xmm0
|
2018-04-06 01:25:40 +08:00
|
|
|
; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm3
|
2019-03-26 23:08:14 +08:00
|
|
|
; SSE41-NEXT: movdqa %xmm4, %xmm0
|
2018-04-06 01:25:40 +08:00
|
|
|
; SSE41-NEXT: pxor %xmm5, %xmm0
|
|
|
|
; SSE41-NEXT: movdqa %xmm2, %xmm1
|
|
|
|
; SSE41-NEXT: pxor %xmm5, %xmm1
|
2019-03-26 23:08:14 +08:00
|
|
|
; SSE41-NEXT: movdqa %xmm1, %xmm6
|
|
|
|
; SSE41-NEXT: pcmpeqd %xmm0, %xmm6
|
|
|
|
; SSE41-NEXT: pcmpgtd %xmm0, %xmm1
|
|
|
|
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,0,2,2]
|
2018-12-15 19:36:36 +08:00
|
|
|
; SSE41-NEXT: pand %xmm6, %xmm0
|
2019-03-26 23:08:14 +08:00
|
|
|
; SSE41-NEXT: por %xmm1, %xmm0
|
|
|
|
; SSE41-NEXT: blendvpd %xmm0, %xmm4, %xmm2
|
2018-04-06 01:25:40 +08:00
|
|
|
; SSE41-NEXT: movapd %xmm2, %xmm0
|
|
|
|
; SSE41-NEXT: xorpd %xmm5, %xmm0
|
|
|
|
; SSE41-NEXT: movapd %xmm3, %xmm1
|
|
|
|
; SSE41-NEXT: xorpd %xmm5, %xmm1
|
|
|
|
; SSE41-NEXT: movapd %xmm1, %xmm4
|
2019-03-26 23:08:14 +08:00
|
|
|
; SSE41-NEXT: pcmpeqd %xmm0, %xmm4
|
|
|
|
; SSE41-NEXT: pcmpgtd %xmm0, %xmm1
|
|
|
|
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,0,2,2]
|
|
|
|
; SSE41-NEXT: pand %xmm4, %xmm0
|
|
|
|
; SSE41-NEXT: por %xmm1, %xmm0
|
2018-04-06 01:25:40 +08:00
|
|
|
; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm3
|
|
|
|
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm3[2,3,0,1]
|
|
|
|
; SSE41-NEXT: movdqa %xmm3, %xmm0
|
|
|
|
; SSE41-NEXT: pxor %xmm5, %xmm0
|
|
|
|
; SSE41-NEXT: pxor %xmm1, %xmm5
|
|
|
|
; SSE41-NEXT: movdqa %xmm5, %xmm2
|
2019-03-26 23:08:14 +08:00
|
|
|
; SSE41-NEXT: pcmpeqd %xmm0, %xmm2
|
|
|
|
; SSE41-NEXT: pcmpgtd %xmm0, %xmm5
|
|
|
|
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm5[0,0,2,2]
|
|
|
|
; SSE41-NEXT: pand %xmm2, %xmm0
|
|
|
|
; SSE41-NEXT: por %xmm5, %xmm0
|
2018-04-06 01:25:40 +08:00
|
|
|
; SSE41-NEXT: blendvpd %xmm0, %xmm3, %xmm1
|
|
|
|
; SSE41-NEXT: movq %xmm1, %rax
|
|
|
|
; SSE41-NEXT: retq
|
|
|
|
;
|
|
|
|
; AVX1-LABEL: test_v8i64:
|
|
|
|
; AVX1: # %bb.0:
|
|
|
|
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
|
|
|
|
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
|
|
|
|
; AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm2
|
|
|
|
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
|
|
|
|
; AVX1-NEXT: vpxor %xmm3, %xmm4, %xmm4
|
|
|
|
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm4, %xmm2
|
|
|
|
; AVX1-NEXT: vpxor %xmm3, %xmm0, %xmm4
|
|
|
|
; AVX1-NEXT: vpxor %xmm3, %xmm1, %xmm5
|
|
|
|
; AVX1-NEXT: vpcmpgtq %xmm4, %xmm5, %xmm4
|
|
|
|
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm4, %ymm2
|
|
|
|
; AVX1-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
|
|
|
|
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
|
|
|
|
; AVX1-NEXT: vxorpd %xmm3, %xmm0, %xmm2
|
|
|
|
; AVX1-NEXT: vpxor %xmm3, %xmm1, %xmm4
|
|
|
|
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm4, %xmm2
|
|
|
|
; AVX1-NEXT: vpcmpgtq %xmm4, %xmm0, %xmm4
|
|
|
|
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm2, %ymm2
|
|
|
|
; AVX1-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
|
|
|
|
; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
|
|
|
|
; AVX1-NEXT: vxorpd %xmm3, %xmm0, %xmm2
|
2018-09-09 22:13:22 +08:00
|
|
|
; AVX1-NEXT: vxorpd %xmm3, %xmm1, %xmm3
|
|
|
|
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
|
2018-04-06 01:25:40 +08:00
|
|
|
; AVX1-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
|
|
|
|
; AVX1-NEXT: vmovq %xmm0, %rax
|
|
|
|
; AVX1-NEXT: vzeroupper
|
|
|
|
; AVX1-NEXT: retq
|
|
|
|
;
|
|
|
|
; AVX2-LABEL: test_v8i64:
|
|
|
|
; AVX2: # %bb.0:
|
|
|
|
; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
|
|
|
|
; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm3
|
|
|
|
; AVX2-NEXT: vpxor %ymm2, %ymm1, %ymm4
|
|
|
|
; AVX2-NEXT: vpcmpgtq %ymm3, %ymm4, %ymm3
|
|
|
|
; AVX2-NEXT: vblendvpd %ymm3, %ymm0, %ymm1, %ymm0
|
|
|
|
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1
|
|
|
|
; AVX2-NEXT: vxorpd %ymm2, %ymm0, %ymm3
|
|
|
|
; AVX2-NEXT: vxorpd %ymm2, %ymm1, %ymm4
|
|
|
|
; AVX2-NEXT: vpcmpgtq %ymm3, %ymm4, %ymm3
|
|
|
|
; AVX2-NEXT: vblendvpd %ymm3, %ymm0, %ymm1, %ymm0
|
|
|
|
; AVX2-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
|
|
|
|
; AVX2-NEXT: vxorpd %ymm2, %ymm0, %ymm3
|
|
|
|
; AVX2-NEXT: vxorpd %ymm2, %ymm1, %ymm2
|
|
|
|
; AVX2-NEXT: vpcmpgtq %ymm3, %ymm2, %ymm2
|
|
|
|
; AVX2-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
|
|
|
|
; AVX2-NEXT: vmovq %xmm0, %rax
|
|
|
|
; AVX2-NEXT: vzeroupper
|
|
|
|
; AVX2-NEXT: retq
|
|
|
|
;
|
|
|
|
; AVX512-LABEL: test_v8i64:
|
|
|
|
; AVX512: # %bb.0:
|
|
|
|
; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
|
|
|
|
; AVX512-NEXT: vpminuq %zmm1, %zmm0, %zmm0
|
|
|
|
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
|
|
|
|
; AVX512-NEXT: vpminuq %zmm1, %zmm0, %zmm0
|
|
|
|
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
|
|
|
|
; AVX512-NEXT: vpminuq %zmm1, %zmm0, %zmm0
|
|
|
|
; AVX512-NEXT: vmovq %xmm0, %rax
|
|
|
|
; AVX512-NEXT: vzeroupper
|
|
|
|
; AVX512-NEXT: retq
|
|
|
|
%1 = call i64 @llvm.experimental.vector.reduce.umin.i64.v8i64(<8 x i64> %a0)
|
|
|
|
ret i64 %1
|
|
|
|
}
|
|
|
|
|
|
|
|
define i64 @test_v16i64(<16 x i64> %a0) {
|
|
|
|
; SSE2-LABEL: test_v16i64:
|
|
|
|
; SSE2: # %bb.0:
|
2018-10-10 03:05:50 +08:00
|
|
|
; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [9223372039002259456,9223372039002259456]
|
2018-04-06 01:25:40 +08:00
|
|
|
; SSE2-NEXT: movdqa %xmm2, %xmm9
|
|
|
|
; SSE2-NEXT: pxor %xmm8, %xmm9
|
|
|
|
; SSE2-NEXT: movdqa %xmm6, %xmm10
|
|
|
|
; SSE2-NEXT: pxor %xmm8, %xmm10
|
|
|
|
; SSE2-NEXT: movdqa %xmm10, %xmm11
|
|
|
|
; SSE2-NEXT: pcmpgtd %xmm9, %xmm11
|
|
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm12 = xmm11[0,0,2,2]
|
|
|
|
; SSE2-NEXT: pcmpeqd %xmm9, %xmm10
|
|
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm10 = xmm10[1,1,3,3]
|
|
|
|
; SSE2-NEXT: pand %xmm12, %xmm10
|
|
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm9 = xmm11[1,1,3,3]
|
|
|
|
; SSE2-NEXT: por %xmm10, %xmm9
|
|
|
|
; SSE2-NEXT: pand %xmm9, %xmm2
|
|
|
|
; SSE2-NEXT: pandn %xmm6, %xmm9
|
|
|
|
; SSE2-NEXT: por %xmm2, %xmm9
|
|
|
|
; SSE2-NEXT: movdqa %xmm0, %xmm2
|
|
|
|
; SSE2-NEXT: pxor %xmm8, %xmm2
|
|
|
|
; SSE2-NEXT: movdqa %xmm4, %xmm6
|
|
|
|
; SSE2-NEXT: pxor %xmm8, %xmm6
|
|
|
|
; SSE2-NEXT: movdqa %xmm6, %xmm10
|
|
|
|
; SSE2-NEXT: pcmpgtd %xmm2, %xmm10
|
|
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm11 = xmm10[0,0,2,2]
|
|
|
|
; SSE2-NEXT: pcmpeqd %xmm2, %xmm6
|
|
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
|
|
|
|
; SSE2-NEXT: pand %xmm11, %xmm6
|
|
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm10[1,1,3,3]
|
|
|
|
; SSE2-NEXT: por %xmm6, %xmm2
|
|
|
|
; SSE2-NEXT: pand %xmm2, %xmm0
|
|
|
|
; SSE2-NEXT: pandn %xmm4, %xmm2
|
|
|
|
; SSE2-NEXT: por %xmm0, %xmm2
|
|
|
|
; SSE2-NEXT: movdqa %xmm3, %xmm0
|
|
|
|
; SSE2-NEXT: pxor %xmm8, %xmm0
|
|
|
|
; SSE2-NEXT: movdqa %xmm7, %xmm4
|
|
|
|
; SSE2-NEXT: pxor %xmm8, %xmm4
|
|
|
|
; SSE2-NEXT: movdqa %xmm4, %xmm6
|
|
|
|
; SSE2-NEXT: pcmpgtd %xmm0, %xmm6
|
|
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm10 = xmm6[0,0,2,2]
|
|
|
|
; SSE2-NEXT: pcmpeqd %xmm0, %xmm4
|
|
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
|
|
|
|
; SSE2-NEXT: pand %xmm10, %xmm4
|
|
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm6[1,1,3,3]
|
|
|
|
; SSE2-NEXT: por %xmm4, %xmm0
|
|
|
|
; SSE2-NEXT: pand %xmm0, %xmm3
|
|
|
|
; SSE2-NEXT: pandn %xmm7, %xmm0
|
|
|
|
; SSE2-NEXT: por %xmm3, %xmm0
|
|
|
|
; SSE2-NEXT: movdqa %xmm1, %xmm3
|
|
|
|
; SSE2-NEXT: pxor %xmm8, %xmm3
|
|
|
|
; SSE2-NEXT: movdqa %xmm5, %xmm4
|
|
|
|
; SSE2-NEXT: pxor %xmm8, %xmm4
|
|
|
|
; SSE2-NEXT: movdqa %xmm4, %xmm6
|
|
|
|
; SSE2-NEXT: pcmpgtd %xmm3, %xmm6
|
|
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
|
|
|
|
; SSE2-NEXT: pcmpeqd %xmm3, %xmm4
|
|
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
|
|
|
|
; SSE2-NEXT: pand %xmm7, %xmm3
|
|
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm6[1,1,3,3]
|
|
|
|
; SSE2-NEXT: por %xmm3, %xmm4
|
|
|
|
; SSE2-NEXT: pand %xmm4, %xmm1
|
|
|
|
; SSE2-NEXT: pandn %xmm5, %xmm4
|
|
|
|
; SSE2-NEXT: por %xmm1, %xmm4
|
|
|
|
; SSE2-NEXT: movdqa %xmm4, %xmm1
|
|
|
|
; SSE2-NEXT: pxor %xmm8, %xmm1
|
|
|
|
; SSE2-NEXT: movdqa %xmm0, %xmm3
|
|
|
|
; SSE2-NEXT: pxor %xmm8, %xmm3
|
|
|
|
; SSE2-NEXT: movdqa %xmm3, %xmm5
|
|
|
|
; SSE2-NEXT: pcmpgtd %xmm1, %xmm5
|
|
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
|
|
|
|
; SSE2-NEXT: pcmpeqd %xmm1, %xmm3
|
|
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
|
|
|
|
; SSE2-NEXT: pand %xmm6, %xmm3
|
|
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm5[1,1,3,3]
|
|
|
|
; SSE2-NEXT: por %xmm3, %xmm1
|
|
|
|
; SSE2-NEXT: pand %xmm1, %xmm4
|
|
|
|
; SSE2-NEXT: pandn %xmm0, %xmm1
|
|
|
|
; SSE2-NEXT: por %xmm4, %xmm1
|
|
|
|
; SSE2-NEXT: movdqa %xmm2, %xmm0
|
|
|
|
; SSE2-NEXT: pxor %xmm8, %xmm0
|
|
|
|
; SSE2-NEXT: movdqa %xmm9, %xmm3
|
|
|
|
; SSE2-NEXT: pxor %xmm8, %xmm3
|
|
|
|
; SSE2-NEXT: movdqa %xmm3, %xmm4
|
|
|
|
; SSE2-NEXT: pcmpgtd %xmm0, %xmm4
|
|
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
|
|
|
|
; SSE2-NEXT: pcmpeqd %xmm0, %xmm3
|
|
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
|
|
|
|
; SSE2-NEXT: pand %xmm5, %xmm0
|
|
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
|
|
|
|
; SSE2-NEXT: por %xmm0, %xmm3
|
|
|
|
; SSE2-NEXT: pand %xmm3, %xmm2
|
|
|
|
; SSE2-NEXT: pandn %xmm9, %xmm3
|
|
|
|
; SSE2-NEXT: por %xmm2, %xmm3
|
|
|
|
; SSE2-NEXT: movdqa %xmm3, %xmm0
|
|
|
|
; SSE2-NEXT: pxor %xmm8, %xmm0
|
|
|
|
; SSE2-NEXT: movdqa %xmm1, %xmm2
|
|
|
|
; SSE2-NEXT: pxor %xmm8, %xmm2
|
|
|
|
; SSE2-NEXT: movdqa %xmm2, %xmm4
|
|
|
|
; SSE2-NEXT: pcmpgtd %xmm0, %xmm4
|
|
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
|
|
|
|
; SSE2-NEXT: pcmpeqd %xmm0, %xmm2
|
|
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
|
|
|
|
; SSE2-NEXT: pand %xmm5, %xmm0
|
|
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm4[1,1,3,3]
|
|
|
|
; SSE2-NEXT: por %xmm0, %xmm2
|
|
|
|
; SSE2-NEXT: pand %xmm2, %xmm3
|
|
|
|
; SSE2-NEXT: pandn %xmm1, %xmm2
|
|
|
|
; SSE2-NEXT: por %xmm3, %xmm2
|
|
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
|
|
|
|
; SSE2-NEXT: movdqa %xmm2, %xmm1
|
|
|
|
; SSE2-NEXT: pxor %xmm8, %xmm1
|
|
|
|
; SSE2-NEXT: pxor %xmm0, %xmm8
|
|
|
|
; SSE2-NEXT: movdqa %xmm8, %xmm3
|
|
|
|
; SSE2-NEXT: pcmpgtd %xmm1, %xmm3
|
|
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
|
|
|
|
; SSE2-NEXT: pcmpeqd %xmm1, %xmm8
|
|
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm8[1,1,3,3]
|
|
|
|
; SSE2-NEXT: pand %xmm4, %xmm1
|
|
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
|
|
|
|
; SSE2-NEXT: por %xmm1, %xmm3
|
|
|
|
; SSE2-NEXT: pand %xmm3, %xmm2
|
|
|
|
; SSE2-NEXT: pandn %xmm0, %xmm3
|
|
|
|
; SSE2-NEXT: por %xmm2, %xmm3
|
|
|
|
; SSE2-NEXT: movq %xmm3, %rax
|
|
|
|
; SSE2-NEXT: retq
|
|
|
|
;
|
|
|
|
; SSE41-LABEL: test_v16i64:
|
|
|
|
; SSE41: # %bb.0:
|
|
|
|
; SSE41-NEXT: movdqa %xmm0, %xmm8
|
2018-10-10 03:05:50 +08:00
|
|
|
; SSE41-NEXT: movdqa {{.*#+}} xmm9 = [9223372039002259456,9223372039002259456]
|
2019-03-26 23:08:14 +08:00
|
|
|
; SSE41-NEXT: movdqa %xmm2, %xmm0
|
2018-04-06 01:25:40 +08:00
|
|
|
; SSE41-NEXT: pxor %xmm9, %xmm0
|
2019-03-26 23:08:14 +08:00
|
|
|
; SSE41-NEXT: movdqa %xmm6, %xmm10
|
|
|
|
; SSE41-NEXT: pxor %xmm9, %xmm10
|
|
|
|
; SSE41-NEXT: movdqa %xmm10, %xmm11
|
|
|
|
; SSE41-NEXT: pcmpeqd %xmm0, %xmm11
|
|
|
|
; SSE41-NEXT: pcmpgtd %xmm0, %xmm10
|
|
|
|
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm10[0,0,2,2]
|
|
|
|
; SSE41-NEXT: pand %xmm11, %xmm0
|
|
|
|
; SSE41-NEXT: por %xmm10, %xmm0
|
2018-04-06 01:25:40 +08:00
|
|
|
; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm6
|
|
|
|
; SSE41-NEXT: movdqa %xmm8, %xmm0
|
|
|
|
; SSE41-NEXT: pxor %xmm9, %xmm0
|
|
|
|
; SSE41-NEXT: movdqa %xmm4, %xmm2
|
|
|
|
; SSE41-NEXT: pxor %xmm9, %xmm2
|
|
|
|
; SSE41-NEXT: movdqa %xmm2, %xmm10
|
2019-03-26 23:08:14 +08:00
|
|
|
; SSE41-NEXT: pcmpeqd %xmm0, %xmm10
|
|
|
|
; SSE41-NEXT: pcmpgtd %xmm0, %xmm2
|
|
|
|
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,0,2,2]
|
|
|
|
; SSE41-NEXT: pand %xmm10, %xmm0
|
|
|
|
; SSE41-NEXT: por %xmm2, %xmm0
|
2018-04-06 01:25:40 +08:00
|
|
|
; SSE41-NEXT: blendvpd %xmm0, %xmm8, %xmm4
|
|
|
|
; SSE41-NEXT: movdqa %xmm3, %xmm0
|
|
|
|
; SSE41-NEXT: pxor %xmm9, %xmm0
|
|
|
|
; SSE41-NEXT: movdqa %xmm7, %xmm2
|
|
|
|
; SSE41-NEXT: pxor %xmm9, %xmm2
|
|
|
|
; SSE41-NEXT: movdqa %xmm2, %xmm8
|
2019-03-26 23:08:14 +08:00
|
|
|
; SSE41-NEXT: pcmpeqd %xmm0, %xmm8
|
|
|
|
; SSE41-NEXT: pcmpgtd %xmm0, %xmm2
|
|
|
|
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,0,2,2]
|
|
|
|
; SSE41-NEXT: pand %xmm8, %xmm0
|
|
|
|
; SSE41-NEXT: por %xmm2, %xmm0
|
2018-04-06 01:25:40 +08:00
|
|
|
; SSE41-NEXT: blendvpd %xmm0, %xmm3, %xmm7
|
|
|
|
; SSE41-NEXT: movdqa %xmm1, %xmm0
|
|
|
|
; SSE41-NEXT: pxor %xmm9, %xmm0
|
|
|
|
; SSE41-NEXT: movdqa %xmm5, %xmm2
|
|
|
|
; SSE41-NEXT: pxor %xmm9, %xmm2
|
|
|
|
; SSE41-NEXT: movdqa %xmm2, %xmm3
|
2019-03-26 23:08:14 +08:00
|
|
|
; SSE41-NEXT: pcmpeqd %xmm0, %xmm3
|
|
|
|
; SSE41-NEXT: pcmpgtd %xmm0, %xmm2
|
|
|
|
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,0,2,2]
|
|
|
|
; SSE41-NEXT: pand %xmm3, %xmm0
|
|
|
|
; SSE41-NEXT: por %xmm2, %xmm0
|
2018-04-06 01:25:40 +08:00
|
|
|
; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm5
|
|
|
|
; SSE41-NEXT: movapd %xmm5, %xmm0
|
|
|
|
; SSE41-NEXT: xorpd %xmm9, %xmm0
|
|
|
|
; SSE41-NEXT: movapd %xmm7, %xmm1
|
|
|
|
; SSE41-NEXT: xorpd %xmm9, %xmm1
|
|
|
|
; SSE41-NEXT: movapd %xmm1, %xmm2
|
2019-03-26 23:08:14 +08:00
|
|
|
; SSE41-NEXT: pcmpeqd %xmm0, %xmm2
|
|
|
|
; SSE41-NEXT: pcmpgtd %xmm0, %xmm1
|
|
|
|
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,0,2,2]
|
|
|
|
; SSE41-NEXT: pand %xmm2, %xmm0
|
|
|
|
; SSE41-NEXT: por %xmm1, %xmm0
|
2018-04-06 01:25:40 +08:00
|
|
|
; SSE41-NEXT: blendvpd %xmm0, %xmm5, %xmm7
|
|
|
|
; SSE41-NEXT: movapd %xmm4, %xmm0
|
|
|
|
; SSE41-NEXT: xorpd %xmm9, %xmm0
|
|
|
|
; SSE41-NEXT: movapd %xmm6, %xmm1
|
|
|
|
; SSE41-NEXT: xorpd %xmm9, %xmm1
|
|
|
|
; SSE41-NEXT: movapd %xmm1, %xmm2
|
2019-03-26 23:08:14 +08:00
|
|
|
; SSE41-NEXT: pcmpeqd %xmm0, %xmm2
|
|
|
|
; SSE41-NEXT: pcmpgtd %xmm0, %xmm1
|
|
|
|
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,0,2,2]
|
|
|
|
; SSE41-NEXT: pand %xmm2, %xmm0
|
|
|
|
; SSE41-NEXT: por %xmm1, %xmm0
|
2018-04-06 01:25:40 +08:00
|
|
|
; SSE41-NEXT: blendvpd %xmm0, %xmm4, %xmm6
|
|
|
|
; SSE41-NEXT: movapd %xmm6, %xmm0
|
|
|
|
; SSE41-NEXT: xorpd %xmm9, %xmm0
|
|
|
|
; SSE41-NEXT: movapd %xmm7, %xmm1
|
|
|
|
; SSE41-NEXT: xorpd %xmm9, %xmm1
|
|
|
|
; SSE41-NEXT: movapd %xmm1, %xmm2
|
2019-03-26 23:08:14 +08:00
|
|
|
; SSE41-NEXT: pcmpeqd %xmm0, %xmm2
|
|
|
|
; SSE41-NEXT: pcmpgtd %xmm0, %xmm1
|
|
|
|
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,0,2,2]
|
|
|
|
; SSE41-NEXT: pand %xmm2, %xmm0
|
|
|
|
; SSE41-NEXT: por %xmm1, %xmm0
|
2018-04-06 01:25:40 +08:00
|
|
|
; SSE41-NEXT: blendvpd %xmm0, %xmm6, %xmm7
|
|
|
|
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm7[2,3,0,1]
|
|
|
|
; SSE41-NEXT: movdqa %xmm7, %xmm0
|
|
|
|
; SSE41-NEXT: pxor %xmm9, %xmm0
|
|
|
|
; SSE41-NEXT: pxor %xmm1, %xmm9
|
|
|
|
; SSE41-NEXT: movdqa %xmm9, %xmm2
|
2019-03-26 23:08:14 +08:00
|
|
|
; SSE41-NEXT: pcmpeqd %xmm0, %xmm2
|
|
|
|
; SSE41-NEXT: pcmpgtd %xmm0, %xmm9
|
|
|
|
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm9[0,0,2,2]
|
|
|
|
; SSE41-NEXT: pand %xmm2, %xmm0
|
|
|
|
; SSE41-NEXT: por %xmm9, %xmm0
|
2018-04-06 01:25:40 +08:00
|
|
|
; SSE41-NEXT: blendvpd %xmm0, %xmm7, %xmm1
|
|
|
|
; SSE41-NEXT: movq %xmm1, %rax
|
|
|
|
; SSE41-NEXT: retq
|
|
|
|
;
|
|
|
|
; AVX1-LABEL: test_v16i64:
|
|
|
|
; AVX1: # %bb.0:
|
|
|
|
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
|
|
|
|
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [9223372036854775808,9223372036854775808]
|
|
|
|
; AVX1-NEXT: vpxor %xmm4, %xmm5, %xmm5
|
|
|
|
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm6
|
|
|
|
; AVX1-NEXT: vpxor %xmm4, %xmm6, %xmm6
|
|
|
|
; AVX1-NEXT: vpcmpgtq %xmm5, %xmm6, %xmm5
|
|
|
|
; AVX1-NEXT: vpxor %xmm4, %xmm1, %xmm6
|
|
|
|
; AVX1-NEXT: vpxor %xmm4, %xmm3, %xmm7
|
|
|
|
; AVX1-NEXT: vpcmpgtq %xmm6, %xmm7, %xmm6
|
|
|
|
; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm6, %ymm5
|
|
|
|
; AVX1-NEXT: vblendvpd %ymm5, %ymm1, %ymm3, %ymm1
|
|
|
|
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
|
|
|
|
; AVX1-NEXT: vpxor %xmm4, %xmm3, %xmm3
|
|
|
|
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm5
|
|
|
|
; AVX1-NEXT: vpxor %xmm4, %xmm5, %xmm5
|
|
|
|
; AVX1-NEXT: vpcmpgtq %xmm3, %xmm5, %xmm3
|
|
|
|
; AVX1-NEXT: vpxor %xmm4, %xmm0, %xmm5
|
|
|
|
; AVX1-NEXT: vpxor %xmm4, %xmm2, %xmm6
|
|
|
|
; AVX1-NEXT: vpcmpgtq %xmm5, %xmm6, %xmm5
|
|
|
|
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm5, %ymm3
|
|
|
|
; AVX1-NEXT: vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
|
|
|
|
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
|
|
|
|
; AVX1-NEXT: vpxor %xmm4, %xmm2, %xmm2
|
|
|
|
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
|
|
|
|
; AVX1-NEXT: vpxor %xmm4, %xmm3, %xmm3
|
|
|
|
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
|
|
|
|
; AVX1-NEXT: vxorpd %xmm4, %xmm0, %xmm3
|
|
|
|
; AVX1-NEXT: vxorpd %xmm4, %xmm1, %xmm5
|
|
|
|
; AVX1-NEXT: vpcmpgtq %xmm3, %xmm5, %xmm3
|
|
|
|
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
|
|
|
|
; AVX1-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
|
|
|
|
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
|
|
|
|
; AVX1-NEXT: vxorpd %xmm4, %xmm0, %xmm2
|
|
|
|
; AVX1-NEXT: vpxor %xmm4, %xmm1, %xmm3
|
|
|
|
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
|
|
|
|
; AVX1-NEXT: vpcmpgtq %xmm3, %xmm0, %xmm3
|
|
|
|
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
|
|
|
|
; AVX1-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
|
|
|
|
; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
|
|
|
|
; AVX1-NEXT: vxorpd %xmm4, %xmm0, %xmm2
|
|
|
|
; AVX1-NEXT: vxorpd %xmm4, %xmm1, %xmm3
|
|
|
|
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
|
|
|
|
; AVX1-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
|
|
|
|
; AVX1-NEXT: vmovq %xmm0, %rax
|
|
|
|
; AVX1-NEXT: vzeroupper
|
|
|
|
; AVX1-NEXT: retq
|
|
|
|
;
|
|
|
|
; AVX2-LABEL: test_v16i64:
|
|
|
|
; AVX2: # %bb.0:
|
|
|
|
; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm4 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
|
|
|
|
; AVX2-NEXT: vpxor %ymm4, %ymm1, %ymm5
|
|
|
|
; AVX2-NEXT: vpxor %ymm4, %ymm3, %ymm6
|
|
|
|
; AVX2-NEXT: vpcmpgtq %ymm5, %ymm6, %ymm5
|
|
|
|
; AVX2-NEXT: vblendvpd %ymm5, %ymm1, %ymm3, %ymm1
|
|
|
|
; AVX2-NEXT: vpxor %ymm4, %ymm0, %ymm3
|
|
|
|
; AVX2-NEXT: vpxor %ymm4, %ymm2, %ymm5
|
|
|
|
; AVX2-NEXT: vpcmpgtq %ymm3, %ymm5, %ymm3
|
|
|
|
; AVX2-NEXT: vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
|
|
|
|
; AVX2-NEXT: vxorpd %ymm4, %ymm0, %ymm2
|
|
|
|
; AVX2-NEXT: vxorpd %ymm4, %ymm1, %ymm3
|
|
|
|
; AVX2-NEXT: vpcmpgtq %ymm2, %ymm3, %ymm2
|
|
|
|
; AVX2-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
|
|
|
|
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1
|
|
|
|
; AVX2-NEXT: vxorpd %ymm4, %ymm0, %ymm2
|
|
|
|
; AVX2-NEXT: vxorpd %ymm4, %ymm1, %ymm3
|
|
|
|
; AVX2-NEXT: vpcmpgtq %ymm2, %ymm3, %ymm2
|
|
|
|
; AVX2-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
|
|
|
|
; AVX2-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
|
|
|
|
; AVX2-NEXT: vxorpd %ymm4, %ymm0, %ymm2
|
|
|
|
; AVX2-NEXT: vxorpd %ymm4, %ymm1, %ymm3
|
|
|
|
; AVX2-NEXT: vpcmpgtq %ymm2, %ymm3, %ymm2
|
|
|
|
; AVX2-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
|
|
|
|
; AVX2-NEXT: vmovq %xmm0, %rax
|
|
|
|
; AVX2-NEXT: vzeroupper
|
|
|
|
; AVX2-NEXT: retq
|
|
|
|
;
|
|
|
|
; AVX512-LABEL: test_v16i64:
|
|
|
|
; AVX512: # %bb.0:
|
|
|
|
; AVX512-NEXT: vpminuq %zmm1, %zmm0, %zmm0
|
|
|
|
; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
|
|
|
|
; AVX512-NEXT: vpminuq %zmm1, %zmm0, %zmm0
|
|
|
|
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
|
|
|
|
; AVX512-NEXT: vpminuq %zmm1, %zmm0, %zmm0
|
|
|
|
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
|
|
|
|
; AVX512-NEXT: vpminuq %zmm1, %zmm0, %zmm0
|
|
|
|
; AVX512-NEXT: vmovq %xmm0, %rax
|
|
|
|
; AVX512-NEXT: vzeroupper
|
|
|
|
; AVX512-NEXT: retq
|
|
|
|
%1 = call i64 @llvm.experimental.vector.reduce.umin.i64.v16i64(<16 x i64> %a0)
|
|
|
|
ret i64 %1
|
|
|
|
}
|
|
|
|
|
|
|
|
;
|
|
|
|
; vXi32
|
|
|
|
;
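; SSE2 also lacks pminud, so its i32 reductions flip the sign bits (pxor with
; 2147483648) and select through pcmpgtd/pand/pandn/por; SSE4.1 and later use
; pminud directly, while the <2 x i32> case is first widened into 64-bit lanes.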
|
|
|
|
|
2018-12-05 14:29:44 +08:00
|
|
|
define i32 @test_v2i32(<2 x i32> %a0) {
|
|
|
|
; SSE2-LABEL: test_v2i32:
|
|
|
|
; SSE2: # %bb.0:
|
|
|
|
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [4294967295,0,4294967295,0]
|
|
|
|
; SSE2-NEXT: pand %xmm0, %xmm1
|
|
|
|
; SSE2-NEXT: pxor %xmm2, %xmm2
|
|
|
|
; SSE2-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
|
|
|
|
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648]
|
|
|
|
; SSE2-NEXT: movdqa %xmm1, %xmm3
|
|
|
|
; SSE2-NEXT: pxor %xmm2, %xmm3
|
|
|
|
; SSE2-NEXT: pxor %xmm0, %xmm2
|
|
|
|
; SSE2-NEXT: movdqa %xmm2, %xmm4
|
|
|
|
; SSE2-NEXT: pcmpgtd %xmm3, %xmm4
|
|
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
|
|
|
|
; SSE2-NEXT: pcmpeqd %xmm3, %xmm2
|
|
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
|
|
|
|
; SSE2-NEXT: pand %xmm5, %xmm2
|
|
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
|
|
|
|
; SSE2-NEXT: por %xmm2, %xmm3
|
|
|
|
; SSE2-NEXT: pand %xmm3, %xmm1
|
|
|
|
; SSE2-NEXT: pandn %xmm0, %xmm3
|
|
|
|
; SSE2-NEXT: por %xmm1, %xmm3
|
|
|
|
; SSE2-NEXT: movd %xmm3, %eax
|
|
|
|
; SSE2-NEXT: retq
|
|
|
|
;
|
|
|
|
; SSE41-LABEL: test_v2i32:
|
|
|
|
; SSE41: # %bb.0:
|
2019-03-25 03:06:35 +08:00
|
|
|
; SSE41-NEXT: movdqa %xmm0, %xmm1
|
|
|
|
; SSE41-NEXT: pxor %xmm0, %xmm0
|
|
|
|
; SSE41-NEXT: movdqa %xmm1, %xmm2
|
|
|
|
; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm0[2,3],xmm2[4,5],xmm0[6,7]
|
|
|
|
; SSE41-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
|
2019-03-26 23:08:14 +08:00
|
|
|
; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [2147483648,2147483648]
|
|
|
|
; SSE41-NEXT: movdqa %xmm2, %xmm0
|
|
|
|
; SSE41-NEXT: pxor %xmm3, %xmm0
|
|
|
|
; SSE41-NEXT: pxor %xmm1, %xmm3
|
|
|
|
; SSE41-NEXT: movdqa %xmm3, %xmm4
|
|
|
|
; SSE41-NEXT: pcmpeqd %xmm0, %xmm4
|
|
|
|
; SSE41-NEXT: pcmpgtd %xmm0, %xmm3
|
|
|
|
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,0,2,2]
|
|
|
|
; SSE41-NEXT: pand %xmm4, %xmm0
|
|
|
|
; SSE41-NEXT: por %xmm3, %xmm0
|
2019-03-25 03:06:35 +08:00
|
|
|
; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm1
|
|
|
|
; SSE41-NEXT: movd %xmm1, %eax
|
2018-12-05 14:29:44 +08:00
|
|
|
; SSE41-NEXT: retq
|
|
|
|
;
|
|
|
|
; AVX1-LABEL: test_v2i32:
|
|
|
|
; AVX1: # %bb.0:
|
|
|
|
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
|
2019-03-25 00:30:35 +08:00
|
|
|
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
|
|
|
|
; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
|
|
|
|
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm0, %xmm1
|
|
|
|
; AVX1-NEXT: vblendvpd %xmm1, %xmm2, %xmm0, %xmm0
|
2018-12-05 14:29:44 +08:00
|
|
|
; AVX1-NEXT: vmovd %xmm0, %eax
|
|
|
|
; AVX1-NEXT: retq
|
|
|
|
;
|
|
|
|
; AVX2-LABEL: test_v2i32:
|
|
|
|
; AVX2: # %bb.0:
|
|
|
|
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
|
2019-03-25 00:30:35 +08:00
|
|
|
; AVX2-NEXT: vpblendd {{.*#+}} xmm2 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
|
|
|
|
; AVX2-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
|
|
|
|
; AVX2-NEXT: vpcmpgtq %xmm2, %xmm0, %xmm1
|
|
|
|
; AVX2-NEXT: vblendvpd %xmm1, %xmm2, %xmm0, %xmm0
|
2018-12-05 14:29:44 +08:00
|
|
|
; AVX2-NEXT: vmovd %xmm0, %eax
|
|
|
|
; AVX2-NEXT: retq
|
|
|
|
;
|
|
|
|
; AVX512BW-LABEL: test_v2i32:
|
|
|
|
; AVX512BW: # %bb.0:
|
|
|
|
; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
|
2019-03-25 00:30:35 +08:00
|
|
|
; AVX512BW-NEXT: vpblendd {{.*#+}} xmm2 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
|
|
|
|
; AVX512BW-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
|
|
|
|
; AVX512BW-NEXT: vpminuq %zmm0, %zmm2, %zmm0
|
2018-12-05 14:29:44 +08:00
|
|
|
; AVX512BW-NEXT: vmovd %xmm0, %eax
|
|
|
|
; AVX512BW-NEXT: vzeroupper
|
|
|
|
; AVX512BW-NEXT: retq
|
|
|
|
;
|
|
|
|
; AVX512VL-LABEL: test_v2i32:
|
|
|
|
; AVX512VL: # %bb.0:
|
|
|
|
; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
|
|
|
|
; AVX512VL-NEXT: vpblendd {{.*#+}} xmm1 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
|
|
|
|
; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
|
|
|
|
; AVX512VL-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
|
|
|
|
; AVX512VL-NEXT: vpminuq %xmm0, %xmm1, %xmm0
|
|
|
|
; AVX512VL-NEXT: vmovd %xmm0, %eax
|
|
|
|
; AVX512VL-NEXT: retq
|
|
|
|
%1 = call i32 @llvm.experimental.vector.reduce.umin.i32.v2i32(<2 x i32> %a0)
|
|
|
|
ret i32 %1
|
|
|
|
}
|
|
|
|
|
2018-04-06 01:25:40 +08:00
|
|
|
define i32 @test_v4i32(<4 x i32> %a0) {
|
|
|
|
; SSE2-LABEL: test_v4i32:
|
|
|
|
; SSE2: # %bb.0:
|
|
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
|
|
|
|
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
|
|
|
|
; SSE2-NEXT: movdqa %xmm0, %xmm3
|
|
|
|
; SSE2-NEXT: pxor %xmm2, %xmm3
|
|
|
|
; SSE2-NEXT: movdqa %xmm1, %xmm4
|
|
|
|
; SSE2-NEXT: pxor %xmm2, %xmm4
|
|
|
|
; SSE2-NEXT: pcmpgtd %xmm3, %xmm4
|
|
|
|
; SSE2-NEXT: pand %xmm4, %xmm0
|
|
|
|
; SSE2-NEXT: pandn %xmm1, %xmm4
|
|
|
|
; SSE2-NEXT: por %xmm0, %xmm4
|
|
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,2,3]
|
|
|
|
; SSE2-NEXT: movdqa %xmm4, %xmm1
|
|
|
|
; SSE2-NEXT: pxor %xmm2, %xmm1
|
|
|
|
; SSE2-NEXT: pxor %xmm0, %xmm2
|
|
|
|
; SSE2-NEXT: pcmpgtd %xmm1, %xmm2
|
|
|
|
; SSE2-NEXT: pand %xmm2, %xmm4
|
|
|
|
; SSE2-NEXT: pandn %xmm0, %xmm2
|
|
|
|
; SSE2-NEXT: por %xmm4, %xmm2
|
|
|
|
; SSE2-NEXT: movd %xmm2, %eax
|
|
|
|
; SSE2-NEXT: retq
|
|
|
|
;
|
|
|
|
; SSE41-LABEL: test_v4i32:
|
|
|
|
; SSE41: # %bb.0:
|
|
|
|
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
|
|
|
|
; SSE41-NEXT: pminud %xmm0, %xmm1
|
|
|
|
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
|
|
|
|
; SSE41-NEXT: pminud %xmm1, %xmm0
|
|
|
|
; SSE41-NEXT: movd %xmm0, %eax
|
|
|
|
; SSE41-NEXT: retq
|
|
|
|
;
|
|
|
|
; AVX-LABEL: test_v4i32:
|
|
|
|
; AVX: # %bb.0:
|
|
|
|
; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
|
|
|
|
; AVX-NEXT: vpminud %xmm1, %xmm0, %xmm0
|
|
|
|
; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
|
|
|
|
; AVX-NEXT: vpminud %xmm1, %xmm0, %xmm0
|
|
|
|
; AVX-NEXT: vmovd %xmm0, %eax
|
|
|
|
; AVX-NEXT: retq
|
|
|
|
;
|
|
|
|
; AVX512-LABEL: test_v4i32:
|
|
|
|
; AVX512: # %bb.0:
|
|
|
|
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
|
|
|
|
; AVX512-NEXT: vpminud %xmm1, %xmm0, %xmm0
|
|
|
|
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
|
|
|
|
; AVX512-NEXT: vpminud %xmm1, %xmm0, %xmm0
|
|
|
|
; AVX512-NEXT: vmovd %xmm0, %eax
|
|
|
|
; AVX512-NEXT: retq
|
|
|
|
%1 = call i32 @llvm.experimental.vector.reduce.umin.i32.v4i32(<4 x i32> %a0)
|
|
|
|
ret i32 %1
|
|
|
|
}
|
|
|
|
|
|
|
|
define i32 @test_v8i32(<8 x i32> %a0) {
|
|
|
|
; SSE2-LABEL: test_v8i32:
|
|
|
|
; SSE2: # %bb.0:
|
|
|
|
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
|
|
|
|
; SSE2-NEXT: movdqa %xmm0, %xmm3
|
|
|
|
; SSE2-NEXT: pxor %xmm2, %xmm3
|
|
|
|
; SSE2-NEXT: movdqa %xmm1, %xmm4
|
|
|
|
; SSE2-NEXT: pxor %xmm2, %xmm4
|
|
|
|
; SSE2-NEXT: pcmpgtd %xmm3, %xmm4
|
|
|
|
; SSE2-NEXT: pand %xmm4, %xmm0
|
|
|
|
; SSE2-NEXT: pandn %xmm1, %xmm4
|
|
|
|
; SSE2-NEXT: por %xmm0, %xmm4
|
|
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm4[2,3,0,1]
|
|
|
|
; SSE2-NEXT: movdqa %xmm4, %xmm1
|
|
|
|
; SSE2-NEXT: pxor %xmm2, %xmm1
|
|
|
|
; SSE2-NEXT: movdqa %xmm0, %xmm3
|
|
|
|
; SSE2-NEXT: pxor %xmm2, %xmm3
|
|
|
|
; SSE2-NEXT: pcmpgtd %xmm1, %xmm3
|
|
|
|
; SSE2-NEXT: pand %xmm3, %xmm4
|
|
|
|
; SSE2-NEXT: pandn %xmm0, %xmm3
|
|
|
|
; SSE2-NEXT: por %xmm4, %xmm3
|
|
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,2,3]
|
|
|
|
; SSE2-NEXT: movdqa %xmm3, %xmm1
|
|
|
|
; SSE2-NEXT: pxor %xmm2, %xmm1
|
|
|
|
; SSE2-NEXT: pxor %xmm0, %xmm2
|
|
|
|
; SSE2-NEXT: pcmpgtd %xmm1, %xmm2
|
|
|
|
; SSE2-NEXT: pand %xmm2, %xmm3
|
|
|
|
; SSE2-NEXT: pandn %xmm0, %xmm2
|
|
|
|
; SSE2-NEXT: por %xmm3, %xmm2
|
|
|
|
; SSE2-NEXT: movd %xmm2, %eax
|
|
|
|
; SSE2-NEXT: retq
|
|
|
|
;
|
|
|
|
; SSE41-LABEL: test_v8i32:
|
|
|
|
; SSE41: # %bb.0:
|
|
|
|
; SSE41-NEXT: pminud %xmm1, %xmm0
|
|
|
|
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
|
|
|
|
; SSE41-NEXT: pminud %xmm0, %xmm1
|
|
|
|
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
|
|
|
|
; SSE41-NEXT: pminud %xmm1, %xmm0
|
|
|
|
; SSE41-NEXT: movd %xmm0, %eax
|
|
|
|
; SSE41-NEXT: retq
|
|
|
|
;
|
|
|
|
; AVX1-LABEL: test_v8i32:
|
|
|
|
; AVX1: # %bb.0:
|
|
|
|
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
|
|
|
|
; AVX1-NEXT: vpminud %xmm1, %xmm0, %xmm0
|
|
|
|
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
|
|
|
|
; AVX1-NEXT: vpminud %xmm1, %xmm0, %xmm0
|
|
|
|
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
|
|
|
|
; AVX1-NEXT: vpminud %xmm1, %xmm0, %xmm0
|
|
|
|
; AVX1-NEXT: vmovd %xmm0, %eax
|
|
|
|
; AVX1-NEXT: vzeroupper
|
|
|
|
; AVX1-NEXT: retq
|
|
|
|
;
|
|
|
|
; AVX2-LABEL: test_v8i32:
|
|
|
|
; AVX2: # %bb.0:
|
|
|
|
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
|
|
|
|
; AVX2-NEXT: vpminud %ymm1, %ymm0, %ymm0
|
|
|
|
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
|
|
|
|
; AVX2-NEXT: vpminud %ymm1, %ymm0, %ymm0
|
|
|
|
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
|
|
|
|
; AVX2-NEXT: vpminud %ymm1, %ymm0, %ymm0
|
|
|
|
; AVX2-NEXT: vmovd %xmm0, %eax
|
|
|
|
; AVX2-NEXT: vzeroupper
|
|
|
|
; AVX2-NEXT: retq
|
|
|
|
;
|
|
|
|
; AVX512-LABEL: test_v8i32:
|
|
|
|
; AVX512: # %bb.0:
|
|
|
|
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
|
|
|
|
; AVX512-NEXT: vpminud %ymm1, %ymm0, %ymm0
|
|
|
|
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
|
|
|
|
; AVX512-NEXT: vpminud %ymm1, %ymm0, %ymm0
|
|
|
|
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
|
|
|
|
; AVX512-NEXT: vpminud %ymm1, %ymm0, %ymm0
|
|
|
|
; AVX512-NEXT: vmovd %xmm0, %eax
|
|
|
|
; AVX512-NEXT: vzeroupper
|
|
|
|
; AVX512-NEXT: retq
|
|
|
|
%1 = call i32 @llvm.experimental.vector.reduce.umin.i32.v8i32(<8 x i32> %a0)
|
|
|
|
ret i32 %1
|
|
|
|
}
|
|
|
|
|
|
|
|
define i32 @test_v16i32(<16 x i32> %a0) {
|
|
|
|
; SSE2-LABEL: test_v16i32:
|
|
|
|
; SSE2: # %bb.0:
|
|
|
|
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,2147483648,2147483648,2147483648]
|
|
|
|
; SSE2-NEXT: movdqa %xmm1, %xmm5
|
|
|
|
; SSE2-NEXT: pxor %xmm4, %xmm5
|
|
|
|
; SSE2-NEXT: movdqa %xmm3, %xmm6
|
|
|
|
; SSE2-NEXT: pxor %xmm4, %xmm6
|
|
|
|
; SSE2-NEXT: pcmpgtd %xmm5, %xmm6
|
|
|
|
; SSE2-NEXT: pand %xmm6, %xmm1
|
|
|
|
; SSE2-NEXT: pandn %xmm3, %xmm6
|
|
|
|
; SSE2-NEXT: por %xmm1, %xmm6
|
|
|
|
; SSE2-NEXT: movdqa %xmm0, %xmm1
|
|
|
|
; SSE2-NEXT: pxor %xmm4, %xmm1
|
|
|
|
; SSE2-NEXT: movdqa %xmm2, %xmm3
|
|
|
|
; SSE2-NEXT: pxor %xmm4, %xmm3
|
|
|
|
; SSE2-NEXT: pcmpgtd %xmm1, %xmm3
|
|
|
|
; SSE2-NEXT: pand %xmm3, %xmm0
|
|
|
|
; SSE2-NEXT: pandn %xmm2, %xmm3
|
|
|
|
; SSE2-NEXT: por %xmm0, %xmm3
|
|
|
|
; SSE2-NEXT: movdqa %xmm3, %xmm0
|
|
|
|
; SSE2-NEXT: pxor %xmm4, %xmm0
|
|
|
|
; SSE2-NEXT: movdqa %xmm6, %xmm1
|
|
|
|
; SSE2-NEXT: pxor %xmm4, %xmm1
|
|
|
|
; SSE2-NEXT: pcmpgtd %xmm0, %xmm1
|
|
|
|
; SSE2-NEXT: pand %xmm1, %xmm3
|
|
|
|
; SSE2-NEXT: pandn %xmm6, %xmm1
|
|
|
|
; SSE2-NEXT: por %xmm3, %xmm1
|
|
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
|
|
|
|
; SSE2-NEXT: movdqa %xmm1, %xmm2
|
|
|
|
; SSE2-NEXT: pxor %xmm4, %xmm2
|
|
|
|
; SSE2-NEXT: movdqa %xmm0, %xmm3
|
|
|
|
; SSE2-NEXT: pxor %xmm4, %xmm3
|
|
|
|
; SSE2-NEXT: pcmpgtd %xmm2, %xmm3
|
|
|
|
; SSE2-NEXT: pand %xmm3, %xmm1
|
|
|
|
; SSE2-NEXT: pandn %xmm0, %xmm3
|
|
|
|
; SSE2-NEXT: por %xmm1, %xmm3
|
|
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,2,3]
|
|
|
|
; SSE2-NEXT: movdqa %xmm3, %xmm1
|
|
|
|
; SSE2-NEXT: pxor %xmm4, %xmm1
|
|
|
|
; SSE2-NEXT: pxor %xmm0, %xmm4
|
|
|
|
; SSE2-NEXT: pcmpgtd %xmm1, %xmm4
|
|
|
|
; SSE2-NEXT: pand %xmm4, %xmm3
|
|
|
|
; SSE2-NEXT: pandn %xmm0, %xmm4
|
|
|
|
; SSE2-NEXT: por %xmm3, %xmm4
|
|
|
|
; SSE2-NEXT: movd %xmm4, %eax
|
|
|
|
; SSE2-NEXT: retq
|
|
|
|
;
|
|
|
|
; SSE41-LABEL: test_v16i32:
|
|
|
|
; SSE41: # %bb.0:
|
|
|
|
; SSE41-NEXT: pminud %xmm3, %xmm1
|
|
|
|
; SSE41-NEXT: pminud %xmm2, %xmm0
|
|
|
|
; SSE41-NEXT: pminud %xmm1, %xmm0
|
|
|
|
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
|
|
|
|
; SSE41-NEXT: pminud %xmm0, %xmm1
|
|
|
|
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
|
|
|
|
; SSE41-NEXT: pminud %xmm1, %xmm0
|
|
|
|
; SSE41-NEXT: movd %xmm0, %eax
|
|
|
|
; SSE41-NEXT: retq
|
|
|
|
;
|
|
|
|
; AVX1-LABEL: test_v16i32:
|
|
|
|
; AVX1: # %bb.0:
|
|
|
|
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
|
|
|
|
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
|
|
|
|
; AVX1-NEXT: vpminud %xmm2, %xmm3, %xmm2
|
|
|
|
; AVX1-NEXT: vpminud %xmm1, %xmm0, %xmm0
|
|
|
|
; AVX1-NEXT: vpminud %xmm2, %xmm0, %xmm0
|
|
|
|
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
|
|
|
|
; AVX1-NEXT: vpminud %xmm1, %xmm0, %xmm0
|
|
|
|
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
|
|
|
|
; AVX1-NEXT: vpminud %xmm1, %xmm0, %xmm0
|
|
|
|
; AVX1-NEXT: vmovd %xmm0, %eax
|
|
|
|
; AVX1-NEXT: vzeroupper
|
|
|
|
; AVX1-NEXT: retq
|
|
|
|
;
|
|
|
|
; AVX2-LABEL: test_v16i32:
|
|
|
|
; AVX2: # %bb.0:
|
|
|
|
; AVX2-NEXT: vpminud %ymm1, %ymm0, %ymm0
|
|
|
|
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
|
|
|
|
; AVX2-NEXT: vpminud %ymm1, %ymm0, %ymm0
|
|
|
|
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
|
|
|
|
; AVX2-NEXT: vpminud %ymm1, %ymm0, %ymm0
|
|
|
|
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
|
|
|
|
; AVX2-NEXT: vpminud %ymm1, %ymm0, %ymm0
|
|
|
|
; AVX2-NEXT: vmovd %xmm0, %eax
|
|
|
|
; AVX2-NEXT: vzeroupper
|
|
|
|
; AVX2-NEXT: retq
|
|
|
|
;
|
|
|
|
; AVX512-LABEL: test_v16i32:
|
|
|
|
; AVX512: # %bb.0:
|
|
|
|
; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
|
|
|
|
; AVX512-NEXT: vpminud %zmm1, %zmm0, %zmm0
|
|
|
|
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
|
|
|
|
; AVX512-NEXT: vpminud %zmm1, %zmm0, %zmm0
|
|
|
|
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
|
|
|
|
; AVX512-NEXT: vpminud %zmm1, %zmm0, %zmm0
|
|
|
|
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
|
|
|
|
; AVX512-NEXT: vpminud %zmm1, %zmm0, %zmm0
|
|
|
|
; AVX512-NEXT: vmovd %xmm0, %eax
|
|
|
|
; AVX512-NEXT: vzeroupper
|
|
|
|
; AVX512-NEXT: retq
|
|
|
|
%1 = call i32 @llvm.experimental.vector.reduce.umin.i32.v16i32(<16 x i32> %a0)
|
|
|
|
ret i32 %1
|
|
|
|
}
|
|
|
|
|
|
|
|
define i32 @test_v32i32(<32 x i32> %a0) {
|
|
|
|
; SSE2-LABEL: test_v32i32:
|
|
|
|
; SSE2: # %bb.0:
|
|
|
|
; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [2147483648,2147483648,2147483648,2147483648]
|
|
|
|
; SSE2-NEXT: movdqa %xmm2, %xmm10
|
|
|
|
; SSE2-NEXT: pxor %xmm8, %xmm10
|
|
|
|
; SSE2-NEXT: movdqa %xmm6, %xmm9
|
|
|
|
; SSE2-NEXT: pxor %xmm8, %xmm9
|
|
|
|
; SSE2-NEXT: pcmpgtd %xmm10, %xmm9
|
|
|
|
; SSE2-NEXT: pand %xmm9, %xmm2
|
|
|
|
; SSE2-NEXT: pandn %xmm6, %xmm9
|
|
|
|
; SSE2-NEXT: por %xmm2, %xmm9
|
|
|
|
; SSE2-NEXT: movdqa %xmm0, %xmm6
|
|
|
|
; SSE2-NEXT: pxor %xmm8, %xmm6
|
|
|
|
; SSE2-NEXT: movdqa %xmm4, %xmm2
|
|
|
|
; SSE2-NEXT: pxor %xmm8, %xmm2
|
|
|
|
; SSE2-NEXT: pcmpgtd %xmm6, %xmm2
|
|
|
|
; SSE2-NEXT: pand %xmm2, %xmm0
|
|
|
|
; SSE2-NEXT: pandn %xmm4, %xmm2
|
|
|
|
; SSE2-NEXT: por %xmm0, %xmm2
|
|
|
|
; SSE2-NEXT: movdqa %xmm3, %xmm0
|
|
|
|
; SSE2-NEXT: pxor %xmm8, %xmm0
|
|
|
|
; SSE2-NEXT: movdqa %xmm7, %xmm4
|
|
|
|
; SSE2-NEXT: pxor %xmm8, %xmm4
|
|
|
|
; SSE2-NEXT: pcmpgtd %xmm0, %xmm4
|
|
|
|
; SSE2-NEXT: pand %xmm4, %xmm3
|
|
|
|
; SSE2-NEXT: pandn %xmm7, %xmm4
|
|
|
|
; SSE2-NEXT: por %xmm3, %xmm4
|
|
|
|
; SSE2-NEXT: movdqa %xmm1, %xmm0
|
|
|
|
; SSE2-NEXT: pxor %xmm8, %xmm0
|
|
|
|
; SSE2-NEXT: movdqa %xmm5, %xmm3
|
|
|
|
; SSE2-NEXT: pxor %xmm8, %xmm3
|
|
|
|
; SSE2-NEXT: pcmpgtd %xmm0, %xmm3
|
|
|
|
; SSE2-NEXT: pand %xmm3, %xmm1
|
|
|
|
; SSE2-NEXT: pandn %xmm5, %xmm3
|
|
|
|
; SSE2-NEXT: por %xmm1, %xmm3
|
|
|
|
; SSE2-NEXT: movdqa %xmm3, %xmm0
|
|
|
|
; SSE2-NEXT: pxor %xmm8, %xmm0
|
|
|
|
; SSE2-NEXT: movdqa %xmm4, %xmm1
|
|
|
|
; SSE2-NEXT: pxor %xmm8, %xmm1
|
|
|
|
; SSE2-NEXT: pcmpgtd %xmm0, %xmm1
|
|
|
|
; SSE2-NEXT: pand %xmm1, %xmm3
|
|
|
|
; SSE2-NEXT: pandn %xmm4, %xmm1
|
|
|
|
; SSE2-NEXT: por %xmm3, %xmm1
|
|
|
|
; SSE2-NEXT: movdqa %xmm2, %xmm0
|
|
|
|
; SSE2-NEXT: pxor %xmm8, %xmm0
|
|
|
|
; SSE2-NEXT: movdqa %xmm9, %xmm3
|
|
|
|
; SSE2-NEXT: pxor %xmm8, %xmm3
|
|
|
|
; SSE2-NEXT: pcmpgtd %xmm0, %xmm3
|
|
|
|
; SSE2-NEXT: pand %xmm3, %xmm2
|
|
|
|
; SSE2-NEXT: pandn %xmm9, %xmm3
|
|
|
|
; SSE2-NEXT: por %xmm2, %xmm3
|
|
|
|
; SSE2-NEXT: movdqa %xmm3, %xmm0
|
|
|
|
; SSE2-NEXT: pxor %xmm8, %xmm0
|
|
|
|
; SSE2-NEXT: movdqa %xmm1, %xmm2
|
|
|
|
; SSE2-NEXT: pxor %xmm8, %xmm2
|
|
|
|
; SSE2-NEXT: pcmpgtd %xmm0, %xmm2
|
|
|
|
; SSE2-NEXT: pand %xmm2, %xmm3
|
|
|
|
; SSE2-NEXT: pandn %xmm1, %xmm2
|
|
|
|
; SSE2-NEXT: por %xmm3, %xmm2
|
|
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
|
|
|
|
; SSE2-NEXT: movdqa %xmm2, %xmm1
|
|
|
|
; SSE2-NEXT: pxor %xmm8, %xmm1
|
|
|
|
; SSE2-NEXT: movdqa %xmm0, %xmm3
|
|
|
|
; SSE2-NEXT: pxor %xmm8, %xmm3
|
|
|
|
; SSE2-NEXT: pcmpgtd %xmm1, %xmm3
|
|
|
|
; SSE2-NEXT: pand %xmm3, %xmm2
|
|
|
|
; SSE2-NEXT: pandn %xmm0, %xmm3
|
|
|
|
; SSE2-NEXT: por %xmm2, %xmm3
|
|
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,2,3]
|
|
|
|
; SSE2-NEXT: movdqa %xmm3, %xmm1
|
|
|
|
; SSE2-NEXT: pxor %xmm8, %xmm1
|
|
|
|
; SSE2-NEXT: pxor %xmm0, %xmm8
|
|
|
|
; SSE2-NEXT: pcmpgtd %xmm1, %xmm8
|
|
|
|
; SSE2-NEXT: pand %xmm8, %xmm3
|
|
|
|
; SSE2-NEXT: pandn %xmm0, %xmm8
|
|
|
|
; SSE2-NEXT: por %xmm3, %xmm8
|
|
|
|
; SSE2-NEXT: movd %xmm8, %eax
|
|
|
|
; SSE2-NEXT: retq
|
|
|
|
;
|
|
|
|
; SSE41-LABEL: test_v32i32:
|
|
|
|
; SSE41: # %bb.0:
|
|
|
|
; SSE41-NEXT: pminud %xmm6, %xmm2
|
|
|
|
; SSE41-NEXT: pminud %xmm4, %xmm0
|
|
|
|
; SSE41-NEXT: pminud %xmm2, %xmm0
|
|
|
|
; SSE41-NEXT: pminud %xmm7, %xmm3
|
|
|
|
; SSE41-NEXT: pminud %xmm5, %xmm1
|
|
|
|
; SSE41-NEXT: pminud %xmm3, %xmm1
|
|
|
|
; SSE41-NEXT: pminud %xmm0, %xmm1
|
|
|
|
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
|
|
|
|
; SSE41-NEXT: pminud %xmm1, %xmm0
|
|
|
|
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
|
|
|
|
; SSE41-NEXT: pminud %xmm0, %xmm1
|
|
|
|
; SSE41-NEXT: movd %xmm1, %eax
|
|
|
|
; SSE41-NEXT: retq
|
|
|
|
;
|
|
|
|
; AVX1-LABEL: test_v32i32:
|
|
|
|
; AVX1: # %bb.0:
|
|
|
|
; AVX1-NEXT: vpminud %xmm3, %xmm1, %xmm4
|
|
|
|
; AVX1-NEXT: vpminud %xmm2, %xmm0, %xmm5
|
|
|
|
; AVX1-NEXT: vpminud %xmm4, %xmm5, %xmm4
|
|
|
|
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm3
|
|
|
|
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
|
|
|
|
; AVX1-NEXT: vpminud %xmm3, %xmm1, %xmm1
|
|
|
|
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm2
|
|
|
|
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
|
|
|
|
; AVX1-NEXT: vpminud %xmm2, %xmm0, %xmm0
|
|
|
|
; AVX1-NEXT: vpminud %xmm1, %xmm0, %xmm0
|
|
|
|
; AVX1-NEXT: vpminud %xmm0, %xmm4, %xmm0
|
|
|
|
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
|
|
|
|
; AVX1-NEXT: vpminud %xmm1, %xmm0, %xmm0
|
|
|
|
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
|
|
|
|
; AVX1-NEXT: vpminud %xmm1, %xmm0, %xmm0
|
|
|
|
; AVX1-NEXT: vmovd %xmm0, %eax
|
|
|
|
; AVX1-NEXT: vzeroupper
|
|
|
|
; AVX1-NEXT: retq
|
|
|
|
;
|
|
|
|
; AVX2-LABEL: test_v32i32:
|
|
|
|
; AVX2: # %bb.0:
|
|
|
|
; AVX2-NEXT: vpminud %ymm3, %ymm1, %ymm1
|
|
|
|
; AVX2-NEXT: vpminud %ymm2, %ymm0, %ymm0
|
|
|
|
; AVX2-NEXT: vpminud %ymm1, %ymm0, %ymm0
|
|
|
|
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
|
|
|
|
; AVX2-NEXT: vpminud %ymm1, %ymm0, %ymm0
|
|
|
|
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
|
|
|
|
; AVX2-NEXT: vpminud %ymm1, %ymm0, %ymm0
|
|
|
|
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
|
|
|
|
; AVX2-NEXT: vpminud %ymm1, %ymm0, %ymm0
|
|
|
|
; AVX2-NEXT: vmovd %xmm0, %eax
|
|
|
|
; AVX2-NEXT: vzeroupper
|
|
|
|
; AVX2-NEXT: retq
|
|
|
|
;
|
|
|
|
; AVX512-LABEL: test_v32i32:
|
|
|
|
; AVX512: # %bb.0:
|
|
|
|
; AVX512-NEXT: vpminud %zmm1, %zmm0, %zmm0
|
|
|
|
; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
|
|
|
|
; AVX512-NEXT: vpminud %zmm1, %zmm0, %zmm0
|
|
|
|
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
|
|
|
|
; AVX512-NEXT: vpminud %zmm1, %zmm0, %zmm0
|
|
|
|
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
|
|
|
|
; AVX512-NEXT: vpminud %zmm1, %zmm0, %zmm0
|
|
|
|
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
|
|
|
|
; AVX512-NEXT: vpminud %zmm1, %zmm0, %zmm0
|
|
|
|
; AVX512-NEXT: vmovd %xmm0, %eax
|
|
|
|
; AVX512-NEXT: vzeroupper
|
|
|
|
; AVX512-NEXT: retq
|
|
|
|
%1 = call i32 @llvm.experimental.vector.reduce.umin.i32.v32i32(<32 x i32> %a0)
|
|
|
|
ret i32 %1
|
|
|
|
}
|
|
|
|
|
|
|
|
;
|
|
|
|
; vXi16
|
|
|
|
;
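; The narrow cases below (<2 x i16> and <4 x i16>) are legalized by
; zero-extending their elements into wider lanes, so they reuse the unsigned
; compare-and-select and pminu* patterns from the i64/i32 reductions above.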
|
|
|
|
|
2018-12-05 14:29:44 +08:00
|
|
|
define i16 @test_v2i16(<2 x i16> %a0) {
; SSE2-LABEL: test_v2i16:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [65535,0,0,0,65535,0,0,0]
; SSE2-NEXT: pand %xmm0, %xmm1
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm1, %xmm3
; SSE2-NEXT: por %xmm2, %xmm3
; SSE2-NEXT: por %xmm0, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm4
; SSE2-NEXT: pcmpgtd %xmm3, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm3, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE2-NEXT: pand %xmm5, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
; SSE2-NEXT: por %xmm2, %xmm3
; SSE2-NEXT: pand %xmm3, %xmm1
; SSE2-NEXT: pandn %xmm0, %xmm3
; SSE2-NEXT: por %xmm1, %xmm3
; SSE2-NEXT: movd %xmm3, %eax
; SSE2-NEXT: # kill: def $ax killed $ax killed $eax
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v2i16:
; SSE41: # %bb.0:
; SSE41-NEXT: pxor %xmm1, %xmm1
; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3],xmm0[4],xmm1[5,6,7]
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; SSE41-NEXT: pmovzxwq {{.*#+}} xmm2 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [2147483648,2147483648]
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: por %xmm3, %xmm0
; SSE41-NEXT: por %xmm2, %xmm3
; SSE41-NEXT: movdqa %xmm3, %xmm4
; SSE41-NEXT: pcmpeqd %xmm0, %xmm4
; SSE41-NEXT: pcmpgtd %xmm0, %xmm3
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,0,2,2]
; SSE41-NEXT: pand %xmm4, %xmm0
; SSE41-NEXT: por %xmm3, %xmm0
; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm2
; SSE41-NEXT: movd %xmm2, %eax
; SSE41-NEXT: # kill: def $ax killed $ax killed $eax
; SSE41-NEXT: retq
;
; AVX-LABEL: test_v2i16:
; AVX: # %bb.0:
; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3],xmm0[4],xmm1[5,6,7]
; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; AVX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
; AVX-NEXT: vblendvpd %xmm2, %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovd %xmm0, %eax
; AVX-NEXT: # kill: def $ax killed $ax killed $eax
; AVX-NEXT: retq
;
; AVX512BW-LABEL: test_v2i16:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512BW-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3],xmm0[4],xmm1[5,6,7]
; AVX512BW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX512BW-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; AVX512BW-NEXT: vpminuq %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT: vmovd %xmm0, %eax
; AVX512BW-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: test_v2i16:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VL-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3],xmm0[4],xmm1[5,6,7]
; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX512VL-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; AVX512VL-NEXT: vpminuq %xmm0, %xmm1, %xmm0
; AVX512VL-NEXT: vmovd %xmm0, %eax
; AVX512VL-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512VL-NEXT: retq
  %1 = call i16 @llvm.experimental.vector.reduce.umin.i16.v2i16(<2 x i16> %a0)
  ret i16 %1
}
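
; NOTE: In the v2i16 checks above, the two i16 elements are zero-extended into
; 64-bit lanes (pblendw with zero / pmovzxwq) so the reduction reuses the vXi64
; unsigned-min lowering: AVX512 can use vpminuq directly, SSE4.1 builds the
; 64-bit compare out of 32-bit ops, and AVX can use a plain signed vpcmpgtq
; because the zero-extended values never set the sign bit.
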
define i16 @test_v4i16(<4 x i16> %a0) {
; SSE2-LABEL: test_v4i16:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [65535,0,65535,0,65535,0,65535,0]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
; SSE2-NEXT: pand %xmm1, %xmm0
; SSE2-NEXT: pand {{.*}}(%rip), %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm3
; SSE2-NEXT: pcmpgtd %xmm0, %xmm3
; SSE2-NEXT: pand %xmm3, %xmm0
; SSE2-NEXT: pandn %xmm2, %xmm3
; SSE2-NEXT: por %xmm0, %xmm3
; SSE2-NEXT: pand %xmm3, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,2,3]
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: pcmpgtd %xmm1, %xmm2
; SSE2-NEXT: pand %xmm2, %xmm1
; SSE2-NEXT: pandn %xmm0, %xmm2
; SSE2-NEXT: por %xmm1, %xmm2
; SSE2-NEXT: movd %xmm2, %eax
; SSE2-NEXT: # kill: def $ax killed $ax killed $eax
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v4i16:
; SSE41: # %bb.0:
; SSE41-NEXT: pxor %xmm1, %xmm1
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0],xmm1[1],xmm2[2],xmm1[3,4,5,6,7]
; SSE41-NEXT: pminud %xmm0, %xmm2
; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3],xmm2[4],xmm1[5],xmm2[6],xmm1[7]
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,2,3]
; SSE41-NEXT: pmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; SSE41-NEXT: pminud %xmm1, %xmm0
; SSE41-NEXT: movd %xmm0, %eax
; SSE41-NEXT: # kill: def $ax killed $ax killed $eax
; SSE41-NEXT: retq
;
; AVX-LABEL: test_v4i16:
; AVX: # %bb.0:
; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpblendw {{.*#+}} xmm2 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3,4,5,6,7]
; AVX-NEXT: vpminud %xmm0, %xmm2, %xmm0
; AVX-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; AVX-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; AVX-NEXT: vpminud %xmm0, %xmm1, %xmm0
; AVX-NEXT: vmovd %xmm0, %eax
; AVX-NEXT: # kill: def $ax killed $ax killed $eax
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v4i16:
; AVX512: # %bb.0:
; AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512-NEXT: vpblendw {{.*#+}} xmm2 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
; AVX512-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX512-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3,4,5,6,7]
; AVX512-NEXT: vpminud %xmm0, %xmm2, %xmm0
; AVX512-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
; AVX512-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; AVX512-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; AVX512-NEXT: vpminud %xmm0, %xmm1, %xmm0
; AVX512-NEXT: vmovd %xmm0, %eax
; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512-NEXT: retq
  %1 = call i16 @llvm.experimental.vector.reduce.umin.i16.v4i16(<4 x i16> %a0)
  ret i16 %1
}
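
; NOTE: For v4i16 the checks above zero-extend the lanes to 32 bits, so SSE4.1,
; AVX and AVX512 can reduce with pminud; SSE2 has no pminud and instead selects
; the smaller element with a pcmpgtd/pand/pandn/por sequence, which is safe here
; because the zero-extended values never set the sign bit.
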
define i16 @test_v8i16(<8 x i16> %a0) {
; SSE2-LABEL: test_v8i16:
; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
; SSE2-NEXT: pxor %xmm2, %xmm0
; SSE2-NEXT: pxor %xmm2, %xmm1
; SSE2-NEXT: pminsw %xmm0, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: pxor %xmm2, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; SSE2-NEXT: pxor %xmm2, %xmm0
; SSE2-NEXT: pminsw %xmm1, %xmm0
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: pxor %xmm2, %xmm1
; SSE2-NEXT: psrld $16, %xmm1
; SSE2-NEXT: pxor %xmm2, %xmm1
; SSE2-NEXT: pminsw %xmm0, %xmm1
; SSE2-NEXT: movd %xmm1, %eax
; SSE2-NEXT: xorl $32768, %eax # imm = 0x8000
; SSE2-NEXT: # kill: def $ax killed $ax killed $eax
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v8i16:
; SSE41: # %bb.0:
; SSE41-NEXT: phminposuw %xmm0, %xmm0
; SSE41-NEXT: movd %xmm0, %eax
; SSE41-NEXT: # kill: def $ax killed $ax killed $eax
; SSE41-NEXT: retq
;
; AVX-LABEL: test_v8i16:
; AVX: # %bb.0:
; AVX-NEXT: vphminposuw %xmm0, %xmm0
; AVX-NEXT: vmovd %xmm0, %eax
; AVX-NEXT: # kill: def $ax killed $ax killed $eax
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v8i16:
; AVX512: # %bb.0:
; AVX512-NEXT: vphminposuw %xmm0, %xmm0
; AVX512-NEXT: vmovd %xmm0, %eax
; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512-NEXT: retq
  %1 = call i16 @llvm.experimental.vector.reduce.umin.i16.v8i16(<8 x i16> %a0)
  ret i16 %1
}
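
; NOTE: v8i16 is the natural width. SSE2 has no unsigned word min, so the checks
; above bias every lane by 0x8000 (pxor), reduce with the signed pminsw, and
; undo the bias on the scalar result (xorl $32768). SSE4.1 and later get the
; whole horizontal unsigned minimum from a single phminposuw.
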
define i16 @test_v16i16(<16 x i16> %a0) {
; SSE2-LABEL: test_v16i16:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
; SSE2-NEXT: pxor %xmm2, %xmm1
; SSE2-NEXT: pxor %xmm2, %xmm0
; SSE2-NEXT: pminsw %xmm1, %xmm0
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: pxor %xmm2, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
; SSE2-NEXT: pxor %xmm2, %xmm1
; SSE2-NEXT: pminsw %xmm0, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: pxor %xmm2, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; SSE2-NEXT: pxor %xmm2, %xmm0
; SSE2-NEXT: pminsw %xmm1, %xmm0
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: pxor %xmm2, %xmm1
; SSE2-NEXT: psrld $16, %xmm1
; SSE2-NEXT: pxor %xmm2, %xmm1
; SSE2-NEXT: pminsw %xmm0, %xmm1
; SSE2-NEXT: movd %xmm1, %eax
; SSE2-NEXT: xorl $32768, %eax # imm = 0x8000
; SSE2-NEXT: # kill: def $ax killed $ax killed $eax
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v16i16:
; SSE41: # %bb.0:
; SSE41-NEXT: pminuw %xmm1, %xmm0
; SSE41-NEXT: phminposuw %xmm0, %xmm0
; SSE41-NEXT: movd %xmm0, %eax
; SSE41-NEXT: # kill: def $ax killed $ax killed $eax
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v16i16:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpminuw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vphminposuw %xmm0, %xmm0
; AVX1-NEXT: vmovd %xmm0, %eax
; AVX1-NEXT: # kill: def $ax killed $ax killed $eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v16i16:
; AVX2: # %bb.0:
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpminuw %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vphminposuw %xmm0, %xmm0
; AVX2-NEXT: vmovd %xmm0, %eax
; AVX2-NEXT: # kill: def $ax killed $ax killed $eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v16i16:
; AVX512: # %bb.0:
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512-NEXT: vpminuw %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vphminposuw %xmm0, %xmm0
; AVX512-NEXT: vmovd %xmm0, %eax
; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
  %1 = call i16 @llvm.experimental.vector.reduce.umin.i16.v16i16(<16 x i16> %a0)
  ret i16 %1
}
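
; NOTE: For 256-bit inputs the expected code first narrows to 128 bits
; (vextractf128/vextracti128 + vpminuw, or pminuw on the two SSE registers) and
; then finishes with the same phminposuw pattern as v8i16.
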
define i16 @test_v32i16(<32 x i16> %a0) {
; SSE2-LABEL: test_v32i16:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [32768,32768,32768,32768,32768,32768,32768,32768]
; SSE2-NEXT: pxor %xmm4, %xmm3
; SSE2-NEXT: pxor %xmm4, %xmm1
; SSE2-NEXT: pminsw %xmm3, %xmm1
; SSE2-NEXT: pxor %xmm4, %xmm2
; SSE2-NEXT: pxor %xmm4, %xmm0
; SSE2-NEXT: pminsw %xmm2, %xmm0
; SSE2-NEXT: pminsw %xmm1, %xmm0
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: pxor %xmm4, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
; SSE2-NEXT: pxor %xmm4, %xmm1
; SSE2-NEXT: pminsw %xmm0, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: pxor %xmm4, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; SSE2-NEXT: pxor %xmm4, %xmm0
; SSE2-NEXT: pminsw %xmm1, %xmm0
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: pxor %xmm4, %xmm1
; SSE2-NEXT: psrld $16, %xmm1
; SSE2-NEXT: pxor %xmm4, %xmm1
; SSE2-NEXT: pminsw %xmm0, %xmm1
; SSE2-NEXT: movd %xmm1, %eax
; SSE2-NEXT: xorl $32768, %eax # imm = 0x8000
; SSE2-NEXT: # kill: def $ax killed $ax killed $eax
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v32i16:
; SSE41: # %bb.0:
; SSE41-NEXT: pminuw %xmm3, %xmm1
; SSE41-NEXT: pminuw %xmm2, %xmm0
; SSE41-NEXT: pminuw %xmm1, %xmm0
; SSE41-NEXT: phminposuw %xmm0, %xmm0
; SSE41-NEXT: movd %xmm0, %eax
; SSE41-NEXT: # kill: def $ax killed $ax killed $eax
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v32i16:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpminuw %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpminuw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpminuw %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vphminposuw %xmm0, %xmm0
; AVX1-NEXT: vmovd %xmm0, %eax
; AVX1-NEXT: # kill: def $ax killed $ax killed $eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v32i16:
; AVX2: # %bb.0:
; AVX2-NEXT: vpminuw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpminuw %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vphminposuw %xmm0, %xmm0
; AVX2-NEXT: vmovd %xmm0, %eax
; AVX2-NEXT: # kill: def $ax killed $ax killed $eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v32i16:
; AVX512: # %bb.0:
; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512-NEXT: vpminuw %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512-NEXT: vpminuw %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vphminposuw %xmm0, %xmm0
; AVX512-NEXT: vmovd %xmm0, %eax
; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
  %1 = call i16 @llvm.experimental.vector.reduce.umin.i16.v32i16(<32 x i16> %a0)
  ret i16 %1
}
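
; NOTE: The 512-bit case follows the same shape one level up: AVX512 splits the
; zmm input with vextracti64x4 and then vextracti128, reducing with vpminuw at
; each step before the final phminposuw, while SSE2 folds all four 128-bit
; pieces together with the biased pminsw trick.
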
define i16 @test_v64i16(<64 x i16> %a0) {
|
|
|
|
; SSE2-LABEL: test_v64i16:
|
|
|
|
; SSE2: # %bb.0:
|
|
|
|
; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [32768,32768,32768,32768,32768,32768,32768,32768]
|
|
|
|
; SSE2-NEXT: pxor %xmm8, %xmm6
|
|
|
|
; SSE2-NEXT: pxor %xmm8, %xmm2
|
|
|
|
; SSE2-NEXT: pminsw %xmm6, %xmm2
|
|
|
|
; SSE2-NEXT: pxor %xmm8, %xmm4
|
[X86] Move promotion of vector and/or/xor from legalization to DAG combine
Summary:
I've noticed that the bitcasts we introduce for these make computeKnownBits and computeNumSignBits not work well in LegalizeVectorOps. LegalizeVectorOps legalizes bottom up while LegalizeDAG legalizes top down. The bottom up strategy for LegalizeVectorOps means operands are legalized before their uses. So we promote and/or/xor before we legalize the operands that use them making computeKnownBits/computeNumSignBits in places like LowerTruncate suboptimal. I looked at changing LegalizeVectorOps to be top down as well, but that was more disruptive and caused some regressions. I also looked at just moving promotion of binops to LegalizeDAG, but that had a few issues one around matching AND,ANDN,OR into VSELECT because I had to create ANDN as vXi64, but the other nodes hadn't legalized yet, I didn't look too hard at fixing that.
This patch seems to produce better results overall than my other attempts. We now form broadcasts of constants better in some cases. For at least some of them the AND was being introduced in LegalizeDAG, promoted to vXi64, and the BUILD_VECTOR was also legalized there. I think we got bad ordering of that. Now the promotion is out of the legalizer so we handle this better.
In the longer term I think we really should evaluate whether we should be doing this promotion at all. It's really there to reduce isel pattern count, but I'm wondering if we'd be better served just eating the pattern cost or doing C++ based isel for vector and/or/xor in X86ISelDAGToDAG. The masked and/or/xor will definitely be difficult in patterns if a bitcast gets between the vselect and the and/or/xor node. That becomes a lot of permutations to cover.
Reviewers: RKSimon, spatel
Reviewed By: RKSimon
Subscribers: llvm-commits
Differential Revision: https://reviews.llvm.org/D53107
llvm-svn: 344487
2018-10-15 09:51:58 +08:00
|
|
|
; SSE2-NEXT: pxor %xmm8, %xmm0
|
|
|
|
; SSE2-NEXT: pminsw %xmm4, %xmm0
|
2018-04-06 01:25:40 +08:00
|
|
|
; SSE2-NEXT: pminsw %xmm2, %xmm0
|
[X86] Move promotion of vector and/or/xor from legalization to DAG combine
Summary:
I've noticed that the bitcasts we introduce for these make computeKnownBits and computeNumSignBits not work well in LegalizeVectorOps. LegalizeVectorOps legalizes bottom up while LegalizeDAG legalizes top down. The bottom up strategy for LegalizeVectorOps means operands are legalized before their uses. So we promote and/or/xor before we legalize the operands that use them making computeKnownBits/computeNumSignBits in places like LowerTruncate suboptimal. I looked at changing LegalizeVectorOps to be top down as well, but that was more disruptive and caused some regressions. I also looked at just moving promotion of binops to LegalizeDAG, but that had a few issues one around matching AND,ANDN,OR into VSELECT because I had to create ANDN as vXi64, but the other nodes hadn't legalized yet, I didn't look too hard at fixing that.
This patch seems to produce better results overall than my other attempts. We now form broadcasts of constants better in some cases. For at least some of them the AND was being introduced in LegalizeDAG, promoted to vXi64, and the BUILD_VECTOR was also legalized there. I think we got bad ordering of that. Now the promotion is out of the legalizer so we handle this better.
In the longer term I think we really should evaluate whether we should be doing this promotion at all. It's really there to reduce isel pattern count, but I'm wondering if we'd be better served just eating the pattern cost or doing C++ based isel for vector and/or/xor in X86ISelDAGToDAG. The masked and/or/xor will definitely be difficult in patterns if a bitcast gets between the vselect and the and/or/xor node. That becomes a lot of permutations to cover.
Reviewers: RKSimon, spatel
Reviewed By: RKSimon
Subscribers: llvm-commits
Differential Revision: https://reviews.llvm.org/D53107
llvm-svn: 344487
2018-10-15 09:51:58 +08:00
|
|
|
; SSE2-NEXT: pxor %xmm8, %xmm7
|
|
|
|
; SSE2-NEXT: pxor %xmm8, %xmm3
|
|
|
|
; SSE2-NEXT: pminsw %xmm7, %xmm3
|
|
|
|
; SSE2-NEXT: pxor %xmm8, %xmm5
|
|
|
|
; SSE2-NEXT: pxor %xmm8, %xmm1
|
|
|
|
; SSE2-NEXT: pminsw %xmm5, %xmm1
|
2018-04-06 01:25:40 +08:00
|
|
|
; SSE2-NEXT: pminsw %xmm3, %xmm1
|
[X86] Move promotion of vector and/or/xor from legalization to DAG combine
Summary:
I've noticed that the bitcasts we introduce for these make computeKnownBits and computeNumSignBits not work well in LegalizeVectorOps. LegalizeVectorOps legalizes bottom up while LegalizeDAG legalizes top down. The bottom up strategy for LegalizeVectorOps means operands are legalized before their uses. So we promote and/or/xor before we legalize the operands that use them making computeKnownBits/computeNumSignBits in places like LowerTruncate suboptimal. I looked at changing LegalizeVectorOps to be top down as well, but that was more disruptive and caused some regressions. I also looked at just moving promotion of binops to LegalizeDAG, but that had a few issues one around matching AND,ANDN,OR into VSELECT because I had to create ANDN as vXi64, but the other nodes hadn't legalized yet, I didn't look too hard at fixing that.
This patch seems to produce better results overall than my other attempts. We now form broadcasts of constants better in some cases. For at least some of them the AND was being introduced in LegalizeDAG, promoted to vXi64, and the BUILD_VECTOR was also legalized there. I think we got bad ordering of that. Now the promotion is out of the legalizer so we handle this better.
In the longer term I think we really should evaluate whether we should be doing this promotion at all. It's really there to reduce isel pattern count, but I'm wondering if we'd be better served just eating the pattern cost or doing C++ based isel for vector and/or/xor in X86ISelDAGToDAG. The masked and/or/xor will definitely be difficult in patterns if a bitcast gets between the vselect and the and/or/xor node. That becomes a lot of permutations to cover.
Reviewers: RKSimon, spatel
Reviewed By: RKSimon
Subscribers: llvm-commits
Differential Revision: https://reviews.llvm.org/D53107
llvm-svn: 344487
2018-10-15 09:51:58 +08:00
|
|
|
; SSE2-NEXT: pminsw %xmm0, %xmm1
|
|
|
|
; SSE2-NEXT: movdqa %xmm1, %xmm0
|
2018-04-06 01:25:40 +08:00
|
|
|
; SSE2-NEXT: pxor %xmm8, %xmm0
|
[X86] Move promotion of vector and/or/xor from legalization to DAG combine
Summary:
I've noticed that the bitcasts we introduce for these make computeKnownBits and computeNumSignBits not work well in LegalizeVectorOps. LegalizeVectorOps legalizes bottom up while LegalizeDAG legalizes top down. The bottom up strategy for LegalizeVectorOps means operands are legalized before their uses. So we promote and/or/xor before we legalize the operands that use them making computeKnownBits/computeNumSignBits in places like LowerTruncate suboptimal. I looked at changing LegalizeVectorOps to be top down as well, but that was more disruptive and caused some regressions. I also looked at just moving promotion of binops to LegalizeDAG, but that had a few issues one around matching AND,ANDN,OR into VSELECT because I had to create ANDN as vXi64, but the other nodes hadn't legalized yet, I didn't look too hard at fixing that.
This patch seems to produce better results overall than my other attempts. We now form broadcasts of constants better in some cases. For at least some of them the AND was being introduced in LegalizeDAG, promoted to vXi64, and the BUILD_VECTOR was also legalized there. I think we got bad ordering of that. Now the promotion is out of the legalizer so we handle this better.
In the longer term I think we really should evaluate whether we should be doing this promotion at all. It's really there to reduce isel pattern count, but I'm wondering if we'd be better served just eating the pattern cost or doing C++ based isel for vector and/or/xor in X86ISelDAGToDAG. The masked and/or/xor will definitely be difficult in patterns if a bitcast gets between the vselect and the and/or/xor node. That becomes a lot of permutations to cover.
Reviewers: RKSimon, spatel
Reviewed By: RKSimon
Subscribers: llvm-commits
Differential Revision: https://reviews.llvm.org/D53107
llvm-svn: 344487
2018-10-15 09:51:58 +08:00
|
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
|
2018-04-06 01:25:40 +08:00
|
|
|
; SSE2-NEXT: pxor %xmm8, %xmm0
|
[X86] Move promotion of vector and/or/xor from legalization to DAG combine
Summary:
I've noticed that the bitcasts we introduce for these make computeKnownBits and computeNumSignBits not work well in LegalizeVectorOps. LegalizeVectorOps legalizes bottom up while LegalizeDAG legalizes top down. The bottom up strategy for LegalizeVectorOps means operands are legalized before their uses. So we promote and/or/xor before we legalize the operands that use them making computeKnownBits/computeNumSignBits in places like LowerTruncate suboptimal. I looked at changing LegalizeVectorOps to be top down as well, but that was more disruptive and caused some regressions. I also looked at just moving promotion of binops to LegalizeDAG, but that had a few issues one around matching AND,ANDN,OR into VSELECT because I had to create ANDN as vXi64, but the other nodes hadn't legalized yet, I didn't look too hard at fixing that.
This patch seems to produce better results overall than my other attempts. We now form broadcasts of constants better in some cases. For at least some of them the AND was being introduced in LegalizeDAG, promoted to vXi64, and the BUILD_VECTOR was also legalized there. I think we got bad ordering of that. Now the promotion is out of the legalizer so we handle this better.
In the longer term I think we really should evaluate whether we should be doing this promotion at all. It's really there to reduce isel pattern count, but I'm wondering if we'd be better served just eating the pattern cost or doing C++ based isel for vector and/or/xor in X86ISelDAGToDAG. The masked and/or/xor will definitely be difficult in patterns if a bitcast gets between the vselect and the and/or/xor node. That becomes a lot of permutations to cover.
Reviewers: RKSimon, spatel
Reviewed By: RKSimon
Subscribers: llvm-commits
Differential Revision: https://reviews.llvm.org/D53107
llvm-svn: 344487
2018-10-15 09:51:58 +08:00
|
|
|
; SSE2-NEXT: pminsw %xmm1, %xmm0
|
|
|
|
; SSE2-NEXT: movdqa %xmm0, %xmm1
|
2018-04-06 01:25:40 +08:00
|
|
|
; SSE2-NEXT: pxor %xmm8, %xmm1
|
[X86] Move promotion of vector and/or/xor from legalization to DAG combine
Summary:
I've noticed that the bitcasts we introduce for these make computeKnownBits and computeNumSignBits not work well in LegalizeVectorOps. LegalizeVectorOps legalizes bottom up while LegalizeDAG legalizes top down. The bottom up strategy for LegalizeVectorOps means operands are legalized before their uses. So we promote and/or/xor before we legalize the operands that use them making computeKnownBits/computeNumSignBits in places like LowerTruncate suboptimal. I looked at changing LegalizeVectorOps to be top down as well, but that was more disruptive and caused some regressions. I also looked at just moving promotion of binops to LegalizeDAG, but that had a few issues one around matching AND,ANDN,OR into VSELECT because I had to create ANDN as vXi64, but the other nodes hadn't legalized yet, I didn't look too hard at fixing that.
This patch seems to produce better results overall than my other attempts. We now form broadcasts of constants better in some cases. For at least some of them the AND was being introduced in LegalizeDAG, promoted to vXi64, and the BUILD_VECTOR was also legalized there. I think we got bad ordering of that. Now the promotion is out of the legalizer so we handle this better.
In the longer term I think we really should evaluate whether we should be doing this promotion at all. It's really there to reduce isel pattern count, but I'm wondering if we'd be better served just eating the pattern cost or doing C++ based isel for vector and/or/xor in X86ISelDAGToDAG. The masked and/or/xor will definitely be difficult in patterns if a bitcast gets between the vselect and the and/or/xor node. That becomes a lot of permutations to cover.
Reviewers: RKSimon, spatel
Reviewed By: RKSimon
Subscribers: llvm-commits
Differential Revision: https://reviews.llvm.org/D53107
llvm-svn: 344487
2018-10-15 09:51:58 +08:00
|
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
|
2018-04-06 01:25:40 +08:00
|
|
|
; SSE2-NEXT: pxor %xmm8, %xmm1
|
[X86] Move promotion of vector and/or/xor from legalization to DAG combine
Summary:
I've noticed that the bitcasts we introduce for these make computeKnownBits and computeNumSignBits not work well in LegalizeVectorOps. LegalizeVectorOps legalizes bottom up while LegalizeDAG legalizes top down. The bottom up strategy for LegalizeVectorOps means operands are legalized before their uses. So we promote and/or/xor before we legalize the operands that use them making computeKnownBits/computeNumSignBits in places like LowerTruncate suboptimal. I looked at changing LegalizeVectorOps to be top down as well, but that was more disruptive and caused some regressions. I also looked at just moving promotion of binops to LegalizeDAG, but that had a few issues one around matching AND,ANDN,OR into VSELECT because I had to create ANDN as vXi64, but the other nodes hadn't legalized yet, I didn't look too hard at fixing that.
This patch seems to produce better results overall than my other attempts. We now form broadcasts of constants better in some cases. For at least some of them the AND was being introduced in LegalizeDAG, promoted to vXi64, and the BUILD_VECTOR was also legalized there. I think we got bad ordering of that. Now the promotion is out of the legalizer so we handle this better.
In the longer term I think we really should evaluate whether we should be doing this promotion at all. It's really there to reduce isel pattern count, but I'm wondering if we'd be better served just eating the pattern cost or doing C++ based isel for vector and/or/xor in X86ISelDAGToDAG. The masked and/or/xor will definitely be difficult in patterns if a bitcast gets between the vselect and the and/or/xor node. That becomes a lot of permutations to cover.
Reviewers: RKSimon, spatel
Reviewed By: RKSimon
Subscribers: llvm-commits
Differential Revision: https://reviews.llvm.org/D53107
llvm-svn: 344487
2018-10-15 09:51:58 +08:00
|
|
|
; SSE2-NEXT: pminsw %xmm0, %xmm1
|
|
|
|
; SSE2-NEXT: movdqa %xmm1, %xmm0
|
2018-04-06 01:25:40 +08:00
|
|
|
; SSE2-NEXT: pxor %xmm8, %xmm0
|
2018-10-15 09:51:58 +08:00
|
|
|
; SSE2-NEXT: psrld $16, %xmm0
|
2018-04-06 01:25:40 +08:00
|
|
|
; SSE2-NEXT: pxor %xmm8, %xmm0
|
2018-10-15 09:51:58 +08:00
|
|
|
; SSE2-NEXT: pminsw %xmm1, %xmm0
|
|
|
|
; SSE2-NEXT: movd %xmm0, %eax
|
2019-01-04 05:31:16 +08:00
|
|
|
; SSE2-NEXT: xorl $32768, %eax # imm = 0x8000
|
2018-04-06 01:25:40 +08:00
|
|
|
; SSE2-NEXT: # kill: def $ax killed $ax killed $eax
|
|
|
|
; SSE2-NEXT: retq
|
|
|
|
;
|
|
|
|
; SSE41-LABEL: test_v64i16:
|
|
|
|
; SSE41: # %bb.0:
|
|
|
|
; SSE41-NEXT: pminuw %xmm7, %xmm3
|
|
|
|
; SSE41-NEXT: pminuw %xmm5, %xmm1
|
|
|
|
; SSE41-NEXT: pminuw %xmm3, %xmm1
|
|
|
|
; SSE41-NEXT: pminuw %xmm6, %xmm2
|
|
|
|
; SSE41-NEXT: pminuw %xmm4, %xmm0
|
|
|
|
; SSE41-NEXT: pminuw %xmm2, %xmm0
|
|
|
|
; SSE41-NEXT: pminuw %xmm1, %xmm0
|
|
|
|
; SSE41-NEXT: phminposuw %xmm0, %xmm0
|
|
|
|
; SSE41-NEXT: movd %xmm0, %eax
|
|
|
|
; SSE41-NEXT: # kill: def $ax killed $ax killed $eax
|
|
|
|
; SSE41-NEXT: retq
|
|
|
|
;
|
|
|
|
; AVX1-LABEL: test_v64i16:
|
|
|
|
; AVX1: # %bb.0:
|
|
|
|
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
|
|
|
|
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
|
|
|
|
; AVX1-NEXT: vpminuw %xmm4, %xmm5, %xmm4
|
|
|
|
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm5
|
|
|
|
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm6
|
|
|
|
; AVX1-NEXT: vpminuw %xmm5, %xmm6, %xmm5
|
|
|
|
; AVX1-NEXT: vpminuw %xmm4, %xmm5, %xmm4
|
|
|
|
; AVX1-NEXT: vpminuw %xmm3, %xmm1, %xmm1
|
|
|
|
; AVX1-NEXT: vpminuw %xmm2, %xmm0, %xmm0
|
|
|
|
; AVX1-NEXT: vpminuw %xmm1, %xmm0, %xmm0
|
|
|
|
; AVX1-NEXT: vpminuw %xmm4, %xmm0, %xmm0
|
|
|
|
; AVX1-NEXT: vphminposuw %xmm0, %xmm0
|
|
|
|
; AVX1-NEXT: vmovd %xmm0, %eax
|
|
|
|
; AVX1-NEXT: # kill: def $ax killed $ax killed $eax
|
|
|
|
; AVX1-NEXT: vzeroupper
|
|
|
|
; AVX1-NEXT: retq
|
|
|
|
;
|
|
|
|
; AVX2-LABEL: test_v64i16:
|
|
|
|
; AVX2: # %bb.0:
|
|
|
|
; AVX2-NEXT: vpminuw %ymm3, %ymm1, %ymm1
|
|
|
|
; AVX2-NEXT: vpminuw %ymm2, %ymm0, %ymm0
|
|
|
|
; AVX2-NEXT: vpminuw %ymm1, %ymm0, %ymm0
|
|
|
|
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
|
|
|
|
; AVX2-NEXT: vpminuw %xmm1, %xmm0, %xmm0
|
|
|
|
; AVX2-NEXT: vphminposuw %xmm0, %xmm0
|
|
|
|
; AVX2-NEXT: vmovd %xmm0, %eax
|
|
|
|
; AVX2-NEXT: # kill: def $ax killed $ax killed $eax
|
|
|
|
; AVX2-NEXT: vzeroupper
|
|
|
|
; AVX2-NEXT: retq
|
|
|
|
;
|
|
|
|
; AVX512-LABEL: test_v64i16:
|
|
|
|
; AVX512: # %bb.0:
|
|
|
|
; AVX512-NEXT: vpminuw %zmm1, %zmm0, %zmm0
|
|
|
|
; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
|
|
|
|
; AVX512-NEXT: vpminuw %ymm1, %ymm0, %ymm0
|
|
|
|
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
|
|
|
|
; AVX512-NEXT: vpminuw %xmm1, %xmm0, %xmm0
|
|
|
|
; AVX512-NEXT: vphminposuw %xmm0, %xmm0
|
|
|
|
; AVX512-NEXT: vmovd %xmm0, %eax
|
|
|
|
; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
|
|
|
|
; AVX512-NEXT: vzeroupper
|
|
|
|
; AVX512-NEXT: retq
|
|
|
|
%1 = call i16 @llvm.experimental.vector.reduce.umin.i16.v64i16(<64 x i16> %a0)
|
|
|
|
ret i16 %1
|
|
|
|
}
|
|
|
|
|
|
|
|
;
|
|
|
|
; vXi8
|
|
|
|
;
|
|
|
|
|
2018-12-05 14:29:44 +08:00
|
|
|
define i8 @test_v2i8(<2 x i8> %a0) {
|
|
|
|
; SSE2-LABEL: test_v2i8:
|
|
|
|
; SSE2: # %bb.0:
|
|
|
|
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
|
|
|
|
; SSE2-NEXT: pand %xmm0, %xmm1
|
|
|
|
; SSE2-NEXT: pxor %xmm2, %xmm2
|
|
|
|
; SSE2-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
|
|
|
|
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
|
|
|
|
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
|
|
|
|
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648]
|
|
|
|
; SSE2-NEXT: movdqa %xmm1, %xmm3
|
|
|
|
; SSE2-NEXT: por %xmm2, %xmm3
|
|
|
|
; SSE2-NEXT: por %xmm0, %xmm2
|
|
|
|
; SSE2-NEXT: movdqa %xmm2, %xmm4
|
|
|
|
; SSE2-NEXT: pcmpgtd %xmm3, %xmm4
|
|
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
|
|
|
|
; SSE2-NEXT: pcmpeqd %xmm3, %xmm2
|
|
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
|
|
|
|
; SSE2-NEXT: pand %xmm5, %xmm2
|
|
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
|
|
|
|
; SSE2-NEXT: por %xmm2, %xmm3
|
|
|
|
; SSE2-NEXT: pand %xmm3, %xmm1
|
|
|
|
; SSE2-NEXT: pandn %xmm0, %xmm3
|
|
|
|
; SSE2-NEXT: por %xmm1, %xmm3
|
|
|
|
; SSE2-NEXT: movd %xmm3, %eax
|
|
|
|
; SSE2-NEXT: # kill: def $al killed $al killed $eax
|
|
|
|
; SSE2-NEXT: retq
|
|
|
|
;
|
|
|
|
; SSE41-LABEL: test_v2i8:
|
|
|
|
; SSE41: # %bb.0:
|
|
|
|
; SSE41-NEXT: movdqa %xmm0, %xmm1
|
|
|
|
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
|
|
|
|
; SSE41-NEXT: pand {{.*}}(%rip), %xmm1
|
|
|
|
; SSE41-NEXT: pmovzxbq {{.*#+}} xmm2 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
|
2019-01-11 03:05:34 +08:00
|
|
|
; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [2147483648,2147483648]
|
|
|
|
; SSE41-NEXT: movdqa %xmm1, %xmm0
|
|
|
|
; SSE41-NEXT: por %xmm3, %xmm0
|
|
|
|
; SSE41-NEXT: por %xmm2, %xmm3
|
|
|
|
; SSE41-NEXT: movdqa %xmm3, %xmm4
|
|
|
|
; SSE41-NEXT: pcmpeqd %xmm0, %xmm4
|
|
|
|
; SSE41-NEXT: pcmpgtd %xmm0, %xmm3
|
|
|
|
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,0,2,2]
|
|
|
|
; SSE41-NEXT: pand %xmm4, %xmm0
|
|
|
|
; SSE41-NEXT: por %xmm3, %xmm0
|
2018-12-05 14:29:44 +08:00
|
|
|
; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm2
|
|
|
|
; SSE41-NEXT: pextrb $0, %xmm2, %eax
|
|
|
|
; SSE41-NEXT: # kill: def $al killed $al killed $eax
|
|
|
|
; SSE41-NEXT: retq
|
|
|
|
;
|
|
|
|
; AVX-LABEL: test_v2i8:
|
|
|
|
; AVX: # %bb.0:
|
|
|
|
; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm1
|
|
|
|
; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
|
|
|
|
; AVX-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
|
|
|
|
; AVX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
|
|
|
|
; AVX-NEXT: vblendvpd %xmm2, %xmm1, %xmm0, %xmm0
|
|
|
|
; AVX-NEXT: vpextrb $0, %xmm0, %eax
|
|
|
|
; AVX-NEXT: # kill: def $al killed $al killed $eax
|
|
|
|
; AVX-NEXT: retq
|
|
|
|
;
|
|
|
|
; AVX512BW-LABEL: test_v2i8:
|
|
|
|
; AVX512BW: # %bb.0:
|
|
|
|
; AVX512BW-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm1
|
|
|
|
; AVX512BW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
|
|
|
|
; AVX512BW-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
|
|
|
|
; AVX512BW-NEXT: vpminuq %zmm0, %zmm1, %zmm0
|
|
|
|
; AVX512BW-NEXT: vpextrb $0, %xmm0, %eax
|
|
|
|
; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
|
|
|
|
; AVX512BW-NEXT: vzeroupper
|
|
|
|
; AVX512BW-NEXT: retq
|
|
|
|
;
|
|
|
|
; AVX512VL-LABEL: test_v2i8:
|
|
|
|
; AVX512VL: # %bb.0:
|
|
|
|
; AVX512VL-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm1
|
|
|
|
; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
|
|
|
|
; AVX512VL-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
|
|
|
|
; AVX512VL-NEXT: vpminuq %xmm0, %xmm1, %xmm0
|
|
|
|
; AVX512VL-NEXT: vpextrb $0, %xmm0, %eax
|
|
|
|
; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
|
|
|
|
; AVX512VL-NEXT: retq
|
|
|
|
%1 = call i8 @llvm.experimental.vector.reduce.umin.i8.v2i8(<2 x i8> %a0)
|
|
|
|
ret i8 %1
|
|
|
|
}
|
|
|
|
|
|
|
|
define i8 @test_v4i8(<4 x i8> %a0) {
|
|
|
|
; SSE2-LABEL: test_v4i8:
|
|
|
|
; SSE2: # %bb.0:
|
|
|
|
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
|
|
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
|
|
|
|
; SSE2-NEXT: pand %xmm1, %xmm0
|
|
|
|
; SSE2-NEXT: pand {{.*}}(%rip), %xmm2
|
|
|
|
; SSE2-NEXT: movdqa %xmm2, %xmm3
|
|
|
|
; SSE2-NEXT: pcmpgtd %xmm0, %xmm3
|
|
|
|
; SSE2-NEXT: pand %xmm3, %xmm0
|
|
|
|
; SSE2-NEXT: pandn %xmm2, %xmm3
|
|
|
|
; SSE2-NEXT: por %xmm0, %xmm3
|
|
|
|
; SSE2-NEXT: pand %xmm3, %xmm1
|
|
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,2,3]
|
|
|
|
; SSE2-NEXT: pxor %xmm2, %xmm2
|
|
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
|
|
|
|
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
|
|
|
|
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
|
|
|
|
; SSE2-NEXT: movdqa %xmm0, %xmm2
|
|
|
|
; SSE2-NEXT: pcmpgtd %xmm1, %xmm2
|
|
|
|
; SSE2-NEXT: pand %xmm2, %xmm1
|
|
|
|
; SSE2-NEXT: pandn %xmm0, %xmm2
|
|
|
|
; SSE2-NEXT: por %xmm1, %xmm2
|
|
|
|
; SSE2-NEXT: movd %xmm2, %eax
|
|
|
|
; SSE2-NEXT: # kill: def $al killed $al killed $eax
|
|
|
|
; SSE2-NEXT: retq
|
|
|
|
;
|
|
|
|
; SSE41-LABEL: test_v4i8:
|
|
|
|
; SSE41: # %bb.0:
|
|
|
|
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
|
|
|
|
; SSE41-NEXT: movdqa %xmm0, %xmm2
|
|
|
|
; SSE41-NEXT: pand %xmm1, %xmm2
|
|
|
|
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[8],zero,zero,zero,xmm0[12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
|
|
|
|
; SSE41-NEXT: pminud %xmm2, %xmm0
|
|
|
|
; SSE41-NEXT: pand %xmm0, %xmm1
|
|
|
|
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
|
|
|
|
; SSE41-NEXT: pmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
|
|
|
|
; SSE41-NEXT: pminud %xmm1, %xmm0
|
|
|
|
; SSE41-NEXT: pextrb $0, %xmm0, %eax
|
|
|
|
; SSE41-NEXT: # kill: def $al killed $al killed $eax
|
|
|
|
; SSE41-NEXT: retq
|
|
|
|
;
|
|
|
|
; AVX1-LABEL: test_v4i8:
|
|
|
|
; AVX1: # %bb.0:
|
|
|
|
; AVX1-NEXT: vbroadcastss {{.*#+}} xmm1 = [3.57331108E-43,3.57331108E-43,3.57331108E-43,3.57331108E-43]
|
|
|
|
; AVX1-NEXT: vandps %xmm1, %xmm0, %xmm2
|
|
|
|
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[8],zero,zero,zero,xmm0[12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
|
|
|
|
; AVX1-NEXT: vpminud %xmm0, %xmm2, %xmm0
|
|
|
|
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm1
|
|
|
|
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
|
|
|
|
; AVX1-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
|
|
|
|
; AVX1-NEXT: vpminud %xmm0, %xmm1, %xmm0
|
|
|
|
; AVX1-NEXT: vpextrb $0, %xmm0, %eax
|
|
|
|
; AVX1-NEXT: # kill: def $al killed $al killed $eax
|
|
|
|
; AVX1-NEXT: retq
|
|
|
|
;
|
|
|
|
; AVX2-LABEL: test_v4i8:
|
|
|
|
; AVX2: # %bb.0:
|
|
|
|
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [255,255,255,255]
|
|
|
|
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm2
|
|
|
|
; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[8],zero,zero,zero,xmm0[12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
|
|
|
|
; AVX2-NEXT: vpminud %xmm0, %xmm2, %xmm0
|
|
|
|
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm1
|
|
|
|
; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
|
|
|
|
; AVX2-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
|
|
|
|
; AVX2-NEXT: vpminud %xmm0, %xmm1, %xmm0
|
|
|
|
; AVX2-NEXT: vpextrb $0, %xmm0, %eax
|
|
|
|
; AVX2-NEXT: # kill: def $al killed $al killed $eax
|
|
|
|
; AVX2-NEXT: retq
|
|
|
|
;
|
|
|
|
; AVX512-LABEL: test_v4i8:
|
|
|
|
; AVX512: # %bb.0:
|
|
|
|
; AVX512-NEXT: vpbroadcastd {{.*#+}} xmm1 = [255,255,255,255]
|
|
|
|
; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm2
|
|
|
|
; AVX512-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[8],zero,zero,zero,xmm0[12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
|
|
|
|
; AVX512-NEXT: vpminud %xmm0, %xmm2, %xmm0
|
|
|
|
; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm1
|
|
|
|
; AVX512-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
|
|
|
|
; AVX512-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
|
|
|
|
; AVX512-NEXT: vpminud %xmm0, %xmm1, %xmm0
|
|
|
|
; AVX512-NEXT: vpextrb $0, %xmm0, %eax
|
|
|
|
; AVX512-NEXT: # kill: def $al killed $al killed $eax
|
|
|
|
; AVX512-NEXT: retq
|
|
|
|
%1 = call i8 @llvm.experimental.vector.reduce.umin.i8.v4i8(<4 x i8> %a0)
|
|
|
|
ret i8 %1
|
|
|
|
}
|
|
|
|
|
|
|
|
define i8 @test_v8i8(<8 x i8> %a0) {
|
|
|
|
; SSE2-LABEL: test_v8i8:
|
|
|
|
; SSE2: # %bb.0:
|
|
|
|
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
|
|
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
|
|
|
|
; SSE2-NEXT: pand %xmm1, %xmm0
|
|
|
|
; SSE2-NEXT: pand {{.*}}(%rip), %xmm2
|
|
|
|
; SSE2-NEXT: pminsw %xmm0, %xmm2
|
|
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,2,3]
|
|
|
|
; SSE2-NEXT: pand %xmm1, %xmm2
|
|
|
|
; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
|
|
|
|
; SSE2-NEXT: pminsw %xmm2, %xmm0
|
|
|
|
; SSE2-NEXT: pand %xmm0, %xmm1
|
|
|
|
; SSE2-NEXT: psrld $16, %xmm0
|
|
|
|
; SSE2-NEXT: pxor %xmm2, %xmm2
|
|
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
|
|
|
|
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
|
|
|
|
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
|
|
|
|
; SSE2-NEXT: pminsw %xmm1, %xmm0
|
|
|
|
; SSE2-NEXT: movd %xmm0, %eax
|
|
|
|
; SSE2-NEXT: # kill: def $al killed $al killed $eax
|
|
|
|
; SSE2-NEXT: retq
|
|
|
|
;
|
|
|
|
; SSE41-LABEL: test_v8i8:
|
|
|
|
; SSE41: # %bb.0:
|
|
|
|
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
|
|
|
|
; SSE41-NEXT: movdqa %xmm0, %xmm2
|
|
|
|
; SSE41-NEXT: pand %xmm1, %xmm2
|
|
|
|
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[8],zero,xmm0[10],zero,xmm0[12],zero,xmm0[14],zero,zero,zero,zero,zero,zero,zero,zero,zero
|
|
|
|
; SSE41-NEXT: pminuw %xmm2, %xmm0
|
|
|
|
; SSE41-NEXT: movdqa %xmm0, %xmm2
|
|
|
|
; SSE41-NEXT: pand %xmm1, %xmm2
|
|
|
|
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[4],zero,xmm0[6],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
|
|
|
|
; SSE41-NEXT: pminuw %xmm2, %xmm0
|
|
|
|
; SSE41-NEXT: pand %xmm0, %xmm1
|
|
|
|
; SSE41-NEXT: psrld $16, %xmm0
|
|
|
|
; SSE41-NEXT: pmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
|
|
|
|
; SSE41-NEXT: pminuw %xmm1, %xmm0
|
|
|
|
; SSE41-NEXT: pextrb $0, %xmm0, %eax
|
|
|
|
; SSE41-NEXT: # kill: def $al killed $al killed $eax
|
|
|
|
; SSE41-NEXT: retq
|
|
|
|
;
|
|
|
|
; AVX1-LABEL: test_v8i8:
|
|
|
|
; AVX1: # %bb.0:
|
|
|
|
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
|
|
|
|
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm2
|
|
|
|
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[8],zero,xmm0[10],zero,xmm0[12],zero,xmm0[14],zero,zero,zero,zero,zero,zero,zero,zero,zero
|
|
|
|
; AVX1-NEXT: vpminuw %xmm0, %xmm2, %xmm0
|
|
|
|
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm2
|
|
|
|
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[4],zero,xmm0[6],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
|
|
|
|
; AVX1-NEXT: vpminuw %xmm0, %xmm2, %xmm0
|
|
|
|
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm1
|
|
|
|
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm0
|
|
|
|
; AVX1-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
|
|
|
|
; AVX1-NEXT: vpminuw %xmm0, %xmm1, %xmm0
|
|
|
|
; AVX1-NEXT: vpextrb $0, %xmm0, %eax
|
|
|
|
; AVX1-NEXT: # kill: def $al killed $al killed $eax
|
|
|
|
; AVX1-NEXT: retq
|
|
|
|
;
|
|
|
|
; AVX2-LABEL: test_v8i8:
|
|
|
|
; AVX2: # %bb.0:
|
|
|
|
; AVX2-NEXT: vpbroadcastw {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
|
|
|
|
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm2
|
|
|
|
; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[8],zero,xmm0[10],zero,xmm0[12],zero,xmm0[14],zero,zero,zero,zero,zero,zero,zero,zero,zero
|
|
|
|
; AVX2-NEXT: vpminuw %xmm0, %xmm2, %xmm0
|
|
|
|
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm2
|
|
|
|
; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[4],zero,xmm0[6],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
|
|
|
|
; AVX2-NEXT: vpminuw %xmm0, %xmm2, %xmm0
|
|
|
|
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm1
|
|
|
|
; AVX2-NEXT: vpsrld $16, %xmm0, %xmm0
|
|
|
|
; AVX2-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
|
|
|
|
; AVX2-NEXT: vpminuw %xmm0, %xmm1, %xmm0
|
|
|
|
; AVX2-NEXT: vpextrb $0, %xmm0, %eax
|
|
|
|
; AVX2-NEXT: # kill: def $al killed $al killed $eax
|
|
|
|
; AVX2-NEXT: retq
|
|
|
|
;
|
|
|
|
; AVX512-LABEL: test_v8i8:
|
|
|
|
; AVX512: # %bb.0:
|
|
|
|
; AVX512-NEXT: vpbroadcastw {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
|
|
|
|
; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm2
|
|
|
|
; AVX512-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[8],zero,xmm0[10],zero,xmm0[12],zero,xmm0[14],zero,zero,zero,zero,zero,zero,zero,zero,zero
|
|
|
|
; AVX512-NEXT: vpminuw %xmm0, %xmm2, %xmm0
|
|
|
|
; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm2
|
|
|
|
; AVX512-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[4],zero,xmm0[6],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
|
|
|
|
; AVX512-NEXT: vpminuw %xmm0, %xmm2, %xmm0
|
|
|
|
; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm1
|
|
|
|
; AVX512-NEXT: vpsrld $16, %xmm0, %xmm0
|
|
|
|
; AVX512-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
|
|
|
|
; AVX512-NEXT: vpminuw %xmm0, %xmm1, %xmm0
|
|
|
|
; AVX512-NEXT: vpextrb $0, %xmm0, %eax
|
|
|
|
; AVX512-NEXT: # kill: def $al killed $al killed $eax
|
|
|
|
; AVX512-NEXT: retq
|
|
|
|
%1 = call i8 @llvm.experimental.vector.reduce.umin.i8.v8i8(<8 x i8> %a0)
|
|
|
|
ret i8 %1
|
|
|
|
}
|
|
|
|
|
2018-04-06 01:25:40 +08:00
|
|
|
define i8 @test_v16i8(<16 x i8> %a0) {
|
|
|
|
; SSE2-LABEL: test_v16i8:
|
|
|
|
; SSE2: # %bb.0:
|
|
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
|
|
|
|
; SSE2-NEXT: pminub %xmm0, %xmm1
|
|
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
|
|
|
|
; SSE2-NEXT: pminub %xmm1, %xmm0
|
|
|
|
; SSE2-NEXT: movdqa %xmm0, %xmm1
|
|
|
|
; SSE2-NEXT: psrld $16, %xmm1
|
|
|
|
; SSE2-NEXT: pminub %xmm0, %xmm1
|
|
|
|
; SSE2-NEXT: movdqa %xmm1, %xmm0
|
|
|
|
; SSE2-NEXT: psrlw $8, %xmm0
|
|
|
|
; SSE2-NEXT: pminub %xmm1, %xmm0
|
|
|
|
; SSE2-NEXT: movd %xmm0, %eax
|
|
|
|
; SSE2-NEXT: # kill: def $al killed $al killed $eax
|
|
|
|
; SSE2-NEXT: retq
|
|
|
|
;
|
|
|
|
; SSE41-LABEL: test_v16i8:
|
|
|
|
; SSE41: # %bb.0:
|
|
|
|
; SSE41-NEXT: movdqa %xmm0, %xmm1
|
|
|
|
; SSE41-NEXT: psrlw $8, %xmm1
|
|
|
|
; SSE41-NEXT: pminub %xmm0, %xmm1
|
|
|
|
; SSE41-NEXT: phminposuw %xmm1, %xmm0
|
|
|
|
; SSE41-NEXT: pextrb $0, %xmm0, %eax
|
|
|
|
; SSE41-NEXT: # kill: def $al killed $al killed $eax
|
|
|
|
; SSE41-NEXT: retq
|
|
|
|
;
|
|
|
|
; AVX-LABEL: test_v16i8:
|
|
|
|
; AVX: # %bb.0:
|
|
|
|
; AVX-NEXT: vpsrlw $8, %xmm0, %xmm1
|
|
|
|
; AVX-NEXT: vpminub %xmm1, %xmm0, %xmm0
|
|
|
|
; AVX-NEXT: vphminposuw %xmm0, %xmm0
|
|
|
|
; AVX-NEXT: vpextrb $0, %xmm0, %eax
|
|
|
|
; AVX-NEXT: # kill: def $al killed $al killed $eax
|
|
|
|
; AVX-NEXT: retq
|
|
|
|
;
|
|
|
|
; AVX512-LABEL: test_v16i8:
|
|
|
|
; AVX512: # %bb.0:
|
|
|
|
; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
|
|
|
|
; AVX512-NEXT: vpminub %xmm1, %xmm0, %xmm0
|
|
|
|
; AVX512-NEXT: vphminposuw %xmm0, %xmm0
|
|
|
|
; AVX512-NEXT: vpextrb $0, %xmm0, %eax
|
|
|
|
; AVX512-NEXT: # kill: def $al killed $al killed $eax
|
|
|
|
; AVX512-NEXT: retq
|
|
|
|
%1 = call i8 @llvm.experimental.vector.reduce.umin.i8.v16i8(<16 x i8> %a0)
|
|
|
|
ret i8 %1
|
|
|
|
}
|
|
|
|
|
|
|
|
define i8 @test_v32i8(<32 x i8> %a0) {
|
|
|
|
; SSE2-LABEL: test_v32i8:
|
|
|
|
; SSE2: # %bb.0:
|
|
|
|
; SSE2-NEXT: pminub %xmm1, %xmm0
|
|
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
|
|
|
|
; SSE2-NEXT: pminub %xmm0, %xmm1
|
|
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
|
|
|
|
; SSE2-NEXT: pminub %xmm1, %xmm0
|
|
|
|
; SSE2-NEXT: movdqa %xmm0, %xmm1
|
|
|
|
; SSE2-NEXT: psrld $16, %xmm1
|
|
|
|
; SSE2-NEXT: pminub %xmm0, %xmm1
|
|
|
|
; SSE2-NEXT: movdqa %xmm1, %xmm0
|
|
|
|
; SSE2-NEXT: psrlw $8, %xmm0
|
|
|
|
; SSE2-NEXT: pminub %xmm1, %xmm0
|
|
|
|
; SSE2-NEXT: movd %xmm0, %eax
|
|
|
|
; SSE2-NEXT: # kill: def $al killed $al killed $eax
|
|
|
|
; SSE2-NEXT: retq
|
|
|
|
;
|
|
|
|
; SSE41-LABEL: test_v32i8:
|
|
|
|
; SSE41: # %bb.0:
|
|
|
|
; SSE41-NEXT: pminub %xmm1, %xmm0
|
|
|
|
; SSE41-NEXT: movdqa %xmm0, %xmm1
|
|
|
|
; SSE41-NEXT: psrlw $8, %xmm1
|
|
|
|
; SSE41-NEXT: pminub %xmm0, %xmm1
|
|
|
|
; SSE41-NEXT: phminposuw %xmm1, %xmm0
|
|
|
|
; SSE41-NEXT: pextrb $0, %xmm0, %eax
|
|
|
|
; SSE41-NEXT: # kill: def $al killed $al killed $eax
|
|
|
|
; SSE41-NEXT: retq
|
|
|
|
;
|
|
|
|
; AVX1-LABEL: test_v32i8:
|
|
|
|
; AVX1: # %bb.0:
|
|
|
|
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
|
|
|
|
; AVX1-NEXT: vpminub %xmm1, %xmm0, %xmm0
|
|
|
|
; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
|
|
|
|
; AVX1-NEXT: vpminub %xmm1, %xmm0, %xmm0
|
|
|
|
; AVX1-NEXT: vphminposuw %xmm0, %xmm0
|
|
|
|
; AVX1-NEXT: vpextrb $0, %xmm0, %eax
|
|
|
|
; AVX1-NEXT: # kill: def $al killed $al killed $eax
|
|
|
|
; AVX1-NEXT: vzeroupper
|
|
|
|
; AVX1-NEXT: retq
|
|
|
|
;
|
|
|
|
; AVX2-LABEL: test_v32i8:
|
|
|
|
; AVX2: # %bb.0:
|
|
|
|
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
|
|
|
|
; AVX2-NEXT: vpminub %xmm1, %xmm0, %xmm0
|
|
|
|
; AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
|
|
|
|
; AVX2-NEXT: vpminub %xmm1, %xmm0, %xmm0
|
|
|
|
; AVX2-NEXT: vphminposuw %xmm0, %xmm0
|
|
|
|
; AVX2-NEXT: vpextrb $0, %xmm0, %eax
|
|
|
|
; AVX2-NEXT: # kill: def $al killed $al killed $eax
|
|
|
|
; AVX2-NEXT: vzeroupper
|
|
|
|
; AVX2-NEXT: retq
|
|
|
|
;
|
|
|
|
; AVX512-LABEL: test_v32i8:
|
|
|
|
; AVX512: # %bb.0:
|
|
|
|
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
|
|
|
|
; AVX512-NEXT: vpminub %xmm1, %xmm0, %xmm0
|
|
|
|
; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
|
|
|
|
; AVX512-NEXT: vpminub %xmm1, %xmm0, %xmm0
|
|
|
|
; AVX512-NEXT: vphminposuw %xmm0, %xmm0
|
|
|
|
; AVX512-NEXT: vpextrb $0, %xmm0, %eax
|
|
|
|
; AVX512-NEXT: # kill: def $al killed $al killed $eax
|
|
|
|
; AVX512-NEXT: vzeroupper
|
|
|
|
; AVX512-NEXT: retq
|
|
|
|
%1 = call i8 @llvm.experimental.vector.reduce.umin.i8.v32i8(<32 x i8> %a0)
|
|
|
|
ret i8 %1
|
|
|
|
}
|
|
|
|
|
|
|
|
define i8 @test_v64i8(<64 x i8> %a0) {
|
|
|
|
; SSE2-LABEL: test_v64i8:
|
|
|
|
; SSE2: # %bb.0:
|
|
|
|
; SSE2-NEXT: pminub %xmm3, %xmm1
|
|
|
|
; SSE2-NEXT: pminub %xmm2, %xmm0
|
|
|
|
; SSE2-NEXT: pminub %xmm1, %xmm0
|
|
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
|
|
|
|
; SSE2-NEXT: pminub %xmm0, %xmm1
|
|
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
|
|
|
|
; SSE2-NEXT: pminub %xmm1, %xmm0
|
|
|
|
; SSE2-NEXT: movdqa %xmm0, %xmm1
|
|
|
|
; SSE2-NEXT: psrld $16, %xmm1
|
|
|
|
; SSE2-NEXT: pminub %xmm0, %xmm1
|
|
|
|
; SSE2-NEXT: movdqa %xmm1, %xmm0
|
|
|
|
; SSE2-NEXT: psrlw $8, %xmm0
|
|
|
|
; SSE2-NEXT: pminub %xmm1, %xmm0
|
|
|
|
; SSE2-NEXT: movd %xmm0, %eax
|
|
|
|
; SSE2-NEXT: # kill: def $al killed $al killed $eax
|
|
|
|
; SSE2-NEXT: retq
|
|
|
|
;
|
|
|
|
; SSE41-LABEL: test_v64i8:
|
|
|
|
; SSE41: # %bb.0:
|
|
|
|
; SSE41-NEXT: pminub %xmm3, %xmm1
|
|
|
|
; SSE41-NEXT: pminub %xmm2, %xmm0
|
|
|
|
; SSE41-NEXT: pminub %xmm1, %xmm0
|
|
|
|
; SSE41-NEXT: movdqa %xmm0, %xmm1
|
|
|
|
; SSE41-NEXT: psrlw $8, %xmm1
|
|
|
|
; SSE41-NEXT: pminub %xmm0, %xmm1
|
|
|
|
; SSE41-NEXT: phminposuw %xmm1, %xmm0
|
|
|
|
; SSE41-NEXT: pextrb $0, %xmm0, %eax
|
|
|
|
; SSE41-NEXT: # kill: def $al killed $al killed $eax
|
|
|
|
; SSE41-NEXT: retq
|
|
|
|
;
|
|
|
|
; AVX1-LABEL: test_v64i8:
|
|
|
|
; AVX1: # %bb.0:
|
|
|
|
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
|
|
|
|
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
|
|
|
|
; AVX1-NEXT: vpminub %xmm2, %xmm3, %xmm2
|
|
|
|
; AVX1-NEXT: vpminub %xmm1, %xmm0, %xmm0
|
|
|
|
; AVX1-NEXT: vpminub %xmm2, %xmm0, %xmm0
|
|
|
|
; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
|
|
|
|
; AVX1-NEXT: vpminub %xmm1, %xmm0, %xmm0
|
|
|
|
; AVX1-NEXT: vphminposuw %xmm0, %xmm0
|
|
|
|
; AVX1-NEXT: vpextrb $0, %xmm0, %eax
|
|
|
|
; AVX1-NEXT: # kill: def $al killed $al killed $eax
|
|
|
|
; AVX1-NEXT: vzeroupper
|
|
|
|
; AVX1-NEXT: retq
|
|
|
|
;
|
|
|
|
; AVX2-LABEL: test_v64i8:
|
|
|
|
; AVX2: # %bb.0:
|
|
|
|
; AVX2-NEXT: vpminub %ymm1, %ymm0, %ymm0
|
|
|
|
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
|
|
|
|
; AVX2-NEXT: vpminub %xmm1, %xmm0, %xmm0
|
|
|
|
; AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
|
|
|
|
; AVX2-NEXT: vpminub %xmm1, %xmm0, %xmm0
|
|
|
|
; AVX2-NEXT: vphminposuw %xmm0, %xmm0
|
|
|
|
; AVX2-NEXT: vpextrb $0, %xmm0, %eax
|
|
|
|
; AVX2-NEXT: # kill: def $al killed $al killed $eax
|
|
|
|
; AVX2-NEXT: vzeroupper
|
|
|
|
; AVX2-NEXT: retq
|
|
|
|
;
|
|
|
|
; AVX512-LABEL: test_v64i8:
|
|
|
|
; AVX512: # %bb.0:
|
|
|
|
; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
|
|
|
|
; AVX512-NEXT: vpminub %ymm1, %ymm0, %ymm0
|
|
|
|
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
|
|
|
|
; AVX512-NEXT: vpminub %xmm1, %xmm0, %xmm0
|
|
|
|
; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
|
|
|
|
; AVX512-NEXT: vpminub %xmm1, %xmm0, %xmm0
|
|
|
|
; AVX512-NEXT: vphminposuw %xmm0, %xmm0
|
|
|
|
; AVX512-NEXT: vpextrb $0, %xmm0, %eax
|
|
|
|
; AVX512-NEXT: # kill: def $al killed $al killed $eax
|
|
|
|
; AVX512-NEXT: vzeroupper
|
|
|
|
; AVX512-NEXT: retq
|
|
|
|
%1 = call i8 @llvm.experimental.vector.reduce.umin.i8.v64i8(<64 x i8> %a0)
|
|
|
|
ret i8 %1
|
|
|
|
}
|
|
|
|
|
|
|
|
define i8 @test_v128i8(<128 x i8> %a0) {
|
|
|
|
; SSE2-LABEL: test_v128i8:
|
|
|
|
; SSE2: # %bb.0:
|
|
|
|
; SSE2-NEXT: pminub %xmm6, %xmm2
|
|
|
|
; SSE2-NEXT: pminub %xmm4, %xmm0
|
|
|
|
; SSE2-NEXT: pminub %xmm2, %xmm0
|
|
|
|
; SSE2-NEXT: pminub %xmm7, %xmm3
|
|
|
|
; SSE2-NEXT: pminub %xmm5, %xmm1
|
|
|
|
; SSE2-NEXT: pminub %xmm3, %xmm1
|
|
|
|
; SSE2-NEXT: pminub %xmm0, %xmm1
|
|
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
|
|
|
|
; SSE2-NEXT: pminub %xmm1, %xmm0
|
|
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
|
|
|
|
; SSE2-NEXT: pminub %xmm0, %xmm1
|
|
|
|
; SSE2-NEXT: movdqa %xmm1, %xmm0
|
|
|
|
; SSE2-NEXT: psrld $16, %xmm0
|
|
|
|
; SSE2-NEXT: pminub %xmm1, %xmm0
|
|
|
|
; SSE2-NEXT: movdqa %xmm0, %xmm1
|
|
|
|
; SSE2-NEXT: psrlw $8, %xmm1
|
|
|
|
; SSE2-NEXT: pminub %xmm0, %xmm1
|
|
|
|
; SSE2-NEXT: movd %xmm1, %eax
|
|
|
|
; SSE2-NEXT: # kill: def $al killed $al killed $eax
|
|
|
|
; SSE2-NEXT: retq
|
|
|
|
;
|
|
|
|
; SSE41-LABEL: test_v128i8:
|
|
|
|
; SSE41: # %bb.0:
|
|
|
|
; SSE41-NEXT: pminub %xmm7, %xmm3
|
|
|
|
; SSE41-NEXT: pminub %xmm5, %xmm1
|
|
|
|
; SSE41-NEXT: pminub %xmm3, %xmm1
|
|
|
|
; SSE41-NEXT: pminub %xmm6, %xmm2
|
|
|
|
; SSE41-NEXT: pminub %xmm4, %xmm0
|
|
|
|
; SSE41-NEXT: pminub %xmm2, %xmm0
|
|
|
|
; SSE41-NEXT: pminub %xmm1, %xmm0
|
|
|
|
; SSE41-NEXT: movdqa %xmm0, %xmm1
|
|
|
|
; SSE41-NEXT: psrlw $8, %xmm1
|
|
|
|
; SSE41-NEXT: pminub %xmm0, %xmm1
|
|
|
|
; SSE41-NEXT: phminposuw %xmm1, %xmm0
|
|
|
|
; SSE41-NEXT: pextrb $0, %xmm0, %eax
|
|
|
|
; SSE41-NEXT: # kill: def $al killed $al killed $eax
|
|
|
|
; SSE41-NEXT: retq
|
|
|
|
;
|
|
|
|
; AVX1-LABEL: test_v128i8:
|
|
|
|
; AVX1: # %bb.0:
|
|
|
|
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
|
|
|
|
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
|
|
|
|
; AVX1-NEXT: vpminub %xmm4, %xmm5, %xmm4
|
|
|
|
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm5
|
|
|
|
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm6
|
|
|
|
; AVX1-NEXT: vpminub %xmm5, %xmm6, %xmm5
|
|
|
|
; AVX1-NEXT: vpminub %xmm4, %xmm5, %xmm4
|
|
|
|
; AVX1-NEXT: vpminub %xmm3, %xmm1, %xmm1
|
|
|
|
; AVX1-NEXT: vpminub %xmm2, %xmm0, %xmm0
|
|
|
|
; AVX1-NEXT: vpminub %xmm1, %xmm0, %xmm0
|
|
|
|
; AVX1-NEXT: vpminub %xmm4, %xmm0, %xmm0
|
|
|
|
; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
|
|
|
|
; AVX1-NEXT: vpminub %xmm1, %xmm0, %xmm0
|
|
|
|
; AVX1-NEXT: vphminposuw %xmm0, %xmm0
|
|
|
|
; AVX1-NEXT: vpextrb $0, %xmm0, %eax
|
|
|
|
; AVX1-NEXT: # kill: def $al killed $al killed $eax
|
|
|
|
; AVX1-NEXT: vzeroupper
|
|
|
|
; AVX1-NEXT: retq
|
|
|
|
;
|
|
|
|
; AVX2-LABEL: test_v128i8:
|
|
|
|
; AVX2: # %bb.0:
|
|
|
|
; AVX2-NEXT: vpminub %ymm3, %ymm1, %ymm1
|
|
|
|
; AVX2-NEXT: vpminub %ymm2, %ymm0, %ymm0
|
|
|
|
; AVX2-NEXT: vpminub %ymm1, %ymm0, %ymm0
|
|
|
|
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
|
|
|
|
; AVX2-NEXT: vpminub %xmm1, %xmm0, %xmm0
|
|
|
|
; AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
|
|
|
|
; AVX2-NEXT: vpminub %xmm1, %xmm0, %xmm0
|
|
|
|
; AVX2-NEXT: vphminposuw %xmm0, %xmm0
|
|
|
|
; AVX2-NEXT: vpextrb $0, %xmm0, %eax
|
|
|
|
; AVX2-NEXT: # kill: def $al killed $al killed $eax
|
|
|
|
; AVX2-NEXT: vzeroupper
|
|
|
|
; AVX2-NEXT: retq
|
|
|
|
;
|
|
|
|
; AVX512-LABEL: test_v128i8:
|
|
|
|
; AVX512: # %bb.0:
|
|
|
|
; AVX512-NEXT: vpminub %zmm1, %zmm0, %zmm0
|
|
|
|
; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
|
|
|
|
; AVX512-NEXT: vpminub %ymm1, %ymm0, %ymm0
|
|
|
|
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
|
|
|
|
; AVX512-NEXT: vpminub %xmm1, %xmm0, %xmm0
|
|
|
|
; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
|
|
|
|
; AVX512-NEXT: vpminub %xmm1, %xmm0, %xmm0
|
|
|
|
; AVX512-NEXT: vphminposuw %xmm0, %xmm0
|
|
|
|
; AVX512-NEXT: vpextrb $0, %xmm0, %eax
|
|
|
|
; AVX512-NEXT: # kill: def $al killed $al killed $eax
|
|
|
|
; AVX512-NEXT: vzeroupper
|
|
|
|
; AVX512-NEXT: retq
|
|
|
|
%1 = call i8 @llvm.experimental.vector.reduce.umin.i8.v128i8(<128 x i8> %a0)
|
|
|
|
ret i8 %1
|
|
|
|
}
|
|
|
|
|
|
|
|
declare i64 @llvm.experimental.vector.reduce.umin.i64.v2i64(<2 x i64>)
|
|
|
|
declare i64 @llvm.experimental.vector.reduce.umin.i64.v4i64(<4 x i64>)
|
|
|
|
declare i64 @llvm.experimental.vector.reduce.umin.i64.v8i64(<8 x i64>)
|
|
|
|
declare i64 @llvm.experimental.vector.reduce.umin.i64.v16i64(<16 x i64>)
|
|
|
|
|
2018-12-05 14:29:44 +08:00
|
|
|
declare i32 @llvm.experimental.vector.reduce.umin.i32.v2i32(<2 x i32>)
|
2018-04-06 01:25:40 +08:00
|
|
|
declare i32 @llvm.experimental.vector.reduce.umin.i32.v4i32(<4 x i32>)
|
|
|
|
declare i32 @llvm.experimental.vector.reduce.umin.i32.v8i32(<8 x i32>)
|
|
|
|
declare i32 @llvm.experimental.vector.reduce.umin.i32.v16i32(<16 x i32>)
|
|
|
|
declare i32 @llvm.experimental.vector.reduce.umin.i32.v32i32(<32 x i32>)
|
|
|
|
|
2018-12-05 14:29:44 +08:00
|
|
|
declare i16 @llvm.experimental.vector.reduce.umin.i16.v2i16(<2 x i16>)
|
|
|
|
declare i16 @llvm.experimental.vector.reduce.umin.i16.v4i16(<4 x i16>)
|
2018-04-06 01:25:40 +08:00
|
|
|
declare i16 @llvm.experimental.vector.reduce.umin.i16.v8i16(<8 x i16>)
|
|
|
|
declare i16 @llvm.experimental.vector.reduce.umin.i16.v16i16(<16 x i16>)
|
|
|
|
declare i16 @llvm.experimental.vector.reduce.umin.i16.v32i16(<32 x i16>)
|
|
|
|
declare i16 @llvm.experimental.vector.reduce.umin.i16.v64i16(<64 x i16>)
|
|
|
|
|
2018-12-05 14:29:44 +08:00
|
|
|
declare i8 @llvm.experimental.vector.reduce.umin.i8.v2i8(<2 x i8>)
|
|
|
|
declare i8 @llvm.experimental.vector.reduce.umin.i8.v4i8(<4 x i8>)
|
|
|
|
declare i8 @llvm.experimental.vector.reduce.umin.i8.v8i8(<8 x i8>)
|
2018-04-06 01:25:40 +08:00
|
|
|
declare i8 @llvm.experimental.vector.reduce.umin.i8.v16i8(<16 x i8>)
|
|
|
|
declare i8 @llvm.experimental.vector.reduce.umin.i8.v32i8(<32 x i8>)
|
|
|
|
declare i8 @llvm.experimental.vector.reduce.umin.i8.v64i8(<64 x i8>)
|
|
|
|
declare i8 @llvm.experimental.vector.reduce.umin.i8.v128i8(<128 x i8>)
|