; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE41
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512VL

;
; vXi64
;
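; This file checks x86 lowering of the llvm.experimental.vector.reduce.smax
; intrinsics. x86 has no packed signed-i64 max instruction before AVX-512, so
; the vXi64 expectations differ sharply by subtarget: SSE2/SSE4.1 synthesize
; an i64 signed-greater-than mask out of 32-bit compares and select with
; pand/pandn/por or blendvpd, AVX uses vpcmpgtq + vblendvpd, and AVX-512
; collapses to vpmaxsq. Each test is semantically just the scalar maximum of
; the lanes; an illustrative (not autogenerated) IR expansion of the v2i64
; case, with made-up value names:
;
;   %hi  = shufflevector <2 x i64> %a0, <2 x i64> undef, <2 x i32> <i32 1, i32 undef>
;   %cmp = icmp sgt <2 x i64> %a0, %hi
;   %max = select <2 x i1> %cmp, <2 x i64> %a0, <2 x i64> %hi
;   %res = extractelement <2 x i64> %max, i32 0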
define i64 @test_v2i64(<2 x i64> %a0) {
; SSE2-LABEL: test_v2i64:
; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: pxor %xmm2, %xmm3
; SSE2-NEXT: pxor %xmm1, %xmm2
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: pcmpgtd %xmm2, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm3, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE2-NEXT: pand %xmm5, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
; SSE2-NEXT: por %xmm2, %xmm3
; SSE2-NEXT: pand %xmm3, %xmm0
; SSE2-NEXT: pandn %xmm1, %xmm3
; SSE2-NEXT: por %xmm0, %xmm3
; SSE2-NEXT: movq %xmm3, %rax
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v2i64:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,0,2147483648,0]
; SSE41-NEXT: movdqa %xmm1, %xmm3
; SSE41-NEXT: pxor %xmm0, %xmm3
; SSE41-NEXT: pxor %xmm2, %xmm0
; SSE41-NEXT: movdqa %xmm3, %xmm4
; SSE41-NEXT: pcmpgtd %xmm0, %xmm4
; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
; SSE41-NEXT: pcmpeqd %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; SSE41-NEXT: pand %xmm5, %xmm3
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
; SSE41-NEXT: por %xmm3, %xmm0
; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm2
; SSE41-NEXT: movq %xmm2, %rax
; SSE41-NEXT: retq
;
; AVX-LABEL: test_v2i64:
; AVX: # %bb.0:
; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
; AVX-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
; AVX-NEXT: vmovq %xmm0, %rax
; AVX-NEXT: retq
;
; AVX512BW-LABEL: test_v2i64:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512BW-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vmovq %xmm0, %rax
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: test_v2i64:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512VL-NEXT: vpmaxsq %xmm1, %xmm0, %xmm0
; AVX512VL-NEXT: vmovq %xmm0, %rax
; AVX512VL-NEXT: retq
  %1 = call i64 @llvm.experimental.vector.reduce.smax.i64.v2i64(<2 x i64> %a0)
  ret i64 %1
}

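; How the SSE2/SSE41 blocks above build a 64-bit signed compare out of 32-bit
; ops: the constant [2147483648,0,2147483648,0] flips the sign bit of each
; *low* dword, so a signed pcmpgtd on the biased values yields an unsigned
; compare of the low halves while the high halves still compare signed.
; "a >s b" is then assembled as (hi(a) >s hi(b)) | ((hi(a) == hi(b)) &
; (lo(a) >u lo(b))): pshufd [0,0,2,2] splats the low-half greater-than bits,
; pcmpeqd + pshufd [1,1,3,3] splats the high-half equality bits, and the
; pand/por combine and broadcast the final mask across each qword lane,
; ready for the pand/pandn/por (SSE2) or blendvpd (SSE4.1) select.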
define i64 @test_v4i64(<4 x i64> %a0) {
; SSE2-LABEL: test_v4i64:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
; SSE2-NEXT: movdqa %xmm1, %xmm3
; SSE2-NEXT: pxor %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm0, %xmm4
; SSE2-NEXT: pxor %xmm2, %xmm4
; SSE2-NEXT: movdqa %xmm4, %xmm5
; SSE2-NEXT: pcmpgtd %xmm3, %xmm5
; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm3, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
; SSE2-NEXT: pand %xmm6, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm5[1,1,3,3]
; SSE2-NEXT: por %xmm3, %xmm4
; SSE2-NEXT: pand %xmm4, %xmm0
; SSE2-NEXT: pandn %xmm1, %xmm4
; SSE2-NEXT: por %xmm0, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm4[2,3,0,1]
; SSE2-NEXT: movdqa %xmm4, %xmm1
; SSE2-NEXT: pxor %xmm2, %xmm1
; SSE2-NEXT: pxor %xmm0, %xmm2
; SSE2-NEXT: movdqa %xmm1, %xmm3
; SSE2-NEXT: pcmpgtd %xmm2, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm3[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm1, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
; SSE2-NEXT: pand %xmm5, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[1,1,3,3]
; SSE2-NEXT: por %xmm1, %xmm2
; SSE2-NEXT: pand %xmm2, %xmm4
; SSE2-NEXT: pandn %xmm0, %xmm2
; SSE2-NEXT: por %xmm4, %xmm2
; SSE2-NEXT: movq %xmm2, %rax
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v4i64:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [2147483648,0,2147483648,0]
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: pxor %xmm3, %xmm0
; SSE41-NEXT: movdqa %xmm2, %xmm4
; SSE41-NEXT: pxor %xmm3, %xmm4
; SSE41-NEXT: movdqa %xmm4, %xmm5
; SSE41-NEXT: pcmpgtd %xmm0, %xmm5
; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
; SSE41-NEXT: pcmpeqd %xmm0, %xmm4
; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
; SSE41-NEXT: pand %xmm6, %xmm4
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm5[1,1,3,3]
; SSE41-NEXT: por %xmm4, %xmm0
; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm1
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: pxor %xmm3, %xmm0
; SSE41-NEXT: pxor %xmm2, %xmm3
; SSE41-NEXT: movdqa %xmm0, %xmm4
; SSE41-NEXT: pcmpgtd %xmm3, %xmm4
; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
; SSE41-NEXT: pcmpeqd %xmm0, %xmm3
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; SSE41-NEXT: pand %xmm5, %xmm3
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
; SSE41-NEXT: por %xmm3, %xmm0
; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm2
; SSE41-NEXT: movq %xmm2, %rax
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v4i64:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm2
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm3
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
; AVX1-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
; AVX1-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; AVX1-NEXT: vmovq %xmm0, %rax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v4i64:
; AVX2: # %bb.0:
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm2
; AVX2-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm2
; AVX2-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vmovq %xmm0, %rax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test_v4i64:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512BW-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512BW-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vmovq %xmm0, %rax
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: test_v4i64:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512VL-NEXT: vpmaxsq %ymm1, %ymm0, %ymm0
; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512VL-NEXT: vpmaxsq %ymm1, %ymm0, %ymm0
; AVX512VL-NEXT: vmovq %xmm0, %rax
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
  %1 = call i64 @llvm.experimental.vector.reduce.smax.i64.v4i64(<4 x i64> %a0)
  ret i64 %1
}

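; For types wider than one register the reduction runs in two phases, visible
; in all of the v4i64..v16i64 bodies: a vertical max first folds the upper
; register/lane halves into the lower ones (vextractf128 / vextracti64x4 plus
; a compare-and-blend, or vpmaxsq), halving the live vector each step; once a
; single 128-bit value remains, a final pshufd/vpermilps swaps the two qwords
; so one last max leaves the result in element 0 for movq/vmovq.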
define i64 @test_v8i64(<8 x i64> %a0) {
; SSE2-LABEL: test_v8i64:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,0,2147483648,0]
; SSE2-NEXT: movdqa %xmm2, %xmm5
; SSE2-NEXT: pxor %xmm4, %xmm5
; SSE2-NEXT: movdqa %xmm0, %xmm6
; SSE2-NEXT: pxor %xmm4, %xmm6
; SSE2-NEXT: movdqa %xmm6, %xmm7
; SSE2-NEXT: pcmpgtd %xmm5, %xmm7
; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm7[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm5, %xmm6
; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
; SSE2-NEXT: pand %xmm8, %xmm6
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm7[1,1,3,3]
; SSE2-NEXT: por %xmm6, %xmm5
; SSE2-NEXT: pand %xmm5, %xmm0
; SSE2-NEXT: pandn %xmm2, %xmm5
; SSE2-NEXT: por %xmm0, %xmm5
; SSE2-NEXT: movdqa %xmm3, %xmm0
; SSE2-NEXT: pxor %xmm4, %xmm0
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: pxor %xmm4, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm6
; SSE2-NEXT: pcmpgtd %xmm0, %xmm6
; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm0, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
; SSE2-NEXT: pand %xmm7, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm6[1,1,3,3]
; SSE2-NEXT: por %xmm0, %xmm2
; SSE2-NEXT: pand %xmm2, %xmm1
; SSE2-NEXT: pandn %xmm3, %xmm2
; SSE2-NEXT: por %xmm1, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm0
; SSE2-NEXT: pxor %xmm4, %xmm0
; SSE2-NEXT: movdqa %xmm5, %xmm1
; SSE2-NEXT: pxor %xmm4, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm3
; SSE2-NEXT: pcmpgtd %xmm0, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm3[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm0, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
; SSE2-NEXT: pand %xmm6, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,1,3,3]
; SSE2-NEXT: por %xmm0, %xmm1
; SSE2-NEXT: pand %xmm1, %xmm5
; SSE2-NEXT: pandn %xmm2, %xmm1
; SSE2-NEXT: por %xmm5, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: pxor %xmm4, %xmm2
; SSE2-NEXT: pxor %xmm0, %xmm4
; SSE2-NEXT: movdqa %xmm2, %xmm3
; SSE2-NEXT: pcmpgtd %xmm4, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm3[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm2, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm4[1,1,3,3]
; SSE2-NEXT: pand %xmm5, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; SSE2-NEXT: por %xmm2, %xmm3
; SSE2-NEXT: pand %xmm3, %xmm1
; SSE2-NEXT: pandn %xmm0, %xmm3
; SSE2-NEXT: por %xmm1, %xmm3
; SSE2-NEXT: movq %xmm3, %rax
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v8i64:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm4
; SSE41-NEXT: movdqa {{.*#+}} xmm5 = [2147483648,0,2147483648,0]
; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: pxor %xmm5, %xmm0
; SSE41-NEXT: movdqa %xmm4, %xmm6
; SSE41-NEXT: pxor %xmm5, %xmm6
; SSE41-NEXT: movdqa %xmm6, %xmm7
; SSE41-NEXT: pcmpgtd %xmm0, %xmm7
; SSE41-NEXT: pshufd {{.*#+}} xmm8 = xmm7[0,0,2,2]
; SSE41-NEXT: pcmpeqd %xmm0, %xmm6
; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
; SSE41-NEXT: pand %xmm8, %xmm6
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm7[1,1,3,3]
; SSE41-NEXT: por %xmm6, %xmm0
; SSE41-NEXT: blendvpd %xmm0, %xmm4, %xmm2
; SSE41-NEXT: movdqa %xmm3, %xmm0
; SSE41-NEXT: pxor %xmm5, %xmm0
; SSE41-NEXT: movdqa %xmm1, %xmm4
; SSE41-NEXT: pxor %xmm5, %xmm4
; SSE41-NEXT: movdqa %xmm4, %xmm6
; SSE41-NEXT: pcmpgtd %xmm0, %xmm6
; SSE41-NEXT: pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
; SSE41-NEXT: pcmpeqd %xmm0, %xmm4
; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
; SSE41-NEXT: pand %xmm7, %xmm4
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm6[1,1,3,3]
; SSE41-NEXT: por %xmm4, %xmm0
; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm3
; SSE41-NEXT: movapd %xmm3, %xmm0
; SSE41-NEXT: xorpd %xmm5, %xmm0
; SSE41-NEXT: movapd %xmm2, %xmm1
; SSE41-NEXT: xorpd %xmm5, %xmm1
; SSE41-NEXT: movapd %xmm1, %xmm4
; SSE41-NEXT: pcmpgtd %xmm0, %xmm4
; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
; SSE41-NEXT: pcmpeqd %xmm0, %xmm1
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE41-NEXT: pand %xmm6, %xmm1
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
; SSE41-NEXT: por %xmm1, %xmm0
; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm3
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm3[2,3,0,1]
; SSE41-NEXT: movdqa %xmm3, %xmm0
; SSE41-NEXT: pxor %xmm5, %xmm0
; SSE41-NEXT: pxor %xmm1, %xmm5
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: pcmpgtd %xmm5, %xmm2
; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2]
; SSE41-NEXT: pcmpeqd %xmm0, %xmm5
; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
; SSE41-NEXT: pand %xmm4, %xmm5
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
; SSE41-NEXT: por %xmm5, %xmm0
; SSE41-NEXT: blendvpd %xmm0, %xmm3, %xmm1
; SSE41-NEXT: movq %xmm1, %rax
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v8i64:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm3
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
; AVX1-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
; AVX1-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm3
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
; AVX1-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
; AVX1-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; AVX1-NEXT: vmovq %xmm0, %rax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v8i64:
; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm2
; AVX2-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm2
; AVX2-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm2
; AVX2-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vmovq %xmm0, %rax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v8i64:
; AVX512: # %bb.0:
; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vmovq %xmm0, %rax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
  %1 = call i64 @llvm.experimental.vector.reduce.smax.i64.v8i64(<8 x i64> %a0)
  ret i64 %1
}

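; Note the AVX512BW runs above: without AVX512VL the packed i64 max is only
; available at zmm width, so the "# kill" comment records xmm0/ymm0 being
; widened into an implicitly-defined zmm0, every vpmaxsq executes at 512 bits
; even though only the low lanes matter, and vzeroupper is emitted before
; returning. The AVX512VL runs stay at the natural width and, in the
; xmm-only v2i64 case, need no vzeroupper at all.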
define i64 @test_v16i64(<16 x i64> %a0) {
; SSE2-LABEL: test_v16i64:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [2147483648,0,2147483648,0]
; SSE2-NEXT: movdqa %xmm5, %xmm9
; SSE2-NEXT: pxor %xmm8, %xmm9
; SSE2-NEXT: movdqa %xmm1, %xmm10
; SSE2-NEXT: pxor %xmm8, %xmm10
; SSE2-NEXT: movdqa %xmm10, %xmm11
; SSE2-NEXT: pcmpgtd %xmm9, %xmm11
; SSE2-NEXT: pshufd {{.*#+}} xmm12 = xmm11[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm9, %xmm10
; SSE2-NEXT: pshufd {{.*#+}} xmm10 = xmm10[1,1,3,3]
; SSE2-NEXT: pand %xmm12, %xmm10
; SSE2-NEXT: pshufd {{.*#+}} xmm9 = xmm11[1,1,3,3]
; SSE2-NEXT: por %xmm10, %xmm9
; SSE2-NEXT: pand %xmm9, %xmm1
; SSE2-NEXT: pandn %xmm5, %xmm9
; SSE2-NEXT: por %xmm1, %xmm9
; SSE2-NEXT: movdqa %xmm7, %xmm1
; SSE2-NEXT: pxor %xmm8, %xmm1
; SSE2-NEXT: movdqa %xmm3, %xmm5
; SSE2-NEXT: pxor %xmm8, %xmm5
; SSE2-NEXT: movdqa %xmm5, %xmm10
; SSE2-NEXT: pcmpgtd %xmm1, %xmm10
; SSE2-NEXT: pshufd {{.*#+}} xmm11 = xmm10[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm1, %xmm5
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
; SSE2-NEXT: pand %xmm11, %xmm5
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm10[1,1,3,3]
; SSE2-NEXT: por %xmm5, %xmm1
; SSE2-NEXT: pand %xmm1, %xmm3
; SSE2-NEXT: pandn %xmm7, %xmm1
; SSE2-NEXT: por %xmm3, %xmm1
; SSE2-NEXT: movdqa %xmm4, %xmm3
; SSE2-NEXT: pxor %xmm8, %xmm3
; SSE2-NEXT: movdqa %xmm0, %xmm5
; SSE2-NEXT: pxor %xmm8, %xmm5
; SSE2-NEXT: movdqa %xmm5, %xmm7
; SSE2-NEXT: pcmpgtd %xmm3, %xmm7
; SSE2-NEXT: pshufd {{.*#+}} xmm10 = xmm7[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm3, %xmm5
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
; SSE2-NEXT: pand %xmm10, %xmm5
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm7[1,1,3,3]
; SSE2-NEXT: por %xmm5, %xmm3
; SSE2-NEXT: pand %xmm3, %xmm0
; SSE2-NEXT: pandn %xmm4, %xmm3
; SSE2-NEXT: por %xmm0, %xmm3
; SSE2-NEXT: movdqa %xmm6, %xmm0
; SSE2-NEXT: pxor %xmm8, %xmm0
; SSE2-NEXT: movdqa %xmm2, %xmm4
; SSE2-NEXT: pxor %xmm8, %xmm4
; SSE2-NEXT: movdqa %xmm4, %xmm5
; SSE2-NEXT: pcmpgtd %xmm0, %xmm5
; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm5[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm0, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
; SSE2-NEXT: pand %xmm7, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm5[1,1,3,3]
; SSE2-NEXT: por %xmm0, %xmm4
; SSE2-NEXT: pand %xmm4, %xmm2
; SSE2-NEXT: pandn %xmm6, %xmm4
; SSE2-NEXT: por %xmm2, %xmm4
; SSE2-NEXT: movdqa %xmm4, %xmm0
; SSE2-NEXT: pxor %xmm8, %xmm0
; SSE2-NEXT: movdqa %xmm3, %xmm2
; SSE2-NEXT: pxor %xmm8, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm5
; SSE2-NEXT: pcmpgtd %xmm0, %xmm5
; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm0, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE2-NEXT: pand %xmm6, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm5[1,1,3,3]
; SSE2-NEXT: por %xmm2, %xmm0
; SSE2-NEXT: pand %xmm0, %xmm3
; SSE2-NEXT: pandn %xmm4, %xmm0
; SSE2-NEXT: por %xmm3, %xmm0
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: pxor %xmm8, %xmm2
; SSE2-NEXT: movdqa %xmm9, %xmm3
; SSE2-NEXT: pxor %xmm8, %xmm3
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: pcmpgtd %xmm2, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm2, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[1,1,3,3]
; SSE2-NEXT: pand %xmm5, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
; SSE2-NEXT: por %xmm2, %xmm3
; SSE2-NEXT: pand %xmm3, %xmm9
; SSE2-NEXT: pandn %xmm1, %xmm3
; SSE2-NEXT: por %xmm9, %xmm3
; SSE2-NEXT: movdqa %xmm3, %xmm1
; SSE2-NEXT: pxor %xmm8, %xmm1
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: pxor %xmm8, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm4
; SSE2-NEXT: pcmpgtd %xmm1, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm1, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
; SSE2-NEXT: pand %xmm5, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm4[1,1,3,3]
; SSE2-NEXT: por %xmm1, %xmm2
; SSE2-NEXT: pand %xmm2, %xmm0
; SSE2-NEXT: pandn %xmm3, %xmm2
; SSE2-NEXT: por %xmm0, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
; SSE2-NEXT: movdqa %xmm2, %xmm1
; SSE2-NEXT: pxor %xmm8, %xmm1
; SSE2-NEXT: pxor %xmm0, %xmm8
; SSE2-NEXT: movdqa %xmm1, %xmm3
; SSE2-NEXT: pcmpgtd %xmm8, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm1, %xmm8
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm8[1,1,3,3]
; SSE2-NEXT: pand %xmm4, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; SSE2-NEXT: por %xmm1, %xmm3
; SSE2-NEXT: pand %xmm3, %xmm2
; SSE2-NEXT: pandn %xmm0, %xmm3
; SSE2-NEXT: por %xmm2, %xmm3
; SSE2-NEXT: movq %xmm3, %rax
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v16i64:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm8
; SSE41-NEXT: movdqa {{.*#+}} xmm9 = [2147483648,0,2147483648,0]
; SSE41-NEXT: movdqa %xmm5, %xmm10
; SSE41-NEXT: pxor %xmm9, %xmm10
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: pxor %xmm9, %xmm0
; SSE41-NEXT: movdqa %xmm0, %xmm11
; SSE41-NEXT: pcmpgtd %xmm10, %xmm11
; SSE41-NEXT: pshufd {{.*#+}} xmm12 = xmm11[0,0,2,2]
; SSE41-NEXT: pcmpeqd %xmm10, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm10 = xmm0[1,1,3,3]
; SSE41-NEXT: pand %xmm12, %xmm10
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm11[1,1,3,3]
; SSE41-NEXT: por %xmm10, %xmm0
; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm5
; SSE41-NEXT: movdqa %xmm7, %xmm0
; SSE41-NEXT: pxor %xmm9, %xmm0
; SSE41-NEXT: movdqa %xmm3, %xmm1
; SSE41-NEXT: pxor %xmm9, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm10
; SSE41-NEXT: pcmpgtd %xmm0, %xmm10
; SSE41-NEXT: pshufd {{.*#+}} xmm11 = xmm10[0,0,2,2]
; SSE41-NEXT: pcmpeqd %xmm0, %xmm1
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE41-NEXT: pand %xmm11, %xmm1
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm10[1,1,3,3]
; SSE41-NEXT: por %xmm1, %xmm0
; SSE41-NEXT: blendvpd %xmm0, %xmm3, %xmm7
; SSE41-NEXT: movdqa %xmm4, %xmm0
; SSE41-NEXT: pxor %xmm9, %xmm0
; SSE41-NEXT: movdqa %xmm8, %xmm1
; SSE41-NEXT: pxor %xmm9, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm3
; SSE41-NEXT: pcmpgtd %xmm0, %xmm3
; SSE41-NEXT: pshufd {{.*#+}} xmm10 = xmm3[0,0,2,2]
; SSE41-NEXT: pcmpeqd %xmm0, %xmm1
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE41-NEXT: pand %xmm10, %xmm1
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
; SSE41-NEXT: por %xmm1, %xmm0
; SSE41-NEXT: blendvpd %xmm0, %xmm8, %xmm4
; SSE41-NEXT: movdqa %xmm6, %xmm0
; SSE41-NEXT: pxor %xmm9, %xmm0
; SSE41-NEXT: movdqa %xmm2, %xmm1
; SSE41-NEXT: pxor %xmm9, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm3
; SSE41-NEXT: pcmpgtd %xmm0, %xmm3
; SSE41-NEXT: pshufd {{.*#+}} xmm8 = xmm3[0,0,2,2]
; SSE41-NEXT: pcmpeqd %xmm0, %xmm1
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE41-NEXT: pand %xmm8, %xmm1
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
; SSE41-NEXT: por %xmm1, %xmm0
; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm6
; SSE41-NEXT: movapd %xmm6, %xmm0
; SSE41-NEXT: xorpd %xmm9, %xmm0
; SSE41-NEXT: movapd %xmm4, %xmm1
; SSE41-NEXT: xorpd %xmm9, %xmm1
; SSE41-NEXT: movapd %xmm1, %xmm2
; SSE41-NEXT: pcmpgtd %xmm0, %xmm2
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
; SSE41-NEXT: pcmpeqd %xmm0, %xmm1
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE41-NEXT: pand %xmm3, %xmm1
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
; SSE41-NEXT: por %xmm1, %xmm0
; SSE41-NEXT: blendvpd %xmm0, %xmm4, %xmm6
; SSE41-NEXT: movapd %xmm7, %xmm0
; SSE41-NEXT: xorpd %xmm9, %xmm0
; SSE41-NEXT: movapd %xmm5, %xmm1
; SSE41-NEXT: xorpd %xmm9, %xmm1
; SSE41-NEXT: movapd %xmm1, %xmm2
; SSE41-NEXT: pcmpgtd %xmm0, %xmm2
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
; SSE41-NEXT: pcmpeqd %xmm0, %xmm1
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE41-NEXT: pand %xmm3, %xmm1
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
; SSE41-NEXT: por %xmm1, %xmm0
; SSE41-NEXT: blendvpd %xmm0, %xmm5, %xmm7
; SSE41-NEXT: movapd %xmm7, %xmm0
; SSE41-NEXT: xorpd %xmm9, %xmm0
; SSE41-NEXT: movapd %xmm6, %xmm1
; SSE41-NEXT: xorpd %xmm9, %xmm1
; SSE41-NEXT: movapd %xmm1, %xmm2
; SSE41-NEXT: pcmpgtd %xmm0, %xmm2
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
; SSE41-NEXT: pcmpeqd %xmm0, %xmm1
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE41-NEXT: pand %xmm3, %xmm1
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
; SSE41-NEXT: por %xmm1, %xmm0
; SSE41-NEXT: blendvpd %xmm0, %xmm6, %xmm7
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm7[2,3,0,1]
; SSE41-NEXT: movdqa %xmm7, %xmm0
; SSE41-NEXT: pxor %xmm9, %xmm0
; SSE41-NEXT: pxor %xmm1, %xmm9
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: pcmpgtd %xmm9, %xmm2
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
; SSE41-NEXT: pcmpeqd %xmm0, %xmm9
; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm9[1,1,3,3]
; SSE41-NEXT: pand %xmm3, %xmm4
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
; SSE41-NEXT: por %xmm4, %xmm0
; SSE41-NEXT: blendvpd %xmm0, %xmm7, %xmm1
; SSE41-NEXT: movq %xmm1, %rax
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v16i64:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpcmpgtq %xmm4, %xmm5, %xmm4
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm0, %xmm5
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm5, %ymm4
; AVX1-NEXT: vblendvpd %ymm4, %ymm0, %ymm2, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm4, %xmm2
; AVX1-NEXT: vpcmpgtq %xmm3, %xmm1, %xmm4
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm4, %ymm2
; AVX1-NEXT: vblendvpd %ymm2, %ymm1, %ymm3, %ymm1
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm3
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
; AVX1-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
; AVX1-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm3
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
; AVX1-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
; AVX1-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; AVX1-NEXT: vmovq %xmm0, %rax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v16i64:
; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtq %ymm3, %ymm1, %ymm4
; AVX2-NEXT: vblendvpd %ymm4, %ymm1, %ymm3, %ymm1
; AVX2-NEXT: vpcmpgtq %ymm2, %ymm0, %ymm3
; AVX2-NEXT: vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm2
; AVX2-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm2
; AVX2-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm2
; AVX2-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vmovq %xmm0, %rax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v16i64:
; AVX512: # %bb.0:
; AVX512-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vmovq %xmm0, %rax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
  %1 = call i64 @llvm.experimental.vector.reduce.smax.i64.v16i64(<16 x i64> %a0)
  ret i64 %1
}

;
; vXi32
;

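; i32 is simpler: pmaxsd exists from SSE4.1 onward, so those runs are a
; straight log2(n) shuffle-and-max ladder, while SSE2 open-codes each max as
; pcmpgtd + pand/pandn/por. The pshufd immediates only have to bring an
; unprocessed element into position 0: [2,3,0,1] swaps the 64-bit halves and
; [1,1,2,3] then pairs elements 0 and 1 for the final step.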
define i32 @test_v4i32(<4 x i32> %a0) {
; SSE2-LABEL: test_v4i32:
; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: pcmpgtd %xmm1, %xmm2
; SSE2-NEXT: pand %xmm2, %xmm0
; SSE2-NEXT: pandn %xmm1, %xmm2
; SSE2-NEXT: por %xmm0, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,2,3]
; SSE2-NEXT: movdqa %xmm2, %xmm1
; SSE2-NEXT: pcmpgtd %xmm0, %xmm1
; SSE2-NEXT: pand %xmm1, %xmm2
; SSE2-NEXT: pandn %xmm0, %xmm1
; SSE2-NEXT: por %xmm2, %xmm1
; SSE2-NEXT: movd %xmm1, %eax
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v4i32:
; SSE41: # %bb.0:
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSE41-NEXT: pmaxsd %xmm0, %xmm1
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
; SSE41-NEXT: pmaxsd %xmm1, %xmm0
; SSE41-NEXT: movd %xmm0, %eax
; SSE41-NEXT: retq
;
; AVX-LABEL: test_v4i32:
; AVX: # %bb.0:
; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovd %xmm0, %eax
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v4i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX512-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovd %xmm0, %eax
; AVX512-NEXT: retq
  %1 = call i32 @llvm.experimental.vector.reduce.smax.i32.v4i32(<4 x i32> %a0)
  ret i32 %1
}

define i32 @test_v8i32(<8 x i32> %a0) {
; SSE2-LABEL: test_v8i32:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: pcmpgtd %xmm1, %xmm2
; SSE2-NEXT: pand %xmm2, %xmm0
; SSE2-NEXT: pandn %xmm1, %xmm2
; SSE2-NEXT: por %xmm0, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
; SSE2-NEXT: movdqa %xmm2, %xmm1
; SSE2-NEXT: pcmpgtd %xmm0, %xmm1
; SSE2-NEXT: pand %xmm1, %xmm2
; SSE2-NEXT: pandn %xmm0, %xmm1
; SSE2-NEXT: por %xmm2, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: pcmpgtd %xmm0, %xmm2
; SSE2-NEXT: pand %xmm2, %xmm1
; SSE2-NEXT: pandn %xmm0, %xmm2
; SSE2-NEXT: por %xmm1, %xmm2
; SSE2-NEXT: movd %xmm2, %eax
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v8i32:
; SSE41: # %bb.0:
; SSE41-NEXT: pmaxsd %xmm1, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSE41-NEXT: pmaxsd %xmm0, %xmm1
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
; SSE41-NEXT: pmaxsd %xmm1, %xmm0
; SSE41-NEXT: movd %xmm0, %eax
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v8i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX1-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX1-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovd %xmm0, %eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v8i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX2-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX2-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vmovd %xmm0, %eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v8i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX512-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vmovd %xmm0, %eax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
  %1 = call i32 @llvm.experimental.vector.reduce.smax.i32.v8i32(<8 x i32> %a0)
  ret i32 %1
}

define i32 @test_v16i32(<16 x i32> %a0) {
; SSE2-LABEL: test_v16i32:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: pcmpgtd %xmm3, %xmm4
; SSE2-NEXT: pand %xmm4, %xmm1
; SSE2-NEXT: pandn %xmm3, %xmm4
; SSE2-NEXT: por %xmm1, %xmm4
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: pcmpgtd %xmm2, %xmm1
; SSE2-NEXT: pand %xmm1, %xmm0
; SSE2-NEXT: pandn %xmm2, %xmm1
; SSE2-NEXT: por %xmm0, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: pcmpgtd %xmm4, %xmm0
; SSE2-NEXT: pand %xmm0, %xmm1
; SSE2-NEXT: pandn %xmm4, %xmm0
; SSE2-NEXT: por %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: pcmpgtd %xmm1, %xmm2
; SSE2-NEXT: pand %xmm2, %xmm0
; SSE2-NEXT: pandn %xmm1, %xmm2
; SSE2-NEXT: por %xmm0, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,2,3]
; SSE2-NEXT: movdqa %xmm2, %xmm1
; SSE2-NEXT: pcmpgtd %xmm0, %xmm1
; SSE2-NEXT: pand %xmm1, %xmm2
; SSE2-NEXT: pandn %xmm0, %xmm1
; SSE2-NEXT: por %xmm2, %xmm1
; SSE2-NEXT: movd %xmm1, %eax
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v16i32:
; SSE41: # %bb.0:
; SSE41-NEXT: pmaxsd %xmm3, %xmm1
; SSE41-NEXT: pmaxsd %xmm2, %xmm0
; SSE41-NEXT: pmaxsd %xmm1, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSE41-NEXT: pmaxsd %xmm0, %xmm1
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
; SSE41-NEXT: pmaxsd %xmm1, %xmm0
; SSE41-NEXT: movd %xmm0, %eax
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v16i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpmaxsd %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpmaxsd %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX1-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX1-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovd %xmm0, %eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v16i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX2-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX2-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vmovd %xmm0, %eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v16i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512-NEXT: vpmaxsd %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512-NEXT: vpmaxsd %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512-NEXT: vpmaxsd %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX512-NEXT: vpmaxsd %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vmovd %xmm0, %eax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
  %1 = call i32 @llvm.experimental.vector.reduce.smax.i32.v16i32(<16 x i32> %a0)
  ret i32 %1
}

define i32 @test_v32i32(<32 x i32> %a0) {
; SSE2-LABEL: test_v32i32:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm2, %xmm8
; SSE2-NEXT: pcmpgtd %xmm6, %xmm8
; SSE2-NEXT: pand %xmm8, %xmm2
; SSE2-NEXT: pandn %xmm6, %xmm8
; SSE2-NEXT: por %xmm2, %xmm8
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: pcmpgtd %xmm4, %xmm2
; SSE2-NEXT: pand %xmm2, %xmm0
; SSE2-NEXT: pandn %xmm4, %xmm2
; SSE2-NEXT: por %xmm0, %xmm2
; SSE2-NEXT: movdqa %xmm3, %xmm0
; SSE2-NEXT: pcmpgtd %xmm7, %xmm0
; SSE2-NEXT: pand %xmm0, %xmm3
; SSE2-NEXT: pandn %xmm7, %xmm0
; SSE2-NEXT: por %xmm3, %xmm0
; SSE2-NEXT: movdqa %xmm1, %xmm3
; SSE2-NEXT: pcmpgtd %xmm5, %xmm3
; SSE2-NEXT: pand %xmm3, %xmm1
; SSE2-NEXT: pandn %xmm5, %xmm3
; SSE2-NEXT: por %xmm1, %xmm3
; SSE2-NEXT: movdqa %xmm3, %xmm1
; SSE2-NEXT: pcmpgtd %xmm0, %xmm1
; SSE2-NEXT: pand %xmm1, %xmm3
; SSE2-NEXT: pandn %xmm0, %xmm1
; SSE2-NEXT: por %xmm3, %xmm1
; SSE2-NEXT: movdqa %xmm2, %xmm0
; SSE2-NEXT: pcmpgtd %xmm8, %xmm0
; SSE2-NEXT: pand %xmm0, %xmm2
; SSE2-NEXT: pandn %xmm8, %xmm0
; SSE2-NEXT: por %xmm2, %xmm0
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: pcmpgtd %xmm1, %xmm2
; SSE2-NEXT: pand %xmm2, %xmm0
; SSE2-NEXT: pandn %xmm1, %xmm2
; SSE2-NEXT: por %xmm0, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
; SSE2-NEXT: movdqa %xmm2, %xmm1
; SSE2-NEXT: pcmpgtd %xmm0, %xmm1
; SSE2-NEXT: pand %xmm1, %xmm2
; SSE2-NEXT: pandn %xmm0, %xmm1
; SSE2-NEXT: por %xmm2, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: pcmpgtd %xmm0, %xmm2
; SSE2-NEXT: pand %xmm2, %xmm1
; SSE2-NEXT: pandn %xmm0, %xmm2
; SSE2-NEXT: por %xmm1, %xmm2
; SSE2-NEXT: movd %xmm2, %eax
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v32i32:
; SSE41: # %bb.0:
; SSE41-NEXT: pmaxsd %xmm6, %xmm2
; SSE41-NEXT: pmaxsd %xmm4, %xmm0
; SSE41-NEXT: pmaxsd %xmm2, %xmm0
; SSE41-NEXT: pmaxsd %xmm7, %xmm3
; SSE41-NEXT: pmaxsd %xmm5, %xmm1
; SSE41-NEXT: pmaxsd %xmm3, %xmm1
; SSE41-NEXT: pmaxsd %xmm0, %xmm1
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
; SSE41-NEXT: pmaxsd %xmm1, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; SSE41-NEXT: pmaxsd %xmm0, %xmm1
; SSE41-NEXT: movd %xmm1, %eax
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v32i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vpmaxsd %xmm3, %xmm1, %xmm4
; AVX1-NEXT: vpmaxsd %xmm2, %xmm0, %xmm5
; AVX1-NEXT: vpmaxsd %xmm4, %xmm5, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-NEXT: vpmaxsd %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpmaxsd %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpmaxsd %xmm0, %xmm4, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX1-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX1-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovd %xmm0, %eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v32i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vpmaxsd %ymm3, %ymm1, %ymm1
; AVX2-NEXT: vpmaxsd %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX2-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX2-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vmovd %xmm0, %eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v32i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpmaxsd %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512-NEXT: vpmaxsd %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512-NEXT: vpmaxsd %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512-NEXT: vpmaxsd %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX512-NEXT: vpmaxsd %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vmovd %xmm0, %eax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
  %1 = call i32 @llvm.experimental.vector.reduce.smax.i32.v32i32(<32 x i32> %a0)
  ret i32 %1
}

;
; vXi16
;

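; i16 has pmaxsw in baseline SSE2, so even the oldest run reduces with a
; shuffle/pmaxsw ladder (psrld $16 handles the last intra-dword step). From
; SSE4.1 onward the lowering leans on phminposuw instead, which computes an
; *unsigned horizontal minimum* of eight words: XORing every word with 32767
; (0x7FFF) maps signed order onto reversed unsigned order, so
; smax(x) == 0x7FFF ^ umin(x ^ 0x7FFF). For example the largest signed word
; 0x7FFF becomes 0x0000 (the smallest unsigned word) and the smallest signed
; word 0x8000 becomes 0xFFFF (the largest unsigned word); the final pxor
; undoes the bias on the selected element.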
define i16 @test_v8i16(<8 x i16> %a0) {
; SSE2-LABEL: test_v8i16:
; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSE2-NEXT: pmaxsw %xmm0, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
; SSE2-NEXT: pmaxsw %xmm1, %xmm0
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrld $16, %xmm1
; SSE2-NEXT: pmaxsw %xmm0, %xmm1
; SSE2-NEXT: movd %xmm1, %eax
; SSE2-NEXT: # kill: def $ax killed $ax killed $eax
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v8i16:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
; SSE41-NEXT: pxor %xmm1, %xmm0
; SSE41-NEXT: phminposuw %xmm0, %xmm0
; SSE41-NEXT: pxor %xmm1, %xmm0
; SSE41-NEXT: movd %xmm0, %eax
; SSE41-NEXT: # kill: def $ax killed $ax killed $eax
; SSE41-NEXT: retq
;
; AVX-LABEL: test_v8i16:
; AVX: # %bb.0:
; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX-NEXT: vphminposuw %xmm0, %xmm0
; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovd %xmm0, %eax
; AVX-NEXT: # kill: def $ax killed $ax killed $eax
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v8i16:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vphminposuw %xmm0, %xmm0
; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovd %xmm0, %eax
; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512-NEXT: retq
  %1 = call i16 @llvm.experimental.vector.reduce.smax.i16.v8i16(<8 x i16> %a0)
  ret i16 %1
}

define i16 @test_v16i16(<16 x i16> %a0) {
; SSE2-LABEL: test_v16i16:
; SSE2: # %bb.0:
; SSE2-NEXT: pmaxsw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSE2-NEXT: pmaxsw %xmm0, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
; SSE2-NEXT: pmaxsw %xmm1, %xmm0
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrld $16, %xmm1
; SSE2-NEXT: pmaxsw %xmm0, %xmm1
; SSE2-NEXT: movd %xmm1, %eax
; SSE2-NEXT: # kill: def $ax killed $ax killed $eax
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v16i16:
; SSE41: # %bb.0:
; SSE41-NEXT: pmaxsw %xmm1, %xmm0
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
; SSE41-NEXT: pxor %xmm1, %xmm0
; SSE41-NEXT: phminposuw %xmm0, %xmm0
; SSE41-NEXT: pxor %xmm1, %xmm0
; SSE41-NEXT: movd %xmm0, %eax
; SSE41-NEXT: # kill: def $ax killed $ax killed $eax
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v16i16:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpmaxsw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vphminposuw %xmm0, %xmm0
; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovd %xmm0, %eax
; AVX1-NEXT: # kill: def $ax killed $ax killed $eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v16i16:
; AVX2: # %bb.0:
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpmaxsw %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vphminposuw %xmm0, %xmm0
; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovd %xmm0, %eax
; AVX2-NEXT: # kill: def $ax killed $ax killed $eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v16i16:
; AVX512: # %bb.0:
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512-NEXT: vpmaxsw %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vphminposuw %xmm0, %xmm0
; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovd %xmm0, %eax
; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
  %1 = call i16 @llvm.experimental.vector.reduce.smax.i16.v16i16(<16 x i16> %a0)
  ret i16 %1
}

define i16 @test_v32i16(<32 x i16> %a0) {
; SSE2-LABEL: test_v32i16:
; SSE2: # %bb.0:
; SSE2-NEXT: pmaxsw %xmm3, %xmm1
; SSE2-NEXT: pmaxsw %xmm2, %xmm0
; SSE2-NEXT: pmaxsw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSE2-NEXT: pmaxsw %xmm0, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
; SSE2-NEXT: pmaxsw %xmm1, %xmm0
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrld $16, %xmm1
; SSE2-NEXT: pmaxsw %xmm0, %xmm1
; SSE2-NEXT: movd %xmm1, %eax
; SSE2-NEXT: # kill: def $ax killed $ax killed $eax
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v32i16:
; SSE41: # %bb.0:
; SSE41-NEXT: pmaxsw %xmm3, %xmm1
; SSE41-NEXT: pmaxsw %xmm2, %xmm0
; SSE41-NEXT: pmaxsw %xmm1, %xmm0
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
; SSE41-NEXT: pxor %xmm1, %xmm0
; SSE41-NEXT: phminposuw %xmm0, %xmm0
; SSE41-NEXT: pxor %xmm1, %xmm0
; SSE41-NEXT: movd %xmm0, %eax
; SSE41-NEXT: # kill: def $ax killed $ax killed $eax
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v32i16:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpmaxsw %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpmaxsw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpmaxsw %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vphminposuw %xmm0, %xmm0
; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovd %xmm0, %eax
; AVX1-NEXT: # kill: def $ax killed $ax killed $eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v32i16:
; AVX2: # %bb.0:
; AVX2-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpmaxsw %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vphminposuw %xmm0, %xmm0
; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovd %xmm0, %eax
; AVX2-NEXT: # kill: def $ax killed $ax killed $eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v32i16:
; AVX512: # %bb.0:
; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512-NEXT: vpmaxsw %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vphminposuw %xmm0, %xmm0
; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovd %xmm0, %eax
; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
  %1 = call i16 @llvm.experimental.vector.reduce.smax.i16.v32i16(<32 x i16> %a0)
  ret i16 %1
}

define i16 @test_v64i16(<64 x i16> %a0) {
; SSE2-LABEL: test_v64i16:
; SSE2: # %bb.0:
; SSE2-NEXT: pmaxsw %xmm6, %xmm2
; SSE2-NEXT: pmaxsw %xmm4, %xmm0
; SSE2-NEXT: pmaxsw %xmm2, %xmm0
; SSE2-NEXT: pmaxsw %xmm7, %xmm3
; SSE2-NEXT: pmaxsw %xmm5, %xmm1
; SSE2-NEXT: pmaxsw %xmm3, %xmm1
; SSE2-NEXT: pmaxsw %xmm0, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
; SSE2-NEXT: pmaxsw %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; SSE2-NEXT: pmaxsw %xmm0, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: psrld $16, %xmm0
; SSE2-NEXT: pmaxsw %xmm1, %xmm0
; SSE2-NEXT: movd %xmm0, %eax
; SSE2-NEXT: # kill: def $ax killed $ax killed $eax
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v64i16:
; SSE41: # %bb.0:
; SSE41-NEXT: pmaxsw %xmm7, %xmm3
; SSE41-NEXT: pmaxsw %xmm5, %xmm1
; SSE41-NEXT: pmaxsw %xmm3, %xmm1
; SSE41-NEXT: pmaxsw %xmm6, %xmm2
; SSE41-NEXT: pmaxsw %xmm4, %xmm0
; SSE41-NEXT: pmaxsw %xmm2, %xmm0
; SSE41-NEXT: pmaxsw %xmm1, %xmm0
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
; SSE41-NEXT: pxor %xmm1, %xmm0
; SSE41-NEXT: phminposuw %xmm0, %xmm0
; SSE41-NEXT: pxor %xmm1, %xmm0
; SSE41-NEXT: movd %xmm0, %eax
; SSE41-NEXT: # kill: def $ax killed $ax killed $eax
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v64i16:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
; AVX1-NEXT: vpmaxsw %xmm4, %xmm5, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm5
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm6
; AVX1-NEXT: vpmaxsw %xmm5, %xmm6, %xmm5
; AVX1-NEXT: vpmaxsw %xmm4, %xmm5, %xmm4
; AVX1-NEXT: vpmaxsw %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpmaxsw %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpmaxsw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpmaxsw %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vphminposuw %xmm0, %xmm0
; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovd %xmm0, %eax
; AVX1-NEXT: # kill: def $ax killed $ax killed $eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v64i16:
; AVX2: # %bb.0:
; AVX2-NEXT: vpmaxsw %ymm3, %ymm1, %ymm1
; AVX2-NEXT: vpmaxsw %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpmaxsw %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vphminposuw %xmm0, %xmm0
; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovd %xmm0, %eax
; AVX2-NEXT: # kill: def $ax killed $ax killed $eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v64i16:
; AVX512: # %bb.0:
; AVX512-NEXT: vpmaxsw %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512-NEXT: vpmaxsw %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovdqa {{.*#+}} xmm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vphminposuw %xmm0, %xmm0
; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovd %xmm0, %eax
; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
  %1 = call i16 @llvm.experimental.vector.reduce.smax.i16.v64i16(<64 x i16> %a0)
  ret i16 %1
}

;
; vXi8
;

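; Bytes have no phminpos counterpart, so the SSE4.1+ runs reuse the word trick
; with one extra fold: after XORing with 127 to turn signed max into unsigned
; min, psrlw $8 + pminub reduces each 16-bit lane to its smaller byte
; zero-extended (the shifted-in high byte is 0, and an unsigned min against 0
; is 0), phminposuw then finds the smallest such word, and a final pxor with
; 127 recovers the answer, extracted with pextrb. SSE2 has no pmaxsb either,
; so that run open-codes every step with pcmpgtb selects, using psrld $16 and
; psrlw $8 to pair up the last byte distances.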
define i8 @test_v16i8(<16 x i8> %a0) {
; SSE2-LABEL: test_v16i8:
; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: pcmpgtb %xmm1, %xmm2
; SSE2-NEXT: pand %xmm2, %xmm0
; SSE2-NEXT: pandn %xmm1, %xmm2
; SSE2-NEXT: por %xmm0, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,2,3]
; SSE2-NEXT: movdqa %xmm2, %xmm1
; SSE2-NEXT: pcmpgtb %xmm0, %xmm1
; SSE2-NEXT: pand %xmm1, %xmm2
; SSE2-NEXT: pandn %xmm0, %xmm1
; SSE2-NEXT: por %xmm2, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: psrld $16, %xmm0
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: pcmpgtb %xmm0, %xmm2
; SSE2-NEXT: pand %xmm2, %xmm1
; SSE2-NEXT: pandn %xmm0, %xmm2
; SSE2-NEXT: por %xmm1, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm0
; SSE2-NEXT: psrlw $8, %xmm0
; SSE2-NEXT: movdqa %xmm2, %xmm1
; SSE2-NEXT: pcmpgtb %xmm0, %xmm1
; SSE2-NEXT: pand %xmm1, %xmm2
; SSE2-NEXT: pandn %xmm0, %xmm1
; SSE2-NEXT: por %xmm2, %xmm1
; SSE2-NEXT: movd %xmm1, %eax
; SSE2-NEXT: # kill: def $al killed $al killed $eax
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v16i8:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
; SSE41-NEXT: pxor %xmm1, %xmm0
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: psrlw $8, %xmm2
; SSE41-NEXT: pminub %xmm0, %xmm2
; SSE41-NEXT: phminposuw %xmm2, %xmm0
; SSE41-NEXT: pxor %xmm1, %xmm0
; SSE41-NEXT: pextrb $0, %xmm0, %eax
; SSE41-NEXT: # kill: def $al killed $al killed $eax
; SSE41-NEXT: retq
;
; AVX-LABEL: test_v16i8:
; AVX: # %bb.0:
; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpsrlw $8, %xmm0, %xmm2
; AVX-NEXT: vpminub %xmm2, %xmm0, %xmm0
; AVX-NEXT: vphminposuw %xmm0, %xmm0
; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpextrb $0, %xmm0, %eax
; AVX-NEXT: # kill: def $al killed $al killed $eax
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v16i8:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovdqa {{.*#+}} xmm1 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm2
; AVX512-NEXT: vpminub %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vphminposuw %xmm0, %xmm0
; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpextrb $0, %xmm0, %eax
; AVX512-NEXT: # kill: def $al killed $al killed $eax
; AVX512-NEXT: retq
  %1 = call i8 @llvm.experimental.vector.reduce.smax.i8.v16i8(<16 x i8> %a0)
  ret i8 %1
}

define i8 @test_v32i8(<32 x i8> %a0) {
|
|
; SSE2-LABEL: test_v32i8:
|
|
; SSE2: # %bb.0:
|
|
; SSE2-NEXT: movdqa %xmm0, %xmm2
|
|
; SSE2-NEXT: pcmpgtb %xmm1, %xmm2
|
|
; SSE2-NEXT: pand %xmm2, %xmm0
|
|
; SSE2-NEXT: pandn %xmm1, %xmm2
|
|
; SSE2-NEXT: por %xmm0, %xmm2
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
|
|
; SSE2-NEXT: movdqa %xmm2, %xmm1
|
|
; SSE2-NEXT: pcmpgtb %xmm0, %xmm1
|
|
; SSE2-NEXT: pand %xmm1, %xmm2
|
|
; SSE2-NEXT: pandn %xmm0, %xmm1
|
|
; SSE2-NEXT: por %xmm2, %xmm1
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
|
|
; SSE2-NEXT: movdqa %xmm1, %xmm2
|
|
; SSE2-NEXT: pcmpgtb %xmm0, %xmm2
|
|
; SSE2-NEXT: pand %xmm2, %xmm1
|
|
; SSE2-NEXT: pandn %xmm0, %xmm2
|
|
; SSE2-NEXT: por %xmm1, %xmm2
|
|
; SSE2-NEXT: movdqa %xmm2, %xmm0
|
|
; SSE2-NEXT: psrld $16, %xmm0
|
|
; SSE2-NEXT: movdqa %xmm2, %xmm1
|
|
; SSE2-NEXT: pcmpgtb %xmm0, %xmm1
|
|
; SSE2-NEXT: pand %xmm1, %xmm2
|
|
; SSE2-NEXT: pandn %xmm0, %xmm1
|
|
; SSE2-NEXT: por %xmm2, %xmm1
|
|
; SSE2-NEXT: movdqa %xmm1, %xmm0
|
|
; SSE2-NEXT: psrlw $8, %xmm0
|
|
; SSE2-NEXT: movdqa %xmm1, %xmm2
|
|
; SSE2-NEXT: pcmpgtb %xmm0, %xmm2
|
|
; SSE2-NEXT: pand %xmm2, %xmm1
|
|
; SSE2-NEXT: pandn %xmm0, %xmm2
|
|
; SSE2-NEXT: por %xmm1, %xmm2
|
|
; SSE2-NEXT: movd %xmm2, %eax
|
|
; SSE2-NEXT: # kill: def $al killed $al killed $eax
|
|
; SSE2-NEXT: retq
|
|
;
|
|
; SSE41-LABEL: test_v32i8:
|
|
; SSE41: # %bb.0:
|
|
; SSE41-NEXT: pmaxsb %xmm1, %xmm0
|
|
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
|
|
; SSE41-NEXT: pxor %xmm1, %xmm0
|
|
; SSE41-NEXT: movdqa %xmm0, %xmm2
|
|
; SSE41-NEXT: psrlw $8, %xmm2
|
|
; SSE41-NEXT: pminub %xmm0, %xmm2
|
|
; SSE41-NEXT: phminposuw %xmm2, %xmm0
|
|
; SSE41-NEXT: pxor %xmm1, %xmm0
|
|
; SSE41-NEXT: pextrb $0, %xmm0, %eax
|
|
; SSE41-NEXT: # kill: def $al killed $al killed $eax
|
|
; SSE41-NEXT: retq
|
|
;
|
|
; AVX1-LABEL: test_v32i8:
|
|
; AVX1: # %bb.0:
|
|
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
|
|
; AVX1-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0
|
|
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
|
|
; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
|
|
; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm2
|
|
; AVX1-NEXT: vpminub %xmm2, %xmm0, %xmm0
|
|
; AVX1-NEXT: vphminposuw %xmm0, %xmm0
|
|
; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
|
|
; AVX1-NEXT: vpextrb $0, %xmm0, %eax
|
|
; AVX1-NEXT: # kill: def $al killed $al killed $eax
|
|
; AVX1-NEXT: vzeroupper
|
|
; AVX1-NEXT: retq
|
|
;
|
|
; AVX2-LABEL: test_v32i8:
|
|
; AVX2: # %bb.0:
|
|
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
|
|
; AVX2-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0
|
|
; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
|
|
; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
|
|
; AVX2-NEXT: vpsrlw $8, %xmm0, %xmm2
|
|
; AVX2-NEXT: vpminub %xmm2, %xmm0, %xmm0
|
|
; AVX2-NEXT: vphminposuw %xmm0, %xmm0
|
|
; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
|
|
; AVX2-NEXT: vpextrb $0, %xmm0, %eax
|
|
; AVX2-NEXT: # kill: def $al killed $al killed $eax
|
|
; AVX2-NEXT: vzeroupper
|
|
; AVX2-NEXT: retq
|
|
;
|
|
; AVX512-LABEL: test_v32i8:
|
|
; AVX512: # %bb.0:
|
|
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
|
|
; AVX512-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0
|
|
; AVX512-NEXT: vmovdqa {{.*#+}} xmm1 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
|
|
; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
|
|
; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm2
|
|
; AVX512-NEXT: vpminub %xmm2, %xmm0, %xmm0
|
|
; AVX512-NEXT: vphminposuw %xmm0, %xmm0
|
|
; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
|
|
; AVX512-NEXT: vpextrb $0, %xmm0, %eax
|
|
; AVX512-NEXT: # kill: def $al killed $al killed $eax
|
|
; AVX512-NEXT: vzeroupper
|
|
; AVX512-NEXT: retq
|
|
%1 = call i8 @llvm.experimental.vector.reduce.smax.i8.v32i8(<32 x i8> %a0)
|
|
ret i8 %1
|
|
}
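
; The wider cases below first reduce to a single 128-bit vector: AVX1 pulls
; the high halves out with VEXTRACTF128 and combines them via VPMAXSB, AVX2
; and AVX512 max whole ymm/zmm registers before extracting, and the SSE4.1
; path pairs up the input xmm registers with PMAXSB. Once one xmm remains,
; each target falls into the same PHMINPOSUW tail shown above.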

define i8 @test_v64i8(<64 x i8> %a0) {
; SSE2-LABEL: test_v64i8:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: pcmpgtb %xmm3, %xmm4
; SSE2-NEXT: pand %xmm4, %xmm1
; SSE2-NEXT: pandn %xmm3, %xmm4
; SSE2-NEXT: por %xmm1, %xmm4
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: pcmpgtb %xmm2, %xmm1
; SSE2-NEXT: pand %xmm1, %xmm0
; SSE2-NEXT: pandn %xmm2, %xmm1
; SSE2-NEXT: por %xmm0, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: pcmpgtb %xmm4, %xmm0
; SSE2-NEXT: pand %xmm0, %xmm1
; SSE2-NEXT: pandn %xmm4, %xmm0
; SSE2-NEXT: por %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: pcmpgtb %xmm1, %xmm2
; SSE2-NEXT: pand %xmm2, %xmm0
; SSE2-NEXT: pandn %xmm1, %xmm2
; SSE2-NEXT: por %xmm0, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,2,3]
; SSE2-NEXT: movdqa %xmm2, %xmm1
; SSE2-NEXT: pcmpgtb %xmm0, %xmm1
; SSE2-NEXT: pand %xmm1, %xmm2
; SSE2-NEXT: pandn %xmm0, %xmm1
; SSE2-NEXT: por %xmm2, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: psrld $16, %xmm0
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: pcmpgtb %xmm0, %xmm2
; SSE2-NEXT: pand %xmm2, %xmm1
; SSE2-NEXT: pandn %xmm0, %xmm2
; SSE2-NEXT: por %xmm1, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm0
; SSE2-NEXT: psrlw $8, %xmm0
; SSE2-NEXT: movdqa %xmm2, %xmm1
; SSE2-NEXT: pcmpgtb %xmm0, %xmm1
; SSE2-NEXT: pand %xmm1, %xmm2
; SSE2-NEXT: pandn %xmm0, %xmm1
; SSE2-NEXT: por %xmm2, %xmm1
; SSE2-NEXT: movd %xmm1, %eax
; SSE2-NEXT: # kill: def $al killed $al killed $eax
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v64i8:
; SSE41: # %bb.0:
; SSE41-NEXT: pmaxsb %xmm3, %xmm1
; SSE41-NEXT: pmaxsb %xmm2, %xmm0
; SSE41-NEXT: pmaxsb %xmm1, %xmm0
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
; SSE41-NEXT: pxor %xmm1, %xmm0
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: psrlw $8, %xmm2
; SSE41-NEXT: pminub %xmm0, %xmm2
; SSE41-NEXT: phminposuw %xmm2, %xmm0
; SSE41-NEXT: pxor %xmm1, %xmm0
; SSE41-NEXT: pextrb $0, %xmm0, %eax
; SSE41-NEXT: # kill: def $al killed $al killed $eax
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v64i8:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpmaxsb %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpmaxsb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm2
; AVX1-NEXT: vpminub %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vphminposuw %xmm0, %xmm0
; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpextrb $0, %xmm0, %eax
; AVX1-NEXT: # kill: def $al killed $al killed $eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v64i8:
; AVX2: # %bb.0:
; AVX2-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpsrlw $8, %xmm0, %xmm2
; AVX2-NEXT: vpminub %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vphminposuw %xmm0, %xmm0
; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpextrb $0, %xmm0, %eax
; AVX2-NEXT: # kill: def $al killed $al killed $eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v64i8:
; AVX512: # %bb.0:
; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovdqa {{.*#+}} xmm1 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm2
; AVX512-NEXT: vpminub %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vphminposuw %xmm0, %xmm0
; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpextrb $0, %xmm0, %eax
; AVX512-NEXT: # kill: def $al killed $al killed $eax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = call i8 @llvm.experimental.vector.reduce.smax.i8.v64i8(<64 x i8> %a0)
ret i8 %1
}
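
; SSE2 has no PMAXSB, so each max in the SSE2 checks is open-coded as a
; compare-and-select: PCMPGTB builds a lane mask and PAND/PANDN/POR blend the
; two operands, with PSHUFD/PSRLD/PSRLW repeatedly folding the upper half of
; the vector onto the lower half until a single byte remains.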

define i8 @test_v128i8(<128 x i8> %a0) {
; SSE2-LABEL: test_v128i8:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm2, %xmm8
; SSE2-NEXT: pcmpgtb %xmm6, %xmm8
; SSE2-NEXT: pand %xmm8, %xmm2
; SSE2-NEXT: pandn %xmm6, %xmm8
; SSE2-NEXT: por %xmm2, %xmm8
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: pcmpgtb %xmm4, %xmm2
; SSE2-NEXT: pand %xmm2, %xmm0
; SSE2-NEXT: pandn %xmm4, %xmm2
; SSE2-NEXT: por %xmm0, %xmm2
; SSE2-NEXT: movdqa %xmm3, %xmm0
; SSE2-NEXT: pcmpgtb %xmm7, %xmm0
; SSE2-NEXT: pand %xmm0, %xmm3
; SSE2-NEXT: pandn %xmm7, %xmm0
; SSE2-NEXT: por %xmm3, %xmm0
; SSE2-NEXT: movdqa %xmm1, %xmm3
; SSE2-NEXT: pcmpgtb %xmm5, %xmm3
; SSE2-NEXT: pand %xmm3, %xmm1
; SSE2-NEXT: pandn %xmm5, %xmm3
; SSE2-NEXT: por %xmm1, %xmm3
; SSE2-NEXT: movdqa %xmm3, %xmm1
; SSE2-NEXT: pcmpgtb %xmm0, %xmm1
; SSE2-NEXT: pand %xmm1, %xmm3
; SSE2-NEXT: pandn %xmm0, %xmm1
; SSE2-NEXT: por %xmm3, %xmm1
; SSE2-NEXT: movdqa %xmm2, %xmm0
; SSE2-NEXT: pcmpgtb %xmm8, %xmm0
; SSE2-NEXT: pand %xmm0, %xmm2
; SSE2-NEXT: pandn %xmm8, %xmm0
; SSE2-NEXT: por %xmm2, %xmm0
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: pcmpgtb %xmm1, %xmm2
; SSE2-NEXT: pand %xmm2, %xmm0
; SSE2-NEXT: pandn %xmm1, %xmm2
; SSE2-NEXT: por %xmm0, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
; SSE2-NEXT: movdqa %xmm2, %xmm1
; SSE2-NEXT: pcmpgtb %xmm0, %xmm1
; SSE2-NEXT: pand %xmm1, %xmm2
; SSE2-NEXT: pandn %xmm0, %xmm1
; SSE2-NEXT: por %xmm2, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: pcmpgtb %xmm0, %xmm2
; SSE2-NEXT: pand %xmm2, %xmm1
; SSE2-NEXT: pandn %xmm0, %xmm2
; SSE2-NEXT: por %xmm1, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm0
; SSE2-NEXT: psrld $16, %xmm0
; SSE2-NEXT: movdqa %xmm2, %xmm1
; SSE2-NEXT: pcmpgtb %xmm0, %xmm1
; SSE2-NEXT: pand %xmm1, %xmm2
; SSE2-NEXT: pandn %xmm0, %xmm1
; SSE2-NEXT: por %xmm2, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: psrlw $8, %xmm0
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: pcmpgtb %xmm0, %xmm2
; SSE2-NEXT: pand %xmm2, %xmm1
; SSE2-NEXT: pandn %xmm0, %xmm2
; SSE2-NEXT: por %xmm1, %xmm2
; SSE2-NEXT: movd %xmm2, %eax
; SSE2-NEXT: # kill: def $al killed $al killed $eax
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v128i8:
; SSE41: # %bb.0:
; SSE41-NEXT: pmaxsb %xmm7, %xmm3
; SSE41-NEXT: pmaxsb %xmm5, %xmm1
; SSE41-NEXT: pmaxsb %xmm3, %xmm1
; SSE41-NEXT: pmaxsb %xmm6, %xmm2
; SSE41-NEXT: pmaxsb %xmm4, %xmm0
; SSE41-NEXT: pmaxsb %xmm2, %xmm0
; SSE41-NEXT: pmaxsb %xmm1, %xmm0
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
; SSE41-NEXT: pxor %xmm1, %xmm0
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: psrlw $8, %xmm2
; SSE41-NEXT: pminub %xmm0, %xmm2
; SSE41-NEXT: phminposuw %xmm2, %xmm0
; SSE41-NEXT: pxor %xmm1, %xmm0
; SSE41-NEXT: pextrb $0, %xmm0, %eax
; SSE41-NEXT: # kill: def $al killed $al killed $eax
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v128i8:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
; AVX1-NEXT: vpmaxsb %xmm4, %xmm5, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm5
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm6
; AVX1-NEXT: vpmaxsb %xmm5, %xmm6, %xmm5
; AVX1-NEXT: vpmaxsb %xmm4, %xmm5, %xmm4
; AVX1-NEXT: vpmaxsb %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpmaxsb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpmaxsb %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm2
; AVX1-NEXT: vpminub %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vphminposuw %xmm0, %xmm0
; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpextrb $0, %xmm0, %eax
; AVX1-NEXT: # kill: def $al killed $al killed $eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v128i8:
; AVX2: # %bb.0:
; AVX2-NEXT: vpmaxsb %ymm3, %ymm1, %ymm1
; AVX2-NEXT: vpmaxsb %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpsrlw $8, %xmm0, %xmm2
; AVX2-NEXT: vpminub %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vphminposuw %xmm0, %xmm0
; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpextrb $0, %xmm0, %eax
; AVX2-NEXT: # kill: def $al killed $al killed $eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v128i8:
; AVX512: # %bb.0:
; AVX512-NEXT: vpmaxsb %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovdqa {{.*#+}} xmm1 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm2
; AVX512-NEXT: vpminub %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vphminposuw %xmm0, %xmm0
; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpextrb $0, %xmm0, %eax
; AVX512-NEXT: # kill: def $al killed $al killed $eax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = call i8 @llvm.experimental.vector.reduce.smax.i8.v128i8(<128 x i8> %a0)
ret i8 %1
}

declare i64 @llvm.experimental.vector.reduce.smax.i64.v2i64(<2 x i64>)
declare i64 @llvm.experimental.vector.reduce.smax.i64.v4i64(<4 x i64>)
declare i64 @llvm.experimental.vector.reduce.smax.i64.v8i64(<8 x i64>)
declare i64 @llvm.experimental.vector.reduce.smax.i64.v16i64(<16 x i64>)

declare i32 @llvm.experimental.vector.reduce.smax.i32.v4i32(<4 x i32>)
declare i32 @llvm.experimental.vector.reduce.smax.i32.v8i32(<8 x i32>)
declare i32 @llvm.experimental.vector.reduce.smax.i32.v16i32(<16 x i32>)
declare i32 @llvm.experimental.vector.reduce.smax.i32.v32i32(<32 x i32>)

declare i16 @llvm.experimental.vector.reduce.smax.i16.v8i16(<8 x i16>)
declare i16 @llvm.experimental.vector.reduce.smax.i16.v16i16(<16 x i16>)
declare i16 @llvm.experimental.vector.reduce.smax.i16.v32i16(<32 x i16>)
declare i16 @llvm.experimental.vector.reduce.smax.i16.v64i16(<64 x i16>)

declare i8 @llvm.experimental.vector.reduce.smax.i8.v16i8(<16 x i8>)
declare i8 @llvm.experimental.vector.reduce.smax.i8.v32i8(<32 x i8>)
declare i8 @llvm.experimental.vector.reduce.smax.i8.v64i8(<64 x i8>)
declare i8 @llvm.experimental.vector.reduce.smax.i8.v128i8(<128 x i8>)