; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE41
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512VL --check-prefix=AVX512BWVL
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512dq | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512DQ
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512dq,+avx512vl | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512VL --check-prefix=AVX512DQVL

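; Each test below reduces a vector to a scalar by multiplying all of its
; elements together through the @llvm.experimental.vector.reduce.mul.*
; intrinsics; the autogenerated CHECK lines pin down the expected lowering
; for each RUN configuration above.
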
;
; vXi64
;
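; Only the AVX512DQ targets have a native vXi64 multiply (vpmullq); the other
; targets expand each 64-bit multiply into 32-bit pmuludq cross products
; combined with shifts and adds, halving the vector width at each step of the
; reduction.
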
define i64 @test_v2i64(<2 x i64> %a0) {
; SSE-LABEL: test_v2i64:
; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrlq $32, %xmm2
; SSE-NEXT: pmuludq %xmm1, %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psrldq {{.*#+}} xmm3 = xmm3[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; SSE-NEXT: pmuludq %xmm0, %xmm3
; SSE-NEXT: paddq %xmm2, %xmm3
; SSE-NEXT: psllq $32, %xmm3
; SSE-NEXT: pmuludq %xmm1, %xmm0
; SSE-NEXT: paddq %xmm3, %xmm0
; SSE-NEXT: movq %xmm0, %rax
; SSE-NEXT: retq
;
; AVX-LABEL: test_v2i64:
; AVX: # %bb.0:
; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX-NEXT: vpsrlq $32, %xmm0, %xmm2
; AVX-NEXT: vpmuludq %xmm1, %xmm2, %xmm2
; AVX-NEXT: vpsrldq {{.*#+}} xmm3 = xmm0[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
; AVX-NEXT: vpaddq %xmm2, %xmm3, %xmm2
; AVX-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpaddq %xmm2, %xmm0, %xmm0
; AVX-NEXT: vmovq %xmm0, %rax
; AVX-NEXT: retq
;
; AVX512BW-LABEL: test_v2i64:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512BW-NEXT: vpsrlq $32, %xmm0, %xmm2
; AVX512BW-NEXT: vpmuludq %xmm1, %xmm2, %xmm2
; AVX512BW-NEXT: vpsrldq {{.*#+}} xmm3 = xmm0[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX512BW-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
; AVX512BW-NEXT: vpaddq %xmm2, %xmm3, %xmm2
; AVX512BW-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX512BW-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
; AVX512BW-NEXT: vpaddq %xmm2, %xmm0, %xmm0
; AVX512BW-NEXT: vmovq %xmm0, %rax
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: test_v2i64:
; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512BWVL-NEXT: vpsrlq $32, %xmm0, %xmm2
; AVX512BWVL-NEXT: vpmuludq %xmm1, %xmm2, %xmm2
; AVX512BWVL-NEXT: vpsrlq $32, %xmm1, %xmm3
; AVX512BWVL-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
; AVX512BWVL-NEXT: vpaddq %xmm2, %xmm3, %xmm2
; AVX512BWVL-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX512BWVL-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
; AVX512BWVL-NEXT: vpaddq %xmm2, %xmm0, %xmm0
; AVX512BWVL-NEXT: vmovq %xmm0, %rax
; AVX512BWVL-NEXT: retq
;
; AVX512DQ-LABEL: test_v2i64:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512DQ-NEXT: vpmullq %zmm1, %zmm0, %zmm0
; AVX512DQ-NEXT: vmovq %xmm0, %rax
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512DQVL-LABEL: test_v2i64:
; AVX512DQVL: # %bb.0:
; AVX512DQVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512DQVL-NEXT: vpmullq %xmm1, %xmm0, %xmm0
; AVX512DQVL-NEXT: vmovq %xmm0, %rax
; AVX512DQVL-NEXT: retq
  %1 = call i64 @llvm.experimental.vector.reduce.mul.i64.v2i64(<2 x i64> %a0)
  ret i64 %1
}

define i64 @test_v4i64(<4 x i64> %a0) {
; SSE-LABEL: test_v4i64:
; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrlq $32, %xmm2
; SSE-NEXT: pmuludq %xmm1, %xmm2
; SSE-NEXT: movdqa %xmm1, %xmm3
; SSE-NEXT: psrlq $32, %xmm3
; SSE-NEXT: pmuludq %xmm0, %xmm3
; SSE-NEXT: paddq %xmm2, %xmm3
; SSE-NEXT: psllq $32, %xmm3
; SSE-NEXT: pmuludq %xmm1, %xmm0
; SSE-NEXT: paddq %xmm3, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrlq $32, %xmm2
; SSE-NEXT: pmuludq %xmm1, %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psrldq {{.*#+}} xmm3 = xmm3[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; SSE-NEXT: pmuludq %xmm0, %xmm3
; SSE-NEXT: paddq %xmm2, %xmm3
; SSE-NEXT: psllq $32, %xmm3
; SSE-NEXT: pmuludq %xmm1, %xmm0
; SSE-NEXT: paddq %xmm3, %xmm0
; SSE-NEXT: movq %xmm0, %rax
; SSE-NEXT: retq
;
; AVX1-LABEL: test_v4i64:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpsrlq $32, %xmm0, %xmm2
; AVX1-NEXT: vpmuludq %xmm1, %xmm2, %xmm2
; AVX1-NEXT: vpsrlq $32, %xmm1, %xmm3
; AVX1-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
; AVX1-NEXT: vpaddq %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX1-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpaddq %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX1-NEXT: vpsrlq $32, %xmm0, %xmm2
; AVX1-NEXT: vpmuludq %xmm1, %xmm2, %xmm2
; AVX1-NEXT: vpsrlq $32, %xmm1, %xmm3
; AVX1-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
; AVX1-NEXT: vpaddq %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX1-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpaddq %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vmovq %xmm0, %rax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v4i64:
; AVX2: # %bb.0:
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpsrlq $32, %ymm0, %ymm2
; AVX2-NEXT: vpmuludq %ymm1, %ymm2, %ymm2
; AVX2-NEXT: vpsrlq $32, %ymm1, %ymm3
; AVX2-NEXT: vpmuludq %ymm3, %ymm0, %ymm3
; AVX2-NEXT: vpaddq %ymm2, %ymm3, %ymm2
; AVX2-NEXT: vpsllq $32, %ymm2, %ymm2
; AVX2-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpaddq %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX2-NEXT: vpsrldq {{.*#+}} ymm2 = ymm0[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[28,29,30,31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX2-NEXT: vpmuludq %ymm2, %ymm0, %ymm2
; AVX2-NEXT: vpsrlq $32, %ymm0, %ymm3
; AVX2-NEXT: vpmuludq %ymm1, %ymm3, %ymm3
; AVX2-NEXT: vpaddq %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpsllq $32, %ymm2, %ymm2
; AVX2-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpaddq %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vmovq %xmm0, %rax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test_v4i64:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512BW-NEXT: vpsrlq $32, %ymm0, %ymm2
; AVX512BW-NEXT: vpmuludq %ymm1, %ymm2, %ymm2
; AVX512BW-NEXT: vpsrlq $32, %ymm1, %ymm3
; AVX512BW-NEXT: vpmuludq %ymm3, %ymm0, %ymm3
; AVX512BW-NEXT: vpaddq %ymm2, %ymm3, %ymm2
; AVX512BW-NEXT: vpsllq $32, %ymm2, %ymm2
; AVX512BW-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpaddq %ymm2, %ymm0, %ymm0
; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512BW-NEXT: vpsrldq {{.*#+}} ymm2 = ymm0[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[28,29,30,31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX512BW-NEXT: vpmuludq %ymm2, %ymm0, %ymm2
; AVX512BW-NEXT: vpsrlq $32, %ymm0, %ymm3
; AVX512BW-NEXT: vpmuludq %ymm1, %ymm3, %ymm3
; AVX512BW-NEXT: vpaddq %ymm3, %ymm2, %ymm2
; AVX512BW-NEXT: vpsllq $32, %ymm2, %ymm2
; AVX512BW-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpaddq %xmm2, %xmm0, %xmm0
; AVX512BW-NEXT: vmovq %xmm0, %rax
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: test_v4i64:
; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512BWVL-NEXT: vpsrlq $32, %ymm0, %ymm2
; AVX512BWVL-NEXT: vpmuludq %ymm1, %ymm2, %ymm2
; AVX512BWVL-NEXT: vpsrlq $32, %ymm1, %ymm3
; AVX512BWVL-NEXT: vpmuludq %ymm3, %ymm0, %ymm3
; AVX512BWVL-NEXT: vpaddq %ymm2, %ymm3, %ymm2
; AVX512BWVL-NEXT: vpsllq $32, %ymm2, %ymm2
; AVX512BWVL-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
; AVX512BWVL-NEXT: vpaddq %ymm2, %ymm0, %ymm0
; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512BWVL-NEXT: vpsrlq $32, %ymm0, %ymm2
; AVX512BWVL-NEXT: vpmuludq %ymm1, %ymm2, %ymm2
; AVX512BWVL-NEXT: vpsrlq $32, %ymm1, %ymm3
; AVX512BWVL-NEXT: vpmuludq %ymm3, %ymm0, %ymm3
; AVX512BWVL-NEXT: vpaddq %ymm2, %ymm3, %ymm2
; AVX512BWVL-NEXT: vpsllq $32, %ymm2, %ymm2
; AVX512BWVL-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
; AVX512BWVL-NEXT: vpaddq %xmm2, %xmm0, %xmm0
; AVX512BWVL-NEXT: vmovq %xmm0, %rax
; AVX512BWVL-NEXT: vzeroupper
; AVX512BWVL-NEXT: retq
;
; AVX512DQ-LABEL: test_v4i64:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512DQ-NEXT: vpmullq %zmm1, %zmm0, %zmm0
; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512DQ-NEXT: vpmullq %zmm1, %zmm0, %zmm0
; AVX512DQ-NEXT: vmovq %xmm0, %rax
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512DQVL-LABEL: test_v4i64:
; AVX512DQVL: # %bb.0:
; AVX512DQVL-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512DQVL-NEXT: vpmullq %ymm1, %ymm0, %ymm0
; AVX512DQVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512DQVL-NEXT: vpmullq %xmm1, %xmm0, %xmm0
; AVX512DQVL-NEXT: vmovq %xmm0, %rax
; AVX512DQVL-NEXT: vzeroupper
; AVX512DQVL-NEXT: retq
  %1 = call i64 @llvm.experimental.vector.reduce.mul.i64.v4i64(<4 x i64> %a0)
  ret i64 %1
}

define i64 @test_v8i64(<8 x i64> %a0) {
; SSE-LABEL: test_v8i64:
; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm1, %xmm4
; SSE-NEXT: psrlq $32, %xmm4
; SSE-NEXT: pmuludq %xmm3, %xmm4
; SSE-NEXT: movdqa %xmm3, %xmm5
; SSE-NEXT: psrlq $32, %xmm5
; SSE-NEXT: pmuludq %xmm1, %xmm5
; SSE-NEXT: paddq %xmm4, %xmm5
; SSE-NEXT: psllq $32, %xmm5
; SSE-NEXT: pmuludq %xmm3, %xmm1
; SSE-NEXT: paddq %xmm5, %xmm1
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psrlq $32, %xmm3
; SSE-NEXT: pmuludq %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm2, %xmm4
; SSE-NEXT: psrlq $32, %xmm4
; SSE-NEXT: pmuludq %xmm0, %xmm4
; SSE-NEXT: paddq %xmm3, %xmm4
; SSE-NEXT: psllq $32, %xmm4
; SSE-NEXT: pmuludq %xmm2, %xmm0
; SSE-NEXT: paddq %xmm4, %xmm0
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrlq $32, %xmm2
; SSE-NEXT: pmuludq %xmm1, %xmm2
; SSE-NEXT: movdqa %xmm1, %xmm3
; SSE-NEXT: psrlq $32, %xmm3
; SSE-NEXT: pmuludq %xmm0, %xmm3
; SSE-NEXT: paddq %xmm2, %xmm3
; SSE-NEXT: psllq $32, %xmm3
; SSE-NEXT: pmuludq %xmm1, %xmm0
; SSE-NEXT: paddq %xmm3, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrlq $32, %xmm2
; SSE-NEXT: pmuludq %xmm1, %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psrldq {{.*#+}} xmm3 = xmm3[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; SSE-NEXT: pmuludq %xmm0, %xmm3
; SSE-NEXT: paddq %xmm2, %xmm3
; SSE-NEXT: psllq $32, %xmm3
; SSE-NEXT: pmuludq %xmm1, %xmm0
; SSE-NEXT: paddq %xmm3, %xmm0
; SSE-NEXT: movq %xmm0, %rax
; SSE-NEXT: retq
;
; AVX1-LABEL: test_v8i64:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpsrlq $32, %xmm3, %xmm4
; AVX1-NEXT: vpmuludq %xmm2, %xmm4, %xmm4
; AVX1-NEXT: vpsrlq $32, %xmm2, %xmm5
; AVX1-NEXT: vpmuludq %xmm5, %xmm3, %xmm5
; AVX1-NEXT: vpaddq %xmm4, %xmm5, %xmm4
; AVX1-NEXT: vpsllq $32, %xmm4, %xmm4
; AVX1-NEXT: vpmuludq %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpaddq %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpsrlq $32, %xmm0, %xmm3
; AVX1-NEXT: vpmuludq %xmm1, %xmm3, %xmm3
; AVX1-NEXT: vpsrlq $32, %xmm1, %xmm4
; AVX1-NEXT: vpmuludq %xmm4, %xmm0, %xmm4
; AVX1-NEXT: vpaddq %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpsllq $32, %xmm3, %xmm3
; AVX1-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpaddq %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpsrlq $32, %xmm0, %xmm1
; AVX1-NEXT: vpmuludq %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpsrlq $32, %xmm2, %xmm3
; AVX1-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
; AVX1-NEXT: vpaddq %xmm1, %xmm3, %xmm1
; AVX1-NEXT: vpsllq $32, %xmm1, %xmm1
; AVX1-NEXT: vpmuludq %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX1-NEXT: vpsrlq $32, %xmm0, %xmm2
; AVX1-NEXT: vpmuludq %xmm1, %xmm2, %xmm2
; AVX1-NEXT: vpsrlq $32, %xmm1, %xmm3
; AVX1-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
; AVX1-NEXT: vpaddq %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX1-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpaddq %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vmovq %xmm0, %rax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v8i64:
; AVX2: # %bb.0:
; AVX2-NEXT: vpsrlq $32, %ymm0, %ymm2
; AVX2-NEXT: vpmuludq %ymm1, %ymm2, %ymm2
; AVX2-NEXT: vpsrlq $32, %ymm1, %ymm3
; AVX2-NEXT: vpmuludq %ymm3, %ymm0, %ymm3
; AVX2-NEXT: vpaddq %ymm2, %ymm3, %ymm2
; AVX2-NEXT: vpsllq $32, %ymm2, %ymm2
; AVX2-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpaddq %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpsrlq $32, %ymm0, %ymm2
; AVX2-NEXT: vpmuludq %ymm1, %ymm2, %ymm2
; AVX2-NEXT: vpsrlq $32, %ymm1, %ymm3
; AVX2-NEXT: vpmuludq %ymm3, %ymm0, %ymm3
; AVX2-NEXT: vpaddq %ymm2, %ymm3, %ymm2
; AVX2-NEXT: vpsllq $32, %ymm2, %ymm2
; AVX2-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpaddq %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX2-NEXT: vpsrldq {{.*#+}} ymm2 = ymm0[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[28,29,30,31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX2-NEXT: vpmuludq %ymm2, %ymm0, %ymm2
; AVX2-NEXT: vpsrlq $32, %ymm0, %ymm3
; AVX2-NEXT: vpmuludq %ymm1, %ymm3, %ymm3
; AVX2-NEXT: vpaddq %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpsllq $32, %ymm2, %ymm2
; AVX2-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpaddq %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vmovq %xmm0, %rax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test_v8i64:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512BW-NEXT: vpsrlq $32, %zmm0, %zmm2
; AVX512BW-NEXT: vpmuludq %zmm1, %zmm2, %zmm2
; AVX512BW-NEXT: vpsrlq $32, %zmm1, %zmm3
; AVX512BW-NEXT: vpmuludq %zmm3, %zmm0, %zmm3
; AVX512BW-NEXT: vpaddq %zmm2, %zmm3, %zmm2
; AVX512BW-NEXT: vpsllq $32, %zmm2, %zmm2
; AVX512BW-NEXT: vpmuludq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpaddq %zmm2, %zmm0, %zmm0
; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512BW-NEXT: vpsrlq $32, %zmm0, %zmm2
; AVX512BW-NEXT: vpmuludq %zmm1, %zmm2, %zmm2
; AVX512BW-NEXT: vpsrlq $32, %zmm1, %zmm3
; AVX512BW-NEXT: vpmuludq %zmm3, %zmm0, %zmm3
; AVX512BW-NEXT: vpaddq %zmm2, %zmm3, %zmm2
; AVX512BW-NEXT: vpsllq $32, %zmm2, %zmm2
; AVX512BW-NEXT: vpmuludq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpaddq %zmm2, %zmm0, %zmm0
; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512BW-NEXT: vpsrlq $32, %zmm0, %zmm2
; AVX512BW-NEXT: vpmuludq %zmm1, %zmm2, %zmm2
; AVX512BW-NEXT: vpsrlq $32, %zmm1, %zmm3
; AVX512BW-NEXT: vpmuludq %zmm3, %zmm0, %zmm3
; AVX512BW-NEXT: vpaddq %zmm2, %zmm3, %zmm2
; AVX512BW-NEXT: vpsllq $32, %zmm2, %zmm2
; AVX512BW-NEXT: vpmuludq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpaddq %xmm2, %xmm0, %xmm0
; AVX512BW-NEXT: vmovq %xmm0, %rax
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: test_v8i64:
; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512BWVL-NEXT: vpsrlq $32, %zmm0, %zmm2
; AVX512BWVL-NEXT: vpmuludq %zmm1, %zmm2, %zmm2
; AVX512BWVL-NEXT: vpsrlq $32, %zmm1, %zmm3
; AVX512BWVL-NEXT: vpmuludq %zmm3, %zmm0, %zmm3
; AVX512BWVL-NEXT: vpaddq %zmm2, %zmm3, %zmm2
; AVX512BWVL-NEXT: vpsllq $32, %zmm2, %zmm2
; AVX512BWVL-NEXT: vpmuludq %zmm1, %zmm0, %zmm0
; AVX512BWVL-NEXT: vpaddq %zmm2, %zmm0, %zmm0
; AVX512BWVL-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512BWVL-NEXT: vpsrlq $32, %zmm0, %zmm2
; AVX512BWVL-NEXT: vpmuludq %zmm1, %zmm2, %zmm2
; AVX512BWVL-NEXT: vpsrlq $32, %zmm1, %zmm3
; AVX512BWVL-NEXT: vpmuludq %zmm3, %zmm0, %zmm3
; AVX512BWVL-NEXT: vpaddq %zmm2, %zmm3, %zmm2
; AVX512BWVL-NEXT: vpsllq $32, %zmm2, %zmm2
; AVX512BWVL-NEXT: vpmuludq %zmm1, %zmm0, %zmm0
; AVX512BWVL-NEXT: vpaddq %zmm2, %zmm0, %zmm0
; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512BWVL-NEXT: vpsrlq $32, %zmm0, %zmm2
; AVX512BWVL-NEXT: vpmuludq %zmm1, %zmm2, %zmm2
; AVX512BWVL-NEXT: vpsrlq $32, %zmm1, %zmm3
; AVX512BWVL-NEXT: vpmuludq %zmm3, %zmm0, %zmm3
; AVX512BWVL-NEXT: vpaddq %zmm2, %zmm3, %zmm2
; AVX512BWVL-NEXT: vpsllq $32, %zmm2, %zmm2
; AVX512BWVL-NEXT: vpmuludq %zmm1, %zmm0, %zmm0
; AVX512BWVL-NEXT: vpaddq %xmm2, %xmm0, %xmm0
; AVX512BWVL-NEXT: vmovq %xmm0, %rax
; AVX512BWVL-NEXT: vzeroupper
; AVX512BWVL-NEXT: retq
;
; AVX512DQ-LABEL: test_v8i64:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512DQ-NEXT: vpmullq %zmm1, %zmm0, %zmm0
; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512DQ-NEXT: vpmullq %zmm1, %zmm0, %zmm0
; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512DQ-NEXT: vpmullq %zmm1, %zmm0, %zmm0
; AVX512DQ-NEXT: vmovq %xmm0, %rax
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512DQVL-LABEL: test_v8i64:
; AVX512DQVL: # %bb.0:
; AVX512DQVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512DQVL-NEXT: vpmullq %zmm1, %zmm0, %zmm0
; AVX512DQVL-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512DQVL-NEXT: vpmullq %zmm1, %zmm0, %zmm0
; AVX512DQVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512DQVL-NEXT: vpmullq %xmm1, %xmm0, %xmm0
; AVX512DQVL-NEXT: vmovq %xmm0, %rax
; AVX512DQVL-NEXT: vzeroupper
; AVX512DQVL-NEXT: retq
  %1 = call i64 @llvm.experimental.vector.reduce.mul.i64.v8i64(<8 x i64> %a0)
  ret i64 %1
}

define i64 @test_v16i64(<16 x i64> %a0) {
; SSE-LABEL: test_v16i64:
; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm2, %xmm8
; SSE-NEXT: psrlq $32, %xmm8
; SSE-NEXT: pmuludq %xmm6, %xmm8
; SSE-NEXT: movdqa %xmm6, %xmm9
; SSE-NEXT: psrlq $32, %xmm9
; SSE-NEXT: pmuludq %xmm2, %xmm9
; SSE-NEXT: paddq %xmm8, %xmm9
; SSE-NEXT: psllq $32, %xmm9
; SSE-NEXT: pmuludq %xmm6, %xmm2
; SSE-NEXT: paddq %xmm9, %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm8
; SSE-NEXT: psrlq $32, %xmm8
; SSE-NEXT: pmuludq %xmm4, %xmm8
; SSE-NEXT: movdqa %xmm4, %xmm6
; SSE-NEXT: psrlq $32, %xmm6
; SSE-NEXT: pmuludq %xmm0, %xmm6
; SSE-NEXT: paddq %xmm8, %xmm6
; SSE-NEXT: psllq $32, %xmm6
; SSE-NEXT: pmuludq %xmm4, %xmm0
; SSE-NEXT: paddq %xmm6, %xmm0
; SSE-NEXT: movdqa %xmm3, %xmm4
; SSE-NEXT: psrlq $32, %xmm4
; SSE-NEXT: pmuludq %xmm7, %xmm4
; SSE-NEXT: movdqa %xmm7, %xmm6
; SSE-NEXT: psrlq $32, %xmm6
; SSE-NEXT: pmuludq %xmm3, %xmm6
; SSE-NEXT: paddq %xmm4, %xmm6
; SSE-NEXT: psllq $32, %xmm6
; SSE-NEXT: pmuludq %xmm7, %xmm3
; SSE-NEXT: paddq %xmm6, %xmm3
; SSE-NEXT: movdqa %xmm1, %xmm4
; SSE-NEXT: psrlq $32, %xmm4
; SSE-NEXT: pmuludq %xmm5, %xmm4
; SSE-NEXT: movdqa %xmm5, %xmm6
; SSE-NEXT: psrlq $32, %xmm6
; SSE-NEXT: pmuludq %xmm1, %xmm6
; SSE-NEXT: paddq %xmm4, %xmm6
; SSE-NEXT: psllq $32, %xmm6
; SSE-NEXT: pmuludq %xmm5, %xmm1
; SSE-NEXT: paddq %xmm6, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm4
; SSE-NEXT: psrlq $32, %xmm4
; SSE-NEXT: pmuludq %xmm3, %xmm4
; SSE-NEXT: movdqa %xmm3, %xmm5
; SSE-NEXT: psrlq $32, %xmm5
; SSE-NEXT: pmuludq %xmm1, %xmm5
; SSE-NEXT: paddq %xmm4, %xmm5
; SSE-NEXT: psllq $32, %xmm5
; SSE-NEXT: pmuludq %xmm3, %xmm1
; SSE-NEXT: paddq %xmm5, %xmm1
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psrlq $32, %xmm3
; SSE-NEXT: pmuludq %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm2, %xmm4
; SSE-NEXT: psrlq $32, %xmm4
; SSE-NEXT: pmuludq %xmm0, %xmm4
; SSE-NEXT: paddq %xmm3, %xmm4
; SSE-NEXT: psllq $32, %xmm4
; SSE-NEXT: pmuludq %xmm2, %xmm0
; SSE-NEXT: paddq %xmm4, %xmm0
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrlq $32, %xmm2
; SSE-NEXT: pmuludq %xmm1, %xmm2
; SSE-NEXT: movdqa %xmm1, %xmm3
; SSE-NEXT: psrlq $32, %xmm3
; SSE-NEXT: pmuludq %xmm0, %xmm3
; SSE-NEXT: paddq %xmm2, %xmm3
; SSE-NEXT: psllq $32, %xmm3
; SSE-NEXT: pmuludq %xmm1, %xmm0
; SSE-NEXT: paddq %xmm3, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrlq $32, %xmm2
; SSE-NEXT: pmuludq %xmm1, %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psrldq {{.*#+}} xmm3 = xmm3[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; SSE-NEXT: pmuludq %xmm0, %xmm3
; SSE-NEXT: paddq %xmm2, %xmm3
; SSE-NEXT: psllq $32, %xmm3
; SSE-NEXT: pmuludq %xmm1, %xmm0
; SSE-NEXT: paddq %xmm3, %xmm0
; SSE-NEXT: movq %xmm0, %rax
; SSE-NEXT: retq
;
; AVX1-LABEL: test_v16i64:
; AVX1: # %bb.0:
; AVX1-NEXT: vpsrlq $32, %xmm1, %xmm4
; AVX1-NEXT: vpmuludq %xmm3, %xmm4, %xmm4
; AVX1-NEXT: vpsrlq $32, %xmm3, %xmm5
; AVX1-NEXT: vpmuludq %xmm5, %xmm1, %xmm5
; AVX1-NEXT: vpaddq %xmm4, %xmm5, %xmm4
; AVX1-NEXT: vpsllq $32, %xmm4, %xmm4
; AVX1-NEXT: vpmuludq %xmm3, %xmm1, %xmm5
; AVX1-NEXT: vpaddq %xmm4, %xmm5, %xmm4
; AVX1-NEXT: vpsrlq $32, %xmm0, %xmm5
; AVX1-NEXT: vpmuludq %xmm2, %xmm5, %xmm5
; AVX1-NEXT: vpsrlq $32, %xmm2, %xmm6
; AVX1-NEXT: vpmuludq %xmm6, %xmm0, %xmm6
; AVX1-NEXT: vpaddq %xmm5, %xmm6, %xmm5
; AVX1-NEXT: vpsllq $32, %xmm5, %xmm5
; AVX1-NEXT: vpmuludq %xmm2, %xmm0, %xmm6
; AVX1-NEXT: vpaddq %xmm5, %xmm6, %xmm5
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-NEXT: vpsrlq $32, %xmm1, %xmm6
; AVX1-NEXT: vpmuludq %xmm3, %xmm6, %xmm6
; AVX1-NEXT: vpsrlq $32, %xmm3, %xmm7
; AVX1-NEXT: vpmuludq %xmm7, %xmm1, %xmm7
; AVX1-NEXT: vpaddq %xmm6, %xmm7, %xmm6
; AVX1-NEXT: vpsllq $32, %xmm6, %xmm6
; AVX1-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpaddq %xmm6, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpsrlq $32, %xmm0, %xmm3
; AVX1-NEXT: vpmuludq %xmm2, %xmm3, %xmm3
; AVX1-NEXT: vpsrlq $32, %xmm2, %xmm6
; AVX1-NEXT: vpmuludq %xmm6, %xmm0, %xmm6
; AVX1-NEXT: vpaddq %xmm3, %xmm6, %xmm3
; AVX1-NEXT: vpsllq $32, %xmm3, %xmm3
; AVX1-NEXT: vpmuludq %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpaddq %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpsrlq $32, %xmm0, %xmm2
; AVX1-NEXT: vpmuludq %xmm1, %xmm2, %xmm2
; AVX1-NEXT: vpsrlq $32, %xmm1, %xmm3
; AVX1-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
; AVX1-NEXT: vpaddq %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX1-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpaddq %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpsrlq $32, %xmm5, %xmm1
; AVX1-NEXT: vpmuludq %xmm4, %xmm1, %xmm1
; AVX1-NEXT: vpsrlq $32, %xmm4, %xmm2
; AVX1-NEXT: vpmuludq %xmm2, %xmm5, %xmm2
; AVX1-NEXT: vpaddq %xmm1, %xmm2, %xmm1
; AVX1-NEXT: vpsllq $32, %xmm1, %xmm1
; AVX1-NEXT: vpmuludq %xmm4, %xmm5, %xmm2
; AVX1-NEXT: vpaddq %xmm1, %xmm2, %xmm1
; AVX1-NEXT: vpsrlq $32, %xmm1, %xmm2
; AVX1-NEXT: vpmuludq %xmm0, %xmm2, %xmm2
; AVX1-NEXT: vpsrlq $32, %xmm0, %xmm3
; AVX1-NEXT: vpmuludq %xmm3, %xmm1, %xmm3
; AVX1-NEXT: vpaddq %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX1-NEXT: vpmuludq %xmm0, %xmm1, %xmm0
; AVX1-NEXT: vpaddq %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX1-NEXT: vpsrlq $32, %xmm0, %xmm2
; AVX1-NEXT: vpmuludq %xmm1, %xmm2, %xmm2
; AVX1-NEXT: vpsrlq $32, %xmm1, %xmm3
; AVX1-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
; AVX1-NEXT: vpaddq %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX1-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpaddq %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vmovq %xmm0, %rax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v16i64:
; AVX2: # %bb.0:
; AVX2-NEXT: vpsrlq $32, %ymm1, %ymm4
; AVX2-NEXT: vpmuludq %ymm3, %ymm4, %ymm4
; AVX2-NEXT: vpsrlq $32, %ymm3, %ymm5
; AVX2-NEXT: vpmuludq %ymm5, %ymm1, %ymm5
; AVX2-NEXT: vpaddq %ymm4, %ymm5, %ymm4
; AVX2-NEXT: vpsllq $32, %ymm4, %ymm4
; AVX2-NEXT: vpmuludq %ymm3, %ymm1, %ymm1
; AVX2-NEXT: vpaddq %ymm4, %ymm1, %ymm1
; AVX2-NEXT: vpsrlq $32, %ymm0, %ymm3
; AVX2-NEXT: vpmuludq %ymm2, %ymm3, %ymm3
; AVX2-NEXT: vpsrlq $32, %ymm2, %ymm4
; AVX2-NEXT: vpmuludq %ymm4, %ymm0, %ymm4
; AVX2-NEXT: vpaddq %ymm3, %ymm4, %ymm3
; AVX2-NEXT: vpsllq $32, %ymm3, %ymm3
; AVX2-NEXT: vpmuludq %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpaddq %ymm3, %ymm0, %ymm0
; AVX2-NEXT: vpsrlq $32, %ymm0, %ymm2
; AVX2-NEXT: vpmuludq %ymm1, %ymm2, %ymm2
; AVX2-NEXT: vpsrlq $32, %ymm1, %ymm3
; AVX2-NEXT: vpmuludq %ymm3, %ymm0, %ymm3
; AVX2-NEXT: vpaddq %ymm2, %ymm3, %ymm2
; AVX2-NEXT: vpsllq $32, %ymm2, %ymm2
; AVX2-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpaddq %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpsrlq $32, %ymm0, %ymm2
; AVX2-NEXT: vpmuludq %ymm1, %ymm2, %ymm2
; AVX2-NEXT: vpsrlq $32, %ymm1, %ymm3
; AVX2-NEXT: vpmuludq %ymm3, %ymm0, %ymm3
; AVX2-NEXT: vpaddq %ymm2, %ymm3, %ymm2
; AVX2-NEXT: vpsllq $32, %ymm2, %ymm2
; AVX2-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpaddq %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX2-NEXT: vpsrldq {{.*#+}} ymm2 = ymm0[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[28,29,30,31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX2-NEXT: vpmuludq %ymm2, %ymm0, %ymm2
; AVX2-NEXT: vpsrlq $32, %ymm0, %ymm3
; AVX2-NEXT: vpmuludq %ymm1, %ymm3, %ymm3
; AVX2-NEXT: vpaddq %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpsllq $32, %ymm2, %ymm2
; AVX2-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpaddq %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vmovq %xmm0, %rax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test_v16i64:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsrlq $32, %zmm0, %zmm2
; AVX512BW-NEXT: vpmuludq %zmm1, %zmm2, %zmm2
; AVX512BW-NEXT: vpsrlq $32, %zmm1, %zmm3
; AVX512BW-NEXT: vpmuludq %zmm3, %zmm0, %zmm3
; AVX512BW-NEXT: vpaddq %zmm2, %zmm3, %zmm2
; AVX512BW-NEXT: vpsllq $32, %zmm2, %zmm2
; AVX512BW-NEXT: vpmuludq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpaddq %zmm2, %zmm0, %zmm0
; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512BW-NEXT: vpsrlq $32, %zmm0, %zmm2
; AVX512BW-NEXT: vpmuludq %zmm1, %zmm2, %zmm2
; AVX512BW-NEXT: vpsrlq $32, %zmm1, %zmm3
; AVX512BW-NEXT: vpmuludq %zmm3, %zmm0, %zmm3
; AVX512BW-NEXT: vpaddq %zmm2, %zmm3, %zmm2
; AVX512BW-NEXT: vpsllq $32, %zmm2, %zmm2
; AVX512BW-NEXT: vpmuludq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpaddq %zmm2, %zmm0, %zmm0
; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512BW-NEXT: vpsrlq $32, %zmm0, %zmm2
; AVX512BW-NEXT: vpmuludq %zmm1, %zmm2, %zmm2
; AVX512BW-NEXT: vpsrlq $32, %zmm1, %zmm3
; AVX512BW-NEXT: vpmuludq %zmm3, %zmm0, %zmm3
; AVX512BW-NEXT: vpaddq %zmm2, %zmm3, %zmm2
; AVX512BW-NEXT: vpsllq $32, %zmm2, %zmm2
; AVX512BW-NEXT: vpmuludq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpaddq %zmm2, %zmm0, %zmm0
; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512BW-NEXT: vpsrlq $32, %zmm0, %zmm2
; AVX512BW-NEXT: vpmuludq %zmm1, %zmm2, %zmm2
; AVX512BW-NEXT: vpsrlq $32, %zmm1, %zmm3
; AVX512BW-NEXT: vpmuludq %zmm3, %zmm0, %zmm3
; AVX512BW-NEXT: vpaddq %zmm2, %zmm3, %zmm2
; AVX512BW-NEXT: vpsllq $32, %zmm2, %zmm2
; AVX512BW-NEXT: vpmuludq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpaddq %xmm2, %xmm0, %xmm0
; AVX512BW-NEXT: vmovq %xmm0, %rax
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: test_v16i64:
; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vpsrlq $32, %zmm0, %zmm2
; AVX512BWVL-NEXT: vpmuludq %zmm1, %zmm2, %zmm2
; AVX512BWVL-NEXT: vpsrlq $32, %zmm1, %zmm3
; AVX512BWVL-NEXT: vpmuludq %zmm3, %zmm0, %zmm3
; AVX512BWVL-NEXT: vpaddq %zmm2, %zmm3, %zmm2
; AVX512BWVL-NEXT: vpsllq $32, %zmm2, %zmm2
; AVX512BWVL-NEXT: vpmuludq %zmm1, %zmm0, %zmm0
; AVX512BWVL-NEXT: vpaddq %zmm2, %zmm0, %zmm0
; AVX512BWVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512BWVL-NEXT: vpsrlq $32, %zmm0, %zmm2
; AVX512BWVL-NEXT: vpmuludq %zmm1, %zmm2, %zmm2
; AVX512BWVL-NEXT: vpsrlq $32, %zmm1, %zmm3
; AVX512BWVL-NEXT: vpmuludq %zmm3, %zmm0, %zmm3
; AVX512BWVL-NEXT: vpaddq %zmm2, %zmm3, %zmm2
; AVX512BWVL-NEXT: vpsllq $32, %zmm2, %zmm2
; AVX512BWVL-NEXT: vpmuludq %zmm1, %zmm0, %zmm0
; AVX512BWVL-NEXT: vpaddq %zmm2, %zmm0, %zmm0
; AVX512BWVL-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512BWVL-NEXT: vpsrlq $32, %zmm0, %zmm2
; AVX512BWVL-NEXT: vpmuludq %zmm1, %zmm2, %zmm2
; AVX512BWVL-NEXT: vpsrlq $32, %zmm1, %zmm3
; AVX512BWVL-NEXT: vpmuludq %zmm3, %zmm0, %zmm3
; AVX512BWVL-NEXT: vpaddq %zmm2, %zmm3, %zmm2
; AVX512BWVL-NEXT: vpsllq $32, %zmm2, %zmm2
; AVX512BWVL-NEXT: vpmuludq %zmm1, %zmm0, %zmm0
; AVX512BWVL-NEXT: vpaddq %zmm2, %zmm0, %zmm0
; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512BWVL-NEXT: vpsrlq $32, %zmm0, %zmm2
; AVX512BWVL-NEXT: vpmuludq %zmm1, %zmm2, %zmm2
; AVX512BWVL-NEXT: vpsrlq $32, %zmm1, %zmm3
; AVX512BWVL-NEXT: vpmuludq %zmm3, %zmm0, %zmm3
; AVX512BWVL-NEXT: vpaddq %zmm2, %zmm3, %zmm2
; AVX512BWVL-NEXT: vpsllq $32, %zmm2, %zmm2
; AVX512BWVL-NEXT: vpmuludq %zmm1, %zmm0, %zmm0
; AVX512BWVL-NEXT: vpaddq %xmm2, %xmm0, %xmm0
; AVX512BWVL-NEXT: vmovq %xmm0, %rax
; AVX512BWVL-NEXT: vzeroupper
; AVX512BWVL-NEXT: retq
;
; AVX512DQ-LABEL: test_v16i64:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpmullq %zmm1, %zmm0, %zmm0
; AVX512DQ-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512DQ-NEXT: vpmullq %zmm1, %zmm0, %zmm0
; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512DQ-NEXT: vpmullq %zmm1, %zmm0, %zmm0
; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512DQ-NEXT: vpmullq %zmm1, %zmm0, %zmm0
; AVX512DQ-NEXT: vmovq %xmm0, %rax
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512DQVL-LABEL: test_v16i64:
; AVX512DQVL: # %bb.0:
; AVX512DQVL-NEXT: vpmullq %zmm1, %zmm0, %zmm0
; AVX512DQVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512DQVL-NEXT: vpmullq %zmm1, %zmm0, %zmm0
; AVX512DQVL-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512DQVL-NEXT: vpmullq %zmm1, %zmm0, %zmm0
; AVX512DQVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512DQVL-NEXT: vpmullq %xmm1, %xmm0, %xmm0
; AVX512DQVL-NEXT: vmovq %xmm0, %rax
; AVX512DQVL-NEXT: vzeroupper
; AVX512DQVL-NEXT: retq
  %1 = call i64 @llvm.experimental.vector.reduce.mul.i64.v16i64(<16 x i64> %a0)
  ret i64 %1
}

;
; vXi32
;
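; SSE4.1 and later use pmulld directly; plain SSE2 has no 32-bit vector
; multiply, so it assembles the products from pmuludq on the even and odd
; elements and reshuffles the results.
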
define i32 @test_v4i32(<4 x i32> %a0) {
; SSE2-LABEL: test_v4i32:
; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[3,3,1,1]
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm2, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,2,2,3]
; SSE2-NEXT: pmuludq %xmm0, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; SSE2-NEXT: pmuludq %xmm0, %xmm1
; SSE2-NEXT: movd %xmm1, %eax
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v4i32:
; SSE41: # %bb.0:
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSE41-NEXT: pmulld %xmm0, %xmm1
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
; SSE41-NEXT: pmulld %xmm1, %xmm0
; SSE41-NEXT: movd %xmm0, %eax
; SSE41-NEXT: retq
;
; AVX-LABEL: test_v4i32:
; AVX: # %bb.0:
; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovd %xmm0, %eax
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v4i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX512-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovd %xmm0, %eax
; AVX512-NEXT: retq
  %1 = call i32 @llvm.experimental.vector.reduce.mul.i32.v4i32(<4 x i32> %a0)
  ret i32 %1
}

define i32 @test_v8i32(<8 x i32> %a0) {
; SSE2-LABEL: test_v8i32:
; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm2, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
; SSE2-NEXT: pmuludq %xmm0, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,2,0,0]
; SSE2-NEXT: pmuludq %xmm1, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; SSE2-NEXT: pmuludq %xmm0, %xmm1
; SSE2-NEXT: movd %xmm1, %eax
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v8i32:
; SSE41: # %bb.0:
; SSE41-NEXT: pmulld %xmm1, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSE41-NEXT: pmulld %xmm0, %xmm1
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
; SSE41-NEXT: pmulld %xmm1, %xmm0
; SSE41-NEXT: movd %xmm0, %eax
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v8i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovd %xmm0, %eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v8i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpmulld %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX2-NEXT: vpmulld %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovd %xmm0, %eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v8i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512-NEXT: vpmulld %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512-NEXT: vpmulld %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX512-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovd %xmm0, %eax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
  %1 = call i32 @llvm.experimental.vector.reduce.mul.i32.v8i32(<8 x i32> %a0)
  ret i32 %1
}

define i32 @test_v16i32(<16 x i32> %a0) {
; SSE2-LABEL: test_v16i32:
; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm3[1,1,3,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm4, %xmm5
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,1,3,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm4, %xmm6
; SSE2-NEXT: pmuludq %xmm5, %xmm6
; SSE2-NEXT: pmuludq %xmm3, %xmm1
; SSE2-NEXT: pmuludq %xmm2, %xmm0
; SSE2-NEXT: pmuludq %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm6[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSE2-NEXT: pmuludq %xmm0, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm6[2,2,0,0]
; SSE2-NEXT: pmuludq %xmm6, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; SSE2-NEXT: pmuludq %xmm0, %xmm1
; SSE2-NEXT: movd %xmm1, %eax
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v16i32:
; SSE41: # %bb.0:
; SSE41-NEXT: pmulld %xmm3, %xmm1
; SSE41-NEXT: pmulld %xmm2, %xmm0
; SSE41-NEXT: pmulld %xmm1, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSE41-NEXT: pmulld %xmm0, %xmm1
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
; SSE41-NEXT: pmulld %xmm1, %xmm0
; SSE41-NEXT: movd %xmm0, %eax
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v16i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpmulld %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpmulld %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovd %xmm0, %eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v16i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vpmulld %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpmulld %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX2-NEXT: vpmulld %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovd %xmm0, %eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v16i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512-NEXT: vpmulld %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512-NEXT: vpmulld %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512-NEXT: vpmulld %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX512-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovd %xmm0, %eax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
  %1 = call i32 @llvm.experimental.vector.reduce.mul.i32.v16i32(<16 x i32> %a0)
  ret i32 %1
}

define i32 @test_v32i32(<32 x i32> %a0) {
; SSE2-LABEL: test_v32i32:
; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm2[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm6, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm9 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm4, %xmm0
; SSE2-NEXT: pmuludq %xmm2, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm7, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm10 = xmm1[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm5, %xmm1
; SSE2-NEXT: pmuludq %xmm3, %xmm1
; SSE2-NEXT: pmuludq %xmm0, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm6[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm8, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm9, %xmm3
; SSE2-NEXT: pmuludq %xmm0, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm7[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm2, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm5[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm10, %xmm2
; SSE2-NEXT: pmuludq %xmm0, %xmm2
; SSE2-NEXT: pmuludq %xmm3, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSE2-NEXT: pmuludq %xmm0, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[2,2,0,0]
; SSE2-NEXT: pmuludq %xmm2, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; SSE2-NEXT: pmuludq %xmm0, %xmm1
; SSE2-NEXT: movd %xmm1, %eax
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v32i32:
; SSE41: # %bb.0:
; SSE41-NEXT: pmulld %xmm6, %xmm2
; SSE41-NEXT: pmulld %xmm4, %xmm0
; SSE41-NEXT: pmulld %xmm2, %xmm0
; SSE41-NEXT: pmulld %xmm7, %xmm3
; SSE41-NEXT: pmulld %xmm5, %xmm1
; SSE41-NEXT: pmulld %xmm3, %xmm1
; SSE41-NEXT: pmulld %xmm0, %xmm1
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
; SSE41-NEXT: pmulld %xmm1, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; SSE41-NEXT: pmulld %xmm0, %xmm1
; SSE41-NEXT: movd %xmm1, %eax
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v32i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vpmulld %xmm3, %xmm1, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-NEXT: vpmulld %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
; AVX1-NEXT: vpmulld %xmm1, %xmm3, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpmulld %xmm1, %xmm3, %xmm1
; AVX1-NEXT: vpmulld %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpmulld %xmm1, %xmm2, %xmm1
; AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovd %xmm0, %eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v32i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vpmulld %ymm3, %ymm1, %ymm1
; AVX2-NEXT: vpmulld %ymm1, %ymm2, %ymm1
; AVX2-NEXT: vpmulld %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpmulld %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX2-NEXT: vpmulld %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovd %xmm0, %eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v32i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpmulld %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512-NEXT: vpmulld %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512-NEXT: vpmulld %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512-NEXT: vpmulld %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX512-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovd %xmm0, %eax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
  %1 = call i32 @llvm.experimental.vector.reduce.mul.i32.v32i32(<32 x i32> %a0)
  ret i32 %1
}

;
; vXi16
;
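; pmullw is available on every configuration, so the i16 reduction is a plain
; shuffle-and-multiply tree down to the low element, finishing with a psrld
; by 16 to reach the last odd word.
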
define i16 @test_v8i16(<8 x i16> %a0) {
; SSE-LABEL: test_v8i16:
; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSE-NEXT: pmullw %xmm0, %xmm1
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
; SSE-NEXT: pmullw %xmm1, %xmm0
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrld $16, %xmm1
; SSE-NEXT: pmullw %xmm0, %xmm1
; SSE-NEXT: movd %xmm1, %eax
; SSE-NEXT: # kill: def $ax killed $ax killed $eax
; SSE-NEXT: retq
;
; AVX-LABEL: test_v8i16:
; AVX: # %bb.0:
; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovd %xmm0, %eax
; AVX-NEXT: # kill: def $ax killed $ax killed $eax
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v8i16:
; AVX512: # %bb.0:
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX512-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX512-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovd %xmm0, %eax
; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512-NEXT: retq
  %1 = call i16 @llvm.experimental.vector.reduce.mul.i16.v8i16(<8 x i16> %a0)
  ret i16 %1
}

define i16 @test_v16i16(<16 x i16> %a0) {
; SSE-LABEL: test_v16i16:
; SSE: # %bb.0:
; SSE-NEXT: pmullw %xmm1, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSE-NEXT: pmullw %xmm0, %xmm1
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
; SSE-NEXT: pmullw %xmm1, %xmm0
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrld $16, %xmm1
; SSE-NEXT: pmullw %xmm0, %xmm1
; SSE-NEXT: movd %xmm1, %eax
; SSE-NEXT: # kill: def $ax killed $ax killed $eax
; SSE-NEXT: retq
;
; AVX1-LABEL: test_v16i16:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovd %xmm0, %eax
; AVX1-NEXT: # kill: def $ax killed $ax killed $eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v16i16:
; AVX2: # %bb.0:
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX2-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX2-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovd %xmm0, %eax
; AVX2-NEXT: # kill: def $ax killed $ax killed $eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v16i16:
; AVX512: # %bb.0:
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX512-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX512-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovd %xmm0, %eax
; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
  %1 = call i16 @llvm.experimental.vector.reduce.mul.i16.v16i16(<16 x i16> %a0)
  ret i16 %1
}

define i16 @test_v32i16(<32 x i16> %a0) {
; SSE-LABEL: test_v32i16:
; SSE: # %bb.0:
; SSE-NEXT: pmullw %xmm3, %xmm1
; SSE-NEXT: pmullw %xmm2, %xmm0
; SSE-NEXT: pmullw %xmm1, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSE-NEXT: pmullw %xmm0, %xmm1
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
; SSE-NEXT: pmullw %xmm1, %xmm0
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrld $16, %xmm1
; SSE-NEXT: pmullw %xmm0, %xmm1
; SSE-NEXT: movd %xmm1, %eax
; SSE-NEXT: # kill: def $ax killed $ax killed $eax
; SSE-NEXT: retq
;
; AVX1-LABEL: test_v32i16:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpmullw %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpmullw %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovd %xmm0, %eax
; AVX1-NEXT: # kill: def $ax killed $ax killed $eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v32i16:
; AVX2: # %bb.0:
; AVX2-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX2-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX2-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovd %xmm0, %eax
; AVX2-NEXT: # kill: def $ax killed $ax killed $eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test_v32i16:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX512BW-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX512BW-NEXT: vmovd %xmm0, %eax
; AVX512BW-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: test_v32i16:
; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512BWVL-NEXT: vpmullw %zmm1, %zmm0, %zmm0
; AVX512BWVL-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512BWVL-NEXT: vpmullw %zmm1, %zmm0, %zmm0
; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512BWVL-NEXT: vpmullw %zmm1, %zmm0, %zmm0
; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX512BWVL-NEXT: vpmullw %zmm1, %zmm0, %zmm0
; AVX512BWVL-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX512BWVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX512BWVL-NEXT: vmovd %xmm0, %eax
; AVX512BWVL-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512BWVL-NEXT: vzeroupper
; AVX512BWVL-NEXT: retq
;
; AVX512DQ-LABEL: test_v32i16:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512DQ-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512DQ-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX512DQ-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512DQ-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX512DQ-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX512DQ-NEXT: vmovd %xmm0, %eax
; AVX512DQ-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512DQVL-LABEL: test_v32i16:
; AVX512DQVL: # %bb.0:
; AVX512DQVL-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512DQVL-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512DQVL-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512DQVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512DQVL-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512DQVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX512DQVL-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512DQVL-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX512DQVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX512DQVL-NEXT: vmovd %xmm0, %eax
; AVX512DQVL-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512DQVL-NEXT: vzeroupper
; AVX512DQVL-NEXT: retq
  %1 = call i16 @llvm.experimental.vector.reduce.mul.i16.v32i16(<32 x i16> %a0)
  ret i16 %1
}

define i16 @test_v64i16(<64 x i16> %a0) {
|
|
; SSE-LABEL: test_v64i16:
|
|
; SSE: # %bb.0:
|
|
; SSE-NEXT: pmullw %xmm6, %xmm2
|
|
; SSE-NEXT: pmullw %xmm4, %xmm0
|
|
; SSE-NEXT: pmullw %xmm2, %xmm0
|
|
; SSE-NEXT: pmullw %xmm7, %xmm3
|
|
; SSE-NEXT: pmullw %xmm5, %xmm1
|
|
; SSE-NEXT: pmullw %xmm3, %xmm1
|
|
; SSE-NEXT: pmullw %xmm0, %xmm1
|
|
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
|
|
; SSE-NEXT: pmullw %xmm1, %xmm0
|
|
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
|
|
; SSE-NEXT: pmullw %xmm0, %xmm1
|
|
; SSE-NEXT: movdqa %xmm1, %xmm0
|
|
; SSE-NEXT: psrld $16, %xmm0
|
|
; SSE-NEXT: pmullw %xmm1, %xmm0
|
|
; SSE-NEXT: movd %xmm0, %eax
|
|
; SSE-NEXT: # kill: def $ax killed $ax killed $eax
|
|
; SSE-NEXT: retq
|
|
;
|
|
; AVX1-LABEL: test_v64i16:
|
|
; AVX1: # %bb.0:
|
|
; AVX1-NEXT: vpmullw %xmm3, %xmm1, %xmm4
|
|
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm3
|
|
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
|
|
; AVX1-NEXT: vpmullw %xmm3, %xmm1, %xmm1
|
|
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
|
|
; AVX1-NEXT: vpmullw %xmm1, %xmm3, %xmm1
|
|
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
|
|
; AVX1-NEXT: vpmullw %xmm1, %xmm3, %xmm1
|
|
; AVX1-NEXT: vpmullw %xmm4, %xmm2, %xmm2
|
|
; AVX1-NEXT: vpmullw %xmm1, %xmm2, %xmm1
|
|
; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
|
|
; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
|
|
; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
|
|
; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX1-NEXT: vmovd %xmm0, %eax
|
|
; AVX1-NEXT: # kill: def $ax killed $ax killed $eax
|
|
; AVX1-NEXT: vzeroupper
|
|
; AVX1-NEXT: retq
|
|
;
|
|
; AVX2-LABEL: test_v64i16:
|
|
; AVX2: # %bb.0:
|
|
; AVX2-NEXT: vpmullw %ymm3, %ymm1, %ymm1
|
|
; AVX2-NEXT: vpmullw %ymm1, %ymm2, %ymm1
|
|
; AVX2-NEXT: vpmullw %ymm1, %ymm0, %ymm0
|
|
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
|
|
; AVX2-NEXT: vpmullw %ymm1, %ymm0, %ymm0
|
|
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
|
|
; AVX2-NEXT: vpmullw %ymm1, %ymm0, %ymm0
|
|
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
|
|
; AVX2-NEXT: vpmullw %ymm1, %ymm0, %ymm0
|
|
; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
|
|
; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX2-NEXT: vmovd %xmm0, %eax
|
|
; AVX2-NEXT: # kill: def $ax killed $ax killed $eax
|
|
; AVX2-NEXT: vzeroupper
|
|
; AVX2-NEXT: retq
|
|
;
|
|
; AVX512BW-LABEL: test_v64i16:
|
|
; AVX512BW: # %bb.0:
|
|
; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm0
|
|
; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
|
|
; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm0
|
|
; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
|
|
; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm0
|
|
; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
|
|
; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm0
|
|
; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
|
|
; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm0
|
|
; AVX512BW-NEXT: vpsrld $16, %xmm0, %xmm1
|
|
; AVX512BW-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX512BW-NEXT: vmovd %xmm0, %eax
|
|
; AVX512BW-NEXT: # kill: def $ax killed $ax killed $eax
|
|
; AVX512BW-NEXT: vzeroupper
|
|
; AVX512BW-NEXT: retq
|
|
;
|
|
; AVX512BWVL-LABEL: test_v64i16:
|
|
; AVX512BWVL: # %bb.0:
|
|
; AVX512BWVL-NEXT: vpmullw %zmm1, %zmm0, %zmm0
|
|
; AVX512BWVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
|
|
; AVX512BWVL-NEXT: vpmullw %zmm1, %zmm0, %zmm0
|
|
; AVX512BWVL-NEXT: vextracti128 $1, %ymm0, %xmm1
|
|
; AVX512BWVL-NEXT: vpmullw %zmm1, %zmm0, %zmm0
|
|
; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
|
|
; AVX512BWVL-NEXT: vpmullw %zmm1, %zmm0, %zmm0
|
|
; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
|
|
; AVX512BWVL-NEXT: vpmullw %zmm1, %zmm0, %zmm0
|
|
; AVX512BWVL-NEXT: vpsrld $16, %xmm0, %xmm1
|
|
; AVX512BWVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX512BWVL-NEXT: vmovd %xmm0, %eax
|
|
; AVX512BWVL-NEXT: # kill: def $ax killed $ax killed $eax
|
|
; AVX512BWVL-NEXT: vzeroupper
|
|
; AVX512BWVL-NEXT: retq
|
|
;
|
|
; AVX512DQ-LABEL: test_v64i16:
|
|
; AVX512DQ: # %bb.0:
|
|
; AVX512DQ-NEXT: vpmullw %ymm3, %ymm1, %ymm1
|
|
; AVX512DQ-NEXT: vpmullw %ymm1, %ymm2, %ymm1
|
|
; AVX512DQ-NEXT: vpmullw %ymm1, %ymm0, %ymm0
|
|
; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm1
|
|
; AVX512DQ-NEXT: vpmullw %ymm1, %ymm0, %ymm0
|
|
; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
|
|
; AVX512DQ-NEXT: vpmullw %ymm1, %ymm0, %ymm0
|
|
; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
|
|
; AVX512DQ-NEXT: vpmullw %ymm1, %ymm0, %ymm0
|
|
; AVX512DQ-NEXT: vpsrld $16, %xmm0, %xmm1
|
|
; AVX512DQ-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX512DQ-NEXT: vmovd %xmm0, %eax
|
|
; AVX512DQ-NEXT: # kill: def $ax killed $ax killed $eax
|
|
; AVX512DQ-NEXT: vzeroupper
|
|
; AVX512DQ-NEXT: retq
|
|
;
|
|
; AVX512DQVL-LABEL: test_v64i16:
|
|
; AVX512DQVL: # %bb.0:
|
|
; AVX512DQVL-NEXT: vpmullw %ymm3, %ymm1, %ymm1
|
|
; AVX512DQVL-NEXT: vpmullw %ymm1, %ymm2, %ymm1
|
|
; AVX512DQVL-NEXT: vpmullw %ymm1, %ymm0, %ymm0
|
|
; AVX512DQVL-NEXT: vextracti128 $1, %ymm0, %xmm1
|
|
; AVX512DQVL-NEXT: vpmullw %ymm1, %ymm0, %ymm0
|
|
; AVX512DQVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
|
|
; AVX512DQVL-NEXT: vpmullw %ymm1, %ymm0, %ymm0
|
|
; AVX512DQVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
|
|
; AVX512DQVL-NEXT: vpmullw %ymm1, %ymm0, %ymm0
|
|
; AVX512DQVL-NEXT: vpsrld $16, %xmm0, %xmm1
|
|
; AVX512DQVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
|
|
; AVX512DQVL-NEXT: vmovd %xmm0, %eax
|
|
; AVX512DQVL-NEXT: # kill: def $ax killed $ax killed $eax
|
|
; AVX512DQVL-NEXT: vzeroupper
|
|
; AVX512DQVL-NEXT: retq
|
|
%1 = call i16 @llvm.experimental.vector.reduce.mul.i16.v64i16(<64 x i16> %a0)
|
|
ret i16 %1
|
|
}
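
; For v64i16 the SSE path first folds the eight incoming xmm registers
; pairwise with pmullw before running the per-register halving steps. A
; hypothetical caller, shown only to illustrate how the intrinsic tested
; above is invoked; the splat constant is an assumption for the example, and
; the declaration it relies on is the one this test file already uses:
define i16 @mul_reduce_v64i16_example() {
  %ins = insertelement <64 x i16> undef, i16 3, i32 0
  %splat = shufflevector <64 x i16> %ins, <64 x i16> undef, <64 x i32> zeroinitializer
  %r = call i16 @llvm.experimental.vector.reduce.mul.i16.v64i16(<64 x i16> %splat)
  ret i16 %r
}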

;
; vXi8
;
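
; x86 has no byte multiply, so all of the vXi8 lowerings below widen each
; operand to i16 lanes (punpck/pmovzx/pmovsx), multiply with pmullw, and
; narrow again (pand+packuswb on SSE/AVX1, vpmovwb or vpmovdb on AVX512).
; One widen-multiply-truncate step in IR form, as an illustrative sketch
; (the function name is an assumption):
define <16 x i8> @mul_widen_step_sketch(<16 x i8> %a, <16 x i8> %b) {
  %wa = zext <16 x i8> %a to <16 x i16>
  %wb = zext <16 x i8> %b to <16 x i16>
  %w = mul <16 x i16> %wa, %wb
  %t = trunc <16 x i16> %w to <16 x i8>
  ret <16 x i8> %t
}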

define i8 @test_v16i8(<16 x i8> %a0) {
; SSE2-LABEL: test_v16i8:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: pmullw %xmm1, %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
; SSE2-NEXT: pand %xmm1, %xmm0
; SSE2-NEXT: pxor %xmm3, %xmm3
; SSE2-NEXT: packuswb %xmm3, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,2,3,3]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
; SSE2-NEXT: pmullw %xmm0, %xmm2
; SSE2-NEXT: pand %xmm1, %xmm2
; SSE2-NEXT: packuswb %xmm3, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm0
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: psrldq {{.*#+}} xmm2 = xmm2[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
; SSE2-NEXT: pmullw %xmm0, %xmm2
; SSE2-NEXT: pand %xmm1, %xmm2
; SSE2-NEXT: packuswb %xmm3, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm0
; SSE2-NEXT: psrlw $8, %xmm0
; SSE2-NEXT: movdqa %xmm2, %xmm3
; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
; SSE2-NEXT: pmullw %xmm0, %xmm3
; SSE2-NEXT: pand %xmm1, %xmm3
; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
; SSE2-NEXT: pmullw %xmm0, %xmm2
; SSE2-NEXT: pand %xmm1, %xmm2
; SSE2-NEXT: packuswb %xmm3, %xmm2
; SSE2-NEXT: movd %xmm2, %eax
; SSE2-NEXT: # kill: def $al killed $al killed $eax
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v16i8:
; SSE41: # %bb.0:
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE41-NEXT: pmullw %xmm1, %xmm0
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
; SSE41-NEXT: pand %xmm1, %xmm0
; SSE41-NEXT: pxor %xmm2, %xmm2
; SSE41-NEXT: packuswb %xmm2, %xmm0
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; SSE41-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; SSE41-NEXT: pmullw %xmm3, %xmm0
; SSE41-NEXT: pand %xmm1, %xmm0
; SSE41-NEXT: packuswb %xmm2, %xmm0
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; SSE41-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; SSE41-NEXT: pmullw %xmm3, %xmm0
; SSE41-NEXT: pand %xmm1, %xmm0
; SSE41-NEXT: packuswb %xmm2, %xmm0
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; SSE41-NEXT: psrlw $8, %xmm0
; SSE41-NEXT: pmullw %xmm1, %xmm0
; SSE41-NEXT: pextrb $0, %xmm0, %eax
; SSE41-NEXT: # kill: def $al killed $al killed $eax
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v16i8:
; AVX1: # %bb.0:
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX1-NEXT: vpmullw %xmm0, %xmm3, %xmm0
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; AVX1-NEXT: vpmullw %xmm0, %xmm3, %xmm0
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpextrb $0, %xmm0, %eax
; AVX1-NEXT: # kill: def $al killed $al killed $eax
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v16i8:
; AVX2: # %bb.0:
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX2-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX2-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; AVX2-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpshufb %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX2-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX2-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpshufb %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX2-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX2-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpshufb %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
; AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX2-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX2-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpextrb $0, %xmm0, %eax
; AVX2-NEXT: # kill: def $al killed $al killed $eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test_v16i8:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512BW-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512BW-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX512BW-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX512BW-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512BW-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX512BW-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX512BW-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512BW-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX512BW-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vpsrlw $8, %xmm0, %xmm1
; AVX512BW-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512BW-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX512BW-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vpextrb $0, %xmm0, %eax
; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: test_v16i8:
; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512BWVL-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512BWVL-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX512BWVL-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512BWVL-NEXT: vpmovwb %ymm0, %xmm0
; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX512BWVL-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512BWVL-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX512BWVL-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512BWVL-NEXT: vpmovwb %ymm0, %xmm0
; AVX512BWVL-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX512BWVL-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512BWVL-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX512BWVL-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512BWVL-NEXT: vpmovwb %ymm0, %xmm0
; AVX512BWVL-NEXT: vpsrlw $8, %xmm0, %xmm1
; AVX512BWVL-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512BWVL-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX512BWVL-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512BWVL-NEXT: vpmovwb %ymm0, %xmm0
; AVX512BWVL-NEXT: vpextrb $0, %xmm0, %eax
; AVX512BWVL-NEXT: # kill: def $al killed $al killed $eax
; AVX512BWVL-NEXT: vzeroupper
; AVX512BWVL-NEXT: retq
;
; AVX512DQ-LABEL: test_v16i8:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512DQ-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512DQ-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX512DQ-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512DQ-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX512DQ-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512DQ-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX512DQ-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512DQ-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
; AVX512DQ-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX512DQ-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512DQ-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX512DQ-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512DQ-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
; AVX512DQ-NEXT: vpsrlw $8, %xmm0, %xmm1
; AVX512DQ-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512DQ-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX512DQ-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512DQ-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
; AVX512DQ-NEXT: vpextrb $0, %xmm0, %eax
; AVX512DQ-NEXT: # kill: def $al killed $al killed $eax
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512DQVL-LABEL: test_v16i8:
; AVX512DQVL: # %bb.0:
; AVX512DQVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512DQVL-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512DQVL-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX512DQVL-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512DQVL-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQVL-NEXT: vpmovdb %zmm0, %xmm0
; AVX512DQVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX512DQVL-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512DQVL-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX512DQVL-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512DQVL-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQVL-NEXT: vpmovdb %zmm0, %xmm0
; AVX512DQVL-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX512DQVL-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512DQVL-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX512DQVL-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512DQVL-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQVL-NEXT: vpmovdb %zmm0, %xmm0
; AVX512DQVL-NEXT: vpsrlw $8, %xmm0, %xmm1
; AVX512DQVL-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512DQVL-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX512DQVL-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512DQVL-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQVL-NEXT: vpmovdb %zmm0, %xmm0
; AVX512DQVL-NEXT: vpextrb $0, %xmm0, %eax
; AVX512DQVL-NEXT: # kill: def $al killed $al killed $eax
; AVX512DQVL-NEXT: vzeroupper
; AVX512DQVL-NEXT: retq
%1 = call i8 @llvm.experimental.vector.reduce.mul.i8.v16i8(<16 x i8> %a0)
ret i8 %1
}
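
; Note the AVX2/AVX512 variants above widen with vpmovsxbw (sign extend)
; while the SSE2/SSE41/AVX1 ones zero extend; both are correct because the
; product is truncated back to 8 bits and
;   (a + 256*j) * (b + 256*k) == a*b (mod 256)
; so the choice of extension cannot change the low byte of each lane.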

define i8 @test_v32i8(<32 x i8> %a0) {
; SSE2-LABEL: test_v32i8:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
; SSE2-NEXT: pmullw %xmm2, %xmm3
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
; SSE2-NEXT: pand %xmm2, %xmm3
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: pmullw %xmm1, %xmm0
; SSE2-NEXT: pand %xmm2, %xmm0
; SSE2-NEXT: packuswb %xmm3, %xmm0
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: pmullw %xmm1, %xmm0
; SSE2-NEXT: pand %xmm2, %xmm0
; SSE2-NEXT: pxor %xmm3, %xmm3
; SSE2-NEXT: packuswb %xmm3, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,2,3,3]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE2-NEXT: pmullw %xmm0, %xmm1
; SSE2-NEXT: pand %xmm2, %xmm1
; SSE2-NEXT: packuswb %xmm3, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: psrldq {{.*#+}} xmm1 = xmm1[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE2-NEXT: pmullw %xmm0, %xmm1
; SSE2-NEXT: pand %xmm2, %xmm1
; SSE2-NEXT: packuswb %xmm3, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: psrlw $8, %xmm0
; SSE2-NEXT: movdqa %xmm1, %xmm3
; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
; SSE2-NEXT: pmullw %xmm0, %xmm3
; SSE2-NEXT: pand %xmm2, %xmm3
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE2-NEXT: pmullw %xmm0, %xmm1
; SSE2-NEXT: pand %xmm2, %xmm1
; SSE2-NEXT: packuswb %xmm3, %xmm1
; SSE2-NEXT: movd %xmm1, %eax
; SSE2-NEXT: # kill: def $al killed $al killed $eax
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v32i8:
; SSE41: # %bb.0:
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; SSE41-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE41-NEXT: pmullw %xmm1, %xmm0
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
; SSE41-NEXT: pand %xmm1, %xmm0
; SSE41-NEXT: pmullw %xmm2, %xmm3
; SSE41-NEXT: pand %xmm1, %xmm3
; SSE41-NEXT: packuswb %xmm0, %xmm3
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm0 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
; SSE41-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE41-NEXT: pmullw %xmm0, %xmm3
; SSE41-NEXT: pand %xmm1, %xmm3
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: packuswb %xmm0, %xmm3
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm2 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
; SSE41-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,0,1]
; SSE41-NEXT: pmullw %xmm2, %xmm3
; SSE41-NEXT: pand %xmm1, %xmm3
; SSE41-NEXT: packuswb %xmm0, %xmm3
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm2 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
; SSE41-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,2,3]
; SSE41-NEXT: pmullw %xmm2, %xmm3
; SSE41-NEXT: pand %xmm1, %xmm3
; SSE41-NEXT: packuswb %xmm0, %xmm3
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm0 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
; SSE41-NEXT: psrlw $8, %xmm3
; SSE41-NEXT: pmullw %xmm0, %xmm3
; SSE41-NEXT: pextrb $0, %xmm3, %eax
; SSE41-NEXT: # kill: def $al killed $al killed $eax
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v32i8:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpmullw %xmm1, %xmm3, %xmm3
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
; AVX1-NEXT: vpand %xmm1, %xmm3, %xmm3
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX1-NEXT: vpmullw %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpmullw %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpand %xmm1, %xmm3, %xmm3
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; AVX1-NEXT: vpmullw %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; AVX1-NEXT: vpmullw %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpand %xmm1, %xmm2, %xmm2
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpmullw %xmm0, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm0, %xmm2, %xmm0
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm2
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpmullw %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vpand %xmm1, %xmm3, %xmm3
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; AVX1-NEXT: vpmullw %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm2
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpmullw %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vpand %xmm1, %xmm3, %xmm3
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; AVX1-NEXT: vpmullw %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpextrb $0, %xmm0, %eax
; AVX1-NEXT: # kill: def $al killed $al killed $eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v32i8:
; AVX2: # %bb.0:
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX2-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; AVX2-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpshufb %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX2-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX2-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpshufb %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX2-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX2-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpshufb %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX2-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX2-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpshufb %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
; AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX2-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX2-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpextrb $0, %xmm0, %eax
; AVX2-NEXT: # kill: def $al killed $al killed $eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test_v32i8:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512BW-NEXT: vpmovsxbw %ymm1, %zmm1
; AVX512BW-NEXT: vpmovsxbw %ymm0, %zmm0
; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512BW-NEXT: vpmovsxbw %ymm0, %zmm0
; AVX512BW-NEXT: vpmovsxbw %ymm1, %zmm1
; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX512BW-NEXT: vpmovsxbw %ymm0, %zmm0
; AVX512BW-NEXT: vpmovsxbw %ymm1, %zmm1
; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX512BW-NEXT: vpmovsxbw %ymm0, %zmm0
; AVX512BW-NEXT: vpmovsxbw %ymm1, %zmm1
; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vpsrlw $8, %xmm0, %xmm1
; AVX512BW-NEXT: vpmovsxbw %ymm0, %zmm0
; AVX512BW-NEXT: vpmovsxbw %ymm1, %zmm1
; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vpextrb $0, %xmm0, %eax
; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: test_v32i8:
; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512BWVL-NEXT: vpmovsxbw %ymm1, %zmm1
; AVX512BWVL-NEXT: vpmovsxbw %ymm0, %zmm0
; AVX512BWVL-NEXT: vpmullw %zmm1, %zmm0, %zmm0
; AVX512BWVL-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512BWVL-NEXT: vpmovsxbw %ymm0, %zmm0
; AVX512BWVL-NEXT: vpmovsxbw %ymm1, %zmm1
; AVX512BWVL-NEXT: vpmullw %zmm1, %zmm0, %zmm0
; AVX512BWVL-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX512BWVL-NEXT: vpmovsxbw %ymm0, %zmm0
; AVX512BWVL-NEXT: vpmovsxbw %ymm1, %zmm1
; AVX512BWVL-NEXT: vpmullw %zmm1, %zmm0, %zmm0
; AVX512BWVL-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BWVL-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX512BWVL-NEXT: vpmovsxbw %ymm0, %zmm0
; AVX512BWVL-NEXT: vpmovsxbw %ymm1, %zmm1
; AVX512BWVL-NEXT: vpmullw %zmm1, %zmm0, %zmm0
; AVX512BWVL-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BWVL-NEXT: vpsrlw $8, %xmm0, %xmm1
; AVX512BWVL-NEXT: vpmovsxbw %ymm0, %zmm0
; AVX512BWVL-NEXT: vpmovsxbw %ymm1, %zmm1
; AVX512BWVL-NEXT: vpmullw %zmm1, %zmm0, %zmm0
; AVX512BWVL-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BWVL-NEXT: vpextrb $0, %xmm0, %eax
; AVX512BWVL-NEXT: # kill: def $al killed $al killed $eax
; AVX512BWVL-NEXT: vzeroupper
; AVX512BWVL-NEXT: retq
;
; AVX512DQ-LABEL: test_v32i8:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512DQ-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX512DQ-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512DQ-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512DQ-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512DQ-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512DQ-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX512DQ-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512DQ-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX512DQ-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512DQ-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX512DQ-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512DQ-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
; AVX512DQ-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX512DQ-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512DQ-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX512DQ-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512DQ-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
; AVX512DQ-NEXT: vpsrlw $8, %xmm0, %xmm1
; AVX512DQ-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512DQ-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX512DQ-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512DQ-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
; AVX512DQ-NEXT: vpextrb $0, %xmm0, %eax
; AVX512DQ-NEXT: # kill: def $al killed $al killed $eax
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512DQVL-LABEL: test_v32i8:
; AVX512DQVL: # %bb.0:
; AVX512DQVL-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512DQVL-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX512DQVL-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512DQVL-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512DQVL-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQVL-NEXT: vpmovdb %zmm0, %xmm0
; AVX512DQVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512DQVL-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512DQVL-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX512DQVL-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512DQVL-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQVL-NEXT: vpmovdb %zmm0, %xmm0
; AVX512DQVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX512DQVL-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512DQVL-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX512DQVL-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512DQVL-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQVL-NEXT: vpmovdb %zmm0, %xmm0
; AVX512DQVL-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX512DQVL-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512DQVL-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX512DQVL-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512DQVL-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQVL-NEXT: vpmovdb %zmm0, %xmm0
; AVX512DQVL-NEXT: vpsrlw $8, %xmm0, %xmm1
; AVX512DQVL-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512DQVL-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX512DQVL-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512DQVL-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQVL-NEXT: vpmovdb %zmm0, %xmm0
; AVX512DQVL-NEXT: vpextrb $0, %xmm0, %eax
; AVX512DQVL-NEXT: # kill: def $al killed $al killed $eax
; AVX512DQVL-NEXT: vzeroupper
; AVX512DQVL-NEXT: retq
%1 = call i8 @llvm.experimental.vector.reduce.mul.i8.v32i8(<32 x i8> %a0)
ret i8 %1
}
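
; The AVX512DQ runs above cannot use vpmovwb (an AVX512BW instruction), so
; they narrow in two hops instead: vpmovsxwd to dword lanes, then vpmovdb
; down to bytes. Both routes keep only the low byte of each product; a
; sketch of the two-hop form in IR (the function name is an assumption):
define <16 x i8> @trunc_two_hop_sketch(<16 x i16> %w) {
  %d = sext <16 x i16> %w to <16 x i32>
  %b = trunc <16 x i32> %d to <16 x i8>
  ret <16 x i8> %b
}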

define i8 @test_v64i8(<64 x i8> %a0) {
; SSE2-LABEL: test_v64i8:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm2, %xmm4
; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm0[8],xmm4[9],xmm0[9],xmm4[10],xmm0[10],xmm4[11],xmm0[11],xmm4[12],xmm0[12],xmm4[13],xmm0[13],xmm4[14],xmm0[14],xmm4[15],xmm0[15]
; SSE2-NEXT: movdqa %xmm0, %xmm5
; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm0[8],xmm5[9],xmm0[9],xmm5[10],xmm0[10],xmm5[11],xmm0[11],xmm5[12],xmm0[12],xmm5[13],xmm0[13],xmm5[14],xmm0[14],xmm5[15],xmm0[15]
; SSE2-NEXT: pmullw %xmm4, %xmm5
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
; SSE2-NEXT: pand %xmm4, %xmm5
; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: pmullw %xmm2, %xmm0
; SSE2-NEXT: pand %xmm4, %xmm0
; SSE2-NEXT: packuswb %xmm5, %xmm0
; SSE2-NEXT: movdqa %xmm3, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
; SSE2-NEXT: movdqa %xmm1, %xmm5
; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm0[8],xmm5[9],xmm0[9],xmm5[10],xmm0[10],xmm5[11],xmm0[11],xmm5[12],xmm0[12],xmm5[13],xmm0[13],xmm5[14],xmm0[14],xmm5[15],xmm0[15]
; SSE2-NEXT: pmullw %xmm2, %xmm5
; SSE2-NEXT: pand %xmm4, %xmm5
; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE2-NEXT: pmullw %xmm3, %xmm1
; SSE2-NEXT: pand %xmm4, %xmm1
; SSE2-NEXT: packuswb %xmm5, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
; SSE2-NEXT: pmullw %xmm2, %xmm3
; SSE2-NEXT: pand %xmm4, %xmm3
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: pmullw %xmm1, %xmm0
; SSE2-NEXT: pand %xmm4, %xmm0
; SSE2-NEXT: packuswb %xmm3, %xmm0
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: pmullw %xmm1, %xmm0
; SSE2-NEXT: pand %xmm4, %xmm0
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: packuswb %xmm2, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,2,3,3]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE2-NEXT: pmullw %xmm0, %xmm1
; SSE2-NEXT: pand %xmm4, %xmm1
; SSE2-NEXT: packuswb %xmm2, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: psrldq {{.*#+}} xmm1 = xmm1[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE2-NEXT: pmullw %xmm0, %xmm1
; SSE2-NEXT: pand %xmm4, %xmm1
; SSE2-NEXT: packuswb %xmm2, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: psrlw $8, %xmm0
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
; SSE2-NEXT: pmullw %xmm0, %xmm2
; SSE2-NEXT: pand %xmm4, %xmm2
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE2-NEXT: pmullw %xmm0, %xmm1
; SSE2-NEXT: pand %xmm4, %xmm1
; SSE2-NEXT: packuswb %xmm2, %xmm1
; SSE2-NEXT: movd %xmm1, %eax
; SSE2-NEXT: # kill: def $al killed $al killed $eax
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v64i8:
; SSE41: # %bb.0:
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm5 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; SSE41-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm4 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE41-NEXT: pmullw %xmm2, %xmm0
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
; SSE41-NEXT: pand %xmm2, %xmm0
; SSE41-NEXT: pmullw %xmm5, %xmm4
; SSE41-NEXT: pand %xmm2, %xmm4
; SSE41-NEXT: packuswb %xmm0, %xmm4
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm0 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
; SSE41-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm5 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; SSE41-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE41-NEXT: pmullw %xmm3, %xmm1
; SSE41-NEXT: pand %xmm2, %xmm1
; SSE41-NEXT: pmullw %xmm0, %xmm5
; SSE41-NEXT: pand %xmm2, %xmm5
; SSE41-NEXT: packuswb %xmm1, %xmm5
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm0 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
; SSE41-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
; SSE41-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE41-NEXT: pmullw %xmm5, %xmm4
; SSE41-NEXT: pand %xmm2, %xmm4
; SSE41-NEXT: pmullw %xmm0, %xmm1
; SSE41-NEXT: pand %xmm2, %xmm1
; SSE41-NEXT: packuswb %xmm4, %xmm1
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; SSE41-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE41-NEXT: pmullw %xmm0, %xmm1
; SSE41-NEXT: pand %xmm2, %xmm1
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: packuswb %xmm0, %xmm1
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm3 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; SSE41-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
; SSE41-NEXT: pmullw %xmm3, %xmm1
; SSE41-NEXT: pand %xmm2, %xmm1
; SSE41-NEXT: packuswb %xmm0, %xmm1
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm3 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; SSE41-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
; SSE41-NEXT: pmullw %xmm3, %xmm1
; SSE41-NEXT: pand %xmm2, %xmm1
; SSE41-NEXT: packuswb %xmm0, %xmm1
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; SSE41-NEXT: psrlw $8, %xmm1
; SSE41-NEXT: pmullw %xmm0, %xmm1
; SSE41-NEXT: pextrb $0, %xmm1, %eax
; SSE41-NEXT: # kill: def $al killed $al killed $eax
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v64i8:
; AVX1: # %bb.0:
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpmullw %xmm2, %xmm3, %xmm3
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
; AVX1-NEXT: vpand %xmm2, %xmm3, %xmm3
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm4 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm5 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX1-NEXT: vpmullw %xmm4, %xmm5, %xmm4
; AVX1-NEXT: vpand %xmm2, %xmm4, %xmm4
; AVX1-NEXT: vpackuswb %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpmullw %xmm4, %xmm5, %xmm4
; AVX1-NEXT: vpand %xmm2, %xmm4, %xmm4
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpmullw %xmm1, %xmm4, %xmm1
; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
; AVX1-NEXT: vpmullw %xmm0, %xmm3, %xmm0
; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpmullw %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpand %xmm2, %xmm3, %xmm3
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; AVX1-NEXT: vpmullw %xmm1, %xmm3, %xmm1
; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpmullw %xmm0, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm0, %xmm1, %xmm0
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpmullw %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vpand %xmm2, %xmm3, %xmm3
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpmullw %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vpand %xmm2, %xmm3, %xmm3
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpextrb $0, %xmm0, %eax
; AVX1-NEXT: # kill: def $al killed $al killed $eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v64i8:
; AVX2: # %bb.0:
; AVX2-NEXT: vpmovsxbw %xmm1, %ymm2
; AVX2-NEXT: vpmovsxbw %xmm0, %ymm3
; AVX2-NEXT: vpmullw %ymm2, %ymm3, %ymm3
; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm4
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; AVX2-NEXT: vpshufb %xmm2, %xmm4, %xmm4
; AVX2-NEXT: vpshufb %xmm2, %xmm3, %xmm3
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm4[0]
; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1
; AVX2-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX2-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpshufb %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX2-NEXT: vpmovsxbw %xmm3, %ymm1
; AVX2-NEXT: vpmullw %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpshufb %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX2-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX2-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpshufb %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX2-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX2-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpshufb %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX2-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX2-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpshufb %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
; AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX2-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX2-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpextrb $0, %xmm0, %eax
; AVX2-NEXT: # kill: def $al killed $al killed $eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test_v64i8:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512BW-NEXT: vpmovsxbw %ymm1, %zmm1
; AVX512BW-NEXT: vpmovsxbw %ymm0, %zmm0
; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512BW-NEXT: vpmovsxbw %ymm0, %zmm0
; AVX512BW-NEXT: vpmovsxbw %ymm1, %zmm1
; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512BW-NEXT: vpmovsxbw %ymm0, %zmm0
; AVX512BW-NEXT: vpmovsxbw %ymm1, %zmm1
; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX512BW-NEXT: vpmovsxbw %ymm0, %zmm0
; AVX512BW-NEXT: vpmovsxbw %ymm1, %zmm1
; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX512BW-NEXT: vpmovsxbw %ymm0, %zmm0
; AVX512BW-NEXT: vpmovsxbw %ymm1, %zmm1
; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vpsrlw $8, %xmm0, %xmm1
; AVX512BW-NEXT: vpmovsxbw %ymm0, %zmm0
; AVX512BW-NEXT: vpmovsxbw %ymm1, %zmm1
; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vpextrb $0, %xmm0, %eax
; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: test_v64i8:
; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512BWVL-NEXT: vpmovsxbw %ymm1, %zmm1
; AVX512BWVL-NEXT: vpmovsxbw %ymm0, %zmm0
; AVX512BWVL-NEXT: vpmullw %zmm1, %zmm0, %zmm0
; AVX512BWVL-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BWVL-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512BWVL-NEXT: vpmovsxbw %ymm0, %zmm0
; AVX512BWVL-NEXT: vpmovsxbw %ymm1, %zmm1
; AVX512BWVL-NEXT: vpmullw %zmm1, %zmm0, %zmm0
; AVX512BWVL-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512BWVL-NEXT: vpmovsxbw %ymm0, %zmm0
; AVX512BWVL-NEXT: vpmovsxbw %ymm1, %zmm1
; AVX512BWVL-NEXT: vpmullw %zmm1, %zmm0, %zmm0
; AVX512BWVL-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX512BWVL-NEXT: vpmovsxbw %ymm0, %zmm0
; AVX512BWVL-NEXT: vpmovsxbw %ymm1, %zmm1
; AVX512BWVL-NEXT: vpmullw %zmm1, %zmm0, %zmm0
; AVX512BWVL-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BWVL-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX512BWVL-NEXT: vpmovsxbw %ymm0, %zmm0
; AVX512BWVL-NEXT: vpmovsxbw %ymm1, %zmm1
; AVX512BWVL-NEXT: vpmullw %zmm1, %zmm0, %zmm0
; AVX512BWVL-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BWVL-NEXT: vpsrlw $8, %xmm0, %xmm1
; AVX512BWVL-NEXT: vpmovsxbw %ymm0, %zmm0
; AVX512BWVL-NEXT: vpmovsxbw %ymm1, %zmm1
; AVX512BWVL-NEXT: vpmullw %zmm1, %zmm0, %zmm0
; AVX512BWVL-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BWVL-NEXT: vpextrb $0, %xmm0, %eax
; AVX512BWVL-NEXT: # kill: def $al killed $al killed $eax
; AVX512BWVL-NEXT: vzeroupper
; AVX512BWVL-NEXT: retq
;
; AVX512DQ-LABEL: test_v64i8:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpmovsxbw %xmm1, %ymm2
; AVX512DQ-NEXT: vpmovsxbw %xmm0, %ymm3
; AVX512DQ-NEXT: vpmullw %ymm2, %ymm3, %ymm2
; AVX512DQ-NEXT: vpmovsxwd %ymm2, %zmm2
; AVX512DQ-NEXT: vpmovdb %zmm2, %xmm2
; AVX512DQ-NEXT: vextracti128 $1, %ymm1, %xmm1
; AVX512DQ-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX512DQ-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512DQ-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512DQ-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
; AVX512DQ-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512DQ-NEXT: vpmovsxbw %xmm2, %ymm1
; AVX512DQ-NEXT: vpmullw %ymm0, %ymm1, %ymm0
; AVX512DQ-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512DQ-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512DQ-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX512DQ-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512DQ-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX512DQ-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512DQ-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX512DQ-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512DQ-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
; AVX512DQ-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX512DQ-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512DQ-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX512DQ-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512DQ-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
; AVX512DQ-NEXT: vpsrlw $8, %xmm0, %xmm1
; AVX512DQ-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512DQ-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX512DQ-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512DQ-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
; AVX512DQ-NEXT: vpextrb $0, %xmm0, %eax
; AVX512DQ-NEXT: # kill: def $al killed $al killed $eax
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512DQVL-LABEL: test_v64i8:
; AVX512DQVL: # %bb.0:
; AVX512DQVL-NEXT: vpmovsxbw %xmm1, %ymm2
; AVX512DQVL-NEXT: vpmovsxbw %xmm0, %ymm3
; AVX512DQVL-NEXT: vpmullw %ymm2, %ymm3, %ymm2
; AVX512DQVL-NEXT: vpmovsxwd %ymm2, %zmm2
; AVX512DQVL-NEXT: vpmovdb %zmm2, %xmm2
; AVX512DQVL-NEXT: vextracti128 $1, %ymm1, %xmm1
; AVX512DQVL-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX512DQVL-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX512DQVL-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512DQVL-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512DQVL-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQVL-NEXT: vpmovdb %zmm0, %xmm0
; AVX512DQVL-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512DQVL-NEXT: vpmovsxbw %xmm2, %ymm1
; AVX512DQVL-NEXT: vpmullw %ymm0, %ymm1, %ymm0
; AVX512DQVL-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQVL-NEXT: vpmovdb %zmm0, %xmm0
; AVX512DQVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512DQVL-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512DQVL-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX512DQVL-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512DQVL-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQVL-NEXT: vpmovdb %zmm0, %xmm0
; AVX512DQVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX512DQVL-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512DQVL-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX512DQVL-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512DQVL-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQVL-NEXT: vpmovdb %zmm0, %xmm0
; AVX512DQVL-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX512DQVL-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512DQVL-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX512DQVL-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512DQVL-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQVL-NEXT: vpmovdb %zmm0, %xmm0
; AVX512DQVL-NEXT: vpsrlw $8, %xmm0, %xmm1
; AVX512DQVL-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512DQVL-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX512DQVL-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512DQVL-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQVL-NEXT: vpmovdb %zmm0, %xmm0
; AVX512DQVL-NEXT: vpextrb $0, %xmm0, %eax
; AVX512DQVL-NEXT: # kill: def $al killed $al killed $eax
; AVX512DQVL-NEXT: vzeroupper
; AVX512DQVL-NEXT: retq
%1 = call i8 @llvm.experimental.vector.reduce.mul.i8.v64i8(<64 x i8> %a0)
ret i8 %1
}
|
|
|
|
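; Editorial note (not a CHECK line): test_v128i8 below exercises the same
; reduction at twice the width. Each target widens the i8 lanes to i16
; (punpck{l,h}bw on SSE, vpmovzxbw/vpmovsxbw on AVX), multiplies with
; pmullw/vpmullw, narrows back to i8, and repeats while halving the vector
; until the product of all 128 bytes sits in the low lane, which is
; extracted into %al.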
define i8 @test_v128i8(<128 x i8> %a0) {
; SSE2-LABEL: test_v128i8:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm5, %xmm8
; SSE2-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8],xmm0[8],xmm8[9],xmm0[9],xmm8[10],xmm0[10],xmm8[11],xmm0[11],xmm8[12],xmm0[12],xmm8[13],xmm0[13],xmm8[14],xmm0[14],xmm8[15],xmm0[15]
; SSE2-NEXT: movdqa %xmm1, %xmm9
; SSE2-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8],xmm0[8],xmm9[9],xmm0[9],xmm9[10],xmm0[10],xmm9[11],xmm0[11],xmm9[12],xmm0[12],xmm9[13],xmm0[13],xmm9[14],xmm0[14],xmm9[15],xmm0[15]
; SSE2-NEXT: pmullw %xmm8, %xmm9
; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [255,255,255,255,255,255,255,255]
; SSE2-NEXT: pand %xmm8, %xmm9
; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3],xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE2-NEXT: pmullw %xmm5, %xmm1
; SSE2-NEXT: pand %xmm8, %xmm1
; SSE2-NEXT: packuswb %xmm9, %xmm1
; SSE2-NEXT: movdqa %xmm7, %xmm9
; SSE2-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8],xmm0[8],xmm9[9],xmm0[9],xmm9[10],xmm0[10],xmm9[11],xmm0[11],xmm9[12],xmm0[12],xmm9[13],xmm0[13],xmm9[14],xmm0[14],xmm9[15],xmm0[15]
; SSE2-NEXT: movdqa %xmm3, %xmm5
; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm0[8],xmm5[9],xmm0[9],xmm5[10],xmm0[10],xmm5[11],xmm0[11],xmm5[12],xmm0[12],xmm5[13],xmm0[13],xmm5[14],xmm0[14],xmm5[15],xmm0[15]
; SSE2-NEXT: pmullw %xmm9, %xmm5
; SSE2-NEXT: pand %xmm8, %xmm5
; SSE2-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm0[0],xmm7[1],xmm0[1],xmm7[2],xmm0[2],xmm7[3],xmm0[3],xmm7[4],xmm0[4],xmm7[5],xmm0[5],xmm7[6],xmm0[6],xmm7[7],xmm0[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
; SSE2-NEXT: pmullw %xmm7, %xmm3
; SSE2-NEXT: pand %xmm8, %xmm3
; SSE2-NEXT: packuswb %xmm5, %xmm3
; SSE2-NEXT: movdqa %xmm4, %xmm5
; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm0[8],xmm5[9],xmm0[9],xmm5[10],xmm0[10],xmm5[11],xmm0[11],xmm5[12],xmm0[12],xmm5[13],xmm0[13],xmm5[14],xmm0[14],xmm5[15],xmm0[15]
; SSE2-NEXT: movdqa %xmm0, %xmm7
; SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm0[8],xmm7[9],xmm0[9],xmm7[10],xmm0[10],xmm7[11],xmm0[11],xmm7[12],xmm0[12],xmm7[13],xmm0[13],xmm7[14],xmm0[14],xmm7[15],xmm0[15]
; SSE2-NEXT: pmullw %xmm5, %xmm7
; SSE2-NEXT: pand %xmm8, %xmm7
; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3],xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: pmullw %xmm4, %xmm0
; SSE2-NEXT: pand %xmm8, %xmm0
; SSE2-NEXT: packuswb %xmm7, %xmm0
; SSE2-NEXT: movdqa %xmm6, %xmm4
; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm0[8],xmm4[9],xmm0[9],xmm4[10],xmm0[10],xmm4[11],xmm0[11],xmm4[12],xmm0[12],xmm4[13],xmm0[13],xmm4[14],xmm0[14],xmm4[15],xmm0[15]
; SSE2-NEXT: movdqa %xmm2, %xmm5
; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm0[8],xmm5[9],xmm0[9],xmm5[10],xmm0[10],xmm5[11],xmm0[11],xmm5[12],xmm0[12],xmm5[13],xmm0[13],xmm5[14],xmm0[14],xmm5[15],xmm0[15]
; SSE2-NEXT: pmullw %xmm4, %xmm5
; SSE2-NEXT: pand %xmm8, %xmm5
; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1],xmm6[2],xmm0[2],xmm6[3],xmm0[3],xmm6[4],xmm0[4],xmm6[5],xmm0[5],xmm6[6],xmm0[6],xmm6[7],xmm0[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
; SSE2-NEXT: pmullw %xmm6, %xmm2
; SSE2-NEXT: pand %xmm8, %xmm2
; SSE2-NEXT: packuswb %xmm5, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm4
; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm0[8],xmm4[9],xmm0[9],xmm4[10],xmm0[10],xmm4[11],xmm0[11],xmm4[12],xmm0[12],xmm4[13],xmm0[13],xmm4[14],xmm0[14],xmm4[15],xmm0[15]
; SSE2-NEXT: movdqa %xmm0, %xmm5
; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm0[8],xmm5[9],xmm0[9],xmm5[10],xmm0[10],xmm5[11],xmm0[11],xmm5[12],xmm0[12],xmm5[13],xmm0[13],xmm5[14],xmm0[14],xmm5[15],xmm0[15]
; SSE2-NEXT: pmullw %xmm4, %xmm5
; SSE2-NEXT: pand %xmm8, %xmm5
; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: pmullw %xmm2, %xmm0
; SSE2-NEXT: pand %xmm8, %xmm0
; SSE2-NEXT: packuswb %xmm5, %xmm0
; SSE2-NEXT: movdqa %xmm3, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm0[8],xmm4[9],xmm0[9],xmm4[10],xmm0[10],xmm4[11],xmm0[11],xmm4[12],xmm0[12],xmm4[13],xmm0[13],xmm4[14],xmm0[14],xmm4[15],xmm0[15]
; SSE2-NEXT: pmullw %xmm2, %xmm4
; SSE2-NEXT: pand %xmm8, %xmm4
; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE2-NEXT: pmullw %xmm3, %xmm1
; SSE2-NEXT: pand %xmm8, %xmm1
; SSE2-NEXT: packuswb %xmm4, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
; SSE2-NEXT: pmullw %xmm2, %xmm3
; SSE2-NEXT: pand %xmm8, %xmm3
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: pmullw %xmm1, %xmm0
; SSE2-NEXT: pand %xmm8, %xmm0
; SSE2-NEXT: packuswb %xmm3, %xmm0
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: pmullw %xmm1, %xmm0
; SSE2-NEXT: pand %xmm8, %xmm0
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: packuswb %xmm2, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,2,3,3]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE2-NEXT: pmullw %xmm0, %xmm1
; SSE2-NEXT: pand %xmm8, %xmm1
; SSE2-NEXT: packuswb %xmm2, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: psrldq {{.*#+}} xmm1 = xmm1[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE2-NEXT: pmullw %xmm0, %xmm1
; SSE2-NEXT: pand %xmm8, %xmm1
; SSE2-NEXT: packuswb %xmm2, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: psrlw $8, %xmm0
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
; SSE2-NEXT: pmullw %xmm0, %xmm2
; SSE2-NEXT: pand %xmm8, %xmm2
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE2-NEXT: pmullw %xmm0, %xmm1
; SSE2-NEXT: pand %xmm8, %xmm1
; SSE2-NEXT: packuswb %xmm2, %xmm1
; SSE2-NEXT: movd %xmm1, %eax
; SSE2-NEXT: # kill: def $al killed $al killed $eax
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v128i8:
; SSE41: # %bb.0:
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm9 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
; SSE41-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm8 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; SSE41-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE41-NEXT: pmullw %xmm5, %xmm1
; SSE41-NEXT: movdqa {{.*#+}} xmm5 = [255,255,255,255,255,255,255,255]
; SSE41-NEXT: pand %xmm5, %xmm1
; SSE41-NEXT: pmullw %xmm9, %xmm8
; SSE41-NEXT: pand %xmm5, %xmm8
; SSE41-NEXT: packuswb %xmm1, %xmm8
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm9 = xmm7[0],zero,xmm7[1],zero,xmm7[2],zero,xmm7[3],zero,xmm7[4],zero,xmm7[5],zero,xmm7[6],zero,xmm7[7],zero
; SSE41-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
; SSE41-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE41-NEXT: pmullw %xmm7, %xmm3
; SSE41-NEXT: pand %xmm5, %xmm3
; SSE41-NEXT: pmullw %xmm9, %xmm1
; SSE41-NEXT: pand %xmm5, %xmm1
; SSE41-NEXT: packuswb %xmm3, %xmm1
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm7 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
; SSE41-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE41-NEXT: pmullw %xmm4, %xmm0
; SSE41-NEXT: pand %xmm5, %xmm0
; SSE41-NEXT: pmullw %xmm7, %xmm3
; SSE41-NEXT: pand %xmm5, %xmm3
; SSE41-NEXT: packuswb %xmm0, %xmm3
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm0 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero,xmm6[4],zero,xmm6[5],zero,xmm6[6],zero,xmm6[7],zero
; SSE41-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm4 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; SSE41-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE41-NEXT: pmullw %xmm6, %xmm2
; SSE41-NEXT: pand %xmm5, %xmm2
; SSE41-NEXT: pmullw %xmm0, %xmm4
; SSE41-NEXT: pand %xmm5, %xmm4
; SSE41-NEXT: packuswb %xmm2, %xmm4
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm2 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
; SSE41-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm0 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
; SSE41-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE41-NEXT: pmullw %xmm4, %xmm3
; SSE41-NEXT: pand %xmm5, %xmm3
; SSE41-NEXT: pmullw %xmm2, %xmm0
; SSE41-NEXT: pand %xmm5, %xmm0
; SSE41-NEXT: packuswb %xmm3, %xmm0
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; SSE41-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm3 = xmm8[0],zero,xmm8[1],zero,xmm8[2],zero,xmm8[3],zero,xmm8[4],zero,xmm8[5],zero,xmm8[6],zero,xmm8[7],zero
; SSE41-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE41-NEXT: pmullw %xmm1, %xmm8
; SSE41-NEXT: pand %xmm5, %xmm8
; SSE41-NEXT: pmullw %xmm2, %xmm3
; SSE41-NEXT: pand %xmm5, %xmm3
; SSE41-NEXT: packuswb %xmm8, %xmm3
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
; SSE41-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE41-NEXT: pmullw %xmm3, %xmm0
; SSE41-NEXT: pand %xmm5, %xmm0
; SSE41-NEXT: pmullw %xmm1, %xmm2
; SSE41-NEXT: pand %xmm5, %xmm2
; SSE41-NEXT: packuswb %xmm0, %xmm2
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm0 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; SSE41-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE41-NEXT: pmullw %xmm0, %xmm2
; SSE41-NEXT: pand %xmm5, %xmm2
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: packuswb %xmm0, %xmm2
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; SSE41-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
; SSE41-NEXT: pmullw %xmm1, %xmm2
; SSE41-NEXT: pand %xmm5, %xmm2
; SSE41-NEXT: packuswb %xmm0, %xmm2
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; SSE41-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,2,3]
; SSE41-NEXT: pmullw %xmm1, %xmm2
; SSE41-NEXT: pand %xmm5, %xmm2
; SSE41-NEXT: packuswb %xmm0, %xmm2
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm0 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; SSE41-NEXT: psrlw $8, %xmm2
; SSE41-NEXT: pmullw %xmm0, %xmm2
; SSE41-NEXT: pextrb $0, %xmm2, %eax
; SSE41-NEXT: # kill: def $al killed $al killed $eax
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v128i8:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm5
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm6
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm7 = xmm6[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpmullw %xmm4, %xmm7, %xmm7
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
; AVX1-NEXT: vpand %xmm4, %xmm7, %xmm7
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm6 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero,xmm6[4],zero,xmm6[5],zero,xmm6[6],zero,xmm6[7],zero
; AVX1-NEXT: vpmullw %xmm5, %xmm6, %xmm5
; AVX1-NEXT: vpand %xmm4, %xmm5, %xmm5
; AVX1-NEXT: vpackuswb %xmm7, %xmm5, %xmm8
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm6
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm9 = xmm6[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm7 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpmullw %xmm9, %xmm7, %xmm7
; AVX1-NEXT: vpand %xmm4, %xmm7, %xmm7
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm6 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero,xmm6[4],zero,xmm6[5],zero,xmm6[6],zero,xmm6[7],zero
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
; AVX1-NEXT: vpmullw %xmm6, %xmm5, %xmm5
; AVX1-NEXT: vpand %xmm4, %xmm5, %xmm5
; AVX1-NEXT: vpackuswb %xmm7, %xmm5, %xmm6
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm7 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpmullw %xmm5, %xmm7, %xmm5
; AVX1-NEXT: vpand %xmm4, %xmm5, %xmm5
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX1-NEXT: vpmullw %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm5, %xmm0, %xmm0
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpmullw %xmm2, %xmm5, %xmm2
; AVX1-NEXT: vpand %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; AVX1-NEXT: vpmullw %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm1
; AVX1-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpmullw %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpand %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm6[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm8[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpmullw %xmm1, %xmm2, %xmm1
; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm1
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero,xmm6[4],zero,xmm6[5],zero,xmm6[6],zero,xmm6[7],zero
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm3 = xmm8[0],zero,xmm8[1],zero,xmm8[2],zero,xmm8[3],zero,xmm8[4],zero,xmm8[5],zero,xmm8[6],zero,xmm8[7],zero
; AVX1-NEXT: vpmullw %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpand %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpackuswb %xmm1, %xmm2, %xmm1
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpmullw %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpand %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpmullw %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpand %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; AVX1-NEXT: vpmullw %xmm1, %xmm2, %xmm1
; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm1
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpmullw %xmm0, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm0, %xmm1, %xmm0
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpmullw %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpand %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpmullw %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpand %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpextrb $0, %xmm0, %eax
; AVX1-NEXT: # kill: def $al killed $al killed $eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v128i8:
; AVX2: # %bb.0:
; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm4
; AVX2-NEXT: vpmovsxbw %xmm4, %ymm4
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm5
; AVX2-NEXT: vpmovsxbw %xmm5, %ymm5
; AVX2-NEXT: vpmullw %ymm4, %ymm5, %ymm5
; AVX2-NEXT: vextracti128 $1, %ymm5, %xmm6
; AVX2-NEXT: vmovdqa {{.*#+}} xmm4 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; AVX2-NEXT: vpshufb %xmm4, %xmm6, %xmm6
; AVX2-NEXT: vpshufb %xmm4, %xmm5, %xmm5
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm6[0]
; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm6
; AVX2-NEXT: vpmovsxbw %xmm6, %ymm6
; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm7
; AVX2-NEXT: vpmovsxbw %xmm7, %ymm7
; AVX2-NEXT: vpmullw %ymm6, %ymm7, %ymm6
; AVX2-NEXT: vextracti128 $1, %ymm6, %xmm7
; AVX2-NEXT: vpshufb %xmm4, %xmm7, %xmm7
; AVX2-NEXT: vpshufb %xmm4, %xmm6, %xmm6
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm6 = xmm6[0],xmm7[0]
; AVX2-NEXT: vpmovsxbw %xmm2, %ymm2
; AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX2-NEXT: vpmullw %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
; AVX2-NEXT: vpshufb %xmm4, %xmm2, %xmm2
; AVX2-NEXT: vpshufb %xmm4, %xmm0, %xmm0
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; AVX2-NEXT: vpmovsxbw %xmm3, %ymm2
; AVX2-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX2-NEXT: vpmullw %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX2-NEXT: vpshufb %xmm4, %xmm2, %xmm2
; AVX2-NEXT: vpshufb %xmm4, %xmm1, %xmm1
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; AVX2-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX2-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpshufb %xmm4, %xmm1, %xmm1
; AVX2-NEXT: vpshufb %xmm4, %xmm0, %xmm0
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX2-NEXT: vpmovsxbw %xmm6, %ymm1
; AVX2-NEXT: vpmovsxbw %xmm5, %ymm2
; AVX2-NEXT: vpmullw %ymm1, %ymm2, %ymm1
; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX2-NEXT: vpshufb %xmm4, %xmm2, %xmm2
; AVX2-NEXT: vpshufb %xmm4, %xmm1, %xmm1
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; AVX2-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX2-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpshufb %xmm4, %xmm1, %xmm1
; AVX2-NEXT: vpshufb %xmm4, %xmm0, %xmm0
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX2-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX2-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpshufb %xmm4, %xmm1, %xmm1
; AVX2-NEXT: vpshufb %xmm4, %xmm0, %xmm0
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX2-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX2-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpshufb %xmm4, %xmm1, %xmm1
; AVX2-NEXT: vpshufb %xmm4, %xmm0, %xmm0
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX2-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX2-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpshufb %xmm4, %xmm1, %xmm1
; AVX2-NEXT: vpshufb %xmm4, %xmm0, %xmm0
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
; AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX2-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX2-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpextrb $0, %xmm0, %eax
; AVX2-NEXT: # kill: def $al killed $al killed $eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test_v128i8:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpmovsxbw %ymm1, %zmm2
; AVX512BW-NEXT: vpmovsxbw %ymm0, %zmm3
; AVX512BW-NEXT: vpmullw %zmm2, %zmm3, %zmm2
; AVX512BW-NEXT: vpmovwb %zmm2, %ymm2
; AVX512BW-NEXT: vextracti64x4 $1, %zmm1, %ymm1
; AVX512BW-NEXT: vpmovsxbw %ymm1, %zmm1
; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm0
; AVX512BW-NEXT: vpmovsxbw %ymm0, %zmm0
; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vpmovsxbw %ymm0, %zmm0
; AVX512BW-NEXT: vpmovsxbw %ymm2, %zmm1
; AVX512BW-NEXT: vpmullw %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512BW-NEXT: vpmovsxbw %ymm0, %zmm0
; AVX512BW-NEXT: vpmovsxbw %ymm1, %zmm1
; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512BW-NEXT: vpmovsxbw %ymm0, %zmm0
; AVX512BW-NEXT: vpmovsxbw %ymm1, %zmm1
; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX512BW-NEXT: vpmovsxbw %ymm0, %zmm0
; AVX512BW-NEXT: vpmovsxbw %ymm1, %zmm1
; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX512BW-NEXT: vpmovsxbw %ymm0, %zmm0
; AVX512BW-NEXT: vpmovsxbw %ymm1, %zmm1
; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vpsrlw $8, %xmm0, %xmm1
; AVX512BW-NEXT: vpmovsxbw %ymm0, %zmm0
; AVX512BW-NEXT: vpmovsxbw %ymm1, %zmm1
; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vpextrb $0, %xmm0, %eax
; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: test_v128i8:
; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vpmovsxbw %ymm1, %zmm2
; AVX512BWVL-NEXT: vpmovsxbw %ymm0, %zmm3
; AVX512BWVL-NEXT: vpmullw %zmm2, %zmm3, %zmm2
; AVX512BWVL-NEXT: vpmovwb %zmm2, %ymm2
; AVX512BWVL-NEXT: vextracti64x4 $1, %zmm1, %ymm1
; AVX512BWVL-NEXT: vpmovsxbw %ymm1, %zmm1
; AVX512BWVL-NEXT: vextracti64x4 $1, %zmm0, %ymm0
; AVX512BWVL-NEXT: vpmovsxbw %ymm0, %zmm0
; AVX512BWVL-NEXT: vpmullw %zmm1, %zmm0, %zmm0
; AVX512BWVL-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BWVL-NEXT: vpmovsxbw %ymm0, %zmm0
; AVX512BWVL-NEXT: vpmovsxbw %ymm2, %zmm1
; AVX512BWVL-NEXT: vpmullw %zmm0, %zmm1, %zmm0
; AVX512BWVL-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BWVL-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512BWVL-NEXT: vpmovsxbw %ymm0, %zmm0
; AVX512BWVL-NEXT: vpmovsxbw %ymm1, %zmm1
; AVX512BWVL-NEXT: vpmullw %zmm1, %zmm0, %zmm0
; AVX512BWVL-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512BWVL-NEXT: vpmovsxbw %ymm0, %zmm0
; AVX512BWVL-NEXT: vpmovsxbw %ymm1, %zmm1
; AVX512BWVL-NEXT: vpmullw %zmm1, %zmm0, %zmm0
; AVX512BWVL-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX512BWVL-NEXT: vpmovsxbw %ymm0, %zmm0
; AVX512BWVL-NEXT: vpmovsxbw %ymm1, %zmm1
; AVX512BWVL-NEXT: vpmullw %zmm1, %zmm0, %zmm0
; AVX512BWVL-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BWVL-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX512BWVL-NEXT: vpmovsxbw %ymm0, %zmm0
; AVX512BWVL-NEXT: vpmovsxbw %ymm1, %zmm1
; AVX512BWVL-NEXT: vpmullw %zmm1, %zmm0, %zmm0
; AVX512BWVL-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BWVL-NEXT: vpsrlw $8, %xmm0, %xmm1
; AVX512BWVL-NEXT: vpmovsxbw %ymm0, %zmm0
; AVX512BWVL-NEXT: vpmovsxbw %ymm1, %zmm1
; AVX512BWVL-NEXT: vpmullw %zmm1, %zmm0, %zmm0
; AVX512BWVL-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BWVL-NEXT: vpextrb $0, %xmm0, %eax
; AVX512BWVL-NEXT: # kill: def $al killed $al killed $eax
; AVX512BWVL-NEXT: vzeroupper
; AVX512BWVL-NEXT: retq
;
; AVX512DQ-LABEL: test_v128i8:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vextracti128 $1, %ymm2, %xmm4
; AVX512DQ-NEXT: vpmovsxbw %xmm4, %ymm4
; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm5
; AVX512DQ-NEXT: vpmovsxbw %xmm5, %ymm5
; AVX512DQ-NEXT: vpmullw %ymm4, %ymm5, %ymm4
; AVX512DQ-NEXT: vpmovsxwd %ymm4, %zmm4
; AVX512DQ-NEXT: vpmovdb %zmm4, %xmm4
; AVX512DQ-NEXT: vextracti128 $1, %ymm3, %xmm5
; AVX512DQ-NEXT: vpmovsxbw %xmm5, %ymm5
; AVX512DQ-NEXT: vextracti128 $1, %ymm1, %xmm6
; AVX512DQ-NEXT: vpmovsxbw %xmm6, %ymm6
; AVX512DQ-NEXT: vpmullw %ymm5, %ymm6, %ymm5
; AVX512DQ-NEXT: vpmovsxwd %ymm5, %zmm5
; AVX512DQ-NEXT: vpmovdb %zmm5, %xmm5
; AVX512DQ-NEXT: vpmovsxbw %xmm2, %ymm2
; AVX512DQ-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512DQ-NEXT: vpmullw %ymm2, %ymm0, %ymm0
; AVX512DQ-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
; AVX512DQ-NEXT: vpmovsxbw %xmm3, %ymm2
; AVX512DQ-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX512DQ-NEXT: vpmullw %ymm2, %ymm1, %ymm1
; AVX512DQ-NEXT: vpmovsxwd %ymm1, %zmm1
; AVX512DQ-NEXT: vpmovdb %zmm1, %xmm1
; AVX512DQ-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX512DQ-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512DQ-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512DQ-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
; AVX512DQ-NEXT: vpmovsxbw %xmm5, %ymm1
; AVX512DQ-NEXT: vpmovsxbw %xmm4, %ymm2
; AVX512DQ-NEXT: vpmullw %ymm1, %ymm2, %ymm1
; AVX512DQ-NEXT: vpmovsxwd %ymm1, %zmm1
; AVX512DQ-NEXT: vpmovdb %zmm1, %xmm1
; AVX512DQ-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX512DQ-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512DQ-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512DQ-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512DQ-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512DQ-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX512DQ-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512DQ-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX512DQ-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512DQ-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX512DQ-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512DQ-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
; AVX512DQ-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX512DQ-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512DQ-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX512DQ-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512DQ-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
; AVX512DQ-NEXT: vpsrlw $8, %xmm0, %xmm1
; AVX512DQ-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512DQ-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX512DQ-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512DQ-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
; AVX512DQ-NEXT: vpextrb $0, %xmm0, %eax
; AVX512DQ-NEXT: # kill: def $al killed $al killed $eax
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512DQVL-LABEL: test_v128i8:
; AVX512DQVL: # %bb.0:
; AVX512DQVL-NEXT: vextracti128 $1, %ymm2, %xmm4
; AVX512DQVL-NEXT: vpmovsxbw %xmm4, %ymm4
; AVX512DQVL-NEXT: vextracti128 $1, %ymm0, %xmm5
; AVX512DQVL-NEXT: vpmovsxbw %xmm5, %ymm5
; AVX512DQVL-NEXT: vpmullw %ymm4, %ymm5, %ymm4
; AVX512DQVL-NEXT: vpmovsxwd %ymm4, %zmm4
; AVX512DQVL-NEXT: vpmovdb %zmm4, %xmm4
; AVX512DQVL-NEXT: vextracti128 $1, %ymm3, %xmm5
; AVX512DQVL-NEXT: vpmovsxbw %xmm5, %ymm5
; AVX512DQVL-NEXT: vextracti128 $1, %ymm1, %xmm6
; AVX512DQVL-NEXT: vpmovsxbw %xmm6, %ymm6
; AVX512DQVL-NEXT: vpmullw %ymm5, %ymm6, %ymm5
; AVX512DQVL-NEXT: vpmovsxwd %ymm5, %zmm5
; AVX512DQVL-NEXT: vpmovdb %zmm5, %xmm5
; AVX512DQVL-NEXT: vpmovsxbw %xmm2, %ymm2
; AVX512DQVL-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512DQVL-NEXT: vpmullw %ymm2, %ymm0, %ymm0
; AVX512DQVL-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQVL-NEXT: vpmovdb %zmm0, %xmm0
; AVX512DQVL-NEXT: vpmovsxbw %xmm3, %ymm2
; AVX512DQVL-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX512DQVL-NEXT: vpmullw %ymm2, %ymm1, %ymm1
; AVX512DQVL-NEXT: vpmovsxwd %ymm1, %zmm1
; AVX512DQVL-NEXT: vpmovdb %zmm1, %xmm1
; AVX512DQVL-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX512DQVL-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512DQVL-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512DQVL-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQVL-NEXT: vpmovdb %zmm0, %xmm0
; AVX512DQVL-NEXT: vpmovsxbw %xmm5, %ymm1
; AVX512DQVL-NEXT: vpmovsxbw %xmm4, %ymm2
; AVX512DQVL-NEXT: vpmullw %ymm1, %ymm2, %ymm1
; AVX512DQVL-NEXT: vpmovsxwd %ymm1, %zmm1
; AVX512DQVL-NEXT: vpmovdb %zmm1, %xmm1
; AVX512DQVL-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX512DQVL-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512DQVL-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512DQVL-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQVL-NEXT: vpmovdb %zmm0, %xmm0
; AVX512DQVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512DQVL-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512DQVL-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX512DQVL-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512DQVL-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQVL-NEXT: vpmovdb %zmm0, %xmm0
; AVX512DQVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX512DQVL-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512DQVL-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX512DQVL-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512DQVL-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQVL-NEXT: vpmovdb %zmm0, %xmm0
; AVX512DQVL-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX512DQVL-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512DQVL-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX512DQVL-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512DQVL-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQVL-NEXT: vpmovdb %zmm0, %xmm0
; AVX512DQVL-NEXT: vpsrlw $8, %xmm0, %xmm1
; AVX512DQVL-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512DQVL-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX512DQVL-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512DQVL-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQVL-NEXT: vpmovdb %zmm0, %xmm0
; AVX512DQVL-NEXT: vpextrb $0, %xmm0, %eax
; AVX512DQVL-NEXT: # kill: def $al killed $al killed $eax
; AVX512DQVL-NEXT: vzeroupper
; AVX512DQVL-NEXT: retq
  %1 = call i8 @llvm.experimental.vector.reduce.mul.i8.v128i8(<128 x i8> %a0)
  ret i8 %1
}

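; Declarations of the reduction intrinsics exercised by the tests above.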
declare i64 @llvm.experimental.vector.reduce.mul.i64.v2i64(<2 x i64>)
declare i64 @llvm.experimental.vector.reduce.mul.i64.v4i64(<4 x i64>)
declare i64 @llvm.experimental.vector.reduce.mul.i64.v8i64(<8 x i64>)
declare i64 @llvm.experimental.vector.reduce.mul.i64.v16i64(<16 x i64>)

declare i32 @llvm.experimental.vector.reduce.mul.i32.v4i32(<4 x i32>)
declare i32 @llvm.experimental.vector.reduce.mul.i32.v8i32(<8 x i32>)
declare i32 @llvm.experimental.vector.reduce.mul.i32.v16i32(<16 x i32>)
declare i32 @llvm.experimental.vector.reduce.mul.i32.v32i32(<32 x i32>)

declare i16 @llvm.experimental.vector.reduce.mul.i16.v8i16(<8 x i16>)
declare i16 @llvm.experimental.vector.reduce.mul.i16.v16i16(<16 x i16>)
declare i16 @llvm.experimental.vector.reduce.mul.i16.v32i16(<32 x i16>)
declare i16 @llvm.experimental.vector.reduce.mul.i16.v64i16(<64 x i16>)

declare i8 @llvm.experimental.vector.reduce.mul.i8.v16i8(<16 x i8>)
declare i8 @llvm.experimental.vector.reduce.mul.i8.v32i8(<32 x i8>)
declare i8 @llvm.experimental.vector.reduce.mul.i8.v64i8(<64 x i8>)
declare i8 @llvm.experimental.vector.reduce.mul.i8.v128i8(<128 x i8>)