; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=SSE --check-prefix=SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE --check-prefix=SSE41
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX --check-prefix=AVX512 --check-prefix=AVX512F
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw | FileCheck %s --check-prefix=AVX --check-prefix=AVX512 --check-prefix=AVX512BW

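; x86 has no vector multiply for byte elements, so v16i8 multiplies are
; widened to i16 (unpack+psraw or pmovsxbw), done with pmullw, and the low
; bytes packed back together.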
define <16 x i8> @mul_v16i8c(<16 x i8> %i) nounwind {
; SSE2-LABEL: mul_v16i8c:
; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm1
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [117,117,117,117,117,117,117,117]
; SSE2-NEXT: pmullw %xmm2, %xmm1
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
; SSE2-NEXT: pand %xmm3, %xmm1
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: psraw $8, %xmm0
; SSE2-NEXT: pmullw %xmm2, %xmm0
; SSE2-NEXT: pand %xmm3, %xmm0
; SSE2-NEXT: packuswb %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: mul_v16i8c:
; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovsxbw %xmm0, %xmm1
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [117,117,117,117,117,117,117,117]
; SSE41-NEXT: pmullw %xmm2, %xmm1
; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
; SSE41-NEXT: pand %xmm3, %xmm1
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; SSE41-NEXT: pmovsxbw %xmm0, %xmm0
; SSE41-NEXT: pmullw %xmm2, %xmm0
; SSE41-NEXT: pand %xmm3, %xmm0
; SSE41-NEXT: packuswb %xmm0, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX2-LABEL: mul_v16i8c:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX2-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; AVX2-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpshufb %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512F-LABEL: mul_v16i8c:
; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512F-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
; AVX512F-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: mul_v16i8c:
; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512BW-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
entry:
  %A = mul <16 x i8> %i, < i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117 >
  ret <16 x i8> %A
}

define <8 x i16> @mul_v8i16c(<8 x i16> %i) nounwind {
; SSE-LABEL: mul_v8i16c:
; SSE: # %bb.0: # %entry
; SSE-NEXT: pmullw {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: mul_v8i16c:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
entry:
  %A = mul <8 x i16> %i, < i16 117, i16 117, i16 117, i16 117, i16 117, i16 117, i16 117, i16 117 >
  ret <8 x i16> %A
}

define <4 x i32> @mul_v4i32c(<4 x i32> %i) nounwind {
; SSE2-LABEL: mul_v4i32c:
; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [117,117,117,117]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: pmuludq %xmm1, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: retq
;
; SSE41-LABEL: mul_v4i32c:
; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmulld {{.*}}(%rip), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: mul_v4i32c:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vpbroadcastd {{.*#+}} xmm1 = [117,117,117,117]
; AVX-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
  %A = mul <4 x i32> %i, < i32 117, i32 117, i32 117, i32 117 >
  ret <4 x i32> %A
}

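; There is no 64-bit element multiply before AVX-512DQ, so v2i64 is expanded
; into 32-bit pmuludq partial products; the constant's upper 32 bits are
; known zero, so one cross product is dropped.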
define <2 x i64> @mul_v2i64c(<2 x i64> %i) nounwind {
; SSE-LABEL: mul_v2i64c:
; SSE: # %bb.0: # %entry
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [117,117]
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: pmuludq %xmm1, %xmm2
; SSE-NEXT: psrlq $32, %xmm0
; SSE-NEXT: pmuludq %xmm1, %xmm0
; SSE-NEXT: psllq $32, %xmm0
; SSE-NEXT: paddq %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: mul_v2i64c:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [117,117]
; AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
; AVX-NEXT: vpsrlq $32, %xmm0, %xmm0
; AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpsllq $32, %xmm0, %xmm0
; AVX-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; AVX-NEXT: retq
entry:
  %A = mul <2 x i64> %i, < i64 117, i64 117 >
  ret <2 x i64> %A
}

define <16 x i8> @mul_v16i8(<16 x i8> %i, <16 x i8> %j) nounwind {
; SSE2-LABEL: mul_v16i8:
; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm2
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm3
; SSE2-NEXT: pmullw %xmm2, %xmm3
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
; SSE2-NEXT: pand %xmm2, %xmm3
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: psraw $8, %xmm1
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: psraw $8, %xmm0
; SSE2-NEXT: pmullw %xmm1, %xmm0
; SSE2-NEXT: pand %xmm2, %xmm0
; SSE2-NEXT: packuswb %xmm3, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: mul_v16i8:
; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovsxbw %xmm1, %xmm3
; SSE41-NEXT: pmovsxbw %xmm0, %xmm2
; SSE41-NEXT: pmullw %xmm3, %xmm2
; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
; SSE41-NEXT: pand %xmm3, %xmm2
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
; SSE41-NEXT: pmovsxbw %xmm1, %xmm1
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; SSE41-NEXT: pmovsxbw %xmm0, %xmm0
; SSE41-NEXT: pmullw %xmm1, %xmm0
; SSE41-NEXT: pand %xmm3, %xmm0
; SSE41-NEXT: packuswb %xmm0, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: retq
;
; AVX2-LABEL: mul_v16i8:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX2-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; AVX2-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpshufb %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512F-LABEL: mul_v16i8:
; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX512F-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512F-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: mul_v16i8:
; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX512BW-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512BW-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
entry:
  %A = mul <16 x i8> %i, %j
  ret <16 x i8> %A
}

define <8 x i16> @mul_v8i16(<8 x i16> %i, <8 x i16> %j) nounwind {
; SSE-LABEL: mul_v8i16:
; SSE: # %bb.0: # %entry
; SSE-NEXT: pmullw %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: mul_v8i16:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
  %A = mul <8 x i16> %i, %j
  ret <8 x i16> %A
}

define <4 x i32> @mul_v4i32(<4 x i32> %i, <4 x i32> %j) nounwind {
; SSE2-LABEL: mul_v4i32:
; SSE2: # %bb.0: # %entry
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm2, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: retq
;
; SSE41-LABEL: mul_v4i32:
; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmulld %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: mul_v4i32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
  %A = mul <4 x i32> %i, %j
  ret <4 x i32> %A
}

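; The variable v2i64 case needs both cross products: three pmuludq plus
; shifts and adds.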
define <2 x i64> @mul_v2i64(<2 x i64> %i, <2 x i64> %j) nounwind {
; SSE-LABEL: mul_v2i64:
; SSE: # %bb.0: # %entry
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrlq $32, %xmm2
; SSE-NEXT: pmuludq %xmm1, %xmm2
; SSE-NEXT: movdqa %xmm1, %xmm3
; SSE-NEXT: psrlq $32, %xmm3
; SSE-NEXT: pmuludq %xmm0, %xmm3
; SSE-NEXT: paddq %xmm2, %xmm3
; SSE-NEXT: psllq $32, %xmm3
; SSE-NEXT: pmuludq %xmm1, %xmm0
; SSE-NEXT: paddq %xmm3, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: mul_v2i64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vpsrlq $32, %xmm0, %xmm2
; AVX-NEXT: vpmuludq %xmm1, %xmm2, %xmm2
; AVX-NEXT: vpsrlq $32, %xmm1, %xmm3
; AVX-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
; AVX-NEXT: vpaddq %xmm2, %xmm3, %xmm2
; AVX-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpaddq %xmm2, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
  %A = mul <2 x i64> %i, %j
  ret <2 x i64> %A
}

declare void @foo()

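; The call clobbers all XMM registers; SSE4.1/AVX fold the reload of one
; spilled operand directly into the multiply.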
define <4 x i32> @mul_v4i32spill(<4 x i32> %i, <4 x i32> %j) nounwind {
; SSE2-LABEL: mul_v4i32spill:
; SSE2: # %bb.0: # %entry
; SSE2-NEXT: subq $40, %rsp
; SSE2-NEXT: movaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; SSE2-NEXT: callq foo
; SSE2-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload
; SSE2-NEXT: pmuludq %xmm2, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm1, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: addq $40, %rsp
; SSE2-NEXT: retq
;
; SSE41-LABEL: mul_v4i32spill:
; SSE41: # %bb.0: # %entry
; SSE41-NEXT: subq $40, %rsp
; SSE41-NEXT: movaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
; SSE41-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; SSE41-NEXT: callq foo
; SSE41-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload
; SSE41-NEXT: pmulld {{[0-9]+}}(%rsp), %xmm0 # 16-byte Folded Reload
; SSE41-NEXT: addq $40, %rsp
; SSE41-NEXT: retq
;
; AVX-LABEL: mul_v4i32spill:
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: vmovaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: callq foo
; AVX-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload
; AVX-NEXT: vpmulld {{[0-9]+}}(%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: addq $40, %rsp
; AVX-NEXT: retq
entry:
  ; Use a call to force spills.
  call void @foo()
  %A = mul <4 x i32> %i, %j
  ret <4 x i32> %A
}

define <2 x i64> @mul_v2i64spill(<2 x i64> %i, <2 x i64> %j) nounwind {
; SSE-LABEL: mul_v2i64spill:
; SSE: # %bb.0: # %entry
; SSE-NEXT: subq $40, %rsp
; SSE-NEXT: movaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
; SSE-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; SSE-NEXT: callq foo
; SSE-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrlq $32, %xmm2
; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm3 # 16-byte Reload
; SSE-NEXT: pmuludq %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm3, %xmm1
; SSE-NEXT: psrlq $32, %xmm1
; SSE-NEXT: pmuludq %xmm0, %xmm1
; SSE-NEXT: paddq %xmm2, %xmm1
; SSE-NEXT: psllq $32, %xmm1
; SSE-NEXT: pmuludq %xmm3, %xmm0
; SSE-NEXT: paddq %xmm1, %xmm0
; SSE-NEXT: addq $40, %rsp
; SSE-NEXT: retq
;
; AVX-LABEL: mul_v2i64spill:
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: vmovaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: callq foo
; AVX-NEXT: vmovdqa (%rsp), %xmm3 # 16-byte Reload
; AVX-NEXT: vpsrlq $32, %xmm3, %xmm0
; AVX-NEXT: vmovdqa {{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload
; AVX-NEXT: vpmuludq %xmm2, %xmm0, %xmm0
; AVX-NEXT: vpsrlq $32, %xmm2, %xmm1
; AVX-NEXT: vpmuludq %xmm1, %xmm3, %xmm1
; AVX-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; AVX-NEXT: vpsllq $32, %xmm0, %xmm0
; AVX-NEXT: vpmuludq %xmm2, %xmm3, %xmm1
; AVX-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; AVX-NEXT: addq $40, %rsp
; AVX-NEXT: retq
entry:
  ; Use a call to force spills.
  call void @foo()
  %A = mul <2 x i64> %i, %j
  ret <2 x i64> %A
}

define <32 x i8> @mul_v32i8c(<32 x i8> %i) nounwind {
; SSE2-LABEL: mul_v32i8c:
; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm2
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [117,117,117,117,117,117,117,117]
; SSE2-NEXT: pmullw %xmm3, %xmm2
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
; SSE2-NEXT: pand %xmm4, %xmm2
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: psraw $8, %xmm0
; SSE2-NEXT: pmullw %xmm3, %xmm0
; SSE2-NEXT: pand %xmm4, %xmm0
; SSE2-NEXT: packuswb %xmm2, %xmm0
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm2
; SSE2-NEXT: pmullw %xmm3, %xmm2
; SSE2-NEXT: pand %xmm4, %xmm2
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: psraw $8, %xmm1
; SSE2-NEXT: pmullw %xmm3, %xmm1
; SSE2-NEXT: pand %xmm4, %xmm1
; SSE2-NEXT: packuswb %xmm2, %xmm1
; SSE2-NEXT: retq
;
; SSE41-LABEL: mul_v32i8c:
; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovsxbw %xmm0, %xmm2
; SSE41-NEXT: movdqa {{.*#+}} xmm4 = [117,117,117,117,117,117,117,117]
; SSE41-NEXT: pmullw %xmm4, %xmm2
; SSE41-NEXT: movdqa {{.*#+}} xmm5 = [255,255,255,255,255,255,255,255]
; SSE41-NEXT: pand %xmm5, %xmm2
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; SSE41-NEXT: pmovsxbw %xmm0, %xmm0
; SSE41-NEXT: pmullw %xmm4, %xmm0
; SSE41-NEXT: pand %xmm5, %xmm0
; SSE41-NEXT: packuswb %xmm0, %xmm2
; SSE41-NEXT: pmovsxbw %xmm1, %xmm3
; SSE41-NEXT: pmullw %xmm4, %xmm3
; SSE41-NEXT: pand %xmm5, %xmm3
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
; SSE41-NEXT: pmovsxbw %xmm0, %xmm0
; SSE41-NEXT: pmullw %xmm4, %xmm0
; SSE41-NEXT: pand %xmm5, %xmm0
; SSE41-NEXT: packuswb %xmm0, %xmm3
; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: movdqa %xmm3, %xmm1
; SSE41-NEXT: retq
;
; AVX2-LABEL: mul_v32i8c:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117]
; AVX2-NEXT: vpmullw %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3
; AVX2-NEXT: vmovdqa {{.*#+}} xmm4 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; AVX2-NEXT: vpshufb %xmm4, %xmm3, %xmm3
; AVX2-NEXT: vpshufb %xmm4, %xmm1, %xmm1
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
; AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX2-NEXT: vpmullw %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
; AVX2-NEXT: vpshufb %xmm4, %xmm2, %xmm2
; AVX2-NEXT: vpshufb %xmm4, %xmm0, %xmm0
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: mul_v32i8c:
; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpmovsxbw %xmm0, %ymm1
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = [117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117]
; AVX512F-NEXT: vpmullw %ymm2, %ymm1, %ymm1
; AVX512F-NEXT: vpmovsxwd %ymm1, %zmm1
; AVX512F-NEXT: vpmovdb %zmm1, %xmm1
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX512F-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512F-NEXT: vpmullw %ymm2, %ymm0, %ymm0
; AVX512F-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
; AVX512F-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: mul_v32i8c:
; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpmovsxbw %ymm0, %zmm0
; AVX512BW-NEXT: vpmullw {{.*}}(%rip), %zmm0, %zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: retq
entry:
  %A = mul <32 x i8> %i, < i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117 >
  ret <32 x i8> %A
}

define <16 x i16> @mul_v16i16c(<16 x i16> %i) nounwind {
; SSE-LABEL: mul_v16i16c:
; SSE: # %bb.0: # %entry
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [117,117,117,117,117,117,117,117]
; SSE-NEXT: pmullw %xmm2, %xmm0
; SSE-NEXT: pmullw %xmm2, %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: mul_v16i16c:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
; AVX-NEXT: retq
entry:
  %A = mul <16 x i16> %i, < i16 117, i16 117, i16 117, i16 117, i16 117, i16 117, i16 117, i16 117, i16 117, i16 117, i16 117, i16 117, i16 117, i16 117, i16 117, i16 117 >
  ret <16 x i16> %A
}

define <8 x i32> @mul_v8i32c(<8 x i32> %i) nounwind {
; SSE2-LABEL: mul_v8i32c:
; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [117,117,117,117]
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm2, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: pmuludq %xmm2, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm2, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT: pmuludq %xmm2, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; SSE2-NEXT: retq
;
; SSE41-LABEL: mul_v8i32c:
; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [117,117,117,117]
; SSE41-NEXT: pmulld %xmm2, %xmm0
; SSE41-NEXT: pmulld %xmm2, %xmm1
; SSE41-NEXT: retq
;
; AVX-LABEL: mul_v8i32c:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vpbroadcastd {{.*#+}} ymm1 = [117,117,117,117,117,117,117,117]
; AVX-NEXT: vpmulld %ymm1, %ymm0, %ymm0
; AVX-NEXT: retq
entry:
  %A = mul <8 x i32> %i, < i32 117, i32 117, i32 117, i32 117, i32 117, i32 117, i32 117, i32 117 >
  ret <8 x i32> %A
}

define <4 x i64> @mul_v4i64c(<4 x i64> %i) nounwind {
; SSE-LABEL: mul_v4i64c:
; SSE: # %bb.0: # %entry
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [117,117]
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: pmuludq %xmm2, %xmm3
; SSE-NEXT: psrlq $32, %xmm0
; SSE-NEXT: pmuludq %xmm2, %xmm0
; SSE-NEXT: psllq $32, %xmm0
; SSE-NEXT: paddq %xmm3, %xmm0
; SSE-NEXT: movdqa %xmm1, %xmm3
; SSE-NEXT: pmuludq %xmm2, %xmm3
; SSE-NEXT: psrlq $32, %xmm1
; SSE-NEXT: pmuludq %xmm2, %xmm1
; SSE-NEXT: psllq $32, %xmm1
; SSE-NEXT: paddq %xmm3, %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: mul_v4i64c:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vpbroadcastq {{.*#+}} ymm1 = [117,117,117,117]
; AVX-NEXT: vpmuludq %ymm1, %ymm0, %ymm2
; AVX-NEXT: vpsrlq $32, %ymm0, %ymm0
; AVX-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
; AVX-NEXT: vpsllq $32, %ymm0, %ymm0
; AVX-NEXT: vpaddq %ymm0, %ymm2, %ymm0
; AVX-NEXT: retq
entry:
  %A = mul <4 x i64> %i, < i64 117, i64 117, i64 117, i64 117 >
  ret <4 x i64> %A
}

define <32 x i8> @mul_v32i8(<32 x i8> %i, <32 x i8> %j) nounwind {
; SSE2-LABEL: mul_v32i8:
; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm2, %xmm4
; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm4
; SSE2-NEXT: movdqa %xmm0, %xmm5
; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm5
; SSE2-NEXT: pmullw %xmm4, %xmm5
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
; SSE2-NEXT: pand %xmm4, %xmm5
; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: psraw $8, %xmm2
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: psraw $8, %xmm0
; SSE2-NEXT: pmullw %xmm2, %xmm0
; SSE2-NEXT: pand %xmm4, %xmm0
; SSE2-NEXT: packuswb %xmm5, %xmm0
; SSE2-NEXT: movdqa %xmm3, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm2
; SSE2-NEXT: movdqa %xmm1, %xmm5
; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm5
; SSE2-NEXT: pmullw %xmm2, %xmm5
; SSE2-NEXT: pand %xmm4, %xmm5
; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: psraw $8, %xmm3
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: psraw $8, %xmm1
; SSE2-NEXT: pmullw %xmm3, %xmm1
; SSE2-NEXT: pand %xmm4, %xmm1
; SSE2-NEXT: packuswb %xmm5, %xmm1
; SSE2-NEXT: retq
;
; SSE41-LABEL: mul_v32i8:
; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmovsxbw %xmm2, %xmm5
; SSE41-NEXT: pmovsxbw %xmm0, %xmm4
; SSE41-NEXT: pmullw %xmm5, %xmm4
; SSE41-NEXT: movdqa {{.*#+}} xmm5 = [255,255,255,255,255,255,255,255]
; SSE41-NEXT: pand %xmm5, %xmm4
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
; SSE41-NEXT: pmovsxbw %xmm2, %xmm2
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; SSE41-NEXT: pmovsxbw %xmm0, %xmm0
; SSE41-NEXT: pmullw %xmm2, %xmm0
; SSE41-NEXT: pand %xmm5, %xmm0
; SSE41-NEXT: packuswb %xmm0, %xmm4
; SSE41-NEXT: pmovsxbw %xmm3, %xmm0
; SSE41-NEXT: pmovsxbw %xmm1, %xmm2
; SSE41-NEXT: pmullw %xmm0, %xmm2
; SSE41-NEXT: pand %xmm5, %xmm2
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,3,0,1]
; SSE41-NEXT: pmovsxbw %xmm0, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
; SSE41-NEXT: pmovsxbw %xmm1, %xmm1
; SSE41-NEXT: pmullw %xmm0, %xmm1
; SSE41-NEXT: pand %xmm5, %xmm1
; SSE41-NEXT: packuswb %xmm1, %xmm2
; SSE41-NEXT: movdqa %xmm4, %xmm0
; SSE41-NEXT: movdqa %xmm2, %xmm1
; SSE41-NEXT: retq
;
; AVX2-LABEL: mul_v32i8:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX2-NEXT: vpmovsxbw %xmm2, %ymm2
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm3
; AVX2-NEXT: vpmovsxbw %xmm3, %ymm3
; AVX2-NEXT: vpmullw %ymm2, %ymm3, %ymm2
; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm3
; AVX2-NEXT: vmovdqa {{.*#+}} xmm4 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; AVX2-NEXT: vpshufb %xmm4, %xmm3, %xmm3
; AVX2-NEXT: vpshufb %xmm4, %xmm2, %xmm2
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
; AVX2-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX2-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpshufb %xmm4, %xmm1, %xmm1
; AVX2-NEXT: vpshufb %xmm4, %xmm0, %xmm0
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: mul_v32i8:
; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpmovsxbw %xmm1, %ymm2
; AVX512F-NEXT: vpmovsxbw %xmm0, %ymm3
; AVX512F-NEXT: vpmullw %ymm2, %ymm3, %ymm2
; AVX512F-NEXT: vpmovsxwd %ymm2, %zmm2
; AVX512F-NEXT: vpmovdb %zmm2, %xmm2
; AVX512F-NEXT: vextracti128 $1, %ymm1, %xmm1
; AVX512F-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX512F-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512F-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
; AVX512F-NEXT: vinserti128 $1, %xmm0, %ymm2, %ymm0
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: mul_v32i8:
; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpmovsxbw %ymm1, %zmm1
; AVX512BW-NEXT: vpmovsxbw %ymm0, %zmm0
; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: retq
entry:
  %A = mul <32 x i8> %i, %j
  ret <32 x i8> %A
}

define <16 x i16> @mul_v16i16(<16 x i16> %i, <16 x i16> %j) nounwind {
; SSE-LABEL: mul_v16i16:
; SSE: # %bb.0: # %entry
; SSE-NEXT: pmullw %xmm2, %xmm0
; SSE-NEXT: pmullw %xmm3, %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: mul_v16i16:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX-NEXT: retq
entry:
  %A = mul <16 x i16> %i, %j
  ret <16 x i16> %A
}

define <8 x i32> @mul_v8i32(<8 x i32> %i, <8 x i32> %j) nounwind {
; SSE2-LABEL: mul_v8i32:
; SSE2: # %bb.0: # %entry
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm2, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm4, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm3, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm2, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; SSE2-NEXT: retq
;
; SSE41-LABEL: mul_v8i32:
; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pmulld %xmm2, %xmm0
; SSE41-NEXT: pmulld %xmm3, %xmm1
; SSE41-NEXT: retq
;
; AVX-LABEL: mul_v8i32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmulld %ymm1, %ymm0, %ymm0
; AVX-NEXT: retq
entry:
  %A = mul <8 x i32> %i, %j
  ret <8 x i32> %A
}

define <4 x i64> @mul_v4i64(<4 x i64> %i, <4 x i64> %j) nounwind {
; SSE-LABEL: mul_v4i64:
; SSE: # %bb.0: # %entry
; SSE-NEXT: movdqa %xmm0, %xmm4
; SSE-NEXT: psrlq $32, %xmm4
; SSE-NEXT: pmuludq %xmm2, %xmm4
; SSE-NEXT: movdqa %xmm2, %xmm5
; SSE-NEXT: psrlq $32, %xmm5
; SSE-NEXT: pmuludq %xmm0, %xmm5
; SSE-NEXT: paddq %xmm4, %xmm5
; SSE-NEXT: psllq $32, %xmm5
; SSE-NEXT: pmuludq %xmm2, %xmm0
; SSE-NEXT: paddq %xmm5, %xmm0
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: psrlq $32, %xmm2
; SSE-NEXT: pmuludq %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm3, %xmm4
; SSE-NEXT: psrlq $32, %xmm4
; SSE-NEXT: pmuludq %xmm1, %xmm4
; SSE-NEXT: paddq %xmm2, %xmm4
; SSE-NEXT: psllq $32, %xmm4
; SSE-NEXT: pmuludq %xmm3, %xmm1
; SSE-NEXT: paddq %xmm4, %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: mul_v4i64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vpsrlq $32, %ymm0, %ymm2
; AVX-NEXT: vpmuludq %ymm1, %ymm2, %ymm2
; AVX-NEXT: vpsrlq $32, %ymm1, %ymm3
; AVX-NEXT: vpmuludq %ymm3, %ymm0, %ymm3
; AVX-NEXT: vpaddq %ymm2, %ymm3, %ymm2
; AVX-NEXT: vpsllq $32, %ymm2, %ymm2
; AVX-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
; AVX-NEXT: vpaddq %ymm2, %ymm0, %ymm0
; AVX-NEXT: retq
entry:
  %A = mul <4 x i64> %i, %j
  ret <4 x i64> %A
}

define <64 x i8> @mul_v64i8c(<64 x i8> %i) nounwind {
; SSE2-LABEL: mul_v64i8c:
; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm6
; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm6
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [117,117,117,117,117,117,117,117]
; SSE2-NEXT: pmullw %xmm4, %xmm6
; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [255,255,255,255,255,255,255,255]
; SSE2-NEXT: pand %xmm5, %xmm6
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: psraw $8, %xmm0
; SSE2-NEXT: pmullw %xmm4, %xmm0
; SSE2-NEXT: pand %xmm5, %xmm0
; SSE2-NEXT: packuswb %xmm6, %xmm0
; SSE2-NEXT: movdqa %xmm1, %xmm6
; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm6
; SSE2-NEXT: pmullw %xmm4, %xmm6
; SSE2-NEXT: pand %xmm5, %xmm6
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: psraw $8, %xmm1
; SSE2-NEXT: pmullw %xmm4, %xmm1
; SSE2-NEXT: pand %xmm5, %xmm1
; SSE2-NEXT: packuswb %xmm6, %xmm1
; SSE2-NEXT: movdqa %xmm2, %xmm6
; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm6
; SSE2-NEXT: pmullw %xmm4, %xmm6
; SSE2-NEXT: pand %xmm5, %xmm6
; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: psraw $8, %xmm2
; SSE2-NEXT: pmullw %xmm4, %xmm2
; SSE2-NEXT: pand %xmm5, %xmm2
; SSE2-NEXT: packuswb %xmm6, %xmm2
; SSE2-NEXT: movdqa %xmm3, %xmm6
; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm6
; SSE2-NEXT: pmullw %xmm4, %xmm6
; SSE2-NEXT: pand %xmm5, %xmm6
; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: psraw $8, %xmm3
; SSE2-NEXT: pmullw %xmm4, %xmm3
; SSE2-NEXT: pand %xmm5, %xmm3
; SSE2-NEXT: packuswb %xmm6, %xmm3
; SSE2-NEXT: retq
;
; SSE41-LABEL: mul_v64i8c:
; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movdqa %xmm1, %xmm4
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: pmovsxbw %xmm1, %xmm0
; SSE41-NEXT: movdqa {{.*#+}} xmm6 = [117,117,117,117,117,117,117,117]
; SSE41-NEXT: pmullw %xmm6, %xmm0
; SSE41-NEXT: movdqa {{.*#+}} xmm7 = [255,255,255,255,255,255,255,255]
; SSE41-NEXT: pand %xmm7, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
; SSE41-NEXT: pmovsxbw %xmm1, %xmm1
; SSE41-NEXT: pmullw %xmm6, %xmm1
; SSE41-NEXT: pand %xmm7, %xmm1
; SSE41-NEXT: packuswb %xmm1, %xmm0
; SSE41-NEXT: pmovsxbw %xmm4, %xmm1
; SSE41-NEXT: pmullw %xmm6, %xmm1
; SSE41-NEXT: pand %xmm7, %xmm1
; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm4[2,3,0,1]
; SSE41-NEXT: pmovsxbw %xmm4, %xmm4
; SSE41-NEXT: pmullw %xmm6, %xmm4
; SSE41-NEXT: pand %xmm7, %xmm4
; SSE41-NEXT: packuswb %xmm4, %xmm1
; SSE41-NEXT: pmovsxbw %xmm2, %xmm4
; SSE41-NEXT: pmullw %xmm6, %xmm4
; SSE41-NEXT: pand %xmm7, %xmm4
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
; SSE41-NEXT: pmovsxbw %xmm2, %xmm2
; SSE41-NEXT: pmullw %xmm6, %xmm2
; SSE41-NEXT: pand %xmm7, %xmm2
; SSE41-NEXT: packuswb %xmm2, %xmm4
; SSE41-NEXT: pmovsxbw %xmm3, %xmm5
; SSE41-NEXT: pmullw %xmm6, %xmm5
; SSE41-NEXT: pand %xmm7, %xmm5
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm3[2,3,0,1]
; SSE41-NEXT: pmovsxbw %xmm2, %xmm2
; SSE41-NEXT: pmullw %xmm6, %xmm2
; SSE41-NEXT: pand %xmm7, %xmm2
; SSE41-NEXT: packuswb %xmm2, %xmm5
; SSE41-NEXT: movdqa %xmm4, %xmm2
; SSE41-NEXT: movdqa %xmm5, %xmm3
; SSE41-NEXT: retq
;
; AVX2-LABEL: mul_v64i8c:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
; AVX2-NEXT: vpmovsxbw %xmm2, %ymm2
; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117]
; AVX2-NEXT: vpmullw %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm4
; AVX2-NEXT: vmovdqa {{.*#+}} xmm5 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; AVX2-NEXT: vpshufb %xmm5, %xmm4, %xmm4
; AVX2-NEXT: vpshufb %xmm5, %xmm2, %xmm2
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm4[0]
; AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX2-NEXT: vpmullw %ymm3, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm4
; AVX2-NEXT: vpshufb %xmm5, %xmm4, %xmm4
; AVX2-NEXT: vpshufb %xmm5, %xmm0, %xmm0
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm4[0]
; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX2-NEXT: vpmovsxbw %xmm2, %ymm2
; AVX2-NEXT: vpmullw %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm4
; AVX2-NEXT: vpshufb %xmm5, %xmm4, %xmm4
; AVX2-NEXT: vpshufb %xmm5, %xmm2, %xmm2
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm4[0]
; AVX2-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX2-NEXT: vpmullw %ymm3, %ymm1, %ymm1
; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3
; AVX2-NEXT: vpshufb %xmm5, %xmm3, %xmm3
; AVX2-NEXT: vpshufb %xmm5, %xmm1, %xmm1
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512F-LABEL: mul_v64i8c:
; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpmovsxbw %xmm0, %ymm2
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117]
; AVX512F-NEXT: vpmullw %ymm3, %ymm2, %ymm2
; AVX512F-NEXT: vpmovsxwd %ymm2, %zmm2
; AVX512F-NEXT: vpmovdb %zmm2, %xmm2
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX512F-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512F-NEXT: vpmullw %ymm3, %ymm0, %ymm0
; AVX512F-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
; AVX512F-NEXT: vinserti128 $1, %xmm0, %ymm2, %ymm0
; AVX512F-NEXT: vpmovsxbw %xmm1, %ymm2
; AVX512F-NEXT: vpmullw %ymm3, %ymm2, %ymm2
; AVX512F-NEXT: vpmovsxwd %ymm2, %zmm2
; AVX512F-NEXT: vpmovdb %zmm2, %xmm2
; AVX512F-NEXT: vextracti128 $1, %ymm1, %xmm1
; AVX512F-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX512F-NEXT: vpmullw %ymm3, %ymm1, %ymm1
; AVX512F-NEXT: vpmovsxwd %ymm1, %zmm1
; AVX512F-NEXT: vpmovdb %zmm1, %xmm1
; AVX512F-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: mul_v64i8c:
; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpmovsxbw %ymm0, %zmm1
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117]
; AVX512BW-NEXT: vpmullw %zmm2, %zmm1, %zmm1
; AVX512BW-NEXT: vpmovwb %zmm1, %ymm1
; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm0
; AVX512BW-NEXT: vpmovsxbw %ymm0, %zmm0
; AVX512BW-NEXT: vpmullw %zmm2, %zmm0, %zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
; AVX512BW-NEXT: retq
entry:
  %A = mul <64 x i8> %i, < i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117 >
  ret <64 x i8> %A
}

define <64 x i8> @mul_v64i8(<64 x i8> %i, <64 x i8> %j) nounwind {
; SSE2-LABEL: mul_v64i8:
; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa %xmm4, %xmm8
; SSE2-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm8
; SSE2-NEXT: movdqa %xmm0, %xmm9
; SSE2-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm9
; SSE2-NEXT: pmullw %xmm8, %xmm9
; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [255,255,255,255,255,255,255,255]
; SSE2-NEXT: pand %xmm8, %xmm9
; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: psraw $8, %xmm4
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: psraw $8, %xmm0
; SSE2-NEXT: pmullw %xmm4, %xmm0
; SSE2-NEXT: pand %xmm8, %xmm0
; SSE2-NEXT: packuswb %xmm9, %xmm0
; SSE2-NEXT: movdqa %xmm5, %xmm9
; SSE2-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm9
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm4
; SSE2-NEXT: pmullw %xmm9, %xmm4
; SSE2-NEXT: pand %xmm8, %xmm4
; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: psraw $8, %xmm5
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: psraw $8, %xmm1
; SSE2-NEXT: pmullw %xmm5, %xmm1
; SSE2-NEXT: pand %xmm8, %xmm1
; SSE2-NEXT: packuswb %xmm4, %xmm1
; SSE2-NEXT: movdqa %xmm6, %xmm4
; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm4
; SSE2-NEXT: movdqa %xmm2, %xmm5
; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm5
; SSE2-NEXT: pmullw %xmm4, %xmm5
; SSE2-NEXT: pand %xmm8, %xmm5
; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: psraw $8, %xmm6
; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: psraw $8, %xmm2
; SSE2-NEXT: pmullw %xmm6, %xmm2
; SSE2-NEXT: pand %xmm8, %xmm2
; SSE2-NEXT: packuswb %xmm5, %xmm2
; SSE2-NEXT: movdqa %xmm7, %xmm4
; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm4
; SSE2-NEXT: movdqa %xmm3, %xmm5
; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm5
; SSE2-NEXT: pmullw %xmm4, %xmm5
; SSE2-NEXT: pand %xmm8, %xmm5
; SSE2-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: psraw $8, %xmm7
; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: psraw $8, %xmm3
; SSE2-NEXT: pmullw %xmm7, %xmm3
; SSE2-NEXT: pand %xmm8, %xmm3
; SSE2-NEXT: packuswb %xmm5, %xmm3
; SSE2-NEXT: retq
;
; SSE41-LABEL: mul_v64i8:
; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movdqa %xmm1, %xmm8
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: pmovsxbw %xmm4, %xmm9
; SSE41-NEXT: pmovsxbw %xmm1, %xmm0
; SSE41-NEXT: pmullw %xmm9, %xmm0
; SSE41-NEXT: movdqa {{.*#+}} xmm9 = [255,255,255,255,255,255,255,255]
; SSE41-NEXT: pand %xmm9, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm4[2,3,0,1]
; SSE41-NEXT: pmovsxbw %xmm4, %xmm4
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
; SSE41-NEXT: pmovsxbw %xmm1, %xmm1
; SSE41-NEXT: pmullw %xmm4, %xmm1
; SSE41-NEXT: pand %xmm9, %xmm1
; SSE41-NEXT: packuswb %xmm1, %xmm0
; SSE41-NEXT: pmovsxbw %xmm5, %xmm4
; SSE41-NEXT: pmovsxbw %xmm8, %xmm1
; SSE41-NEXT: pmullw %xmm4, %xmm1
; SSE41-NEXT: pand %xmm9, %xmm1
; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm5[2,3,0,1]
; SSE41-NEXT: pmovsxbw %xmm4, %xmm4
; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm8[2,3,0,1]
; SSE41-NEXT: pmovsxbw %xmm5, %xmm5
; SSE41-NEXT: pmullw %xmm4, %xmm5
; SSE41-NEXT: pand %xmm9, %xmm5
; SSE41-NEXT: packuswb %xmm5, %xmm1
; SSE41-NEXT: pmovsxbw %xmm6, %xmm5
; SSE41-NEXT: pmovsxbw %xmm2, %xmm4
; SSE41-NEXT: pmullw %xmm5, %xmm4
; SSE41-NEXT: pand %xmm9, %xmm4
; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm6[2,3,0,1]
; SSE41-NEXT: pmovsxbw %xmm5, %xmm5
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
; SSE41-NEXT: pmovsxbw %xmm2, %xmm2
; SSE41-NEXT: pmullw %xmm5, %xmm2
; SSE41-NEXT: pand %xmm9, %xmm2
; SSE41-NEXT: packuswb %xmm2, %xmm4
; SSE41-NEXT: pmovsxbw %xmm7, %xmm2
; SSE41-NEXT: pmovsxbw %xmm3, %xmm5
; SSE41-NEXT: pmullw %xmm2, %xmm5
; SSE41-NEXT: pand %xmm9, %xmm5
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm7[2,3,0,1]
; SSE41-NEXT: pmovsxbw %xmm2, %xmm2
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,0,1]
; SSE41-NEXT: pmovsxbw %xmm3, %xmm3
; SSE41-NEXT: pmullw %xmm2, %xmm3
; SSE41-NEXT: pand %xmm9, %xmm3
; SSE41-NEXT: packuswb %xmm3, %xmm5
; SSE41-NEXT: movdqa %xmm4, %xmm2
; SSE41-NEXT: movdqa %xmm5, %xmm3
; SSE41-NEXT: retq
;
; AVX2-LABEL: mul_v64i8:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm4
; AVX2-NEXT: vpmovsxbw %xmm4, %ymm4
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm5
; AVX2-NEXT: vpmovsxbw %xmm5, %ymm5
; AVX2-NEXT: vpmullw %ymm4, %ymm5, %ymm5
; AVX2-NEXT: vextracti128 $1, %ymm5, %xmm6
; AVX2-NEXT: vmovdqa {{.*#+}} xmm4 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; AVX2-NEXT: vpshufb %xmm4, %xmm6, %xmm6
; AVX2-NEXT: vpshufb %xmm4, %xmm5, %xmm5
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm6[0]
; AVX2-NEXT: vpmovsxbw %xmm2, %ymm2
; AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX2-NEXT: vpmullw %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
; AVX2-NEXT: vpshufb %xmm4, %xmm2, %xmm2
; AVX2-NEXT: vpshufb %xmm4, %xmm0, %xmm0
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; AVX2-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm2
; AVX2-NEXT: vpmovsxbw %xmm2, %ymm2
; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm5
; AVX2-NEXT: vpmovsxbw %xmm5, %ymm5
; AVX2-NEXT: vpmullw %ymm2, %ymm5, %ymm2
; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm5
; AVX2-NEXT: vpshufb %xmm4, %xmm5, %xmm5
; AVX2-NEXT: vpshufb %xmm4, %xmm2, %xmm2
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm5[0]
; AVX2-NEXT: vpmovsxbw %xmm3, %ymm3
; AVX2-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX2-NEXT: vpmullw %ymm3, %ymm1, %ymm1
; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3
; AVX2-NEXT: vpshufb %xmm4, %xmm3, %xmm3
; AVX2-NEXT: vpshufb %xmm4, %xmm1, %xmm1
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512F-LABEL: mul_v64i8:
; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpmovsxbw %xmm2, %ymm4
; AVX512F-NEXT: vpmovsxbw %xmm0, %ymm5
; AVX512F-NEXT: vpmullw %ymm4, %ymm5, %ymm4
; AVX512F-NEXT: vpmovsxwd %ymm4, %zmm4
; AVX512F-NEXT: vpmovdb %zmm4, %xmm4
; AVX512F-NEXT: vextracti128 $1, %ymm2, %xmm2
; AVX512F-NEXT: vpmovsxbw %xmm2, %ymm2
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX512F-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512F-NEXT: vpmullw %ymm2, %ymm0, %ymm0
; AVX512F-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
; AVX512F-NEXT: vinserti128 $1, %xmm0, %ymm4, %ymm0
; AVX512F-NEXT: vpmovsxbw %xmm3, %ymm2
; AVX512F-NEXT: vpmovsxbw %xmm1, %ymm4
; AVX512F-NEXT: vpmullw %ymm2, %ymm4, %ymm2
; AVX512F-NEXT: vpmovsxwd %ymm2, %zmm2
; AVX512F-NEXT: vpmovdb %zmm2, %xmm2
; AVX512F-NEXT: vextracti128 $1, %ymm3, %xmm3
; AVX512F-NEXT: vpmovsxbw %xmm3, %ymm3
; AVX512F-NEXT: vextracti128 $1, %ymm1, %xmm1
; AVX512F-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX512F-NEXT: vpmullw %ymm3, %ymm1, %ymm1
; AVX512F-NEXT: vpmovsxwd %ymm1, %zmm1
; AVX512F-NEXT: vpmovdb %zmm1, %xmm1
; AVX512F-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: mul_v64i8:
; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpmovsxbw %ymm1, %zmm2
; AVX512BW-NEXT: vpmovsxbw %ymm0, %zmm3
; AVX512BW-NEXT: vpmullw %zmm2, %zmm3, %zmm2
; AVX512BW-NEXT: vpmovwb %zmm2, %ymm2
; AVX512BW-NEXT: vextracti64x4 $1, %zmm1, %ymm1
; AVX512BW-NEXT: vpmovsxbw %ymm1, %zmm1
; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm0
; AVX512BW-NEXT: vpmovsxbw %ymm0, %zmm0
; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm2, %zmm0
; AVX512BW-NEXT: retq
entry:
  %A = mul <64 x i8> %i, %j
  ret <64 x i8> %A
}

; PR30845
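; Both operands are zero-extended from i32, so each 64-bit product can be
; formed with a single pmuludq; the shuffle then keeps the high halves.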
define <4 x i32> @mul_v4i64_zero_upper(<4 x i32> %val1, <4 x i32> %val2) {
; SSE2-LABEL: mul_v4i64_zero_upper:
; SSE2: # %bb.0: # %entry
; SSE2-NEXT: pxor %xmm3, %xmm3
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; SSE2-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
; SSE2-NEXT: pmuludq %xmm4, %xmm2
; SSE2-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
; SSE2-NEXT: pmuludq %xmm0, %xmm1
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,3],xmm1[1,3]
; SSE2-NEXT: movaps %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: mul_v4i64_zero_upper:
; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm4 = xmm0[0],zero,xmm0[1],zero
; SSE41-NEXT: pmuludq %xmm2, %xmm4
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero
; SSE41-NEXT: pmuludq %xmm3, %xmm0
; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm4[1,3]
; SSE41-NEXT: retq
;
; AVX-LABEL: mul_v4i64_zero_upper:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
; AVX-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
; AVX-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
entry:
  %val1a = zext <4 x i32> %val1 to <4 x i64>
  %val2a = zext <4 x i32> %val2 to <4 x i64>
  %res64 = mul <4 x i64> %val1a, %val2a
  %rescast = bitcast <4 x i64> %res64 to <8 x i32>
  %res = shufflevector <8 x i32> %rescast, <8 x i32> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
  ret <4 x i32> %res
}

define <4 x i32> @mul_v4i64_zero_upper_left(<4 x i32> %val1, <4 x i64> %val2) {
; SSE2-LABEL: mul_v4i64_zero_upper_left:
; SSE2: # %bb.0: # %entry
; SSE2-NEXT: pxor %xmm3, %xmm3
; SSE2-NEXT: movdqa %xmm0, %xmm4
; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
; SSE2-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: pmuludq %xmm2, %xmm3
; SSE2-NEXT: psrlq $32, %xmm2
; SSE2-NEXT: pmuludq %xmm0, %xmm2
; SSE2-NEXT: psllq $32, %xmm2
; SSE2-NEXT: paddq %xmm3, %xmm2
; SSE2-NEXT: movdqa %xmm4, %xmm0
; SSE2-NEXT: pmuludq %xmm1, %xmm0
; SSE2-NEXT: psrlq $32, %xmm1
; SSE2-NEXT: pmuludq %xmm4, %xmm1
; SSE2-NEXT: psllq $32, %xmm1
; SSE2-NEXT: paddq %xmm1, %xmm0
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm2[1,3]
; SSE2-NEXT: retq
;
; SSE41-LABEL: mul_v4i64_zero_upper_left:
; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm4 = xmm0[0],zero,xmm0[1],zero
; SSE41-NEXT: movdqa %xmm4, %xmm0
; SSE41-NEXT: pmuludq %xmm1, %xmm0
; SSE41-NEXT: psrlq $32, %xmm1
; SSE41-NEXT: pmuludq %xmm4, %xmm1
; SSE41-NEXT: psllq $32, %xmm1
; SSE41-NEXT: paddq %xmm1, %xmm0
; SSE41-NEXT: movdqa %xmm3, %xmm1
; SSE41-NEXT: pmuludq %xmm2, %xmm1
; SSE41-NEXT: psrlq $32, %xmm2
; SSE41-NEXT: pmuludq %xmm3, %xmm2
; SSE41-NEXT: psllq $32, %xmm2
; SSE41-NEXT: paddq %xmm1, %xmm2
; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm2[1,3]
; SSE41-NEXT: retq
;
; AVX-LABEL: mul_v4i64_zero_upper_left:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX-NEXT: vpmuludq %ymm1, %ymm0, %ymm2
; AVX-NEXT: vpsrlq $32, %ymm1, %ymm1
; AVX-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
; AVX-NEXT: vpsllq $32, %ymm0, %ymm0
; AVX-NEXT: vpaddq %ymm0, %ymm2, %ymm0
; AVX-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
entry:
  %val1a = zext <4 x i32> %val1 to <4 x i64>
  %res64 = mul <4 x i64> %val1a, %val2
  %rescast = bitcast <4 x i64> %res64 to <8 x i32>
  %res = shufflevector <8 x i32> %rescast, <8 x i32> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
  ret <4 x i32> %res
}

define <4 x i32> @mul_v4i64_zero_lower(<4 x i32> %val1, <4 x i64> %val2) {
; SSE2-LABEL: mul_v4i64_zero_lower:
; SSE2: # %bb.0: # %entry
; SSE2-NEXT: pxor %xmm4, %xmm4
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
; SSE2-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm4[2],xmm0[3],xmm4[3]
; SSE2-NEXT: psrlq $32, %xmm2
; SSE2-NEXT: pmuludq %xmm0, %xmm2
; SSE2-NEXT: psllq $32, %xmm2
; SSE2-NEXT: psrlq $32, %xmm1
; SSE2-NEXT: pmuludq %xmm1, %xmm3
; SSE2-NEXT: psllq $32, %xmm3
; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,3],xmm2[1,3]
; SSE2-NEXT: movaps %xmm3, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: mul_v4i64_zero_lower:
; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; SSE41-NEXT: psrlq $32, %xmm1
; SSE41-NEXT: pmuludq %xmm1, %xmm0
; SSE41-NEXT: psllq $32, %xmm0
; SSE41-NEXT: psrlq $32, %xmm2
; SSE41-NEXT: pmuludq %xmm3, %xmm2
; SSE41-NEXT: psllq $32, %xmm2
; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm2[1,3]
; SSE41-NEXT: retq
;
; AVX-LABEL: mul_v4i64_zero_lower:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX-NEXT: vpsrlq $32, %ymm1, %ymm1
; AVX-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
; AVX-NEXT: vpsllq $32, %ymm0, %ymm0
; AVX-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
entry:
  %val1a = zext <4 x i32> %val1 to <4 x i64>
  %val2a = and <4 x i64> %val2, <i64 -4294967296, i64 -4294967296, i64 -4294967296, i64 -4294967296>
  %res64 = mul <4 x i64> %val1a, %val2a
  %rescast = bitcast <4 x i64> %res64 to <8 x i32>
  %res = shufflevector <8 x i32> %rescast, <8 x i32> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
  ret <4 x i32> %res
}

define <8 x i32> @mul_v8i64_zero_upper(<8 x i32> %val1, <8 x i32> %val2) {
; SSE2-LABEL: mul_v8i64_zero_upper:
; SSE2: # %bb.0: # %entry
; SSE2-NEXT: pxor %xmm6, %xmm6
; SSE2-NEXT: movdqa %xmm0, %xmm4
; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1]
; SSE2-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm6[2],xmm0[3],xmm6[3]
; SSE2-NEXT: movdqa %xmm1, %xmm5
; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
; SSE2-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm6[2],xmm1[3],xmm6[3]
; SSE2-NEXT: movdqa %xmm2, %xmm7
; SSE2-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
; SSE2-NEXT: pmuludq %xmm7, %xmm4
; SSE2-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm6[2],xmm2[3],xmm6[3]
; SSE2-NEXT: pmuludq %xmm0, %xmm2
; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,3],xmm2[1,3]
; SSE2-NEXT: movdqa %xmm3, %xmm0
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1]
; SSE2-NEXT: pmuludq %xmm0, %xmm5
; SSE2-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm6[2],xmm3[3],xmm6[3]
; SSE2-NEXT: pmuludq %xmm1, %xmm3
; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,3],xmm3[1,3]
; SSE2-NEXT: movaps %xmm4, %xmm0
; SSE2-NEXT: movaps %xmm5, %xmm1
; SSE2-NEXT: retq
;
; SSE41-LABEL: mul_v8i64_zero_upper:
; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm0[2,3,0,1]
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm5 = xmm0[0],zero,xmm0[1],zero
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm6 = xmm0[0],zero,xmm0[1],zero
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm7 = xmm1[0],zero,xmm1[1],zero
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero
; SSE41-NEXT: pmuludq %xmm4, %xmm1
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm2[0],zero,xmm2[1],zero
; SSE41-NEXT: pmuludq %xmm5, %xmm0
; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm3[2,3,0,1]
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
; SSE41-NEXT: pmuludq %xmm6, %xmm2
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm1 = xmm3[0],zero,xmm3[1],zero
; SSE41-NEXT: pmuludq %xmm7, %xmm1
; SSE41-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,3],xmm2[1,3]
; SSE41-NEXT: retq
;
; AVX2-LABEL: mul_v8i64_zero_upper:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm3 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
; AVX2-NEXT: vpmuludq %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1
; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
; AVX2-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vshufps {{.*#+}} ymm0 = ymm2[1,3],ymm0[1,3],ymm2[5,7],ymm0[5,7]
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT: retq
;
; AVX512-LABEL: mul_v8i64_zero_upper:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovzxdq {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
; AVX512-NEXT: vpmovzxdq {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero
; AVX512-NEXT: vpmuludq %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,3],ymm1[1,3],ymm0[5,7],ymm1[5,7]
; AVX512-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX512-NEXT: retq
entry:
  %val1a = zext <8 x i32> %val1 to <8 x i64>
  %val2a = zext <8 x i32> %val2 to <8 x i64>
  %res64 = mul <8 x i64> %val1a, %val2a
  %rescast = bitcast <8 x i64> %res64 to <16 x i32>
  %res = shufflevector <16 x i32> %rescast, <16 x i32> undef, <8 x i32> <i32 1, i32 3, i32 5, i32 7,i32 9, i32 11, i32 13, i32 15 >
  ret <8 x i32> %res
}

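; With sign-extended operands, pmuldq (SSE4.1 and later) can form the 64-bit
; products directly.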
define <8 x i64> @mul_v8i64_sext(<8 x i16> %val1, <8 x i32> %val2) {
; SSE2-LABEL: mul_v8i64_sext:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm1[4],xmm9[5],xmm1[5],xmm9[6],xmm1[6],xmm9[7],xmm1[7]
; SSE2-NEXT: movdqa %xmm9, %xmm0
; SSE2-NEXT: psrad $31, %xmm0
; SSE2-NEXT: psrad $16, %xmm9
; SSE2-NEXT: punpckldq {{.*#+}} xmm9 = xmm9[0],xmm0[0],xmm9[1],xmm0[1]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: psrad $31, %xmm3
; SSE2-NEXT: psrad $16, %xmm0
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,0,1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm8 = xmm3[0,2,2,3,4,5,6,7]
; SSE2-NEXT: movdqa %xmm8, %xmm3
; SSE2-NEXT: psrad $31, %xmm3
; SSE2-NEXT: psrad $16, %xmm8
; SSE2-NEXT: punpckldq {{.*#+}} xmm8 = xmm8[0],xmm3[0],xmm8[1],xmm3[1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm7 = xmm1[0,2,2,3,4,5,6,7]
; SSE2-NEXT: movdqa %xmm7, %xmm1
; SSE2-NEXT: psrad $31, %xmm1
; SSE2-NEXT: psrad $16, %xmm7
; SSE2-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm1[0],xmm7[1],xmm1[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[2,3,0,1]
; SSE2-NEXT: movdqa %xmm3, %xmm1
; SSE2-NEXT: psrad $31, %xmm1
; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
; SSE2-NEXT: movdqa %xmm2, %xmm1
; SSE2-NEXT: psrad $31, %xmm1
; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm4[2,3,0,1]
; SSE2-NEXT: movdqa %xmm1, %xmm5
; SSE2-NEXT: psrad $31, %xmm5
; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1]
; SSE2-NEXT: movdqa %xmm4, %xmm5
; SSE2-NEXT: psrad $31, %xmm5
; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
; SSE2-NEXT: movdqa %xmm4, %xmm5
; SSE2-NEXT: psrlq $32, %xmm5
; SSE2-NEXT: pmuludq %xmm0, %xmm5
; SSE2-NEXT: movdqa %xmm0, %xmm6
; SSE2-NEXT: psrlq $32, %xmm6
; SSE2-NEXT: pmuludq %xmm4, %xmm6
; SSE2-NEXT: paddq %xmm5, %xmm6
; SSE2-NEXT: psllq $32, %xmm6
; SSE2-NEXT: pmuludq %xmm4, %xmm0
; SSE2-NEXT: paddq %xmm6, %xmm0
; SSE2-NEXT: movdqa %xmm7, %xmm4
; SSE2-NEXT: psrlq $32, %xmm4
; SSE2-NEXT: pmuludq %xmm1, %xmm4
; SSE2-NEXT: movdqa %xmm1, %xmm5
; SSE2-NEXT: psrlq $32, %xmm5
; SSE2-NEXT: pmuludq %xmm7, %xmm5
; SSE2-NEXT: paddq %xmm4, %xmm5
; SSE2-NEXT: psllq $32, %xmm5
; SSE2-NEXT: pmuludq %xmm7, %xmm1
; SSE2-NEXT: paddq %xmm5, %xmm1
; SSE2-NEXT: movdqa %xmm2, %xmm4
; SSE2-NEXT: psrlq $32, %xmm4
; SSE2-NEXT: pmuludq %xmm9, %xmm4
; SSE2-NEXT: movdqa %xmm9, %xmm5
; SSE2-NEXT: psrlq $32, %xmm5
; SSE2-NEXT: pmuludq %xmm2, %xmm5
; SSE2-NEXT: paddq %xmm4, %xmm5
; SSE2-NEXT: psllq $32, %xmm5
; SSE2-NEXT: pmuludq %xmm9, %xmm2
; SSE2-NEXT: paddq %xmm5, %xmm2
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: psrlq $32, %xmm4
; SSE2-NEXT: pmuludq %xmm8, %xmm4
; SSE2-NEXT: movdqa %xmm8, %xmm5
; SSE2-NEXT: psrlq $32, %xmm5
; SSE2-NEXT: pmuludq %xmm3, %xmm5
; SSE2-NEXT: paddq %xmm4, %xmm5
; SSE2-NEXT: psllq $32, %xmm5
; SSE2-NEXT: pmuludq %xmm8, %xmm3
; SSE2-NEXT: paddq %xmm5, %xmm3
; SSE2-NEXT: retq
;
; SSE41-LABEL: mul_v8i64_sext:
; SSE41: # %bb.0:
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[3,1,2,3]
; SSE41-NEXT: pmovsxwq %xmm3, %xmm4
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
; SSE41-NEXT: pmovsxwq %xmm3, %xmm5
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,2,3]
; SSE41-NEXT: pmovsxwq %xmm3, %xmm6
; SSE41-NEXT: pmovsxwq %xmm0, %xmm7
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
; SSE41-NEXT: pmovsxdq %xmm0, %xmm3
; SSE41-NEXT: pmuldq %xmm4, %xmm3
; SSE41-NEXT: pmovsxdq %xmm2, %xmm2
; SSE41-NEXT: pmuldq %xmm5, %xmm2
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
; SSE41-NEXT: pmovsxdq %xmm0, %xmm4
; SSE41-NEXT: pmuldq %xmm6, %xmm4
; SSE41-NEXT: pmovsxdq %xmm1, %xmm0
; SSE41-NEXT: pmuldq %xmm7, %xmm0
; SSE41-NEXT: movdqa %xmm4, %xmm1
; SSE41-NEXT: retq
;
; AVX2-LABEL: mul_v8i64_sext:
; AVX2: # %bb.0:
; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
; AVX2-NEXT: vpmovsxwq %xmm2, %ymm2
; AVX2-NEXT: vpmovsxwq %xmm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3
; AVX2-NEXT: vpmovsxdq %xmm3, %ymm3
; AVX2-NEXT: vpmuldq %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpmovsxdq %xmm1, %ymm1
; AVX2-NEXT: vpmuldq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vmovdqa %ymm2, %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: mul_v8i64_sext:
; AVX512: # %bb.0:
; AVX512-NEXT: vpmovsxwq %xmm0, %zmm0
; AVX512-NEXT: vpmovsxdq %ymm1, %zmm1
; AVX512-NEXT: vpmuldq %zmm1, %zmm0, %zmm0
; AVX512-NEXT: retq
  %1 = sext <8 x i16> %val1 to <8 x i64>
  %2 = sext <8 x i32> %val2 to <8 x i64>
  %3 = mul <8 x i64> %1, %2
  ret <8 x i64> %3
}