From 2b96841d1d00400b72ed8c1b9a42d9103a636f88 Mon Sep 17 00:00:00 2001
From: Simon Pilgrim
Date: Fri, 29 Sep 2017 10:02:01 +0000
Subject: [PATCH] [X86][SSE] Added more tests for vector multiplications as
 utility for D37896

Added additional tests for vector multiplications with multipliers that are:
* powers of 2 displaced by 1,
* products of a power of 2 displaced by 1 with another power of 2.

Patch by @pacxx (Michael Haidl)

Differential Revision: https://reviews.llvm.org/D38350

llvm-svn: 314504
---
 llvm/test/CodeGen/X86/vector-mul.ll | 302 ++++++++++++++++++++++++++++
 1 file changed, 302 insertions(+)

diff --git a/llvm/test/CodeGen/X86/vector-mul.ll b/llvm/test/CodeGen/X86/vector-mul.ll
index acfe06a83492..880bf5591d15 100644
--- a/llvm/test/CodeGen/X86/vector-mul.ll
+++ b/llvm/test/CodeGen/X86/vector-mul.ll
@@ -701,6 +701,236 @@ define <2 x i64> @mul_v2i64_15_63(<2 x i64> %a0) nounwind {
   ret <2 x i64> %1
 }
 
+define <2 x i64> @mul_v2i64_neg_15_63(<2 x i64> %a0) nounwind {
+; X86-LABEL: mul_v2i64_neg_15_63:
+; X86: # BB#0:
+; X86-NEXT: movdqa {{.*#+}} xmm1 = [4294967281,4294967295,4294967233,4294967295]
+; X86-NEXT: movdqa %xmm0, %xmm2
+; X86-NEXT: pmuludq %xmm1, %xmm2
+; X86-NEXT: movdqa %xmm0, %xmm3
+; X86-NEXT: psrlq $32, %xmm3
+; X86-NEXT: pmuludq %xmm1, %xmm3
+; X86-NEXT: pmuludq {{\.LCPI.*}}, %xmm0
+; X86-NEXT: paddq %xmm3, %xmm0
+; X86-NEXT: psllq $32, %xmm0
+; X86-NEXT: paddq %xmm2, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: mul_v2i64_neg_15_63:
+; X64: # BB#0:
+; X64-NEXT: movdqa {{.*#+}} xmm1 = [18446744073709551601,18446744073709551553]
+; X64-NEXT: movdqa %xmm0, %xmm2
+; X64-NEXT: pmuludq %xmm1, %xmm2
+; X64-NEXT: movdqa %xmm0, %xmm3
+; X64-NEXT: psrlq $32, %xmm3
+; X64-NEXT: pmuludq %xmm1, %xmm3
+; X64-NEXT: pmuludq {{.*}}(%rip), %xmm0
+; X64-NEXT: paddq %xmm3, %xmm0
+; X64-NEXT: psllq $32, %xmm0
+; X64-NEXT: paddq %xmm2, %xmm0
+; X64-NEXT: retq
+;
+; X64-AVX-LABEL: mul_v2i64_neg_15_63:
+; X64-AVX: # BB#0:
+; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [18446744073709551601,18446744073709551553]
+; X64-AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
+; X64-AVX-NEXT: vpsrlq $32, %xmm0, %xmm3
+; X64-AVX-NEXT: vpmuludq %xmm1, %xmm3, %xmm1
+; X64-AVX-NEXT: vpmuludq {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; X64-AVX-NEXT: vpsllq $32, %xmm0, %xmm0
+; X64-AVX-NEXT: vpaddq %xmm0, %xmm2, %xmm0
+; X64-AVX-NEXT: retq
+  %1 = mul <2 x i64> %a0, <i64 -15, i64 -63>
+  ret <2 x i64> %1
+}
+
+define <2 x i64> @mul_v2i64_neg_17_65(<2 x i64> %a0) nounwind {
+; X86-LABEL: mul_v2i64_neg_17_65:
+; X86: # BB#0:
+; X86-NEXT: movdqa {{.*#+}} xmm1 = [4294967279,4294967295,4294967231,4294967295]
+; X86-NEXT: movdqa %xmm0, %xmm2
+; X86-NEXT: pmuludq %xmm1, %xmm2
+; X86-NEXT: movdqa %xmm0, %xmm3
+; X86-NEXT: psrlq $32, %xmm3
+; X86-NEXT: pmuludq %xmm1, %xmm3
+; X86-NEXT: pmuludq {{\.LCPI.*}}, %xmm0
+; X86-NEXT: paddq %xmm3, %xmm0
+; X86-NEXT: psllq $32, %xmm0
+; X86-NEXT: paddq %xmm2, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: mul_v2i64_neg_17_65:
+; X64: # BB#0:
+; X64-NEXT: movdqa {{.*#+}} xmm1 = [18446744073709551599,18446744073709551551]
+; X64-NEXT: movdqa %xmm0, %xmm2
+; X64-NEXT: pmuludq %xmm1, %xmm2
+; X64-NEXT: movdqa %xmm0, %xmm3
+; X64-NEXT: psrlq $32, %xmm3
+; X64-NEXT: pmuludq %xmm1, %xmm3
+; X64-NEXT: pmuludq {{.*}}(%rip), %xmm0
+; X64-NEXT: paddq %xmm3, %xmm0
+; X64-NEXT: psllq $32, %xmm0
+; X64-NEXT: paddq %xmm2, %xmm0
+; X64-NEXT: retq
+;
+; X64-AVX-LABEL: mul_v2i64_neg_17_65:
+; X64-AVX: # BB#0:
+; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [18446744073709551599,18446744073709551551]
+; X64-AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
+; X64-AVX-NEXT: vpsrlq $32, %xmm0, %xmm3
+; X64-AVX-NEXT: vpmuludq %xmm1, %xmm3, %xmm1
+; X64-AVX-NEXT: vpmuludq {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; X64-AVX-NEXT: vpsllq $32, %xmm0, %xmm0
+; X64-AVX-NEXT: vpaddq %xmm0, %xmm2, %xmm0
+; X64-AVX-NEXT: retq
+  %1 = mul <2 x i64> %a0, <i64 -17, i64 -65>
+  ret <2 x i64> %1
+}
+
+define <2 x i64> @mul_v2i64_0_1(<2 x i64> %a0) nounwind {
+; X86-LABEL: mul_v2i64_0_1:
+; X86: # BB#0:
+; X86-NEXT: movdqa {{.*#+}} xmm1 = [0,0,1,0]
+; X86-NEXT: movdqa %xmm0, %xmm2
+; X86-NEXT: pmuludq %xmm1, %xmm2
+; X86-NEXT: psrlq $32, %xmm0
+; X86-NEXT: pmuludq %xmm1, %xmm0
+; X86-NEXT: psllq $32, %xmm0
+; X86-NEXT: paddq %xmm2, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: mul_v2i64_0_1:
+; X64: # BB#0:
+; X64-NEXT: movl $1, %eax
+; X64-NEXT: movq %rax, %xmm1
+; X64-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4,5,6,7]
+; X64-NEXT: movdqa %xmm0, %xmm2
+; X64-NEXT: pmuludq %xmm1, %xmm2
+; X64-NEXT: psrlq $32, %xmm0
+; X64-NEXT: pmuludq %xmm1, %xmm0
+; X64-NEXT: psllq $32, %xmm0
+; X64-NEXT: paddq %xmm2, %xmm0
+; X64-NEXT: retq
+;
+; X64-AVX-LABEL: mul_v2i64_0_1:
+; X64-AVX: # BB#0:
+; X64-AVX-NEXT: movl $1, %eax
+; X64-AVX-NEXT: vmovq %rax, %xmm1
+; X64-AVX-NEXT: vpslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4,5,6,7]
+; X64-AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
+; X64-AVX-NEXT: vpsrlq $32, %xmm0, %xmm0
+; X64-AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
+; X64-AVX-NEXT: vpsllq $32, %xmm0, %xmm0
+; X64-AVX-NEXT: vpaddq %xmm0, %xmm2, %xmm0
+; X64-AVX-NEXT: retq
+  %1 = mul <2 x i64> %a0, <i64 0, i64 1>
+  ret <2 x i64> %1
+}
+
+define <2 x i64> @mul_v2i64_neg_0_1(<2 x i64> %a0) nounwind {
+; X86-LABEL: mul_v2i64_neg_0_1:
+; X86: # BB#0:
+; X86-NEXT: movdqa {{.*#+}} xmm1 = [0,0,4294967295,4294967295]
+; X86-NEXT: movdqa %xmm0, %xmm2
+; X86-NEXT: pmuludq %xmm1, %xmm2
+; X86-NEXT: movdqa %xmm0, %xmm3
+; X86-NEXT: psrlq $32, %xmm3
+; X86-NEXT: pmuludq %xmm1, %xmm3
+; X86-NEXT: pmuludq {{\.LCPI.*}}, %xmm0
+; X86-NEXT: paddq %xmm3, %xmm0
+; X86-NEXT: psllq $32, %xmm0
+; X86-NEXT: paddq %xmm2, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: mul_v2i64_neg_0_1:
+; X64: # BB#0:
+; X64-NEXT: movdqa %xmm0, %xmm1
+; X64-NEXT: psrlq $32, %xmm1
+; X64-NEXT: movq $-1, %rax
+; X64-NEXT: movq %rax, %xmm2
+; X64-NEXT: pslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0,1,2,3,4,5,6,7]
+; X64-NEXT: pmuludq %xmm2, %xmm1
+; X64-NEXT: movl $4294967295, %eax # imm = 0xFFFFFFFF
+; X64-NEXT: movq %rax, %xmm3
+; X64-NEXT: pslldq {{.*#+}} xmm3 = zero,zero,zero,zero,zero,zero,zero,zero,xmm3[0,1,2,3,4,5,6,7]
+; X64-NEXT: pmuludq %xmm0, %xmm3
+; X64-NEXT: paddq %xmm1, %xmm3
+; X64-NEXT: psllq $32, %xmm3
+; X64-NEXT: pmuludq %xmm2, %xmm0
+; X64-NEXT: paddq %xmm3, %xmm0
+; X64-NEXT: retq
+;
+; X64-AVX-LABEL: mul_v2i64_neg_0_1:
+; X64-AVX: # BB#0:
+; X64-AVX-NEXT: vpsrlq $32, %xmm0, %xmm1
+; X64-AVX-NEXT: movq $-1, %rax
+; X64-AVX-NEXT: vmovq %rax, %xmm2
+; X64-AVX-NEXT: vpslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0,1,2,3,4,5,6,7]
+; X64-AVX-NEXT: vpmuludq %xmm2, %xmm1, %xmm1
+; X64-AVX-NEXT: movl $4294967295, %eax # imm = 0xFFFFFFFF
+; X64-AVX-NEXT: vmovq %rax, %xmm3
+; X64-AVX-NEXT: vpslldq {{.*#+}} xmm3 = zero,zero,zero,zero,zero,zero,zero,zero,xmm3[0,1,2,3,4,5,6,7]
+; X64-AVX-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
+; X64-AVX-NEXT: vpaddq %xmm1, %xmm3, %xmm1
+; X64-AVX-NEXT: vpsllq $32, %xmm1, %xmm1
+; X64-AVX-NEXT: vpmuludq %xmm2, %xmm0, %xmm0
+; X64-AVX-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; X64-AVX-NEXT: retq
+  %1 = mul <2 x i64> %a0, <i64 0, i64 -1>
+  ret <2 x i64> %1
+}
+
+define <2 x i64> @mul_v2i64_15_neg_63(<2 x i64> %a0) nounwind {
+; X86-LABEL: mul_v2i64_15_neg_63:
+; X86: # BB#0:
+; X86-NEXT: movdqa {{.*#+}} xmm1 = [15,0,4294967233,4294967295]
+; X86-NEXT: movdqa %xmm0, %xmm2
+; X86-NEXT: pmuludq %xmm1, %xmm2
+; X86-NEXT: movdqa %xmm0, %xmm3
+; X86-NEXT: psrlq $32, %xmm3
+; X86-NEXT: pmuludq %xmm1, %xmm3
+; X86-NEXT: pmuludq {{\.LCPI.*}}, %xmm0
+; X86-NEXT: paddq %xmm3, %xmm0
+; X86-NEXT: psllq $32, %xmm0
+; X86-NEXT: paddq %xmm2, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: mul_v2i64_15_neg_63:
+; X64: # BB#0:
+; X64-NEXT: movdqa %xmm0, %xmm1
+; X64-NEXT: psrlq $32, %xmm1
+; X64-NEXT: movdqa {{.*#+}} xmm2 = [15,18446744073709551553]
+; X64-NEXT: pmuludq %xmm2, %xmm1
+; X64-NEXT: movl $4294967295, %eax # imm = 0xFFFFFFFF
+; X64-NEXT: movq %rax, %xmm3
+; X64-NEXT: pslldq {{.*#+}} xmm3 = zero,zero,zero,zero,zero,zero,zero,zero,xmm3[0,1,2,3,4,5,6,7]
+; X64-NEXT: pmuludq %xmm0, %xmm3
+; X64-NEXT: paddq %xmm1, %xmm3
+; X64-NEXT: psllq $32, %xmm3
+; X64-NEXT: pmuludq %xmm2, %xmm0
+; X64-NEXT: paddq %xmm3, %xmm0
+; X64-NEXT: retq
+;
+; X64-AVX-LABEL: mul_v2i64_15_neg_63:
+; X64-AVX: # BB#0:
+; X64-AVX-NEXT: vpsrlq $32, %xmm0, %xmm1
+; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [15,18446744073709551553]
+; X64-AVX-NEXT: vpmuludq %xmm2, %xmm1, %xmm1
+; X64-AVX-NEXT: movl $4294967295, %eax # imm = 0xFFFFFFFF
+; X64-AVX-NEXT: vmovq %rax, %xmm3
+; X64-AVX-NEXT: vpslldq {{.*#+}} xmm3 = zero,zero,zero,zero,zero,zero,zero,zero,xmm3[0,1,2,3,4,5,6,7]
+; X64-AVX-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
+; X64-AVX-NEXT: vpaddq %xmm1, %xmm3, %xmm1
+; X64-AVX-NEXT: vpsllq $32, %xmm1, %xmm1
+; X64-AVX-NEXT: vpmuludq %xmm2, %xmm0, %xmm0
+; X64-AVX-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; X64-AVX-NEXT: retq
+  %1 = mul <2 x i64> %a0, <i64 15, i64 -63>
+  ret <2 x i64> %1
+}
+
 define <4 x i32> @mul_v4i32_0_15_31_7(<4 x i32> %a0) nounwind {
 ; X86-LABEL: mul_v4i32_0_15_31_7:
 ; X86: # BB#0:
@@ -798,3 +1028,75 @@ define <16 x i8> @mul_v16i8_0_1_3_7_15_31_63_127_0_1_3_7_15_31_63_127(<16 x i8>
   %1 = mul <16 x i8> %a0, <i8 0, i8 1, i8 3, i8 7, i8 15, i8 31, i8 63, i8 127, i8 0, i8 1, i8 3, i8 7, i8 15, i8 31, i8 63, i8 127>
   ret <16 x i8> %1
 }
+
+define <2 x i64> @mul_v2i64_68_132(<2 x i64> %x) nounwind {
+; X86-LABEL: mul_v2i64_68_132:
+; X86: # BB#0:
+; X86-NEXT: movdqa {{.*#+}} xmm1 = [68,0,132,0]
+; X86-NEXT: movdqa %xmm0, %xmm2
+; X86-NEXT: pmuludq %xmm1, %xmm2
+; X86-NEXT: psrlq $32, %xmm0
+; X86-NEXT: pmuludq %xmm1, %xmm0
+; X86-NEXT: psllq $32, %xmm0
+; X86-NEXT: paddq %xmm2, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: mul_v2i64_68_132:
+; X64: # BB#0:
+; X64-NEXT: movdqa {{.*#+}} xmm1 = [68,132]
+; X64-NEXT: movdqa %xmm0, %xmm2
+; X64-NEXT: pmuludq %xmm1, %xmm2
+; X64-NEXT: psrlq $32, %xmm0
+; X64-NEXT: pmuludq %xmm1, %xmm0
+; X64-NEXT: psllq $32, %xmm0
+; X64-NEXT: paddq %xmm2, %xmm0
+; X64-NEXT: retq
+;
+; X64-AVX-LABEL: mul_v2i64_68_132:
+; X64-AVX: # BB#0:
+; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [68,132]
+; X64-AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
+; X64-AVX-NEXT: vpsrlq $32, %xmm0, %xmm0
+; X64-AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
+; X64-AVX-NEXT: vpsllq $32, %xmm0, %xmm0
+; X64-AVX-NEXT: vpaddq %xmm0, %xmm2, %xmm0
+; X64-AVX-NEXT: retq
+  %mul = mul <2 x i64> %x, <i64 68, i64 132>
+  ret <2 x i64> %mul
+}
+
+define <2 x i64> @mul_v2i64_60_120(<2 x i64> %x) nounwind {
+; X86-LABEL: mul_v2i64_60_120:
+; X86: # BB#0:
+; X86-NEXT: movdqa {{.*#+}} xmm1 = [60,0,124,0]
+; X86-NEXT: movdqa %xmm0, %xmm2
+; X86-NEXT: pmuludq %xmm1, %xmm2
+; X86-NEXT: psrlq $32, %xmm0
+; X86-NEXT: pmuludq %xmm1, %xmm0
+; X86-NEXT: psllq $32, %xmm0
+; X86-NEXT: paddq %xmm2, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: mul_v2i64_60_120:
+; X64: # BB#0:
+; X64-NEXT: movdqa {{.*#+}} xmm1 = [60,124]
+; X64-NEXT: movdqa %xmm0, %xmm2
+; X64-NEXT: pmuludq %xmm1, %xmm2
+; X64-NEXT: psrlq $32, %xmm0
+; X64-NEXT: pmuludq %xmm1, %xmm0
+; X64-NEXT: psllq $32, %xmm0
+; X64-NEXT: paddq %xmm2, %xmm0
+; X64-NEXT: retq
+;
+; X64-AVX-LABEL: mul_v2i64_60_120:
+; X64-AVX: # BB#0:
+; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [60,124]
+; X64-AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
+; X64-AVX-NEXT: vpsrlq $32, %xmm0, %xmm0
+; X64-AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
+; X64-AVX-NEXT: vpsllq $32, %xmm0, %xmm0
+; X64-AVX-NEXT: vpaddq %xmm0, %xmm2, %xmm0
+; X64-AVX-NEXT: retq
+  %mul = mul <2 x i64> %x, <i64 60, i64 124>
+  ret <2 x i64> %mul
+}