From 67d25b298aad54386a7ecab0b9c95cd663409aa5 Mon Sep 17 00:00:00 2001
From: Simon Pilgrim
Date: Fri, 10 Mar 2017 16:07:39 +0000
Subject: [PATCH] [X86][MMX] Regenerate mmx bitcast tests

llvm-svn: 297474
---
 llvm/test/CodeGen/X86/bitcast-mmx.ll | 122 +++++++++++++++++++--------
 1 file changed, 89 insertions(+), 33 deletions(-)

diff --git a/llvm/test/CodeGen/X86/bitcast-mmx.ll b/llvm/test/CodeGen/X86/bitcast-mmx.ll
index 4107f3914f81..7a6f6893ca3e 100644
--- a/llvm/test/CodeGen/X86/bitcast-mmx.ll
+++ b/llvm/test/CodeGen/X86/bitcast-mmx.ll
@@ -1,12 +1,20 @@
-; RUN: llc < %s -march=x86-64 -mattr=+mmx,+sse2 | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+mmx,+sse2 | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+mmx,+sse2 | FileCheck %s --check-prefix=X64
 
-define i32 @t0(i64 %x) {
-; CHECK-LABEL: t0:
-; CHECK: # BB#0:{{.*}} %entry
-; CHECK: movd %[[REG1:[a-z]+]], %mm0
-; CHECK-NEXT: pshufw $238, %mm0, %mm0
-; CHECK-NEXT: movd %mm0, %eax
-; CHECK-NEXT: retq
+define i32 @t0(i64 %x) nounwind {
+; X86-LABEL: t0:
+; X86: # BB#0: # %entry
+; X86-NEXT: pshufw $238, {{[0-9]+}}(%esp), %mm0 # mm0 = mem[2,3,2,3]
+; X86-NEXT: movd %mm0, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: t0:
+; X64: # BB#0: # %entry
+; X64-NEXT: movd %rdi, %mm0
+; X64-NEXT: pshufw $238, %mm0, %mm0 # mm0 = mm0[2,3,2,3]
+; X64-NEXT: movd %mm0, %eax
+; X64-NEXT: retq
 entry:
   %0 = bitcast i64 %x to <4 x i16>
   %1 = bitcast <4 x i16> %0 to x86_mmx
@@ -19,14 +27,29 @@ entry:
   ret i32 %7
 }
 
-define i64 @t1(i64 %x, i32 %n) {
-; CHECK-LABEL: t1:
-; CHECK: # BB#0:{{.*}} %entry
-; CHECK: movd %[[REG2:[a-z]+]], %mm0
-; CHECK-NEXT: movd %[[REG1]], %mm1
-; CHECK-NEXT: psllq %mm0, %mm1
-; CHECK-NEXT: movd %mm1, %rax
-; CHECK-NEXT: retq
+define i64 @t1(i64 %x, i32 %n) nounwind {
+; X86-LABEL: t1:
+; X86: # BB#0: # %entry
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: movq 8(%ebp), %mm0
+; X86-NEXT: psllq 16(%ebp), %mm0
+; X86-NEXT: movq %mm0, (%esp)
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+;
+; X64-LABEL: t1:
+; X64: # BB#0: # %entry
+; X64-NEXT: movd %esi, %mm0
+; X64-NEXT: movd %rdi, %mm1
+; X64-NEXT: psllq %mm0, %mm1
+; X64-NEXT: movd %mm1, %rax
+; X64-NEXT: retq
 entry:
   %0 = bitcast i64 %x to x86_mmx
   %1 = tail call x86_mmx @llvm.x86.mmx.pslli.q(x86_mmx %0, i32 %n)
@@ -34,16 +57,32 @@ entry:
   ret i64 %2
 }
 
-define i64 @t2(i64 %x, i32 %n, i32 %w) {
-; CHECK-LABEL: t2:
-; CHECK: # BB#0:{{.*}} %entry
-; CHECK: movd %[[REG4:[a-z]+]], %mm0
-; CHECK-NEXT: movd %[[REG6:[a-z0-9]+]], %mm1
-; CHECK-NEXT: psllq %mm0, %mm1
-; CHECK-NEXT: movd %[[REG1]], %mm0
-; CHECK-NEXT: por %mm1, %mm0
-; CHECK-NEXT: movd %mm0, %rax
-; CHECK-NEXT: retq
+define i64 @t2(i64 %x, i32 %n, i32 %w) nounwind {
+; X86-LABEL: t2:
+; X86: # BB#0: # %entry
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: movd 20(%ebp), %mm0
+; X86-NEXT: psllq 16(%ebp), %mm0
+; X86-NEXT: por 8(%ebp), %mm0
+; X86-NEXT: movq %mm0, (%esp)
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+;
+; X64-LABEL: t2:
+; X64: # BB#0: # %entry
+; X64-NEXT: movd %esi, %mm0
+; X64-NEXT: movd %edx, %mm1
+; X64-NEXT: psllq %mm0, %mm1
+; X64-NEXT: movd %rdi, %mm0
+; X64-NEXT: por %mm1, %mm0
+; X64-NEXT: movd %mm0, %rax
+; X64-NEXT: retq
 entry:
   %0 = insertelement <2 x i32> undef, i32 %w, i32 0
   %1 = insertelement <2 x i32> %0, i32 0, i32 1
@@ -55,13 +94,30 @@ entry:
   ret i64 %6
 }
 
-define i64 @t3(<1 x i64>* %y, i32* %n) {
-; CHECK-LABEL: t3:
-; CHECK: # BB#0:{{.*}} %entry
-; CHECK: movq (%[[REG1]]), %mm0
-; CHECK-NEXT: psllq (%[[REG3:[a-z]+]]), %mm0
-; CHECK-NEXT: movd %mm0, %rax
-; CHECK-NEXT: retq
+define i64 @t3(<1 x i64>* %y, i32* %n) nounwind {
+; X86-LABEL: t3:
+; X86: # BB#0: # %entry
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: movl 12(%ebp), %eax
+; X86-NEXT: movl 8(%ebp), %ecx
+; X86-NEXT: movq (%ecx), %mm0
+; X86-NEXT: psllq (%eax), %mm0
+; X86-NEXT: movq %mm0, (%esp)
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+;
+; X64-LABEL: t3:
+; X64: # BB#0: # %entry
+; X64-NEXT: movq (%rdi), %mm0
+; X64-NEXT: psllq (%rsi), %mm0
+; X64-NEXT: movd %mm0, %rax
+; X64-NEXT: retq
 entry:
   %0 = bitcast <1 x i64>* %y to x86_mmx*
   %1 = load x86_mmx, x86_mmx* %0, align 8