; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc %s -O0 -mtriple=x86_64-unknown-unknown -mattr=+avx512f -o - | FileCheck %s
; External fastcc callee returning a huge <38 x double> vector; the value is
; returned indirectly through a caller-allocated stack slot (see the leaq of
; %rdi before the call in the CHECK lines below). Only the declaration is
; needed — the test exercises the caller's handling of the oversized return.
declare fastcc <38 x double> @test()
; PR34653: at -O0 the fast register allocator must not crash or clobber state
; while scattering the 38 doubles of the indirect return value across
; xmm/ymm/zmm registers and spill slots. The CHECK block below is
; autogenerated (update_llc_test_checks.py) — regenerate, don't hand-edit.
define void @pr34653() {
; CHECK-LABEL: pr34653:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rbp
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset %rbp, -16
; CHECK-NEXT: movq %rsp, %rbp
; CHECK-NEXT: .cfi_def_cfa_register %rbp
; CHECK-NEXT: andq $-512, %rsp # imm = 0xFE00
; CHECK-NEXT: subq $2048, %rsp # imm = 0x800
; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rdi
; CHECK-NEXT: callq test
; CHECK-NEXT: vmovupd {{[0-9]+}}(%rsp), %xmm0
; CHECK-NEXT: vmovaps %xmm0, %xmm1
; CHECK-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; CHECK-NEXT: vmovupd {{[0-9]+}}(%rsp), %ymm2
; CHECK-NEXT: vextractf128 $1, %ymm2, %xmm3
; CHECK-NEXT: vmovaps %xmm3, %xmm4
; CHECK-NEXT: vmovaps %xmm2, %xmm5
; CHECK-NEXT: vmovaps %xmm5, %xmm6
; CHECK-NEXT: vmovupd {{[0-9]+}}(%rsp), %zmm7
; CHECK-NEXT: vmovupd {{[0-9]+}}(%rsp), %zmm8
; CHECK-NEXT: vmovupd {{[0-9]+}}(%rsp), %zmm9
; CHECK-NEXT: vmovupd {{[0-9]+}}(%rsp), %zmm10
; CHECK-NEXT: vextractf32x4 $3, %zmm10, %xmm11
; CHECK-NEXT: vmovaps %xmm11, %xmm12
; CHECK-NEXT: vextractf32x4 $2, %zmm10, %xmm13
; CHECK-NEXT: vmovaps %xmm13, %xmm14
; CHECK-NEXT: vmovaps %xmm10, %xmm15
; CHECK-NEXT: vmovaps %xmm15, %xmm2
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vextractf32x4 $3, %zmm9, %xmm0
; CHECK-NEXT: vmovaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vextractf32x4 $2, %zmm9, %xmm0
; CHECK-NEXT: vmovaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovaps %xmm9, %xmm0
; CHECK-NEXT: vmovaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vextractf32x4 $3, %zmm8, %xmm0
; CHECK-NEXT: vmovaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vextractf32x4 $2, %zmm8, %xmm0
; CHECK-NEXT: vmovaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovaps %xmm8, %xmm0
; CHECK-NEXT: vmovaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vextractf32x4 $3, %zmm7, %xmm0
; CHECK-NEXT: vmovaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vextractf32x4 $2, %zmm7, %xmm0
; CHECK-NEXT: vmovaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovaps %xmm7, %xmm0
; CHECK-NEXT: vmovaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
; CHECK-NEXT: vpermilpd {{.*#+}} xmm3 = xmm3[1,0]
; CHECK-NEXT: vpermilpd {{.*#+}} xmm5 = xmm5[1,0]
; CHECK-NEXT: vpermilpd {{.*#+}} xmm11 = xmm11[1,0]
; CHECK-NEXT: vpermilpd {{.*#+}} xmm13 = xmm13[1,0]
; CHECK-NEXT: # kill: %ymm10<def> %ymm10<kill> %zmm10<kill>
; CHECK-NEXT: vextractf128 $1, %ymm10, %xmm10
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovaps %xmm10, %xmm0
; CHECK-NEXT: vpermilpd {{.*#+}} xmm15 = xmm15[1,0]
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; CHECK-NEXT: # kill: %ymm9<def> %ymm9<kill> %zmm9<kill>
; CHECK-NEXT: vextractf128 $1, %ymm9, %xmm9
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovaps %xmm9, %xmm0
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; CHECK-NEXT: # kill: %ymm8<def> %ymm8<kill> %zmm8<kill>
; CHECK-NEXT: vextractf128 $1, %ymm8, %xmm8
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovaps %xmm8, %xmm0
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; CHECK-NEXT: # kill: %ymm7<def> %ymm7<kill> %zmm7<kill>
; CHECK-NEXT: vextractf128 $1, %ymm7, %xmm7
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovaps %xmm7, %xmm0
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; CHECK-NEXT: vpermilpd {{.*#+}} xmm10 = xmm10[1,0]
; CHECK-NEXT: vpermilpd {{.*#+}} xmm9 = xmm9[1,0]
; CHECK-NEXT: vpermilpd {{.*#+}} xmm8 = xmm8[1,0]
; CHECK-NEXT: vpermilpd {{.*#+}} xmm7 = xmm7[1,0]
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd {{[0-9]+}}(%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd {{[0-9]+}}(%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd {{[0-9]+}}(%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd {{[0-9]+}}(%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd {{[0-9]+}}(%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd {{[0-9]+}}(%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd {{[0-9]+}}(%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd {{[0-9]+}}(%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd {{[0-9]+}}(%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd {{[0-9]+}}(%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd {{[0-9]+}}(%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd {{[0-9]+}}(%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd {{[0-9]+}}(%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd {{[0-9]+}}(%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd {{[0-9]+}}(%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd {{[0-9]+}}(%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd {{[0-9]+}}(%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd {{[0-9]+}}(%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd {{[0-9]+}}(%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd {{[0-9]+}}(%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd {{[0-9]+}}(%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd {{[0-9]+}}(%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd {{[0-9]+}}(%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: vmovsd %xmm8, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd %xmm13, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd %xmm1, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd %xmm14, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd %xmm2, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd %xmm4, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd %xmm9, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd %xmm10, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd %xmm15, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd %xmm11, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd %xmm3, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd %xmm6, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd %xmm5, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd %xmm12, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd %xmm7, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: movq %rbp, %rsp
; CHECK-NEXT: popq %rbp
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
entry:
  ; Call the external fastcc function; the <38 x double> result comes back
  ; through a stack slot, forcing heavy spill/reload traffic at -O0.
  %v = call fastcc <38 x double> @test()
  ; Extract one lane to keep the vector value live; the result itself is
  ; intentionally unused — the original PR crashed before reaching here.
  %v.0 = extractelement <38 x double> %v, i32 0
  ret void
}