; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc %s -O0 -mtriple=x86_64-unknown-unknown -mattr=+avx512f -o - | FileCheck %s
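; PR34653: a fastcc call returns a wide <38 x double> value and only element 0
; of the result is extracted; the checks below cover the -O0 codegen for an
; AVX-512 target.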

declare fastcc <38 x double> @test()

define void @pr34653() {
; CHECK-LABEL: pr34653:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rbp
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset %rbp, -16
; CHECK-NEXT: movq %rsp, %rbp
; CHECK-NEXT: .cfi_def_cfa_register %rbp
; CHECK-NEXT: andq $-512, %rsp # imm = 0xFE00
; CHECK-NEXT: subq $2048, %rsp # imm = 0x800
; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rdi
; CHECK-NEXT: callq test
; CHECK-NEXT: vmovupd {{[0-9]+}}(%rsp), %xmm0
; CHECK-NEXT: vmovaps %xmm0, %xmm1
; CHECK-NEXT: vmovupd {{[0-9]+}}(%rsp), %xmm2
; CHECK-NEXT: vmovaps %xmm2, %xmm3
; CHECK-NEXT: vmovupd {{[0-9]+}}(%rsp), %xmm4
; CHECK-NEXT: vmovaps %xmm4, %xmm5
; CHECK-NEXT: vmovupd {{[0-9]+}}(%rsp), %xmm6
; CHECK-NEXT: vmovaps %xmm6, %xmm7
; CHECK-NEXT: vmovupd {{[0-9]+}}(%rsp), %xmm8
; CHECK-NEXT: vmovaps %xmm8, %xmm9
; CHECK-NEXT: vmovupd {{[0-9]+}}(%rsp), %xmm10
; CHECK-NEXT: vmovaps %xmm10, %xmm11
; CHECK-NEXT: vmovupd {{[0-9]+}}(%rsp), %xmm12
; CHECK-NEXT: vmovaps %xmm12, %xmm13
; CHECK-NEXT: vmovupd {{[0-9]+}}(%rsp), %xmm14
; CHECK-NEXT: vmovaps %xmm14, %xmm15
; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: vmovupd {{[0-9]+}}(%rsp), %xmm0
; CHECK-NEXT: vmovaps %zmm0, %zmm16
; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: vmovupd {{[0-9]+}}(%rsp), %xmm0
; CHECK-NEXT: vmovaps %zmm0, %zmm17
; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: vmovupd {{[0-9]+}}(%rsp), %xmm0
; CHECK-NEXT: vmovaps %zmm0, %zmm18
; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: vmovupd {{[0-9]+}}(%rsp), %xmm0
; CHECK-NEXT: vmovaps %zmm0, %zmm19
; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: vmovupd {{[0-9]+}}(%rsp), %xmm0
; CHECK-NEXT: vmovaps %zmm0, %zmm20
; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: vmovupd {{[0-9]+}}(%rsp), %xmm0
; CHECK-NEXT: vmovaps %zmm0, %zmm21
; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: vmovupd {{[0-9]+}}(%rsp), %xmm0
; CHECK-NEXT: vmovaps %zmm0, %zmm22
; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: vmovupd {{[0-9]+}}(%rsp), %xmm0
; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: vmovupd {{[0-9]+}}(%rsp), %xmm0
; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: vmovupd {{[0-9]+}}(%rsp), %xmm0
; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: vmovupd {{[0-9]+}}(%rsp), %xmm0
; CHECK-NEXT: vmovaps %zmm0, %zmm23
; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT: vmovsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT: vmovsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT: vmovsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; CHECK-NEXT: vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
; CHECK-NEXT: vpermilpd {{.*#+}} xmm4 = xmm4[1,0]
; CHECK-NEXT: vpermilpd {{.*#+}} xmm6 = xmm6[1,0]
; CHECK-NEXT: vpermilpd {{.*#+}} xmm8 = xmm8[1,0]
; CHECK-NEXT: vpermilpd {{.*#+}} xmm10 = xmm10[1,0]
; CHECK-NEXT: vpermilpd {{.*#+}} xmm12 = xmm12[1,0]
; CHECK-NEXT: vpermilpd {{.*#+}} xmm14 = xmm14[1,0]
; CHECK-NEXT: vmovsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; CHECK-NEXT: vmovsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; CHECK-NEXT: vmovsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; CHECK-NEXT: vmovsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; CHECK-NEXT: vmovsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; CHECK-NEXT: vmovsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; CHECK-NEXT: vmovsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; CHECK-NEXT: vmovsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; CHECK-NEXT: vmovsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; CHECK-NEXT: vmovsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; CHECK-NEXT: vmovsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; CHECK-NEXT: vmovsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm24 # 8-byte Reload
; CHECK-NEXT: # xmm24 = mem[0],zero
; CHECK-NEXT: vmovsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm25 # 8-byte Reload
; CHECK-NEXT: # xmm25 = mem[0],zero
; CHECK-NEXT: vmovsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm26 # 8-byte Reload
; CHECK-NEXT: # xmm26 = mem[0],zero
; CHECK-NEXT: vmovsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm27 # 8-byte Reload
; CHECK-NEXT: # xmm27 = mem[0],zero
; CHECK-NEXT: vmovsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm28 # 8-byte Reload
; CHECK-NEXT: # xmm28 = mem[0],zero
; CHECK-NEXT: vmovsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm29 # 8-byte Reload
; CHECK-NEXT: # xmm29 = mem[0],zero
; CHECK-NEXT: vmovsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm30 # 8-byte Reload
; CHECK-NEXT: # xmm30 = mem[0],zero
; CHECK-NEXT: vmovsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm31 # 8-byte Reload
; CHECK-NEXT: # xmm31 = mem[0],zero
; CHECK-NEXT: vmovsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: vmovsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: vmovsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: vmovsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: vmovsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: vmovsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: vmovsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: vmovsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: vmovsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: vmovsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: vmovsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: vmovsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: vmovsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: vmovsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: vmovsd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: vmovsd %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: vmovsd %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: vmovsd %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: vmovsd %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: vmovsd %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: vmovsd %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: vmovsd %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: vmovsd %xmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: vmovsd %xmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: vmovsd %xmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: vmovsd %xmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: vmovsd %xmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: vmovsd %xmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: vmovsd %xmm22, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: vmovsd %xmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: vmovsd %xmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: vmovsd %xmm25, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: vmovsd %xmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: vmovsd %xmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: vmovsd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: vmovsd %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: vmovsd %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: vmovsd %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: vmovsd %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: vmovsd %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: vmovsd %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: vmovsd %xmm28, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: vmovsd %xmm29, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: vmovsd %xmm30, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: vmovsd %xmm31, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: vmovsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: movq %rbp, %rsp
; CHECK-NEXT: popq %rbp
; CHECK-NEXT: .cfi_def_cfa %rsp, 8
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
entry:
  %v = call fastcc <38 x double> @test()
  %v.0 = extractelement <38 x double> %v, i32 0
  ret void
}