2018-11-01 02:54:06 +08:00
|
|
|
; RUN: llc -O0 -amdgpu-spill-sgpr-to-vgpr=1 -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=TOVGPR -check-prefix=GCN %s
|
|
|
|
; RUN: llc -O0 -amdgpu-spill-sgpr-to-vgpr=1 -amdgpu-spill-sgpr-to-smem=0 -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=TOVGPR -check-prefix=GCN %s
|
|
|
|
; RUN: llc -O0 -amdgpu-spill-sgpr-to-vgpr=0 -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=TOVMEM -check-prefix=GCN %s
|
|
|
|
; RUN: llc -O0 -amdgpu-spill-sgpr-to-vgpr=0 -amdgpu-spill-sgpr-to-smem=0 -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=TOVMEM -check-prefix=GCN %s
|
|
|
|
; RUN: llc -O0 -amdgpu-spill-sgpr-to-vgpr=0 -amdgpu-spill-sgpr-to-smem=1 -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=TOSMEM -check-prefix=GCN %s
|
2016-09-03 14:57:55 +08:00
|
|
|
|
|
|
|
; XXX - Why does it like to use vcc?
|
|
|
|
|
|
|
|
; GCN-LABEL: {{^}}spill_m0:
|
2016-12-10 03:49:48 +08:00
|
|
|
; TOSMEM: s_mov_b32 s[[LO:[0-9]+]], SCRATCH_RSRC_DWORD0
|
|
|
|
; TOSMEM: s_mov_b32 s[[HI:[0-9]+]], 0xe80000
|
2016-10-29 03:43:31 +08:00
|
|
|
|
2016-11-26 01:37:09 +08:00
|
|
|
; GCN-DAG: s_cmp_lg_u32
|
2016-09-03 14:57:55 +08:00
|
|
|
|
2016-11-26 01:37:09 +08:00
|
|
|
; TOVGPR-DAG: s_mov_b32 [[M0_COPY:s[0-9]+]], m0
|
2018-11-07 14:57:03 +08:00
|
|
|
; TOVGPR: v_writelane_b32 [[SPILL_VREG:v[0-9]+]], [[M0_COPY]], 2
|
2016-09-03 14:57:55 +08:00
|
|
|
|
2016-11-26 01:37:09 +08:00
|
|
|
; TOVMEM-DAG: s_mov_b32 [[M0_COPY:s[0-9]+]], m0
|
|
|
|
; TOVMEM-DAG: v_mov_b32_e32 [[SPILL_VREG:v[0-9]+]], [[M0_COPY]]
|
2018-11-07 14:57:03 +08:00
|
|
|
; TOVMEM: buffer_store_dword [[SPILL_VREG]], off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offset:12 ; 4-byte Folded Spill
|
2016-11-26 01:37:09 +08:00
|
|
|
|
|
|
|
; TOSMEM-DAG: s_mov_b32 [[M0_COPY:s[0-9]+]], m0
|
2018-11-07 14:57:03 +08:00
|
|
|
; TOSMEM: s_add_u32 m0, s3, 0x300{{$}}
|
2016-11-26 01:37:09 +08:00
|
|
|
; TOSMEM-NOT: [[M0_COPY]]
|
2016-12-10 03:49:48 +08:00
|
|
|
; TOSMEM: s_buffer_store_dword [[M0_COPY]], s{{\[}}[[LO]]:[[HI]]], m0 ; 4-byte Folded Spill
|
2016-11-26 01:37:09 +08:00
|
|
|
|
2016-09-03 14:57:55 +08:00
|
|
|
; GCN: s_cbranch_scc1 [[ENDIF:BB[0-9]+_[0-9]+]]
|
|
|
|
|
|
|
|
; GCN: [[ENDIF]]:
|
2018-11-07 14:57:03 +08:00
|
|
|
; TOVGPR: v_readlane_b32 [[M0_RESTORE:s[0-9]+]], [[SPILL_VREG]], 2
|
2016-11-26 01:37:09 +08:00
|
|
|
; TOVGPR: s_mov_b32 m0, [[M0_RESTORE]]
|
2016-09-03 14:57:55 +08:00
|
|
|
|
2018-11-07 14:57:03 +08:00
|
|
|
; TOVMEM: buffer_load_dword [[RELOAD_VREG:v[0-9]+]], off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offset:12 ; 4-byte Folded Reload
|
2016-09-03 14:57:55 +08:00
|
|
|
; TOVMEM: s_waitcnt vmcnt(0)
|
2016-11-26 01:37:09 +08:00
|
|
|
; TOVMEM: v_readfirstlane_b32 [[M0_RESTORE:s[0-9]+]], [[RELOAD_VREG]]
|
|
|
|
; TOVMEM: s_mov_b32 m0, [[M0_RESTORE]]
|
2016-09-03 14:57:55 +08:00
|
|
|
|
2018-11-07 14:57:03 +08:00
|
|
|
; TOSMEM: s_add_u32 m0, s3, 0x300{{$}}
|
2016-12-10 03:49:48 +08:00
|
|
|
; TOSMEM: s_buffer_load_dword [[M0_RESTORE:s[0-9]+]], s{{\[}}[[LO]]:[[HI]]], m0 ; 4-byte Folded Reload
|
2016-11-26 01:37:09 +08:00
|
|
|
; TOSMEM-NOT: [[M0_RESTORE]]
|
|
|
|
; TOSMEM: s_mov_b32 m0, [[M0_RESTORE]]
|
|
|
|
|
|
|
|
; GCN: s_add_i32 s{{[0-9]+}}, m0, 1
|
2017-03-22 05:39:51 +08:00
|
|
|
; Kernel that forces m0 to be live across a branch so the allocator must
; spill and restore it.  The RUN lines above select the three spill paths
; (VGPR lanes, scratch VMEM, scratch SMEM) that the CHECK lines verify.
define amdgpu_kernel void @spill_m0(i32 %cond, i32 addrspace(1)* %out) #0 {
entry:
  ; Pin a value into the physical M0 register via inline asm so it cannot
  ; be rematerialized and must genuinely be spilled across the branch.
  %m0 = call i32 asm sideeffect "s_mov_b32 m0, 0", "={M0}"() #0
  %cmp0 = icmp eq i32 %cond, 0
  br i1 %cmp0, label %if, label %endif

if:
  ; Side-effecting asm keeps this block from being folded away, so m0
  ; stays live across real control flow.
  call void asm sideeffect "v_nop", ""() #0
  br label %endif

endif:
  ; Consume the original m0 value after the join; this is what requires
  ; the restore checked by the M0_RESTORE patterns above.
  %foo = call i32 asm sideeffect "s_add_i32 $0, $1, 1", "=s,{M0}"(i32 %m0) #0
  store i32 %foo, i32 addrspace(1)* %out
  ret void
}
|
|
|
|
|
|
|
|
; LDS (addrspace 3) array; the DS access to it in @spill_kill_m0_lds adds
; register pressure in a block where m0 is also needed for interpolation.
@lds = internal addrspace(3) global [64 x float] undef
|
|
|
|
|
2016-11-30 03:39:53 +08:00
|
|
|
; m0 is killed, so it isn't necessary during the entry block spill to preserve it
|
|
|
|
; GCN-LABEL: {{^}}spill_kill_m0_lds:
|
2016-11-26 01:37:09 +08:00
|
|
|
; GCN: s_mov_b32 m0, s6
|
|
|
|
; GCN: v_interp_mov_f32
|
|
|
|
|
2016-11-30 03:39:53 +08:00
|
|
|
; TOSMEM-NOT: s_m0
|
2017-02-23 05:05:25 +08:00
|
|
|
; TOSMEM: s_add_u32 m0, s7, 0x100
|
2017-09-11 21:55:39 +08:00
|
|
|
; TOSMEM-NEXT: s_buffer_store_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, m0 ; 8-byte Folded Spill
|
2017-04-25 03:37:54 +08:00
|
|
|
; FIXME: RegScavenger::isRegUsed() always returns true if m0 is reserved, so we have to save and restore it
|
|
|
|
; FIXME-TOSMEM-NOT: m0
|
2016-11-26 01:37:09 +08:00
|
|
|
|
2017-04-25 03:37:54 +08:00
|
|
|
; FIXME-TOSMEM-NOT: m0
|
2017-09-11 21:55:39 +08:00
|
|
|
; TOSMEM: s_add_u32 m0, s7, 0x300
|
|
|
|
; TOSMEM: s_buffer_store_dword s{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, m0 ; 4-byte Folded Spill
|
2017-04-25 03:37:54 +08:00
|
|
|
; FIXME-TOSMEM-NOT: m0
|
2016-11-26 01:37:09 +08:00
|
|
|
|
|
|
|
; TOSMEM: s_mov_b64 exec,
|
|
|
|
; TOSMEM: s_cbranch_execz
|
|
|
|
; TOSMEM: s_branch
|
|
|
|
|
|
|
|
; TOSMEM: BB{{[0-9]+_[0-9]+}}:
|
2018-11-07 14:57:03 +08:00
|
|
|
; TOSMEM: s_add_u32 m0, s7, 0x500
|
2016-12-02 08:54:45 +08:00
|
|
|
; TOSMEM-NEXT: s_buffer_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, m0 ; 8-byte Folded Reload
|
2016-11-26 01:37:09 +08:00
|
|
|
|
|
|
|
|
2016-09-03 14:57:55 +08:00
|
|
|
; GCN-NOT: v_readlane_b32 m0
|
2016-11-26 01:37:09 +08:00
|
|
|
; GCN-NOT: s_buffer_store_dword m0
|
|
|
|
; GCN-NOT: s_buffer_load_dword m0
|
2018-02-14 02:00:25 +08:00
|
|
|
; Pixel shader where m0 is killed before the spill point, so (per the
; comment above) the entry-block spill does not need to preserve it.
; The GCN-NOT lines above assert that m0 itself is never spilled/reloaded.
define amdgpu_ps void @spill_kill_m0_lds(<16 x i8> addrspace(4)* inreg %arg, <16 x i8> addrspace(4)* inreg %arg1, <32 x i8> addrspace(4)* inreg %arg2, i32 inreg %m0) #0 {
main_body:
  ; interp.mov reads the m0-backed parameter state (checked as
  ; "s_mov_b32 m0, s6" + "v_interp_mov_f32" above).
  %tmp = call float @llvm.amdgcn.interp.mov(i32 2, i32 0, i32 0, i32 %m0)
  ; ueq with 0.0 creates divergent control flow (exec-mask save/restore,
  ; see the s_mov_b64 exec / s_cbranch_execz checks above).
  %cmp = fcmp ueq float 0.000000e+00, %tmp
  br i1 %cmp, label %if, label %else

if:                                               ; preds = %main_body
  %lds_ptr = getelementptr [64 x float], [64 x float] addrspace(3)* @lds, i32 0, i32 0
  %lds_data_ = load float, float addrspace(3)* %lds_ptr
  ; Explicit WQM request for the LDS value; DS ops no longer force WQM on
  ; their own, so the derivative-style use must ask for it.
  %lds_data = call float @llvm.amdgcn.wqm.f32(float %lds_data_)
  br label %endif

else:                                             ; preds = %main_body
  %interp = call float @llvm.amdgcn.interp.mov(i32 2, i32 0, i32 0, i32 %m0)
  br label %endif

endif:                                            ; preds = %else, %if
  %export = phi float [ %lds_data, %if ], [ %interp, %else ]
  %tmp4 = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float %export, float %export)
  call void @llvm.amdgcn.exp.compr.v2f16(i32 0, i32 15, <2 x half> %tmp4, <2 x half> %tmp4, i1 true, i1 true) #0
  ret void
}
|
|
|
|
|
|
|
|
; Force save and restore of m0 during SMEM spill
|
|
|
|
; GCN-LABEL: {{^}}m0_unavailable_spill:
|
|
|
|
|
|
|
|
; GCN: ; def m0, 1
|
|
|
|
|
|
|
|
; GCN: s_mov_b32 m0, s2
|
|
|
|
; GCN: v_interp_mov_f32
|
|
|
|
|
|
|
|
; GCN: ; clobber m0
|
|
|
|
|
2017-06-21 02:43:14 +08:00
|
|
|
; TOSMEM: s_mov_b32 s2, m0
|
2017-02-23 05:05:25 +08:00
|
|
|
; TOSMEM: s_add_u32 m0, s3, 0x100
|
2016-12-02 08:54:45 +08:00
|
|
|
; TOSMEM-NEXT: s_buffer_store_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, m0 ; 8-byte Folded Spill
|
2017-06-21 02:43:14 +08:00
|
|
|
; TOSMEM: s_mov_b32 m0, s2
|
2016-11-30 03:39:53 +08:00
|
|
|
|
|
|
|
; TOSMEM: s_mov_b64 exec,
|
|
|
|
; TOSMEM: s_cbranch_execz
|
|
|
|
; TOSMEM: s_branch
|
|
|
|
|
|
|
|
; TOSMEM: BB{{[0-9]+_[0-9]+}}:
|
2017-04-25 03:37:54 +08:00
|
|
|
; TOSMEM: s_add_u32 m0, s3, 0x100
|
2016-12-02 08:54:45 +08:00
|
|
|
; TOSMEM-NEXT: s_buffer_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, m0 ; 8-byte Folded Reload
|
2016-11-30 03:39:53 +08:00
|
|
|
|
|
|
|
; GCN-NOT: v_readlane_b32 m0
|
|
|
|
; GCN-NOT: s_buffer_store_dword m0
|
|
|
|
; GCN-NOT: s_buffer_load_dword m0
|
2017-03-22 05:39:51 +08:00
|
|
|
; Forces a save/restore of m0 around an SMEM spill: m0 is both defined by
; inline asm and explicitly clobbered, so the TOSMEM path must shuffle it
; through s2 (checked by the s_mov_b32 s2, m0 / s_mov_b32 m0, s2 pairs above).
define amdgpu_kernel void @m0_unavailable_spill(i32 %m0.arg) #0 {
main_body:
  ; Occupy M0 with an opaque inline-asm definition.
  %m0 = call i32 asm sideeffect "; def $0, 1", "={M0}"() #0
  ; interp.mov needs m0 loaded from the kernel argument, competing with
  ; the inline-asm definition above.
  %tmp = call float @llvm.amdgcn.interp.mov(i32 2, i32 0, i32 0, i32 %m0.arg)
  ; Explicit clobber makes m0 unavailable as a scratch reg for the spill.
  call void asm sideeffect "; clobber $0", "~{M0}"() #0
  %cmp = fcmp ueq float 0.000000e+00, %tmp
  br i1 %cmp, label %if, label %else

if:                                               ; preds = %main_body
  store volatile i32 8, i32 addrspace(1)* undef
  br label %endif

else:                                             ; preds = %main_body
  store volatile i32 11, i32 addrspace(1)* undef
  br label %endif

endif:
  ret void
}
|
|
|
|
|
2016-11-26 01:37:09 +08:00
|
|
|
; GCN-LABEL: {{^}}restore_m0_lds:
|
2016-12-02 08:54:45 +08:00
|
|
|
; TOSMEM: s_load_dwordx2 [[REG:s\[[0-9]+:[0-9]+\]]]
|
2016-11-26 01:37:09 +08:00
|
|
|
; TOSMEM: s_cmp_eq_u32
|
2017-04-25 03:37:54 +08:00
|
|
|
; FIXME: RegScavenger::isRegUsed() always returns true if m0 is reserved, so we have to save and restore it
|
|
|
|
; FIXME-TOSMEM-NOT: m0
|
2017-02-23 05:05:25 +08:00
|
|
|
; TOSMEM: s_add_u32 m0, s3, 0x100
|
2016-12-10 03:49:48 +08:00
|
|
|
; TOSMEM: s_buffer_store_dword s{{[0-9]+}}, s[88:91], m0 ; 4-byte Folded Spill
|
2017-04-25 03:37:54 +08:00
|
|
|
; FIXME-TOSMEM-NOT: m0
|
2018-11-07 14:57:03 +08:00
|
|
|
; TOSMEM: s_add_u32 m0, s3, 0x200
|
|
|
|
; TOSMEM: s_buffer_store_dwordx2 [[REG]], s[88:91], m0 ; 8-byte Folded Spill
|
|
|
|
; FIXME-TOSMEM-NOT: m0
|
2016-11-26 01:37:09 +08:00
|
|
|
; TOSMEM: s_cbranch_scc1
|
|
|
|
|
|
|
|
; TOSMEM: s_mov_b32 m0, -1
|
|
|
|
|
2017-06-21 02:43:14 +08:00
|
|
|
; TOSMEM: s_mov_b32 s0, m0
|
2018-11-07 14:57:03 +08:00
|
|
|
; TOSMEM: s_add_u32 m0, s3, 0x200
|
2016-12-10 03:49:48 +08:00
|
|
|
; TOSMEM: s_buffer_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s[88:91], m0 ; 8-byte Folded Reload
|
2017-06-21 02:43:14 +08:00
|
|
|
; TOSMEM: s_mov_b32 m0, s0
|
2016-11-26 01:37:09 +08:00
|
|
|
; TOSMEM: s_waitcnt lgkmcnt(0)
|
|
|
|
|
|
|
|
; TOSMEM: ds_write_b64
|
|
|
|
|
2017-04-25 03:37:54 +08:00
|
|
|
; FIXME-TOSMEM-NOT: m0
|
2018-11-07 14:57:03 +08:00
|
|
|
; TOSMEM: s_add_u32 m0, s3, 0x100
|
2016-12-10 03:49:48 +08:00
|
|
|
; TOSMEM: s_buffer_load_dword s0, s[88:91], m0 ; 4-byte Folded Reload
|
2017-04-25 03:37:54 +08:00
|
|
|
; FIXME-TOSMEM-NOT: m0
|
2016-11-26 01:37:09 +08:00
|
|
|
; TOSMEM: s_waitcnt lgkmcnt(0)
|
2016-11-30 03:39:53 +08:00
|
|
|
; TOSMEM-NOT: m0
|
2016-11-26 01:37:09 +08:00
|
|
|
; TOSMEM: s_mov_b32 m0, s0
|
|
|
|
; TOSMEM: ; use m0
|
|
|
|
|
|
|
|
; TOSMEM: s_dcache_wb
|
|
|
|
; TOSMEM: s_endpgm
|
2017-03-22 05:39:51 +08:00
|
|
|
; m0 must survive both an SGPR-pair spill/reload and the "s_mov_b32 m0, -1"
; needed for the LDS write; the TOSMEM checks above verify the
; save-to-s0 / restore-from-s0 dance around each SMEM spill access.
define amdgpu_kernel void @restore_m0_lds(i32 %arg) {
  ; Pin a value in M0 that is consumed only in %bb, across the branch.
  %m0 = call i32 asm sideeffect "s_mov_b32 m0, 0", "={M0}"() #0
  ; 64-bit constant-address load produces the SGPR pair that gets spilled
  ; (s_buffer_store_dwordx2 / s_buffer_load_dwordx2 checks above).
  %sval = load volatile i64, i64 addrspace(4)* undef
  %cmp = icmp eq i32 %arg, 0
  br i1 %cmp, label %ret, label %bb

bb:
  ; LDS store requires m0 = -1 on these targets, clobbering the saved m0.
  store volatile i64 %sval, i64 addrspace(3)* undef
  ; Use of the original m0 value after the clobber forces the reload.
  call void asm sideeffect "; use $0", "{M0}"(i32 %m0) #0
  br label %ret

ret:
  ret void
}
|
|
|
|
|
2017-02-22 08:02:21 +08:00
|
|
|
; AMDGPU intrinsic declarations used by the test kernels above.
declare float @llvm.amdgcn.interp.mov(i32, i32, i32, i32) #1
declare void @llvm.amdgcn.exp.f32(i32, i32, float, float, float, float, i1, i1) #0
declare void @llvm.amdgcn.exp.compr.v2f16(i32, i32, <2 x half>, <2 x half>, i1, i1) #0
declare <2 x half> @llvm.amdgcn.cvt.pkrtz(float, float) #1
declare float @llvm.amdgcn.wqm.f32(float) #1

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }
|