llvm-project/llvm/test/CodeGen/AMDGPU/waitcnt.mir

# RUN: llc -march=amdgcn -mcpu=fiji -run-pass si-insert-waits %s -o - | FileCheck %s
--- |
  define void @flat_zero_waitcnt(i32 addrspace(1)* %global4,
                                 <4 x i32> addrspace(1)* %global16,
                                 i32 addrspace(4)* %flat4,
                                 <4 x i32> addrspace(4)* %flat16) {
    ret void
  }
...
---
# CHECK-LABEL: name: flat_zero_waitcnt
# CHECK-LABEL: bb.0:
# CHECK: FLAT_LOAD_DWORD
# CHECK: FLAT_LOAD_DWORDX4
# Global loads will return in order, so we only need to wait for the older of
# the two outstanding loads:
# s_waitcnt vmcnt(1) lgkmcnt(0)
# CHECK-NEXT: S_WAITCNT 113
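# (For reference: assuming the VI/GFX8 S_WAITCNT immediate layout, with vmcnt
# in bits [3:0], expcnt in bits [6:4], and lgkmcnt in bits [11:8],
# 113 = 0x071 decodes to vmcnt(1), expcnt(7) (no exp wait), lgkmcnt(0).)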
# CHECK-LABEL: bb.1:
# CHECK: FLAT_LOAD_DWORD
# CHECK: FLAT_LOAD_DWORDX4
# The first load has no mem operand, so we should assume it accesses the flat
# address space.
# s_waitcnt vmcnt(0) lgkmcnt(0)
# CHECK-NEXT: S_WAITCNT 112
# CHECK-LABEL: bb.2:
# CHECK: FLAT_LOAD_DWORD
# CHECK: FLAT_LOAD_DWORDX4
# One of the outstanding loads accesses the flat address space.
# s_waitcnt vmcnt(0) lgkmcnt(0)
# CHECK-NEXT: S_WAITCNT 112
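# (Under the same assumed immediate layout, 112 = 0x070 decodes to vmcnt(0),
# expcnt(7), lgkmcnt(0), matching the vmcnt(0) lgkmcnt(0) waits expected in
# bb.1 and bb.2.)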
name: flat_zero_waitcnt
body: |
  bb.0:
    successors: %bb.1
    %vgpr0 = FLAT_LOAD_DWORD %vgpr1_vgpr2, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4 from %ir.global4)
    %vgpr3_vgpr4_vgpr5_vgpr6 = FLAT_LOAD_DWORDX4 %vgpr7_vgpr8, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 16 from %ir.global16)
    %vgpr0 = V_MOV_B32_e32 %vgpr1, implicit %exec
    S_BRANCH %bb.1

  bb.1:
    successors: %bb.2
    %vgpr0 = FLAT_LOAD_DWORD %vgpr1_vgpr2, 0, 0, 0, implicit %exec, implicit %flat_scr
    %vgpr3_vgpr4_vgpr5_vgpr6 = FLAT_LOAD_DWORDX4 %vgpr7_vgpr8, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 16 from %ir.global16)
    %vgpr0 = V_MOV_B32_e32 %vgpr1, implicit %exec
    S_BRANCH %bb.2

  bb.2:
    %vgpr0 = FLAT_LOAD_DWORD %vgpr1_vgpr2, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4 from %ir.flat4)
    %vgpr3_vgpr4_vgpr5_vgpr6 = FLAT_LOAD_DWORDX4 %vgpr7_vgpr8, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 16 from %ir.flat16)
    %vgpr0 = V_MOV_B32_e32 %vgpr1, implicit %exec
    S_ENDPGM
...