; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -global-isel -mtriple=amdgcn-amd-amdhsa -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=CI %s
; RUN: llc -global-isel -mtriple=amdgcn-amd-amdhsa -mcpu=tonga -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=VI %s
; RUN: llc -global-isel -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GFX9 %s

; FIXME: Merge with other test. DS offset folding doesn't work due to
; register bank copies, and the no-return optimization is missing.

declare i32 @llvm.amdgcn.atomic.dec.i32.p1i32(i32 addrspace(1)* nocapture, i32, i32, i32, i1) #2
declare i32 @llvm.amdgcn.atomic.dec.i32.p3i32(i32 addrspace(3)* nocapture, i32, i32, i32, i1) #2
declare i32 @llvm.amdgcn.atomic.dec.i32.p0i32(i32* nocapture, i32, i32, i32, i1) #2

declare i64 @llvm.amdgcn.atomic.dec.i64.p1i64(i64 addrspace(1)* nocapture, i64, i32, i32, i1) #2
declare i64 @llvm.amdgcn.atomic.dec.i64.p3i64(i64 addrspace(3)* nocapture, i64, i32, i32, i1) #2
declare i64 @llvm.amdgcn.atomic.dec.i64.p0i64(i64* nocapture, i64, i32, i32, i1) #2

declare i32 @llvm.amdgcn.workitem.id.x() #1
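
; The three trailing scalar operands on the atomic.dec intrinsics encode the
; atomic ordering, synchronization scope, and volatility. Every call in this
; file passes (i32 0, i32 0, i1 false), so the tests only vary the pointer's
; address space and addressing pattern.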

define amdgpu_kernel void @lds_atomic_dec_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) #0 {
; CI-LABEL: lds_atomic_dec_ret_i32:
; CI: ; %bb.0:
; CI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; CI-NEXT: s_load_dword s2, s[4:5], 0x2
; CI-NEXT: v_mov_b32_e32 v0, 42
; CI-NEXT: s_mov_b32 m0, -1
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: v_mov_b32_e32 v1, s2
; CI-NEXT: ds_dec_rtn_u32 v2, v1, v0
; CI-NEXT: v_mov_b32_e32 v0, s0
; CI-NEXT: v_mov_b32_e32 v1, s1
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: flat_store_dword v[0:1], v2
; CI-NEXT: s_endpgm
;
; VI-LABEL: lds_atomic_dec_ret_i32:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; VI-NEXT: s_load_dword s2, s[4:5], 0x8
; VI-NEXT: v_mov_b32_e32 v0, 42
; VI-NEXT: s_mov_b32 m0, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v1, s2
; VI-NEXT: ds_dec_rtn_u32 v2, v1, v0
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: lds_atomic_dec_ret_i32:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
; GFX9-NEXT: s_load_dword s0, s[0:1], 0x2c
; GFX9-NEXT: v_mov_b32_e32 v1, 42
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s0
; GFX9-NEXT: ds_dec_rtn_u32 v2, v0, v1
; GFX9-NEXT: v_mov_b32_e32 v0, s2
; GFX9-NEXT: v_mov_b32_e32 v1, s3
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_store_dword v[0:1], v2, off
; GFX9-NEXT: s_endpgm
  %result = call i32 @llvm.amdgcn.atomic.dec.i32.p3i32(i32 addrspace(3)* %ptr, i32 42, i32 0, i32 0, i1 false)
  store i32 %result, i32 addrspace(1)* %out
  ret void
}
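
; In the _offset variants below, the 16-byte offset (4 x i32) is added to the
; pointer with an explicit s_add_u32 rather than being folded into the DS
; instruction's offset field; this is the DS offset-folding gap noted in the
; FIXME above.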

define amdgpu_kernel void @lds_atomic_dec_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) #0 {
; CI-LABEL: lds_atomic_dec_ret_i32_offset:
; CI: ; %bb.0:
; CI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; CI-NEXT: s_load_dword s2, s[4:5], 0x2
; CI-NEXT: v_mov_b32_e32 v0, 42
; CI-NEXT: s_mov_b32 m0, -1
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: s_add_u32 s2, s2, 16
; CI-NEXT: v_mov_b32_e32 v1, s2
; CI-NEXT: ds_dec_rtn_u32 v2, v1, v0
; CI-NEXT: v_mov_b32_e32 v0, s0
; CI-NEXT: v_mov_b32_e32 v1, s1
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: flat_store_dword v[0:1], v2
; CI-NEXT: s_endpgm
;
; VI-LABEL: lds_atomic_dec_ret_i32_offset:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; VI-NEXT: s_load_dword s2, s[4:5], 0x8
; VI-NEXT: v_mov_b32_e32 v0, 42
; VI-NEXT: s_mov_b32 m0, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_add_u32 s2, s2, 16
; VI-NEXT: v_mov_b32_e32 v1, s2
; VI-NEXT: ds_dec_rtn_u32 v2, v1, v0
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: lds_atomic_dec_ret_i32_offset:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
; GFX9-NEXT: s_load_dword s0, s[0:1], 0x2c
; GFX9-NEXT: v_mov_b32_e32 v1, 42
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_add_u32 s0, s0, 16
; GFX9-NEXT: v_mov_b32_e32 v0, s0
; GFX9-NEXT: ds_dec_rtn_u32 v2, v0, v1
; GFX9-NEXT: v_mov_b32_e32 v0, s2
; GFX9-NEXT: v_mov_b32_e32 v1, s3
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_store_dword v[0:1], v2, off
; GFX9-NEXT: s_endpgm
  %gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 4
  %result = call i32 @llvm.amdgcn.atomic.dec.i32.p3i32(i32 addrspace(3)* %gep, i32 42, i32 0, i32 0, i1 false)
  store i32 %result, i32 addrspace(1)* %out
  ret void
}
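
; The _noret variants still select the returning form of the atomic
; (ds_dec_rtn_u32, flat_atomic_dec ... glc) even though the result is unused;
; this is the missing no-return optimization noted in the FIXME above.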

define amdgpu_kernel void @lds_atomic_dec_noret_i32(i32 addrspace(3)* %ptr) nounwind {
; CI-LABEL: lds_atomic_dec_noret_i32:
; CI: ; %bb.0:
; CI-NEXT: s_load_dword s0, s[4:5], 0x0
; CI-NEXT: v_mov_b32_e32 v0, 42
; CI-NEXT: s_mov_b32 m0, -1
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: v_mov_b32_e32 v1, s0
; CI-NEXT: ds_dec_rtn_u32 v0, v1, v0
; CI-NEXT: s_endpgm
;
; VI-LABEL: lds_atomic_dec_noret_i32:
; VI: ; %bb.0:
; VI-NEXT: s_load_dword s0, s[4:5], 0x0
; VI-NEXT: v_mov_b32_e32 v0, 42
; VI-NEXT: s_mov_b32 m0, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v1, s0
; VI-NEXT: ds_dec_rtn_u32 v0, v1, v0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: lds_atomic_dec_noret_i32:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dword s0, s[0:1], 0x24
; GFX9-NEXT: v_mov_b32_e32 v1, 42
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s0
; GFX9-NEXT: ds_dec_rtn_u32 v0, v0, v1
; GFX9-NEXT: s_endpgm
  %result = call i32 @llvm.amdgcn.atomic.dec.i32.p3i32(i32 addrspace(3)* %ptr, i32 42, i32 0, i32 0, i1 false)
  ret void
}

define amdgpu_kernel void @lds_atomic_dec_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
; CI-LABEL: lds_atomic_dec_noret_i32_offset:
; CI: ; %bb.0:
; CI-NEXT: s_load_dword s0, s[4:5], 0x0
; CI-NEXT: v_mov_b32_e32 v0, 42
; CI-NEXT: s_mov_b32 m0, -1
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: s_add_u32 s0, s0, 16
; CI-NEXT: v_mov_b32_e32 v1, s0
; CI-NEXT: ds_dec_rtn_u32 v0, v1, v0
; CI-NEXT: s_endpgm
;
; VI-LABEL: lds_atomic_dec_noret_i32_offset:
; VI: ; %bb.0:
; VI-NEXT: s_load_dword s0, s[4:5], 0x0
; VI-NEXT: v_mov_b32_e32 v0, 42
; VI-NEXT: s_mov_b32 m0, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_add_u32 s0, s0, 16
; VI-NEXT: v_mov_b32_e32 v1, s0
; VI-NEXT: ds_dec_rtn_u32 v0, v1, v0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: lds_atomic_dec_noret_i32_offset:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dword s0, s[0:1], 0x24
; GFX9-NEXT: v_mov_b32_e32 v1, 42
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_add_u32 s0, s0, 16
; GFX9-NEXT: v_mov_b32_e32 v0, s0
; GFX9-NEXT: ds_dec_rtn_u32 v0, v0, v1
; GFX9-NEXT: s_endpgm
  %gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 4
  %result = call i32 @llvm.amdgcn.atomic.dec.i32.p3i32(i32 addrspace(3)* %gep, i32 42, i32 0, i32 0, i1 false)
  ret void
}

define amdgpu_kernel void @global_atomic_dec_ret_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %ptr) #0 {
; CI-LABEL: global_atomic_dec_ret_i32:
; CI: ; %bb.0:
; CI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; CI-NEXT: v_mov_b32_e32 v2, 42
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: v_mov_b32_e32 v0, s2
; CI-NEXT: v_mov_b32_e32 v1, s3
; CI-NEXT: flat_atomic_dec v2, v[0:1], v2 glc
; CI-NEXT: v_mov_b32_e32 v0, s0
; CI-NEXT: v_mov_b32_e32 v1, s1
; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CI-NEXT: flat_store_dword v[0:1], v2
; CI-NEXT: s_endpgm
;
; VI-LABEL: global_atomic_dec_ret_i32:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; VI-NEXT: v_mov_b32_e32 v2, 42
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s2
; VI-NEXT: v_mov_b32_e32 v1, s3
; VI-NEXT: flat_atomic_dec v2, v[0:1], v2 glc
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: global_atomic_dec_ret_i32:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
; GFX9-NEXT: v_mov_b32_e32 v2, 42
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s2
; GFX9-NEXT: v_mov_b32_e32 v1, s3
; GFX9-NEXT: flat_atomic_dec v2, v[0:1], v2 glc
; GFX9-NEXT: v_mov_b32_e32 v0, s0
; GFX9-NEXT: v_mov_b32_e32 v1, s1
; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_store_dword v[0:1], v2, off
; GFX9-NEXT: s_endpgm
  %result = call i32 @llvm.amdgcn.atomic.dec.i32.p1i32(i32 addrspace(1)* %ptr, i32 42, i32 0, i32 0, i1 false)
  store i32 %result, i32 addrspace(1)* %out
  ret void
}
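
; Global pointers are 64 bits wide, so in the _offset tests that follow the
; constant 16-byte offset is materialized with an s_add_u32/s_addc_u32 carry
; pair instead of being folded into the addressing mode.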

define amdgpu_kernel void @global_atomic_dec_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %ptr) #0 {
; CI-LABEL: global_atomic_dec_ret_i32_offset:
; CI: ; %bb.0:
; CI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; CI-NEXT: v_mov_b32_e32 v2, 42
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: s_add_u32 s2, s2, 16
; CI-NEXT: s_addc_u32 s3, s3, 0
; CI-NEXT: v_mov_b32_e32 v0, s2
; CI-NEXT: v_mov_b32_e32 v1, s3
; CI-NEXT: flat_atomic_dec v2, v[0:1], v2 glc
; CI-NEXT: v_mov_b32_e32 v0, s0
; CI-NEXT: v_mov_b32_e32 v1, s1
; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CI-NEXT: flat_store_dword v[0:1], v2
; CI-NEXT: s_endpgm
;
; VI-LABEL: global_atomic_dec_ret_i32_offset:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; VI-NEXT: v_mov_b32_e32 v2, 42
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_add_u32 s2, s2, 16
; VI-NEXT: s_addc_u32 s3, s3, 0
; VI-NEXT: v_mov_b32_e32 v0, s2
; VI-NEXT: v_mov_b32_e32 v1, s3
; VI-NEXT: flat_atomic_dec v2, v[0:1], v2 glc
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: global_atomic_dec_ret_i32_offset:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
; GFX9-NEXT: v_mov_b32_e32 v2, 42
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_add_u32 s2, s2, 16
; GFX9-NEXT: s_addc_u32 s3, s3, 0
; GFX9-NEXT: v_mov_b32_e32 v0, s2
; GFX9-NEXT: v_mov_b32_e32 v1, s3
; GFX9-NEXT: flat_atomic_dec v2, v[0:1], v2 glc
; GFX9-NEXT: v_mov_b32_e32 v0, s0
; GFX9-NEXT: v_mov_b32_e32 v1, s1
; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_store_dword v[0:1], v2, off
; GFX9-NEXT: s_endpgm
  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
  %result = call i32 @llvm.amdgcn.atomic.dec.i32.p1i32(i32 addrspace(1)* %gep, i32 42, i32 0, i32 0, i1 false)
  store i32 %result, i32 addrspace(1)* %out
  ret void
}
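
; Even with addrspace(1) pointers, the atomic itself is currently selected as
; flat_atomic_dec on all three targets; only the store of the result is
; emitted as global_store_dword on GFX9.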

define amdgpu_kernel void @global_atomic_dec_noret_i32(i32 addrspace(1)* %ptr) nounwind {
; CI-LABEL: global_atomic_dec_noret_i32:
; CI: ; %bb.0:
; CI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; CI-NEXT: v_mov_b32_e32 v2, 42
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: v_mov_b32_e32 v0, s0
; CI-NEXT: v_mov_b32_e32 v1, s1
; CI-NEXT: flat_atomic_dec v0, v[0:1], v2 glc
; CI-NEXT: s_endpgm
;
; VI-LABEL: global_atomic_dec_noret_i32:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; VI-NEXT: v_mov_b32_e32 v2, 42
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: flat_atomic_dec v0, v[0:1], v2 glc
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: global_atomic_dec_noret_i32:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; GFX9-NEXT: v_mov_b32_e32 v2, 42
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s0
; GFX9-NEXT: v_mov_b32_e32 v1, s1
; GFX9-NEXT: flat_atomic_dec v0, v[0:1], v2 glc
; GFX9-NEXT: s_endpgm
  %result = call i32 @llvm.amdgcn.atomic.dec.i32.p1i32(i32 addrspace(1)* %ptr, i32 42, i32 0, i32 0, i1 false)
  ret void
}

define amdgpu_kernel void @global_atomic_dec_noret_i32_offset(i32 addrspace(1)* %ptr) nounwind {
; CI-LABEL: global_atomic_dec_noret_i32_offset:
; CI: ; %bb.0:
; CI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; CI-NEXT: v_mov_b32_e32 v2, 42
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: s_add_u32 s0, s0, 16
; CI-NEXT: s_addc_u32 s1, s1, 0
; CI-NEXT: v_mov_b32_e32 v0, s0
; CI-NEXT: v_mov_b32_e32 v1, s1
; CI-NEXT: flat_atomic_dec v0, v[0:1], v2 glc
; CI-NEXT: s_endpgm
;
; VI-LABEL: global_atomic_dec_noret_i32_offset:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; VI-NEXT: v_mov_b32_e32 v2, 42
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_add_u32 s0, s0, 16
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: flat_atomic_dec v0, v[0:1], v2 glc
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: global_atomic_dec_noret_i32_offset:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; GFX9-NEXT: v_mov_b32_e32 v2, 42
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_add_u32 s0, s0, 16
; GFX9-NEXT: s_addc_u32 s1, s1, 0
; GFX9-NEXT: v_mov_b32_e32 v0, s0
; GFX9-NEXT: v_mov_b32_e32 v1, s1
; GFX9-NEXT: flat_atomic_dec v0, v[0:1], v2 glc
; GFX9-NEXT: s_endpgm
  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
  %result = call i32 @llvm.amdgcn.atomic.dec.i32.p1i32(i32 addrspace(1)* %gep, i32 42, i32 0, i32 0, i1 false)
  ret void
}

define amdgpu_kernel void @global_atomic_dec_ret_i32_offset_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %ptr) #0 {
; CI-LABEL: global_atomic_dec_ret_i32_offset_addr64:
; CI: ; %bb.0:
; CI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; CI-NEXT: v_ashrrev_i32_e32 v1, 31, v0
; CI-NEXT: v_lshl_b64 v[0:1], v[0:1], 2
; CI-NEXT: v_mov_b32_e32 v4, 42
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: v_mov_b32_e32 v2, s2
; CI-NEXT: v_mov_b32_e32 v3, s3
; CI-NEXT: v_add_i32_e32 v2, vcc, v2, v0
; CI-NEXT: v_addc_u32_e32 v3, vcc, v3, v1, vcc
; CI-NEXT: v_add_i32_e32 v2, vcc, 20, v2
; CI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; CI-NEXT: flat_atomic_dec v4, v[2:3], v4 glc
; CI-NEXT: v_mov_b32_e32 v3, s1
; CI-NEXT: v_mov_b32_e32 v2, s0
; CI-NEXT: v_add_i32_e32 v0, vcc, v2, v0
; CI-NEXT: v_addc_u32_e32 v1, vcc, v3, v1, vcc
; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CI-NEXT: flat_store_dword v[0:1], v4
; CI-NEXT: s_endpgm
;
; VI-LABEL: global_atomic_dec_ret_i32_offset_addr64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; VI-NEXT: v_ashrrev_i32_e32 v1, 31, v0
; VI-NEXT: v_lshlrev_b64 v[0:1], 2, v[0:1]
; VI-NEXT: v_mov_b32_e32 v4, 42
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: v_mov_b32_e32 v3, s3
; VI-NEXT: v_add_u32_e32 v2, vcc, v2, v0
; VI-NEXT: v_addc_u32_e32 v3, vcc, v3, v1, vcc
; VI-NEXT: v_add_u32_e32 v2, vcc, 20, v2
; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; VI-NEXT: flat_atomic_dec v4, v[2:3], v4 glc
; VI-NEXT: v_mov_b32_e32 v3, s1
; VI-NEXT: v_mov_b32_e32 v2, s0
; VI-NEXT: v_add_u32_e32 v0, vcc, v2, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, v3, v1, vcc
; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; VI-NEXT: flat_store_dword v[0:1], v4
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: global_atomic_dec_ret_i32_offset_addr64:
; GFX9: ; %bb.0:
; GFX9-NEXT: v_ashrrev_i32_e32 v1, 31, v0
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
; GFX9-NEXT: v_mul_lo_u32 v2, 0, v0
; GFX9-NEXT: v_mul_lo_u32 v1, 4, v1
; GFX9-NEXT: v_mul_hi_u32 v3, 4, v0
; GFX9-NEXT: v_mul_lo_u32 v4, 4, v0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s3
; GFX9-NEXT: v_add_u32_e32 v1, v2, v1
; GFX9-NEXT: v_add_u32_e32 v3, v1, v3
; GFX9-NEXT: v_add_co_u32_e32 v1, vcc, s2, v4
; GFX9-NEXT: v_addc_co_u32_e32 v2, vcc, v0, v3, vcc
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, 20, v1
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v2, vcc
; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s0, v4
; GFX9-NEXT: v_mov_b32_e32 v5, s1
; GFX9-NEXT: v_mov_b32_e32 v4, 42
; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, v5, v3, vcc
; GFX9-NEXT: flat_atomic_dec v0, v[0:1], v4 glc
; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_store_dword v[2:3], v0, off
; GFX9-NEXT: s_endpgm
  %id = call i32 @llvm.amdgcn.workitem.id.x()
  %gep.tid = getelementptr i32, i32 addrspace(1)* %ptr, i32 %id
  %out.gep = getelementptr i32, i32 addrspace(1)* %out, i32 %id
  %gep = getelementptr i32, i32 addrspace(1)* %gep.tid, i32 5
  %result = call i32 @llvm.amdgcn.atomic.dec.i32.p1i32(i32 addrspace(1)* %gep, i32 42, i32 0, i32 0, i1 false)
  store i32 %result, i32 addrspace(1)* %out.gep
  ret void
}
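
; Note the GFX9 addr64 checks above: GlobalISel scales the sign-extended
; thread ID by 4 through a v_mul_lo/v_mul_hi expansion of the full 64-bit
; multiply, where the CI/VI checks use a single 64-bit shift
; (v_lshl_b64 / v_lshlrev_b64).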

define amdgpu_kernel void @global_atomic_dec_noret_i32_offset_addr64(i32 addrspace(1)* %ptr) #0 {
; CI-LABEL: global_atomic_dec_noret_i32_offset_addr64:
; CI: ; %bb.0:
; CI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; CI-NEXT: v_ashrrev_i32_e32 v1, 31, v0
; CI-NEXT: v_lshl_b64 v[0:1], v[0:1], 2
; CI-NEXT: v_mov_b32_e32 v4, 42
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: v_mov_b32_e32 v3, s1
; CI-NEXT: v_mov_b32_e32 v2, s0
; CI-NEXT: v_add_i32_e32 v0, vcc, v2, v0
; CI-NEXT: v_addc_u32_e32 v1, vcc, v3, v1, vcc
; CI-NEXT: v_add_i32_e32 v0, vcc, 20, v0
; CI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; CI-NEXT: flat_atomic_dec v0, v[0:1], v4 glc
; CI-NEXT: s_endpgm
;
; VI-LABEL: global_atomic_dec_noret_i32_offset_addr64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; VI-NEXT: v_ashrrev_i32_e32 v1, 31, v0
; VI-NEXT: v_lshlrev_b64 v[0:1], 2, v[0:1]
; VI-NEXT: v_mov_b32_e32 v4, 42
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v3, s1
; VI-NEXT: v_mov_b32_e32 v2, s0
; VI-NEXT: v_add_u32_e32 v0, vcc, v2, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, v3, v1, vcc
; VI-NEXT: v_add_u32_e32 v0, vcc, 20, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_atomic_dec v0, v[0:1], v4 glc
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: global_atomic_dec_noret_i32_offset_addr64:
; GFX9: ; %bb.0:
; GFX9-NEXT: v_ashrrev_i32_e32 v1, 31, v0
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; GFX9-NEXT: v_mul_lo_u32 v2, 0, v0
; GFX9-NEXT: v_mul_lo_u32 v1, 4, v1
; GFX9-NEXT: v_mul_hi_u32 v3, 4, v0
; GFX9-NEXT: v_mul_lo_u32 v0, 4, v0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v4, s1
; GFX9-NEXT: v_add_u32_e32 v1, v2, v1
; GFX9-NEXT: v_add_u32_e32 v1, v1, v3
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s0, v0
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v4, v1, vcc
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, 20, v0
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
; GFX9-NEXT: v_mov_b32_e32 v2, 42
; GFX9-NEXT: flat_atomic_dec v0, v[0:1], v2 glc
; GFX9-NEXT: s_endpgm
  %id = call i32 @llvm.amdgcn.workitem.id.x()
  %gep.tid = getelementptr i32, i32 addrspace(1)* %ptr, i32 %id
  %gep = getelementptr i32, i32 addrspace(1)* %gep.tid, i32 5
  %result = call i32 @llvm.amdgcn.atomic.dec.i32.p1i32(i32 addrspace(1)* %gep, i32 42, i32 0, i32 0, i1 false)
  ret void
}
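
; The remaining tests exercise flat (addrspace(0)) pointers; here all three
; targets select flat_atomic_dec and store the result with flat_store_dword.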

define amdgpu_kernel void @flat_atomic_dec_ret_i32(i32* %out, i32* %ptr) #0 {
; CI-LABEL: flat_atomic_dec_ret_i32:
; CI: ; %bb.0:
; CI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; CI-NEXT: v_mov_b32_e32 v2, 42
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: v_mov_b32_e32 v0, s2
; CI-NEXT: v_mov_b32_e32 v1, s3
; CI-NEXT: flat_atomic_dec v2, v[0:1], v2 glc
; CI-NEXT: v_mov_b32_e32 v0, s0
; CI-NEXT: v_mov_b32_e32 v1, s1
; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CI-NEXT: flat_store_dword v[0:1], v2
; CI-NEXT: s_endpgm
;
; VI-LABEL: flat_atomic_dec_ret_i32:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; VI-NEXT: v_mov_b32_e32 v2, 42
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s2
; VI-NEXT: v_mov_b32_e32 v1, s3
; VI-NEXT: flat_atomic_dec v2, v[0:1], v2 glc
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: flat_atomic_dec_ret_i32:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
; GFX9-NEXT: v_mov_b32_e32 v2, 42
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s2
; GFX9-NEXT: v_mov_b32_e32 v1, s3
; GFX9-NEXT: flat_atomic_dec v2, v[0:1], v2 glc
; GFX9-NEXT: v_mov_b32_e32 v0, s0
; GFX9-NEXT: v_mov_b32_e32 v1, s1
; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX9-NEXT: flat_store_dword v[0:1], v2
; GFX9-NEXT: s_endpgm
  %result = call i32 @llvm.amdgcn.atomic.dec.i32.p0i32(i32* %ptr, i32 42, i32 0, i32 0, i1 false)
  store i32 %result, i32* %out
  ret void
}

define amdgpu_kernel void @flat_atomic_dec_ret_i32_offset(i32* %out, i32* %ptr) #0 {
; CI-LABEL: flat_atomic_dec_ret_i32_offset:
; CI: ; %bb.0:
; CI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; CI-NEXT: v_mov_b32_e32 v2, 42
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: s_add_u32 s2, s2, 16
; CI-NEXT: s_addc_u32 s3, s3, 0
; CI-NEXT: v_mov_b32_e32 v0, s2
; CI-NEXT: v_mov_b32_e32 v1, s3
; CI-NEXT: flat_atomic_dec v2, v[0:1], v2 glc
; CI-NEXT: v_mov_b32_e32 v0, s0
; CI-NEXT: v_mov_b32_e32 v1, s1
; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CI-NEXT: flat_store_dword v[0:1], v2
; CI-NEXT: s_endpgm
;
; VI-LABEL: flat_atomic_dec_ret_i32_offset:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; VI-NEXT: v_mov_b32_e32 v2, 42
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_add_u32 s2, s2, 16
; VI-NEXT: s_addc_u32 s3, s3, 0
; VI-NEXT: v_mov_b32_e32 v0, s2
; VI-NEXT: v_mov_b32_e32 v1, s3
; VI-NEXT: flat_atomic_dec v2, v[0:1], v2 glc
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: flat_atomic_dec_ret_i32_offset:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
; GFX9-NEXT: v_mov_b32_e32 v2, 42
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_add_u32 s2, s2, 16
; GFX9-NEXT: s_addc_u32 s3, s3, 0
; GFX9-NEXT: v_mov_b32_e32 v0, s2
; GFX9-NEXT: v_mov_b32_e32 v1, s3
; GFX9-NEXT: flat_atomic_dec v2, v[0:1], v2 glc
; GFX9-NEXT: v_mov_b32_e32 v0, s0
; GFX9-NEXT: v_mov_b32_e32 v1, s1
; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX9-NEXT: flat_store_dword v[0:1], v2
; GFX9-NEXT: s_endpgm
  %gep = getelementptr i32, i32* %ptr, i32 4
  %result = call i32 @llvm.amdgcn.atomic.dec.i32.p0i32(i32* %gep, i32 42, i32 0, i32 0, i1 false)
  store i32 %result, i32* %out
  ret void
}

define amdgpu_kernel void @flat_atomic_dec_noret_i32(i32* %ptr) nounwind {
; CI-LABEL: flat_atomic_dec_noret_i32:
; CI: ; %bb.0:
; CI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; CI-NEXT: v_mov_b32_e32 v2, 42
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: v_mov_b32_e32 v0, s0
; CI-NEXT: v_mov_b32_e32 v1, s1
; CI-NEXT: flat_atomic_dec v0, v[0:1], v2 glc
; CI-NEXT: s_endpgm
;
; VI-LABEL: flat_atomic_dec_noret_i32:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; VI-NEXT: v_mov_b32_e32 v2, 42
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: flat_atomic_dec v0, v[0:1], v2 glc
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: flat_atomic_dec_noret_i32:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; GFX9-NEXT: v_mov_b32_e32 v2, 42
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s0
; GFX9-NEXT: v_mov_b32_e32 v1, s1
; GFX9-NEXT: flat_atomic_dec v0, v[0:1], v2 glc
; GFX9-NEXT: s_endpgm
  %result = call i32 @llvm.amdgcn.atomic.dec.i32.p0i32(i32* %ptr, i32 42, i32 0, i32 0, i1 false)
  ret void
}

define amdgpu_kernel void @flat_atomic_dec_noret_i32_offset(i32* %ptr) nounwind {
; CI-LABEL: flat_atomic_dec_noret_i32_offset:
; CI: ; %bb.0:
; CI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; CI-NEXT: v_mov_b32_e32 v2, 42
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: s_add_u32 s0, s0, 16
; CI-NEXT: s_addc_u32 s1, s1, 0
; CI-NEXT: v_mov_b32_e32 v0, s0
; CI-NEXT: v_mov_b32_e32 v1, s1
; CI-NEXT: flat_atomic_dec v0, v[0:1], v2 glc
; CI-NEXT: s_endpgm
;
; VI-LABEL: flat_atomic_dec_noret_i32_offset:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; VI-NEXT: v_mov_b32_e32 v2, 42
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_add_u32 s0, s0, 16
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: flat_atomic_dec v0, v[0:1], v2 glc
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: flat_atomic_dec_noret_i32_offset:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; GFX9-NEXT: v_mov_b32_e32 v2, 42
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_add_u32 s0, s0, 16
; GFX9-NEXT: s_addc_u32 s1, s1, 0
; GFX9-NEXT: v_mov_b32_e32 v0, s0
; GFX9-NEXT: v_mov_b32_e32 v1, s1
; GFX9-NEXT: flat_atomic_dec v0, v[0:1], v2 glc
; GFX9-NEXT: s_endpgm
  %gep = getelementptr i32, i32* %ptr, i32 4
  %result = call i32 @llvm.amdgcn.atomic.dec.i32.p0i32(i32* %gep, i32 42, i32 0, i32 0, i1 false)
  ret void
}

define amdgpu_kernel void @flat_atomic_dec_ret_i32_offset_addr64(i32* %out, i32* %ptr) #0 {
; CI-LABEL: flat_atomic_dec_ret_i32_offset_addr64:
; CI: ; %bb.0:
; CI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; CI-NEXT: v_ashrrev_i32_e32 v1, 31, v0
; CI-NEXT: v_lshl_b64 v[0:1], v[0:1], 2
; CI-NEXT: v_mov_b32_e32 v4, 42
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: v_mov_b32_e32 v2, s2
; CI-NEXT: v_mov_b32_e32 v3, s3
; CI-NEXT: v_add_i32_e32 v2, vcc, v2, v0
; CI-NEXT: v_addc_u32_e32 v3, vcc, v3, v1, vcc
; CI-NEXT: v_add_i32_e32 v2, vcc, 20, v2
; CI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; CI-NEXT: flat_atomic_dec v4, v[2:3], v4 glc
; CI-NEXT: v_mov_b32_e32 v3, s1
; CI-NEXT: v_mov_b32_e32 v2, s0
; CI-NEXT: v_add_i32_e32 v0, vcc, v2, v0
; CI-NEXT: v_addc_u32_e32 v1, vcc, v3, v1, vcc
; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CI-NEXT: flat_store_dword v[0:1], v4
; CI-NEXT: s_endpgm
;
; VI-LABEL: flat_atomic_dec_ret_i32_offset_addr64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; VI-NEXT: v_ashrrev_i32_e32 v1, 31, v0
; VI-NEXT: v_lshlrev_b64 v[0:1], 2, v[0:1]
; VI-NEXT: v_mov_b32_e32 v4, 42
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: v_mov_b32_e32 v3, s3
; VI-NEXT: v_add_u32_e32 v2, vcc, v2, v0
; VI-NEXT: v_addc_u32_e32 v3, vcc, v3, v1, vcc
; VI-NEXT: v_add_u32_e32 v2, vcc, 20, v2
; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; VI-NEXT: flat_atomic_dec v4, v[2:3], v4 glc
; VI-NEXT: v_mov_b32_e32 v3, s1
; VI-NEXT: v_mov_b32_e32 v2, s0
; VI-NEXT: v_add_u32_e32 v0, vcc, v2, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, v3, v1, vcc
; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
backwards, and only considers one operand at a time, relative to
operands it already has. It therefore poorly handles the case where
there is already a constant bus operand user. If all operands are
copies, it's somewhat simpler to consider all input operands at once
to choose the optimal constant bus user.
Since the failure mode for constant bus violations is now a verifier
error and not an selection failure, this moves towards a place where
we can turn on the fallback mode. The SGPR copy folding optimizations
can be left for later.
2020-01-14 00:24:25 +08:00
|
|
|
; VI-NEXT: flat_store_dword v[0:1], v4
|
2020-01-18 09:51:01 +08:00
|
|
|
; VI-NEXT: s_endpgm
|
|
|
|
; GFX9-LABEL: flat_atomic_dec_ret_i32_offset_addr64:
|
|
|
|
; GFX9: ; %bb.0:
|
|
|
|
; GFX9-NEXT: v_ashrrev_i32_e32 v1, 31, v0
|
|
|
|
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
|
|
|
|
; GFX9-NEXT: v_mul_lo_u32 v2, 0, v0
|
|
|
|
; GFX9-NEXT: v_mul_lo_u32 v1, 4, v1
|
|
|
|
; GFX9-NEXT: v_mul_hi_u32 v3, 4, v0
|
|
|
|
; GFX9-NEXT: v_mul_lo_u32 v4, 4, v0
|
|
|
|
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
|
|
|
|
; GFX9-NEXT: v_mov_b32_e32 v0, s3
|
|
|
|
; GFX9-NEXT: v_add_u32_e32 v1, v2, v1
|
|
|
|
; GFX9-NEXT: v_add_u32_e32 v3, v1, v3
|
|
|
|
; GFX9-NEXT: v_add_co_u32_e32 v1, vcc, s2, v4
|
|
|
|
; GFX9-NEXT: v_addc_co_u32_e32 v2, vcc, v0, v3, vcc
|
|
|
|
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, 20, v1
|
|
|
|
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v2, vcc
|
|
|
|
; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s0, v4
|
|
|
|
; GFX9-NEXT: v_mov_b32_e32 v5, s1
|
|
|
|
; GFX9-NEXT: v_mov_b32_e32 v4, 42
|
|
|
|
; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, v5, v3, vcc
|
|
|
|
; GFX9-NEXT: flat_atomic_dec v0, v[0:1], v4 glc
|
|
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
|
|
|
|
; GFX9-NEXT: flat_store_dword v[2:3], v0
|
|
|
|
; GFX9-NEXT: s_endpgm
|
|
|
|
%id = call i32 @llvm.amdgcn.workitem.id.x()
|
|
|
|
%gep.tid = getelementptr i32, i32* %ptr, i32 %id
|
|
|
|
%out.gep = getelementptr i32, i32* %out, i32 %id
|
|
|
|
%gep = getelementptr i32, i32* %gep.tid, i32 5
|
|
|
|
%result = call i32 @llvm.amdgcn.atomic.dec.i32.p0i32(i32* %gep, i32 42, i32 0, i32 0, i1 false)
|
|
|
|
store i32 %result, i32* %out.gep
|
|
|
|
ret void
|
|
|
|
}
|
|
|
|
|
|
|
|
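; Flat i32 decrement of a per-thread address plus a 20-byte offset; the value is unused, so no store follows the atomic.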
define amdgpu_kernel void @flat_atomic_dec_noret_i32_offset_addr64(i32* %ptr) #0 {
; CI-LABEL: flat_atomic_dec_noret_i32_offset_addr64:
; CI: ; %bb.0:
; CI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; CI-NEXT: v_ashrrev_i32_e32 v1, 31, v0
; CI-NEXT: v_lshl_b64 v[0:1], v[0:1], 2
; CI-NEXT: v_mov_b32_e32 v4, 42
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: v_mov_b32_e32 v3, s1
; CI-NEXT: v_mov_b32_e32 v2, s0
; CI-NEXT: v_add_i32_e32 v0, vcc, v2, v0
; CI-NEXT: v_addc_u32_e32 v1, vcc, v3, v1, vcc
; CI-NEXT: v_add_i32_e32 v0, vcc, 20, v0
; CI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; CI-NEXT: flat_atomic_dec v0, v[0:1], v4 glc
; CI-NEXT: s_endpgm
;
; VI-LABEL: flat_atomic_dec_noret_i32_offset_addr64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; VI-NEXT: v_ashrrev_i32_e32 v1, 31, v0
; VI-NEXT: v_lshlrev_b64 v[0:1], 2, v[0:1]
; VI-NEXT: v_mov_b32_e32 v4, 42
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v3, s1
; VI-NEXT: v_mov_b32_e32 v2, s0
; VI-NEXT: v_add_u32_e32 v0, vcc, v2, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, v3, v1, vcc
; VI-NEXT: v_add_u32_e32 v0, vcc, 20, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_atomic_dec v0, v[0:1], v4 glc
; VI-NEXT: s_endpgm
; GFX9-LABEL: flat_atomic_dec_noret_i32_offset_addr64:
; GFX9: ; %bb.0:
; GFX9-NEXT: v_ashrrev_i32_e32 v1, 31, v0
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; GFX9-NEXT: v_mul_lo_u32 v2, 0, v0
; GFX9-NEXT: v_mul_lo_u32 v1, 4, v1
; GFX9-NEXT: v_mul_hi_u32 v3, 4, v0
; GFX9-NEXT: v_mul_lo_u32 v0, 4, v0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v4, s1
; GFX9-NEXT: v_add_u32_e32 v1, v2, v1
; GFX9-NEXT: v_add_u32_e32 v1, v1, v3
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s0, v0
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v4, v1, vcc
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, 20, v0
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
; GFX9-NEXT: v_mov_b32_e32 v2, 42
; GFX9-NEXT: flat_atomic_dec v0, v[0:1], v2 glc
; GFX9-NEXT: s_endpgm
  %id = call i32 @llvm.amdgcn.workitem.id.x()
  %gep.tid = getelementptr i32, i32* %ptr, i32 %id
  %gep = getelementptr i32, i32* %gep.tid, i32 5
  %result = call i32 @llvm.amdgcn.atomic.dec.i32.p0i32(i32* %gep, i32 42, i32 0, i32 0, i1 false)
  ret void
}

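; Plain 64-bit flat decrement; the pre-decrement value is stored to %out.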
define amdgpu_kernel void @flat_atomic_dec_ret_i64(i64* %out, i64* %ptr) #0 {
; CI-LABEL: flat_atomic_dec_ret_i64:
; CI: ; %bb.0:
; CI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; CI-NEXT: v_mov_b32_e32 v2, 42
; CI-NEXT: v_mov_b32_e32 v3, 0
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: v_mov_b32_e32 v0, s2
; CI-NEXT: v_mov_b32_e32 v1, s3
; CI-NEXT: flat_atomic_dec_x2 v[0:1], v[0:1], v[2:3] glc
; CI-NEXT: v_mov_b32_e32 v3, s1
; CI-NEXT: v_mov_b32_e32 v2, s0
; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CI-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; CI-NEXT: s_endpgm
;
; VI-LABEL: flat_atomic_dec_ret_i64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; VI-NEXT: v_mov_b32_e32 v2, 42
; VI-NEXT: v_mov_b32_e32 v3, 0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s2
; VI-NEXT: v_mov_b32_e32 v1, s3
; VI-NEXT: flat_atomic_dec_x2 v[0:1], v[0:1], v[2:3] glc
; VI-NEXT: v_mov_b32_e32 v3, s1
; VI-NEXT: v_mov_b32_e32 v2, s0
; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; VI-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; VI-NEXT: s_endpgm
; GFX9-LABEL: flat_atomic_dec_ret_i64:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
; GFX9-NEXT: v_mov_b32_e32 v2, 42
; GFX9-NEXT: v_mov_b32_e32 v3, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s2
; GFX9-NEXT: v_mov_b32_e32 v1, s3
; GFX9-NEXT: flat_atomic_dec_x2 v[0:1], v[0:1], v[2:3] glc
; GFX9-NEXT: v_mov_b32_e32 v3, s1
; GFX9-NEXT: v_mov_b32_e32 v2, s0
; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_store_dwordx2 v[2:3], v[0:1], off
; GFX9-NEXT: s_endpgm
  %result = call i64 @llvm.amdgcn.atomic.dec.i64.p0i64(i64* %ptr, i64 42, i32 0, i32 0, i1 false)
  store i64 %result, i64* %out
  ret void
}

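; Same as above with a 32-byte offset (4 x i64) added to the pointer on the scalar unit.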
define amdgpu_kernel void @flat_atomic_dec_ret_i64_offset(i64* %out, i64* %ptr) #0 {
; CI-LABEL: flat_atomic_dec_ret_i64_offset:
; CI: ; %bb.0:
; CI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; CI-NEXT: v_mov_b32_e32 v2, 42
; CI-NEXT: v_mov_b32_e32 v3, 0
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: s_add_u32 s2, s2, 32
; CI-NEXT: s_addc_u32 s3, s3, 0
; CI-NEXT: v_mov_b32_e32 v0, s2
; CI-NEXT: v_mov_b32_e32 v1, s3
; CI-NEXT: flat_atomic_dec_x2 v[0:1], v[0:1], v[2:3] glc
; CI-NEXT: v_mov_b32_e32 v3, s1
; CI-NEXT: v_mov_b32_e32 v2, s0
; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CI-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; CI-NEXT: s_endpgm
;
; VI-LABEL: flat_atomic_dec_ret_i64_offset:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; VI-NEXT: v_mov_b32_e32 v2, 42
; VI-NEXT: v_mov_b32_e32 v3, 0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_add_u32 s2, s2, 32
; VI-NEXT: s_addc_u32 s3, s3, 0
; VI-NEXT: v_mov_b32_e32 v0, s2
; VI-NEXT: v_mov_b32_e32 v1, s3
; VI-NEXT: flat_atomic_dec_x2 v[0:1], v[0:1], v[2:3] glc
; VI-NEXT: v_mov_b32_e32 v3, s1
; VI-NEXT: v_mov_b32_e32 v2, s0
; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; VI-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; VI-NEXT: s_endpgm
; GFX9-LABEL: flat_atomic_dec_ret_i64_offset:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
; GFX9-NEXT: v_mov_b32_e32 v2, 42
; GFX9-NEXT: v_mov_b32_e32 v3, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_add_u32 s2, s2, 32
; GFX9-NEXT: s_addc_u32 s3, s3, 0
; GFX9-NEXT: v_mov_b32_e32 v0, s2
; GFX9-NEXT: v_mov_b32_e32 v1, s3
; GFX9-NEXT: flat_atomic_dec_x2 v[0:1], v[0:1], v[2:3] glc
; GFX9-NEXT: v_mov_b32_e32 v3, s1
; GFX9-NEXT: v_mov_b32_e32 v2, s0
; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_store_dwordx2 v[2:3], v[0:1], off
; GFX9-NEXT: s_endpgm
  %gep = getelementptr i64, i64* %ptr, i32 4
  %result = call i64 @llvm.amdgcn.atomic.dec.i64.p0i64(i64* %gep, i64 42, i32 0, i32 0, i1 false)
  store i64 %result, i64* %out
  ret void
}

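; 64-bit flat decrement with the result unused.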
define amdgpu_kernel void @flat_atomic_dec_noret_i64(i64* %ptr) nounwind {
; CI-LABEL: flat_atomic_dec_noret_i64:
; CI: ; %bb.0:
; CI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; CI-NEXT: v_mov_b32_e32 v2, 42
; CI-NEXT: v_mov_b32_e32 v3, 0
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: v_mov_b32_e32 v0, s0
; CI-NEXT: v_mov_b32_e32 v1, s1
; CI-NEXT: flat_atomic_dec_x2 v[0:1], v[0:1], v[2:3] glc
; CI-NEXT: s_endpgm
;
; VI-LABEL: flat_atomic_dec_noret_i64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; VI-NEXT: v_mov_b32_e32 v2, 42
; VI-NEXT: v_mov_b32_e32 v3, 0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: flat_atomic_dec_x2 v[0:1], v[0:1], v[2:3] glc
; VI-NEXT: s_endpgm
; GFX9-LABEL: flat_atomic_dec_noret_i64:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; GFX9-NEXT: v_mov_b32_e32 v2, 42
; GFX9-NEXT: v_mov_b32_e32 v3, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s0
; GFX9-NEXT: v_mov_b32_e32 v1, s1
; GFX9-NEXT: flat_atomic_dec_x2 v[0:1], v[0:1], v[2:3] glc
; GFX9-NEXT: s_endpgm
  %result = call i64 @llvm.amdgcn.atomic.dec.i64.p0i64(i64* %ptr, i64 42, i32 0, i32 0, i1 false)
  ret void
}

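; No-return variant with a 32-byte offset computed with s_add_u32/s_addc_u32.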
define amdgpu_kernel void @flat_atomic_dec_noret_i64_offset(i64* %ptr) nounwind {
; CI-LABEL: flat_atomic_dec_noret_i64_offset:
; CI: ; %bb.0:
; CI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; CI-NEXT: v_mov_b32_e32 v2, 42
; CI-NEXT: v_mov_b32_e32 v3, 0
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: s_add_u32 s0, s0, 32
; CI-NEXT: s_addc_u32 s1, s1, 0
; CI-NEXT: v_mov_b32_e32 v0, s0
; CI-NEXT: v_mov_b32_e32 v1, s1
; CI-NEXT: flat_atomic_dec_x2 v[0:1], v[0:1], v[2:3] glc
; CI-NEXT: s_endpgm
;
; VI-LABEL: flat_atomic_dec_noret_i64_offset:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; VI-NEXT: v_mov_b32_e32 v2, 42
; VI-NEXT: v_mov_b32_e32 v3, 0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_add_u32 s0, s0, 32
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: flat_atomic_dec_x2 v[0:1], v[0:1], v[2:3] glc
; VI-NEXT: s_endpgm
; GFX9-LABEL: flat_atomic_dec_noret_i64_offset:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; GFX9-NEXT: v_mov_b32_e32 v2, 42
; GFX9-NEXT: v_mov_b32_e32 v3, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_add_u32 s0, s0, 32
; GFX9-NEXT: s_addc_u32 s1, s1, 0
; GFX9-NEXT: v_mov_b32_e32 v0, s0
; GFX9-NEXT: v_mov_b32_e32 v1, s1
; GFX9-NEXT: flat_atomic_dec_x2 v[0:1], v[0:1], v[2:3] glc
; GFX9-NEXT: s_endpgm
  %gep = getelementptr i64, i64* %ptr, i32 4
  %result = call i64 @llvm.amdgcn.atomic.dec.i64.p0i64(i64* %gep, i64 42, i32 0, i32 0, i1 false)
  ret void
}

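; Per-thread i64 pointer plus a 40-byte offset; the returned value is stored to a per-thread slot in %out.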
define amdgpu_kernel void @flat_atomic_dec_ret_i64_offset_addr64(i64* %out, i64* %ptr) #0 {
; CI-LABEL: flat_atomic_dec_ret_i64_offset_addr64:
; CI: ; %bb.0:
; CI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; CI-NEXT: v_ashrrev_i32_e32 v1, 31, v0
; CI-NEXT: v_lshl_b64 v[0:1], v[0:1], 3
; CI-NEXT: v_mov_b32_e32 v2, 42
; CI-NEXT: v_mov_b32_e32 v3, 0
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: v_mov_b32_e32 v5, s3
; CI-NEXT: v_mov_b32_e32 v4, s2
; CI-NEXT: v_add_i32_e32 v4, vcc, v4, v0
; CI-NEXT: v_addc_u32_e32 v5, vcc, v5, v1, vcc
; CI-NEXT: v_add_i32_e32 v4, vcc, 40, v4
; CI-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
; CI-NEXT: flat_atomic_dec_x2 v[2:3], v[4:5], v[2:3] glc
; CI-NEXT: v_mov_b32_e32 v5, s1
; CI-NEXT: v_mov_b32_e32 v4, s0
; CI-NEXT: v_add_i32_e32 v0, vcc, v4, v0
; CI-NEXT: v_addc_u32_e32 v1, vcc, v5, v1, vcc
; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CI-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; CI-NEXT: s_endpgm
;
; VI-LABEL: flat_atomic_dec_ret_i64_offset_addr64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; VI-NEXT: v_ashrrev_i32_e32 v1, 31, v0
; VI-NEXT: v_lshlrev_b64 v[0:1], 3, v[0:1]
; VI-NEXT: v_mov_b32_e32 v2, 42
; VI-NEXT: v_mov_b32_e32 v3, 0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v5, s3
; VI-NEXT: v_mov_b32_e32 v4, s2
; VI-NEXT: v_add_u32_e32 v4, vcc, v4, v0
; VI-NEXT: v_addc_u32_e32 v5, vcc, v5, v1, vcc
; VI-NEXT: v_add_u32_e32 v4, vcc, 40, v4
; VI-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
; VI-NEXT: flat_atomic_dec_x2 v[2:3], v[4:5], v[2:3] glc
; VI-NEXT: v_mov_b32_e32 v5, s1
; VI-NEXT: v_mov_b32_e32 v4, s0
; VI-NEXT: v_add_u32_e32 v0, vcc, v4, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, v5, v1, vcc
; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; VI-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; VI-NEXT: s_endpgm
; GFX9-LABEL: flat_atomic_dec_ret_i64_offset_addr64:
; GFX9: ; %bb.0:
; GFX9-NEXT: v_ashrrev_i32_e32 v1, 31, v0
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
; GFX9-NEXT: v_mul_lo_u32 v2, 0, v0
; GFX9-NEXT: v_mul_lo_u32 v1, 8, v1
; GFX9-NEXT: v_mul_hi_u32 v3, 8, v0
; GFX9-NEXT: v_mul_lo_u32 v4, 8, v0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s3
; GFX9-NEXT: v_add_u32_e32 v1, v2, v1
; GFX9-NEXT: v_add_u32_e32 v3, v1, v3
; GFX9-NEXT: v_add_co_u32_e32 v1, vcc, s2, v4
; GFX9-NEXT: v_addc_co_u32_e32 v2, vcc, v0, v3, vcc
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, 40, v1
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v2, vcc
; GFX9-NEXT: v_mov_b32_e32 v5, s1
; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s0, v4
; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, v5, v3, vcc
; GFX9-NEXT: v_mov_b32_e32 v4, 42
; GFX9-NEXT: v_mov_b32_e32 v5, 0
; GFX9-NEXT: flat_atomic_dec_x2 v[0:1], v[0:1], v[4:5] glc
; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX9-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; GFX9-NEXT: s_endpgm
  %id = call i32 @llvm.amdgcn.workitem.id.x()
  %gep.tid = getelementptr i64, i64* %ptr, i32 %id
  %out.gep = getelementptr i64, i64* %out, i32 %id
  %gep = getelementptr i64, i64* %gep.tid, i32 5
  %result = call i64 @llvm.amdgcn.atomic.dec.i64.p0i64(i64* %gep, i64 42, i32 0, i32 0, i1 false)
  store i64 %result, i64* %out.gep
  ret void
}

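; Per-thread i64 pointer plus a 40-byte offset, result unused.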
define amdgpu_kernel void @flat_atomic_dec_noret_i64_offset_addr64(i64* %ptr) #0 {
; CI-LABEL: flat_atomic_dec_noret_i64_offset_addr64:
; CI: ; %bb.0:
; CI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; CI-NEXT: v_ashrrev_i32_e32 v1, 31, v0
; CI-NEXT: v_lshl_b64 v[0:1], v[0:1], 3
; CI-NEXT: v_mov_b32_e32 v2, 42
; CI-NEXT: v_mov_b32_e32 v3, 0
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: v_mov_b32_e32 v5, s1
; CI-NEXT: v_mov_b32_e32 v4, s0
; CI-NEXT: v_add_i32_e32 v0, vcc, v4, v0
; CI-NEXT: v_addc_u32_e32 v1, vcc, v5, v1, vcc
; CI-NEXT: v_add_i32_e32 v0, vcc, 40, v0
; CI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; CI-NEXT: flat_atomic_dec_x2 v[0:1], v[0:1], v[2:3] glc
; CI-NEXT: s_endpgm
;
; VI-LABEL: flat_atomic_dec_noret_i64_offset_addr64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; VI-NEXT: v_ashrrev_i32_e32 v1, 31, v0
; VI-NEXT: v_lshlrev_b64 v[0:1], 3, v[0:1]
; VI-NEXT: v_mov_b32_e32 v2, 42
; VI-NEXT: v_mov_b32_e32 v3, 0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v5, s1
; VI-NEXT: v_mov_b32_e32 v4, s0
; VI-NEXT: v_add_u32_e32 v0, vcc, v4, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, v5, v1, vcc
; VI-NEXT: v_add_u32_e32 v0, vcc, 40, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_atomic_dec_x2 v[0:1], v[0:1], v[2:3] glc
; VI-NEXT: s_endpgm
; GFX9-LABEL: flat_atomic_dec_noret_i64_offset_addr64:
; GFX9: ; %bb.0:
; GFX9-NEXT: v_ashrrev_i32_e32 v1, 31, v0
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; GFX9-NEXT: v_mul_lo_u32 v2, 0, v0
; GFX9-NEXT: v_mul_lo_u32 v1, 8, v1
; GFX9-NEXT: v_mul_hi_u32 v3, 8, v0
; GFX9-NEXT: v_mul_lo_u32 v0, 8, v0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v4, s1
; GFX9-NEXT: v_add_u32_e32 v1, v2, v1
; GFX9-NEXT: v_add_u32_e32 v1, v1, v3
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s0, v0
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v4, v1, vcc
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, 40, v0
; GFX9-NEXT: v_mov_b32_e32 v2, 42
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
; GFX9-NEXT: v_mov_b32_e32 v3, 0
; GFX9-NEXT: flat_atomic_dec_x2 v[0:1], v[0:1], v[2:3] glc
; GFX9-NEXT: s_endpgm
  %id = call i32 @llvm.amdgcn.workitem.id.x()
  %gep.tid = getelementptr i64, i64* %ptr, i32 %id
  %gep = getelementptr i64, i64* %gep.tid, i32 5
  %result = call i64 @llvm.amdgcn.atomic.dec.i64.p0i64(i64* %gep, i64 42, i32 0, i32 0, i1 false)
  ret void
}

@lds0 = internal addrspace(3) global [512 x i32] undef
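; LDS decrement of an @lds0 element indexed by workitem id + 2; both the index and the returned value are stored.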
define amdgpu_kernel void @atomic_dec_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
; CI-LABEL: atomic_dec_shl_base_lds_0:
; CI: ; %bb.0:
; CI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; CI-NEXT: v_add_i32_e32 v2, vcc, 2, v0
; CI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; CI-NEXT: v_add_i32_e32 v0, vcc, 0, v0
; CI-NEXT: v_add_i32_e32 v0, vcc, 8, v0
; CI-NEXT: v_mov_b32_e32 v1, 9
; CI-NEXT: s_mov_b32 m0, -1
; CI-NEXT: ds_dec_rtn_u32 v3, v0, v1
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: v_mov_b32_e32 v0, s2
; CI-NEXT: v_mov_b32_e32 v1, s3
; CI-NEXT: flat_store_dword v[0:1], v2
; CI-NEXT: v_mov_b32_e32 v0, s0
; CI-NEXT: v_mov_b32_e32 v1, s1
; CI-NEXT: flat_store_dword v[0:1], v3
; CI-NEXT: s_endpgm
;
; VI-LABEL: atomic_dec_shl_base_lds_0:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; VI-NEXT: v_add_u32_e32 v2, vcc, 2, v0
; VI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; VI-NEXT: v_add_u32_e32 v0, vcc, 0, v0
; VI-NEXT: v_add_u32_e32 v0, vcc, 8, v0
; VI-NEXT: v_mov_b32_e32 v1, 9
; VI-NEXT: s_mov_b32 m0, -1
; VI-NEXT: ds_dec_rtn_u32 v3, v0, v1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s2
; VI-NEXT: v_mov_b32_e32 v1, s3
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: flat_store_dword v[0:1], v3
; VI-NEXT: s_endpgm
; GFX9-LABEL: atomic_dec_shl_base_lds_0:
; GFX9: ; %bb.0:
; GFX9-NEXT: v_mul_lo_u32 v1, 4, v0
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
; GFX9-NEXT: v_add_u32_e32 v3, 2, v0
; GFX9-NEXT: v_mov_b32_e32 v2, 9
; GFX9-NEXT: v_add_u32_e32 v0, 0, v1
; GFX9-NEXT: v_add_u32_e32 v0, 8, v0
; GFX9-NEXT: ds_dec_rtn_u32 v2, v0, v2
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s2
; GFX9-NEXT: v_mov_b32_e32 v1, s3
; GFX9-NEXT: global_store_dword v[0:1], v3, off
; GFX9-NEXT: v_mov_b32_e32 v0, s0
; GFX9-NEXT: v_mov_b32_e32 v1, s1
; GFX9-NEXT: global_store_dword v[0:1], v2, off
; GFX9-NEXT: s_endpgm
  %tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1
  %idx.0 = add nsw i32 %tid.x, 2
  %arrayidx0 = getelementptr inbounds [512 x i32], [512 x i32] addrspace(3)* @lds0, i32 0, i32 %idx.0
  %val0 = call i32 @llvm.amdgcn.atomic.dec.i32.p3i32(i32 addrspace(3)* %arrayidx0, i32 9, i32 0, i32 0, i1 false)
  store i32 %idx.0, i32 addrspace(1)* %add_use
  store i32 %val0, i32 addrspace(1)* %out
  ret void
}

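; 64-bit LDS decrement; the returned value is stored to global memory.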
define amdgpu_kernel void @lds_atomic_dec_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) #0 {
; CI-LABEL: lds_atomic_dec_ret_i64:
; CI: ; %bb.0:
; CI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; CI-NEXT: s_load_dword s2, s[4:5], 0x2
; CI-NEXT: v_mov_b32_e32 v0, 42
; CI-NEXT: v_mov_b32_e32 v1, 0
; CI-NEXT: s_mov_b32 m0, -1
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: v_mov_b32_e32 v2, s2
; CI-NEXT: ds_dec_rtn_u64 v[0:1], v2, v[0:1]
; CI-NEXT: v_mov_b32_e32 v3, s1
; CI-NEXT: v_mov_b32_e32 v2, s0
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; CI-NEXT: s_endpgm
;
; VI-LABEL: lds_atomic_dec_ret_i64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; VI-NEXT: s_load_dword s2, s[4:5], 0x8
; VI-NEXT: v_mov_b32_e32 v0, 42
; VI-NEXT: v_mov_b32_e32 v1, 0
; VI-NEXT: s_mov_b32 m0, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: ds_dec_rtn_u64 v[0:1], v2, v[0:1]
; VI-NEXT: v_mov_b32_e32 v3, s1
; VI-NEXT: v_mov_b32_e32 v2, s0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; VI-NEXT: s_endpgm
; GFX9-LABEL: lds_atomic_dec_ret_i64:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
; GFX9-NEXT: s_load_dword s0, s[0:1], 0x2c
; GFX9-NEXT: v_mov_b32_e32 v0, 42
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v2, s0
; GFX9-NEXT: ds_dec_rtn_u64 v[0:1], v2, v[0:1]
; GFX9-NEXT: v_mov_b32_e32 v2, s2
; GFX9-NEXT: v_mov_b32_e32 v3, s3
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_store_dwordx2 v[2:3], v[0:1], off
; GFX9-NEXT: s_endpgm
  %result = call i64 @llvm.amdgcn.atomic.dec.i64.p3i64(i64 addrspace(3)* %ptr, i64 42, i32 0, i32 0, i1 false)
  store i64 %result, i64 addrspace(1)* %out
  ret void
}

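; The 32-byte LDS offset is added with s_add_u32 rather than folded into the ds instruction's offset field.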
define amdgpu_kernel void @lds_atomic_dec_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) #0 {
; CI-LABEL: lds_atomic_dec_ret_i64_offset:
; CI: ; %bb.0:
; CI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; CI-NEXT: s_load_dword s2, s[4:5], 0x2
; CI-NEXT: v_mov_b32_e32 v0, 42
; CI-NEXT: v_mov_b32_e32 v1, 0
; CI-NEXT: s_mov_b32 m0, -1
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: s_add_u32 s2, s2, 32
; CI-NEXT: v_mov_b32_e32 v2, s2
; CI-NEXT: ds_dec_rtn_u64 v[0:1], v2, v[0:1]
; CI-NEXT: v_mov_b32_e32 v3, s1
; CI-NEXT: v_mov_b32_e32 v2, s0
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; CI-NEXT: s_endpgm
;
; VI-LABEL: lds_atomic_dec_ret_i64_offset:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; VI-NEXT: s_load_dword s2, s[4:5], 0x8
; VI-NEXT: v_mov_b32_e32 v0, 42
; VI-NEXT: v_mov_b32_e32 v1, 0
; VI-NEXT: s_mov_b32 m0, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_add_u32 s2, s2, 32
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: ds_dec_rtn_u64 v[0:1], v2, v[0:1]
; VI-NEXT: v_mov_b32_e32 v3, s1
; VI-NEXT: v_mov_b32_e32 v2, s0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; VI-NEXT: s_endpgm
; GFX9-LABEL: lds_atomic_dec_ret_i64_offset:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
; GFX9-NEXT: s_load_dword s0, s[0:1], 0x2c
; GFX9-NEXT: v_mov_b32_e32 v0, 42
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_add_u32 s0, s0, 32
; GFX9-NEXT: v_mov_b32_e32 v2, s0
; GFX9-NEXT: ds_dec_rtn_u64 v[0:1], v2, v[0:1]
; GFX9-NEXT: v_mov_b32_e32 v2, s2
; GFX9-NEXT: v_mov_b32_e32 v3, s3
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_store_dwordx2 v[2:3], v[0:1], off
; GFX9-NEXT: s_endpgm
  %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
  %result = call i64 @llvm.amdgcn.atomic.dec.i64.p3i64(i64 addrspace(3)* %gep, i64 42, i32 0, i32 0, i1 false)
  store i64 %result, i64 addrspace(1)* %out
  ret void
}

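; 64-bit LDS decrement with the result unused.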
define amdgpu_kernel void @lds_atomic_dec_noret_i64(i64 addrspace(3)* %ptr) nounwind {
; CI-LABEL: lds_atomic_dec_noret_i64:
; CI: ; %bb.0:
; CI-NEXT: s_load_dword s0, s[4:5], 0x0
; CI-NEXT: v_mov_b32_e32 v0, 42
; CI-NEXT: v_mov_b32_e32 v1, 0
; CI-NEXT: s_mov_b32 m0, -1
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: v_mov_b32_e32 v2, s0
; CI-NEXT: ds_dec_rtn_u64 v[0:1], v2, v[0:1]
; CI-NEXT: s_endpgm
;
; VI-LABEL: lds_atomic_dec_noret_i64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dword s0, s[4:5], 0x0
; VI-NEXT: v_mov_b32_e32 v0, 42
; VI-NEXT: v_mov_b32_e32 v1, 0
; VI-NEXT: s_mov_b32 m0, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v2, s0
; VI-NEXT: ds_dec_rtn_u64 v[0:1], v2, v[0:1]
; VI-NEXT: s_endpgm
; GFX9-LABEL: lds_atomic_dec_noret_i64:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dword s0, s[0:1], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 42
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v2, s0
; GFX9-NEXT: ds_dec_rtn_u64 v[0:1], v2, v[0:1]
; GFX9-NEXT: s_endpgm
  %result = call i64 @llvm.amdgcn.atomic.dec.i64.p3i64(i64 addrspace(3)* %ptr, i64 42, i32 0, i32 0, i1 false)
  ret void
}

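; No-return LDS variant; the 32-byte offset is again added separately on the scalar unit.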
define amdgpu_kernel void @lds_atomic_dec_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
; CI-LABEL: lds_atomic_dec_noret_i64_offset:
; CI: ; %bb.0:
; CI-NEXT: s_load_dword s0, s[4:5], 0x0
; CI-NEXT: v_mov_b32_e32 v0, 42
; CI-NEXT: v_mov_b32_e32 v1, 0
; CI-NEXT: s_mov_b32 m0, -1
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: s_add_u32 s0, s0, 32
; CI-NEXT: v_mov_b32_e32 v2, s0
; CI-NEXT: ds_dec_rtn_u64 v[0:1], v2, v[0:1]
; CI-NEXT: s_endpgm
;
; VI-LABEL: lds_atomic_dec_noret_i64_offset:
; VI: ; %bb.0:
; VI-NEXT: s_load_dword s0, s[4:5], 0x0
; VI-NEXT: v_mov_b32_e32 v0, 42
; VI-NEXT: v_mov_b32_e32 v1, 0
; VI-NEXT: s_mov_b32 m0, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_add_u32 s0, s0, 32
; VI-NEXT: v_mov_b32_e32 v2, s0
; VI-NEXT: ds_dec_rtn_u64 v[0:1], v2, v[0:1]
; VI-NEXT: s_endpgm
; GFX9-LABEL: lds_atomic_dec_noret_i64_offset:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dword s0, s[0:1], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 42
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_add_u32 s0, s0, 32
; GFX9-NEXT: v_mov_b32_e32 v2, s0
; GFX9-NEXT: ds_dec_rtn_u64 v[0:1], v2, v[0:1]
; GFX9-NEXT: s_endpgm
  %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
  %result = call i64 @llvm.amdgcn.atomic.dec.i64.p3i64(i64 addrspace(3)* %gep, i64 42, i32 0, i32 0, i1 false)
  ret void
}

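; Global i64 decrement; all targets select the flat form of the atomic, and GFX9 stores the result with global_store_dwordx2.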
define amdgpu_kernel void @global_atomic_dec_ret_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %ptr) #0 {
; CI-LABEL: global_atomic_dec_ret_i64:
; CI: ; %bb.0:
; CI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; CI-NEXT: v_mov_b32_e32 v2, 42
; CI-NEXT: v_mov_b32_e32 v3, 0
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: v_mov_b32_e32 v0, s2
; CI-NEXT: v_mov_b32_e32 v1, s3
; CI-NEXT: flat_atomic_dec_x2 v[0:1], v[0:1], v[2:3] glc
; CI-NEXT: v_mov_b32_e32 v3, s1
; CI-NEXT: v_mov_b32_e32 v2, s0
; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CI-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; CI-NEXT: s_endpgm
;
; VI-LABEL: global_atomic_dec_ret_i64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; VI-NEXT: v_mov_b32_e32 v2, 42
; VI-NEXT: v_mov_b32_e32 v3, 0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s2
; VI-NEXT: v_mov_b32_e32 v1, s3
; VI-NEXT: flat_atomic_dec_x2 v[0:1], v[0:1], v[2:3] glc
; VI-NEXT: v_mov_b32_e32 v3, s1
; VI-NEXT: v_mov_b32_e32 v2, s0
; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; VI-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; VI-NEXT: s_endpgm
; GFX9-LABEL: global_atomic_dec_ret_i64:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
; GFX9-NEXT: v_mov_b32_e32 v2, 42
; GFX9-NEXT: v_mov_b32_e32 v3, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s2
; GFX9-NEXT: v_mov_b32_e32 v1, s3
; GFX9-NEXT: flat_atomic_dec_x2 v[0:1], v[0:1], v[2:3] glc
; GFX9-NEXT: v_mov_b32_e32 v3, s1
; GFX9-NEXT: v_mov_b32_e32 v2, s0
; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_store_dwordx2 v[2:3], v[0:1], off
; GFX9-NEXT: s_endpgm
  %result = call i64 @llvm.amdgcn.atomic.dec.i64.p1i64(i64 addrspace(1)* %ptr, i64 42, i32 0, i32 0, i1 false)
  store i64 %result, i64 addrspace(1)* %out
  ret void
}

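; Global i64 decrement with a 32-byte offset added to the scalar pointer before the atomic.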
define amdgpu_kernel void @global_atomic_dec_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %ptr) #0 {
; CI-LABEL: global_atomic_dec_ret_i64_offset:
; CI: ; %bb.0:
; CI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; CI-NEXT: v_mov_b32_e32 v2, 42
; CI-NEXT: v_mov_b32_e32 v3, 0
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: s_add_u32 s2, s2, 32
; CI-NEXT: s_addc_u32 s3, s3, 0
; CI-NEXT: v_mov_b32_e32 v0, s2
; CI-NEXT: v_mov_b32_e32 v1, s3
; CI-NEXT: flat_atomic_dec_x2 v[0:1], v[0:1], v[2:3] glc
; CI-NEXT: v_mov_b32_e32 v3, s1
; CI-NEXT: v_mov_b32_e32 v2, s0
; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CI-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; CI-NEXT: s_endpgm
;
; VI-LABEL: global_atomic_dec_ret_i64_offset:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; VI-NEXT: v_mov_b32_e32 v2, 42
; VI-NEXT: v_mov_b32_e32 v3, 0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_add_u32 s2, s2, 32
; VI-NEXT: s_addc_u32 s3, s3, 0
; VI-NEXT: v_mov_b32_e32 v0, s2
; VI-NEXT: v_mov_b32_e32 v1, s3
; VI-NEXT: flat_atomic_dec_x2 v[0:1], v[0:1], v[2:3] glc
; VI-NEXT: v_mov_b32_e32 v3, s1
; VI-NEXT: v_mov_b32_e32 v2, s0
; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; VI-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; VI-NEXT: s_endpgm
; GFX9-LABEL: global_atomic_dec_ret_i64_offset:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
; GFX9-NEXT: v_mov_b32_e32 v2, 42
; GFX9-NEXT: v_mov_b32_e32 v3, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_add_u32 s2, s2, 32
; GFX9-NEXT: s_addc_u32 s3, s3, 0
; GFX9-NEXT: v_mov_b32_e32 v0, s2
; GFX9-NEXT: v_mov_b32_e32 v1, s3
; GFX9-NEXT: flat_atomic_dec_x2 v[0:1], v[0:1], v[2:3] glc
; GFX9-NEXT: v_mov_b32_e32 v3, s1
; GFX9-NEXT: v_mov_b32_e32 v2, s0
; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_store_dwordx2 v[2:3], v[0:1], off
; GFX9-NEXT: s_endpgm
  %gep = getelementptr i64, i64 addrspace(1)* %ptr, i32 4
  %result = call i64 @llvm.amdgcn.atomic.dec.i64.p1i64(i64 addrspace(1)* %gep, i64 42, i32 0, i32 0, i1 false)
  store i64 %result, i64 addrspace(1)* %out
  ret void
}

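; No-return variant: %result is unused, yet the returning (glc) form of the
; atomic is still emitted; only the store of the result is gone.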
define amdgpu_kernel void @global_atomic_dec_noret_i64(i64 addrspace(1)* %ptr) nounwind {
; CI-LABEL: global_atomic_dec_noret_i64:
; CI: ; %bb.0:
; CI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; CI-NEXT: v_mov_b32_e32 v2, 42
; CI-NEXT: v_mov_b32_e32 v3, 0
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: v_mov_b32_e32 v0, s0
; CI-NEXT: v_mov_b32_e32 v1, s1
; CI-NEXT: flat_atomic_dec_x2 v[0:1], v[0:1], v[2:3] glc
; CI-NEXT: s_endpgm
;
; VI-LABEL: global_atomic_dec_noret_i64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; VI-NEXT: v_mov_b32_e32 v2, 42
; VI-NEXT: v_mov_b32_e32 v3, 0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: flat_atomic_dec_x2 v[0:1], v[0:1], v[2:3] glc
; VI-NEXT: s_endpgm
; GFX9-LABEL: global_atomic_dec_noret_i64:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; GFX9-NEXT: v_mov_b32_e32 v2, 42
; GFX9-NEXT: v_mov_b32_e32 v3, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s0
; GFX9-NEXT: v_mov_b32_e32 v1, s1
; GFX9-NEXT: flat_atomic_dec_x2 v[0:1], v[0:1], v[2:3] glc
; GFX9-NEXT: s_endpgm
  %result = call i64 @llvm.amdgcn.atomic.dec.i64.p1i64(i64 addrspace(1)* %ptr, i64 42, i32 0, i32 0, i1 false)
  ret void
}

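; No-return variant of the 32-byte-offset test above; the address is again
; built with s_add_u32/s_addc_u32 rather than an immediate offset.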
define amdgpu_kernel void @global_atomic_dec_noret_i64_offset(i64 addrspace(1)* %ptr) nounwind {
; CI-LABEL: global_atomic_dec_noret_i64_offset:
; CI: ; %bb.0:
; CI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; CI-NEXT: v_mov_b32_e32 v2, 42
; CI-NEXT: v_mov_b32_e32 v3, 0
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: s_add_u32 s0, s0, 32
; CI-NEXT: s_addc_u32 s1, s1, 0
; CI-NEXT: v_mov_b32_e32 v0, s0
; CI-NEXT: v_mov_b32_e32 v1, s1
; CI-NEXT: flat_atomic_dec_x2 v[0:1], v[0:1], v[2:3] glc
; CI-NEXT: s_endpgm
;
; VI-LABEL: global_atomic_dec_noret_i64_offset:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; VI-NEXT: v_mov_b32_e32 v2, 42
; VI-NEXT: v_mov_b32_e32 v3, 0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_add_u32 s0, s0, 32
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: flat_atomic_dec_x2 v[0:1], v[0:1], v[2:3] glc
; VI-NEXT: s_endpgm
; GFX9-LABEL: global_atomic_dec_noret_i64_offset:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; GFX9-NEXT: v_mov_b32_e32 v2, 42
; GFX9-NEXT: v_mov_b32_e32 v3, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_add_u32 s0, s0, 32
; GFX9-NEXT: s_addc_u32 s1, s1, 0
; GFX9-NEXT: v_mov_b32_e32 v0, s0
; GFX9-NEXT: v_mov_b32_e32 v1, s1
; GFX9-NEXT: flat_atomic_dec_x2 v[0:1], v[0:1], v[2:3] glc
; GFX9-NEXT: s_endpgm
  %gep = getelementptr i64, i64 addrspace(1)* %ptr, i32 4
  %result = call i64 @llvm.amdgcn.atomic.dec.i64.p1i64(i64 addrspace(1)* %gep, i64 42, i32 0, i32 0, i1 false)
  ret void
}

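; Per-lane addressing: the workitem id is sign-extended and scaled by the
; 8-byte element size (a 64-bit shift by 3 on CI/VI, v_mul_lo/v_mul_hi
; sequences on GFX9), and the gep of 5 elements adds a constant 40 bytes.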
define amdgpu_kernel void @global_atomic_dec_ret_i64_offset_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %ptr) #0 {
; CI-LABEL: global_atomic_dec_ret_i64_offset_addr64:
; CI: ; %bb.0:
; CI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; CI-NEXT: v_ashrrev_i32_e32 v1, 31, v0
; CI-NEXT: v_lshl_b64 v[0:1], v[0:1], 3
; CI-NEXT: v_mov_b32_e32 v2, 42
; CI-NEXT: v_mov_b32_e32 v3, 0
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: v_mov_b32_e32 v5, s3
; CI-NEXT: v_mov_b32_e32 v4, s2
; CI-NEXT: v_add_i32_e32 v4, vcc, v4, v0
; CI-NEXT: v_addc_u32_e32 v5, vcc, v5, v1, vcc
; CI-NEXT: v_add_i32_e32 v4, vcc, 40, v4
; CI-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
; CI-NEXT: flat_atomic_dec_x2 v[2:3], v[4:5], v[2:3] glc
; CI-NEXT: v_mov_b32_e32 v5, s1
; CI-NEXT: v_mov_b32_e32 v4, s0
; CI-NEXT: v_add_i32_e32 v0, vcc, v4, v0
; CI-NEXT: v_addc_u32_e32 v1, vcc, v5, v1, vcc
; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CI-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; CI-NEXT: s_endpgm
;
; VI-LABEL: global_atomic_dec_ret_i64_offset_addr64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; VI-NEXT: v_ashrrev_i32_e32 v1, 31, v0
; VI-NEXT: v_lshlrev_b64 v[0:1], 3, v[0:1]
; VI-NEXT: v_mov_b32_e32 v2, 42
; VI-NEXT: v_mov_b32_e32 v3, 0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v5, s3
; VI-NEXT: v_mov_b32_e32 v4, s2
; VI-NEXT: v_add_u32_e32 v4, vcc, v4, v0
; VI-NEXT: v_addc_u32_e32 v5, vcc, v5, v1, vcc
; VI-NEXT: v_add_u32_e32 v4, vcc, 40, v4
; VI-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
; VI-NEXT: flat_atomic_dec_x2 v[2:3], v[4:5], v[2:3] glc
; VI-NEXT: v_mov_b32_e32 v5, s1
; VI-NEXT: v_mov_b32_e32 v4, s0
; VI-NEXT: v_add_u32_e32 v0, vcc, v4, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, v5, v1, vcc
; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; VI-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; VI-NEXT: s_endpgm
; GFX9-LABEL: global_atomic_dec_ret_i64_offset_addr64:
; GFX9: ; %bb.0:
; GFX9-NEXT: v_ashrrev_i32_e32 v1, 31, v0
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
; GFX9-NEXT: v_mul_lo_u32 v2, 0, v0
; GFX9-NEXT: v_mul_lo_u32 v1, 8, v1
; GFX9-NEXT: v_mul_hi_u32 v3, 8, v0
; GFX9-NEXT: v_mul_lo_u32 v4, 8, v0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s3
; GFX9-NEXT: v_add_u32_e32 v1, v2, v1
; GFX9-NEXT: v_add_u32_e32 v3, v1, v3
; GFX9-NEXT: v_add_co_u32_e32 v1, vcc, s2, v4
; GFX9-NEXT: v_addc_co_u32_e32 v2, vcc, v0, v3, vcc
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, 40, v1
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v2, vcc
; GFX9-NEXT: v_mov_b32_e32 v5, s1
; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s0, v4
; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, v5, v3, vcc
; GFX9-NEXT: v_mov_b32_e32 v4, 42
; GFX9-NEXT: v_mov_b32_e32 v5, 0
; GFX9-NEXT: flat_atomic_dec_x2 v[0:1], v[0:1], v[4:5] glc
; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_store_dwordx2 v[2:3], v[0:1], off
; GFX9-NEXT: s_endpgm
  %id = call i32 @llvm.amdgcn.workitem.id.x()
  %gep.tid = getelementptr i64, i64 addrspace(1)* %ptr, i32 %id
  %out.gep = getelementptr i64, i64 addrspace(1)* %out, i32 %id
  %gep = getelementptr i64, i64 addrspace(1)* %gep.tid, i32 5
  %result = call i64 @llvm.amdgcn.atomic.dec.i64.p1i64(i64 addrspace(1)* %gep, i64 42, i32 0, i32 0, i1 false)
  store i64 %result, i64 addrspace(1)* %out.gep
  ret void
}

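; No-return variant of the per-lane test above: same address computation,
; without the store of the returned value.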
define amdgpu_kernel void @global_atomic_dec_noret_i64_offset_addr64(i64 addrspace(1)* %ptr) #0 {
; CI-LABEL: global_atomic_dec_noret_i64_offset_addr64:
; CI: ; %bb.0:
; CI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; CI-NEXT: v_ashrrev_i32_e32 v1, 31, v0
; CI-NEXT: v_lshl_b64 v[0:1], v[0:1], 3
; CI-NEXT: v_mov_b32_e32 v2, 42
; CI-NEXT: v_mov_b32_e32 v3, 0
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: v_mov_b32_e32 v5, s1
; CI-NEXT: v_mov_b32_e32 v4, s0
; CI-NEXT: v_add_i32_e32 v0, vcc, v4, v0
; CI-NEXT: v_addc_u32_e32 v1, vcc, v5, v1, vcc
; CI-NEXT: v_add_i32_e32 v0, vcc, 40, v0
; CI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; CI-NEXT: flat_atomic_dec_x2 v[0:1], v[0:1], v[2:3] glc
; CI-NEXT: s_endpgm
;
; VI-LABEL: global_atomic_dec_noret_i64_offset_addr64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; VI-NEXT: v_ashrrev_i32_e32 v1, 31, v0
; VI-NEXT: v_lshlrev_b64 v[0:1], 3, v[0:1]
; VI-NEXT: v_mov_b32_e32 v2, 42
; VI-NEXT: v_mov_b32_e32 v3, 0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v5, s1
; VI-NEXT: v_mov_b32_e32 v4, s0
; VI-NEXT: v_add_u32_e32 v0, vcc, v4, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, v5, v1, vcc
; VI-NEXT: v_add_u32_e32 v0, vcc, 40, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_atomic_dec_x2 v[0:1], v[0:1], v[2:3] glc
; VI-NEXT: s_endpgm
; GFX9-LABEL: global_atomic_dec_noret_i64_offset_addr64:
; GFX9: ; %bb.0:
; GFX9-NEXT: v_ashrrev_i32_e32 v1, 31, v0
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; GFX9-NEXT: v_mul_lo_u32 v2, 0, v0
; GFX9-NEXT: v_mul_lo_u32 v1, 8, v1
; GFX9-NEXT: v_mul_hi_u32 v3, 8, v0
; GFX9-NEXT: v_mul_lo_u32 v0, 8, v0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v4, s1
; GFX9-NEXT: v_add_u32_e32 v1, v2, v1
; GFX9-NEXT: v_add_u32_e32 v1, v1, v3
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s0, v0
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v4, v1, vcc
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, 40, v0
; GFX9-NEXT: v_mov_b32_e32 v2, 42
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
; GFX9-NEXT: v_mov_b32_e32 v3, 0
; GFX9-NEXT: flat_atomic_dec_x2 v[0:1], v[0:1], v[2:3] glc
; GFX9-NEXT: s_endpgm
  %id = call i32 @llvm.amdgcn.workitem.id.x()
  %gep.tid = getelementptr i64, i64 addrspace(1)* %ptr, i32 %id
  %gep = getelementptr i64, i64 addrspace(1)* %gep.tid, i32 5
  %result = call i64 @llvm.amdgcn.atomic.dec.i64.p1i64(i64 addrspace(1)* %gep, i64 42, i32 0, i32 0, i1 false)
  ret void
}

@lds1 = internal addrspace(3) global [512 x i64] undef, align 8
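; Decrement element (workitem id + 2) of an LDS array. The +2 element index
; becomes a 16-byte addend on the scaled id, and the index itself is stored
; to %add_use so the add is not dead.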
define amdgpu_kernel void @atomic_dec_shl_base_lds_0_i64(i64 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
; CI-LABEL: atomic_dec_shl_base_lds_0_i64:
; CI: ; %bb.0:
; CI-NEXT: v_add_i32_e32 v4, vcc, 2, v0
; CI-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; CI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; CI-NEXT: v_add_i32_e32 v0, vcc, 0, v0
; CI-NEXT: v_add_i32_e32 v2, vcc, 16, v0
; CI-NEXT: v_mov_b32_e32 v0, 9
; CI-NEXT: v_mov_b32_e32 v1, 0
; CI-NEXT: s_mov_b32 m0, -1
; CI-NEXT: ds_dec_rtn_u64 v[0:1], v2, v[0:1]
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: v_mov_b32_e32 v2, s2
; CI-NEXT: v_mov_b32_e32 v3, s3
; CI-NEXT: flat_store_dword v[2:3], v4
; CI-NEXT: v_mov_b32_e32 v3, s1
; CI-NEXT: v_mov_b32_e32 v2, s0
; CI-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; CI-NEXT: s_endpgm
;
; VI-LABEL: atomic_dec_shl_base_lds_0_i64:
; VI: ; %bb.0:
; VI-NEXT: v_add_u32_e32 v4, vcc, 2, v0
; VI-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; VI-NEXT: v_add_u32_e32 v0, vcc, 0, v0
; VI-NEXT: v_add_u32_e32 v2, vcc, 16, v0
; VI-NEXT: v_mov_b32_e32 v0, 9
; VI-NEXT: v_mov_b32_e32 v1, 0
; VI-NEXT: s_mov_b32 m0, -1
; VI-NEXT: ds_dec_rtn_u64 v[0:1], v2, v[0:1]
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: v_mov_b32_e32 v3, s3
; VI-NEXT: flat_store_dword v[2:3], v4
; VI-NEXT: v_mov_b32_e32 v3, s1
; VI-NEXT: v_mov_b32_e32 v2, s0
; VI-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; VI-NEXT: s_endpgm
; GFX9-LABEL: atomic_dec_shl_base_lds_0_i64:
; GFX9: ; %bb.0:
; GFX9-NEXT: v_mul_lo_u32 v3, 8, v0
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
; GFX9-NEXT: v_mov_b32_e32 v1, 9
; GFX9-NEXT: v_add_u32_e32 v4, 2, v0
; GFX9-NEXT: v_add_u32_e32 v0, 0, v3
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: v_add_u32_e32 v0, 16, v0
; GFX9-NEXT: ds_dec_rtn_u64 v[0:1], v0, v[1:2]
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v2, s2
; GFX9-NEXT: v_mov_b32_e32 v3, s3
; GFX9-NEXT: global_store_dword v[2:3], v4, off
; GFX9-NEXT: v_mov_b32_e32 v3, s1
; GFX9-NEXT: v_mov_b32_e32 v2, s0
; GFX9-NEXT: global_store_dwordx2 v[2:3], v[0:1], off
; GFX9-NEXT: s_endpgm
  %tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1
  %idx.0 = add nsw i32 %tid.x, 2
  %arrayidx0 = getelementptr inbounds [512 x i64], [512 x i64] addrspace(3)* @lds1, i32 0, i32 %idx.0
  %val0 = call i64 @llvm.amdgcn.atomic.dec.i64.p3i64(i64 addrspace(3)* %arrayidx0, i64 9, i32 0, i32 0, i1 false)
  store i32 %idx.0, i32 addrspace(1)* %add_use
  store i64 %val0, i64 addrspace(1)* %out
  ret void
}

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }
attributes #2 = { nounwind argmemonly }