; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -global-isel -amdgpu-codegenprepare-disable-idiv-expansion=1 -amdgpu-bypass-slow-div=0 -mtriple=amdgcn-amd-amdhsa -mcpu=tonga -verify-machineinstrs < %s | FileCheck --check-prefix=GFX8 %s
; RUN: llc -global-isel -amdgpu-codegenprepare-disable-idiv-expansion=1 -amdgpu-bypass-slow-div=0 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefix=GFX9 %s
; RUN: llc -global-isel -amdgpu-codegenprepare-disable-idiv-expansion=1 -amdgpu-bypass-slow-div=0 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck -check-prefix=GFX10 %s
define amdgpu_kernel void @sdivrem_i32(i32 addrspace(1)* %out0, i32 addrspace(1)* %out1, i32 %x, i32 %y) {
|
|
; GFX8-LABEL: sdivrem_i32:
|
|
; GFX8: ; %bb.0:
|
|
; GFX8-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x10
|
|
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GFX8-NEXT: s_ashr_i32 s8, s7, 31
|
|
; GFX8-NEXT: s_add_i32 s0, s7, s8
|
|
; GFX8-NEXT: s_xor_b32 s7, s0, s8
|
|
; GFX8-NEXT: v_cvt_f32_u32_e32 v0, s7
|
|
; GFX8-NEXT: s_sub_i32 s0, 0, s7
|
|
; GFX8-NEXT: v_rcp_iflag_f32_e32 v0, v0
|
|
; GFX8-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
|
|
; GFX8-NEXT: v_cvt_u32_f32_e32 v0, v0
|
|
; GFX8-NEXT: v_mul_lo_u32 v1, s0, v0
|
|
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
|
|
; GFX8-NEXT: s_ashr_i32 s4, s6, 31
|
|
; GFX8-NEXT: s_add_i32 s5, s6, s4
|
|
; GFX8-NEXT: v_mul_hi_u32 v1, v0, v1
|
|
; GFX8-NEXT: s_xor_b32 s5, s5, s4
|
|
; GFX8-NEXT: s_xor_b32 s6, s4, s8
|
|
; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v1
|
|
; GFX8-NEXT: v_mul_hi_u32 v2, s5, v0
|
|
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GFX8-NEXT: v_mov_b32_e32 v0, s0
|
|
; GFX8-NEXT: v_mov_b32_e32 v1, s1
|
|
; GFX8-NEXT: v_mul_lo_u32 v3, v2, s7
|
|
; GFX8-NEXT: v_add_u32_e32 v4, vcc, 1, v2
|
|
; GFX8-NEXT: v_sub_u32_e32 v3, vcc, s5, v3
|
|
; GFX8-NEXT: v_cmp_le_u32_e32 vcc, s7, v3
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc
|
|
; GFX8-NEXT: v_subrev_u32_e64 v4, s[0:1], s7, v3
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc
|
|
; GFX8-NEXT: v_add_u32_e32 v4, vcc, 1, v2
|
|
; GFX8-NEXT: v_cmp_le_u32_e32 vcc, s7, v3
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc
|
|
; GFX8-NEXT: v_subrev_u32_e64 v4, s[0:1], s7, v3
|
|
; GFX8-NEXT: v_xor_b32_e32 v2, s6, v2
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc
|
|
; GFX8-NEXT: v_subrev_u32_e32 v2, vcc, s6, v2
|
|
; GFX8-NEXT: v_xor_b32_e32 v3, s4, v3
|
|
; GFX8-NEXT: flat_store_dword v[0:1], v2
|
|
; GFX8-NEXT: v_mov_b32_e32 v0, s2
|
|
; GFX8-NEXT: v_subrev_u32_e32 v3, vcc, s4, v3
|
|
; GFX8-NEXT: v_mov_b32_e32 v1, s3
|
|
; GFX8-NEXT: flat_store_dword v[0:1], v3
|
|
; GFX8-NEXT: s_endpgm
|
|
;
|
|
; GFX9-LABEL: sdivrem_i32:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x10
|
|
; GFX9-NEXT: v_mov_b32_e32 v2, 0
|
|
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GFX9-NEXT: s_ashr_i32 s6, s1, 31
|
|
; GFX9-NEXT: s_add_i32 s1, s1, s6
|
|
; GFX9-NEXT: s_xor_b32 s7, s1, s6
|
|
; GFX9-NEXT: v_cvt_f32_u32_e32 v0, s7
|
|
; GFX9-NEXT: s_sub_i32 s1, 0, s7
|
|
; GFX9-NEXT: s_ashr_i32 s8, s0, 31
|
|
; GFX9-NEXT: s_add_i32 s0, s0, s8
|
|
; GFX9-NEXT: v_rcp_iflag_f32_e32 v0, v0
|
|
; GFX9-NEXT: s_xor_b32 s9, s0, s8
|
|
; GFX9-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
|
|
; GFX9-NEXT: v_cvt_u32_f32_e32 v0, v0
|
|
; GFX9-NEXT: v_mul_lo_u32 v1, s1, v0
|
|
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
|
|
; GFX9-NEXT: s_xor_b32 s4, s8, s6
|
|
; GFX9-NEXT: v_mul_hi_u32 v1, v0, v1
|
|
; GFX9-NEXT: v_add_u32_e32 v0, v0, v1
|
|
; GFX9-NEXT: v_mul_hi_u32 v0, s9, v0
|
|
; GFX9-NEXT: v_mul_lo_u32 v1, v0, s7
|
|
; GFX9-NEXT: v_add_u32_e32 v3, 1, v0
|
|
; GFX9-NEXT: v_sub_u32_e32 v1, s9, v1
|
|
; GFX9-NEXT: v_cmp_le_u32_e32 vcc, s7, v1
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc
|
|
; GFX9-NEXT: v_subrev_u32_e32 v3, s7, v1
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
|
|
; GFX9-NEXT: v_add_u32_e32 v3, 1, v0
|
|
; GFX9-NEXT: v_cmp_le_u32_e32 vcc, s7, v1
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc
|
|
; GFX9-NEXT: v_subrev_u32_e32 v3, s7, v1
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
|
|
; GFX9-NEXT: v_xor_b32_e32 v0, s4, v0
|
|
; GFX9-NEXT: v_subrev_u32_e32 v0, s4, v0
|
|
; GFX9-NEXT: v_xor_b32_e32 v1, s8, v1
|
|
; GFX9-NEXT: v_subrev_u32_e32 v1, s8, v1
|
|
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GFX9-NEXT: global_store_dword v2, v0, s[0:1]
|
|
; GFX9-NEXT: global_store_dword v2, v1, s[2:3]
|
|
; GFX9-NEXT: s_endpgm
|
|
;
|
|
; GFX10-LABEL: sdivrem_i32:
|
|
; GFX10: ; %bb.0:
|
|
; GFX10-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x10
|
|
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GFX10-NEXT: s_ashr_i32 s6, s1, 31
|
|
; GFX10-NEXT: s_ashr_i32 s8, s0, 31
|
|
; GFX10-NEXT: s_add_i32 s1, s1, s6
|
|
; GFX10-NEXT: s_add_i32 s0, s0, s8
|
|
; GFX10-NEXT: s_xor_b32 s7, s1, s6
|
|
; GFX10-NEXT: s_xor_b32 s0, s0, s8
|
|
; GFX10-NEXT: v_cvt_f32_u32_e32 v0, s7
|
|
; GFX10-NEXT: s_sub_i32 s1, 0, s7
|
|
; GFX10-NEXT: v_rcp_iflag_f32_e32 v0, v0
|
|
; GFX10-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
|
|
; GFX10-NEXT: v_cvt_u32_f32_e32 v0, v0
|
|
; GFX10-NEXT: v_mul_lo_u32 v1, s1, v0
|
|
; GFX10-NEXT: v_mul_hi_u32 v1, v0, v1
|
|
; GFX10-NEXT: v_add_nc_u32_e32 v0, v0, v1
|
|
; GFX10-NEXT: v_mul_hi_u32 v0, s0, v0
|
|
; GFX10-NEXT: v_mul_lo_u32 v1, v0, s7
|
|
; GFX10-NEXT: v_add_nc_u32_e32 v2, 1, v0
|
|
; GFX10-NEXT: v_sub_nc_u32_e32 v1, s0, v1
|
|
; GFX10-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
|
|
; GFX10-NEXT: s_xor_b32 s4, s8, s6
|
|
; GFX10-NEXT: v_subrev_nc_u32_e32 v3, s7, v1
|
|
; GFX10-NEXT: v_cmp_le_u32_e32 vcc_lo, s7, v1
|
|
; GFX10-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc_lo
|
|
; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc_lo
|
|
; GFX10-NEXT: v_add_nc_u32_e32 v2, 1, v0
|
|
; GFX10-NEXT: v_cmp_le_u32_e32 vcc_lo, s7, v1
|
|
; GFX10-NEXT: v_subrev_nc_u32_e32 v3, s7, v1
|
|
; GFX10-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc_lo
|
|
; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc_lo
|
|
; GFX10-NEXT: v_mov_b32_e32 v2, 0
|
|
; GFX10-NEXT: v_xor_b32_e32 v0, s4, v0
|
|
; GFX10-NEXT: v_xor_b32_e32 v1, s8, v1
|
|
; GFX10-NEXT: v_subrev_nc_u32_e32 v0, s4, v0
|
|
; GFX10-NEXT: v_subrev_nc_u32_e32 v1, s8, v1
|
|
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GFX10-NEXT: global_store_dword v2, v0, s[0:1]
|
|
; GFX10-NEXT: global_store_dword v2, v1, s[2:3]
|
|
; GFX10-NEXT: s_endpgm
|
|
%div = sdiv i32 %x, %y
|
|
store i32 %div, i32 addrspace(1)* %out0
|
|
%rem = srem i32 %x, %y
|
|
store i32 %rem, i32 addrspace(1)* %out1
|
|
ret void
|
|
}
define amdgpu_kernel void @sdivrem_i64(i64 addrspace(1)* %out0, i64 addrspace(1)* %out1, i64 %x, i64 %y) {
|
|
; GFX8-LABEL: sdivrem_i64:
|
|
; GFX8: ; %bb.0:
|
|
; GFX8-NEXT: s_load_dwordx8 s[4:11], s[4:5], 0x0
|
|
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GFX8-NEXT: s_ashr_i32 s2, s9, 31
|
|
; GFX8-NEXT: s_ashr_i32 s12, s11, 31
|
|
; GFX8-NEXT: s_add_u32 s0, s8, s2
|
|
; GFX8-NEXT: s_cselect_b32 s1, 1, 0
|
|
; GFX8-NEXT: s_and_b32 s1, s1, 1
|
|
; GFX8-NEXT: s_cmp_lg_u32 s1, 0
|
|
; GFX8-NEXT: s_addc_u32 s1, s9, s2
|
|
; GFX8-NEXT: s_add_u32 s8, s10, s12
|
|
; GFX8-NEXT: s_cselect_b32 s3, 1, 0
|
|
; GFX8-NEXT: s_and_b32 s3, s3, 1
|
|
; GFX8-NEXT: s_cmp_lg_u32 s3, 0
|
|
; GFX8-NEXT: s_mov_b32 s13, s12
|
|
; GFX8-NEXT: s_addc_u32 s9, s11, s12
|
|
; GFX8-NEXT: s_xor_b64 s[8:9], s[8:9], s[12:13]
|
|
; GFX8-NEXT: v_cvt_f32_u32_e32 v0, s9
|
|
; GFX8-NEXT: v_cvt_f32_u32_e32 v1, s8
|
|
; GFX8-NEXT: s_mov_b32 s3, s2
|
|
; GFX8-NEXT: s_xor_b64 s[10:11], s[0:1], s[2:3]
|
|
; GFX8-NEXT: v_mul_f32_e32 v0, 0x4f800000, v0
|
|
; GFX8-NEXT: v_add_f32_e32 v0, v0, v1
|
|
; GFX8-NEXT: v_rcp_iflag_f32_e32 v0, v0
|
|
; GFX8-NEXT: s_sub_u32 s0, 0, s8
|
|
; GFX8-NEXT: s_cselect_b32 s1, 1, 0
|
|
; GFX8-NEXT: s_and_b32 s1, s1, 1
|
|
; GFX8-NEXT: v_mul_f32_e32 v0, 0x5f7ffffc, v0
|
|
; GFX8-NEXT: v_mul_f32_e32 v1, 0x2f800000, v0
|
|
; GFX8-NEXT: v_trunc_f32_e32 v1, v1
|
|
; GFX8-NEXT: v_mul_f32_e32 v2, 0xcf800000, v1
|
|
; GFX8-NEXT: v_add_f32_e32 v0, v2, v0
|
|
; GFX8-NEXT: v_cvt_u32_f32_e32 v1, v1
|
|
; GFX8-NEXT: v_cvt_u32_f32_e32 v0, v0
|
|
; GFX8-NEXT: s_cmp_lg_u32 s1, 0
|
|
; GFX8-NEXT: s_subb_u32 s1, 0, s9
|
|
; GFX8-NEXT: v_mul_lo_u32 v2, s0, v1
|
|
; GFX8-NEXT: v_mul_lo_u32 v3, s1, v0
|
|
; GFX8-NEXT: v_mul_hi_u32 v5, s0, v0
|
|
; GFX8-NEXT: v_mul_lo_u32 v4, s0, v0
|
|
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v3, v2
|
|
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v5
|
|
; GFX8-NEXT: v_mul_lo_u32 v3, v1, v4
|
|
; GFX8-NEXT: v_mul_lo_u32 v5, v0, v2
|
|
; GFX8-NEXT: v_mul_hi_u32 v6, v0, v4
|
|
; GFX8-NEXT: v_mul_hi_u32 v4, v1, v4
|
|
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v5
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
|
|
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v6
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc
|
|
; GFX8-NEXT: v_mul_lo_u32 v6, v1, v2
|
|
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v5, v3
|
|
; GFX8-NEXT: v_mul_hi_u32 v5, v0, v2
|
|
; GFX8-NEXT: v_add_u32_e32 v4, vcc, v6, v4
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v6, 0, 1, vcc
|
|
; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v5
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
|
|
; GFX8-NEXT: v_add_u32_e32 v5, vcc, v6, v5
|
|
; GFX8-NEXT: v_mul_hi_u32 v2, v1, v2
|
|
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v4, v3
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
|
|
; GFX8-NEXT: v_add_u32_e32 v4, vcc, v5, v4
|
|
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v4
|
|
; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v3
|
|
; GFX8-NEXT: v_addc_u32_e32 v1, vcc, v1, v2, vcc
|
|
; GFX8-NEXT: v_mul_lo_u32 v2, s1, v0
|
|
; GFX8-NEXT: v_mul_lo_u32 v3, s0, v1
|
|
; GFX8-NEXT: v_mul_hi_u32 v5, s0, v0
|
|
; GFX8-NEXT: v_mul_lo_u32 v4, s0, v0
|
|
; GFX8-NEXT: v_mov_b32_e32 v6, s9
|
|
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v3
|
|
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v5
|
|
; GFX8-NEXT: v_mul_lo_u32 v3, v1, v4
|
|
; GFX8-NEXT: v_mul_lo_u32 v5, v0, v2
|
|
; GFX8-NEXT: v_mul_hi_u32 v7, v0, v4
|
|
; GFX8-NEXT: v_mul_hi_u32 v4, v1, v4
|
|
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v5
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
|
|
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v7
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc
|
|
; GFX8-NEXT: v_mul_lo_u32 v7, v1, v2
|
|
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v5, v3
|
|
; GFX8-NEXT: v_mul_hi_u32 v5, v0, v2
|
|
; GFX8-NEXT: v_add_u32_e32 v4, vcc, v7, v4
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc
|
|
; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v5
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
|
|
; GFX8-NEXT: v_add_u32_e32 v5, vcc, v7, v5
|
|
; GFX8-NEXT: v_mul_hi_u32 v2, v1, v2
|
|
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v4, v3
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
|
|
; GFX8-NEXT: v_add_u32_e32 v4, vcc, v5, v4
|
|
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v4
|
|
; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v3
|
|
; GFX8-NEXT: v_addc_u32_e32 v1, vcc, v1, v2, vcc
|
|
; GFX8-NEXT: v_mul_lo_u32 v2, s11, v0
|
|
; GFX8-NEXT: v_mul_lo_u32 v3, s10, v1
|
|
; GFX8-NEXT: v_mul_hi_u32 v5, s10, v0
|
|
; GFX8-NEXT: v_mul_hi_u32 v0, s11, v0
|
|
; GFX8-NEXT: v_mov_b32_e32 v4, s11
|
|
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v3
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc
|
|
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v5
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
|
|
; GFX8-NEXT: v_mul_lo_u32 v5, s11, v1
|
|
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v3, v2
|
|
; GFX8-NEXT: v_mul_hi_u32 v3, s10, v1
|
|
; GFX8-NEXT: v_add_u32_e32 v0, vcc, v5, v0
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
|
|
; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v3
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc
|
|
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v5, v3
|
|
; GFX8-NEXT: v_mul_hi_u32 v1, s11, v1
|
|
; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v2
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
|
|
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v3, v2
|
|
; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v2
|
|
; GFX8-NEXT: v_mul_lo_u32 v2, s9, v0
|
|
; GFX8-NEXT: v_mul_lo_u32 v3, s8, v1
|
|
; GFX8-NEXT: v_mul_hi_u32 v7, s8, v0
|
|
; GFX8-NEXT: v_mul_lo_u32 v5, s8, v0
|
|
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v3
|
|
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v7
|
|
; GFX8-NEXT: v_sub_u32_e32 v3, vcc, s10, v5
|
|
; GFX8-NEXT: v_subb_u32_e64 v4, s[0:1], v4, v2, vcc
|
|
; GFX8-NEXT: v_sub_u32_e64 v2, s[0:1], s11, v2
|
|
; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s9, v4
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v5, 0, -1, s[0:1]
|
|
; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s8, v3
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v7, 0, -1, s[0:1]
|
|
; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], s9, v4
|
|
; GFX8-NEXT: v_subb_u32_e32 v2, vcc, v2, v6, vcc
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v5, v5, v7, s[0:1]
|
|
; GFX8-NEXT: v_subrev_u32_e32 v7, vcc, s8, v3
|
|
; GFX8-NEXT: v_subbrev_u32_e64 v8, s[0:1], 0, v2, vcc
|
|
; GFX8-NEXT: v_add_u32_e64 v9, s[0:1], 1, v0
|
|
; GFX8-NEXT: v_addc_u32_e64 v10, s[0:1], 0, v1, s[0:1]
|
|
; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s9, v8
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v11, 0, -1, s[0:1]
|
|
; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s8, v7
|
|
; GFX8-NEXT: v_subb_u32_e32 v2, vcc, v2, v6, vcc
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v12, 0, -1, s[0:1]
|
|
; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], s9, v8
|
|
; GFX8-NEXT: v_subrev_u32_e32 v6, vcc, s8, v7
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v11, v11, v12, s[0:1]
|
|
; GFX8-NEXT: v_add_u32_e64 v12, s[0:1], 1, v9
|
|
; GFX8-NEXT: v_subbrev_u32_e32 v2, vcc, 0, v2, vcc
|
|
; GFX8-NEXT: v_addc_u32_e64 v13, s[0:1], 0, v10, s[0:1]
|
|
; GFX8-NEXT: v_cmp_ne_u32_e32 vcc, 0, v11
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v9, v9, v12, vcc
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v10, v10, v13, vcc
|
|
; GFX8-NEXT: v_cmp_ne_u32_e32 vcc, 0, v5
|
|
; GFX8-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v11
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v0, v0, v9, vcc
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v5, v7, v6, s[0:1]
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v2, v8, v2, s[0:1]
|
|
; GFX8-NEXT: s_xor_b64 s[0:1], s[2:3], s[12:13]
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v10, vcc
|
|
; GFX8-NEXT: v_xor_b32_e32 v0, s0, v0
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
|
|
; GFX8-NEXT: v_xor_b32_e32 v1, s1, v1
|
|
; GFX8-NEXT: v_mov_b32_e32 v4, s1
|
|
; GFX8-NEXT: v_subrev_u32_e32 v0, vcc, s0, v0
|
|
; GFX8-NEXT: v_subb_u32_e32 v1, vcc, v1, v4, vcc
|
|
; GFX8-NEXT: v_xor_b32_e32 v3, s2, v3
|
|
; GFX8-NEXT: v_xor_b32_e32 v4, s2, v2
|
|
; GFX8-NEXT: v_mov_b32_e32 v5, s2
|
|
; GFX8-NEXT: v_subrev_u32_e32 v2, vcc, s2, v3
|
|
; GFX8-NEXT: v_subb_u32_e32 v3, vcc, v4, v5, vcc
|
|
; GFX8-NEXT: v_mov_b32_e32 v4, s4
|
|
; GFX8-NEXT: v_mov_b32_e32 v5, s5
|
|
; GFX8-NEXT: flat_store_dwordx2 v[4:5], v[0:1]
|
|
; GFX8-NEXT: v_mov_b32_e32 v0, s6
|
|
; GFX8-NEXT: v_mov_b32_e32 v1, s7
|
|
; GFX8-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
|
|
; GFX8-NEXT: s_endpgm
|
|
;
|
|
; GFX9-LABEL: sdivrem_i64:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_load_dwordx8 s[4:11], s[4:5], 0x0
|
|
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GFX9-NEXT: s_ashr_i32 s2, s9, 31
|
|
; GFX9-NEXT: s_ashr_i32 s12, s11, 31
|
|
; GFX9-NEXT: s_add_u32 s0, s8, s2
|
|
; GFX9-NEXT: s_cselect_b32 s1, 1, 0
|
|
; GFX9-NEXT: s_and_b32 s1, s1, 1
|
|
; GFX9-NEXT: s_cmp_lg_u32 s1, 0
|
|
; GFX9-NEXT: s_addc_u32 s1, s9, s2
|
|
; GFX9-NEXT: s_add_u32 s8, s10, s12
|
|
; GFX9-NEXT: s_cselect_b32 s3, 1, 0
|
|
; GFX9-NEXT: s_and_b32 s3, s3, 1
|
|
; GFX9-NEXT: s_cmp_lg_u32 s3, 0
|
|
; GFX9-NEXT: s_mov_b32 s13, s12
|
|
; GFX9-NEXT: s_addc_u32 s9, s11, s12
|
|
; GFX9-NEXT: s_xor_b64 s[8:9], s[8:9], s[12:13]
|
|
; GFX9-NEXT: v_cvt_f32_u32_e32 v0, s9
|
|
; GFX9-NEXT: v_cvt_f32_u32_e32 v1, s8
|
|
; GFX9-NEXT: s_mov_b32 s3, s2
|
|
; GFX9-NEXT: s_xor_b64 s[10:11], s[0:1], s[2:3]
|
|
; GFX9-NEXT: v_mul_f32_e32 v0, 0x4f800000, v0
|
|
; GFX9-NEXT: v_add_f32_e32 v0, v0, v1
|
|
; GFX9-NEXT: v_rcp_iflag_f32_e32 v0, v0
|
|
; GFX9-NEXT: s_sub_u32 s0, 0, s8
|
|
; GFX9-NEXT: s_cselect_b32 s1, 1, 0
|
|
; GFX9-NEXT: s_and_b32 s1, s1, 1
|
|
; GFX9-NEXT: v_mul_f32_e32 v0, 0x5f7ffffc, v0
|
|
; GFX9-NEXT: v_mul_f32_e32 v1, 0x2f800000, v0
|
|
; GFX9-NEXT: v_trunc_f32_e32 v1, v1
|
|
; GFX9-NEXT: v_mul_f32_e32 v2, 0xcf800000, v1
|
|
; GFX9-NEXT: v_add_f32_e32 v0, v2, v0
|
|
; GFX9-NEXT: v_cvt_u32_f32_e32 v1, v1
|
|
; GFX9-NEXT: v_cvt_u32_f32_e32 v0, v0
|
|
; GFX9-NEXT: s_cmp_lg_u32 s1, 0
|
|
; GFX9-NEXT: s_subb_u32 s1, 0, s9
|
|
; GFX9-NEXT: v_mul_lo_u32 v2, s0, v1
|
|
; GFX9-NEXT: v_mul_lo_u32 v3, s1, v0
|
|
; GFX9-NEXT: v_mul_hi_u32 v4, s0, v0
|
|
; GFX9-NEXT: v_mul_lo_u32 v5, s0, v0
|
|
; GFX9-NEXT: v_mov_b32_e32 v8, s11
|
|
; GFX9-NEXT: v_add3_u32 v2, v3, v2, v4
|
|
; GFX9-NEXT: v_mul_lo_u32 v3, v1, v5
|
|
; GFX9-NEXT: v_mul_lo_u32 v4, v0, v2
|
|
; GFX9-NEXT: v_mul_hi_u32 v6, v0, v5
|
|
; GFX9-NEXT: v_mul_hi_u32 v5, v1, v5
|
|
; GFX9-NEXT: v_add_co_u32_e32 v3, vcc, v3, v4
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
|
|
; GFX9-NEXT: v_add_co_u32_e32 v3, vcc, v3, v6
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc
|
|
; GFX9-NEXT: v_mul_lo_u32 v6, v1, v2
|
|
; GFX9-NEXT: v_add_u32_e32 v3, v4, v3
|
|
; GFX9-NEXT: v_mul_hi_u32 v4, v0, v2
|
|
; GFX9-NEXT: v_mul_hi_u32 v2, v1, v2
|
|
; GFX9-NEXT: v_add_co_u32_e32 v5, vcc, v6, v5
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v6, 0, 1, vcc
|
|
; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v5, v4
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
|
|
; GFX9-NEXT: v_add_co_u32_e32 v3, vcc, v4, v3
|
|
; GFX9-NEXT: v_add_u32_e32 v5, v6, v5
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
|
|
; GFX9-NEXT: v_add3_u32 v2, v5, v4, v2
|
|
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, v0, v3
|
|
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v1, v2, vcc
|
|
; GFX9-NEXT: v_mul_lo_u32 v2, s1, v0
|
|
; GFX9-NEXT: v_mul_lo_u32 v3, s0, v1
|
|
; GFX9-NEXT: v_mul_hi_u32 v4, s0, v0
|
|
; GFX9-NEXT: v_mul_lo_u32 v5, s0, v0
|
|
; GFX9-NEXT: v_add3_u32 v2, v2, v3, v4
|
|
; GFX9-NEXT: v_mul_lo_u32 v3, v1, v5
|
|
; GFX9-NEXT: v_mul_lo_u32 v4, v0, v2
|
|
; GFX9-NEXT: v_mul_hi_u32 v6, v0, v5
|
|
; GFX9-NEXT: v_mul_hi_u32 v5, v1, v5
|
|
; GFX9-NEXT: v_add_co_u32_e32 v3, vcc, v3, v4
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
|
|
; GFX9-NEXT: v_add_co_u32_e32 v3, vcc, v3, v6
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc
|
|
; GFX9-NEXT: v_mul_lo_u32 v6, v1, v2
|
|
; GFX9-NEXT: v_add_u32_e32 v3, v4, v3
|
|
; GFX9-NEXT: v_mul_hi_u32 v4, v0, v2
|
|
; GFX9-NEXT: v_mul_hi_u32 v2, v1, v2
|
|
; GFX9-NEXT: v_add_co_u32_e32 v5, vcc, v6, v5
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v6, 0, 1, vcc
|
|
; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v5, v4
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
|
|
; GFX9-NEXT: v_add_co_u32_e32 v3, vcc, v4, v3
|
|
; GFX9-NEXT: v_add_u32_e32 v5, v6, v5
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
|
|
; GFX9-NEXT: v_add3_u32 v2, v5, v4, v2
|
|
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, v0, v3
|
|
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v1, v2, vcc
|
|
; GFX9-NEXT: v_mul_lo_u32 v2, s11, v0
|
|
; GFX9-NEXT: v_mul_lo_u32 v3, s10, v1
|
|
; GFX9-NEXT: v_mul_hi_u32 v5, s10, v0
|
|
; GFX9-NEXT: v_mul_hi_u32 v0, s11, v0
|
|
; GFX9-NEXT: v_mov_b32_e32 v4, s9
|
|
; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v2, v3
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc
|
|
; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v2, v5
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
|
|
; GFX9-NEXT: v_mul_lo_u32 v5, s11, v1
|
|
; GFX9-NEXT: v_add_u32_e32 v2, v3, v2
|
|
; GFX9-NEXT: v_mul_hi_u32 v3, s10, v1
|
|
; GFX9-NEXT: v_mul_hi_u32 v1, s11, v1
|
|
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, v5, v0
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
|
|
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, v0, v3
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc
|
|
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, v0, v2
|
|
; GFX9-NEXT: v_add_u32_e32 v3, v5, v3
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
|
|
; GFX9-NEXT: v_add3_u32 v1, v3, v2, v1
|
|
; GFX9-NEXT: v_mul_lo_u32 v2, s9, v0
|
|
; GFX9-NEXT: v_mul_lo_u32 v3, s8, v1
|
|
; GFX9-NEXT: v_mul_hi_u32 v5, s8, v0
|
|
; GFX9-NEXT: v_mul_lo_u32 v7, s8, v0
|
|
; GFX9-NEXT: v_mov_b32_e32 v6, 0
|
|
; GFX9-NEXT: v_add3_u32 v2, v2, v3, v5
|
|
; GFX9-NEXT: v_sub_co_u32_e32 v3, vcc, s10, v7
|
|
; GFX9-NEXT: v_subb_co_u32_e64 v5, s[0:1], v8, v2, vcc
|
|
; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s9, v5
|
|
; GFX9-NEXT: v_sub_u32_e32 v2, s11, v2
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v7, 0, -1, s[0:1]
|
|
; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s8, v3
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v8, 0, -1, s[0:1]
|
|
; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], s9, v5
|
|
; GFX9-NEXT: v_subb_co_u32_e32 v2, vcc, v2, v4, vcc
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v7, v7, v8, s[0:1]
|
|
; GFX9-NEXT: v_subrev_co_u32_e32 v8, vcc, s8, v3
|
|
; GFX9-NEXT: v_subbrev_co_u32_e64 v9, s[0:1], 0, v2, vcc
|
|
; GFX9-NEXT: v_add_co_u32_e64 v10, s[0:1], 1, v0
|
|
; GFX9-NEXT: v_addc_co_u32_e64 v11, s[0:1], 0, v1, s[0:1]
|
|
; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s9, v9
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v12, 0, -1, s[0:1]
|
|
; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s8, v8
|
|
; GFX9-NEXT: v_subb_co_u32_e32 v2, vcc, v2, v4, vcc
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v13, 0, -1, s[0:1]
|
|
; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], s9, v9
|
|
; GFX9-NEXT: v_subrev_co_u32_e32 v4, vcc, s8, v8
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v12, v12, v13, s[0:1]
|
|
; GFX9-NEXT: v_add_co_u32_e64 v13, s[0:1], 1, v10
|
|
; GFX9-NEXT: v_subbrev_co_u32_e32 v2, vcc, 0, v2, vcc
|
|
; GFX9-NEXT: v_addc_co_u32_e64 v14, s[0:1], 0, v11, s[0:1]
|
|
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v10, v10, v13, vcc
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v11, v11, v14, vcc
|
|
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v7
|
|
; GFX9-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v12
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v10, vcc
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v4, v8, v4, s[0:1]
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v2, v9, v2, s[0:1]
|
|
; GFX9-NEXT: s_xor_b64 s[0:1], s[2:3], s[12:13]
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v11, vcc
|
|
; GFX9-NEXT: v_xor_b32_e32 v0, s0, v0
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v2, v5, v2, vcc
|
|
; GFX9-NEXT: v_xor_b32_e32 v1, s1, v1
|
|
; GFX9-NEXT: v_mov_b32_e32 v4, s1
|
|
; GFX9-NEXT: v_subrev_co_u32_e32 v0, vcc, s0, v0
|
|
; GFX9-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v4, vcc
|
|
; GFX9-NEXT: v_xor_b32_e32 v3, s2, v3
|
|
; GFX9-NEXT: v_xor_b32_e32 v4, s2, v2
|
|
; GFX9-NEXT: v_mov_b32_e32 v5, s2
|
|
; GFX9-NEXT: v_subrev_co_u32_e32 v2, vcc, s2, v3
|
|
; GFX9-NEXT: v_subb_co_u32_e32 v3, vcc, v4, v5, vcc
|
|
; GFX9-NEXT: global_store_dwordx2 v6, v[0:1], s[4:5]
|
|
; GFX9-NEXT: global_store_dwordx2 v6, v[2:3], s[6:7]
|
|
; GFX9-NEXT: s_endpgm
|
|
;
|
|
; GFX10-LABEL: sdivrem_i64:
|
|
; GFX10: ; %bb.0:
|
|
; GFX10-NEXT: s_load_dwordx8 s[4:11], s[4:5], 0x0
|
|
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GFX10-NEXT: s_ashr_i32 s2, s9, 31
|
|
; GFX10-NEXT: s_ashr_i32 s12, s11, 31
|
|
; GFX10-NEXT: s_add_u32 s0, s8, s2
|
|
; GFX10-NEXT: s_cselect_b32 s1, 1, 0
|
|
; GFX10-NEXT: s_mov_b32 s13, s12
|
|
; GFX10-NEXT: s_and_b32 s1, s1, 1
|
|
; GFX10-NEXT: s_cmp_lg_u32 s1, 0
|
|
; GFX10-NEXT: s_addc_u32 s1, s9, s2
|
|
; GFX10-NEXT: s_add_u32 s8, s10, s12
|
|
; GFX10-NEXT: s_cselect_b32 s3, 1, 0
|
|
; GFX10-NEXT: s_and_b32 s3, s3, 1
|
|
; GFX10-NEXT: s_cmp_lg_u32 s3, 0
|
|
; GFX10-NEXT: s_mov_b32 s3, s2
|
|
; GFX10-NEXT: s_addc_u32 s9, s11, s12
|
|
; GFX10-NEXT: s_xor_b64 s[0:1], s[0:1], s[2:3]
|
|
; GFX10-NEXT: s_xor_b64 s[8:9], s[8:9], s[12:13]
|
|
; GFX10-NEXT: v_cvt_f32_u32_e32 v0, s9
|
|
; GFX10-NEXT: v_cvt_f32_u32_e32 v1, s8
|
|
; GFX10-NEXT: s_sub_u32 s10, 0, s8
|
|
; GFX10-NEXT: s_cselect_b32 s11, 1, 0
|
|
; GFX10-NEXT: s_and_b32 s11, s11, 1
|
|
; GFX10-NEXT: v_mul_f32_e32 v0, 0x4f800000, v0
|
|
; GFX10-NEXT: s_cmp_lg_u32 s11, 0
|
|
; GFX10-NEXT: s_subb_u32 s11, 0, s9
|
|
; GFX10-NEXT: v_add_f32_e32 v0, v0, v1
|
|
; GFX10-NEXT: v_rcp_iflag_f32_e32 v0, v0
|
|
; GFX10-NEXT: v_mul_f32_e32 v0, 0x5f7ffffc, v0
|
|
; GFX10-NEXT: v_mul_f32_e32 v1, 0x2f800000, v0
|
|
; GFX10-NEXT: v_trunc_f32_e32 v1, v1
|
|
; GFX10-NEXT: v_mul_f32_e32 v2, 0xcf800000, v1
|
|
; GFX10-NEXT: v_cvt_u32_f32_e32 v1, v1
|
|
; GFX10-NEXT: v_add_f32_e32 v0, v2, v0
|
|
; GFX10-NEXT: v_mul_lo_u32 v2, s10, v1
|
|
; GFX10-NEXT: v_cvt_u32_f32_e32 v0, v0
|
|
; GFX10-NEXT: v_mul_lo_u32 v3, s11, v0
|
|
; GFX10-NEXT: v_mul_hi_u32 v4, s10, v0
|
|
; GFX10-NEXT: v_mul_lo_u32 v5, s10, v0
|
|
; GFX10-NEXT: v_add3_u32 v2, v3, v2, v4
|
|
; GFX10-NEXT: v_mul_lo_u32 v3, v1, v5
|
|
; GFX10-NEXT: v_mul_hi_u32 v6, v1, v5
|
|
; GFX10-NEXT: v_mul_hi_u32 v5, v0, v5
|
|
; GFX10-NEXT: v_mul_lo_u32 v4, v0, v2
|
|
; GFX10-NEXT: v_mul_lo_u32 v7, v1, v2
|
|
; GFX10-NEXT: v_mul_hi_u32 v8, v0, v2
|
|
; GFX10-NEXT: v_mul_hi_u32 v2, v1, v2
|
|
; GFX10-NEXT: v_add_co_u32 v3, s14, v3, v4
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v4, 0, 1, s14
|
|
; GFX10-NEXT: v_add_co_u32 v6, s14, v7, v6
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v7, 0, 1, s14
|
|
; GFX10-NEXT: v_add_co_u32 v3, s14, v3, v5
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v3, 0, 1, s14
|
|
; GFX10-NEXT: v_add_co_u32 v5, s14, v6, v8
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v6, 0, 1, s14
|
|
; GFX10-NEXT: v_add_nc_u32_e32 v3, v4, v3
|
|
; GFX10-NEXT: v_add_nc_u32_e32 v4, v7, v6
|
|
; GFX10-NEXT: v_add_co_u32 v3, s14, v5, v3
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v5, 0, 1, s14
|
|
; GFX10-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3
|
|
; GFX10-NEXT: v_add3_u32 v2, v4, v5, v2
|
|
; GFX10-NEXT: v_mul_hi_u32 v3, s10, v0
|
|
; GFX10-NEXT: v_mul_lo_u32 v5, s10, v0
|
|
; GFX10-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, v1, v2, vcc_lo
|
|
; GFX10-NEXT: v_mul_lo_u32 v2, s11, v0
|
|
; GFX10-NEXT: v_mul_lo_u32 v4, s10, v1
|
|
; GFX10-NEXT: v_mul_hi_u32 v6, v1, v5
|
|
; GFX10-NEXT: v_add3_u32 v2, v2, v4, v3
|
|
; GFX10-NEXT: v_mul_lo_u32 v3, v1, v5
|
|
; GFX10-NEXT: v_mul_hi_u32 v5, v0, v5
|
|
; GFX10-NEXT: v_mul_lo_u32 v4, v0, v2
|
|
; GFX10-NEXT: v_mul_lo_u32 v7, v1, v2
|
|
; GFX10-NEXT: v_mul_hi_u32 v8, v0, v2
|
|
; GFX10-NEXT: v_mul_hi_u32 v2, v1, v2
|
|
; GFX10-NEXT: v_add_co_u32 v3, s10, v3, v4
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v4, 0, 1, s10
|
|
; GFX10-NEXT: v_add_co_u32 v6, s10, v7, v6
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v7, 0, 1, s10
|
|
; GFX10-NEXT: v_add_co_u32 v3, s10, v3, v5
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v3, 0, 1, s10
|
|
; GFX10-NEXT: v_add_co_u32 v5, s10, v6, v8
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v6, 0, 1, s10
|
|
; GFX10-NEXT: v_add_nc_u32_e32 v3, v4, v3
|
|
; GFX10-NEXT: v_add_nc_u32_e32 v4, v7, v6
|
|
; GFX10-NEXT: v_add_co_u32 v3, s10, v5, v3
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v5, 0, 1, s10
|
|
; GFX10-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3
|
|
; GFX10-NEXT: v_add3_u32 v2, v4, v5, v2
|
|
; GFX10-NEXT: v_mul_hi_u32 v4, s1, v0
|
|
; GFX10-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, v1, v2, vcc_lo
|
|
; GFX10-NEXT: v_mul_lo_u32 v2, s1, v0
|
|
; GFX10-NEXT: v_mul_hi_u32 v0, s0, v0
|
|
; GFX10-NEXT: v_mul_lo_u32 v3, s0, v1
|
|
; GFX10-NEXT: v_mul_lo_u32 v5, s1, v1
|
|
; GFX10-NEXT: v_mul_hi_u32 v6, s0, v1
|
|
; GFX10-NEXT: v_mul_hi_u32 v1, s1, v1
|
|
; GFX10-NEXT: v_add_co_u32 v2, s10, v2, v3
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v3, 0, 1, s10
|
|
; GFX10-NEXT: v_add_co_u32 v4, s10, v5, v4
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v5, 0, 1, s10
|
|
; GFX10-NEXT: v_add_co_u32 v0, s10, v2, v0
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v0, 0, 1, s10
|
|
; GFX10-NEXT: v_add_co_u32 v2, s10, v4, v6
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v4, 0, 1, s10
|
|
; GFX10-NEXT: v_add_nc_u32_e32 v0, v3, v0
|
|
; GFX10-NEXT: v_add_nc_u32_e32 v3, v5, v4
|
|
; GFX10-NEXT: v_add_co_u32 v0, s10, v2, v0
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v2, 0, 1, s10
|
|
; GFX10-NEXT: v_mul_lo_u32 v5, s8, v0
|
|
; GFX10-NEXT: v_add_co_u32 v6, vcc_lo, v0, 1
|
|
; GFX10-NEXT: v_add3_u32 v1, v3, v2, v1
|
|
; GFX10-NEXT: v_mul_lo_u32 v2, s9, v0
|
|
; GFX10-NEXT: v_mul_hi_u32 v3, s8, v0
|
|
; GFX10-NEXT: v_mul_lo_u32 v4, s8, v1
|
|
; GFX10-NEXT: v_add_co_ci_u32_e32 v7, vcc_lo, 0, v1, vcc_lo
|
|
; GFX10-NEXT: v_add3_u32 v2, v2, v4, v3
|
|
; GFX10-NEXT: v_add_co_u32 v3, vcc_lo, v6, 1
|
|
; GFX10-NEXT: v_add_co_ci_u32_e32 v4, vcc_lo, 0, v7, vcc_lo
|
|
; GFX10-NEXT: v_sub_nc_u32_e32 v8, s1, v2
|
|
; GFX10-NEXT: v_sub_co_u32 v5, vcc_lo, s0, v5
|
|
; GFX10-NEXT: v_sub_co_ci_u32_e64 v2, s0, s1, v2, vcc_lo
|
|
; GFX10-NEXT: v_subrev_co_ci_u32_e32 v8, vcc_lo, s9, v8, vcc_lo
|
|
; GFX10-NEXT: v_cmp_le_u32_e32 vcc_lo, s8, v5
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v9, 0, -1, vcc_lo
|
|
; GFX10-NEXT: v_sub_co_u32 v10, vcc_lo, v5, s8
|
|
; GFX10-NEXT: v_subrev_co_ci_u32_e64 v11, s0, 0, v8, vcc_lo
|
|
; GFX10-NEXT: v_cmp_le_u32_e64 s0, s9, v2
|
|
; GFX10-NEXT: v_subrev_co_ci_u32_e32 v8, vcc_lo, s9, v8, vcc_lo
|
|
; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, s9, v11
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v12, 0, -1, s0
|
|
; GFX10-NEXT: v_cmp_le_u32_e64 s0, s8, v10
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v13, 0, -1, s0
|
|
; GFX10-NEXT: v_cmp_le_u32_e64 s0, s9, v11
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v14, 0, -1, s0
|
|
; GFX10-NEXT: v_cmp_eq_u32_e64 s0, s9, v2
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v9, v12, v9, s0
|
|
; GFX10-NEXT: v_cndmask_b32_e32 v12, v14, v13, vcc_lo
|
|
; GFX10-NEXT: v_sub_co_u32 v13, vcc_lo, v10, s8
|
|
; GFX10-NEXT: v_subrev_co_ci_u32_e32 v8, vcc_lo, 0, v8, vcc_lo
|
|
; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v12
|
|
; GFX10-NEXT: v_cmp_ne_u32_e64 s0, 0, v12
|
|
; GFX10-NEXT: v_cmp_ne_u32_e64 s1, 0, v9
|
|
; GFX10-NEXT: s_xor_b64 s[8:9], s[2:3], s[12:13]
|
|
; GFX10-NEXT: v_cndmask_b32_e32 v3, v6, v3, vcc_lo
|
|
; GFX10-NEXT: v_cndmask_b32_e32 v4, v7, v4, vcc_lo
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v6, v10, v13, s0
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v7, v11, v8, s0
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, v3, s1
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, v4, s1
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v3, v5, v6, s1
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, v7, s1
|
|
; GFX10-NEXT: v_mov_b32_e32 v4, 0
|
|
; GFX10-NEXT: v_xor_b32_e32 v0, s8, v0
|
|
; GFX10-NEXT: v_xor_b32_e32 v1, s9, v1
|
|
; GFX10-NEXT: v_xor_b32_e32 v3, s2, v3
|
|
; GFX10-NEXT: v_xor_b32_e32 v5, s2, v2
|
|
; GFX10-NEXT: v_sub_co_u32 v0, vcc_lo, v0, s8
|
|
; GFX10-NEXT: v_subrev_co_ci_u32_e32 v1, vcc_lo, s9, v1, vcc_lo
|
|
; GFX10-NEXT: v_sub_co_u32 v2, vcc_lo, v3, s2
|
|
; GFX10-NEXT: v_subrev_co_ci_u32_e32 v3, vcc_lo, s2, v5, vcc_lo
|
|
; GFX10-NEXT: global_store_dwordx2 v4, v[0:1], s[4:5]
|
|
; GFX10-NEXT: global_store_dwordx2 v4, v[2:3], s[6:7]
|
|
; GFX10-NEXT: s_endpgm
|
|
%div = sdiv i64 %x, %y
|
|
store i64 %div, i64 addrspace(1)* %out0
|
|
%rem = srem i64 %x, %y
|
|
store i64 %rem, i64 addrspace(1)* %out1
|
|
ret void
|
|
}
; Combined sdiv+srem of <2 x i32>: checks that GlobalISel emits the
; reciprocal-based unsigned division sequence once per lane (sign handled via
; ashr/add/xor pre/post-fixup) and stores quotient and remainder separately.
define amdgpu_kernel void @sdivrem_v2i32(<2 x i32> addrspace(1)* %out0, <2 x i32> addrspace(1)* %out1, <2 x i32> %x, <2 x i32> %y) {
; GFX8-LABEL: sdivrem_v2i32:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x18
; GFX8-NEXT: s_load_dwordx2 s[2:3], s[4:5], 0x10
; GFX8-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x0
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: s_ashr_i32 s8, s0, 31
; GFX8-NEXT: s_add_i32 s0, s0, s8
; GFX8-NEXT: s_xor_b32 s9, s0, s8
; GFX8-NEXT: v_cvt_f32_u32_e32 v0, s9
; GFX8-NEXT: s_ashr_i32 s11, s1, 31
; GFX8-NEXT: s_add_i32 s0, s1, s11
; GFX8-NEXT: s_sub_i32 s1, 0, s9
; GFX8-NEXT: v_rcp_iflag_f32_e32 v0, v0
; GFX8-NEXT: s_xor_b32 s12, s0, s11
; GFX8-NEXT: v_cvt_f32_u32_e32 v2, s12
; GFX8-NEXT: s_ashr_i32 s10, s2, 31
; GFX8-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX8-NEXT: v_cvt_u32_f32_e32 v0, v0
; GFX8-NEXT: s_add_i32 s0, s2, s10
; GFX8-NEXT: s_xor_b32 s0, s0, s10
; GFX8-NEXT: v_rcp_iflag_f32_e32 v2, v2
; GFX8-NEXT: v_mul_lo_u32 v1, s1, v0
; GFX8-NEXT: s_ashr_i32 s2, s3, 31
; GFX8-NEXT: v_mul_hi_u32 v1, v0, v1
; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v1
; GFX8-NEXT: v_mul_hi_u32 v0, s0, v0
; GFX8-NEXT: v_mul_f32_e32 v1, 0x4f7ffffe, v2
; GFX8-NEXT: v_cvt_u32_f32_e32 v1, v1
; GFX8-NEXT: v_mul_lo_u32 v2, v0, s9
; GFX8-NEXT: v_add_u32_e32 v3, vcc, 1, v0
; GFX8-NEXT: v_sub_u32_e32 v2, vcc, s0, v2
; GFX8-NEXT: v_cmp_le_u32_e32 vcc, s9, v2
; GFX8-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc
; GFX8-NEXT: v_subrev_u32_e64 v3, s[0:1], s9, v2
; GFX8-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc
; GFX8-NEXT: v_add_u32_e32 v3, vcc, 1, v0
; GFX8-NEXT: v_cmp_le_u32_e32 vcc, s9, v2
; GFX8-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc
; GFX8-NEXT: v_subrev_u32_e64 v3, s[0:1], s9, v2
; GFX8-NEXT: s_sub_i32 s0, 0, s12
; GFX8-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc
; GFX8-NEXT: v_mul_lo_u32 v3, s0, v1
; GFX8-NEXT: s_add_i32 s1, s3, s2
; GFX8-NEXT: s_xor_b32 s1, s1, s2
; GFX8-NEXT: s_xor_b32 s0, s10, s8
; GFX8-NEXT: v_mul_hi_u32 v3, v1, v3
; GFX8-NEXT: v_xor_b32_e32 v0, s0, v0
; GFX8-NEXT: v_xor_b32_e32 v2, s10, v2
; GFX8-NEXT: v_subrev_u32_e32 v0, vcc, s0, v0
; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v3
; GFX8-NEXT: v_mul_hi_u32 v1, s1, v1
; GFX8-NEXT: v_subrev_u32_e32 v2, vcc, s10, v2
; GFX8-NEXT: v_mul_lo_u32 v3, v1, s12
; GFX8-NEXT: v_add_u32_e32 v4, vcc, 1, v1
; GFX8-NEXT: v_sub_u32_e32 v3, vcc, s1, v3
; GFX8-NEXT: v_cmp_le_u32_e32 vcc, s12, v3
; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc
; GFX8-NEXT: v_subrev_u32_e64 v4, s[0:1], s12, v3
; GFX8-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc
; GFX8-NEXT: v_add_u32_e32 v4, vcc, 1, v1
; GFX8-NEXT: v_cmp_le_u32_e32 vcc, s12, v3
; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc
; GFX8-NEXT: v_subrev_u32_e64 v4, s[0:1], s12, v3
; GFX8-NEXT: s_xor_b32 s0, s2, s11
; GFX8-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc
; GFX8-NEXT: v_xor_b32_e32 v1, s0, v1
; GFX8-NEXT: v_mov_b32_e32 v4, s4
; GFX8-NEXT: v_subrev_u32_e32 v1, vcc, s0, v1
; GFX8-NEXT: v_mov_b32_e32 v5, s5
; GFX8-NEXT: v_xor_b32_e32 v3, s2, v3
; GFX8-NEXT: flat_store_dwordx2 v[4:5], v[0:1]
; GFX8-NEXT: v_mov_b32_e32 v0, s6
; GFX8-NEXT: v_subrev_u32_e32 v3, vcc, s2, v3
; GFX8-NEXT: v_mov_b32_e32 v1, s7
; GFX8-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; GFX8-NEXT: s_endpgm
;
; GFX9-LABEL: sdivrem_v2i32:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x18
; GFX9-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x10
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_ashr_i32 s10, s6, 31
; GFX9-NEXT: s_add_i32 s0, s6, s10
; GFX9-NEXT: s_xor_b32 s6, s0, s10
; GFX9-NEXT: v_cvt_f32_u32_e32 v0, s6
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; GFX9-NEXT: s_ashr_i32 s5, s7, 31
; GFX9-NEXT: s_add_i32 s7, s7, s5
; GFX9-NEXT: v_rcp_iflag_f32_e32 v0, v0
; GFX9-NEXT: s_xor_b32 s7, s7, s5
; GFX9-NEXT: v_cvt_f32_u32_e32 v1, s7
; GFX9-NEXT: s_sub_i32 s11, 0, s6
; GFX9-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX9-NEXT: v_cvt_u32_f32_e32 v0, v0
; GFX9-NEXT: v_rcp_iflag_f32_e32 v1, v1
; GFX9-NEXT: s_ashr_i32 s4, s8, 31
; GFX9-NEXT: s_add_i32 s8, s8, s4
; GFX9-NEXT: v_mul_lo_u32 v2, s11, v0
; GFX9-NEXT: v_mul_f32_e32 v1, 0x4f7ffffe, v1
; GFX9-NEXT: v_cvt_u32_f32_e32 v1, v1
; GFX9-NEXT: s_xor_b32 s8, s8, s4
; GFX9-NEXT: v_mul_hi_u32 v2, v0, v2
; GFX9-NEXT: s_sub_i32 s12, 0, s7
; GFX9-NEXT: s_ashr_i32 s11, s9, 31
; GFX9-NEXT: s_add_i32 s9, s9, s11
; GFX9-NEXT: v_add_u32_e32 v0, v0, v2
; GFX9-NEXT: v_mul_hi_u32 v0, s8, v0
; GFX9-NEXT: v_mul_lo_u32 v2, s12, v1
; GFX9-NEXT: s_xor_b32 s9, s9, s11
; GFX9-NEXT: v_mul_lo_u32 v3, v0, s6
; GFX9-NEXT: v_mul_hi_u32 v2, v1, v2
; GFX9-NEXT: v_add_u32_e32 v4, 1, v0
; GFX9-NEXT: v_sub_u32_e32 v3, s8, v3
; GFX9-NEXT: v_cmp_le_u32_e32 vcc, s6, v3
; GFX9-NEXT: v_add_u32_e32 v1, v1, v2
; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v4, vcc
; GFX9-NEXT: v_subrev_u32_e32 v4, s6, v3
; GFX9-NEXT: v_mul_hi_u32 v1, s9, v1
; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc
; GFX9-NEXT: v_add_u32_e32 v4, 1, v0
; GFX9-NEXT: v_cmp_le_u32_e32 vcc, s6, v3
; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v4, vcc
; GFX9-NEXT: v_subrev_u32_e32 v4, s6, v3
; GFX9-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc
; GFX9-NEXT: v_mul_lo_u32 v3, v1, s7
; GFX9-NEXT: v_add_u32_e32 v4, 1, v1
; GFX9-NEXT: v_xor_b32_e32 v2, s4, v2
; GFX9-NEXT: s_xor_b32 s6, s4, s10
; GFX9-NEXT: v_sub_u32_e32 v3, s9, v3
; GFX9-NEXT: v_cmp_le_u32_e32 vcc, s7, v3
; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc
; GFX9-NEXT: v_subrev_u32_e32 v4, s7, v3
; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc
; GFX9-NEXT: v_add_u32_e32 v4, 1, v1
; GFX9-NEXT: v_cmp_le_u32_e32 vcc, s7, v3
; GFX9-NEXT: v_subrev_u32_e32 v2, s4, v2
; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc
; GFX9-NEXT: v_subrev_u32_e32 v4, s7, v3
; GFX9-NEXT: s_xor_b32 s4, s11, s5
; GFX9-NEXT: v_xor_b32_e32 v0, s6, v0
; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc
; GFX9-NEXT: v_xor_b32_e32 v1, s4, v1
; GFX9-NEXT: v_subrev_u32_e32 v0, s6, v0
; GFX9-NEXT: v_subrev_u32_e32 v1, s4, v1
; GFX9-NEXT: v_xor_b32_e32 v3, s11, v3
; GFX9-NEXT: v_mov_b32_e32 v4, 0
; GFX9-NEXT: v_subrev_u32_e32 v3, s11, v3
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_store_dwordx2 v4, v[0:1], s[0:1]
; GFX9-NEXT: global_store_dwordx2 v4, v[2:3], s[2:3]
; GFX9-NEXT: s_endpgm
;
; GFX10-LABEL: sdivrem_v2i32:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x18
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_ashr_i32 s2, s0, 31
; GFX10-NEXT: s_ashr_i32 s3, s1, 31
; GFX10-NEXT: s_add_i32 s0, s0, s2
; GFX10-NEXT: s_add_i32 s1, s1, s3
; GFX10-NEXT: s_xor_b32 s8, s0, s2
; GFX10-NEXT: s_xor_b32 s9, s1, s3
; GFX10-NEXT: v_cvt_f32_u32_e32 v0, s8
; GFX10-NEXT: v_cvt_f32_u32_e32 v1, s9
; GFX10-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x10
; GFX10-NEXT: s_sub_i32 s6, 0, s8
; GFX10-NEXT: s_sub_i32 s7, 0, s9
; GFX10-NEXT: v_rcp_iflag_f32_e32 v0, v0
; GFX10-NEXT: v_rcp_iflag_f32_e32 v1, v1
; GFX10-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX10-NEXT: v_mul_f32_e32 v1, 0x4f7ffffe, v1
; GFX10-NEXT: v_cvt_u32_f32_e32 v0, v0
; GFX10-NEXT: v_cvt_u32_f32_e32 v1, v1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_ashr_i32 s10, s0, 31
; GFX10-NEXT: s_ashr_i32 s11, s1, 31
; GFX10-NEXT: s_add_i32 s0, s0, s10
; GFX10-NEXT: v_mul_lo_u32 v2, s6, v0
; GFX10-NEXT: v_mul_lo_u32 v3, s7, v1
; GFX10-NEXT: s_add_i32 s1, s1, s11
; GFX10-NEXT: s_xor_b32 s0, s0, s10
; GFX10-NEXT: s_xor_b32 s1, s1, s11
; GFX10-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x0
; GFX10-NEXT: v_mul_hi_u32 v2, v0, v2
; GFX10-NEXT: v_mul_hi_u32 v3, v1, v3
; GFX10-NEXT: v_add_nc_u32_e32 v0, v0, v2
; GFX10-NEXT: v_add_nc_u32_e32 v1, v1, v3
; GFX10-NEXT: v_mul_hi_u32 v0, s0, v0
; GFX10-NEXT: v_mul_hi_u32 v1, s1, v1
; GFX10-NEXT: v_mul_lo_u32 v2, v0, s8
; GFX10-NEXT: v_mul_lo_u32 v3, v1, s9
; GFX10-NEXT: v_add_nc_u32_e32 v4, 1, v0
; GFX10-NEXT: v_add_nc_u32_e32 v5, 1, v1
; GFX10-NEXT: v_sub_nc_u32_e32 v2, s0, v2
; GFX10-NEXT: v_sub_nc_u32_e32 v3, s1, v3
; GFX10-NEXT: s_xor_b32 s1, s10, s2
; GFX10-NEXT: v_subrev_nc_u32_e32 v6, s8, v2
; GFX10-NEXT: v_cmp_le_u32_e32 vcc_lo, s9, v3
; GFX10-NEXT: v_cmp_le_u32_e64 s0, s8, v2
; GFX10-NEXT: v_subrev_nc_u32_e32 v7, s9, v3
; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, v4, s0
; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, v6, s0
; GFX10-NEXT: v_cndmask_b32_e32 v3, v3, v7, vcc_lo
; GFX10-NEXT: v_add_nc_u32_e32 v5, 1, v1
; GFX10-NEXT: v_add_nc_u32_e32 v4, 1, v0
; GFX10-NEXT: v_cmp_le_u32_e64 s0, s8, v2
; GFX10-NEXT: v_cmp_le_u32_e32 vcc_lo, s9, v3
; GFX10-NEXT: v_subrev_nc_u32_e32 v6, s8, v2
; GFX10-NEXT: v_subrev_nc_u32_e32 v7, s9, v3
; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, v4, s0
; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, v6, s0
; GFX10-NEXT: v_cndmask_b32_e32 v3, v3, v7, vcc_lo
; GFX10-NEXT: s_xor_b32 s0, s11, s3
; GFX10-NEXT: v_xor_b32_e32 v0, s1, v0
; GFX10-NEXT: v_xor_b32_e32 v1, s0, v1
; GFX10-NEXT: v_xor_b32_e32 v2, s10, v2
; GFX10-NEXT: v_xor_b32_e32 v3, s11, v3
; GFX10-NEXT: v_mov_b32_e32 v4, 0
; GFX10-NEXT: v_subrev_nc_u32_e32 v0, s1, v0
; GFX10-NEXT: v_subrev_nc_u32_e32 v1, s0, v1
; GFX10-NEXT: v_subrev_nc_u32_e32 v2, s10, v2
; GFX10-NEXT: v_subrev_nc_u32_e32 v3, s11, v3
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: global_store_dwordx2 v4, v[0:1], s[4:5]
; GFX10-NEXT: global_store_dwordx2 v4, v[2:3], s[6:7]
; GFX10-NEXT: s_endpgm
  %div = sdiv <2 x i32> %x, %y
  store <2 x i32> %div, <2 x i32> addrspace(1)* %out0
  %rem = srem <2 x i32> %x, %y
  store <2 x i32> %rem, <2 x i32> addrspace(1)* %out1
  ret void
}
; Combined sdiv+srem of <4 x i32>: the 4-lane variant of the test above.
; Verifies the per-lane reciprocal division expansion is shared between the
; quotient and remainder, with sign fixups applied via s_ashr/s_add/s_xor.
define amdgpu_kernel void @sdivrem_v4i32(<4 x i32> addrspace(1)* %out0, <4 x i32> addrspace(1)* %out1, <4 x i32> %x, <4 x i32> %y) {
; GFX8-LABEL: sdivrem_v4i32:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x20
; GFX8-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x10
; GFX8-NEXT: v_mov_b32_e32 v3, 0x4f7ffffe
; GFX8-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x0
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: s_ashr_i32 s12, s0, 31
; GFX8-NEXT: s_add_i32 s0, s0, s12
; GFX8-NEXT: s_xor_b32 s13, s0, s12
; GFX8-NEXT: v_cvt_f32_u32_e32 v0, s13
; GFX8-NEXT: s_ashr_i32 s15, s1, 31
; GFX8-NEXT: s_add_i32 s0, s1, s15
; GFX8-NEXT: s_sub_i32 s1, 0, s13
; GFX8-NEXT: v_rcp_iflag_f32_e32 v0, v0
; GFX8-NEXT: s_xor_b32 s16, s0, s15
; GFX8-NEXT: v_cvt_f32_u32_e32 v2, s16
; GFX8-NEXT: s_ashr_i32 s14, s8, 31
; GFX8-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX8-NEXT: v_cvt_u32_f32_e32 v0, v0
; GFX8-NEXT: s_add_i32 s0, s8, s14
; GFX8-NEXT: s_xor_b32 s0, s0, s14
; GFX8-NEXT: v_rcp_iflag_f32_e32 v2, v2
; GFX8-NEXT: v_mul_lo_u32 v1, s1, v0
; GFX8-NEXT: s_ashr_i32 s8, s9, 31
; GFX8-NEXT: v_mul_hi_u32 v1, v0, v1
; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v1
; GFX8-NEXT: v_mul_hi_u32 v0, s0, v0
; GFX8-NEXT: v_mul_f32_e32 v1, v2, v3
; GFX8-NEXT: v_cvt_u32_f32_e32 v1, v1
; GFX8-NEXT: v_mul_lo_u32 v2, v0, s13
; GFX8-NEXT: v_add_u32_e32 v4, vcc, 1, v0
; GFX8-NEXT: v_sub_u32_e32 v2, vcc, s0, v2
; GFX8-NEXT: v_cmp_le_u32_e32 vcc, s13, v2
; GFX8-NEXT: v_cndmask_b32_e32 v0, v0, v4, vcc
; GFX8-NEXT: v_subrev_u32_e64 v4, s[0:1], s13, v2
; GFX8-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc
; GFX8-NEXT: v_add_u32_e32 v4, vcc, 1, v0
; GFX8-NEXT: v_cmp_le_u32_e32 vcc, s13, v2
; GFX8-NEXT: v_cndmask_b32_e32 v0, v0, v4, vcc
; GFX8-NEXT: v_subrev_u32_e64 v4, s[0:1], s13, v2
; GFX8-NEXT: s_sub_i32 s0, 0, s16
; GFX8-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc
; GFX8-NEXT: v_mul_lo_u32 v4, s0, v1
; GFX8-NEXT: s_add_i32 s1, s9, s8
; GFX8-NEXT: s_xor_b32 s1, s1, s8
; GFX8-NEXT: s_xor_b32 s0, s14, s12
; GFX8-NEXT: v_mul_hi_u32 v4, v1, v4
; GFX8-NEXT: v_xor_b32_e32 v0, s0, v0
; GFX8-NEXT: v_xor_b32_e32 v2, s14, v2
; GFX8-NEXT: s_ashr_i32 s9, s2, 31
; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v4
; GFX8-NEXT: v_mul_hi_u32 v1, s1, v1
; GFX8-NEXT: v_subrev_u32_e32 v0, vcc, s0, v0
; GFX8-NEXT: v_subrev_u32_e32 v4, vcc, s14, v2
; GFX8-NEXT: v_mul_lo_u32 v5, v1, s16
; GFX8-NEXT: s_add_i32 s0, s2, s9
; GFX8-NEXT: s_xor_b32 s2, s0, s9
; GFX8-NEXT: s_ashr_i32 s12, s10, 31
; GFX8-NEXT: v_sub_u32_e32 v2, vcc, s1, v5
; GFX8-NEXT: v_add_u32_e32 v5, vcc, 1, v1
; GFX8-NEXT: v_cmp_le_u32_e32 vcc, s16, v2
; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc
; GFX8-NEXT: v_cvt_f32_u32_e32 v5, s2
; GFX8-NEXT: v_subrev_u32_e64 v6, s[0:1], s16, v2
; GFX8-NEXT: v_cndmask_b32_e32 v2, v2, v6, vcc
; GFX8-NEXT: v_rcp_iflag_f32_e32 v5, v5
; GFX8-NEXT: v_add_u32_e32 v6, vcc, 1, v1
; GFX8-NEXT: v_cmp_le_u32_e32 vcc, s16, v2
; GFX8-NEXT: v_mul_f32_e32 v5, v5, v3
; GFX8-NEXT: v_cvt_u32_f32_e32 v5, v5
; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v6, vcc
; GFX8-NEXT: v_subrev_u32_e64 v6, s[0:1], s16, v2
; GFX8-NEXT: s_sub_i32 s0, 0, s2
; GFX8-NEXT: v_cndmask_b32_e32 v2, v2, v6, vcc
; GFX8-NEXT: v_mul_lo_u32 v6, s0, v5
; GFX8-NEXT: s_add_i32 s1, s10, s12
; GFX8-NEXT: s_xor_b32 s1, s1, s12
; GFX8-NEXT: s_xor_b32 s0, s8, s15
; GFX8-NEXT: v_mul_hi_u32 v6, v5, v6
; GFX8-NEXT: v_xor_b32_e32 v2, s8, v2
; GFX8-NEXT: v_xor_b32_e32 v1, s0, v1
; GFX8-NEXT: v_subrev_u32_e32 v1, vcc, s0, v1
; GFX8-NEXT: v_add_u32_e32 v5, vcc, v5, v6
; GFX8-NEXT: v_mul_hi_u32 v6, s1, v5
; GFX8-NEXT: v_subrev_u32_e32 v5, vcc, s8, v2
; GFX8-NEXT: s_ashr_i32 s8, s3, 31
; GFX8-NEXT: v_mul_lo_u32 v7, v6, s2
; GFX8-NEXT: s_add_i32 s0, s3, s8
; GFX8-NEXT: s_xor_b32 s3, s0, s8
; GFX8-NEXT: v_sub_u32_e32 v2, vcc, s1, v7
; GFX8-NEXT: v_add_u32_e32 v7, vcc, 1, v6
; GFX8-NEXT: v_cmp_le_u32_e32 vcc, s2, v2
; GFX8-NEXT: v_cndmask_b32_e32 v6, v6, v7, vcc
; GFX8-NEXT: v_cvt_f32_u32_e32 v7, s3
; GFX8-NEXT: v_subrev_u32_e64 v8, s[0:1], s2, v2
; GFX8-NEXT: v_cndmask_b32_e32 v2, v2, v8, vcc
; GFX8-NEXT: v_rcp_iflag_f32_e32 v7, v7
; GFX8-NEXT: v_add_u32_e32 v8, vcc, 1, v6
; GFX8-NEXT: v_cmp_le_u32_e32 vcc, s2, v2
; GFX8-NEXT: v_mul_f32_e32 v3, v7, v3
; GFX8-NEXT: v_cvt_u32_f32_e32 v3, v3
; GFX8-NEXT: v_subrev_u32_e64 v7, s[0:1], s2, v2
; GFX8-NEXT: s_sub_i32 s0, 0, s3
; GFX8-NEXT: v_cndmask_b32_e32 v7, v2, v7, vcc
; GFX8-NEXT: v_mul_lo_u32 v2, s0, v3
; GFX8-NEXT: s_ashr_i32 s2, s11, 31
; GFX8-NEXT: s_add_i32 s1, s11, s2
; GFX8-NEXT: v_cndmask_b32_e32 v6, v6, v8, vcc
; GFX8-NEXT: v_mul_hi_u32 v2, v3, v2
; GFX8-NEXT: s_xor_b32 s1, s1, s2
; GFX8-NEXT: s_xor_b32 s0, s12, s9
; GFX8-NEXT: v_xor_b32_e32 v6, s0, v6
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v3, v2
; GFX8-NEXT: v_mul_hi_u32 v3, s1, v2
; GFX8-NEXT: v_subrev_u32_e32 v2, vcc, s0, v6
; GFX8-NEXT: v_xor_b32_e32 v6, s12, v7
; GFX8-NEXT: v_mul_lo_u32 v7, v3, s3
; GFX8-NEXT: v_subrev_u32_e32 v6, vcc, s12, v6
; GFX8-NEXT: v_add_u32_e32 v8, vcc, 1, v3
; GFX8-NEXT: v_sub_u32_e32 v7, vcc, s1, v7
; GFX8-NEXT: v_cmp_le_u32_e32 vcc, s3, v7
; GFX8-NEXT: v_cndmask_b32_e32 v3, v3, v8, vcc
; GFX8-NEXT: v_subrev_u32_e64 v8, s[0:1], s3, v7
; GFX8-NEXT: v_cndmask_b32_e32 v7, v7, v8, vcc
; GFX8-NEXT: v_add_u32_e32 v8, vcc, 1, v3
; GFX8-NEXT: v_cmp_le_u32_e32 vcc, s3, v7
; GFX8-NEXT: v_cndmask_b32_e32 v3, v3, v8, vcc
; GFX8-NEXT: v_subrev_u32_e64 v8, s[0:1], s3, v7
; GFX8-NEXT: s_xor_b32 s0, s2, s8
; GFX8-NEXT: v_cndmask_b32_e32 v7, v7, v8, vcc
; GFX8-NEXT: v_xor_b32_e32 v3, s0, v3
; GFX8-NEXT: v_mov_b32_e32 v9, s5
; GFX8-NEXT: v_subrev_u32_e32 v3, vcc, s0, v3
; GFX8-NEXT: v_mov_b32_e32 v8, s4
; GFX8-NEXT: v_xor_b32_e32 v7, s2, v7
; GFX8-NEXT: flat_store_dwordx4 v[8:9], v[0:3]
; GFX8-NEXT: v_subrev_u32_e32 v7, vcc, s2, v7
; GFX8-NEXT: v_mov_b32_e32 v0, s6
; GFX8-NEXT: v_mov_b32_e32 v1, s7
; GFX8-NEXT: flat_store_dwordx4 v[0:1], v[4:7]
; GFX8-NEXT: s_endpgm
;
; GFX9-LABEL: sdivrem_v4i32:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx4 s[12:15], s[4:5], 0x20
; GFX9-NEXT: v_mov_b32_e32 v2, 0x4f7ffffe
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_ashr_i32 s6, s12, 31
; GFX9-NEXT: s_add_i32 s0, s12, s6
; GFX9-NEXT: s_xor_b32 s7, s0, s6
; GFX9-NEXT: v_cvt_f32_u32_e32 v0, s7
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; GFX9-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x10
; GFX9-NEXT: s_ashr_i32 s4, s13, 31
; GFX9-NEXT: s_add_i32 s5, s13, s4
; GFX9-NEXT: v_rcp_iflag_f32_e32 v0, v0
; GFX9-NEXT: s_sub_i32 s12, 0, s7
; GFX9-NEXT: s_xor_b32 s5, s5, s4
; GFX9-NEXT: v_cvt_f32_u32_e32 v1, s5
; GFX9-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX9-NEXT: v_cvt_u32_f32_e32 v0, v0
; GFX9-NEXT: s_sub_i32 s13, 0, s5
; GFX9-NEXT: v_rcp_iflag_f32_e32 v1, v1
; GFX9-NEXT: v_mul_lo_u32 v3, s12, v0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_ashr_i32 s12, s8, 31
; GFX9-NEXT: s_add_i32 s8, s8, s12
; GFX9-NEXT: s_xor_b32 s8, s8, s12
; GFX9-NEXT: v_mul_hi_u32 v3, v0, v3
; GFX9-NEXT: v_mul_f32_e32 v1, v1, v2
; GFX9-NEXT: v_cvt_u32_f32_e32 v1, v1
; GFX9-NEXT: s_xor_b32 s6, s12, s6
; GFX9-NEXT: v_add_u32_e32 v0, v0, v3
; GFX9-NEXT: v_mul_hi_u32 v0, s8, v0
; GFX9-NEXT: v_mul_lo_u32 v3, s13, v1
; GFX9-NEXT: s_ashr_i32 s13, s9, 31
; GFX9-NEXT: s_add_i32 s9, s9, s13
; GFX9-NEXT: v_mul_lo_u32 v4, v0, s7
; GFX9-NEXT: v_mul_hi_u32 v3, v1, v3
; GFX9-NEXT: v_add_u32_e32 v5, 1, v0
; GFX9-NEXT: s_xor_b32 s4, s13, s4
; GFX9-NEXT: v_sub_u32_e32 v4, s8, v4
; GFX9-NEXT: v_cmp_le_u32_e32 vcc, s7, v4
; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v5, vcc
; GFX9-NEXT: v_subrev_u32_e32 v5, s7, v4
; GFX9-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc
; GFX9-NEXT: v_add_u32_e32 v5, 1, v0
; GFX9-NEXT: v_cmp_le_u32_e32 vcc, s7, v4
; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v5, vcc
; GFX9-NEXT: v_subrev_u32_e32 v5, s7, v4
; GFX9-NEXT: s_xor_b32 s7, s9, s13
; GFX9-NEXT: v_add_u32_e32 v1, v1, v3
; GFX9-NEXT: v_mul_hi_u32 v1, s7, v1
; GFX9-NEXT: v_cndmask_b32_e32 v3, v4, v5, vcc
; GFX9-NEXT: v_xor_b32_e32 v0, s6, v0
; GFX9-NEXT: v_subrev_u32_e32 v0, s6, v0
; GFX9-NEXT: v_mul_lo_u32 v5, v1, s5
; GFX9-NEXT: v_xor_b32_e32 v3, s12, v3
; GFX9-NEXT: s_ashr_i32 s6, s14, 31
; GFX9-NEXT: v_subrev_u32_e32 v4, s12, v3
; GFX9-NEXT: v_sub_u32_e32 v3, s7, v5
; GFX9-NEXT: s_add_i32 s7, s14, s6
; GFX9-NEXT: s_xor_b32 s7, s7, s6
; GFX9-NEXT: v_cvt_f32_u32_e32 v5, s7
; GFX9-NEXT: v_add_u32_e32 v6, 1, v1
; GFX9-NEXT: v_cmp_le_u32_e32 vcc, s5, v3
; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v6, vcc
; GFX9-NEXT: v_rcp_iflag_f32_e32 v5, v5
; GFX9-NEXT: v_subrev_u32_e32 v6, s5, v3
; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v6, vcc
; GFX9-NEXT: v_add_u32_e32 v6, 1, v1
; GFX9-NEXT: v_mul_f32_e32 v5, v5, v2
; GFX9-NEXT: v_cvt_u32_f32_e32 v5, v5
; GFX9-NEXT: v_cmp_le_u32_e32 vcc, s5, v3
; GFX9-NEXT: s_sub_i32 s8, 0, s7
; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v6, vcc
; GFX9-NEXT: v_mul_lo_u32 v6, s8, v5
; GFX9-NEXT: v_xor_b32_e32 v1, s4, v1
; GFX9-NEXT: v_subrev_u32_e32 v1, s4, v1
; GFX9-NEXT: s_ashr_i32 s4, s15, 31
; GFX9-NEXT: s_add_i32 s9, s15, s4
; GFX9-NEXT: v_mul_hi_u32 v6, v5, v6
; GFX9-NEXT: s_xor_b32 s9, s9, s4
; GFX9-NEXT: v_cvt_f32_u32_e32 v8, s9
; GFX9-NEXT: v_subrev_u32_e32 v7, s5, v3
; GFX9-NEXT: s_ashr_i32 s5, s10, 31
; GFX9-NEXT: s_add_i32 s8, s10, s5
; GFX9-NEXT: s_xor_b32 s8, s8, s5
; GFX9-NEXT: v_add_u32_e32 v5, v5, v6
; GFX9-NEXT: v_mul_hi_u32 v6, s8, v5
; GFX9-NEXT: v_rcp_iflag_f32_e32 v8, v8
; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v7, vcc
; GFX9-NEXT: v_xor_b32_e32 v3, s13, v3
; GFX9-NEXT: v_mul_lo_u32 v7, v6, s7
; GFX9-NEXT: v_mul_f32_e32 v2, v8, v2
; GFX9-NEXT: v_cvt_u32_f32_e32 v2, v2
; GFX9-NEXT: v_subrev_u32_e32 v5, s13, v3
; GFX9-NEXT: v_sub_u32_e32 v3, s8, v7
; GFX9-NEXT: s_sub_i32 s8, 0, s9
; GFX9-NEXT: v_mul_lo_u32 v8, s8, v2
; GFX9-NEXT: v_add_u32_e32 v7, 1, v6
; GFX9-NEXT: v_cmp_le_u32_e32 vcc, s7, v3
; GFX9-NEXT: v_cndmask_b32_e32 v6, v6, v7, vcc
; GFX9-NEXT: v_subrev_u32_e32 v7, s7, v3
; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v7, vcc
; GFX9-NEXT: v_mul_hi_u32 v8, v2, v8
; GFX9-NEXT: v_add_u32_e32 v7, 1, v6
; GFX9-NEXT: v_cmp_le_u32_e32 vcc, s7, v3
; GFX9-NEXT: v_cndmask_b32_e32 v6, v6, v7, vcc
; GFX9-NEXT: v_subrev_u32_e32 v7, s7, v3
; GFX9-NEXT: s_ashr_i32 s7, s11, 31
; GFX9-NEXT: s_add_i32 s8, s11, s7
; GFX9-NEXT: s_xor_b32 s8, s8, s7
; GFX9-NEXT: v_add_u32_e32 v2, v2, v8
; GFX9-NEXT: v_mul_hi_u32 v8, s8, v2
; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v7, vcc
; GFX9-NEXT: s_xor_b32 s6, s5, s6
; GFX9-NEXT: v_xor_b32_e32 v3, s5, v3
; GFX9-NEXT: v_mul_lo_u32 v7, v8, s9
; GFX9-NEXT: v_xor_b32_e32 v2, s6, v6
; GFX9-NEXT: v_subrev_u32_e32 v6, s5, v3
; GFX9-NEXT: s_xor_b32 s4, s7, s4
; GFX9-NEXT: v_sub_u32_e32 v3, s8, v7
; GFX9-NEXT: v_add_u32_e32 v7, 1, v8
; GFX9-NEXT: v_cmp_le_u32_e32 vcc, s9, v3
; GFX9-NEXT: v_cndmask_b32_e32 v7, v8, v7, vcc
; GFX9-NEXT: v_subrev_u32_e32 v8, s9, v3
; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v8, vcc
; GFX9-NEXT: v_add_u32_e32 v8, 1, v7
; GFX9-NEXT: v_cmp_le_u32_e32 vcc, s9, v3
; GFX9-NEXT: v_cndmask_b32_e32 v7, v7, v8, vcc
; GFX9-NEXT: v_subrev_u32_e32 v8, s9, v3
; GFX9-NEXT: v_cndmask_b32_e32 v8, v3, v8, vcc
; GFX9-NEXT: v_xor_b32_e32 v3, s4, v7
; GFX9-NEXT: v_subrev_u32_e32 v2, s6, v2
; GFX9-NEXT: v_subrev_u32_e32 v3, s4, v3
; GFX9-NEXT: v_xor_b32_e32 v7, s7, v8
; GFX9-NEXT: v_mov_b32_e32 v8, 0
; GFX9-NEXT: v_subrev_u32_e32 v7, s7, v7
; GFX9-NEXT: global_store_dwordx4 v8, v[0:3], s[0:1]
; GFX9-NEXT: global_store_dwordx4 v8, v[4:7], s[2:3]
; GFX9-NEXT: s_endpgm
;
; GFX10-LABEL: sdivrem_v4i32:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_clause 0x1
; GFX10-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x20
; GFX10-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x10
; GFX10-NEXT: v_mov_b32_e32 v4, 0x4f7ffffe
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_ashr_i32 s12, s8, 31
; GFX10-NEXT: s_ashr_i32 s14, s10, 31
; GFX10-NEXT: s_add_i32 s6, s8, s12
; GFX10-NEXT: s_add_i32 s8, s10, s14
; GFX10-NEXT: s_xor_b32 s10, s6, s12
; GFX10-NEXT: s_ashr_i32 s13, s9, 31
; GFX10-NEXT: v_cvt_f32_u32_e32 v0, s10
; GFX10-NEXT: s_ashr_i32 s15, s11, 31
; GFX10-NEXT: s_add_i32 s7, s9, s13
; GFX10-NEXT: s_add_i32 s9, s11, s15
; GFX10-NEXT: s_xor_b32 s11, s7, s13
; GFX10-NEXT: s_xor_b32 s8, s8, s14
; GFX10-NEXT: v_rcp_iflag_f32_e32 v0, v0
; GFX10-NEXT: v_cvt_f32_u32_e32 v1, s11
; GFX10-NEXT: v_cvt_f32_u32_e32 v2, s8
; GFX10-NEXT: s_xor_b32 s9, s9, s15
; GFX10-NEXT: s_sub_i32 s6, 0, s10
; GFX10-NEXT: v_cvt_f32_u32_e32 v3, s9
; GFX10-NEXT: v_rcp_iflag_f32_e32 v1, v1
; GFX10-NEXT: v_rcp_iflag_f32_e32 v2, v2
; GFX10-NEXT: s_sub_i32 s7, 0, s11
; GFX10-NEXT: s_sub_i32 s19, 0, s8
; GFX10-NEXT: v_rcp_iflag_f32_e32 v3, v3
; GFX10-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX10-NEXT: s_ashr_i32 s16, s0, 31
; GFX10-NEXT: s_ashr_i32 s17, s1, 31
; GFX10-NEXT: s_add_i32 s0, s0, s16
; GFX10-NEXT: s_ashr_i32 s18, s2, 31
; GFX10-NEXT: v_cvt_u32_f32_e32 v0, v0
; GFX10-NEXT: v_mul_f32_e32 v1, v1, v4
; GFX10-NEXT: v_mul_f32_e32 v2, v2, v4
; GFX10-NEXT: s_xor_b32 s0, s0, s16
; GFX10-NEXT: v_mul_f32_e32 v3, v3, v4
; GFX10-NEXT: v_mul_lo_u32 v4, s6, v0
; GFX10-NEXT: v_cvt_u32_f32_e32 v1, v1
; GFX10-NEXT: v_cvt_u32_f32_e32 v2, v2
; GFX10-NEXT: s_sub_i32 s6, 0, s9
; GFX10-NEXT: v_cvt_u32_f32_e32 v3, v3
; GFX10-NEXT: s_add_i32 s1, s1, s17
; GFX10-NEXT: v_mul_lo_u32 v5, s7, v1
; GFX10-NEXT: v_mul_lo_u32 v6, s19, v2
; GFX10-NEXT: v_mul_hi_u32 v4, v0, v4
; GFX10-NEXT: v_mul_lo_u32 v7, s6, v3
; GFX10-NEXT: s_add_i32 s2, s2, s18
; GFX10-NEXT: s_ashr_i32 s19, s3, 31
; GFX10-NEXT: s_xor_b32 s1, s1, s17
; GFX10-NEXT: s_xor_b32 s2, s2, s18
; GFX10-NEXT: v_mul_hi_u32 v5, v1, v5
; GFX10-NEXT: v_mul_hi_u32 v6, v2, v6
; GFX10-NEXT: v_add_nc_u32_e32 v0, v0, v4
; GFX10-NEXT: v_mul_hi_u32 v7, v3, v7
; GFX10-NEXT: s_add_i32 s3, s3, s19
; GFX10-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x0
; GFX10-NEXT: s_xor_b32 s3, s3, s19
; GFX10-NEXT: v_mul_hi_u32 v0, s0, v0
; GFX10-NEXT: v_add_nc_u32_e32 v1, v1, v5
; GFX10-NEXT: v_add_nc_u32_e32 v2, v2, v6
; GFX10-NEXT: s_xor_b32 s12, s16, s12
; GFX10-NEXT: v_add_nc_u32_e32 v3, v3, v7
; GFX10-NEXT: s_xor_b32 s13, s17, s13
; GFX10-NEXT: v_mul_hi_u32 v1, s1, v1
; GFX10-NEXT: v_mul_hi_u32 v2, s2, v2
; GFX10-NEXT: v_mul_lo_u32 v4, v0, s10
; GFX10-NEXT: v_mul_hi_u32 v3, s3, v3
; GFX10-NEXT: v_add_nc_u32_e32 v8, 1, v0
; GFX10-NEXT: s_xor_b32 s14, s18, s14
; GFX10-NEXT: v_mul_lo_u32 v5, v1, s11
; GFX10-NEXT: v_mul_lo_u32 v6, v2, s8
; GFX10-NEXT: v_sub_nc_u32_e32 v4, s0, v4
; GFX10-NEXT: v_mul_lo_u32 v7, v3, s9
; GFX10-NEXT: v_add_nc_u32_e32 v9, 1, v1
; GFX10-NEXT: v_add_nc_u32_e32 v10, 1, v2
; GFX10-NEXT: v_add_nc_u32_e32 v11, 1, v3
; GFX10-NEXT: v_cmp_le_u32_e32 vcc_lo, s10, v4
; GFX10-NEXT: v_sub_nc_u32_e32 v5, s1, v5
; GFX10-NEXT: v_sub_nc_u32_e32 v6, s2, v6
; GFX10-NEXT: v_sub_nc_u32_e32 v7, s3, v7
; GFX10-NEXT: v_cndmask_b32_e32 v0, v0, v8, vcc_lo
; GFX10-NEXT: v_subrev_nc_u32_e32 v8, s10, v4
; GFX10-NEXT: v_cmp_le_u32_e64 s0, s11, v5
; GFX10-NEXT: v_cmp_le_u32_e64 s1, s8, v6
; GFX10-NEXT: v_cmp_le_u32_e64 s2, s9, v7
; GFX10-NEXT: v_cndmask_b32_e32 v4, v4, v8, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, v9, s0
; GFX10-NEXT: v_subrev_nc_u32_e32 v9, s11, v5
; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, v10, s1
; GFX10-NEXT: v_subrev_nc_u32_e32 v10, s8, v6
; GFX10-NEXT: v_cndmask_b32_e64 v3, v3, v11, s2
; GFX10-NEXT: v_subrev_nc_u32_e32 v11, s9, v7
; GFX10-NEXT: v_add_nc_u32_e32 v8, 1, v0
; GFX10-NEXT: v_cmp_le_u32_e32 vcc_lo, s10, v4
; GFX10-NEXT: v_cndmask_b32_e64 v5, v5, v9, s0
; GFX10-NEXT: v_cndmask_b32_e64 v6, v6, v10, s1
; GFX10-NEXT: v_cndmask_b32_e64 v7, v7, v11, s2
; GFX10-NEXT: v_add_nc_u32_e32 v9, 1, v1
; GFX10-NEXT: v_cndmask_b32_e32 v0, v0, v8, vcc_lo
; GFX10-NEXT: v_subrev_nc_u32_e32 v8, s10, v4
; GFX10-NEXT: v_add_nc_u32_e32 v10, 1, v2
; GFX10-NEXT: v_cmp_le_u32_e64 s0, s11, v5
; GFX10-NEXT: v_cmp_le_u32_e64 s1, s8, v6
; GFX10-NEXT: v_add_nc_u32_e32 v11, 1, v3
; GFX10-NEXT: v_cndmask_b32_e32 v4, v4, v8, vcc_lo
; GFX10-NEXT: v_cmp_le_u32_e32 vcc_lo, s9, v7
; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, v9, s0
; GFX10-NEXT: v_subrev_nc_u32_e32 v9, s11, v5
; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, v10, s1
; GFX10-NEXT: v_subrev_nc_u32_e32 v10, s8, v6
; GFX10-NEXT: v_subrev_nc_u32_e32 v12, s9, v7
; GFX10-NEXT: v_cndmask_b32_e32 v3, v3, v11, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e64 v5, v5, v9, s0
; GFX10-NEXT: s_xor_b32 s0, s19, s15
; GFX10-NEXT: v_cndmask_b32_e64 v6, v6, v10, s1
; GFX10-NEXT: v_cndmask_b32_e32 v7, v7, v12, vcc_lo
; GFX10-NEXT: v_xor_b32_e32 v0, s12, v0
; GFX10-NEXT: v_xor_b32_e32 v1, s13, v1
; GFX10-NEXT: v_xor_b32_e32 v2, s14, v2
; GFX10-NEXT: v_xor_b32_e32 v3, s0, v3
; GFX10-NEXT: v_xor_b32_e32 v4, s16, v4
; GFX10-NEXT: v_xor_b32_e32 v5, s17, v5
; GFX10-NEXT: v_xor_b32_e32 v6, s18, v6
; GFX10-NEXT: v_xor_b32_e32 v7, s19, v7
; GFX10-NEXT: v_subrev_nc_u32_e32 v0, s12, v0
; GFX10-NEXT: v_subrev_nc_u32_e32 v1, s13, v1
; GFX10-NEXT: v_subrev_nc_u32_e32 v2, s14, v2
; GFX10-NEXT: v_subrev_nc_u32_e32 v3, s0, v3
; GFX10-NEXT: v_mov_b32_e32 v8, 0
; GFX10-NEXT: v_subrev_nc_u32_e32 v4, s16, v4
; GFX10-NEXT: v_subrev_nc_u32_e32 v5, s17, v5
; GFX10-NEXT: v_subrev_nc_u32_e32 v6, s18, v6
; GFX10-NEXT: v_subrev_nc_u32_e32 v7, s19, v7
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: global_store_dwordx4 v8, v[0:3], s[4:5]
; GFX10-NEXT: global_store_dwordx4 v8, v[4:7], s[6:7]
; GFX10-NEXT: s_endpgm
  %div = sdiv <4 x i32> %x, %y
  store <4 x i32> %div, <4 x i32> addrspace(1)* %out0
  %rem = srem <4 x i32> %x, %y
  store <4 x i32> %rem, <4 x i32> addrspace(1)* %out1
  ret void
}
define amdgpu_kernel void @sdivrem_v2i64(<2 x i64> addrspace(1)* %out0, <2 x i64> addrspace(1)* %out1, <2 x i64> %x, <2 x i64> %y) {
|
|
; GFX8-LABEL: sdivrem_v2i64:
|
|
; GFX8: ; %bb.0:
|
|
; GFX8-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x10
|
|
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x20
|
|
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GFX8-NEXT: s_ashr_i32 s6, s9, 31
|
|
; GFX8-NEXT: s_ashr_i32 s12, s1, 31
|
|
; GFX8-NEXT: s_add_u32 s14, s8, s6
|
|
; GFX8-NEXT: s_cselect_b32 s7, 1, 0
|
|
; GFX8-NEXT: s_and_b32 s7, s7, 1
|
|
; GFX8-NEXT: s_cmp_lg_u32 s7, 0
|
|
; GFX8-NEXT: s_addc_u32 s15, s9, s6
|
|
; GFX8-NEXT: s_add_u32 s0, s0, s12
|
|
; GFX8-NEXT: s_cselect_b32 s7, 1, 0
|
|
; GFX8-NEXT: s_and_b32 s7, s7, 1
|
|
; GFX8-NEXT: s_cmp_lg_u32 s7, 0
|
|
; GFX8-NEXT: s_mov_b32 s13, s12
|
|
; GFX8-NEXT: s_addc_u32 s1, s1, s12
|
|
; GFX8-NEXT: s_xor_b64 s[8:9], s[0:1], s[12:13]
|
|
; GFX8-NEXT: v_cvt_f32_u32_e32 v0, s9
|
|
; GFX8-NEXT: v_cvt_f32_u32_e32 v1, s8
|
|
; GFX8-NEXT: s_mov_b32 s7, s6
|
|
; GFX8-NEXT: s_xor_b64 s[14:15], s[14:15], s[6:7]
|
|
; GFX8-NEXT: v_mul_f32_e32 v0, 0x4f800000, v0
|
|
; GFX8-NEXT: v_add_f32_e32 v0, v0, v1
|
|
; GFX8-NEXT: v_rcp_iflag_f32_e32 v0, v0
|
|
; GFX8-NEXT: s_sub_u32 s0, 0, s8
|
|
; GFX8-NEXT: s_cselect_b32 s1, 1, 0
|
|
; GFX8-NEXT: s_and_b32 s1, s1, 1
|
|
; GFX8-NEXT: v_mul_f32_e32 v0, 0x5f7ffffc, v0
|
|
; GFX8-NEXT: v_mul_f32_e32 v1, 0x2f800000, v0
|
|
; GFX8-NEXT: v_trunc_f32_e32 v1, v1
|
|
; GFX8-NEXT: v_mul_f32_e32 v2, 0xcf800000, v1
|
|
; GFX8-NEXT: v_add_f32_e32 v0, v2, v0
|
|
; GFX8-NEXT: v_cvt_u32_f32_e32 v1, v1
|
|
; GFX8-NEXT: v_cvt_u32_f32_e32 v0, v0
|
|
; GFX8-NEXT: s_cmp_lg_u32 s1, 0
|
|
; GFX8-NEXT: s_subb_u32 s1, 0, s9
|
|
; GFX8-NEXT: v_mul_lo_u32 v2, s0, v1
|
|
; GFX8-NEXT: v_mul_lo_u32 v3, s1, v0
|
|
; GFX8-NEXT: v_mul_hi_u32 v5, s0, v0
|
|
; GFX8-NEXT: v_mul_lo_u32 v4, s0, v0
|
|
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v3, v2
|
|
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v5
|
|
; GFX8-NEXT: v_mul_lo_u32 v3, v1, v4
|
|
; GFX8-NEXT: v_mul_lo_u32 v5, v0, v2
|
|
; GFX8-NEXT: v_mul_hi_u32 v6, v0, v4
|
|
; GFX8-NEXT: v_mul_hi_u32 v4, v1, v4
|
|
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v5
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
|
|
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v6
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc
|
|
; GFX8-NEXT: v_mul_lo_u32 v6, v1, v2
|
|
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v5, v3
|
|
; GFX8-NEXT: v_mul_hi_u32 v5, v0, v2
|
|
; GFX8-NEXT: v_add_u32_e32 v4, vcc, v6, v4
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v6, 0, 1, vcc
|
|
; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v5
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
|
|
; GFX8-NEXT: v_add_u32_e32 v5, vcc, v6, v5
|
|
; GFX8-NEXT: v_mul_hi_u32 v2, v1, v2
|
|
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v4, v3
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
|
|
; GFX8-NEXT: v_add_u32_e32 v4, vcc, v5, v4
|
|
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v4
|
|
; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v3
|
|
; GFX8-NEXT: v_addc_u32_e32 v1, vcc, v1, v2, vcc
|
|
; GFX8-NEXT: v_mul_lo_u32 v2, s1, v0
|
|
; GFX8-NEXT: v_mul_lo_u32 v3, s0, v1
|
|
; GFX8-NEXT: v_mul_hi_u32 v5, s0, v0
|
|
; GFX8-NEXT: v_mul_lo_u32 v4, s0, v0
|
|
; GFX8-NEXT: v_mov_b32_e32 v6, s9
|
|
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v3
|
|
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v5
|
|
; GFX8-NEXT: v_mul_lo_u32 v3, v1, v4
|
|
; GFX8-NEXT: v_mul_lo_u32 v5, v0, v2
|
|
; GFX8-NEXT: v_mul_hi_u32 v7, v0, v4
|
|
; GFX8-NEXT: v_mul_hi_u32 v4, v1, v4
|
|
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v5
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
|
|
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v7
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc
|
|
; GFX8-NEXT: v_mul_lo_u32 v7, v1, v2
|
|
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v5, v3
|
|
; GFX8-NEXT: v_mul_hi_u32 v5, v0, v2
|
|
; GFX8-NEXT: v_add_u32_e32 v4, vcc, v7, v4
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc
|
|
; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v5
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
|
|
; GFX8-NEXT: v_add_u32_e32 v5, vcc, v7, v5
|
|
; GFX8-NEXT: v_mul_hi_u32 v2, v1, v2
|
|
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v4, v3
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
|
|
; GFX8-NEXT: v_add_u32_e32 v4, vcc, v5, v4
|
|
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v4
|
|
; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v3
|
|
; GFX8-NEXT: v_addc_u32_e32 v1, vcc, v1, v2, vcc
|
|
; GFX8-NEXT: v_mul_lo_u32 v2, s15, v0
|
|
; GFX8-NEXT: v_mul_lo_u32 v3, s14, v1
|
|
; GFX8-NEXT: v_mul_hi_u32 v5, s14, v0
|
|
; GFX8-NEXT: v_mul_hi_u32 v0, s15, v0
|
|
; GFX8-NEXT: v_mov_b32_e32 v4, s15
|
|
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v3
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc
|
|
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v5
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
|
|
; GFX8-NEXT: v_mul_lo_u32 v5, s15, v1
|
|
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v3, v2
|
|
; GFX8-NEXT: v_mul_hi_u32 v3, s14, v1
|
|
; GFX8-NEXT: v_add_u32_e32 v0, vcc, v5, v0
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
|
|
; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v3
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc
|
|
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v5, v3
|
|
; GFX8-NEXT: v_mul_hi_u32 v1, s15, v1
|
|
; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v2
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
|
|
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v3, v2
|
|
; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v2
|
|
; GFX8-NEXT: v_mul_lo_u32 v2, s9, v0
|
|
; GFX8-NEXT: v_mul_lo_u32 v3, s8, v1
|
|
; GFX8-NEXT: v_mul_hi_u32 v7, s8, v0
|
|
; GFX8-NEXT: v_mul_lo_u32 v5, s8, v0
|
|
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v3
|
|
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v7
|
|
; GFX8-NEXT: v_sub_u32_e32 v3, vcc, s14, v5
|
|
; GFX8-NEXT: v_subb_u32_e64 v4, s[0:1], v4, v2, vcc
|
|
; GFX8-NEXT: v_sub_u32_e64 v2, s[0:1], s15, v2
|
|
; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s9, v4
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v5, 0, -1, s[0:1]
|
|
; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s8, v3
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v7, 0, -1, s[0:1]
|
|
; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], s9, v4
|
|
; GFX8-NEXT: v_subb_u32_e32 v2, vcc, v2, v6, vcc
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v5, v5, v7, s[0:1]
|
|
; GFX8-NEXT: v_subrev_u32_e32 v7, vcc, s8, v3
|
|
; GFX8-NEXT: v_subbrev_u32_e64 v8, s[0:1], 0, v2, vcc
|
|
; GFX8-NEXT: v_add_u32_e64 v9, s[0:1], 1, v0
|
|
; GFX8-NEXT: v_addc_u32_e64 v10, s[0:1], 0, v1, s[0:1]
|
|
; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s9, v8
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v11, 0, -1, s[0:1]
|
|
; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s8, v7
|
|
; GFX8-NEXT: v_subb_u32_e32 v2, vcc, v2, v6, vcc
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v12, 0, -1, s[0:1]
|
|
; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], s9, v8
|
|
; GFX8-NEXT: v_subrev_u32_e32 v6, vcc, s8, v7
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v11, v11, v12, s[0:1]
|
|
; GFX8-NEXT: v_add_u32_e64 v12, s[0:1], 1, v9
|
|
; GFX8-NEXT: v_subbrev_u32_e32 v2, vcc, 0, v2, vcc
|
|
; GFX8-NEXT: v_addc_u32_e64 v13, s[0:1], 0, v10, s[0:1]
|
|
; GFX8-NEXT: v_cmp_ne_u32_e32 vcc, 0, v11
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v9, v9, v12, vcc
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v10, v10, v13, vcc
|
|
; GFX8-NEXT: v_cmp_ne_u32_e32 vcc, 0, v5
|
|
; GFX8-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v11
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v0, v0, v9, vcc
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v5, v7, v6, s[0:1]
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v2, v8, v2, s[0:1]
|
|
; GFX8-NEXT: s_xor_b64 s[0:1], s[6:7], s[12:13]
|
|
; GFX8-NEXT: v_xor_b32_e32 v0, s0, v0
|
|
; GFX8-NEXT: s_ashr_i32 s8, s11, 31
|
|
; GFX8-NEXT: s_ashr_i32 s12, s3, 31
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v10, vcc
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
|
|
; GFX8-NEXT: v_subrev_u32_e32 v0, vcc, s0, v0
|
|
; GFX8-NEXT: s_add_u32 s0, s10, s8
|
|
; GFX8-NEXT: v_xor_b32_e32 v1, s1, v1
|
|
; GFX8-NEXT: v_mov_b32_e32 v4, s1
|
|
; GFX8-NEXT: s_cselect_b32 s1, 1, 0
|
|
; GFX8-NEXT: s_and_b32 s1, s1, 1
|
|
; GFX8-NEXT: s_cmp_lg_u32 s1, 0
|
|
; GFX8-NEXT: s_addc_u32 s1, s11, s8
|
|
; GFX8-NEXT: s_add_u32 s2, s2, s12
|
|
; GFX8-NEXT: s_cselect_b32 s7, 1, 0
|
|
; GFX8-NEXT: s_and_b32 s7, s7, 1
|
|
; GFX8-NEXT: s_cmp_lg_u32 s7, 0
|
|
; GFX8-NEXT: s_mov_b32 s13, s12
|
|
; GFX8-NEXT: s_addc_u32 s3, s3, s12
|
|
; GFX8-NEXT: s_xor_b64 s[2:3], s[2:3], s[12:13]
|
|
; GFX8-NEXT: v_subb_u32_e32 v1, vcc, v1, v4, vcc
|
|
; GFX8-NEXT: v_cvt_f32_u32_e32 v4, s3
|
|
; GFX8-NEXT: v_cvt_f32_u32_e32 v5, s2
|
|
; GFX8-NEXT: s_mov_b32 s9, s8
|
|
; GFX8-NEXT: s_xor_b64 s[10:11], s[0:1], s[8:9]
|
|
; GFX8-NEXT: v_mul_f32_e32 v4, 0x4f800000, v4
|
|
; GFX8-NEXT: v_add_f32_e32 v4, v4, v5
|
|
; GFX8-NEXT: v_rcp_iflag_f32_e32 v4, v4
|
|
; GFX8-NEXT: s_sub_u32 s0, 0, s2
|
|
; GFX8-NEXT: s_cselect_b32 s1, 1, 0
|
|
; GFX8-NEXT: s_and_b32 s1, s1, 1
|
|
; GFX8-NEXT: v_mul_f32_e32 v4, 0x5f7ffffc, v4
|
|
; GFX8-NEXT: v_mul_f32_e32 v6, 0x2f800000, v4
|
|
; GFX8-NEXT: v_trunc_f32_e32 v6, v6
|
|
; GFX8-NEXT: v_mul_f32_e32 v7, 0xcf800000, v6
|
|
; GFX8-NEXT: v_add_f32_e32 v4, v7, v4
|
|
; GFX8-NEXT: v_cvt_u32_f32_e32 v7, v4
|
|
; GFX8-NEXT: v_cvt_u32_f32_e32 v6, v6
|
|
; GFX8-NEXT: s_cmp_lg_u32 s1, 0
|
|
; GFX8-NEXT: s_subb_u32 s1, 0, s3
|
|
; GFX8-NEXT: v_mul_lo_u32 v4, s1, v7
|
|
; GFX8-NEXT: v_mul_lo_u32 v8, s0, v6
|
|
; GFX8-NEXT: v_mul_hi_u32 v10, s0, v7
|
|
; GFX8-NEXT: v_mul_lo_u32 v9, s0, v7
|
|
; GFX8-NEXT: v_xor_b32_e32 v3, s6, v3
|
|
; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v8
|
|
; GFX8-NEXT: v_add_u32_e32 v8, vcc, v4, v10
|
|
; GFX8-NEXT: v_xor_b32_e32 v2, s6, v2
|
|
; GFX8-NEXT: v_mov_b32_e32 v5, s6
|
|
; GFX8-NEXT: v_mul_lo_u32 v10, v6, v9
|
|
; GFX8-NEXT: v_mul_lo_u32 v11, v7, v8
|
|
; GFX8-NEXT: v_subrev_u32_e32 v4, vcc, s6, v3
|
|
; GFX8-NEXT: v_subb_u32_e32 v5, vcc, v2, v5, vcc
|
|
; GFX8-NEXT: v_mul_hi_u32 v2, v7, v9
|
|
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v10, v11
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v10, 0, 1, vcc
|
|
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v3, v2
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
|
|
; GFX8-NEXT: v_mul_lo_u32 v3, v6, v8
|
|
; GFX8-NEXT: v_mul_hi_u32 v9, v6, v9
|
|
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v10, v2
|
|
; GFX8-NEXT: v_mul_hi_u32 v10, v7, v8
|
|
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v9
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc
|
|
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v10
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v10, 0, 1, vcc
|
|
; GFX8-NEXT: v_add_u32_e32 v9, vcc, v9, v10
|
|
; GFX8-NEXT: v_mul_hi_u32 v8, v6, v8
|
|
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v3, v2
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc
|
|
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v9, v3
|
|
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v8, v3
|
|
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v7, v2
|
|
; GFX8-NEXT: v_addc_u32_e32 v3, vcc, v6, v3, vcc
|
|
; GFX8-NEXT: v_mul_lo_u32 v6, s1, v2
|
|
; GFX8-NEXT: v_mul_lo_u32 v7, s0, v3
|
|
; GFX8-NEXT: v_mul_hi_u32 v9, s0, v2
|
|
; GFX8-NEXT: v_mul_lo_u32 v8, s0, v2
|
|
; GFX8-NEXT: v_mov_b32_e32 v10, s3
|
|
; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v7
|
|
; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v9
|
|
; GFX8-NEXT: v_mul_lo_u32 v7, v3, v8
|
|
; GFX8-NEXT: v_mul_lo_u32 v9, v2, v6
|
|
; GFX8-NEXT: v_mul_hi_u32 v11, v2, v8
|
|
; GFX8-NEXT: v_mul_hi_u32 v8, v3, v8
|
|
; GFX8-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x0
|
|
; GFX8-NEXT: v_add_u32_e32 v7, vcc, v7, v9
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc
|
|
; GFX8-NEXT: v_add_u32_e32 v7, vcc, v7, v11
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc
|
|
; GFX8-NEXT: v_mul_lo_u32 v11, v3, v6
|
|
; GFX8-NEXT: v_add_u32_e32 v7, vcc, v9, v7
|
|
; GFX8-NEXT: v_mul_hi_u32 v9, v2, v6
|
|
; GFX8-NEXT: v_add_u32_e32 v8, vcc, v11, v8
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v11, 0, 1, vcc
|
|
; GFX8-NEXT: v_add_u32_e32 v8, vcc, v8, v9
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc
|
|
; GFX8-NEXT: v_add_u32_e32 v9, vcc, v11, v9
|
|
; GFX8-NEXT: v_mul_hi_u32 v6, v3, v6
|
|
; GFX8-NEXT: v_add_u32_e32 v7, vcc, v8, v7
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
|
|
; GFX8-NEXT: v_add_u32_e32 v8, vcc, v9, v8
|
|
; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v8
|
|
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v7
|
|
; GFX8-NEXT: v_addc_u32_e32 v3, vcc, v3, v6, vcc
|
|
; GFX8-NEXT: v_mul_lo_u32 v6, s11, v2
|
|
; GFX8-NEXT: v_mul_lo_u32 v7, s10, v3
|
|
; GFX8-NEXT: v_mul_hi_u32 v9, s10, v2
|
|
; GFX8-NEXT: v_mul_hi_u32 v2, s11, v2
|
|
; GFX8-NEXT: v_mov_b32_e32 v8, s11
|
|
; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v7
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc
|
|
; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v9
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v6, 0, 1, vcc
|
|
; GFX8-NEXT: v_mul_lo_u32 v9, s11, v3
|
|
; GFX8-NEXT: v_add_u32_e32 v6, vcc, v7, v6
|
|
; GFX8-NEXT: v_mul_hi_u32 v7, s10, v3
|
|
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v9, v2
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc
|
|
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v7
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc
|
|
; GFX8-NEXT: v_add_u32_e32 v7, vcc, v9, v7
|
|
; GFX8-NEXT: v_mul_hi_u32 v3, s11, v3
|
|
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v6
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v6, 0, 1, vcc
|
|
; GFX8-NEXT: v_add_u32_e32 v6, vcc, v7, v6
|
|
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v6
|
|
; GFX8-NEXT: v_mul_lo_u32 v6, s3, v2
|
|
; GFX8-NEXT: v_mul_lo_u32 v7, s2, v3
|
|
; GFX8-NEXT: v_mul_hi_u32 v11, s2, v2
|
|
; GFX8-NEXT: v_mul_lo_u32 v9, s2, v2
|
|
; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v7
|
|
; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v11
|
|
; GFX8-NEXT: v_sub_u32_e32 v7, vcc, s10, v9
|
|
; GFX8-NEXT: v_subb_u32_e64 v8, s[0:1], v8, v6, vcc
|
|
; GFX8-NEXT: v_sub_u32_e64 v6, s[0:1], s11, v6
|
|
; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s3, v8
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v9, 0, -1, s[0:1]
|
|
; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s2, v7
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v11, 0, -1, s[0:1]
|
|
; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], s3, v8
|
|
; GFX8-NEXT: v_subb_u32_e32 v6, vcc, v6, v10, vcc
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v9, v9, v11, s[0:1]
|
|
; GFX8-NEXT: v_subrev_u32_e32 v11, vcc, s2, v7
|
|
; GFX8-NEXT: v_subbrev_u32_e64 v12, s[0:1], 0, v6, vcc
|
|
; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s3, v12
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v13, 0, -1, s[0:1]
|
|
; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s2, v11
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v14, 0, -1, s[0:1]
|
|
; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], s3, v12
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v13, v13, v14, s[0:1]
|
|
; GFX8-NEXT: v_add_u32_e64 v14, s[0:1], 1, v2
|
|
; GFX8-NEXT: v_subb_u32_e32 v6, vcc, v6, v10, vcc
|
|
; GFX8-NEXT: v_addc_u32_e64 v15, s[0:1], 0, v3, s[0:1]
|
|
; GFX8-NEXT: v_add_u32_e32 v10, vcc, 1, v14
|
|
; GFX8-NEXT: v_addc_u32_e32 v16, vcc, 0, v15, vcc
|
|
; GFX8-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
|
|
; GFX8-NEXT: v_subrev_u32_e64 v13, s[0:1], s2, v11
|
|
; GFX8-NEXT: v_subbrev_u32_e64 v6, s[0:1], 0, v6, s[0:1]
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v10, v14, v10, vcc
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v14, v15, v16, vcc
|
|
; GFX8-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v9
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v9, v11, v13, vcc
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v6, v12, v6, vcc
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v2, v2, v10, s[0:1]
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v3, v3, v14, s[0:1]
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v7, v7, v9, s[0:1]
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v6, v8, v6, s[0:1]
|
|
; GFX8-NEXT: s_xor_b64 s[0:1], s[8:9], s[12:13]
|
|
; GFX8-NEXT: v_xor_b32_e32 v2, s0, v2
|
|
; GFX8-NEXT: v_xor_b32_e32 v3, s1, v3
|
|
; GFX8-NEXT: v_mov_b32_e32 v8, s1
|
|
; GFX8-NEXT: v_subrev_u32_e32 v2, vcc, s0, v2
|
|
; GFX8-NEXT: v_subb_u32_e32 v3, vcc, v3, v8, vcc
|
|
; GFX8-NEXT: v_xor_b32_e32 v7, s8, v7
|
|
; GFX8-NEXT: v_xor_b32_e32 v8, s8, v6
|
|
; GFX8-NEXT: v_mov_b32_e32 v9, s8
|
|
; GFX8-NEXT: v_subrev_u32_e32 v6, vcc, s8, v7
|
|
; GFX8-NEXT: v_subb_u32_e32 v7, vcc, v8, v9, vcc
|
|
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GFX8-NEXT: v_mov_b32_e32 v9, s5
|
|
; GFX8-NEXT: v_mov_b32_e32 v8, s4
|
|
; GFX8-NEXT: flat_store_dwordx4 v[8:9], v[0:3]
|
|
; GFX8-NEXT: s_nop 0
|
|
; GFX8-NEXT: v_mov_b32_e32 v0, s6
|
|
; GFX8-NEXT: v_mov_b32_e32 v1, s7
|
|
; GFX8-NEXT: flat_store_dwordx4 v[0:1], v[4:7]
|
|
; GFX8-NEXT: s_endpgm
|
|
;
|
|
; GFX9-LABEL: sdivrem_v2i64:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x10
|
|
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x20
|
|
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GFX9-NEXT: s_ashr_i32 s6, s9, 31
|
|
; GFX9-NEXT: s_ashr_i32 s12, s1, 31
|
|
; GFX9-NEXT: s_add_u32 s14, s8, s6
|
|
; GFX9-NEXT: s_cselect_b32 s7, 1, 0
|
|
; GFX9-NEXT: s_and_b32 s7, s7, 1
|
|
; GFX9-NEXT: s_cmp_lg_u32 s7, 0
|
|
; GFX9-NEXT: s_addc_u32 s15, s9, s6
|
|
; GFX9-NEXT: s_add_u32 s0, s0, s12
|
|
; GFX9-NEXT: s_cselect_b32 s7, 1, 0
|
|
; GFX9-NEXT: s_and_b32 s7, s7, 1
|
|
; GFX9-NEXT: s_cmp_lg_u32 s7, 0
|
|
; GFX9-NEXT: s_mov_b32 s13, s12
|
|
; GFX9-NEXT: s_addc_u32 s1, s1, s12
|
|
; GFX9-NEXT: s_xor_b64 s[8:9], s[0:1], s[12:13]
|
|
; GFX9-NEXT: v_cvt_f32_u32_e32 v0, s9
|
|
; GFX9-NEXT: v_cvt_f32_u32_e32 v1, s8
|
|
; GFX9-NEXT: s_mov_b32 s7, s6
|
|
; GFX9-NEXT: s_xor_b64 s[14:15], s[14:15], s[6:7]
|
|
; GFX9-NEXT: v_mul_f32_e32 v0, 0x4f800000, v0
|
|
; GFX9-NEXT: v_add_f32_e32 v0, v0, v1
|
|
; GFX9-NEXT: v_rcp_iflag_f32_e32 v0, v0
|
|
; GFX9-NEXT: s_sub_u32 s0, 0, s8
|
|
; GFX9-NEXT: s_cselect_b32 s1, 1, 0
|
|
; GFX9-NEXT: s_and_b32 s1, s1, 1
|
|
; GFX9-NEXT: v_mul_f32_e32 v0, 0x5f7ffffc, v0
|
|
; GFX9-NEXT: v_mul_f32_e32 v1, 0x2f800000, v0
|
|
; GFX9-NEXT: v_trunc_f32_e32 v1, v1
|
|
; GFX9-NEXT: v_mul_f32_e32 v2, 0xcf800000, v1
|
|
; GFX9-NEXT: v_add_f32_e32 v0, v2, v0
|
|
; GFX9-NEXT: v_cvt_u32_f32_e32 v1, v1
|
|
; GFX9-NEXT: v_cvt_u32_f32_e32 v0, v0
|
|
; GFX9-NEXT: s_cmp_lg_u32 s1, 0
|
|
; GFX9-NEXT: s_subb_u32 s1, 0, s9
|
|
; GFX9-NEXT: v_mul_lo_u32 v2, s0, v1
|
|
; GFX9-NEXT: v_mul_lo_u32 v3, s1, v0
|
|
; GFX9-NEXT: v_mul_hi_u32 v4, s0, v0
|
|
; GFX9-NEXT: v_mul_lo_u32 v5, s0, v0
|
|
; GFX9-NEXT: v_mov_b32_e32 v7, s15
|
|
; GFX9-NEXT: v_add3_u32 v2, v3, v2, v4
|
|
; GFX9-NEXT: v_mul_lo_u32 v3, v1, v5
|
|
; GFX9-NEXT: v_mul_lo_u32 v4, v0, v2
|
|
; GFX9-NEXT: v_mul_hi_u32 v6, v0, v5
|
|
; GFX9-NEXT: v_mul_hi_u32 v5, v1, v5
|
|
; GFX9-NEXT: v_add_co_u32_e32 v3, vcc, v3, v4
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
|
|
; GFX9-NEXT: v_add_co_u32_e32 v3, vcc, v3, v6
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc
|
|
; GFX9-NEXT: v_mul_lo_u32 v6, v1, v2
|
|
; GFX9-NEXT: v_add_u32_e32 v3, v4, v3
|
|
; GFX9-NEXT: v_mul_hi_u32 v4, v0, v2
|
|
; GFX9-NEXT: v_mul_hi_u32 v2, v1, v2
|
|
; GFX9-NEXT: v_add_co_u32_e32 v5, vcc, v6, v5
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v6, 0, 1, vcc
|
|
; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v5, v4
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
|
|
; GFX9-NEXT: v_add_co_u32_e32 v3, vcc, v4, v3
|
|
; GFX9-NEXT: v_add_u32_e32 v5, v6, v5
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
|
|
; GFX9-NEXT: v_add3_u32 v2, v5, v4, v2
|
|
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, v0, v3
|
|
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v1, v2, vcc
|
|
; GFX9-NEXT: v_mul_lo_u32 v2, s1, v0
|
|
; GFX9-NEXT: v_mul_lo_u32 v3, s0, v1
|
|
; GFX9-NEXT: v_mul_hi_u32 v4, s0, v0
|
|
; GFX9-NEXT: v_mul_lo_u32 v5, s0, v0
|
|
; GFX9-NEXT: v_add3_u32 v2, v2, v3, v4
|
|
; GFX9-NEXT: v_mul_lo_u32 v3, v1, v5
|
|
; GFX9-NEXT: v_mul_lo_u32 v4, v0, v2
|
|
; GFX9-NEXT: v_mul_hi_u32 v6, v0, v5
|
|
; GFX9-NEXT: v_mul_hi_u32 v5, v1, v5
|
|
; GFX9-NEXT: v_add_co_u32_e32 v3, vcc, v3, v4
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
|
|
; GFX9-NEXT: v_add_co_u32_e32 v3, vcc, v3, v6
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc
|
|
; GFX9-NEXT: v_mul_lo_u32 v6, v1, v2
|
|
; GFX9-NEXT: v_add_u32_e32 v3, v4, v3
|
|
; GFX9-NEXT: v_mul_hi_u32 v4, v0, v2
|
|
; GFX9-NEXT: v_mul_hi_u32 v2, v1, v2
|
|
; GFX9-NEXT: v_add_co_u32_e32 v5, vcc, v6, v5
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v6, 0, 1, vcc
|
|
; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v5, v4
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
|
|
; GFX9-NEXT: v_add_co_u32_e32 v3, vcc, v4, v3
|
|
; GFX9-NEXT: v_add_u32_e32 v5, v6, v5
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
|
|
; GFX9-NEXT: v_add3_u32 v2, v5, v4, v2
|
|
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, v0, v3
|
|
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v1, v2, vcc
|
|
; GFX9-NEXT: v_mul_lo_u32 v2, s15, v0
|
|
; GFX9-NEXT: v_mul_lo_u32 v3, s14, v1
|
|
; GFX9-NEXT: v_mul_hi_u32 v4, s14, v0
|
|
; GFX9-NEXT: v_mul_hi_u32 v0, s15, v0
|
|
; GFX9-NEXT: v_mov_b32_e32 v5, s9
|
|
; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v2, v3
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc
|
|
; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v2, v4
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
|
|
; GFX9-NEXT: v_mul_lo_u32 v4, s15, v1
|
|
; GFX9-NEXT: v_add_u32_e32 v2, v3, v2
|
|
; GFX9-NEXT: v_mul_hi_u32 v3, s14, v1
|
|
; GFX9-NEXT: v_mul_hi_u32 v1, s15, v1
|
|
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, v4, v0
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
|
|
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, v0, v3
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc
|
|
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, v0, v2
|
|
; GFX9-NEXT: v_add_u32_e32 v3, v4, v3
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
|
|
; GFX9-NEXT: v_add3_u32 v1, v3, v2, v1
|
|
; GFX9-NEXT: v_mul_lo_u32 v2, s9, v0
|
|
; GFX9-NEXT: v_mul_lo_u32 v3, s8, v1
|
|
; GFX9-NEXT: v_mul_hi_u32 v4, s8, v0
|
|
; GFX9-NEXT: v_mul_lo_u32 v6, s8, v0
|
|
; GFX9-NEXT: v_add3_u32 v2, v2, v3, v4
|
|
; GFX9-NEXT: v_sub_co_u32_e32 v3, vcc, s14, v6
|
|
; GFX9-NEXT: v_subb_co_u32_e64 v4, s[0:1], v7, v2, vcc
|
|
; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s9, v4
|
|
; GFX9-NEXT: v_sub_u32_e32 v2, s15, v2
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v6, 0, -1, s[0:1]
|
|
; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s8, v3
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v7, 0, -1, s[0:1]
|
|
; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], s9, v4
|
|
; GFX9-NEXT: v_subb_co_u32_e32 v2, vcc, v2, v5, vcc
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v6, v6, v7, s[0:1]
|
|
; GFX9-NEXT: v_subrev_co_u32_e32 v7, vcc, s8, v3
|
|
; GFX9-NEXT: v_subbrev_co_u32_e64 v8, s[0:1], 0, v2, vcc
|
|
; GFX9-NEXT: v_add_co_u32_e64 v9, s[0:1], 1, v0
|
|
; GFX9-NEXT: v_addc_co_u32_e64 v10, s[0:1], 0, v1, s[0:1]
|
|
; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s9, v8
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v11, 0, -1, s[0:1]
|
|
; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s8, v7
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v12, 0, -1, s[0:1]
|
|
; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], s9, v8
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v11, v11, v12, s[0:1]
|
|
; GFX9-NEXT: v_add_co_u32_e64 v12, s[0:1], 1, v9
|
|
; GFX9-NEXT: v_subb_co_u32_e32 v2, vcc, v2, v5, vcc
|
|
; GFX9-NEXT: v_addc_co_u32_e64 v13, s[0:1], 0, v10, s[0:1]
|
|
; GFX9-NEXT: v_subrev_co_u32_e32 v5, vcc, s8, v7
|
|
; GFX9-NEXT: v_subbrev_co_u32_e32 v2, vcc, 0, v2, vcc
|
|
; GFX9-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v11
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v5, v7, v5, s[0:1]
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v2, v8, v2, s[0:1]
|
|
; GFX9-NEXT: s_xor_b64 s[0:1], s[6:7], s[12:13]
|
|
; GFX9-NEXT: s_ashr_i32 s8, s11, 31
|
|
; GFX9-NEXT: s_ashr_i32 s12, s3, 31
|
|
; GFX9-NEXT: s_add_u32 s10, s10, s8
|
|
; GFX9-NEXT: s_cselect_b32 s7, 1, 0
|
|
; GFX9-NEXT: s_and_b32 s7, s7, 1
|
|
; GFX9-NEXT: s_cmp_lg_u32 s7, 0
|
|
; GFX9-NEXT: s_addc_u32 s11, s11, s8
|
|
; GFX9-NEXT: s_add_u32 s2, s2, s12
|
|
; GFX9-NEXT: s_cselect_b32 s7, 1, 0
|
|
; GFX9-NEXT: s_and_b32 s7, s7, 1
|
|
; GFX9-NEXT: s_cmp_lg_u32 s7, 0
|
|
; GFX9-NEXT: s_mov_b32 s13, s12
|
|
; GFX9-NEXT: s_addc_u32 s3, s3, s12
|
|
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v11
|
|
; GFX9-NEXT: s_xor_b64 s[2:3], s[2:3], s[12:13]
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v9, v9, v12, vcc
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v10, v10, v13, vcc
|
|
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v6
|
|
; GFX9-NEXT: v_cvt_f32_u32_e32 v6, s3
|
|
; GFX9-NEXT: v_cvt_f32_u32_e32 v7, s2
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc
|
|
; GFX9-NEXT: v_mul_f32_e32 v4, 0x4f800000, v6
|
|
; GFX9-NEXT: v_add_f32_e32 v4, v4, v7
|
|
; GFX9-NEXT: v_rcp_iflag_f32_e32 v4, v4
|
|
; GFX9-NEXT: s_mov_b32 s9, s8
|
|
; GFX9-NEXT: s_xor_b64 s[10:11], s[10:11], s[8:9]
|
|
; GFX9-NEXT: s_sub_u32 s7, 0, s2
|
|
; GFX9-NEXT: v_mul_f32_e32 v4, 0x5f7ffffc, v4
|
|
; GFX9-NEXT: v_mul_f32_e32 v5, 0x2f800000, v4
|
|
; GFX9-NEXT: v_trunc_f32_e32 v5, v5
|
|
; GFX9-NEXT: v_mul_f32_e32 v6, 0xcf800000, v5
|
|
; GFX9-NEXT: v_add_f32_e32 v4, v6, v4
|
|
; GFX9-NEXT: v_cvt_u32_f32_e32 v4, v4
|
|
; GFX9-NEXT: v_cvt_u32_f32_e32 v5, v5
|
|
; GFX9-NEXT: s_cselect_b32 s14, 1, 0
|
|
; GFX9-NEXT: s_and_b32 s14, s14, 1
|
|
; GFX9-NEXT: s_cmp_lg_u32 s14, 0
|
|
; GFX9-NEXT: s_subb_u32 s14, 0, s3
|
|
; GFX9-NEXT: v_mul_lo_u32 v6, s14, v4
|
|
; GFX9-NEXT: v_mul_lo_u32 v7, s7, v5
|
|
; GFX9-NEXT: v_mul_hi_u32 v8, s7, v4
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v9, vcc
|
|
; GFX9-NEXT: v_mul_lo_u32 v9, s7, v4
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v10, vcc
|
|
; GFX9-NEXT: v_xor_b32_e32 v0, s0, v0
|
|
; GFX9-NEXT: v_add3_u32 v6, v6, v7, v8
|
|
; GFX9-NEXT: v_xor_b32_e32 v1, s1, v1
|
|
; GFX9-NEXT: v_mov_b32_e32 v10, s1
|
|
; GFX9-NEXT: v_mul_lo_u32 v7, v5, v9
|
|
; GFX9-NEXT: v_mul_lo_u32 v8, v4, v6
|
|
; GFX9-NEXT: v_subrev_co_u32_e32 v0, vcc, s0, v0
|
|
; GFX9-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v10, vcc
|
|
; GFX9-NEXT: v_mul_hi_u32 v10, v4, v9
|
|
; GFX9-NEXT: v_add_co_u32_e32 v7, vcc, v7, v8
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
|
|
; GFX9-NEXT: v_add_co_u32_e32 v7, vcc, v7, v10
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc
|
|
; GFX9-NEXT: v_mul_lo_u32 v10, v5, v6
|
|
; GFX9-NEXT: v_mul_hi_u32 v9, v5, v9
|
|
; GFX9-NEXT: v_add_u32_e32 v7, v8, v7
|
|
; GFX9-NEXT: v_mul_hi_u32 v8, v4, v6
|
|
; GFX9-NEXT: v_mul_hi_u32 v6, v5, v6
|
|
; GFX9-NEXT: v_add_co_u32_e32 v9, vcc, v10, v9
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v10, 0, 1, vcc
|
|
; GFX9-NEXT: v_add_co_u32_e32 v8, vcc, v9, v8
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc
|
|
; GFX9-NEXT: v_add_co_u32_e32 v7, vcc, v8, v7
|
|
; GFX9-NEXT: v_add_u32_e32 v9, v10, v9
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
|
|
; GFX9-NEXT: v_add3_u32 v6, v9, v8, v6
|
|
; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v4, v7
|
|
; GFX9-NEXT: v_addc_co_u32_e32 v5, vcc, v5, v6, vcc
|
|
; GFX9-NEXT: v_mul_lo_u32 v6, s14, v4
|
|
; GFX9-NEXT: v_mul_lo_u32 v7, s7, v5
|
|
; GFX9-NEXT: v_mul_hi_u32 v8, s7, v4
|
|
; GFX9-NEXT: v_mul_lo_u32 v9, s7, v4
|
|
; GFX9-NEXT: v_xor_b32_e32 v3, s6, v3
|
|
; GFX9-NEXT: v_xor_b32_e32 v2, s6, v2
|
|
; GFX9-NEXT: v_add3_u32 v6, v6, v7, v8
|
|
; GFX9-NEXT: v_mul_lo_u32 v7, v5, v9
|
|
; GFX9-NEXT: v_mul_lo_u32 v8, v4, v6
|
|
; GFX9-NEXT: v_mul_hi_u32 v11, v4, v9
|
|
; GFX9-NEXT: v_mul_hi_u32 v9, v5, v9
|
|
; GFX9-NEXT: v_mov_b32_e32 v10, s6
|
|
; GFX9-NEXT: v_add_co_u32_e32 v7, vcc, v7, v8
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
|
|
; GFX9-NEXT: v_add_co_u32_e32 v7, vcc, v7, v11
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc
|
|
; GFX9-NEXT: v_mul_lo_u32 v11, v5, v6
|
|
; GFX9-NEXT: v_add_u32_e32 v7, v8, v7
|
|
; GFX9-NEXT: v_mul_hi_u32 v8, v4, v6
|
|
; GFX9-NEXT: v_mul_hi_u32 v6, v5, v6
|
|
; GFX9-NEXT: v_add_co_u32_e32 v9, vcc, v11, v9
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v11, 0, 1, vcc
|
|
; GFX9-NEXT: v_add_co_u32_e32 v8, vcc, v9, v8
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc
|
|
; GFX9-NEXT: v_add_co_u32_e32 v7, vcc, v8, v7
|
|
; GFX9-NEXT: v_add_u32_e32 v9, v11, v9
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
|
|
; GFX9-NEXT: v_add3_u32 v6, v9, v8, v6
|
|
; GFX9-NEXT: v_add_co_u32_e32 v7, vcc, v4, v7
|
|
; GFX9-NEXT: v_addc_co_u32_e32 v6, vcc, v5, v6, vcc
|
|
; GFX9-NEXT: v_mul_lo_u32 v8, s11, v7
|
|
; GFX9-NEXT: v_mul_lo_u32 v9, s10, v6
|
|
; GFX9-NEXT: v_subrev_co_u32_e32 v4, vcc, s6, v3
|
|
; GFX9-NEXT: v_subb_co_u32_e32 v5, vcc, v2, v10, vcc
|
|
; GFX9-NEXT: v_mul_hi_u32 v2, s10, v7
|
|
; GFX9-NEXT: v_add_co_u32_e32 v3, vcc, v8, v9
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
|
|
; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v3, v2
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
|
|
; GFX9-NEXT: v_mul_lo_u32 v3, s11, v6
|
|
; GFX9-NEXT: v_mul_hi_u32 v7, s11, v7
|
|
; GFX9-NEXT: v_add_u32_e32 v2, v8, v2
|
|
; GFX9-NEXT: v_mul_hi_u32 v8, s10, v6
|
|
; GFX9-NEXT: v_mul_hi_u32 v6, s11, v6
|
|
; GFX9-NEXT: v_add_co_u32_e32 v3, vcc, v3, v7
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc
|
|
; GFX9-NEXT: v_add_co_u32_e32 v3, vcc, v3, v8
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
|
|
; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v3, v2
|
|
; GFX9-NEXT: v_add_u32_e32 v7, v7, v8
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc
|
|
; GFX9-NEXT: v_add3_u32 v3, v7, v3, v6
|
|
; GFX9-NEXT: v_mul_lo_u32 v6, s3, v2
|
|
; GFX9-NEXT: v_mul_lo_u32 v7, s2, v3
|
|
; GFX9-NEXT: v_mul_hi_u32 v8, s2, v2
|
|
; GFX9-NEXT: v_mul_lo_u32 v10, s2, v2
|
|
; GFX9-NEXT: v_mov_b32_e32 v11, s11
|
|
; GFX9-NEXT: v_mov_b32_e32 v9, s3
|
|
; GFX9-NEXT: v_add3_u32 v6, v6, v7, v8
|
|
; GFX9-NEXT: v_sub_co_u32_e32 v7, vcc, s10, v10
|
|
; GFX9-NEXT: v_subb_co_u32_e64 v8, s[0:1], v11, v6, vcc
|
|
; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s3, v8
|
|
; GFX9-NEXT: v_sub_u32_e32 v6, s11, v6
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v10, 0, -1, s[0:1]
|
|
; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s2, v7
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v11, 0, -1, s[0:1]
|
|
; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], s3, v8
|
|
; GFX9-NEXT: v_subb_co_u32_e32 v6, vcc, v6, v9, vcc
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v10, v10, v11, s[0:1]
|
|
; GFX9-NEXT: v_subrev_co_u32_e32 v11, vcc, s2, v7
|
|
; GFX9-NEXT: v_subbrev_co_u32_e64 v12, s[0:1], 0, v6, vcc
|
|
; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s3, v12
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v13, 0, -1, s[0:1]
|
|
; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s2, v11
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v14, 0, -1, s[0:1]
|
|
; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], s3, v12
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v13, v13, v14, s[0:1]
|
|
; GFX9-NEXT: v_add_co_u32_e64 v14, s[0:1], 1, v2
|
|
; GFX9-NEXT: v_subb_co_u32_e32 v6, vcc, v6, v9, vcc
|
|
; GFX9-NEXT: v_addc_co_u32_e64 v15, s[0:1], 0, v3, s[0:1]
|
|
; GFX9-NEXT: v_add_co_u32_e32 v9, vcc, 1, v14
|
|
; GFX9-NEXT: v_addc_co_u32_e32 v16, vcc, 0, v15, vcc
|
|
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v9, v14, v9, vcc
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v14, v15, v16, vcc
|
|
; GFX9-NEXT: v_subrev_co_u32_e64 v15, s[0:1], s2, v11
|
|
; GFX9-NEXT: v_subbrev_co_u32_e64 v6, s[0:1], 0, v6, s[0:1]
|
|
; GFX9-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v10
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v2, v2, v9, s[0:1]
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v9, v11, v15, vcc
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v6, v12, v6, vcc
|
|
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x0
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v3, v3, v14, s[0:1]
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v7, v7, v9, s[0:1]
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v6, v8, v6, s[0:1]
|
|
; GFX9-NEXT: s_xor_b64 s[0:1], s[8:9], s[12:13]
|
|
; GFX9-NEXT: v_xor_b32_e32 v2, s0, v2
|
|
; GFX9-NEXT: v_xor_b32_e32 v3, s1, v3
|
|
; GFX9-NEXT: v_mov_b32_e32 v8, s1
|
|
; GFX9-NEXT: v_subrev_co_u32_e32 v2, vcc, s0, v2
|
|
; GFX9-NEXT: v_subb_co_u32_e32 v3, vcc, v3, v8, vcc
|
|
; GFX9-NEXT: v_xor_b32_e32 v7, s8, v7
|
|
; GFX9-NEXT: v_mov_b32_e32 v13, 0
|
|
; GFX9-NEXT: v_xor_b32_e32 v8, s8, v6
|
|
; GFX9-NEXT: v_mov_b32_e32 v9, s8
|
|
; GFX9-NEXT: v_subrev_co_u32_e32 v6, vcc, s8, v7
|
|
; GFX9-NEXT: v_subb_co_u32_e32 v7, vcc, v8, v9, vcc
|
|
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GFX9-NEXT: global_store_dwordx4 v13, v[0:3], s[4:5]
|
|
; GFX9-NEXT: global_store_dwordx4 v13, v[4:7], s[6:7]
|
|
; GFX9-NEXT: s_endpgm
|
|
;
|
|
; GFX10-LABEL: sdivrem_v2i64:
|
|
; GFX10: ; %bb.0:
|
|
; GFX10-NEXT: s_clause 0x1
|
|
; GFX10-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x10
|
|
; GFX10-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x20
|
|
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GFX10-NEXT: s_ashr_i32 s12, s9, 31
|
|
; GFX10-NEXT: s_ashr_i32 s6, s1, 31
|
|
; GFX10-NEXT: s_add_u32 s14, s8, s12
|
|
; GFX10-NEXT: s_cselect_b32 s7, 1, 0
|
|
; GFX10-NEXT: s_mov_b32 s13, s12
|
|
; GFX10-NEXT: s_and_b32 s7, s7, 1
|
|
; GFX10-NEXT: s_cmp_lg_u32 s7, 0
|
|
; GFX10-NEXT: s_addc_u32 s15, s9, s12
|
|
; GFX10-NEXT: s_add_u32 s0, s0, s6
|
|
; GFX10-NEXT: s_cselect_b32 s7, 1, 0
|
|
; GFX10-NEXT: s_and_b32 s8, s7, 1
|
|
; GFX10-NEXT: s_mov_b32 s7, s6
|
|
; GFX10-NEXT: s_cmp_lg_u32 s8, 0
|
|
; GFX10-NEXT: s_addc_u32 s1, s1, s6
|
|
; GFX10-NEXT: s_xor_b64 s[8:9], s[0:1], s[6:7]
|
|
; GFX10-NEXT: s_xor_b64 s[0:1], s[14:15], s[12:13]
|
|
; GFX10-NEXT: v_cvt_f32_u32_e32 v1, s9
|
|
; GFX10-NEXT: s_sub_u32 s20, 0, s8
|
|
; GFX10-NEXT: s_cselect_b32 s14, 1, 0
|
|
; GFX10-NEXT: v_cvt_f32_u32_e32 v0, s8
|
|
; GFX10-NEXT: s_and_b32 s14, s14, 1
|
|
; GFX10-NEXT: v_mul_f32_e32 v1, 0x4f800000, v1
|
|
; GFX10-NEXT: s_cmp_lg_u32 s14, 0
|
|
; GFX10-NEXT: s_subb_u32 s21, 0, s9
|
|
; GFX10-NEXT: s_ashr_i32 s14, s11, 31
|
|
; GFX10-NEXT: s_xor_b64 s[18:19], s[12:13], s[6:7]
|
|
; GFX10-NEXT: s_ashr_i32 s16, s3, 31
|
|
; GFX10-NEXT: v_add_f32_e32 v0, v1, v0
|
|
; GFX10-NEXT: s_add_u32 s6, s10, s14
|
|
; GFX10-NEXT: s_cselect_b32 s7, 1, 0
|
|
; GFX10-NEXT: s_mov_b32 s17, s16
|
|
; GFX10-NEXT: s_and_b32 s7, s7, 1
|
|
; GFX10-NEXT: v_rcp_iflag_f32_e32 v0, v0
|
|
; GFX10-NEXT: s_cmp_lg_u32 s7, 0
|
|
; GFX10-NEXT: s_mov_b32 s15, s14
|
|
; GFX10-NEXT: s_addc_u32 s7, s11, s14
|
|
; GFX10-NEXT: s_add_u32 s2, s2, s16
|
|
; GFX10-NEXT: s_cselect_b32 s10, 1, 0
|
|
; GFX10-NEXT: s_and_b32 s10, s10, 1
|
|
; GFX10-NEXT: s_cmp_lg_u32 s10, 0
|
|
; GFX10-NEXT: v_mul_f32_e32 v0, 0x5f7ffffc, v0
|
|
; GFX10-NEXT: s_addc_u32 s3, s3, s16
|
|
; GFX10-NEXT: s_xor_b64 s[10:11], s[6:7], s[14:15]
|
|
; GFX10-NEXT: s_xor_b64 s[2:3], s[2:3], s[16:17]
|
|
; GFX10-NEXT: v_cvt_f32_u32_e32 v1, s3
|
|
; GFX10-NEXT: v_mul_f32_e32 v2, 0x2f800000, v0
|
|
; GFX10-NEXT: v_cvt_f32_u32_e32 v3, s2
|
|
; GFX10-NEXT: s_sub_u32 s6, 0, s2
|
|
; GFX10-NEXT: s_cselect_b32 s7, 1, 0
|
|
; GFX10-NEXT: v_mul_f32_e32 v1, 0x4f800000, v1
|
|
; GFX10-NEXT: v_trunc_f32_e32 v2, v2
|
|
; GFX10-NEXT: s_and_b32 s7, s7, 1
|
|
; GFX10-NEXT: s_cmp_lg_u32 s7, 0
|
|
; GFX10-NEXT: v_add_f32_e32 v1, v1, v3
|
|
; GFX10-NEXT: v_mul_f32_e32 v3, 0xcf800000, v2
|
|
; GFX10-NEXT: v_cvt_u32_f32_e32 v2, v2
|
|
; GFX10-NEXT: s_subb_u32 s7, 0, s3
|
|
; GFX10-NEXT: v_rcp_iflag_f32_e32 v1, v1
|
|
; GFX10-NEXT: v_add_f32_e32 v0, v3, v0
|
|
; GFX10-NEXT: v_mul_lo_u32 v3, s20, v2
|
|
; GFX10-NEXT: v_cvt_u32_f32_e32 v0, v0
|
|
; GFX10-NEXT: v_mul_f32_e32 v1, 0x5f7ffffc, v1
|
|
; GFX10-NEXT: v_mul_lo_u32 v4, s21, v0
|
|
; GFX10-NEXT: v_mul_hi_u32 v5, s20, v0
|
|
; GFX10-NEXT: v_mul_lo_u32 v6, s20, v0
|
|
; GFX10-NEXT: v_mul_f32_e32 v7, 0x2f800000, v1
|
|
; GFX10-NEXT: v_add3_u32 v3, v4, v3, v5
|
|
; GFX10-NEXT: v_trunc_f32_e32 v4, v7
|
|
; GFX10-NEXT: v_mul_lo_u32 v5, v2, v6
|
|
; GFX10-NEXT: v_mul_hi_u32 v7, v0, v6
|
|
; GFX10-NEXT: v_mul_hi_u32 v6, v2, v6
|
|
; GFX10-NEXT: v_mul_lo_u32 v8, v0, v3
|
|
; GFX10-NEXT: v_mul_lo_u32 v10, v2, v3
|
|
; GFX10-NEXT: v_mul_f32_e32 v9, 0xcf800000, v4
|
|
; GFX10-NEXT: v_mul_hi_u32 v11, v0, v3
|
|
; GFX10-NEXT: v_cvt_u32_f32_e32 v4, v4
|
|
; GFX10-NEXT: v_mul_hi_u32 v3, v2, v3
|
|
; GFX10-NEXT: v_add_f32_e32 v1, v9, v1
|
|
; GFX10-NEXT: v_add_co_u32 v5, s13, v5, v8
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v8, 0, 1, s13
|
|
; GFX10-NEXT: v_add_co_u32 v6, s13, v10, v6
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v10, 0, 1, s13
|
|
; GFX10-NEXT: v_add_co_u32 v5, s13, v5, v7
|
|
; GFX10-NEXT: v_cvt_u32_f32_e32 v1, v1
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v5, 0, 1, s13
|
|
; GFX10-NEXT: v_mul_lo_u32 v9, s6, v4
|
|
; GFX10-NEXT: v_add_co_u32 v6, s13, v6, v11
|
|
; GFX10-NEXT: v_mul_lo_u32 v12, s7, v1
|
|
; GFX10-NEXT: v_mul_hi_u32 v13, s6, v1
|
|
; GFX10-NEXT: v_add_nc_u32_e32 v5, v8, v5
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v7, 0, 1, s13
|
|
; GFX10-NEXT: v_mul_lo_u32 v11, s6, v1
|
|
; GFX10-NEXT: v_add_co_u32 v5, s13, v6, v5
|
|
; GFX10-NEXT: v_add_nc_u32_e32 v7, v10, v7
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v6, 0, 1, s13
|
|
; GFX10-NEXT: v_add3_u32 v8, v12, v9, v13
|
|
; GFX10-NEXT: v_mul_lo_u32 v9, v4, v11
|
|
; GFX10-NEXT: v_mul_hi_u32 v10, v1, v11
|
|
; GFX10-NEXT: v_mul_hi_u32 v11, v4, v11
|
|
; GFX10-NEXT: v_add3_u32 v3, v7, v6, v3
|
|
; GFX10-NEXT: v_mul_lo_u32 v6, v1, v8
|
|
; GFX10-NEXT: v_mul_lo_u32 v7, v4, v8
|
|
; GFX10-NEXT: v_add_co_u32 v0, vcc_lo, v0, v5
|
|
; GFX10-NEXT: v_add_co_ci_u32_e32 v2, vcc_lo, v2, v3, vcc_lo
|
|
; GFX10-NEXT: v_mul_hi_u32 v5, v1, v8
|
|
; GFX10-NEXT: v_mul_lo_u32 v12, s21, v0
|
|
; GFX10-NEXT: v_add_co_u32 v6, s13, v9, v6
|
|
; GFX10-NEXT: v_mul_hi_u32 v13, s20, v0
|
|
; GFX10-NEXT: v_mul_lo_u32 v14, s20, v2
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v9, 0, 1, s13
|
|
; GFX10-NEXT: v_add_co_u32 v7, s13, v7, v11
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v11, 0, 1, s13
|
|
; GFX10-NEXT: v_add_co_u32 v6, s13, v6, v10
|
|
; GFX10-NEXT: v_mul_lo_u32 v3, s20, v0
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v6, 0, 1, s13
|
|
; GFX10-NEXT: v_add_co_u32 v5, s13, v7, v5
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v7, 0, 1, s13
|
|
; GFX10-NEXT: v_add3_u32 v12, v12, v14, v13
|
|
; GFX10-NEXT: v_add_nc_u32_e32 v6, v9, v6
|
|
; GFX10-NEXT: v_mul_hi_u32 v8, v4, v8
|
|
; GFX10-NEXT: v_mul_lo_u32 v10, v2, v3
|
|
; GFX10-NEXT: v_add_nc_u32_e32 v7, v11, v7
|
|
; GFX10-NEXT: v_mul_lo_u32 v11, v0, v12
|
|
; GFX10-NEXT: v_add_co_u32 v5, s13, v5, v6
|
|
; GFX10-NEXT: v_mul_hi_u32 v9, v0, v3
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v6, 0, 1, s13
|
|
; GFX10-NEXT: v_mul_hi_u32 v3, v2, v3
|
|
; GFX10-NEXT: v_mul_lo_u32 v13, v2, v12
|
|
; GFX10-NEXT: v_add_co_u32 v1, vcc_lo, v1, v5
|
|
; GFX10-NEXT: v_add_co_u32 v5, s13, v10, v11
|
|
; GFX10-NEXT: v_mul_hi_u32 v14, v0, v12
|
|
; GFX10-NEXT: v_add3_u32 v6, v7, v6, v8
|
|
; GFX10-NEXT: v_mul_lo_u32 v10, s7, v1
|
|
; GFX10-NEXT: v_add_co_u32 v5, s7, v5, v9
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v7, 0, 1, s13
|
|
; GFX10-NEXT: v_add_co_u32 v3, s13, v13, v3
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v5, 0, 1, s7
|
|
; GFX10-NEXT: v_add_co_ci_u32_e32 v4, vcc_lo, v4, v6, vcc_lo
|
|
; GFX10-NEXT: v_add_co_u32 v3, s7, v3, v14
|
|
; GFX10-NEXT: v_add_nc_u32_e32 v5, v7, v5
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v8, 0, 1, s13
|
|
; GFX10-NEXT: v_mul_hi_u32 v11, s6, v1
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v9, 0, 1, s7
|
|
; GFX10-NEXT: v_mul_lo_u32 v13, s6, v4
|
|
; GFX10-NEXT: v_mul_hi_u32 v7, v2, v12
|
|
; GFX10-NEXT: v_mul_lo_u32 v6, s6, v1
|
|
; GFX10-NEXT: v_add_co_u32 v3, s6, v3, v5
|
|
; GFX10-NEXT: v_add_nc_u32_e32 v8, v8, v9
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v5, 0, 1, s6
|
|
; GFX10-NEXT: v_add3_u32 v9, v10, v13, v11
|
|
; GFX10-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3
|
|
; GFX10-NEXT: v_add3_u32 v5, v8, v5, v7
|
|
; GFX10-NEXT: v_mul_lo_u32 v10, v4, v6
|
|
; GFX10-NEXT: v_mul_lo_u32 v7, v1, v9
|
|
; GFX10-NEXT: v_mul_hi_u32 v11, v1, v6
|
|
; GFX10-NEXT: v_mul_hi_u32 v6, v4, v6
|
|
; GFX10-NEXT: v_add_co_ci_u32_e32 v2, vcc_lo, v2, v5, vcc_lo
|
|
; GFX10-NEXT: v_mul_lo_u32 v3, v4, v9
|
|
; GFX10-NEXT: v_mul_lo_u32 v5, s1, v0
|
|
; GFX10-NEXT: v_mul_hi_u32 v12, s0, v0
|
|
; GFX10-NEXT: v_mul_lo_u32 v13, s0, v2
|
|
; GFX10-NEXT: v_add_co_u32 v7, s6, v10, v7
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v10, 0, 1, s6
|
|
; GFX10-NEXT: v_mul_hi_u32 v0, s1, v0
|
|
; GFX10-NEXT: v_add_co_u32 v3, s6, v3, v6
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v6, 0, 1, s6
|
|
; GFX10-NEXT: v_add_co_u32 v7, s6, v7, v11
|
|
; GFX10-NEXT: v_mul_lo_u32 v14, s1, v2
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v7, 0, 1, s6
|
|
; GFX10-NEXT: v_add_co_u32 v5, s6, v5, v13
|
|
; GFX10-NEXT: v_mul_hi_u32 v15, s0, v2
|
|
; GFX10-NEXT: v_mul_hi_u32 v8, v1, v9
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v11, 0, 1, s6
|
|
; GFX10-NEXT: v_add_co_u32 v5, s7, v5, v12
|
|
; GFX10-NEXT: v_add_co_u32 v0, s6, v14, v0
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v5, 0, 1, s7
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v12, 0, 1, s6
|
|
; GFX10-NEXT: v_mul_hi_u32 v2, s1, v2
|
|
; GFX10-NEXT: v_add_co_u32 v0, s6, v0, v15
|
|
; GFX10-NEXT: v_add_nc_u32_e32 v5, v11, v5
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v13, 0, 1, s6
|
|
; GFX10-NEXT: v_add_co_u32 v3, s6, v3, v8
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v8, 0, 1, s6
|
|
; GFX10-NEXT: v_add_co_u32 v0, s6, v0, v5
|
|
; GFX10-NEXT: v_add_nc_u32_e32 v11, v12, v13
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v5, 0, 1, s6
|
|
; GFX10-NEXT: v_add_nc_u32_e32 v7, v10, v7
|
|
; GFX10-NEXT: v_mul_hi_u32 v9, v4, v9
|
|
; GFX10-NEXT: v_add_nc_u32_e32 v6, v6, v8
|
|
; GFX10-NEXT: v_mul_hi_u32 v8, s8, v0
|
|
; GFX10-NEXT: v_add3_u32 v2, v11, v5, v2
|
|
; GFX10-NEXT: v_add_co_u32 v3, s6, v3, v7
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v5, 0, 1, s6
|
|
; GFX10-NEXT: v_mul_lo_u32 v7, s9, v0
|
|
; GFX10-NEXT: v_mul_lo_u32 v10, s8, v2
|
|
; GFX10-NEXT: v_add_co_u32 v1, vcc_lo, v1, v3
|
|
; GFX10-NEXT: v_add3_u32 v5, v6, v5, v9
|
|
; GFX10-NEXT: v_mul_lo_u32 v6, s8, v0
|
|
; GFX10-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x0
|
|
; GFX10-NEXT: v_mov_b32_e32 v9, 0
|
|
; GFX10-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, v4, v5, vcc_lo
|
|
; GFX10-NEXT: v_add3_u32 v4, v7, v10, v8
|
|
; GFX10-NEXT: v_mul_lo_u32 v5, s11, v1
|
|
; GFX10-NEXT: v_sub_co_u32 v6, vcc_lo, s0, v6
|
|
; GFX10-NEXT: v_mul_lo_u32 v14, s10, v3
|
|
; GFX10-NEXT: v_sub_nc_u32_e32 v8, s1, v4
|
|
; GFX10-NEXT: v_sub_co_ci_u32_e64 v4, s0, s1, v4, vcc_lo
|
|
; GFX10-NEXT: v_cmp_le_u32_e64 s0, s8, v6
|
|
; GFX10-NEXT: v_mul_hi_u32 v7, s11, v1
|
|
; GFX10-NEXT: v_subrev_co_ci_u32_e32 v8, vcc_lo, s9, v8, vcc_lo
|
|
; GFX10-NEXT: v_cmp_le_u32_e32 vcc_lo, s9, v4
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v10, 0, -1, s0
|
|
; GFX10-NEXT: v_mul_lo_u32 v15, s11, v3
|
|
; GFX10-NEXT: v_mul_hi_u32 v1, s10, v1
|
|
; GFX10-NEXT: v_mul_hi_u32 v17, s10, v3
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v11, 0, -1, vcc_lo
|
|
; GFX10-NEXT: v_sub_co_u32 v12, vcc_lo, v6, s8
|
|
; GFX10-NEXT: v_subrev_co_ci_u32_e64 v13, s0, 0, v8, vcc_lo
|
|
; GFX10-NEXT: v_cmp_eq_u32_e64 s0, s9, v4
|
|
; GFX10-NEXT: v_mul_hi_u32 v3, s11, v3
|
|
; GFX10-NEXT: v_subrev_co_ci_u32_e32 v8, vcc_lo, s9, v8, vcc_lo
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v10, v11, v10, s0
|
|
; GFX10-NEXT: v_cmp_le_u32_e64 s0, s9, v13
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v11, 0, -1, s0
|
|
; GFX10-NEXT: v_cmp_le_u32_e64 s0, s8, v12
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v16, 0, -1, s0
|
|
; GFX10-NEXT: v_add_co_u32 v5, s0, v5, v14
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v14, 0, 1, s0
|
|
; GFX10-NEXT: v_add_co_u32 v7, s0, v15, v7
|
|
; GFX10-NEXT: v_add_co_u32 v1, s1, v5, v1
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v1, 0, 1, s1
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v5, 0, 1, s0
|
|
; GFX10-NEXT: v_add_co_u32 v7, s0, v7, v17
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v15, 0, 1, s0
|
|
; GFX10-NEXT: v_add_co_u32 v17, s0, v0, 1
|
|
; GFX10-NEXT: v_add_co_ci_u32_e64 v18, s0, 0, v2, s0
|
|
; GFX10-NEXT: v_add_nc_u32_e32 v1, v14, v1
|
|
; GFX10-NEXT: v_cmp_eq_u32_e64 s0, s9, v13
|
|
; GFX10-NEXT: v_add_nc_u32_e32 v5, v5, v15
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v11, v11, v16, s0
|
|
; GFX10-NEXT: v_add_co_u32 v7, s0, v7, v1
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v1, 0, 1, s0
|
|
; GFX10-NEXT: v_add_co_u32 v14, s0, v17, 1
|
|
; GFX10-NEXT: v_add_co_ci_u32_e64 v15, s0, 0, v18, s0
|
|
; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v11
|
|
; GFX10-NEXT: v_add3_u32 v3, v5, v1, v3
|
|
; GFX10-NEXT: v_sub_co_u32 v1, s0, v12, s8
|
|
; GFX10-NEXT: v_subrev_co_ci_u32_e64 v5, s0, 0, v8, s0
|
|
; GFX10-NEXT: v_cndmask_b32_e32 v8, v17, v14, vcc_lo
|
|
; GFX10-NEXT: v_cndmask_b32_e32 v14, v18, v15, vcc_lo
|
|
; GFX10-NEXT: v_mul_lo_u32 v15, s3, v7
|
|
; GFX10-NEXT: v_mul_lo_u32 v16, s2, v3
|
|
; GFX10-NEXT: v_mul_hi_u32 v17, s2, v7
|
|
; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v10
|
|
; GFX10-NEXT: v_mul_lo_u32 v10, s2, v7
|
|
; GFX10-NEXT: v_cmp_ne_u32_e64 s0, 0, v11
|
|
; GFX10-NEXT: v_cndmask_b32_e32 v0, v0, v8, vcc_lo
|
|
; GFX10-NEXT: v_cndmask_b32_e32 v2, v2, v14, vcc_lo
|
|
; GFX10-NEXT: v_add3_u32 v8, v15, v16, v17
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v1, v12, v1, s0
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v5, v13, v5, s0
|
|
; GFX10-NEXT: v_sub_co_u32 v10, s0, s10, v10
|
|
; GFX10-NEXT: v_sub_co_ci_u32_e64 v11, s1, s11, v8, s0
|
|
; GFX10-NEXT: v_cndmask_b32_e32 v6, v6, v1, vcc_lo
|
|
; GFX10-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc_lo
|
|
; GFX10-NEXT: v_sub_nc_u32_e32 v1, s11, v8
|
|
; GFX10-NEXT: v_cmp_le_u32_e32 vcc_lo, s3, v11
|
|
; GFX10-NEXT: v_xor_b32_e32 v0, s18, v0
|
|
; GFX10-NEXT: v_xor_b32_e32 v2, s19, v2
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v5, 0, -1, vcc_lo
|
|
; GFX10-NEXT: v_subrev_co_ci_u32_e64 v8, vcc_lo, s3, v1, s0
|
|
; GFX10-NEXT: v_cmp_le_u32_e32 vcc_lo, s2, v10
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v12, 0, -1, vcc_lo
|
|
; GFX10-NEXT: v_sub_co_u32 v13, vcc_lo, v10, s2
|
|
; GFX10-NEXT: v_subrev_co_ci_u32_e64 v14, s0, 0, v8, vcc_lo
|
|
; GFX10-NEXT: v_sub_co_u32 v0, s0, v0, s18
|
|
; GFX10-NEXT: v_subrev_co_ci_u32_e64 v1, s0, s19, v2, s0
|
|
; GFX10-NEXT: v_cmp_eq_u32_e64 s0, s3, v11
|
|
; GFX10-NEXT: v_xor_b32_e32 v2, s12, v6
|
|
; GFX10-NEXT: v_subrev_co_ci_u32_e32 v8, vcc_lo, s3, v8, vcc_lo
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v5, v5, v12, s0
|
|
; GFX10-NEXT: v_cmp_le_u32_e64 s0, s3, v14
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v6, 0, -1, s0
|
|
; GFX10-NEXT: v_cmp_le_u32_e64 s0, s2, v13
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v12, 0, -1, s0
|
|
; GFX10-NEXT: v_add_co_u32 v15, s0, v7, 1
|
|
; GFX10-NEXT: v_add_co_ci_u32_e64 v16, s0, 0, v3, s0
|
|
; GFX10-NEXT: v_cmp_eq_u32_e64 s0, s3, v14
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v6, v6, v12, s0
|
|
; GFX10-NEXT: v_add_co_u32 v12, s0, v15, 1
|
|
; GFX10-NEXT: v_add_co_ci_u32_e64 v17, s0, 0, v16, s0
|
|
; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v6
|
|
; GFX10-NEXT: v_sub_co_u32 v6, s0, v13, s2
|
|
; GFX10-NEXT: v_subrev_co_ci_u32_e64 v8, s0, 0, v8, s0
|
|
; GFX10-NEXT: v_cndmask_b32_e32 v12, v15, v12, vcc_lo
|
|
; GFX10-NEXT: v_cmp_ne_u32_e64 s0, 0, v5
|
|
; GFX10-NEXT: v_cndmask_b32_e32 v15, v16, v17, vcc_lo
|
|
; GFX10-NEXT: v_cndmask_b32_e32 v5, v13, v6, vcc_lo
|
|
; GFX10-NEXT: v_cndmask_b32_e32 v6, v14, v8, vcc_lo
|
|
; GFX10-NEXT: v_xor_b32_e32 v8, s12, v4
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v7, v7, v12, s0
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v3, v3, v15, s0
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v10, v10, v5, s0
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v6, v11, v6, s0
|
|
; GFX10-NEXT: s_xor_b64 s[0:1], s[14:15], s[16:17]
|
|
; GFX10-NEXT: v_sub_co_u32 v4, vcc_lo, v2, s12
|
|
; GFX10-NEXT: v_xor_b32_e32 v2, s0, v7
|
|
; GFX10-NEXT: v_xor_b32_e32 v3, s1, v3
|
|
; GFX10-NEXT: v_xor_b32_e32 v7, s14, v10
|
|
; GFX10-NEXT: v_subrev_co_ci_u32_e32 v5, vcc_lo, s12, v8, vcc_lo
|
|
; GFX10-NEXT: v_xor_b32_e32 v8, s14, v6
|
|
; GFX10-NEXT: v_sub_co_u32 v2, vcc_lo, v2, s0
|
|
; GFX10-NEXT: v_subrev_co_ci_u32_e32 v3, vcc_lo, s1, v3, vcc_lo
|
|
; GFX10-NEXT: v_sub_co_u32 v6, vcc_lo, v7, s14
|
|
; GFX10-NEXT: v_subrev_co_ci_u32_e32 v7, vcc_lo, s14, v8, vcc_lo
|
|
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GFX10-NEXT: global_store_dwordx4 v9, v[0:3], s[4:5]
|
|
; GFX10-NEXT: global_store_dwordx4 v9, v[4:7], s[6:7]
|
|
; GFX10-NEXT: s_endpgm
|
|
%div = sdiv <2 x i64> %x, %y
|
|
store <2 x i64> %div, <2 x i64> addrspace(1)* %out0
|
|
%rem = srem <2 x i64> %x, %y
|
|
store <2 x i64> %rem, <2 x i64> addrspace(1)* %out1
|
|
ret void
|
|
}
; NOTE(review): checks below are autogenerated (update_llc_test_checks.py);
; regenerate rather than hand-editing the GFX8/GFX9/GFX10 lines.
; Despite the @sdiv_i8 name, this kernel stores both the quotient (%div)
; and the remainder (%rem).
define amdgpu_kernel void @sdiv_i8(i8 addrspace(1)* %out0, i8 addrspace(1)* %out1, i8 %x, i8 %y) {
; GFX8-LABEL: sdiv_i8:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_load_dword s6, s[4:5], 0x10
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: s_bfe_i32 s0, s6, 0x80008
; GFX8-NEXT: s_ashr_i32 s7, s0, 31
; GFX8-NEXT: s_add_i32 s0, s0, s7
; GFX8-NEXT: s_xor_b32 s8, s0, s7
; GFX8-NEXT: v_cvt_f32_u32_e32 v0, s8
; GFX8-NEXT: s_sub_i32 s0, 0, s8
; GFX8-NEXT: v_rcp_iflag_f32_e32 v0, v0
; GFX8-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX8-NEXT: v_cvt_u32_f32_e32 v0, v0
; GFX8-NEXT: v_mul_lo_u32 v1, s0, v0
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; GFX8-NEXT: s_sext_i32_i8 s4, s6
; GFX8-NEXT: s_ashr_i32 s5, s4, 31
; GFX8-NEXT: v_mul_hi_u32 v1, v0, v1
; GFX8-NEXT: s_add_i32 s4, s4, s5
; GFX8-NEXT: s_xor_b32 s4, s4, s5
; GFX8-NEXT: s_xor_b32 s6, s5, s7
; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v1
; GFX8-NEXT: v_mul_hi_u32 v2, s4, v0
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v0, s0
; GFX8-NEXT: v_mov_b32_e32 v1, s1
; GFX8-NEXT: v_mul_lo_u32 v3, v2, s8
; GFX8-NEXT: v_add_u32_e32 v4, vcc, 1, v2
; GFX8-NEXT: v_sub_u32_e32 v3, vcc, s4, v3
; GFX8-NEXT: v_cmp_le_u32_e32 vcc, s8, v3
; GFX8-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc
; GFX8-NEXT: v_subrev_u32_e64 v4, s[0:1], s8, v3
; GFX8-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc
; GFX8-NEXT: v_add_u32_e32 v4, vcc, 1, v2
; GFX8-NEXT: v_cmp_le_u32_e32 vcc, s8, v3
; GFX8-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc
; GFX8-NEXT: v_subrev_u32_e64 v4, s[0:1], s8, v3
; GFX8-NEXT: v_xor_b32_e32 v2, s6, v2
; GFX8-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc
; GFX8-NEXT: v_subrev_u32_e32 v2, vcc, s6, v2
; GFX8-NEXT: v_xor_b32_e32 v3, s5, v3
; GFX8-NEXT: flat_store_byte v[0:1], v2
; GFX8-NEXT: v_mov_b32_e32 v0, s2
; GFX8-NEXT: v_subrev_u32_e32 v3, vcc, s5, v3
; GFX8-NEXT: v_mov_b32_e32 v1, s3
; GFX8-NEXT: flat_store_byte v[0:1], v3
; GFX8-NEXT: s_endpgm
;
; GFX9-LABEL: sdiv_i8:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dword s0, s[4:5], 0x10
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_bfe_i32 s1, s0, 0x80008
; GFX9-NEXT: s_ashr_i32 s6, s1, 31
; GFX9-NEXT: s_add_i32 s1, s1, s6
; GFX9-NEXT: s_xor_b32 s7, s1, s6
; GFX9-NEXT: v_cvt_f32_u32_e32 v0, s7
; GFX9-NEXT: s_sub_i32 s1, 0, s7
; GFX9-NEXT: s_sext_i32_i8 s0, s0
; GFX9-NEXT: s_ashr_i32 s8, s0, 31
; GFX9-NEXT: v_rcp_iflag_f32_e32 v0, v0
; GFX9-NEXT: s_add_i32 s0, s0, s8
; GFX9-NEXT: s_xor_b32 s9, s0, s8
; GFX9-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX9-NEXT: v_cvt_u32_f32_e32 v0, v0
; GFX9-NEXT: v_mul_lo_u32 v1, s1, v0
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; GFX9-NEXT: s_xor_b32 s4, s8, s6
; GFX9-NEXT: v_mul_hi_u32 v1, v0, v1
; GFX9-NEXT: v_add_u32_e32 v0, v0, v1
; GFX9-NEXT: v_mul_hi_u32 v0, s9, v0
; GFX9-NEXT: v_mul_lo_u32 v1, v0, s7
; GFX9-NEXT: v_add_u32_e32 v3, 1, v0
; GFX9-NEXT: v_sub_u32_e32 v1, s9, v1
; GFX9-NEXT: v_cmp_le_u32_e32 vcc, s7, v1
; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc
; GFX9-NEXT: v_subrev_u32_e32 v3, s7, v1
; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
; GFX9-NEXT: v_add_u32_e32 v3, 1, v0
; GFX9-NEXT: v_cmp_le_u32_e32 vcc, s7, v1
; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc
; GFX9-NEXT: v_subrev_u32_e32 v3, s7, v1
; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
; GFX9-NEXT: v_xor_b32_e32 v0, s4, v0
; GFX9-NEXT: v_subrev_u32_e32 v0, s4, v0
; GFX9-NEXT: v_xor_b32_e32 v1, s8, v1
; GFX9-NEXT: v_subrev_u32_e32 v1, s8, v1
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_store_byte v2, v0, s[0:1]
; GFX9-NEXT: global_store_byte v2, v1, s[2:3]
; GFX9-NEXT: s_endpgm
;
; GFX10-LABEL: sdiv_i8:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_load_dword s0, s[4:5], 0x10
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_bfe_i32 s1, s0, 0x80008
; GFX10-NEXT: s_sext_i32_i8 s0, s0
; GFX10-NEXT: s_ashr_i32 s6, s1, 31
; GFX10-NEXT: s_ashr_i32 s8, s0, 31
; GFX10-NEXT: s_add_i32 s1, s1, s6
; GFX10-NEXT: s_add_i32 s0, s0, s8
; GFX10-NEXT: s_xor_b32 s7, s1, s6
; GFX10-NEXT: s_xor_b32 s0, s0, s8
; GFX10-NEXT: v_cvt_f32_u32_e32 v0, s7
; GFX10-NEXT: s_sub_i32 s1, 0, s7
; GFX10-NEXT: v_rcp_iflag_f32_e32 v0, v0
; GFX10-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX10-NEXT: v_cvt_u32_f32_e32 v0, v0
; GFX10-NEXT: v_mul_lo_u32 v1, s1, v0
; GFX10-NEXT: v_mul_hi_u32 v1, v0, v1
; GFX10-NEXT: v_add_nc_u32_e32 v0, v0, v1
; GFX10-NEXT: v_mul_hi_u32 v0, s0, v0
; GFX10-NEXT: v_mul_lo_u32 v1, v0, s7
; GFX10-NEXT: v_add_nc_u32_e32 v2, 1, v0
; GFX10-NEXT: v_sub_nc_u32_e32 v1, s0, v1
; GFX10-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; GFX10-NEXT: s_xor_b32 s4, s8, s6
; GFX10-NEXT: v_subrev_nc_u32_e32 v3, s7, v1
; GFX10-NEXT: v_cmp_le_u32_e32 vcc_lo, s7, v1
; GFX10-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc_lo
; GFX10-NEXT: v_add_nc_u32_e32 v2, 1, v0
; GFX10-NEXT: v_cmp_le_u32_e32 vcc_lo, s7, v1
; GFX10-NEXT: v_subrev_nc_u32_e32 v3, s7, v1
; GFX10-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc_lo
; GFX10-NEXT: v_mov_b32_e32 v2, 0
; GFX10-NEXT: v_xor_b32_e32 v0, s4, v0
; GFX10-NEXT: v_xor_b32_e32 v1, s8, v1
; GFX10-NEXT: v_subrev_nc_u32_e32 v0, s4, v0
; GFX10-NEXT: v_subrev_nc_u32_e32 v1, s8, v1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: global_store_byte v2, v0, s[0:1]
; GFX10-NEXT: global_store_byte v2, v1, s[2:3]
; GFX10-NEXT: s_endpgm
  %div = sdiv i8 %x, %y
  store i8 %div, i8 addrspace(1)* %out0
  %rem = srem i8 %x, %y
  store i8 %rem, i8 addrspace(1)* %out1
  ret void
}
; NOTE(review): checks below are autogenerated (update_llc_test_checks.py);
; regenerate rather than hand-editing the GFX8/GFX9/GFX10 lines.
; Stores both the element-wise quotient (%div) and remainder (%rem) of a
; <2 x i8> signed division.
define amdgpu_kernel void @sdivrem_v2i8(<2 x i8> addrspace(1)* %out0, <2 x i8> addrspace(1)* %out1, <2 x i8> %x, <2 x i8> %y) {
; GFX8-LABEL: sdivrem_v2i8:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_load_dword s2, s[4:5], 0x10
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: s_bfe_i32 s0, s2, 0x80010
; GFX8-NEXT: s_ashr_i32 s3, s0, 31
; GFX8-NEXT: s_add_i32 s0, s0, s3
; GFX8-NEXT: s_xor_b32 s8, s0, s3
; GFX8-NEXT: v_cvt_f32_u32_e32 v0, s8
; GFX8-NEXT: s_sub_i32 s6, 0, s8
; GFX8-NEXT: s_bfe_i32 s1, s2, 0x80018
; GFX8-NEXT: s_ashr_i32 s10, s1, 31
; GFX8-NEXT: v_rcp_iflag_f32_e32 v0, v0
; GFX8-NEXT: s_add_i32 s1, s1, s10
; GFX8-NEXT: s_xor_b32 s11, s1, s10
; GFX8-NEXT: s_sext_i32_i8 s0, s2
; GFX8-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX8-NEXT: v_cvt_u32_f32_e32 v0, v0
; GFX8-NEXT: v_cvt_f32_u32_e32 v2, s11
; GFX8-NEXT: s_ashr_i32 s9, s0, 31
; GFX8-NEXT: s_add_i32 s0, s0, s9
; GFX8-NEXT: v_mul_lo_u32 v1, s6, v0
; GFX8-NEXT: s_xor_b32 s0, s0, s9
; GFX8-NEXT: v_rcp_iflag_f32_e32 v2, v2
; GFX8-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x0
; GFX8-NEXT: v_mul_hi_u32 v1, v0, v1
; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v1
; GFX8-NEXT: v_mul_hi_u32 v0, s0, v0
; GFX8-NEXT: v_mul_f32_e32 v1, 0x4f7ffffe, v2
; GFX8-NEXT: v_cvt_u32_f32_e32 v1, v1
; GFX8-NEXT: v_mul_lo_u32 v2, v0, s8
; GFX8-NEXT: v_add_u32_e32 v3, vcc, 1, v0
; GFX8-NEXT: v_sub_u32_e32 v2, vcc, s0, v2
; GFX8-NEXT: v_cmp_le_u32_e32 vcc, s8, v2
; GFX8-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc
; GFX8-NEXT: v_subrev_u32_e64 v3, s[0:1], s8, v2
; GFX8-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc
; GFX8-NEXT: v_add_u32_e32 v3, vcc, 1, v0
; GFX8-NEXT: v_cmp_le_u32_e32 vcc, s8, v2
; GFX8-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc
; GFX8-NEXT: v_subrev_u32_e64 v3, s[0:1], s8, v2
; GFX8-NEXT: s_sub_i32 s1, 0, s11
; GFX8-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc
; GFX8-NEXT: v_mul_lo_u32 v3, s1, v1
; GFX8-NEXT: s_bfe_i32 s1, s2, 0x80008
; GFX8-NEXT: s_ashr_i32 s2, s1, 31
; GFX8-NEXT: s_add_i32 s1, s1, s2
; GFX8-NEXT: v_mul_hi_u32 v3, v1, v3
; GFX8-NEXT: s_xor_b32 s1, s1, s2
; GFX8-NEXT: s_xor_b32 s0, s9, s3
; GFX8-NEXT: v_xor_b32_e32 v0, s0, v0
; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v3
; GFX8-NEXT: v_mul_hi_u32 v1, s1, v1
; GFX8-NEXT: v_xor_b32_e32 v2, s9, v2
; GFX8-NEXT: v_subrev_u32_e32 v0, vcc, s0, v0
; GFX8-NEXT: v_mul_lo_u32 v3, v1, s11
; GFX8-NEXT: v_subrev_u32_e32 v2, vcc, s9, v2
; GFX8-NEXT: v_add_u32_e32 v4, vcc, 1, v1
; GFX8-NEXT: v_sub_u32_e32 v3, vcc, s1, v3
; GFX8-NEXT: v_cmp_le_u32_e32 vcc, s11, v3
; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc
; GFX8-NEXT: v_subrev_u32_e64 v4, s[0:1], s11, v3
; GFX8-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc
; GFX8-NEXT: v_add_u32_e32 v4, vcc, 1, v1
; GFX8-NEXT: v_cmp_le_u32_e32 vcc, s11, v3
; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc
; GFX8-NEXT: v_subrev_u32_e64 v4, s[0:1], s11, v3
; GFX8-NEXT: s_xor_b32 s0, s2, s10
; GFX8-NEXT: v_xor_b32_e32 v1, s0, v1
; GFX8-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc
; GFX8-NEXT: v_subrev_u32_e32 v1, vcc, s0, v1
; GFX8-NEXT: s_movk_i32 s0, 0xff
; GFX8-NEXT: v_and_b32_e32 v1, s0, v1
; GFX8-NEXT: v_lshlrev_b16_e32 v1, 8, v1
; GFX8-NEXT: v_xor_b32_e32 v3, s2, v3
; GFX8-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v0, s4
; GFX8-NEXT: v_subrev_u32_e32 v3, vcc, s2, v3
; GFX8-NEXT: v_mov_b32_e32 v1, s5
; GFX8-NEXT: flat_store_short v[0:1], v4
; GFX8-NEXT: v_and_b32_e32 v0, s0, v3
; GFX8-NEXT: v_lshlrev_b16_e32 v0, 8, v0
; GFX8-NEXT: v_or_b32_sdwa v2, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX8-NEXT: v_mov_b32_e32 v0, s6
; GFX8-NEXT: v_mov_b32_e32 v1, s7
; GFX8-NEXT: flat_store_short v[0:1], v2
; GFX8-NEXT: s_endpgm
;
; GFX9-LABEL: sdivrem_v2i8:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dword s6, s[4:5], 0x10
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_bfe_i32 s0, s6, 0x80010
; GFX9-NEXT: s_ashr_i32 s7, s0, 31
; GFX9-NEXT: s_add_i32 s0, s0, s7
; GFX9-NEXT: s_xor_b32 s8, s0, s7
; GFX9-NEXT: v_cvt_f32_u32_e32 v0, s8
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; GFX9-NEXT: s_bfe_i32 s5, s6, 0x80018
; GFX9-NEXT: s_ashr_i32 s9, s5, 31
; GFX9-NEXT: v_rcp_iflag_f32_e32 v0, v0
; GFX9-NEXT: s_add_i32 s5, s5, s9
; GFX9-NEXT: s_xor_b32 s5, s5, s9
; GFX9-NEXT: v_cvt_f32_u32_e32 v1, s5
; GFX9-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX9-NEXT: v_cvt_u32_f32_e32 v0, v0
; GFX9-NEXT: s_sub_i32 s10, 0, s8
; GFX9-NEXT: v_rcp_iflag_f32_e32 v1, v1
; GFX9-NEXT: s_sext_i32_i8 s4, s6
; GFX9-NEXT: v_mul_lo_u32 v2, s10, v0
; GFX9-NEXT: s_ashr_i32 s10, s4, 31
; GFX9-NEXT: v_mul_f32_e32 v1, 0x4f7ffffe, v1
; GFX9-NEXT: v_cvt_u32_f32_e32 v1, v1
; GFX9-NEXT: v_mul_hi_u32 v2, v0, v2
; GFX9-NEXT: s_add_i32 s4, s4, s10
; GFX9-NEXT: s_xor_b32 s4, s4, s10
; GFX9-NEXT: s_sub_i32 s11, 0, s5
; GFX9-NEXT: v_add_u32_e32 v0, v0, v2
; GFX9-NEXT: v_mul_hi_u32 v0, s4, v0
; GFX9-NEXT: v_mul_lo_u32 v2, s11, v1
; GFX9-NEXT: s_bfe_i32 s6, s6, 0x80008
; GFX9-NEXT: s_ashr_i32 s11, s6, 31
; GFX9-NEXT: v_mul_lo_u32 v3, v0, s8
; GFX9-NEXT: v_mul_hi_u32 v2, v1, v2
; GFX9-NEXT: s_add_i32 s6, s6, s11
; GFX9-NEXT: v_add_u32_e32 v4, 1, v0
; GFX9-NEXT: v_sub_u32_e32 v3, s4, v3
; GFX9-NEXT: v_cmp_le_u32_e32 vcc, s8, v3
; GFX9-NEXT: s_xor_b32 s4, s6, s11
; GFX9-NEXT: v_add_u32_e32 v1, v1, v2
; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v4, vcc
; GFX9-NEXT: v_subrev_u32_e32 v4, s8, v3
; GFX9-NEXT: v_mul_hi_u32 v1, s4, v1
; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc
; GFX9-NEXT: v_add_u32_e32 v4, 1, v0
; GFX9-NEXT: v_cmp_le_u32_e32 vcc, s8, v3
; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v4, vcc
; GFX9-NEXT: v_subrev_u32_e32 v4, s8, v3
; GFX9-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc
; GFX9-NEXT: v_mul_lo_u32 v3, v1, s5
; GFX9-NEXT: v_add_u32_e32 v4, 1, v1
; GFX9-NEXT: s_xor_b32 s6, s10, s7
; GFX9-NEXT: v_xor_b32_e32 v0, s6, v0
; GFX9-NEXT: v_sub_u32_e32 v3, s4, v3
; GFX9-NEXT: v_cmp_le_u32_e32 vcc, s5, v3
; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc
; GFX9-NEXT: v_subrev_u32_e32 v4, s5, v3
; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc
; GFX9-NEXT: v_add_u32_e32 v4, 1, v1
; GFX9-NEXT: v_cmp_le_u32_e32 vcc, s5, v3
; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc
; GFX9-NEXT: s_xor_b32 s4, s11, s9
; GFX9-NEXT: v_xor_b32_e32 v1, s4, v1
; GFX9-NEXT: v_subrev_u32_e32 v4, s5, v3
; GFX9-NEXT: v_subrev_u32_e32 v1, s4, v1
; GFX9-NEXT: s_movk_i32 s4, 0xff
; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc
; GFX9-NEXT: v_and_b32_e32 v1, s4, v1
; GFX9-NEXT: v_subrev_u32_e32 v0, s6, v0
; GFX9-NEXT: v_xor_b32_e32 v3, s11, v3
; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v1
; GFX9-NEXT: v_subrev_u32_e32 v3, s11, v3
; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: v_xor_b32_e32 v2, s10, v2
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_store_short v1, v0, s[0:1]
; GFX9-NEXT: v_and_b32_e32 v0, s4, v3
; GFX9-NEXT: v_subrev_u32_e32 v2, s10, v2
; GFX9-NEXT: v_lshlrev_b16_e32 v0, 8, v0
; GFX9-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: global_store_short v1, v0, s[2:3]
; GFX9-NEXT: s_endpgm
;
; GFX10-LABEL: sdivrem_v2i8:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_load_dword s0, s[4:5], 0x10
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_bfe_i32 s1, s0, 0x80018
; GFX10-NEXT: s_bfe_i32 s2, s0, 0x80010
; GFX10-NEXT: s_ashr_i32 s3, s1, 31
; GFX10-NEXT: s_ashr_i32 s8, s2, 31
; GFX10-NEXT: s_add_i32 s1, s1, s3
; GFX10-NEXT: s_add_i32 s2, s2, s8
; GFX10-NEXT: s_xor_b32 s1, s1, s3
; GFX10-NEXT: s_xor_b32 s2, s2, s8
; GFX10-NEXT: v_cvt_f32_u32_e32 v0, s1
; GFX10-NEXT: v_cvt_f32_u32_e32 v1, s2
; GFX10-NEXT: s_sub_i32 s6, 0, s1
; GFX10-NEXT: s_sub_i32 s7, 0, s2
; GFX10-NEXT: v_rcp_iflag_f32_e32 v0, v0
; GFX10-NEXT: v_rcp_iflag_f32_e32 v1, v1
; GFX10-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX10-NEXT: v_mul_f32_e32 v1, 0x4f7ffffe, v1
; GFX10-NEXT: v_cvt_u32_f32_e32 v0, v0
; GFX10-NEXT: v_cvt_u32_f32_e32 v1, v1
; GFX10-NEXT: v_mul_lo_u32 v2, s6, v0
; GFX10-NEXT: v_mul_lo_u32 v3, s7, v1
; GFX10-NEXT: s_sext_i32_i8 s6, s0
; GFX10-NEXT: s_bfe_i32 s0, s0, 0x80008
; GFX10-NEXT: s_ashr_i32 s9, s6, 31
; GFX10-NEXT: s_ashr_i32 s10, s0, 31
; GFX10-NEXT: s_add_i32 s6, s6, s9
; GFX10-NEXT: s_add_i32 s0, s0, s10
; GFX10-NEXT: v_mul_hi_u32 v2, v0, v2
; GFX10-NEXT: v_mul_hi_u32 v3, v1, v3
; GFX10-NEXT: s_xor_b32 s0, s0, s10
; GFX10-NEXT: s_xor_b32 s6, s6, s9
; GFX10-NEXT: v_add_nc_u32_e32 v0, v0, v2
; GFX10-NEXT: v_add_nc_u32_e32 v1, v1, v3
; GFX10-NEXT: v_mul_hi_u32 v0, s0, v0
; GFX10-NEXT: v_mul_hi_u32 v1, s6, v1
; GFX10-NEXT: v_mul_lo_u32 v2, v0, s1
; GFX10-NEXT: v_mul_lo_u32 v3, v1, s2
; GFX10-NEXT: v_add_nc_u32_e32 v5, 1, v0
; GFX10-NEXT: v_add_nc_u32_e32 v4, 1, v1
; GFX10-NEXT: v_sub_nc_u32_e32 v2, s0, v2
; GFX10-NEXT: v_sub_nc_u32_e32 v3, s6, v3
; GFX10-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x0
; GFX10-NEXT: v_cmp_le_u32_e32 vcc_lo, s1, v2
; GFX10-NEXT: v_subrev_nc_u32_e32 v6, s1, v2
; GFX10-NEXT: v_subrev_nc_u32_e32 v7, s2, v3
; GFX10-NEXT: v_cmp_le_u32_e64 s0, s2, v3
; GFX10-NEXT: v_cndmask_b32_e32 v0, v0, v5, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e32 v2, v2, v6, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, v4, s0
; GFX10-NEXT: v_cndmask_b32_e64 v3, v3, v7, s0
; GFX10-NEXT: v_add_nc_u32_e32 v5, 1, v0
; GFX10-NEXT: v_cmp_le_u32_e32 vcc_lo, s1, v2
; GFX10-NEXT: v_subrev_nc_u32_e32 v6, s1, v2
; GFX10-NEXT: v_add_nc_u32_e32 v4, 1, v1
; GFX10-NEXT: v_cmp_le_u32_e64 s0, s2, v3
; GFX10-NEXT: v_subrev_nc_u32_e32 v7, s2, v3
; GFX10-NEXT: v_cndmask_b32_e32 v0, v0, v5, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e32 v2, v2, v6, vcc_lo
; GFX10-NEXT: s_xor_b32 s1, s10, s3
; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, v4, s0
; GFX10-NEXT: v_cndmask_b32_e64 v3, v3, v7, s0
; GFX10-NEXT: v_xor_b32_e32 v0, s1, v0
; GFX10-NEXT: v_xor_b32_e32 v2, s10, v2
; GFX10-NEXT: s_xor_b32 s0, s9, s8
; GFX10-NEXT: v_xor_b32_e32 v1, s0, v1
; GFX10-NEXT: v_subrev_nc_u32_e32 v0, s1, v0
; GFX10-NEXT: v_xor_b32_e32 v3, s9, v3
; GFX10-NEXT: v_subrev_nc_u32_e32 v2, s10, v2
; GFX10-NEXT: s_movk_i32 s1, 0xff
; GFX10-NEXT: v_subrev_nc_u32_e32 v1, s0, v1
; GFX10-NEXT: v_and_b32_sdwa v0, v0, s1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX10-NEXT: v_subrev_nc_u32_e32 v3, s9, v3
; GFX10-NEXT: v_and_b32_sdwa v2, v2, s1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX10-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX10-NEXT: v_mov_b32_e32 v1, 0
; GFX10-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: global_store_short v1, v0, s[4:5]
; GFX10-NEXT: global_store_short v1, v2, s[6:7]
; GFX10-NEXT: s_endpgm
  %div = sdiv <2 x i8> %x, %y
  store <2 x i8> %div, <2 x i8> addrspace(1)* %out0
  %rem = srem <2 x i8> %x, %y
  store <2 x i8> %rem, <2 x i8> addrspace(1)* %out1
  ret void
}
define amdgpu_kernel void @sdiv_i16(i16 addrspace(1)* %out0, i16 addrspace(1)* %out1, i16 %x, i16 %y) {
|
|
; GFX8-LABEL: sdiv_i16:
|
|
; GFX8: ; %bb.0:
|
|
; GFX8-NEXT: s_load_dword s6, s[4:5], 0x10
|
|
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GFX8-NEXT: s_bfe_i32 s0, s6, 0x100010
|
|
; GFX8-NEXT: s_ashr_i32 s7, s0, 31
|
|
; GFX8-NEXT: s_add_i32 s0, s0, s7
|
|
; GFX8-NEXT: s_xor_b32 s8, s0, s7
|
|
; GFX8-NEXT: v_cvt_f32_u32_e32 v0, s8
|
|
; GFX8-NEXT: s_sub_i32 s0, 0, s8
|
|
; GFX8-NEXT: v_rcp_iflag_f32_e32 v0, v0
|
|
; GFX8-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
|
|
; GFX8-NEXT: v_cvt_u32_f32_e32 v0, v0
|
|
; GFX8-NEXT: v_mul_lo_u32 v1, s0, v0
|
|
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
|
|
; GFX8-NEXT: s_sext_i32_i16 s4, s6
|
|
; GFX8-NEXT: s_ashr_i32 s5, s4, 31
|
|
; GFX8-NEXT: v_mul_hi_u32 v1, v0, v1
|
|
; GFX8-NEXT: s_add_i32 s4, s4, s5
|
|
; GFX8-NEXT: s_xor_b32 s4, s4, s5
|
|
; GFX8-NEXT: s_xor_b32 s6, s5, s7
|
|
; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v1
|
|
; GFX8-NEXT: v_mul_hi_u32 v2, s4, v0
|
|
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GFX8-NEXT: v_mov_b32_e32 v0, s0
|
|
; GFX8-NEXT: v_mov_b32_e32 v1, s1
|
|
; GFX8-NEXT: v_mul_lo_u32 v3, v2, s8
|
|
; GFX8-NEXT: v_add_u32_e32 v4, vcc, 1, v2
|
|
; GFX8-NEXT: v_sub_u32_e32 v3, vcc, s4, v3
|
|
; GFX8-NEXT: v_cmp_le_u32_e32 vcc, s8, v3
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc
|
|
; GFX8-NEXT: v_subrev_u32_e64 v4, s[0:1], s8, v3
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc
|
|
; GFX8-NEXT: v_add_u32_e32 v4, vcc, 1, v2
|
|
; GFX8-NEXT: v_cmp_le_u32_e32 vcc, s8, v3
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc
|
|
; GFX8-NEXT: v_subrev_u32_e64 v4, s[0:1], s8, v3
|
|
; GFX8-NEXT: v_xor_b32_e32 v2, s6, v2
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc
|
|
; GFX8-NEXT: v_subrev_u32_e32 v2, vcc, s6, v2
|
|
; GFX8-NEXT: v_xor_b32_e32 v3, s5, v3
|
|
; GFX8-NEXT: flat_store_short v[0:1], v2
|
|
; GFX8-NEXT: v_mov_b32_e32 v0, s2
|
|
; GFX8-NEXT: v_subrev_u32_e32 v3, vcc, s5, v3
|
|
; GFX8-NEXT: v_mov_b32_e32 v1, s3
|
|
; GFX8-NEXT: flat_store_short v[0:1], v3
|
|
; GFX8-NEXT: s_endpgm
|
|
;
|
|
; GFX9-LABEL: sdiv_i16:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_load_dword s0, s[4:5], 0x10
|
|
; GFX9-NEXT: v_mov_b32_e32 v2, 0
|
|
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GFX9-NEXT: s_bfe_i32 s1, s0, 0x100010
|
|
; GFX9-NEXT: s_ashr_i32 s6, s1, 31
|
|
; GFX9-NEXT: s_add_i32 s1, s1, s6
|
|
; GFX9-NEXT: s_xor_b32 s7, s1, s6
|
|
; GFX9-NEXT: v_cvt_f32_u32_e32 v0, s7
|
|
; GFX9-NEXT: s_sub_i32 s1, 0, s7
|
|
; GFX9-NEXT: s_sext_i32_i16 s0, s0
|
|
; GFX9-NEXT: s_ashr_i32 s8, s0, 31
|
|
; GFX9-NEXT: v_rcp_iflag_f32_e32 v0, v0
|
|
; GFX9-NEXT: s_add_i32 s0, s0, s8
|
|
; GFX9-NEXT: s_xor_b32 s9, s0, s8
|
|
; GFX9-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
|
|
; GFX9-NEXT: v_cvt_u32_f32_e32 v0, v0
|
|
; GFX9-NEXT: v_mul_lo_u32 v1, s1, v0
|
|
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
|
|
; GFX9-NEXT: s_xor_b32 s4, s8, s6
|
|
; GFX9-NEXT: v_mul_hi_u32 v1, v0, v1
|
|
; GFX9-NEXT: v_add_u32_e32 v0, v0, v1
|
|
; GFX9-NEXT: v_mul_hi_u32 v0, s9, v0
|
|
; GFX9-NEXT: v_mul_lo_u32 v1, v0, s7
|
|
; GFX9-NEXT: v_add_u32_e32 v3, 1, v0
|
|
; GFX9-NEXT: v_sub_u32_e32 v1, s9, v1
|
|
; GFX9-NEXT: v_cmp_le_u32_e32 vcc, s7, v1
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc
|
|
; GFX9-NEXT: v_subrev_u32_e32 v3, s7, v1
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
|
|
; GFX9-NEXT: v_add_u32_e32 v3, 1, v0
|
|
; GFX9-NEXT: v_cmp_le_u32_e32 vcc, s7, v1
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc
|
|
; GFX9-NEXT: v_subrev_u32_e32 v3, s7, v1
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
|
|
; GFX9-NEXT: v_xor_b32_e32 v0, s4, v0
|
|
; GFX9-NEXT: v_subrev_u32_e32 v0, s4, v0
|
|
; GFX9-NEXT: v_xor_b32_e32 v1, s8, v1
|
|
; GFX9-NEXT: v_subrev_u32_e32 v1, s8, v1
|
|
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GFX9-NEXT: global_store_short v2, v0, s[0:1]
|
|
; GFX9-NEXT: global_store_short v2, v1, s[2:3]
|
|
; GFX9-NEXT: s_endpgm
|
|
;
|
|
; GFX10-LABEL: sdiv_i16:
|
|
; GFX10: ; %bb.0:
|
|
; GFX10-NEXT: s_load_dword s0, s[4:5], 0x10
|
|
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GFX10-NEXT: s_bfe_i32 s1, s0, 0x100010
|
|
; GFX10-NEXT: s_sext_i32_i16 s0, s0
|
|
; GFX10-NEXT: s_ashr_i32 s6, s1, 31
|
|
; GFX10-NEXT: s_ashr_i32 s8, s0, 31
|
|
; GFX10-NEXT: s_add_i32 s1, s1, s6
|
|
; GFX10-NEXT: s_add_i32 s0, s0, s8
|
|
; GFX10-NEXT: s_xor_b32 s7, s1, s6
|
|
; GFX10-NEXT: s_xor_b32 s0, s0, s8
|
|
; GFX10-NEXT: v_cvt_f32_u32_e32 v0, s7
|
|
; GFX10-NEXT: s_sub_i32 s1, 0, s7
|
|
; GFX10-NEXT: v_rcp_iflag_f32_e32 v0, v0
|
|
; GFX10-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
|
|
; GFX10-NEXT: v_cvt_u32_f32_e32 v0, v0
|
|
; GFX10-NEXT: v_mul_lo_u32 v1, s1, v0
|
|
; GFX10-NEXT: v_mul_hi_u32 v1, v0, v1
|
|
; GFX10-NEXT: v_add_nc_u32_e32 v0, v0, v1
|
|
; GFX10-NEXT: v_mul_hi_u32 v0, s0, v0
|
|
; GFX10-NEXT: v_mul_lo_u32 v1, v0, s7
|
|
; GFX10-NEXT: v_add_nc_u32_e32 v2, 1, v0
|
|
; GFX10-NEXT: v_sub_nc_u32_e32 v1, s0, v1
|
|
; GFX10-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
|
|
; GFX10-NEXT: s_xor_b32 s4, s8, s6
|
|
; GFX10-NEXT: v_subrev_nc_u32_e32 v3, s7, v1
|
|
; GFX10-NEXT: v_cmp_le_u32_e32 vcc_lo, s7, v1
|
|
; GFX10-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc_lo
|
|
; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc_lo
|
|
; GFX10-NEXT: v_add_nc_u32_e32 v2, 1, v0
|
|
; GFX10-NEXT: v_cmp_le_u32_e32 vcc_lo, s7, v1
|
|
; GFX10-NEXT: v_subrev_nc_u32_e32 v3, s7, v1
|
|
; GFX10-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc_lo
|
|
; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc_lo
|
|
; GFX10-NEXT: v_mov_b32_e32 v2, 0
|
|
; GFX10-NEXT: v_xor_b32_e32 v0, s4, v0
|
|
; GFX10-NEXT: v_xor_b32_e32 v1, s8, v1
|
|
; GFX10-NEXT: v_subrev_nc_u32_e32 v0, s4, v0
|
|
; GFX10-NEXT: v_subrev_nc_u32_e32 v1, s8, v1
|
|
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GFX10-NEXT: global_store_short v2, v0, s[0:1]
|
|
; GFX10-NEXT: global_store_short v2, v1, s[2:3]
|
|
; GFX10-NEXT: s_endpgm
|
|
%div = sdiv i16 %x, %y
|
|
store i16 %div, i16 addrspace(1)* %out0
|
|
%rem = srem i16 %x, %y
|
|
store i16 %rem, i16 addrspace(1)* %out1
|
|
ret void
|
|
}
|
|
|
|
define amdgpu_kernel void @sdivrem_v2i16(<2 x i16> addrspace(1)* %out0, <2 x i16> addrspace(1)* %out1, <2 x i16> %x, <2 x i16> %y) {
; NOTE(review): CHECK lines below are autogenerated by utils/update_llc_test_checks.py;
; regenerate with that script rather than hand-editing them.
; GFX8-LABEL: sdivrem_v2i16:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_load_dword s0, s[4:5], 0x14
; GFX8-NEXT: s_load_dword s8, s[4:5], 0x10
; GFX8-NEXT: s_mov_b32 s9, 0x100010
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: s_sext_i32_i16 s1, s0
; GFX8-NEXT: s_ashr_i32 s2, s1, 31
; GFX8-NEXT: s_add_i32 s1, s1, s2
; GFX8-NEXT: s_xor_b32 s3, s1, s2
; GFX8-NEXT: v_cvt_f32_u32_e32 v0, s3
; GFX8-NEXT: s_sub_i32 s6, 0, s3
; GFX8-NEXT: s_sext_i32_i16 s1, s8
; GFX8-NEXT: s_bfe_i32 s0, s0, s9
; GFX8-NEXT: v_rcp_iflag_f32_e32 v0, v0
; GFX8-NEXT: s_ashr_i32 s10, s1, 31
; GFX8-NEXT: s_ashr_i32 s11, s0, 31
; GFX8-NEXT: s_add_i32 s1, s1, s10
; GFX8-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX8-NEXT: v_cvt_u32_f32_e32 v0, v0
; GFX8-NEXT: s_add_i32 s0, s0, s11
; GFX8-NEXT: s_xor_b32 s1, s1, s10
; GFX8-NEXT: s_xor_b32 s12, s0, s11
; GFX8-NEXT: v_mul_lo_u32 v1, s6, v0
; GFX8-NEXT: v_cvt_f32_u32_e32 v2, s12
; GFX8-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x0
; GFX8-NEXT: v_mul_hi_u32 v1, v0, v1
; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v1
; GFX8-NEXT: v_mul_hi_u32 v0, s1, v0
; GFX8-NEXT: v_rcp_iflag_f32_e32 v1, v2
; GFX8-NEXT: v_mul_lo_u32 v2, v0, s3
; GFX8-NEXT: v_add_u32_e32 v3, vcc, 1, v0
; GFX8-NEXT: v_mul_f32_e32 v1, 0x4f7ffffe, v1
; GFX8-NEXT: v_sub_u32_e32 v2, vcc, s1, v2
; GFX8-NEXT: v_cmp_le_u32_e32 vcc, s3, v2
; GFX8-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc
; GFX8-NEXT: v_subrev_u32_e64 v3, s[0:1], s3, v2
; GFX8-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc
; GFX8-NEXT: v_add_u32_e32 v3, vcc, 1, v0
; GFX8-NEXT: v_cvt_u32_f32_e32 v1, v1
; GFX8-NEXT: v_cmp_le_u32_e32 vcc, s3, v2
; GFX8-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc
; GFX8-NEXT: v_subrev_u32_e64 v3, s[0:1], s3, v2
; GFX8-NEXT: s_sub_i32 s1, 0, s12
; GFX8-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc
; GFX8-NEXT: v_mul_lo_u32 v3, s1, v1
; GFX8-NEXT: s_bfe_i32 s1, s8, s9
; GFX8-NEXT: s_xor_b32 s0, s10, s2
; GFX8-NEXT: s_ashr_i32 s2, s1, 31
; GFX8-NEXT: v_mul_hi_u32 v3, v1, v3
; GFX8-NEXT: s_add_i32 s1, s1, s2
; GFX8-NEXT: s_xor_b32 s1, s1, s2
; GFX8-NEXT: v_xor_b32_e32 v0, s0, v0
; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v3
; GFX8-NEXT: v_mul_hi_u32 v1, s1, v1
; GFX8-NEXT: v_xor_b32_e32 v2, s10, v2
; GFX8-NEXT: v_subrev_u32_e32 v0, vcc, s0, v0
; GFX8-NEXT: v_mul_lo_u32 v3, v1, s12
; GFX8-NEXT: v_subrev_u32_e32 v2, vcc, s10, v2
; GFX8-NEXT: v_add_u32_e32 v4, vcc, 1, v1
; GFX8-NEXT: v_sub_u32_e32 v3, vcc, s1, v3
; GFX8-NEXT: v_cmp_le_u32_e32 vcc, s12, v3
; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc
; GFX8-NEXT: v_subrev_u32_e64 v4, s[0:1], s12, v3
; GFX8-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc
; GFX8-NEXT: v_add_u32_e32 v4, vcc, 1, v1
; GFX8-NEXT: v_cmp_le_u32_e32 vcc, s12, v3
; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc
; GFX8-NEXT: v_subrev_u32_e64 v4, s[0:1], s12, v3
; GFX8-NEXT: s_xor_b32 s0, s2, s11
; GFX8-NEXT: v_xor_b32_e32 v1, s0, v1
; GFX8-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc
; GFX8-NEXT: v_subrev_u32_e32 v1, vcc, s0, v1
; GFX8-NEXT: s_mov_b32 s0, 0xffff
; GFX8-NEXT: v_xor_b32_e32 v3, s2, v3
; GFX8-NEXT: v_and_b32_e32 v1, s0, v1
; GFX8-NEXT: v_subrev_u32_e32 v3, vcc, s2, v3
; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; GFX8-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX8-NEXT: v_and_b32_e32 v0, s0, v3
; GFX8-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; GFX8-NEXT: v_or_b32_sdwa v2, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v0, s4
; GFX8-NEXT: v_mov_b32_e32 v1, s5
; GFX8-NEXT: flat_store_dword v[0:1], v4
; GFX8-NEXT: v_mov_b32_e32 v0, s6
; GFX8-NEXT: v_mov_b32_e32 v1, s7
; GFX8-NEXT: flat_store_dword v[0:1], v2
; GFX8-NEXT: s_endpgm
;
; GFX9-LABEL: sdivrem_v2i16:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dword s6, s[4:5], 0x14
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_sext_i32_i16 s0, s6
; GFX9-NEXT: s_ashr_i32 s7, s0, 31
; GFX9-NEXT: s_add_i32 s0, s0, s7
; GFX9-NEXT: s_xor_b32 s8, s0, s7
; GFX9-NEXT: v_cvt_f32_u32_e32 v0, s8
; GFX9-NEXT: s_load_dword s9, s[4:5], 0x10
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; GFX9-NEXT: s_mov_b32 s4, 0x100010
; GFX9-NEXT: s_bfe_i32 s6, s6, s4
; GFX9-NEXT: v_rcp_iflag_f32_e32 v0, v0
; GFX9-NEXT: s_ashr_i32 s10, s6, 31
; GFX9-NEXT: s_add_i32 s6, s6, s10
; GFX9-NEXT: s_sub_i32 s11, 0, s8
; GFX9-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX9-NEXT: v_cvt_u32_f32_e32 v0, v0
; GFX9-NEXT: s_xor_b32 s6, s6, s10
; GFX9-NEXT: v_cvt_f32_u32_e32 v2, s6
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_sext_i32_i16 s5, s9
; GFX9-NEXT: v_mul_lo_u32 v1, s11, v0
; GFX9-NEXT: s_ashr_i32 s11, s5, 31
; GFX9-NEXT: v_rcp_iflag_f32_e32 v2, v2
; GFX9-NEXT: s_add_i32 s5, s5, s11
; GFX9-NEXT: v_mul_hi_u32 v1, v0, v1
; GFX9-NEXT: s_xor_b32 s5, s5, s11
; GFX9-NEXT: s_bfe_i32 s4, s9, s4
; GFX9-NEXT: s_sub_i32 s9, 0, s6
; GFX9-NEXT: v_add_u32_e32 v0, v0, v1
; GFX9-NEXT: v_mul_f32_e32 v1, 0x4f7ffffe, v2
; GFX9-NEXT: v_cvt_u32_f32_e32 v1, v1
; GFX9-NEXT: v_mul_hi_u32 v0, s5, v0
; GFX9-NEXT: s_xor_b32 s7, s11, s7
; GFX9-NEXT: v_mul_lo_u32 v3, s9, v1
; GFX9-NEXT: v_mul_lo_u32 v2, v0, s8
; GFX9-NEXT: v_add_u32_e32 v4, 1, v0
; GFX9-NEXT: v_mul_hi_u32 v3, v1, v3
; GFX9-NEXT: v_sub_u32_e32 v2, s5, v2
; GFX9-NEXT: s_ashr_i32 s5, s4, 31
; GFX9-NEXT: s_add_i32 s4, s4, s5
; GFX9-NEXT: s_xor_b32 s4, s4, s5
; GFX9-NEXT: v_add_u32_e32 v1, v1, v3
; GFX9-NEXT: v_mul_hi_u32 v1, s4, v1
; GFX9-NEXT: v_cmp_le_u32_e32 vcc, s8, v2
; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v4, vcc
; GFX9-NEXT: v_subrev_u32_e32 v4, s8, v2
; GFX9-NEXT: v_mul_lo_u32 v3, v1, s6
; GFX9-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc
; GFX9-NEXT: v_add_u32_e32 v4, 1, v0
; GFX9-NEXT: v_cmp_le_u32_e32 vcc, s8, v2
; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v4, vcc
; GFX9-NEXT: v_subrev_u32_e32 v4, s8, v2
; GFX9-NEXT: v_sub_u32_e32 v3, s4, v3
; GFX9-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc
; GFX9-NEXT: v_add_u32_e32 v4, 1, v1
; GFX9-NEXT: v_cmp_le_u32_e32 vcc, s6, v3
; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc
; GFX9-NEXT: v_subrev_u32_e32 v4, s6, v3
; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc
; GFX9-NEXT: v_add_u32_e32 v4, 1, v1
; GFX9-NEXT: v_cmp_le_u32_e32 vcc, s6, v3
; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc
; GFX9-NEXT: v_subrev_u32_e32 v4, s6, v3
; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc
; GFX9-NEXT: s_xor_b32 s4, s5, s10
; GFX9-NEXT: v_xor_b32_e32 v0, s7, v0
; GFX9-NEXT: v_xor_b32_e32 v2, s11, v2
; GFX9-NEXT: v_xor_b32_e32 v1, s4, v1
; GFX9-NEXT: v_xor_b32_e32 v3, s5, v3
; GFX9-NEXT: v_subrev_u32_e32 v0, s7, v0
; GFX9-NEXT: v_subrev_u32_e32 v2, s11, v2
; GFX9-NEXT: v_sub_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_sub_u32_sdwa v3, v3, s5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_mov_b32_e32 v4, 0xffff
; GFX9-NEXT: v_and_or_b32 v0, v0, v4, v1
; GFX9-NEXT: v_and_or_b32 v1, v2, v4, v3
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: global_store_dword v2, v0, s[0:1]
; GFX9-NEXT: global_store_dword v2, v1, s[2:3]
; GFX9-NEXT: s_endpgm
;
; GFX10-LABEL: sdivrem_v2i16:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_load_dword s0, s[4:5], 0x14
; GFX10-NEXT: s_mov_b32 s1, 0x100010
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_sext_i32_i16 s2, s0
; GFX10-NEXT: s_bfe_i32 s0, s0, s1
; GFX10-NEXT: s_ashr_i32 s3, s2, 31
; GFX10-NEXT: s_ashr_i32 s8, s0, 31
; GFX10-NEXT: s_add_i32 s2, s2, s3
; GFX10-NEXT: s_add_i32 s0, s0, s8
; GFX10-NEXT: s_xor_b32 s2, s2, s3
; GFX10-NEXT: s_xor_b32 s9, s0, s8
; GFX10-NEXT: v_cvt_f32_u32_e32 v0, s2
; GFX10-NEXT: v_cvt_f32_u32_e32 v1, s9
; GFX10-NEXT: s_load_dword s0, s[4:5], 0x10
; GFX10-NEXT: s_sub_i32 s6, 0, s2
; GFX10-NEXT: s_sub_i32 s7, 0, s9
; GFX10-NEXT: v_rcp_iflag_f32_e32 v0, v0
; GFX10-NEXT: v_rcp_iflag_f32_e32 v1, v1
; GFX10-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX10-NEXT: v_mul_f32_e32 v1, 0x4f7ffffe, v1
; GFX10-NEXT: v_cvt_u32_f32_e32 v0, v0
; GFX10-NEXT: v_cvt_u32_f32_e32 v1, v1
; GFX10-NEXT: v_mul_lo_u32 v2, s6, v0
; GFX10-NEXT: v_mul_lo_u32 v3, s7, v1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_sext_i32_i16 s6, s0
; GFX10-NEXT: s_bfe_i32 s0, s0, s1
; GFX10-NEXT: s_ashr_i32 s1, s6, 31
; GFX10-NEXT: s_ashr_i32 s10, s0, 31
; GFX10-NEXT: s_add_i32 s6, s6, s1
; GFX10-NEXT: s_add_i32 s0, s0, s10
; GFX10-NEXT: v_mul_hi_u32 v2, v0, v2
; GFX10-NEXT: v_mul_hi_u32 v3, v1, v3
; GFX10-NEXT: s_xor_b32 s6, s6, s1
; GFX10-NEXT: s_xor_b32 s0, s0, s10
; GFX10-NEXT: v_add_nc_u32_e32 v0, v0, v2
; GFX10-NEXT: v_add_nc_u32_e32 v1, v1, v3
; GFX10-NEXT: v_mul_hi_u32 v0, s6, v0
; GFX10-NEXT: v_mul_hi_u32 v1, s0, v1
; GFX10-NEXT: v_mul_lo_u32 v2, v0, s2
; GFX10-NEXT: v_mul_lo_u32 v3, v1, s9
; GFX10-NEXT: v_add_nc_u32_e32 v4, 1, v0
; GFX10-NEXT: v_add_nc_u32_e32 v5, 1, v1
; GFX10-NEXT: v_sub_nc_u32_e32 v2, s6, v2
; GFX10-NEXT: v_sub_nc_u32_e32 v3, s0, v3
; GFX10-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x0
; GFX10-NEXT: v_subrev_nc_u32_e32 v6, s2, v2
; GFX10-NEXT: v_cmp_le_u32_e32 vcc_lo, s9, v3
; GFX10-NEXT: v_cmp_le_u32_e64 s0, s2, v2
; GFX10-NEXT: v_subrev_nc_u32_e32 v7, s9, v3
; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, v4, s0
; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, v6, s0
; GFX10-NEXT: v_cndmask_b32_e32 v3, v3, v7, vcc_lo
; GFX10-NEXT: v_add_nc_u32_e32 v5, 1, v1
; GFX10-NEXT: v_add_nc_u32_e32 v4, 1, v0
; GFX10-NEXT: v_cmp_le_u32_e64 s0, s2, v2
; GFX10-NEXT: v_cmp_le_u32_e32 vcc_lo, s9, v3
; GFX10-NEXT: v_subrev_nc_u32_e32 v6, s2, v2
; GFX10-NEXT: v_subrev_nc_u32_e32 v7, s9, v3
; GFX10-NEXT: s_xor_b32 s2, s1, s3
; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, v4, s0
; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, v6, s0
; GFX10-NEXT: v_cndmask_b32_e32 v3, v3, v7, vcc_lo
; GFX10-NEXT: s_xor_b32 s0, s10, s8
; GFX10-NEXT: v_xor_b32_e32 v0, s2, v0
; GFX10-NEXT: v_xor_b32_e32 v1, s0, v1
; GFX10-NEXT: v_xor_b32_e32 v2, s1, v2
; GFX10-NEXT: v_xor_b32_e32 v3, s10, v3
; GFX10-NEXT: v_mov_b32_e32 v4, 0xffff
; GFX10-NEXT: v_subrev_nc_u32_e32 v0, s2, v0
; GFX10-NEXT: v_sub_nc_u32_sdwa v1, v1, s0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX10-NEXT: v_subrev_nc_u32_e32 v2, s1, v2
; GFX10-NEXT: v_sub_nc_u32_sdwa v3, v3, s10 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX10-NEXT: v_and_or_b32 v0, v0, v4, v1
; GFX10-NEXT: v_mov_b32_e32 v1, 0
; GFX10-NEXT: v_and_or_b32 v2, v2, v4, v3
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: global_store_dword v1, v0, s[4:5]
; GFX10-NEXT: global_store_dword v1, v2, s[6:7]
; GFX10-NEXT: s_endpgm
%div = sdiv <2 x i16> %x, %y
store <2 x i16> %div, <2 x i16> addrspace(1)* %out0
%rem = srem <2 x i16> %x, %y
store <2 x i16> %rem, <2 x i16> addrspace(1)* %out1
ret void
}
define amdgpu_kernel void @sdivrem_i3(i3 addrspace(1)* %out0, i3 addrspace(1)* %out1, i3 %x, i3 %y) {
; NOTE(review): CHECK lines below are autogenerated by utils/update_llc_test_checks.py;
; regenerate with that script rather than hand-editing them.
; GFX8-LABEL: sdivrem_i3:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_load_dword s6, s[4:5], 0x10
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: s_bfe_i32 s0, s6, 0x30008
; GFX8-NEXT: s_ashr_i32 s7, s0, 31
; GFX8-NEXT: s_add_i32 s0, s0, s7
; GFX8-NEXT: s_xor_b32 s8, s0, s7
; GFX8-NEXT: v_cvt_f32_u32_e32 v0, s8
; GFX8-NEXT: s_sub_i32 s0, 0, s8
; GFX8-NEXT: v_rcp_iflag_f32_e32 v0, v0
; GFX8-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX8-NEXT: v_cvt_u32_f32_e32 v0, v0
; GFX8-NEXT: v_mul_lo_u32 v1, s0, v0
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; GFX8-NEXT: s_bfe_i32 s4, s6, 0x30000
; GFX8-NEXT: s_ashr_i32 s5, s4, 31
; GFX8-NEXT: v_mul_hi_u32 v1, v0, v1
; GFX8-NEXT: s_add_i32 s4, s4, s5
; GFX8-NEXT: s_xor_b32 s4, s4, s5
; GFX8-NEXT: s_xor_b32 s6, s5, s7
; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v1
; GFX8-NEXT: v_mul_hi_u32 v2, s4, v0
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v0, s0
; GFX8-NEXT: v_mov_b32_e32 v1, s1
; GFX8-NEXT: v_mul_lo_u32 v3, v2, s8
; GFX8-NEXT: v_add_u32_e32 v4, vcc, 1, v2
; GFX8-NEXT: v_sub_u32_e32 v3, vcc, s4, v3
; GFX8-NEXT: v_cmp_le_u32_e32 vcc, s8, v3
; GFX8-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc
; GFX8-NEXT: v_subrev_u32_e64 v4, s[0:1], s8, v3
; GFX8-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc
; GFX8-NEXT: v_add_u32_e32 v4, vcc, 1, v2
; GFX8-NEXT: v_cmp_le_u32_e32 vcc, s8, v3
; GFX8-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc
; GFX8-NEXT: v_subrev_u32_e64 v4, s[0:1], s8, v3
; GFX8-NEXT: v_xor_b32_e32 v2, s6, v2
; GFX8-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc
; GFX8-NEXT: v_subrev_u32_e32 v2, vcc, s6, v2
; GFX8-NEXT: v_xor_b32_e32 v3, s5, v3
; GFX8-NEXT: v_and_b32_e32 v2, 7, v2
; GFX8-NEXT: v_subrev_u32_e32 v3, vcc, s5, v3
; GFX8-NEXT: flat_store_byte v[0:1], v2
; GFX8-NEXT: v_mov_b32_e32 v0, s2
; GFX8-NEXT: v_and_b32_e32 v2, 7, v3
; GFX8-NEXT: v_mov_b32_e32 v1, s3
; GFX8-NEXT: flat_store_byte v[0:1], v2
; GFX8-NEXT: s_endpgm
;
; GFX9-LABEL: sdivrem_i3:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dword s0, s[4:5], 0x10
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_bfe_i32 s1, s0, 0x30008
; GFX9-NEXT: s_ashr_i32 s6, s1, 31
; GFX9-NEXT: s_add_i32 s1, s1, s6
; GFX9-NEXT: s_xor_b32 s7, s1, s6
; GFX9-NEXT: v_cvt_f32_u32_e32 v0, s7
; GFX9-NEXT: s_sub_i32 s1, 0, s7
; GFX9-NEXT: s_bfe_i32 s0, s0, 0x30000
; GFX9-NEXT: s_ashr_i32 s8, s0, 31
; GFX9-NEXT: v_rcp_iflag_f32_e32 v0, v0
; GFX9-NEXT: s_add_i32 s0, s0, s8
; GFX9-NEXT: s_xor_b32 s9, s0, s8
; GFX9-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX9-NEXT: v_cvt_u32_f32_e32 v0, v0
; GFX9-NEXT: v_mul_lo_u32 v1, s1, v0
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; GFX9-NEXT: s_xor_b32 s4, s8, s6
; GFX9-NEXT: v_mul_hi_u32 v1, v0, v1
; GFX9-NEXT: v_add_u32_e32 v0, v0, v1
; GFX9-NEXT: v_mul_hi_u32 v0, s9, v0
; GFX9-NEXT: v_mul_lo_u32 v1, v0, s7
; GFX9-NEXT: v_add_u32_e32 v3, 1, v0
; GFX9-NEXT: v_sub_u32_e32 v1, s9, v1
; GFX9-NEXT: v_cmp_le_u32_e32 vcc, s7, v1
; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc
; GFX9-NEXT: v_subrev_u32_e32 v3, s7, v1
; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
; GFX9-NEXT: v_add_u32_e32 v3, 1, v0
; GFX9-NEXT: v_cmp_le_u32_e32 vcc, s7, v1
; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc
; GFX9-NEXT: v_subrev_u32_e32 v3, s7, v1
; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
; GFX9-NEXT: v_xor_b32_e32 v0, s4, v0
; GFX9-NEXT: v_subrev_u32_e32 v0, s4, v0
; GFX9-NEXT: v_xor_b32_e32 v1, s8, v1
; GFX9-NEXT: v_subrev_u32_e32 v1, s8, v1
; GFX9-NEXT: v_and_b32_e32 v0, 7, v0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_store_byte v2, v0, s[0:1]
; GFX9-NEXT: v_and_b32_e32 v0, 7, v1
; GFX9-NEXT: global_store_byte v2, v0, s[2:3]
; GFX9-NEXT: s_endpgm
;
; GFX10-LABEL: sdivrem_i3:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_load_dword s0, s[4:5], 0x10
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_bfe_i32 s1, s0, 0x30008
; GFX10-NEXT: s_bfe_i32 s0, s0, 0x30000
; GFX10-NEXT: s_ashr_i32 s6, s1, 31
; GFX10-NEXT: s_ashr_i32 s7, s0, 31
; GFX10-NEXT: s_add_i32 s1, s1, s6
; GFX10-NEXT: s_add_i32 s0, s0, s7
; GFX10-NEXT: s_xor_b32 s1, s1, s6
; GFX10-NEXT: s_xor_b32 s0, s0, s7
; GFX10-NEXT: v_cvt_f32_u32_e32 v0, s1
; GFX10-NEXT: s_sub_i32 s2, 0, s1
; GFX10-NEXT: v_rcp_iflag_f32_e32 v0, v0
; GFX10-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX10-NEXT: v_cvt_u32_f32_e32 v0, v0
; GFX10-NEXT: v_mul_lo_u32 v1, s2, v0
; GFX10-NEXT: v_mul_hi_u32 v1, v0, v1
; GFX10-NEXT: v_add_nc_u32_e32 v0, v0, v1
; GFX10-NEXT: v_mul_hi_u32 v0, s0, v0
; GFX10-NEXT: v_mul_lo_u32 v1, v0, s1
; GFX10-NEXT: v_add_nc_u32_e32 v2, 1, v0
; GFX10-NEXT: v_sub_nc_u32_e32 v1, s0, v1
; GFX10-NEXT: v_subrev_nc_u32_e32 v3, s1, v1
; GFX10-NEXT: v_cmp_le_u32_e32 vcc_lo, s1, v1
; GFX10-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc_lo
; GFX10-NEXT: v_add_nc_u32_e32 v2, 1, v0
; GFX10-NEXT: v_cmp_le_u32_e32 vcc_lo, s1, v1
; GFX10-NEXT: v_subrev_nc_u32_e32 v3, s1, v1
; GFX10-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; GFX10-NEXT: s_xor_b32 s4, s7, s6
; GFX10-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc_lo
; GFX10-NEXT: v_mov_b32_e32 v2, 0
; GFX10-NEXT: v_xor_b32_e32 v0, s4, v0
; GFX10-NEXT: v_xor_b32_e32 v1, s7, v1
; GFX10-NEXT: v_subrev_nc_u32_e32 v0, s4, v0
; GFX10-NEXT: v_subrev_nc_u32_e32 v1, s7, v1
; GFX10-NEXT: v_and_b32_e32 v0, 7, v0
; GFX10-NEXT: v_and_b32_e32 v1, 7, v1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: global_store_byte v2, v0, s[0:1]
; GFX10-NEXT: global_store_byte v2, v1, s[2:3]
; GFX10-NEXT: s_endpgm
%div = sdiv i3 %x, %y
store i3 %div, i3 addrspace(1)* %out0
%rem = srem i3 %x, %y
store i3 %rem, i3 addrspace(1)* %out1
ret void
}
define amdgpu_kernel void @sdivrem_i27(i27 addrspace(1)* %out0, i27 addrspace(1)* %out1, i27 %x, i27 %y) {
; NOTE(review): CHECK lines below are autogenerated by utils/update_llc_test_checks.py;
; regenerate with that script rather than hand-editing them.
; GFX8-LABEL: sdivrem_i27:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x10
; GFX8-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x0
; GFX8-NEXT: s_mov_b32 s9, 0x7ffffff
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: s_bfe_i32 s1, s1, 0x1b0000
; GFX8-NEXT: s_ashr_i32 s2, s1, 31
; GFX8-NEXT: s_add_i32 s1, s1, s2
; GFX8-NEXT: s_xor_b32 s3, s1, s2
; GFX8-NEXT: v_cvt_f32_u32_e32 v0, s3
; GFX8-NEXT: s_sub_i32 s1, 0, s3
; GFX8-NEXT: s_bfe_i32 s0, s0, 0x1b0000
; GFX8-NEXT: s_ashr_i32 s8, s0, 31
; GFX8-NEXT: v_rcp_iflag_f32_e32 v0, v0
; GFX8-NEXT: s_add_i32 s0, s0, s8
; GFX8-NEXT: s_xor_b32 s0, s0, s8
; GFX8-NEXT: s_xor_b32 s2, s8, s2
; GFX8-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX8-NEXT: v_cvt_u32_f32_e32 v0, v0
; GFX8-NEXT: v_mul_lo_u32 v1, s1, v0
; GFX8-NEXT: v_mul_hi_u32 v1, v0, v1
; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v1
; GFX8-NEXT: v_mul_hi_u32 v0, s0, v0
; GFX8-NEXT: v_mul_lo_u32 v1, v0, s3
; GFX8-NEXT: v_add_u32_e32 v2, vcc, 1, v0
; GFX8-NEXT: v_sub_u32_e32 v1, vcc, s0, v1
; GFX8-NEXT: v_cmp_le_u32_e32 vcc, s3, v1
; GFX8-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
; GFX8-NEXT: v_subrev_u32_e64 v2, s[0:1], s3, v1
; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc
; GFX8-NEXT: v_add_u32_e32 v2, vcc, 1, v0
; GFX8-NEXT: v_cmp_le_u32_e32 vcc, s3, v1
; GFX8-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
; GFX8-NEXT: v_subrev_u32_e64 v2, s[0:1], s3, v1
; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc
; GFX8-NEXT: v_xor_b32_e32 v0, s2, v0
; GFX8-NEXT: v_subrev_u32_e32 v0, vcc, s2, v0
; GFX8-NEXT: v_xor_b32_e32 v1, s8, v1
; GFX8-NEXT: v_subrev_u32_e32 v2, vcc, s8, v1
; GFX8-NEXT: v_and_b32_e32 v3, s9, v0
; GFX8-NEXT: v_mov_b32_e32 v0, s4
; GFX8-NEXT: v_mov_b32_e32 v1, s5
; GFX8-NEXT: flat_store_dword v[0:1], v3
; GFX8-NEXT: v_mov_b32_e32 v0, s6
; GFX8-NEXT: v_and_b32_e32 v2, s9, v2
; GFX8-NEXT: v_mov_b32_e32 v1, s7
; GFX8-NEXT: flat_store_dword v[0:1], v2
; GFX8-NEXT: s_endpgm
;
; GFX9-LABEL: sdivrem_i27:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x10
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_bfe_i32 s1, s1, 0x1b0000
; GFX9-NEXT: s_ashr_i32 s6, s1, 31
; GFX9-NEXT: s_add_i32 s1, s1, s6
; GFX9-NEXT: s_xor_b32 s7, s1, s6
; GFX9-NEXT: v_cvt_f32_u32_e32 v0, s7
; GFX9-NEXT: s_sub_i32 s1, 0, s7
; GFX9-NEXT: s_bfe_i32 s0, s0, 0x1b0000
; GFX9-NEXT: s_ashr_i32 s8, s0, 31
; GFX9-NEXT: v_rcp_iflag_f32_e32 v0, v0
; GFX9-NEXT: s_add_i32 s0, s0, s8
; GFX9-NEXT: s_xor_b32 s9, s0, s8
; GFX9-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX9-NEXT: v_cvt_u32_f32_e32 v0, v0
; GFX9-NEXT: v_mul_lo_u32 v1, s1, v0
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; GFX9-NEXT: s_xor_b32 s5, s8, s6
; GFX9-NEXT: s_mov_b32 s4, 0x7ffffff
; GFX9-NEXT: v_mul_hi_u32 v1, v0, v1
; GFX9-NEXT: v_add_u32_e32 v0, v0, v1
; GFX9-NEXT: v_mul_hi_u32 v0, s9, v0
; GFX9-NEXT: v_mul_lo_u32 v1, v0, s7
; GFX9-NEXT: v_add_u32_e32 v3, 1, v0
; GFX9-NEXT: v_sub_u32_e32 v1, s9, v1
; GFX9-NEXT: v_cmp_le_u32_e32 vcc, s7, v1
; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc
; GFX9-NEXT: v_subrev_u32_e32 v3, s7, v1
; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
; GFX9-NEXT: v_add_u32_e32 v3, 1, v0
; GFX9-NEXT: v_cmp_le_u32_e32 vcc, s7, v1
; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc
; GFX9-NEXT: v_subrev_u32_e32 v3, s7, v1
; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
; GFX9-NEXT: v_xor_b32_e32 v0, s5, v0
; GFX9-NEXT: v_subrev_u32_e32 v0, s5, v0
; GFX9-NEXT: v_xor_b32_e32 v1, s8, v1
; GFX9-NEXT: v_subrev_u32_e32 v1, s8, v1
; GFX9-NEXT: v_and_b32_e32 v0, s4, v0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_store_dword v2, v0, s[0:1]
; GFX9-NEXT: v_and_b32_e32 v0, s4, v1
; GFX9-NEXT: global_store_dword v2, v0, s[2:3]
; GFX9-NEXT: s_endpgm
;
; GFX10-LABEL: sdivrem_i27:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x10
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_bfe_i32 s1, s1, 0x1b0000
; GFX10-NEXT: s_bfe_i32 s0, s0, 0x1b0000
; GFX10-NEXT: s_ashr_i32 s6, s1, 31
; GFX10-NEXT: s_ashr_i32 s7, s0, 31
; GFX10-NEXT: s_add_i32 s1, s1, s6
; GFX10-NEXT: s_add_i32 s0, s0, s7
; GFX10-NEXT: s_xor_b32 s1, s1, s6
; GFX10-NEXT: s_xor_b32 s0, s0, s7
; GFX10-NEXT: v_cvt_f32_u32_e32 v0, s1
; GFX10-NEXT: s_sub_i32 s2, 0, s1
; GFX10-NEXT: v_rcp_iflag_f32_e32 v0, v0
; GFX10-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX10-NEXT: v_cvt_u32_f32_e32 v0, v0
; GFX10-NEXT: v_mul_lo_u32 v1, s2, v0
; GFX10-NEXT: v_mul_hi_u32 v1, v0, v1
; GFX10-NEXT: v_add_nc_u32_e32 v0, v0, v1
; GFX10-NEXT: v_mul_hi_u32 v0, s0, v0
; GFX10-NEXT: v_mul_lo_u32 v1, v0, s1
; GFX10-NEXT: v_add_nc_u32_e32 v2, 1, v0
; GFX10-NEXT: v_sub_nc_u32_e32 v1, s0, v1
; GFX10-NEXT: v_subrev_nc_u32_e32 v3, s1, v1
; GFX10-NEXT: v_cmp_le_u32_e32 vcc_lo, s1, v1
; GFX10-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc_lo
; GFX10-NEXT: v_add_nc_u32_e32 v2, 1, v0
; GFX10-NEXT: v_cmp_le_u32_e32 vcc_lo, s1, v1
; GFX10-NEXT: v_subrev_nc_u32_e32 v3, s1, v1
; GFX10-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; GFX10-NEXT: s_xor_b32 s4, s7, s6
; GFX10-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc_lo
; GFX10-NEXT: v_mov_b32_e32 v2, 0
; GFX10-NEXT: v_xor_b32_e32 v0, s4, v0
; GFX10-NEXT: v_xor_b32_e32 v1, s7, v1
; GFX10-NEXT: v_subrev_nc_u32_e32 v0, s4, v0
; GFX10-NEXT: v_subrev_nc_u32_e32 v1, s7, v1
; GFX10-NEXT: s_mov_b32 s4, 0x7ffffff
; GFX10-NEXT: v_and_b32_e32 v0, s4, v0
; GFX10-NEXT: v_and_b32_e32 v1, s4, v1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: global_store_dword v2, v0, s[0:1]
; GFX10-NEXT: global_store_dword v2, v1, s[2:3]
; GFX10-NEXT: s_endpgm
%div = sdiv i27 %x, %y
store i27 %div, i27 addrspace(1)* %out0
%rem = srem i27 %x, %y
store i27 %rem, i27 addrspace(1)* %out1
ret void
}