; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 < %s | FileCheck -check-prefix=GFX9 %s

; 64-bit divides and rems should be split into a fast and slow path
; where the fast path uses a 32-bit operation.
|
define i64 @sdiv64(i64 %a, i64 %b) {
; GFX9-LABEL: sdiv64:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT:    v_or_b32_e32 v5, v1, v3
; GFX9-NEXT:    v_mov_b32_e32 v4, 0
; GFX9-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[4:5]
; GFX9-NEXT:    ; implicit-def: $vgpr4_vgpr5
; GFX9-NEXT:    s_and_saveexec_b64 s[4:5], vcc
; GFX9-NEXT:    s_xor_b64 s[6:7], exec, s[4:5]
; GFX9-NEXT:    s_cbranch_execz BB0_2
; GFX9-NEXT:  ; %bb.1:
; GFX9-NEXT:    v_ashrrev_i32_e32 v4, 31, v3
; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, v2, v4
; GFX9-NEXT:    v_addc_co_u32_e32 v3, vcc, v3, v4, vcc
; GFX9-NEXT:    v_xor_b32_e32 v2, v2, v4
; GFX9-NEXT:    v_xor_b32_e32 v3, v3, v4
; GFX9-NEXT:    v_cvt_f32_u32_e32 v5, v2
; GFX9-NEXT:    v_cvt_f32_u32_e32 v6, v3
; GFX9-NEXT:    v_sub_co_u32_e32 v7, vcc, 0, v2
; GFX9-NEXT:    v_subb_co_u32_e32 v8, vcc, 0, v3, vcc
; GFX9-NEXT:    v_mov_b32_e32 v15, 0
; GFX9-NEXT:    v_mac_f32_e32 v5, 0x4f800000, v6
; GFX9-NEXT:    v_rcp_f32_e32 v5, v5
; GFX9-NEXT:    v_mov_b32_e32 v14, 0
; GFX9-NEXT:    v_mul_f32_e32 v5, 0x5f7ffffc, v5
; GFX9-NEXT:    v_mul_f32_e32 v6, 0x2f800000, v5
; GFX9-NEXT:    v_trunc_f32_e32 v6, v6
; GFX9-NEXT:    v_mac_f32_e32 v5, 0xcf800000, v6
; GFX9-NEXT:    v_cvt_u32_f32_e32 v5, v5
; GFX9-NEXT:    v_cvt_u32_f32_e32 v6, v6
; GFX9-NEXT:    v_mul_lo_u32 v9, v8, v5
; GFX9-NEXT:    v_mul_hi_u32 v10, v7, v5
; GFX9-NEXT:    v_mul_lo_u32 v11, v7, v6
; GFX9-NEXT:    v_mul_lo_u32 v12, v7, v5
; GFX9-NEXT:    v_add3_u32 v9, v10, v11, v9
; GFX9-NEXT:    v_mul_lo_u32 v11, v5, v9
; GFX9-NEXT:    v_mul_hi_u32 v13, v5, v12
; GFX9-NEXT:    v_mul_hi_u32 v10, v5, v9
; GFX9-NEXT:    v_mul_hi_u32 v16, v6, v9
; GFX9-NEXT:    v_mul_lo_u32 v9, v6, v9
; GFX9-NEXT:    v_add_co_u32_e32 v11, vcc, v13, v11
; GFX9-NEXT:    v_mul_lo_u32 v13, v6, v12
; GFX9-NEXT:    v_mul_hi_u32 v12, v6, v12
; GFX9-NEXT:    v_addc_co_u32_e32 v10, vcc, v15, v10, vcc
; GFX9-NEXT:    v_add_co_u32_e32 v11, vcc, v11, v13
; GFX9-NEXT:    v_addc_co_u32_e32 v10, vcc, v10, v12, vcc
; GFX9-NEXT:    v_addc_co_u32_e32 v11, vcc, v16, v14, vcc
; GFX9-NEXT:    v_add_co_u32_e32 v9, vcc, v10, v9
; GFX9-NEXT:    v_add_co_u32_e64 v5, s[4:5], v5, v9
; GFX9-NEXT:    v_addc_co_u32_e32 v10, vcc, v15, v11, vcc
; GFX9-NEXT:    v_addc_co_u32_e64 v9, vcc, v6, v10, s[4:5]
; GFX9-NEXT:    v_mul_lo_u32 v11, v7, v9
; GFX9-NEXT:    v_mul_hi_u32 v12, v7, v5
; GFX9-NEXT:    v_mul_lo_u32 v8, v8, v5
; GFX9-NEXT:    v_mul_lo_u32 v7, v7, v5
; GFX9-NEXT:    v_add_u32_e32 v6, v6, v10
; GFX9-NEXT:    v_add3_u32 v8, v12, v11, v8
; GFX9-NEXT:    v_mul_lo_u32 v13, v5, v8
; GFX9-NEXT:    v_mul_hi_u32 v16, v5, v7
; GFX9-NEXT:    v_mul_hi_u32 v17, v5, v8
; GFX9-NEXT:    v_mul_hi_u32 v12, v9, v7
; GFX9-NEXT:    v_mul_lo_u32 v7, v9, v7
; GFX9-NEXT:    v_add_co_u32_e32 v13, vcc, v16, v13
; GFX9-NEXT:    v_mul_hi_u32 v11, v9, v8
; GFX9-NEXT:    v_addc_co_u32_e32 v16, vcc, v15, v17, vcc
; GFX9-NEXT:    v_mul_lo_u32 v8, v9, v8
; GFX9-NEXT:    v_add_co_u32_e32 v7, vcc, v13, v7
; GFX9-NEXT:    v_addc_co_u32_e32 v7, vcc, v16, v12, vcc
; GFX9-NEXT:    v_addc_co_u32_e32 v9, vcc, v11, v14, vcc
; GFX9-NEXT:    v_add_co_u32_e32 v7, vcc, v7, v8
; GFX9-NEXT:    v_addc_co_u32_e32 v8, vcc, v15, v9, vcc
; GFX9-NEXT:    v_addc_co_u32_e64 v6, vcc, v6, v8, s[4:5]
; GFX9-NEXT:    v_add_co_u32_e32 v5, vcc, v5, v7
; GFX9-NEXT:    v_addc_co_u32_e32 v6, vcc, 0, v6, vcc
; GFX9-NEXT:    v_ashrrev_i32_e32 v7, 31, v1
; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, v0, v7
; GFX9-NEXT:    v_xor_b32_e32 v0, v0, v7
; GFX9-NEXT:    v_mul_lo_u32 v8, v0, v6
; GFX9-NEXT:    v_mul_hi_u32 v9, v0, v5
; GFX9-NEXT:    v_mul_hi_u32 v10, v0, v6
; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, v1, v7, vcc
; GFX9-NEXT:    v_xor_b32_e32 v1, v1, v7
; GFX9-NEXT:    v_add_co_u32_e32 v8, vcc, v9, v8
; GFX9-NEXT:    v_addc_co_u32_e32 v9, vcc, v15, v10, vcc
; GFX9-NEXT:    v_mul_lo_u32 v10, v1, v5
; GFX9-NEXT:    v_mul_hi_u32 v5, v1, v5
; GFX9-NEXT:    v_mul_hi_u32 v11, v1, v6
; GFX9-NEXT:    v_mul_lo_u32 v6, v1, v6
; GFX9-NEXT:    v_add_co_u32_e32 v8, vcc, v8, v10
; GFX9-NEXT:    v_addc_co_u32_e32 v5, vcc, v9, v5, vcc
; GFX9-NEXT:    v_addc_co_u32_e32 v8, vcc, v11, v14, vcc
; GFX9-NEXT:    v_add_co_u32_e32 v5, vcc, v5, v6
; GFX9-NEXT:    v_addc_co_u32_e32 v6, vcc, v15, v8, vcc
; GFX9-NEXT:    v_mul_lo_u32 v8, v3, v5
; GFX9-NEXT:    v_mul_lo_u32 v9, v2, v6
; GFX9-NEXT:    v_mul_hi_u32 v10, v2, v5
; GFX9-NEXT:    v_mul_lo_u32 v11, v2, v5
; GFX9-NEXT:    v_add3_u32 v8, v10, v9, v8
; GFX9-NEXT:    v_sub_u32_e32 v9, v1, v8
; GFX9-NEXT:    v_sub_co_u32_e32 v0, vcc, v0, v11
; GFX9-NEXT:    v_subb_co_u32_e64 v9, s[4:5], v9, v3, vcc
; GFX9-NEXT:    v_sub_co_u32_e64 v10, s[4:5], v0, v2
; GFX9-NEXT:    v_subbrev_co_u32_e64 v9, s[4:5], 0, v9, s[4:5]
; GFX9-NEXT:    v_cmp_ge_u32_e64 s[4:5], v9, v3
; GFX9-NEXT:    v_cndmask_b32_e64 v11, 0, -1, s[4:5]
; GFX9-NEXT:    v_cmp_ge_u32_e64 s[4:5], v10, v2
; GFX9-NEXT:    v_cndmask_b32_e64 v10, 0, -1, s[4:5]
; GFX9-NEXT:    v_cmp_eq_u32_e64 s[4:5], v9, v3
; GFX9-NEXT:    v_subb_co_u32_e32 v1, vcc, v1, v8, vcc
; GFX9-NEXT:    v_cndmask_b32_e64 v9, v11, v10, s[4:5]
; GFX9-NEXT:    v_add_co_u32_e64 v10, s[4:5], 2, v5
; GFX9-NEXT:    v_addc_co_u32_e64 v11, s[4:5], 0, v6, s[4:5]
; GFX9-NEXT:    v_cmp_ge_u32_e32 vcc, v1, v3
; GFX9-NEXT:    v_add_co_u32_e64 v12, s[4:5], 1, v5
; GFX9-NEXT:    v_cndmask_b32_e64 v8, 0, -1, vcc
; GFX9-NEXT:    v_cmp_ge_u32_e32 vcc, v0, v2
; GFX9-NEXT:    v_addc_co_u32_e64 v13, s[4:5], 0, v6, s[4:5]
; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc
; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v1, v3
; GFX9-NEXT:    v_cmp_ne_u32_e64 s[4:5], 0, v9
; GFX9-NEXT:    v_cndmask_b32_e32 v0, v8, v0, vcc
; GFX9-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
; GFX9-NEXT:    v_cndmask_b32_e64 v1, v12, v10, s[4:5]
; GFX9-NEXT:    v_cndmask_b32_e64 v9, v13, v11, s[4:5]
; GFX9-NEXT:    v_cndmask_b32_e32 v1, v5, v1, vcc
; GFX9-NEXT:    v_xor_b32_e32 v2, v7, v4
; GFX9-NEXT:    v_cndmask_b32_e32 v0, v6, v9, vcc
; GFX9-NEXT:    v_xor_b32_e32 v1, v1, v2
; GFX9-NEXT:    v_xor_b32_e32 v0, v0, v2
; GFX9-NEXT:    v_sub_co_u32_e32 v4, vcc, v1, v2
; GFX9-NEXT:    v_subb_co_u32_e32 v5, vcc, v0, v2, vcc
; GFX9-NEXT:    ; implicit-def: $vgpr2_vgpr3
; GFX9-NEXT:    ; implicit-def: $vgpr0_vgpr1
; GFX9-NEXT:  BB0_2: ; %Flow
; GFX9-NEXT:    s_or_saveexec_b64 s[4:5], s[6:7]
; GFX9-NEXT:    s_xor_b64 exec, exec, s[4:5]
; GFX9-NEXT:    s_cbranch_execz BB0_4
; GFX9-NEXT:  ; %bb.3:
; GFX9-NEXT:    v_cvt_f32_u32_e32 v1, v2
; GFX9-NEXT:    v_sub_u32_e32 v3, 0, v2
; GFX9-NEXT:    v_mov_b32_e32 v5, 0
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v1, v1
; GFX9-NEXT:    v_mul_f32_e32 v1, 0x4f7ffffe, v1
; GFX9-NEXT:    v_cvt_u32_f32_e32 v1, v1
; GFX9-NEXT:    v_mul_lo_u32 v3, v3, v1
; GFX9-NEXT:    v_mul_hi_u32 v3, v1, v3
; GFX9-NEXT:    v_add_u32_e32 v1, v1, v3
; GFX9-NEXT:    v_mul_hi_u32 v1, v0, v1
; GFX9-NEXT:    v_mul_lo_u32 v3, v1, v2
; GFX9-NEXT:    v_add_u32_e32 v4, 1, v1
; GFX9-NEXT:    v_sub_u32_e32 v0, v0, v3
; GFX9-NEXT:    v_cmp_ge_u32_e32 vcc, v0, v2
; GFX9-NEXT:    v_sub_u32_e32 v3, v0, v2
; GFX9-NEXT:    v_cndmask_b32_e32 v0, v0, v3, vcc
; GFX9-NEXT:    v_cndmask_b32_e32 v1, v1, v4, vcc
; GFX9-NEXT:    v_add_u32_e32 v3, 1, v1
; GFX9-NEXT:    v_cmp_ge_u32_e32 vcc, v0, v2
; GFX9-NEXT:    v_cndmask_b32_e32 v4, v1, v3, vcc
; GFX9-NEXT:  BB0_4:
; GFX9-NEXT:    s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT:    v_mov_b32_e32 v0, v4
; GFX9-NEXT:    v_mov_b32_e32 v1, v5
; GFX9-NEXT:    s_setpc_b64 s[30:31]
  %d = sdiv i64 %a, %b
  ret i64 %d
}
|
|
|
|
|
|
|
|
define i64 @udiv64(i64 %a, i64 %b) {
; GFX9-LABEL: udiv64:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT:    v_or_b32_e32 v5, v1, v3
; GFX9-NEXT:    v_mov_b32_e32 v4, 0
; GFX9-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[4:5]
; GFX9-NEXT:    ; implicit-def: $vgpr4_vgpr5
; GFX9-NEXT:    s_and_saveexec_b64 s[4:5], vcc
; GFX9-NEXT:    s_xor_b64 s[6:7], exec, s[4:5]
; GFX9-NEXT:    s_cbranch_execz BB1_2
; GFX9-NEXT:  ; %bb.1:
; GFX9-NEXT:    v_cvt_f32_u32_e32 v4, v2
; GFX9-NEXT:    v_cvt_f32_u32_e32 v5, v3
; GFX9-NEXT:    v_sub_co_u32_e32 v6, vcc, 0, v2
; GFX9-NEXT:    v_subb_co_u32_e32 v7, vcc, 0, v3, vcc
; GFX9-NEXT:    v_mov_b32_e32 v13, 0
; GFX9-NEXT:    v_mac_f32_e32 v4, 0x4f800000, v5
; GFX9-NEXT:    v_rcp_f32_e32 v4, v4
; GFX9-NEXT:    v_mov_b32_e32 v12, 0
; GFX9-NEXT:    v_mul_f32_e32 v4, 0x5f7ffffc, v4
; GFX9-NEXT:    v_mul_f32_e32 v5, 0x2f800000, v4
; GFX9-NEXT:    v_trunc_f32_e32 v5, v5
; GFX9-NEXT:    v_mac_f32_e32 v4, 0xcf800000, v5
; GFX9-NEXT:    v_cvt_u32_f32_e32 v4, v4
; GFX9-NEXT:    v_cvt_u32_f32_e32 v5, v5
; GFX9-NEXT:    v_mul_lo_u32 v9, v7, v4
; GFX9-NEXT:    v_mul_lo_u32 v8, v6, v5
; GFX9-NEXT:    v_mul_hi_u32 v10, v6, v4
; GFX9-NEXT:    v_mul_lo_u32 v11, v6, v4
; GFX9-NEXT:    v_add3_u32 v8, v10, v8, v9
; GFX9-NEXT:    v_mul_hi_u32 v9, v4, v11
; GFX9-NEXT:    v_mul_lo_u32 v10, v4, v8
; GFX9-NEXT:    v_mul_hi_u32 v14, v4, v8
; GFX9-NEXT:    v_mul_hi_u32 v15, v5, v8
; GFX9-NEXT:    v_mul_lo_u32 v8, v5, v8
; GFX9-NEXT:    v_add_co_u32_e32 v9, vcc, v9, v10
; GFX9-NEXT:    v_addc_co_u32_e32 v10, vcc, v13, v14, vcc
; GFX9-NEXT:    v_mul_lo_u32 v14, v5, v11
; GFX9-NEXT:    v_mul_hi_u32 v11, v5, v11
; GFX9-NEXT:    v_add_co_u32_e32 v9, vcc, v9, v14
; GFX9-NEXT:    v_addc_co_u32_e32 v9, vcc, v10, v11, vcc
; GFX9-NEXT:    v_addc_co_u32_e32 v10, vcc, v15, v12, vcc
; GFX9-NEXT:    v_add_co_u32_e32 v8, vcc, v9, v8
; GFX9-NEXT:    v_add_co_u32_e64 v4, s[4:5], v4, v8
; GFX9-NEXT:    v_addc_co_u32_e32 v9, vcc, v13, v10, vcc
; GFX9-NEXT:    v_addc_co_u32_e64 v8, vcc, v5, v9, s[4:5]
; GFX9-NEXT:    v_mul_lo_u32 v10, v6, v8
; GFX9-NEXT:    v_mul_hi_u32 v11, v6, v4
; GFX9-NEXT:    v_mul_lo_u32 v7, v7, v4
; GFX9-NEXT:    v_mul_lo_u32 v6, v6, v4
; GFX9-NEXT:    v_add_u32_e32 v5, v5, v9
; GFX9-NEXT:    v_add3_u32 v7, v11, v10, v7
; GFX9-NEXT:    v_mul_lo_u32 v10, v4, v7
; GFX9-NEXT:    v_mul_hi_u32 v11, v4, v6
; GFX9-NEXT:    v_mul_hi_u32 v15, v4, v7
; GFX9-NEXT:    v_mul_hi_u32 v14, v8, v7
; GFX9-NEXT:    v_mul_lo_u32 v7, v8, v7
; GFX9-NEXT:    v_add_co_u32_e32 v10, vcc, v11, v10
; GFX9-NEXT:    v_mul_hi_u32 v11, v8, v6
; GFX9-NEXT:    v_mul_lo_u32 v6, v8, v6
; GFX9-NEXT:    v_addc_co_u32_e32 v15, vcc, v13, v15, vcc
; GFX9-NEXT:    v_add_co_u32_e32 v6, vcc, v10, v6
; GFX9-NEXT:    v_addc_co_u32_e32 v6, vcc, v15, v11, vcc
; GFX9-NEXT:    v_addc_co_u32_e32 v8, vcc, v14, v12, vcc
; GFX9-NEXT:    v_add_co_u32_e32 v6, vcc, v6, v7
; GFX9-NEXT:    v_addc_co_u32_e32 v7, vcc, v13, v8, vcc
; GFX9-NEXT:    v_addc_co_u32_e64 v5, vcc, v5, v7, s[4:5]
; GFX9-NEXT:    v_add_co_u32_e32 v4, vcc, v4, v6
; GFX9-NEXT:    v_addc_co_u32_e32 v5, vcc, 0, v5, vcc
; GFX9-NEXT:    v_mul_lo_u32 v6, v0, v5
; GFX9-NEXT:    v_mul_hi_u32 v7, v0, v4
; GFX9-NEXT:    v_mul_hi_u32 v8, v0, v5
; GFX9-NEXT:    v_mul_hi_u32 v9, v1, v5
; GFX9-NEXT:    v_mul_lo_u32 v5, v1, v5
; GFX9-NEXT:    v_add_co_u32_e32 v6, vcc, v7, v6
; GFX9-NEXT:    v_addc_co_u32_e32 v7, vcc, v13, v8, vcc
; GFX9-NEXT:    v_mul_lo_u32 v8, v1, v4
; GFX9-NEXT:    v_mul_hi_u32 v4, v1, v4
; GFX9-NEXT:    v_add_co_u32_e32 v6, vcc, v6, v8
; GFX9-NEXT:    v_addc_co_u32_e32 v4, vcc, v7, v4, vcc
; GFX9-NEXT:    v_addc_co_u32_e32 v6, vcc, v9, v12, vcc
; GFX9-NEXT:    v_add_co_u32_e32 v4, vcc, v4, v5
; GFX9-NEXT:    v_addc_co_u32_e32 v5, vcc, v13, v6, vcc
; GFX9-NEXT:    v_mul_lo_u32 v6, v3, v4
; GFX9-NEXT:    v_mul_lo_u32 v7, v2, v5
; GFX9-NEXT:    v_mul_hi_u32 v8, v2, v4
; GFX9-NEXT:    v_mul_lo_u32 v9, v2, v4
; GFX9-NEXT:    v_add3_u32 v6, v8, v7, v6
; GFX9-NEXT:    v_sub_u32_e32 v7, v1, v6
; GFX9-NEXT:    v_sub_co_u32_e32 v0, vcc, v0, v9
; GFX9-NEXT:    v_subb_co_u32_e64 v7, s[4:5], v7, v3, vcc
; GFX9-NEXT:    v_sub_co_u32_e64 v8, s[4:5], v0, v2
; GFX9-NEXT:    v_subbrev_co_u32_e64 v7, s[4:5], 0, v7, s[4:5]
; GFX9-NEXT:    v_cmp_ge_u32_e64 s[4:5], v7, v3
; GFX9-NEXT:    v_cndmask_b32_e64 v9, 0, -1, s[4:5]
; GFX9-NEXT:    v_cmp_ge_u32_e64 s[4:5], v8, v2
; GFX9-NEXT:    v_cndmask_b32_e64 v8, 0, -1, s[4:5]
; GFX9-NEXT:    v_cmp_eq_u32_e64 s[4:5], v7, v3
; GFX9-NEXT:    v_subb_co_u32_e32 v1, vcc, v1, v6, vcc
; GFX9-NEXT:    v_cndmask_b32_e64 v7, v9, v8, s[4:5]
; GFX9-NEXT:    v_add_co_u32_e64 v8, s[4:5], 2, v4
; GFX9-NEXT:    v_addc_co_u32_e64 v9, s[4:5], 0, v5, s[4:5]
; GFX9-NEXT:    v_cmp_ge_u32_e32 vcc, v1, v3
; GFX9-NEXT:    v_add_co_u32_e64 v10, s[4:5], 1, v4
; GFX9-NEXT:    v_cndmask_b32_e64 v6, 0, -1, vcc
; GFX9-NEXT:    v_cmp_ge_u32_e32 vcc, v0, v2
; GFX9-NEXT:    v_addc_co_u32_e64 v11, s[4:5], 0, v5, s[4:5]
; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc
; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v1, v3
; GFX9-NEXT:    v_cmp_ne_u32_e64 s[4:5], 0, v7
; GFX9-NEXT:    v_cndmask_b32_e32 v0, v6, v0, vcc
; GFX9-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
; GFX9-NEXT:    v_cndmask_b32_e64 v7, v11, v9, s[4:5]
; GFX9-NEXT:    v_cndmask_b32_e64 v0, v10, v8, s[4:5]
; GFX9-NEXT:    v_cndmask_b32_e32 v4, v4, v0, vcc
; GFX9-NEXT:    v_cndmask_b32_e32 v5, v5, v7, vcc
; GFX9-NEXT:    ; implicit-def: $vgpr2_vgpr3
; GFX9-NEXT:    ; implicit-def: $vgpr0_vgpr1
; GFX9-NEXT:  BB1_2: ; %Flow
; GFX9-NEXT:    s_or_saveexec_b64 s[4:5], s[6:7]
; GFX9-NEXT:    s_xor_b64 exec, exec, s[4:5]
; GFX9-NEXT:    s_cbranch_execz BB1_4
; GFX9-NEXT:  ; %bb.3:
; GFX9-NEXT:    v_cvt_f32_u32_e32 v1, v2
; GFX9-NEXT:    v_sub_u32_e32 v3, 0, v2
; GFX9-NEXT:    v_mov_b32_e32 v5, 0
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v1, v1
; GFX9-NEXT:    v_mul_f32_e32 v1, 0x4f7ffffe, v1
; GFX9-NEXT:    v_cvt_u32_f32_e32 v1, v1
; GFX9-NEXT:    v_mul_lo_u32 v3, v3, v1
; GFX9-NEXT:    v_mul_hi_u32 v3, v1, v3
; GFX9-NEXT:    v_add_u32_e32 v1, v1, v3
; GFX9-NEXT:    v_mul_hi_u32 v1, v0, v1
; GFX9-NEXT:    v_mul_lo_u32 v3, v1, v2
; GFX9-NEXT:    v_add_u32_e32 v4, 1, v1
; GFX9-NEXT:    v_sub_u32_e32 v0, v0, v3
; GFX9-NEXT:    v_cmp_ge_u32_e32 vcc, v0, v2
; GFX9-NEXT:    v_sub_u32_e32 v3, v0, v2
; GFX9-NEXT:    v_cndmask_b32_e32 v0, v0, v3, vcc
; GFX9-NEXT:    v_cndmask_b32_e32 v1, v1, v4, vcc
; GFX9-NEXT:    v_add_u32_e32 v3, 1, v1
; GFX9-NEXT:    v_cmp_ge_u32_e32 vcc, v0, v2
; GFX9-NEXT:    v_cndmask_b32_e32 v4, v1, v3, vcc
; GFX9-NEXT:  BB1_4:
; GFX9-NEXT:    s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT:    v_mov_b32_e32 v0, v4
; GFX9-NEXT:    v_mov_b32_e32 v1, v5
; GFX9-NEXT:    s_setpc_b64 s[30:31]
  %d = udiv i64 %a, %b
  ret i64 %d
}
|
|
|
|
|
|
|
|
define i64 @srem64(i64 %a, i64 %b) {
; GFX9-LABEL: srem64:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT:    v_or_b32_e32 v5, v1, v3
; GFX9-NEXT:    v_mov_b32_e32 v4, 0
; GFX9-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[4:5]
; GFX9-NEXT:    ; implicit-def: $vgpr4_vgpr5
; GFX9-NEXT:    s_and_saveexec_b64 s[4:5], vcc
; GFX9-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
; GFX9-NEXT:    s_cbranch_execz BB2_2
; GFX9-NEXT:  ; %bb.1:
; GFX9-NEXT:    v_ashrrev_i32_e32 v4, 31, v3
; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, v2, v4
; GFX9-NEXT:    v_addc_co_u32_e32 v3, vcc, v3, v4, vcc
; GFX9-NEXT:    v_xor_b32_e32 v3, v3, v4
; GFX9-NEXT:    v_xor_b32_e32 v2, v2, v4
; GFX9-NEXT:    v_cvt_f32_u32_e32 v4, v2
; GFX9-NEXT:    v_cvt_f32_u32_e32 v5, v3
; GFX9-NEXT:    v_sub_co_u32_e32 v6, vcc, 0, v2
; GFX9-NEXT:    v_subb_co_u32_e32 v7, vcc, 0, v3, vcc
; GFX9-NEXT:    v_mov_b32_e32 v14, 0
; GFX9-NEXT:    v_mac_f32_e32 v4, 0x4f800000, v5
; GFX9-NEXT:    v_rcp_f32_e32 v4, v4
; GFX9-NEXT:    v_mov_b32_e32 v13, 0
; GFX9-NEXT:    v_mul_f32_e32 v4, 0x5f7ffffc, v4
; GFX9-NEXT:    v_mul_f32_e32 v5, 0x2f800000, v4
; GFX9-NEXT:    v_trunc_f32_e32 v5, v5
; GFX9-NEXT:    v_mac_f32_e32 v4, 0xcf800000, v5
; GFX9-NEXT:    v_cvt_u32_f32_e32 v4, v4
; GFX9-NEXT:    v_cvt_u32_f32_e32 v5, v5
; GFX9-NEXT:    v_mul_lo_u32 v8, v7, v4
; GFX9-NEXT:    v_mul_hi_u32 v9, v6, v4
; GFX9-NEXT:    v_mul_lo_u32 v10, v6, v5
; GFX9-NEXT:    v_mul_lo_u32 v11, v6, v4
; GFX9-NEXT:    v_add3_u32 v8, v9, v10, v8
; GFX9-NEXT:    v_mul_lo_u32 v10, v4, v8
; GFX9-NEXT:    v_mul_hi_u32 v12, v4, v11
; GFX9-NEXT:    v_mul_hi_u32 v9, v4, v8
; GFX9-NEXT:    v_mul_hi_u32 v15, v5, v8
; GFX9-NEXT:    v_mul_lo_u32 v8, v5, v8
; GFX9-NEXT:    v_add_co_u32_e32 v10, vcc, v12, v10
; GFX9-NEXT:    v_mul_lo_u32 v12, v5, v11
; GFX9-NEXT:    v_mul_hi_u32 v11, v5, v11
; GFX9-NEXT:    v_addc_co_u32_e32 v9, vcc, v14, v9, vcc
; GFX9-NEXT:    v_add_co_u32_e32 v10, vcc, v10, v12
; GFX9-NEXT:    v_addc_co_u32_e32 v9, vcc, v9, v11, vcc
; GFX9-NEXT:    v_addc_co_u32_e32 v10, vcc, v15, v13, vcc
; GFX9-NEXT:    v_add_co_u32_e32 v8, vcc, v9, v8
; GFX9-NEXT:    v_add_co_u32_e64 v4, s[4:5], v4, v8
; GFX9-NEXT:    v_addc_co_u32_e32 v9, vcc, v14, v10, vcc
; GFX9-NEXT:    v_addc_co_u32_e64 v8, vcc, v5, v9, s[4:5]
; GFX9-NEXT:    v_mul_lo_u32 v10, v6, v8
; GFX9-NEXT:    v_mul_hi_u32 v11, v6, v4
; GFX9-NEXT:    v_mul_lo_u32 v7, v7, v4
; GFX9-NEXT:    v_mul_lo_u32 v6, v6, v4
; GFX9-NEXT:    v_add_u32_e32 v5, v5, v9
; GFX9-NEXT:    v_add3_u32 v7, v11, v10, v7
; GFX9-NEXT:    v_mul_lo_u32 v12, v4, v7
; GFX9-NEXT:    v_mul_hi_u32 v15, v4, v6
; GFX9-NEXT:    v_mul_hi_u32 v16, v4, v7
; GFX9-NEXT:    v_mul_hi_u32 v11, v8, v6
; GFX9-NEXT:    v_mul_lo_u32 v6, v8, v6
; GFX9-NEXT:    v_add_co_u32_e32 v12, vcc, v15, v12
; GFX9-NEXT:    v_mul_hi_u32 v10, v8, v7
; GFX9-NEXT:    v_addc_co_u32_e32 v15, vcc, v14, v16, vcc
; GFX9-NEXT:    v_mul_lo_u32 v7, v8, v7
; GFX9-NEXT:    v_add_co_u32_e32 v6, vcc, v12, v6
; GFX9-NEXT:    v_addc_co_u32_e32 v6, vcc, v15, v11, vcc
; GFX9-NEXT:    v_addc_co_u32_e32 v8, vcc, v10, v13, vcc
; GFX9-NEXT:    v_add_co_u32_e32 v6, vcc, v6, v7
; GFX9-NEXT:    v_addc_co_u32_e32 v7, vcc, v14, v8, vcc
; GFX9-NEXT:    v_addc_co_u32_e64 v5, vcc, v5, v7, s[4:5]
; GFX9-NEXT:    v_add_co_u32_e32 v4, vcc, v4, v6
; GFX9-NEXT:    v_addc_co_u32_e32 v5, vcc, 0, v5, vcc
; GFX9-NEXT:    v_ashrrev_i32_e32 v6, 31, v1
; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, v0, v6
; GFX9-NEXT:    v_xor_b32_e32 v0, v0, v6
; GFX9-NEXT:    v_mul_lo_u32 v7, v0, v5
; GFX9-NEXT:    v_mul_hi_u32 v8, v0, v4
; GFX9-NEXT:    v_mul_hi_u32 v9, v0, v5
; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, v1, v6, vcc
; GFX9-NEXT:    v_xor_b32_e32 v1, v1, v6
; GFX9-NEXT:    v_add_co_u32_e32 v7, vcc, v8, v7
; GFX9-NEXT:    v_addc_co_u32_e32 v8, vcc, v14, v9, vcc
; GFX9-NEXT:    v_mul_lo_u32 v9, v1, v4
; GFX9-NEXT:    v_mul_hi_u32 v4, v1, v4
; GFX9-NEXT:    v_mul_hi_u32 v10, v1, v5
; GFX9-NEXT:    v_mul_lo_u32 v5, v1, v5
; GFX9-NEXT:    v_add_co_u32_e32 v7, vcc, v7, v9
; GFX9-NEXT:    v_addc_co_u32_e32 v4, vcc, v8, v4, vcc
; GFX9-NEXT:    v_addc_co_u32_e32 v7, vcc, v10, v13, vcc
; GFX9-NEXT:    v_add_co_u32_e32 v4, vcc, v4, v5
; GFX9-NEXT:    v_addc_co_u32_e32 v5, vcc, v14, v7, vcc
; GFX9-NEXT:    v_mul_lo_u32 v7, v3, v4
; GFX9-NEXT:    v_mul_hi_u32 v8, v2, v4
; GFX9-NEXT:    v_mul_lo_u32 v5, v2, v5
; GFX9-NEXT:    v_mul_lo_u32 v4, v2, v4
; GFX9-NEXT:    v_add3_u32 v5, v8, v5, v7
; GFX9-NEXT:    v_sub_u32_e32 v7, v1, v5
; GFX9-NEXT:    v_sub_co_u32_e32 v0, vcc, v0, v4
; GFX9-NEXT:    v_subb_co_u32_e64 v4, s[4:5], v7, v3, vcc
; GFX9-NEXT:    v_sub_co_u32_e64 v7, s[4:5], v0, v2
; GFX9-NEXT:    v_subbrev_co_u32_e64 v8, s[6:7], 0, v4, s[4:5]
; GFX9-NEXT:    v_cmp_ge_u32_e64 s[6:7], v8, v3
; GFX9-NEXT:    v_subb_co_u32_e32 v1, vcc, v1, v5, vcc
; GFX9-NEXT:    v_cndmask_b32_e64 v9, 0, -1, s[6:7]
; GFX9-NEXT:    v_cmp_ge_u32_e64 s[6:7], v7, v2
; GFX9-NEXT:    v_cndmask_b32_e64 v10, 0, -1, s[6:7]
; GFX9-NEXT:    v_cmp_eq_u32_e64 s[6:7], v8, v3
; GFX9-NEXT:    v_cmp_ge_u32_e32 vcc, v1, v3
; GFX9-NEXT:    v_subb_co_u32_e64 v4, s[4:5], v4, v3, s[4:5]
; GFX9-NEXT:    v_cndmask_b32_e64 v9, v9, v10, s[6:7]
; GFX9-NEXT:    v_sub_co_u32_e64 v10, s[4:5], v7, v2
; GFX9-NEXT:    v_cndmask_b32_e64 v5, 0, -1, vcc
; GFX9-NEXT:    v_cmp_ge_u32_e32 vcc, v0, v2
; GFX9-NEXT:    v_subbrev_co_u32_e64 v4, s[4:5], 0, v4, s[4:5]
; GFX9-NEXT:    v_cndmask_b32_e64 v2, 0, -1, vcc
; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v1, v3
; GFX9-NEXT:    v_cndmask_b32_e32 v2, v5, v2, vcc
; GFX9-NEXT:    v_cmp_ne_u32_e64 s[4:5], 0, v9
; GFX9-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v2
; GFX9-NEXT:    v_cndmask_b32_e64 v2, v7, v10, s[4:5]
; GFX9-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
; GFX9-NEXT:    v_cndmask_b32_e64 v4, v8, v4, s[4:5]
; GFX9-NEXT:    v_cndmask_b32_e32 v1, v1, v4, vcc
; GFX9-NEXT:    v_xor_b32_e32 v0, v0, v6
; GFX9-NEXT:    v_xor_b32_e32 v1, v1, v6
; GFX9-NEXT:    v_sub_co_u32_e32 v4, vcc, v0, v6
; GFX9-NEXT:    v_subb_co_u32_e32 v5, vcc, v1, v6, vcc
; GFX9-NEXT:    ; implicit-def: $vgpr2_vgpr3
; GFX9-NEXT:    ; implicit-def: $vgpr0_vgpr1
; GFX9-NEXT:  BB2_2: ; %Flow
; GFX9-NEXT:    s_or_saveexec_b64 s[4:5], s[8:9]
; GFX9-NEXT:    s_xor_b64 exec, exec, s[4:5]
; GFX9-NEXT:    s_cbranch_execz BB2_4
; GFX9-NEXT:  ; %bb.3:
; GFX9-NEXT:    v_cvt_f32_u32_e32 v1, v2
; GFX9-NEXT:    v_sub_u32_e32 v3, 0, v2
; GFX9-NEXT:    v_mov_b32_e32 v5, 0
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v1, v1
; GFX9-NEXT:    v_mul_f32_e32 v1, 0x4f7ffffe, v1
; GFX9-NEXT:    v_cvt_u32_f32_e32 v1, v1
; GFX9-NEXT:    v_mul_lo_u32 v3, v3, v1
; GFX9-NEXT:    v_mul_hi_u32 v3, v1, v3
; GFX9-NEXT:    v_add_u32_e32 v1, v1, v3
; GFX9-NEXT:    v_mul_hi_u32 v1, v0, v1
; GFX9-NEXT:    v_mul_lo_u32 v1, v1, v2
; GFX9-NEXT:    v_sub_u32_e32 v0, v0, v1
; GFX9-NEXT:    v_sub_u32_e32 v1, v0, v2
; GFX9-NEXT:    v_cmp_ge_u32_e32 vcc, v0, v2
; GFX9-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
; GFX9-NEXT:    v_sub_u32_e32 v1, v0, v2
; GFX9-NEXT:    v_cmp_ge_u32_e32 vcc, v0, v2
; GFX9-NEXT:    v_cndmask_b32_e32 v4, v0, v1, vcc
; GFX9-NEXT:  BB2_4:
; GFX9-NEXT:    s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT:    v_mov_b32_e32 v0, v4
; GFX9-NEXT:    v_mov_b32_e32 v1, v5
; GFX9-NEXT:    s_setpc_b64 s[30:31]
  %d = srem i64 %a, %b
  ret i64 %d
}
|
|
|
|
|
|
|
|
define i64 @urem64(i64 %a, i64 %b) {
|
|
|
|
; GFX9-LABEL: urem64:
|
|
|
|
; GFX9: ; %bb.0:
|
|
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
|
|
; GFX9-NEXT: v_or_b32_e32 v5, v1, v3
|
|
|
|
; GFX9-NEXT: v_mov_b32_e32 v4, 0
|
|
|
|
; GFX9-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
|
|
|
|
; GFX9-NEXT: ; implicit-def: $vgpr4_vgpr5
|
|
|
|
; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
|
|
|
|
; GFX9-NEXT: s_xor_b64 s[8:9], exec, s[4:5]
|
|
|
|
; GFX9-NEXT: s_cbranch_execz BB3_2
|
|
|
|
; GFX9-NEXT: ; %bb.1:
|
|
|
|
; GFX9-NEXT: v_cvt_f32_u32_e32 v4, v2
|
|
|
|
; GFX9-NEXT: v_cvt_f32_u32_e32 v5, v3
|
|
|
|
; GFX9-NEXT: v_sub_co_u32_e32 v6, vcc, 0, v2
|
|
|
|
; GFX9-NEXT: v_subb_co_u32_e32 v7, vcc, 0, v3, vcc
|
|
|
|
; GFX9-NEXT: v_mov_b32_e32 v13, 0
|
|
|
|
; GFX9-NEXT: v_mac_f32_e32 v4, 0x4f800000, v5
|
|
|
|
; GFX9-NEXT: v_rcp_f32_e32 v4, v4
|
|
|
|
; GFX9-NEXT: v_mov_b32_e32 v12, 0
|
|
|
|
; GFX9-NEXT: v_mul_f32_e32 v4, 0x5f7ffffc, v4
|
|
|
|
; GFX9-NEXT: v_mul_f32_e32 v5, 0x2f800000, v4
|
|
|
|
; GFX9-NEXT: v_trunc_f32_e32 v5, v5
|
|
|
|
; GFX9-NEXT: v_mac_f32_e32 v4, 0xcf800000, v5
|
|
|
|
; GFX9-NEXT: v_cvt_u32_f32_e32 v4, v4
|
|
|
|
; GFX9-NEXT: v_cvt_u32_f32_e32 v5, v5
|
|
|
|
; GFX9-NEXT: v_mul_lo_u32 v9, v7, v4
|
|
|
|
; GFX9-NEXT: v_mul_lo_u32 v8, v6, v5
|
|
|
|
; GFX9-NEXT: v_mul_hi_u32 v10, v6, v4
|
|
|
|
; GFX9-NEXT: v_mul_lo_u32 v11, v6, v4
|
|
|
|
; GFX9-NEXT: v_add3_u32 v8, v10, v8, v9
|
|
|
|
; GFX9-NEXT: v_mul_hi_u32 v9, v4, v11
|
|
|
|
; GFX9-NEXT: v_mul_lo_u32 v10, v4, v8
|
|
|
|
; GFX9-NEXT: v_mul_hi_u32 v14, v4, v8
|
|
|
|
; GFX9-NEXT: v_mul_hi_u32 v15, v5, v8
|
|
|
|
; GFX9-NEXT: v_mul_lo_u32 v8, v5, v8
|
|
|
|
; GFX9-NEXT: v_add_co_u32_e32 v9, vcc, v9, v10
|
|
|
|
; GFX9-NEXT: v_addc_co_u32_e32 v10, vcc, v13, v14, vcc
|
|
|
|
; GFX9-NEXT: v_mul_lo_u32 v14, v5, v11
|
|
|
|
; GFX9-NEXT: v_mul_hi_u32 v11, v5, v11
|
[AMDGPU] Enable divergence driven ISel for ADD/SUB i64
Summary:
Currently we custom select add/sub with carry out to scalar form relying on later replacing them to vector form if necessary.
This change enables custom selection code to take the divergence of adde/addc SDNodes into account and select the appropriate form in one step.
Reviewers: arsenm, vpykhtin, rampitec
Reviewed By: arsenm, vpykhtin
Subscribers: kzhuravl, jvesely, wdng, nhaehnle, yaxunl, dstuttard, tpr, t-tye, hiraditya, kerbowa
Differential Revision: https://reviews.llvm.org/D76371
2020-03-20 02:33:13 +08:00
|
|
|
; GFX9-NEXT: v_add_co_u32_e32 v9, vcc, v9, v14
|
2020-02-18 07:16:59 +08:00
|
|
|
; GFX9-NEXT: v_addc_co_u32_e32 v9, vcc, v10, v11, vcc
|
|
|
|
; GFX9-NEXT: v_addc_co_u32_e32 v10, vcc, v15, v12, vcc
|
|
|
|
; GFX9-NEXT: v_add_co_u32_e32 v8, vcc, v9, v8
|
|
|
|
; GFX9-NEXT: v_add_co_u32_e64 v4, s[4:5], v4, v8
|
|
|
|
; GFX9-NEXT: v_addc_co_u32_e32 v9, vcc, v13, v10, vcc
|
|
|
|
; GFX9-NEXT: v_addc_co_u32_e64 v8, vcc, v5, v9, s[4:5]
|
|
|
|
; GFX9-NEXT: v_mul_lo_u32 v10, v6, v8
|
|
|
|
; GFX9-NEXT: v_mul_hi_u32 v11, v6, v4
|
|
|
|
; GFX9-NEXT: v_mul_lo_u32 v7, v7, v4
|
|
|
|
; GFX9-NEXT: v_mul_lo_u32 v6, v6, v4
|
|
|
|
; GFX9-NEXT: v_add_u32_e32 v5, v5, v9
|
|
|
|
; GFX9-NEXT: v_add3_u32 v7, v11, v10, v7
|
|
|
|
; GFX9-NEXT: v_mul_lo_u32 v10, v4, v7
|
|
|
|
; GFX9-NEXT: v_mul_hi_u32 v11, v4, v6
|
|
|
|
; GFX9-NEXT: v_mul_hi_u32 v15, v4, v7
|
|
|
|
; GFX9-NEXT: v_mul_hi_u32 v14, v8, v7
|
|
|
|
; GFX9-NEXT: v_mul_lo_u32 v7, v8, v7
|
|
|
|
; GFX9-NEXT: v_add_co_u32_e32 v10, vcc, v11, v10
|
|
|
|
; GFX9-NEXT: v_mul_hi_u32 v11, v8, v6
|
|
|
|
; GFX9-NEXT: v_mul_lo_u32 v6, v8, v6
|
|
|
|
; GFX9-NEXT: v_addc_co_u32_e32 v15, vcc, v13, v15, vcc
|
[AMDGPU] Enable divergence driven ISel for ADD/SUB i64
Summary:
Currently we custom select add/sub with carry out to scalar form relying on later replacing them to vector form if necessary.
This change enables custom selection code to take the divergence of adde/addc SDNodes into account and select the appropriate form in one step.
Reviewers: arsenm, vpykhtin, rampitec
Reviewed By: arsenm, vpykhtin
Subscribers: kzhuravl, jvesely, wdng, nhaehnle, yaxunl, dstuttard, tpr, t-tye, hiraditya, kerbowa
Differential Revision: https://reviews.llvm.org/D76371
2020-03-20 02:33:13 +08:00
|
|
|
; GFX9-NEXT: v_add_co_u32_e32 v6, vcc, v10, v6
|
2020-02-18 07:16:59 +08:00
|
|
|
; GFX9-NEXT: v_addc_co_u32_e32 v6, vcc, v15, v11, vcc
|
|
|
|
; GFX9-NEXT: v_addc_co_u32_e32 v8, vcc, v14, v12, vcc
|
|
|
|
; GFX9-NEXT: v_add_co_u32_e32 v6, vcc, v6, v7
|
|
|
|
; GFX9-NEXT: v_addc_co_u32_e32 v7, vcc, v13, v8, vcc
|
|
|
|
; GFX9-NEXT: v_addc_co_u32_e64 v5, vcc, v5, v7, s[4:5]
|
|
|
|
; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v4, v6
|
|
|
|
; GFX9-NEXT: v_addc_co_u32_e32 v5, vcc, 0, v5, vcc
|
|
|
|
; GFX9-NEXT: v_mul_lo_u32 v6, v0, v5
|
|
|
|
; GFX9-NEXT: v_mul_hi_u32 v7, v0, v4
|
|
|
|
; GFX9-NEXT: v_mul_hi_u32 v8, v0, v5
|
|
|
|
; GFX9-NEXT: v_mul_hi_u32 v9, v1, v5
|
|
|
|
; GFX9-NEXT: v_mul_lo_u32 v5, v1, v5
|
|
|
|
; GFX9-NEXT: v_add_co_u32_e32 v6, vcc, v7, v6
|
|
|
|
; GFX9-NEXT: v_addc_co_u32_e32 v7, vcc, v13, v8, vcc
|
|
|
|
; GFX9-NEXT: v_mul_lo_u32 v8, v1, v4
|
|
|
|
; GFX9-NEXT: v_mul_hi_u32 v4, v1, v4
|
[AMDGPU] Enable divergence driven ISel for ADD/SUB i64
Summary:
Currently we custom select add/sub with carry out to scalar form relying on later replacing them to vector form if necessary.
This change enables custom selection code to take the divergence of adde/addc SDNodes into account and select the appropriate form in one step.
Reviewers: arsenm, vpykhtin, rampitec
Reviewed By: arsenm, vpykhtin
Subscribers: kzhuravl, jvesely, wdng, nhaehnle, yaxunl, dstuttard, tpr, t-tye, hiraditya, kerbowa
Differential Revision: https://reviews.llvm.org/D76371
2020-03-20 02:33:13 +08:00
|
|
|
; GFX9-NEXT: v_add_co_u32_e32 v6, vcc, v6, v8
|
2020-02-18 07:16:59 +08:00
|
|
|
; GFX9-NEXT: v_addc_co_u32_e32 v4, vcc, v7, v4, vcc
|
|
|
|
; GFX9-NEXT: v_addc_co_u32_e32 v6, vcc, v9, v12, vcc
|
|
|
|
; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v4, v5
|
|
|
|
; GFX9-NEXT: v_addc_co_u32_e32 v5, vcc, v13, v6, vcc
|
|
|
|
; GFX9-NEXT: v_mul_lo_u32 v6, v3, v4
|
|
|
|
; GFX9-NEXT: v_mul_hi_u32 v7, v2, v4
|
|
|
|
; GFX9-NEXT: v_mul_lo_u32 v5, v2, v5
|
|
|
|
; GFX9-NEXT: v_mul_lo_u32 v4, v2, v4
|
|
|
|
; GFX9-NEXT: v_add3_u32 v5, v7, v5, v6
|
|
|
|
; GFX9-NEXT: v_sub_u32_e32 v6, v1, v5
|
2021-04-19 10:45:41 +08:00
|
|
|
; GFX9-NEXT: v_sub_co_u32_e32 v0, vcc, v0, v4
|
|
|
|
; GFX9-NEXT: v_subb_co_u32_e64 v4, s[4:5], v6, v3, vcc
|
|
|
|
; GFX9-NEXT: v_sub_co_u32_e64 v6, s[4:5], v0, v2
|
|
|
|
; GFX9-NEXT: v_subbrev_co_u32_e64 v7, s[6:7], 0, v4, s[4:5]
|
|
|
|
; GFX9-NEXT: v_cmp_ge_u32_e64 s[6:7], v7, v3
|
2020-02-18 07:16:59 +08:00
|
|
|
; GFX9-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v5, vcc
|
2021-04-19 10:45:41 +08:00
|
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v8, 0, -1, s[6:7]
|
|
|
|
; GFX9-NEXT: v_cmp_ge_u32_e64 s[6:7], v6, v2
|
|
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v9, 0, -1, s[6:7]
|
|
|
|
; GFX9-NEXT: v_cmp_eq_u32_e64 s[6:7], v7, v3
|
2020-02-18 07:16:59 +08:00
|
|
|
; GFX9-NEXT: v_cmp_ge_u32_e32 vcc, v1, v3
|
2021-04-19 10:45:41 +08:00
|
|
|
; GFX9-NEXT: v_subb_co_u32_e64 v4, s[4:5], v4, v3, s[4:5]
|
|
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v8, v8, v9, s[6:7]
|
|
|
|
; GFX9-NEXT: v_sub_co_u32_e64 v9, s[4:5], v6, v2
|
2020-02-18 07:16:59 +08:00
|
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v5, 0, -1, vcc
|
2021-04-19 10:45:41 +08:00
|
|
|
; GFX9-NEXT: v_cmp_ge_u32_e32 vcc, v0, v2
|
|
|
|
; GFX9-NEXT: v_subbrev_co_u32_e64 v4, s[4:5], 0, v4, s[4:5]
|
|
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc
|
2020-02-18 07:16:59 +08:00
|
|
|
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v1, v3
|
2021-04-19 10:45:41 +08:00
|
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v2, v5, v2, vcc
|
|
|
|
; GFX9-NEXT: v_cmp_ne_u32_e64 s[4:5], 0, v8
|
|
|
|
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
|
|
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v4, v7, v4, s[4:5]
|
|
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v5, v1, v4, vcc
|
|
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v1, v6, v9, s[4:5]
|
|
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v4, v0, v1, vcc
|
|
|
|
; GFX9-NEXT: ; implicit-def: $vgpr2_vgpr3
|
2021-07-21 21:20:10 +08:00
|
|
|
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1
|
2020-02-18 07:16:59 +08:00
|
|
|
; GFX9-NEXT: BB3_2: ; %Flow
|
2020-06-23 00:11:58 +08:00
|
|
|
; GFX9-NEXT: s_or_saveexec_b64 s[4:5], s[8:9]
|
|
|
|
; GFX9-NEXT: s_xor_b64 exec, exec, s[4:5]
|
2020-02-18 07:16:59 +08:00
|
|
|
; GFX9-NEXT: s_cbranch_execz BB3_4
|
|
|
|
; GFX9-NEXT: ; %bb.3:
|
|
|
|
; GFX9-NEXT: v_cvt_f32_u32_e32 v1, v2
|
2020-06-23 00:11:58 +08:00
|
|
|
; GFX9-NEXT: v_sub_u32_e32 v3, 0, v2
|
|
|
|
; GFX9-NEXT: v_mov_b32_e32 v5, 0
|
2020-02-18 07:16:59 +08:00
|
|
|
; GFX9-NEXT: v_rcp_iflag_f32_e32 v1, v1
|
2020-06-23 00:11:58 +08:00
|
|
|
; GFX9-NEXT: v_mul_f32_e32 v1, 0x4f7ffffe, v1
|
2020-02-18 07:16:59 +08:00
|
|
|
; GFX9-NEXT: v_cvt_u32_f32_e32 v1, v1
|
2020-06-23 00:11:58 +08:00
|
|
|
; GFX9-NEXT: v_mul_lo_u32 v3, v3, v1
|
|
|
|
; GFX9-NEXT: v_mul_hi_u32 v3, v1, v3
|
|
|
|
; GFX9-NEXT: v_add_u32_e32 v1, v1, v3
|
|
|
|
; GFX9-NEXT: v_mul_hi_u32 v1, v0, v1
|
2020-02-18 07:16:59 +08:00
|
|
|
; GFX9-NEXT: v_mul_lo_u32 v1, v1, v2
|
2020-06-23 00:11:58 +08:00
|
|
|
; GFX9-NEXT: v_sub_u32_e32 v0, v0, v1
|
|
|
|
; GFX9-NEXT: v_sub_u32_e32 v1, v0, v2
|
|
|
|
; GFX9-NEXT: v_cmp_ge_u32_e32 vcc, v0, v2
|
|
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
|
|
|
|
; GFX9-NEXT: v_sub_u32_e32 v1, v0, v2
|
|
|
|
; GFX9-NEXT: v_cmp_ge_u32_e32 vcc, v0, v2
|
|
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v4, v0, v1, vcc
|
2020-02-18 07:16:59 +08:00
|
|
|
; GFX9-NEXT: BB3_4:
|
2020-06-23 00:11:58 +08:00
|
|
|
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
|
2020-02-18 07:16:59 +08:00
|
|
|
; GFX9-NEXT: v_mov_b32_e32 v0, v4
|
|
|
|
; GFX9-NEXT: v_mov_b32_e32 v1, v5
|
|
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
|
|
%d = urem i64 %a, %b
|
|
|
|
ret i64 %d
|
|
|
|
}
|
|
|
|
|
|
|
|
define i32 @sdiv32(i32 %a, i32 %b) {
|
|
|
|
; GFX9-LABEL: sdiv32:
|
|
|
|
; GFX9: ; %bb.0:
|
|
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
|
|
; GFX9-NEXT: v_ashrrev_i32_e32 v2, 31, v1
|
|
|
|
; GFX9-NEXT: v_add_u32_e32 v1, v1, v2
|
|
|
|
; GFX9-NEXT: v_xor_b32_e32 v1, v1, v2
|
|
|
|
; GFX9-NEXT: v_cvt_f32_u32_e32 v3, v1
|
2020-06-22 22:27:37 +08:00
|
|
|
; GFX9-NEXT: v_sub_u32_e32 v4, 0, v1
|
[AMDGPU] Remove dubious logic in bidirectional list scheduler
Summary:
pickNodeBidirectional tried to compare the best top candidate and the
best bottom candidate by examining TopCand.Reason and BotCand.Reason.
This is unsound because, after calling pickNodeFromQueue, Cand.Reason
does not reflect the most important reason why Cand was chosen. Rather
it reflects the most recent reason why it beat some other potential
candidate, which could have been for some low priority tie breaker
reason.
I have seen this cause problems where TopCand is a good candidate, but
because TopCand.Reason is ORDER (which is very low priority) it is
repeatedly ignored in favour of a mediocre BotCand. This is not how
bidirectional scheduling is supposed to work.
To fix this I changed the code to always compare TopCand and BotCand
directly, like the generic implementation of pickNodeBidirectional does.
This removes some uncommented AMDGPU-specific logic; if this logic turns
out to be important then perhaps it could be moved into an override of
tryCandidate instead.
Graphics shader benchmarking on gfx10 shows a lot more positive than
negative effects from this change.
Reviewers: arsenm, tstellar, rampitec, kzhuravl, vpykhtin, dstuttard, tpr, atrick, MatzeB
Subscribers: jvesely, wdng, nhaehnle, yaxunl, t-tye, hiraditya, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D68338
2019-10-07 22:33:59 +08:00
|
|
|
; GFX9-NEXT: v_ashrrev_i32_e32 v5, 31, v0
|
|
|
|
; GFX9-NEXT: v_add_u32_e32 v0, v0, v5
|
2020-06-22 22:27:37 +08:00
|
|
|
; GFX9-NEXT: v_rcp_iflag_f32_e32 v3, v3
|
[AMDGPU] Remove dubious logic in bidirectional list scheduler
Summary:
pickNodeBidirectional tried to compare the best top candidate and the
best bottom candidate by examining TopCand.Reason and BotCand.Reason.
This is unsound because, after calling pickNodeFromQueue, Cand.Reason
does not reflect the most important reason why Cand was chosen. Rather
it reflects the most recent reason why it beat some other potential
candidate, which could have been for some low priority tie breaker
reason.
I have seen this cause problems where TopCand is a good candidate, but
because TopCand.Reason is ORDER (which is very low priority) it is
repeatedly ignored in favour of a mediocre BotCand. This is not how
bidirectional scheduling is supposed to work.
To fix this I changed the code to always compare TopCand and BotCand
directly, like the generic implementation of pickNodeBidirectional does.
This removes some uncommented AMDGPU-specific logic; if this logic turns
out to be important then perhaps it could be moved into an override of
tryCandidate instead.
Graphics shader benchmarking on gfx10 shows a lot more positive than
negative effects from this change.
Reviewers: arsenm, tstellar, rampitec, kzhuravl, vpykhtin, dstuttard, tpr, atrick, MatzeB
Subscribers: jvesely, wdng, nhaehnle, yaxunl, t-tye, hiraditya, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D68338
2019-10-07 22:33:59 +08:00
|
|
|
; GFX9-NEXT: v_xor_b32_e32 v0, v0, v5
|
|
|
|
; GFX9-NEXT: v_xor_b32_e32 v2, v5, v2
|
2020-06-22 22:27:37 +08:00
|
|
|
; GFX9-NEXT: v_mul_f32_e32 v3, 0x4f7ffffe, v3
|
|
|
|
; GFX9-NEXT: v_cvt_u32_f32_e32 v3, v3
|
|
|
|
; GFX9-NEXT: v_mul_lo_u32 v4, v4, v3
|
|
|
|
; GFX9-NEXT: v_mul_hi_u32 v4, v3, v4
|
|
|
|
; GFX9-NEXT: v_add_u32_e32 v3, v3, v4
|
|
|
|
; GFX9-NEXT: v_mul_hi_u32 v3, v0, v3
|
2020-02-18 07:16:59 +08:00
|
|
|
; GFX9-NEXT: v_mul_lo_u32 v4, v3, v1
|
|
|
|
; GFX9-NEXT: v_add_u32_e32 v5, 1, v3
|
2020-06-22 22:27:37 +08:00
|
|
|
; GFX9-NEXT: v_sub_u32_e32 v0, v0, v4
|
|
|
|
; GFX9-NEXT: v_cmp_ge_u32_e32 vcc, v0, v1
|
|
|
|
; GFX9-NEXT: v_sub_u32_e32 v4, v0, v1
|
|
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc
|
|
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v4, vcc
|
|
|
|
; GFX9-NEXT: v_add_u32_e32 v4, 1, v3
|
|
|
|
; GFX9-NEXT: v_cmp_ge_u32_e32 vcc, v0, v1
|
|
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v4, vcc
|
2020-02-18 07:16:59 +08:00
|
|
|
; GFX9-NEXT: v_xor_b32_e32 v0, v0, v2
|
|
|
|
; GFX9-NEXT: v_sub_u32_e32 v0, v0, v2
|
|
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
|
|
%d = sdiv i32 %a, %b
|
|
|
|
ret i32 %d
|
|
|
|
}
|
|
|
|
|
|
|
|
define i32 @udiv32(i32 %a, i32 %b) {
|
|
|
|
; GFX9-LABEL: udiv32:
|
|
|
|
; GFX9: ; %bb.0:
|
|
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
|
|
; GFX9-NEXT: v_cvt_f32_u32_e32 v2, v1
|
2020-06-22 22:27:37 +08:00
|
|
|
; GFX9-NEXT: v_sub_u32_e32 v3, 0, v1
|
2020-02-18 07:16:59 +08:00
|
|
|
; GFX9-NEXT: v_rcp_iflag_f32_e32 v2, v2
|
2020-06-22 22:27:37 +08:00
|
|
|
; GFX9-NEXT: v_mul_f32_e32 v2, 0x4f7ffffe, v2
|
2020-02-18 07:16:59 +08:00
|
|
|
; GFX9-NEXT: v_cvt_u32_f32_e32 v2, v2
|
2020-06-22 22:27:37 +08:00
|
|
|
; GFX9-NEXT: v_mul_lo_u32 v3, v3, v2
|
|
|
|
; GFX9-NEXT: v_mul_hi_u32 v3, v2, v3
|
|
|
|
; GFX9-NEXT: v_add_u32_e32 v2, v2, v3
|
|
|
|
; GFX9-NEXT: v_mul_hi_u32 v2, v0, v2
|
2020-02-18 07:16:59 +08:00
|
|
|
; GFX9-NEXT: v_mul_lo_u32 v3, v2, v1
|
|
|
|
; GFX9-NEXT: v_add_u32_e32 v4, 1, v2
|
2020-06-22 22:27:37 +08:00
|
|
|
; GFX9-NEXT: v_sub_u32_e32 v0, v0, v3
|
|
|
|
; GFX9-NEXT: v_cmp_ge_u32_e32 vcc, v0, v1
|
|
|
|
; GFX9-NEXT: v_sub_u32_e32 v3, v0, v1
|
|
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc
|
|
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc
|
|
|
|
; GFX9-NEXT: v_add_u32_e32 v3, 1, v2
|
|
|
|
; GFX9-NEXT: v_cmp_ge_u32_e32 vcc, v0, v1
|
|
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc
|
2020-02-18 07:16:59 +08:00
|
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
|
|
%d = udiv i32 %a, %b
|
|
|
|
ret i32 %d
|
|
|
|
}
|
|
|
|
|
|
|
|
define i32 @srem32(i32 %a, i32 %b) {
|
|
|
|
; GFX9-LABEL: srem32:
|
|
|
|
; GFX9: ; %bb.0:
|
|
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
|
|
; GFX9-NEXT: v_ashrrev_i32_e32 v2, 31, v1
|
|
|
|
; GFX9-NEXT: v_add_u32_e32 v1, v1, v2
|
|
|
|
; GFX9-NEXT: v_xor_b32_e32 v1, v1, v2
|
|
|
|
; GFX9-NEXT: v_cvt_f32_u32_e32 v2, v1
|
2020-06-22 22:27:37 +08:00
|
|
|
; GFX9-NEXT: v_sub_u32_e32 v3, 0, v1
|
[AMDGPU] Remove dubious logic in bidirectional list scheduler
Summary:
pickNodeBidirectional tried to compare the best top candidate and the
best bottom candidate by examining TopCand.Reason and BotCand.Reason.
This is unsound because, after calling pickNodeFromQueue, Cand.Reason
does not reflect the most important reason why Cand was chosen. Rather
it reflects the most recent reason why it beat some other potential
candidate, which could have been for some low priority tie breaker
reason.
I have seen this cause problems where TopCand is a good candidate, but
because TopCand.Reason is ORDER (which is very low priority) it is
repeatedly ignored in favour of a mediocre BotCand. This is not how
bidirectional scheduling is supposed to work.
To fix this I changed the code to always compare TopCand and BotCand
directly, like the generic implementation of pickNodeBidirectional does.
This removes some uncommented AMDGPU-specific logic; if this logic turns
out to be important then perhaps it could be moved into an override of
tryCandidate instead.
Graphics shader benchmarking on gfx10 shows a lot more positive than
negative effects from this change.
Reviewers: arsenm, tstellar, rampitec, kzhuravl, vpykhtin, dstuttard, tpr, atrick, MatzeB
Subscribers: jvesely, wdng, nhaehnle, yaxunl, t-tye, hiraditya, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D68338
2019-10-07 22:33:59 +08:00
|
|
|
; GFX9-NEXT: v_ashrrev_i32_e32 v4, 31, v0
|
|
|
|
; GFX9-NEXT: v_add_u32_e32 v0, v0, v4
|
2020-06-22 22:27:37 +08:00
|
|
|
; GFX9-NEXT: v_rcp_iflag_f32_e32 v2, v2
|
[AMDGPU] Remove dubious logic in bidirectional list scheduler
Summary:
pickNodeBidirectional tried to compare the best top candidate and the
best bottom candidate by examining TopCand.Reason and BotCand.Reason.
This is unsound because, after calling pickNodeFromQueue, Cand.Reason
does not reflect the most important reason why Cand was chosen. Rather
it reflects the most recent reason why it beat some other potential
candidate, which could have been for some low priority tie breaker
reason.
I have seen this cause problems where TopCand is a good candidate, but
because TopCand.Reason is ORDER (which is very low priority) it is
repeatedly ignored in favour of a mediocre BotCand. This is not how
bidirectional scheduling is supposed to work.
To fix this I changed the code to always compare TopCand and BotCand
directly, like the generic implementation of pickNodeBidirectional does.
This removes some uncommented AMDGPU-specific logic; if this logic turns
out to be important then perhaps it could be moved into an override of
tryCandidate instead.
Graphics shader benchmarking on gfx10 shows a lot more positive than
negative effects from this change.
Reviewers: arsenm, tstellar, rampitec, kzhuravl, vpykhtin, dstuttard, tpr, atrick, MatzeB
Subscribers: jvesely, wdng, nhaehnle, yaxunl, t-tye, hiraditya, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D68338
2019-10-07 22:33:59 +08:00
|
|
|
; GFX9-NEXT: v_xor_b32_e32 v0, v0, v4
|
2020-06-22 22:27:37 +08:00
|
|
|
; GFX9-NEXT: v_mul_f32_e32 v2, 0x4f7ffffe, v2
|
|
|
|
; GFX9-NEXT: v_cvt_u32_f32_e32 v2, v2
|
|
|
|
; GFX9-NEXT: v_mul_lo_u32 v3, v3, v2
|
|
|
|
; GFX9-NEXT: v_mul_hi_u32 v3, v2, v3
|
|
|
|
; GFX9-NEXT: v_add_u32_e32 v2, v2, v3
|
|
|
|
; GFX9-NEXT: v_mul_hi_u32 v2, v0, v2
|
2020-02-18 07:16:59 +08:00
|
|
|
; GFX9-NEXT: v_mul_lo_u32 v2, v2, v1
|
2020-06-22 22:27:37 +08:00
|
|
|
; GFX9-NEXT: v_sub_u32_e32 v0, v0, v2
|
|
|
|
; GFX9-NEXT: v_sub_u32_e32 v2, v0, v1
|
|
|
|
; GFX9-NEXT: v_cmp_ge_u32_e32 vcc, v0, v1
|
|
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
|
|
|
|
; GFX9-NEXT: v_sub_u32_e32 v2, v0, v1
|
|
|
|
; GFX9-NEXT: v_cmp_ge_u32_e32 vcc, v0, v1
|
|
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
|
[AMDGPU] Remove dubious logic in bidirectional list scheduler
Summary:
pickNodeBidirectional tried to compare the best top candidate and the
best bottom candidate by examining TopCand.Reason and BotCand.Reason.
This is unsound because, after calling pickNodeFromQueue, Cand.Reason
does not reflect the most important reason why Cand was chosen. Rather
it reflects the most recent reason why it beat some other potential
candidate, which could have been for some low priority tie breaker
reason.
I have seen this cause problems where TopCand is a good candidate, but
because TopCand.Reason is ORDER (which is very low priority) it is
repeatedly ignored in favour of a mediocre BotCand. This is not how
bidirectional scheduling is supposed to work.
To fix this I changed the code to always compare TopCand and BotCand
directly, like the generic implementation of pickNodeBidirectional does.
This removes some uncommented AMDGPU-specific logic; if this logic turns
out to be important then perhaps it could be moved into an override of
tryCandidate instead.
Graphics shader benchmarking on gfx10 shows a lot more positive than
negative effects from this change.
Reviewers: arsenm, tstellar, rampitec, kzhuravl, vpykhtin, dstuttard, tpr, atrick, MatzeB
Subscribers: jvesely, wdng, nhaehnle, yaxunl, t-tye, hiraditya, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D68338
2019-10-07 22:33:59 +08:00
|
|
|
; GFX9-NEXT: v_xor_b32_e32 v0, v0, v4
|
|
|
|
; GFX9-NEXT: v_sub_u32_e32 v0, v0, v4
|
2020-02-18 07:16:59 +08:00
|
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
|
|
%d = srem i32 %a, %b
|
|
|
|
ret i32 %d
|
|
|
|
}
|
|
|
|
|
|
|
|
define i32 @urem32(i32 %a, i32 %b) {
|
|
|
|
; GFX9-LABEL: urem32:
|
|
|
|
; GFX9: ; %bb.0:
|
|
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
|
|
; GFX9-NEXT: v_cvt_f32_u32_e32 v2, v1
|
2020-06-22 22:27:37 +08:00
|
|
|
; GFX9-NEXT: v_sub_u32_e32 v3, 0, v1
|
2020-02-18 07:16:59 +08:00
|
|
|
; GFX9-NEXT: v_rcp_iflag_f32_e32 v2, v2
|
2020-06-22 22:27:37 +08:00
|
|
|
; GFX9-NEXT: v_mul_f32_e32 v2, 0x4f7ffffe, v2
|
2020-02-18 07:16:59 +08:00
|
|
|
; GFX9-NEXT: v_cvt_u32_f32_e32 v2, v2
|
2020-06-22 22:27:37 +08:00
|
|
|
; GFX9-NEXT: v_mul_lo_u32 v3, v3, v2
|
|
|
|
; GFX9-NEXT: v_mul_hi_u32 v3, v2, v3
|
|
|
|
; GFX9-NEXT: v_add_u32_e32 v2, v2, v3
|
|
|
|
; GFX9-NEXT: v_mul_hi_u32 v2, v0, v2
|
2020-02-18 07:16:59 +08:00
|
|
|
; GFX9-NEXT: v_mul_lo_u32 v2, v2, v1
|
2020-06-22 22:27:37 +08:00
|
|
|
; GFX9-NEXT: v_sub_u32_e32 v0, v0, v2
|
|
|
|
; GFX9-NEXT: v_sub_u32_e32 v2, v0, v1
|
|
|
|
; GFX9-NEXT: v_cmp_ge_u32_e32 vcc, v0, v1
|
|
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
|
|
|
|
; GFX9-NEXT: v_sub_u32_e32 v2, v0, v1
|
|
|
|
; GFX9-NEXT: v_cmp_ge_u32_e32 vcc, v0, v1
|
|
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
|
2020-02-18 07:16:59 +08:00
|
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
|
|
%d = urem i32 %a, %b
|
|
|
|
ret i32 %d
|
|
|
|
}
|
|
|
|
|
|
|
|
define <2 x i64> @sdivrem64(i64 %a, i64 %b) {
|
|
|
|
; GFX9-LABEL: sdivrem64:
|
|
|
|
; GFX9: ; %bb.0:
|
|
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
2021-04-19 10:45:41 +08:00
|
|
|
; GFX9-NEXT: v_or_b32_e32 v5, v1, v3
|
|
|
|
; GFX9-NEXT: v_mov_b32_e32 v4, 0
|
|
|
|
; GFX9-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
|
|
|
|
; GFX9-NEXT: ; implicit-def: $vgpr6_vgpr7
|
|
|
|
; GFX9-NEXT: ; implicit-def: $vgpr4_vgpr5
|
2020-02-18 07:16:59 +08:00
|
|
|
; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
|
[AMDGPU] Extend macro fusion for ADDC and SUBB to SUBBREV
Summary:
There's a lot of test case churn but the overall effect is to increase
the number of back-to-back v_sub,v_subbrev pairs, which can execute with
no delay even on gfx10.
Reviewers: arsenm, rampitec, nhaehnle
Subscribers: kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, hiraditya, kerbowa, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D75999
2020-03-12 01:01:24 +08:00
|
|
|
; GFX9-NEXT: s_xor_b64 s[10:11], exec, s[4:5]
|
2020-02-18 07:16:59 +08:00
|
|
|
; GFX9-NEXT: s_cbranch_execz BB8_2
|
|
|
|
; GFX9-NEXT: ; %bb.1:
|
2021-04-19 10:45:41 +08:00
|
|
|
; GFX9-NEXT: v_ashrrev_i32_e32 v4, 31, v3
|
|
|
|
; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v2, v4
|
|
|
|
; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, v3, v4, vcc
|
|
|
|
; GFX9-NEXT: v_xor_b32_e32 v2, v2, v4
|
|
|
|
; GFX9-NEXT: v_xor_b32_e32 v3, v3, v4
|
|
|
|
; GFX9-NEXT: v_cvt_f32_u32_e32 v5, v2
|
|
|
|
; GFX9-NEXT: v_cvt_f32_u32_e32 v6, v3
|
|
|
|
; GFX9-NEXT: v_sub_co_u32_e32 v7, vcc, 0, v2
|
|
|
|
; GFX9-NEXT: v_subb_co_u32_e32 v8, vcc, 0, v3, vcc
|
2020-02-18 07:16:59 +08:00
|
|
|
; GFX9-NEXT: v_mov_b32_e32 v15, 0
|
2021-04-19 10:45:41 +08:00
|
|
|
; GFX9-NEXT: v_mac_f32_e32 v5, 0x4f800000, v6
|
|
|
|
; GFX9-NEXT: v_rcp_f32_e32 v5, v5
|
|
|
|
; GFX9-NEXT: v_mov_b32_e32 v14, 0
|
|
|
|
; GFX9-NEXT: v_mul_f32_e32 v5, 0x5f7ffffc, v5
|
|
|
|
; GFX9-NEXT: v_mul_f32_e32 v6, 0x2f800000, v5
|
|
|
|
; GFX9-NEXT: v_trunc_f32_e32 v6, v6
|
|
|
|
; GFX9-NEXT: v_mac_f32_e32 v5, 0xcf800000, v6
|
|
|
|
; GFX9-NEXT: v_cvt_u32_f32_e32 v5, v5
|
|
|
|
; GFX9-NEXT: v_cvt_u32_f32_e32 v6, v6
|
|
|
|
; GFX9-NEXT: v_mul_lo_u32 v9, v8, v5
|
|
|
|
; GFX9-NEXT: v_mul_hi_u32 v10, v7, v5
|
|
|
|
; GFX9-NEXT: v_mul_lo_u32 v11, v7, v6
|
|
|
|
; GFX9-NEXT: v_mul_lo_u32 v12, v7, v5
|
|
|
|
; GFX9-NEXT: v_add3_u32 v9, v10, v11, v9
|
|
|
|
; GFX9-NEXT: v_mul_lo_u32 v11, v5, v9
|
|
|
|
; GFX9-NEXT: v_mul_hi_u32 v13, v5, v12
|
|
|
|
; GFX9-NEXT: v_mul_hi_u32 v10, v5, v9
|
|
|
|
; GFX9-NEXT: v_mul_hi_u32 v16, v6, v9
|
|
|
|
; GFX9-NEXT: v_mul_lo_u32 v9, v6, v9
|
|
|
|
; GFX9-NEXT: v_add_co_u32_e32 v11, vcc, v13, v11
|
|
|
|
; GFX9-NEXT: v_mul_lo_u32 v13, v6, v12
|
|
|
|
; GFX9-NEXT: v_mul_hi_u32 v12, v6, v12
|
|
|
|
; GFX9-NEXT: v_addc_co_u32_e32 v10, vcc, v15, v10, vcc
|
|
|
|
; GFX9-NEXT: v_add_co_u32_e32 v11, vcc, v11, v13
|
|
|
|
; GFX9-NEXT: v_addc_co_u32_e32 v10, vcc, v10, v12, vcc
|
|
|
|
; GFX9-NEXT: v_addc_co_u32_e32 v11, vcc, v16, v14, vcc
|
|
|
|
; GFX9-NEXT: v_add_co_u32_e32 v9, vcc, v10, v9
|
|
|
|
; GFX9-NEXT: v_add_co_u32_e64 v5, s[4:5], v5, v9
|
|
|
|
; GFX9-NEXT: v_addc_co_u32_e32 v10, vcc, v15, v11, vcc
|
|
|
|
; GFX9-NEXT: v_addc_co_u32_e64 v9, vcc, v6, v10, s[4:5]
|
|
|
|
; GFX9-NEXT: v_mul_lo_u32 v11, v7, v9
|
|
|
|
; GFX9-NEXT: v_mul_hi_u32 v12, v7, v5
|
|
|
|
; GFX9-NEXT: v_mul_lo_u32 v8, v8, v5
|
|
|
|
; GFX9-NEXT: v_mul_lo_u32 v7, v7, v5
|
|
|
|
; GFX9-NEXT: v_add_u32_e32 v6, v6, v10
|
|
|
|
; GFX9-NEXT: v_add3_u32 v8, v12, v11, v8
|
|
|
|
; GFX9-NEXT: v_mul_lo_u32 v13, v5, v8
|
|
|
|
; GFX9-NEXT: v_mul_hi_u32 v16, v5, v7
|
|
|
|
; GFX9-NEXT: v_mul_hi_u32 v17, v5, v8
|
2020-02-18 07:16:59 +08:00
|
|
|
; GFX9-NEXT: v_mul_hi_u32 v12, v9, v7
|
2021-04-19 10:45:41 +08:00
|
|
|
; GFX9-NEXT: v_mul_lo_u32 v7, v9, v7
|
|
|
|
; GFX9-NEXT: v_add_co_u32_e32 v13, vcc, v16, v13
|
|
|
|
; GFX9-NEXT: v_mul_hi_u32 v11, v9, v8
|
|
|
|
; GFX9-NEXT: v_addc_co_u32_e32 v16, vcc, v15, v17, vcc
|
|
|
|
; GFX9-NEXT: v_mul_lo_u32 v8, v9, v8
|
|
|
|
; GFX9-NEXT: v_add_co_u32_e32 v7, vcc, v13, v7
|
|
|
|
; GFX9-NEXT: v_addc_co_u32_e32 v7, vcc, v16, v12, vcc
|
|
|
|
; GFX9-NEXT: v_addc_co_u32_e32 v9, vcc, v11, v14, vcc
|
|
|
|
; GFX9-NEXT: v_add_co_u32_e32 v7, vcc, v7, v8
|
|
|
|
; GFX9-NEXT: v_addc_co_u32_e32 v8, vcc, v15, v9, vcc
|
|
|
|
; GFX9-NEXT: v_addc_co_u32_e64 v6, vcc, v6, v8, s[4:5]
|
|
|
|
; GFX9-NEXT: v_add_co_u32_e32 v5, vcc, v5, v7
|
|
|
|
; GFX9-NEXT: v_addc_co_u32_e32 v6, vcc, 0, v6, vcc
|
|
|
|
; GFX9-NEXT: v_ashrrev_i32_e32 v7, 31, v1
|
|
|
|
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, v0, v7
|
|
|
|
; GFX9-NEXT: v_xor_b32_e32 v0, v0, v7
|
|
|
|
; GFX9-NEXT: v_mul_lo_u32 v8, v0, v6
|
|
|
|
; GFX9-NEXT: v_mul_hi_u32 v9, v0, v5
|
|
|
|
; GFX9-NEXT: v_mul_hi_u32 v10, v0, v6
|
|
|
|
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v1, v7, vcc
|
|
|
|
; GFX9-NEXT: v_xor_b32_e32 v1, v1, v7
|
|
|
|
; GFX9-NEXT: v_add_co_u32_e32 v8, vcc, v9, v8
|
|
|
|
; GFX9-NEXT: v_addc_co_u32_e32 v9, vcc, v15, v10, vcc
|
|
|
|
; GFX9-NEXT: v_mul_lo_u32 v10, v1, v5
|
|
|
|
; GFX9-NEXT: v_mul_hi_u32 v5, v1, v5
|
|
|
|
; GFX9-NEXT: v_mul_hi_u32 v11, v1, v6
|
|
|
|
; GFX9-NEXT: v_mul_lo_u32 v6, v1, v6
|
|
|
|
; GFX9-NEXT: v_add_co_u32_e32 v8, vcc, v8, v10
|
|
|
|
; GFX9-NEXT: v_addc_co_u32_e32 v5, vcc, v9, v5, vcc
|
|
|
|
; GFX9-NEXT: v_addc_co_u32_e32 v8, vcc, v11, v14, vcc
|
|
|
|
; GFX9-NEXT: v_add_co_u32_e32 v5, vcc, v5, v6
|
|
|
|
; GFX9-NEXT: v_addc_co_u32_e32 v6, vcc, v15, v8, vcc
|
|
|
|
; GFX9-NEXT: v_mul_lo_u32 v8, v3, v5
|
|
|
|
; GFX9-NEXT: v_mul_lo_u32 v9, v2, v6
|
|
|
|
; GFX9-NEXT: v_mul_hi_u32 v10, v2, v5
|
|
|
|
; GFX9-NEXT: v_mul_lo_u32 v11, v2, v5
|
|
|
|
; GFX9-NEXT: v_add3_u32 v8, v10, v9, v8
|
|
|
|
; GFX9-NEXT: v_sub_u32_e32 v9, v1, v8
|
|
|
|
; GFX9-NEXT: v_sub_co_u32_e32 v0, vcc, v0, v11
|
|
|
|
; GFX9-NEXT: v_subb_co_u32_e64 v9, s[4:5], v9, v3, vcc
|
|
|
|
; GFX9-NEXT: v_sub_co_u32_e64 v10, s[4:5], v0, v2
|
|
|
|
; GFX9-NEXT: v_subbrev_co_u32_e64 v11, s[6:7], 0, v9, s[4:5]
|
|
|
|
; GFX9-NEXT: v_cmp_ge_u32_e64 s[6:7], v11, v3
|
|
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v12, 0, -1, s[6:7]
|
|
|
|
; GFX9-NEXT: v_cmp_ge_u32_e64 s[6:7], v10, v2
|
|
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v13, 0, -1, s[6:7]
|
|
|
|
; GFX9-NEXT: v_cmp_eq_u32_e64 s[6:7], v11, v3
|
|
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v12, v12, v13, s[6:7]
|
|
|
|
; GFX9-NEXT: v_add_co_u32_e64 v13, s[6:7], 2, v5
|
|
|
|
; GFX9-NEXT: v_addc_co_u32_e64 v14, s[6:7], 0, v6, s[6:7]
|
|
|
|
; GFX9-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v8, vcc
|
|
|
|
; GFX9-NEXT: v_add_co_u32_e64 v15, s[6:7], 1, v5
|
|
|
|
; GFX9-NEXT: v_addc_co_u32_e64 v16, s[6:7], 0, v6, s[6:7]
|
|
|
|
; GFX9-NEXT: v_cmp_ge_u32_e32 vcc, v1, v3
|
|
|
|
; GFX9-NEXT: v_cmp_ne_u32_e64 s[6:7], 0, v12
|
|
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v8, 0, -1, vcc
|
|
|
|
; GFX9-NEXT: v_cmp_ge_u32_e32 vcc, v0, v2
|
|
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v12, v16, v14, s[6:7]
|
|
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v14, 0, -1, vcc
|
|
|
|
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v1, v3
|
|
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v8, v8, v14, vcc
|
|
|
|
; GFX9-NEXT: v_subb_co_u32_e64 v3, s[4:5], v9, v3, s[4:5]
|
|
|
|
; GFX9-NEXT: v_sub_co_u32_e64 v2, s[4:5], v10, v2
|
|
|
|
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
|
|
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v8, v15, v13, s[6:7]
|
|
|
|
; GFX9-NEXT: v_subbrev_co_u32_e64 v3, s[4:5], 0, v3, s[4:5]
|
|
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v5, v5, v8, vcc
|
|
|
|
; GFX9-NEXT: v_xor_b32_e32 v8, v7, v4
|
|
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v2, v10, v2, s[6:7]
|
|
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v6, v6, v12, vcc
|
|
|
|
; GFX9-NEXT: v_xor_b32_e32 v4, v5, v8
|
|
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
|
|
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v3, v11, v3, s[6:7]
|
|
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
|
|
|
|
; GFX9-NEXT: v_xor_b32_e32 v6, v6, v8
|
|
|
|
; GFX9-NEXT: v_sub_co_u32_e64 v4, s[8:9], v4, v8
|
|
|
|
; GFX9-NEXT: v_xor_b32_e32 v0, v0, v7
|
|
|
|
; GFX9-NEXT: v_subb_co_u32_e64 v5, s[8:9], v6, v8, s[8:9]
|
|
|
|
; GFX9-NEXT: v_xor_b32_e32 v1, v1, v7
|
|
|
|
; GFX9-NEXT: v_sub_co_u32_e32 v6, vcc, v0, v7
|
|
|
|
; GFX9-NEXT: v_subb_co_u32_e32 v7, vcc, v1, v7, vcc
|
|
|
|
; GFX9-NEXT: ; implicit-def: $vgpr2_vgpr3
|
2021-07-21 21:20:10 +08:00
|
|
|
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1
|
2020-02-18 07:16:59 +08:00
|
|
|
; GFX9-NEXT: BB8_2: ; %Flow
|
2020-06-23 00:11:58 +08:00
|
|
|
; GFX9-NEXT: s_or_saveexec_b64 s[4:5], s[10:11]
|
|
|
|
; GFX9-NEXT: s_xor_b64 exec, exec, s[4:5]
|
2020-02-18 07:16:59 +08:00
|
|
|
; GFX9-NEXT: s_cbranch_execz BB8_4
|
|
|
|
; GFX9-NEXT: ; %bb.3:
|
|
|
|
; GFX9-NEXT: v_cvt_f32_u32_e32 v1, v2
|
2020-06-23 00:11:58 +08:00
|
|
|
; GFX9-NEXT: v_sub_u32_e32 v3, 0, v2
|
2021-04-19 10:45:41 +08:00
|
|
|
; GFX9-NEXT: v_mov_b32_e32 v5, 0
|
|
|
|
; GFX9-NEXT: v_mov_b32_e32 v7, v5
|
2020-02-18 07:16:59 +08:00
|
|
|
; GFX9-NEXT: v_rcp_iflag_f32_e32 v1, v1
|
2020-06-23 00:11:58 +08:00
|
|
|
; GFX9-NEXT: v_mul_f32_e32 v1, 0x4f7ffffe, v1
|
2020-02-18 07:16:59 +08:00
|
|
|
; GFX9-NEXT: v_cvt_u32_f32_e32 v1, v1
|
2020-06-23 00:11:58 +08:00
|
|
|
; GFX9-NEXT: v_mul_lo_u32 v3, v3, v1
|
|
|
|
; GFX9-NEXT: v_mul_hi_u32 v3, v1, v3
|
|
|
|
; GFX9-NEXT: v_add_u32_e32 v1, v1, v3
|
|
|
|
; GFX9-NEXT: v_mul_hi_u32 v1, v0, v1
|
2020-02-18 07:16:59 +08:00
|
|
|
; GFX9-NEXT: v_mul_lo_u32 v3, v1, v2
|
2021-04-19 10:45:41 +08:00
|
|
|
; GFX9-NEXT: v_add_u32_e32 v4, 1, v1
|
2020-06-23 00:11:58 +08:00
|
|
|
; GFX9-NEXT: v_sub_u32_e32 v0, v0, v3
|
|
|
|
; GFX9-NEXT: v_cmp_ge_u32_e32 vcc, v0, v2
|
|
|
|
; GFX9-NEXT: v_sub_u32_e32 v3, v0, v2
|
|
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc
|
2021-04-19 10:45:41 +08:00
|
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc
|
2020-06-23 00:11:58 +08:00
|
|
|
; GFX9-NEXT: v_sub_u32_e32 v3, v0, v2
|
|
|
|
; GFX9-NEXT: v_cmp_ge_u32_e32 vcc, v0, v2
|
2021-04-19 10:45:41 +08:00
|
|
|
; GFX9-NEXT: v_add_u32_e32 v4, 1, v1
|
|
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v6, v0, v3, vcc
|
|
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v4, v1, v4, vcc
|
2020-02-18 07:16:59 +08:00
|
|
|
; GFX9-NEXT: BB8_4:
|
2020-06-23 00:11:58 +08:00
|
|
|
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
|
2021-04-19 10:45:41 +08:00
|
|
|
; GFX9-NEXT: v_mov_b32_e32 v0, v4
|
|
|
|
; GFX9-NEXT: v_mov_b32_e32 v1, v5
|
|
|
|
; GFX9-NEXT: v_mov_b32_e32 v2, v6
|
|
|
|
; GFX9-NEXT: v_mov_b32_e32 v3, v7
|
2020-02-18 07:16:59 +08:00
|
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
|
|
%d = sdiv i64 %a, %b
|
|
|
|
%r = srem i64 %a, %b
|
|
|
|
%ins.0 = insertelement <2 x i64> undef, i64 %d, i32 0
|
|
|
|
%ins.1 = insertelement <2 x i64> %ins.0, i64 %r, i32 1
|
|
|
|
ret <2 x i64> %ins.1
|
|
|
|
}
|
|
|
|
|
|
|
|
define <2 x i64> @udivrem64(i64 %a, i64 %b) {
; GFX9-LABEL: udivrem64:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_or_b32_e32 v5, v1, v3
; GFX9-NEXT: v_mov_b32_e32 v4, 0
; GFX9-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
; GFX9-NEXT: ; implicit-def: $vgpr6_vgpr7
; GFX9-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX9-NEXT: s_xor_b64 s[8:9], exec, s[4:5]
; GFX9-NEXT: s_cbranch_execz BB9_2
; GFX9-NEXT: ; %bb.1:
; GFX9-NEXT: v_cvt_f32_u32_e32 v4, v2
; GFX9-NEXT: v_cvt_f32_u32_e32 v5, v3
; GFX9-NEXT: v_sub_co_u32_e32 v6, vcc, 0, v2
; GFX9-NEXT: v_subb_co_u32_e32 v7, vcc, 0, v3, vcc
; GFX9-NEXT: v_mov_b32_e32 v13, 0
; GFX9-NEXT: v_mac_f32_e32 v4, 0x4f800000, v5
; GFX9-NEXT: v_rcp_f32_e32 v4, v4
; GFX9-NEXT: v_mov_b32_e32 v12, 0
; GFX9-NEXT: v_mul_f32_e32 v4, 0x5f7ffffc, v4
; GFX9-NEXT: v_mul_f32_e32 v5, 0x2f800000, v4
; GFX9-NEXT: v_trunc_f32_e32 v5, v5
; GFX9-NEXT: v_mac_f32_e32 v4, 0xcf800000, v5
; GFX9-NEXT: v_cvt_u32_f32_e32 v4, v4
; GFX9-NEXT: v_cvt_u32_f32_e32 v5, v5
; GFX9-NEXT: v_mul_lo_u32 v9, v7, v4
; GFX9-NEXT: v_mul_lo_u32 v8, v6, v5
; GFX9-NEXT: v_mul_hi_u32 v10, v6, v4
; GFX9-NEXT: v_mul_lo_u32 v11, v6, v4
; GFX9-NEXT: v_add3_u32 v8, v10, v8, v9
; GFX9-NEXT: v_mul_hi_u32 v9, v4, v11
; GFX9-NEXT: v_mul_lo_u32 v10, v4, v8
; GFX9-NEXT: v_mul_hi_u32 v14, v4, v8
; GFX9-NEXT: v_mul_hi_u32 v15, v5, v8
; GFX9-NEXT: v_mul_lo_u32 v8, v5, v8
; GFX9-NEXT: v_add_co_u32_e32 v9, vcc, v9, v10
; GFX9-NEXT: v_addc_co_u32_e32 v10, vcc, v13, v14, vcc
; GFX9-NEXT: v_mul_lo_u32 v14, v5, v11
; GFX9-NEXT: v_mul_hi_u32 v11, v5, v11
; GFX9-NEXT: v_add_co_u32_e32 v9, vcc, v9, v14
; GFX9-NEXT: v_addc_co_u32_e32 v9, vcc, v10, v11, vcc
; GFX9-NEXT: v_addc_co_u32_e32 v10, vcc, v15, v12, vcc
; GFX9-NEXT: v_add_co_u32_e32 v8, vcc, v9, v8
; GFX9-NEXT: v_add_co_u32_e64 v4, s[4:5], v4, v8
; GFX9-NEXT: v_addc_co_u32_e32 v9, vcc, v13, v10, vcc
; GFX9-NEXT: v_addc_co_u32_e64 v8, vcc, v5, v9, s[4:5]
; GFX9-NEXT: v_mul_lo_u32 v10, v6, v8
; GFX9-NEXT: v_mul_hi_u32 v11, v6, v4
; GFX9-NEXT: v_mul_lo_u32 v7, v7, v4
; GFX9-NEXT: v_mul_lo_u32 v6, v6, v4
; GFX9-NEXT: v_add_u32_e32 v5, v5, v9
; GFX9-NEXT: v_add3_u32 v7, v11, v10, v7
; GFX9-NEXT: v_mul_lo_u32 v10, v4, v7
; GFX9-NEXT: v_mul_hi_u32 v11, v4, v6
; GFX9-NEXT: v_mul_hi_u32 v15, v4, v7
; GFX9-NEXT: v_mul_hi_u32 v14, v8, v7
; GFX9-NEXT: v_mul_lo_u32 v7, v8, v7
; GFX9-NEXT: v_add_co_u32_e32 v10, vcc, v11, v10
; GFX9-NEXT: v_mul_hi_u32 v11, v8, v6
; GFX9-NEXT: v_mul_lo_u32 v6, v8, v6
; GFX9-NEXT: v_addc_co_u32_e32 v15, vcc, v13, v15, vcc
; GFX9-NEXT: v_add_co_u32_e32 v6, vcc, v10, v6
; GFX9-NEXT: v_addc_co_u32_e32 v6, vcc, v15, v11, vcc
; GFX9-NEXT: v_addc_co_u32_e32 v8, vcc, v14, v12, vcc
; GFX9-NEXT: v_add_co_u32_e32 v6, vcc, v6, v7
; GFX9-NEXT: v_addc_co_u32_e32 v7, vcc, v13, v8, vcc
; GFX9-NEXT: v_addc_co_u32_e64 v5, vcc, v5, v7, s[4:5]
; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v4, v6
; GFX9-NEXT: v_addc_co_u32_e32 v5, vcc, 0, v5, vcc
; GFX9-NEXT: v_mul_lo_u32 v6, v0, v5
; GFX9-NEXT: v_mul_hi_u32 v7, v0, v4
; GFX9-NEXT: v_mul_hi_u32 v8, v0, v5
; GFX9-NEXT: v_mul_hi_u32 v9, v1, v5
; GFX9-NEXT: v_mul_lo_u32 v5, v1, v5
; GFX9-NEXT: v_add_co_u32_e32 v6, vcc, v7, v6
; GFX9-NEXT: v_addc_co_u32_e32 v7, vcc, v13, v8, vcc
; GFX9-NEXT: v_mul_lo_u32 v8, v1, v4
; GFX9-NEXT: v_mul_hi_u32 v4, v1, v4
; GFX9-NEXT: v_add_co_u32_e32 v6, vcc, v6, v8
; GFX9-NEXT: v_addc_co_u32_e32 v4, vcc, v7, v4, vcc
; GFX9-NEXT: v_addc_co_u32_e32 v6, vcc, v9, v12, vcc
; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v4, v5
; GFX9-NEXT: v_addc_co_u32_e32 v5, vcc, v13, v6, vcc
; GFX9-NEXT: v_mul_lo_u32 v6, v3, v4
; GFX9-NEXT: v_mul_lo_u32 v7, v2, v5
; GFX9-NEXT: v_mul_hi_u32 v8, v2, v4
; GFX9-NEXT: v_mul_lo_u32 v9, v2, v4
; GFX9-NEXT: v_add3_u32 v6, v8, v7, v6
; GFX9-NEXT: v_sub_u32_e32 v7, v1, v6
; GFX9-NEXT: v_sub_co_u32_e32 v0, vcc, v0, v9
; GFX9-NEXT: v_subb_co_u32_e64 v7, s[4:5], v7, v3, vcc
; GFX9-NEXT: v_sub_co_u32_e64 v8, s[4:5], v0, v2
; GFX9-NEXT: v_subbrev_co_u32_e64 v9, s[6:7], 0, v7, s[4:5]
; GFX9-NEXT: v_cmp_ge_u32_e64 s[6:7], v9, v3
; GFX9-NEXT: v_cndmask_b32_e64 v10, 0, -1, s[6:7]
; GFX9-NEXT: v_cmp_ge_u32_e64 s[6:7], v8, v2
; GFX9-NEXT: v_cndmask_b32_e64 v11, 0, -1, s[6:7]
; GFX9-NEXT: v_cmp_eq_u32_e64 s[6:7], v9, v3
; GFX9-NEXT: v_cndmask_b32_e64 v10, v10, v11, s[6:7]
; GFX9-NEXT: v_add_co_u32_e64 v11, s[6:7], 2, v4
; GFX9-NEXT: v_addc_co_u32_e64 v12, s[6:7], 0, v5, s[6:7]
; GFX9-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v6, vcc
; GFX9-NEXT: v_add_co_u32_e64 v13, s[6:7], 1, v4
; GFX9-NEXT: v_addc_co_u32_e64 v14, s[6:7], 0, v5, s[6:7]
; GFX9-NEXT: v_cmp_ge_u32_e32 vcc, v1, v3
; GFX9-NEXT: v_cmp_ne_u32_e64 s[6:7], 0, v10
; GFX9-NEXT: v_cndmask_b32_e64 v6, 0, -1, vcc
; GFX9-NEXT: v_cmp_ge_u32_e32 vcc, v0, v2
; GFX9-NEXT: v_cndmask_b32_e64 v10, v14, v12, s[6:7]
; GFX9-NEXT: v_cndmask_b32_e64 v12, 0, -1, vcc
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v1, v3
; GFX9-NEXT: v_subb_co_u32_e64 v3, s[4:5], v7, v3, s[4:5]
; GFX9-NEXT: v_sub_co_u32_e64 v2, s[4:5], v8, v2
; GFX9-NEXT: v_cndmask_b32_e32 v6, v6, v12, vcc
; GFX9-NEXT: v_subbrev_co_u32_e64 v3, s[4:5], 0, v3, s[4:5]
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v6
; GFX9-NEXT: v_cndmask_b32_e64 v3, v9, v3, s[6:7]
; GFX9-NEXT: v_cndmask_b32_e64 v6, v13, v11, s[6:7]
; GFX9-NEXT: v_cndmask_b32_e32 v7, v1, v3, vcc
; GFX9-NEXT: v_cndmask_b32_e64 v1, v8, v2, s[6:7]
; GFX9-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v6, v0, v1, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v5, v5, v10, vcc
; GFX9-NEXT: ; implicit-def: $vgpr2_vgpr3
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX9-NEXT: BB9_2: ; %Flow
; GFX9-NEXT: s_or_saveexec_b64 s[4:5], s[8:9]
; GFX9-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execz BB9_4
; GFX9-NEXT: ; %bb.3:
; GFX9-NEXT: v_cvt_f32_u32_e32 v1, v2
; GFX9-NEXT: v_sub_u32_e32 v3, 0, v2
; GFX9-NEXT: v_mov_b32_e32 v5, 0
; GFX9-NEXT: v_mov_b32_e32 v7, v5
; GFX9-NEXT: v_rcp_iflag_f32_e32 v1, v1
; GFX9-NEXT: v_mul_f32_e32 v1, 0x4f7ffffe, v1
; GFX9-NEXT: v_cvt_u32_f32_e32 v1, v1
; GFX9-NEXT: v_mul_lo_u32 v3, v3, v1
; GFX9-NEXT: v_mul_hi_u32 v3, v1, v3
; GFX9-NEXT: v_add_u32_e32 v1, v1, v3
; GFX9-NEXT: v_mul_hi_u32 v1, v0, v1
; GFX9-NEXT: v_mul_lo_u32 v3, v1, v2
; GFX9-NEXT: v_add_u32_e32 v4, 1, v1
; GFX9-NEXT: v_sub_u32_e32 v0, v0, v3
; GFX9-NEXT: v_cmp_ge_u32_e32 vcc, v0, v2
; GFX9-NEXT: v_sub_u32_e32 v3, v0, v2
; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc
; GFX9-NEXT: v_sub_u32_e32 v3, v0, v2
; GFX9-NEXT: v_cmp_ge_u32_e32 vcc, v0, v2
; GFX9-NEXT: v_add_u32_e32 v4, 1, v1
; GFX9-NEXT: v_cndmask_b32_e32 v6, v0, v3, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v4, v1, v4, vcc
; GFX9-NEXT: BB9_4:
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v0, v4
; GFX9-NEXT: v_mov_b32_e32 v1, v5
; GFX9-NEXT: v_mov_b32_e32 v2, v6
; GFX9-NEXT: v_mov_b32_e32 v3, v7
; GFX9-NEXT: s_setpc_b64 s[30:31]
%d = udiv i64 %a, %b
%r = urem i64 %a, %b
%ins.0 = insertelement <2 x i64> undef, i64 %d, i32 0
%ins.1 = insertelement <2 x i64> %ins.0, i64 %r, i32 1
ret <2 x i64> %ins.1
}
define i64 @sdiv64_known32(i64 %a, i64 %b) {
; GFX9-LABEL: sdiv64_known32:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cvt_f32_u32_e32 v0, v3
; GFX9-NEXT: v_cvt_f32_u32_e32 v1, v1
; GFX9-NEXT: v_rcp_iflag_f32_e32 v2, v0
; GFX9-NEXT: v_mul_f32_e32 v2, v1, v2
; GFX9-NEXT: v_trunc_f32_e32 v2, v2
; GFX9-NEXT: v_cvt_u32_f32_e32 v3, v2
; GFX9-NEXT: v_mad_f32 v1, -v2, v0, v1
; GFX9-NEXT: v_cmp_ge_f32_e64 vcc, |v1|, v0
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: v_addc_co_u32_e32 v0, vcc, 0, v3, vcc
; GFX9-NEXT: v_and_b32_e32 v0, 0x7fffffff, v0
; GFX9-NEXT: s_setpc_b64 s[30:31]
%a.ext = ashr i64 %a, 32
%b.ext = ashr i64 %b, 32
%d = udiv i64 %a.ext, %b.ext
ret i64 %d
}
define i64 @udiv64_known32(i64 %a, i64 %b) {
; GFX9-LABEL: udiv64_known32:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cvt_f32_u32_e32 v1, v2
; GFX9-NEXT: v_cvt_f32_u32_e32 v0, v0
; GFX9-NEXT: v_rcp_iflag_f32_e32 v2, v1
; GFX9-NEXT: v_mul_f32_e32 v2, v0, v2
; GFX9-NEXT: v_trunc_f32_e32 v2, v2
; GFX9-NEXT: v_cvt_u32_f32_e32 v3, v2
; GFX9-NEXT: v_mad_f32 v0, -v2, v1, v0
; GFX9-NEXT: v_cmp_ge_f32_e64 vcc, |v0|, v1
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: v_addc_co_u32_e32 v0, vcc, 0, v3, vcc
; GFX9-NEXT: s_setpc_b64 s[30:31]
%a.mask = and i64 %a, 4294967295
%b.mask = and i64 %b, 4294967295
%d = udiv i64 %a.mask, %b.mask
ret i64 %d
}