; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -amdgpu-scalarize-global-loads=false -enable-misched=0 -march=amdgcn -mattr=+mad-mac-f32-insts -verify-machineinstrs < %s | FileCheck --check-prefix=SI %s
; RUN: llc -amdgpu-scalarize-global-loads=false -enable-misched=0 -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck --check-prefix=CI %s
; RUN: llc -amdgpu-scalarize-global-loads=false -enable-misched=0 -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck --check-prefix=VI %s
; RUN: llc -amdgpu-scalarize-global-loads=false -enable-misched=0 -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck --check-prefix=GFX9 %s
; RUN: llc -amdgpu-scalarize-global-loads=false -enable-misched=0 -march=amdgcn -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck --check-prefix=GFX10 %s

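; frem has no hardware instruction: it is expanded inline as r = x - trunc(x / y) * y.
; The checks below verify the division, trunc, and final fma each target emits for f16.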
define amdgpu_kernel void @frem_f16(half addrspace(1)* %out, half addrspace(1)* %in1,
; SI-LABEL: frem_f16:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
; SI-NEXT: s_mov_b32 s11, 0xf000
; SI-NEXT: s_mov_b32 s10, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s8, s4
; SI-NEXT: s_mov_b32 s9, s5
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s7
; SI-NEXT: s_mov_b32 s6, s10
; SI-NEXT: s_mov_b32 s7, s11
; SI-NEXT: s_mov_b32 s2, s10
; SI-NEXT: s_mov_b32 s3, s11
; SI-NEXT: buffer_load_ushort v0, off, s[4:7], 0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
; SI-NEXT: buffer_load_ushort v1, off, s[0:3], 0 offset:8
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
; SI-NEXT: v_div_scale_f32 v2, vcc, v0, v1, v0
; SI-NEXT: v_div_scale_f32 v3, s[0:1], v1, v1, v0
; SI-NEXT: v_rcp_f32_e32 v4, v3
; SI-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 3
; SI-NEXT: v_fma_f32 v5, -v3, v4, 1.0
; SI-NEXT: v_fma_f32 v4, v5, v4, v4
; SI-NEXT: v_mul_f32_e32 v5, v2, v4
; SI-NEXT: v_fma_f32 v6, -v3, v5, v2
; SI-NEXT: v_fma_f32 v5, v6, v4, v5
; SI-NEXT: v_fma_f32 v2, -v3, v5, v2
; SI-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 0
; SI-NEXT: v_div_fmas_f32 v2, v2, v4, v5
; SI-NEXT: v_div_fixup_f32 v2, v2, v1, v0
; SI-NEXT: v_trunc_f32_e32 v2, v2
; SI-NEXT: v_fma_f32 v0, -v2, v1, v0
; SI-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 2, 2), 0
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
; SI-NEXT: buffer_store_short v0, off, s[8:11], 0
; SI-NEXT: s_endpgm
;
; CI-LABEL: frem_f16:
; CI: ; %bb.0:
; CI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; CI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
; CI-NEXT: s_mov_b32 s11, 0xf000
; CI-NEXT: s_mov_b32 s10, -1
; CI-NEXT: s_mov_b32 s2, s10
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: s_mov_b32 s8, s4
; CI-NEXT: s_mov_b32 s9, s5
; CI-NEXT: s_mov_b32 s4, s6
; CI-NEXT: s_mov_b32 s5, s7
; CI-NEXT: s_mov_b32 s3, s11
; CI-NEXT: s_mov_b32 s6, s10
; CI-NEXT: s_mov_b32 s7, s11
; CI-NEXT: buffer_load_ushort v0, off, s[4:7], 0
; CI-NEXT: buffer_load_ushort v1, off, s[0:3], 0 offset:8
; CI-NEXT: s_waitcnt vmcnt(1)
; CI-NEXT: v_cvt_f32_f16_e32 v0, v0
; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: v_cvt_f32_f16_e32 v1, v1
; CI-NEXT: v_div_scale_f32 v3, s[0:1], v1, v1, v0
; CI-NEXT: v_div_scale_f32 v2, vcc, v0, v1, v0
; CI-NEXT: v_rcp_f32_e32 v4, v3
; CI-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 3
; CI-NEXT: v_fma_f32 v5, -v3, v4, 1.0
; CI-NEXT: v_fma_f32 v4, v5, v4, v4
; CI-NEXT: v_mul_f32_e32 v5, v2, v4
; CI-NEXT: v_fma_f32 v6, -v3, v5, v2
; CI-NEXT: v_fma_f32 v5, v6, v4, v5
; CI-NEXT: v_fma_f32 v2, -v3, v5, v2
; CI-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 0
; CI-NEXT: v_div_fmas_f32 v2, v2, v4, v5
; CI-NEXT: v_div_fixup_f32 v2, v2, v1, v0
; CI-NEXT: v_trunc_f32_e32 v2, v2
; CI-NEXT: v_fma_f32 v0, -v2, v1, v0
; CI-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 2, 2), 0
; CI-NEXT: v_cvt_f16_f32_e32 v0, v0
; CI-NEXT: buffer_store_short v0, off, s[8:11], 0
; CI-NEXT: s_endpgm
;
; VI-LABEL: frem_f16:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v2, s6
; VI-NEXT: s_add_u32 s0, s0, 8
; VI-NEXT: v_mov_b32_e32 v3, s7
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: flat_load_ushort v4, v[2:3]
; VI-NEXT: v_mov_b32_e32 v3, s1
; VI-NEXT: v_mov_b32_e32 v2, s0
; VI-NEXT: flat_load_ushort v2, v[2:3]
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_cvt_f32_f16_e32 v3, v4
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_cvt_f32_f16_e32 v5, v2
; VI-NEXT: v_rcp_f32_e32 v5, v5
; VI-NEXT: v_mul_f32_e32 v3, v3, v5
; VI-NEXT: v_cvt_f16_f32_e32 v3, v3
; VI-NEXT: v_div_fixup_f16 v3, v3, v2, v4
; VI-NEXT: v_trunc_f16_e32 v3, v3
; VI-NEXT: v_fma_f16 v2, -v3, v2, v4
; VI-NEXT: flat_store_short v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: frem_f16:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_load_ushort v1, v0, s[6:7]
; GFX9-NEXT: global_load_ushort v2, v0, s[2:3] offset:8
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_cvt_f32_f16_e32 v3, v1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_cvt_f32_f16_e32 v4, v2
; GFX9-NEXT: v_rcp_f32_e32 v4, v4
; GFX9-NEXT: v_mul_f32_e32 v3, v3, v4
; GFX9-NEXT: v_cvt_f16_f32_e32 v3, v3
; GFX9-NEXT: v_div_fixup_f16 v3, v3, v2, v1
; GFX9-NEXT: v_trunc_f16_e32 v3, v3
; GFX9-NEXT: v_fma_f16 v1, -v3, v2, v1
; GFX9-NEXT: global_store_short v0, v1, s[4:5]
; GFX9-NEXT: s_endpgm
;
; GFX10-LABEL: frem_f16:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_clause 0x1
; GFX10-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX10-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
; GFX10-NEXT: v_mov_b32_e32 v0, 0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_clause 0x1
; GFX10-NEXT: global_load_ushort v1, v0, s[6:7]
; GFX10-NEXT: global_load_ushort v2, v0, s[2:3] offset:8
; GFX10-NEXT: s_waitcnt vmcnt(1)
; GFX10-NEXT: v_cvt_f32_f16_e32 v3, v1
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: v_cvt_f32_f16_e32 v4, v2
; GFX10-NEXT: v_rcp_f32_e32 v4, v4
; GFX10-NEXT: v_mul_f32_e32 v3, v3, v4
; GFX10-NEXT: v_cvt_f16_f32_e32 v3, v3
; GFX10-NEXT: v_div_fixup_f16 v3, v3, v2, v1
; GFX10-NEXT: v_trunc_f16_e32 v3, v3
; GFX10-NEXT: v_fmac_f16_e64 v1, -v3, v2
; GFX10-NEXT: global_store_short v0, v1, s[4:5]
; GFX10-NEXT: s_endpgm
                    half addrspace(1)* %in2) #0 {
  %gep2 = getelementptr half, half addrspace(1)* %in2, i32 4
  %r0 = load half, half addrspace(1)* %in1, align 4
  %r1 = load half, half addrspace(1)* %gep2, align 4
  %r2 = frem half %r0, %r1
  store half %r2, half addrspace(1)* %out, align 4
  ret void
}

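; 'fast' allows the f16 division inside the frem expansion to be approximated with
; v_rcp instead of the full div_scale/div_fixup sequence checked above.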
define amdgpu_kernel void @fast_frem_f16(half addrspace(1)* %out, half addrspace(1)* %in1,
; SI-LABEL: fast_frem_f16:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
; SI-NEXT: s_mov_b32 s11, 0xf000
; SI-NEXT: s_mov_b32 s10, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s8, s4
; SI-NEXT: s_mov_b32 s9, s5
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s7
; SI-NEXT: s_mov_b32 s6, s10
; SI-NEXT: s_mov_b32 s7, s11
; SI-NEXT: s_mov_b32 s2, s10
; SI-NEXT: s_mov_b32 s3, s11
; SI-NEXT: buffer_load_ushort v0, off, s[4:7], 0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
; SI-NEXT: buffer_load_ushort v1, off, s[0:3], 0 offset:8
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
; SI-NEXT: v_rcp_f32_e32 v2, v1
; SI-NEXT: v_mul_f32_e32 v2, v0, v2
; SI-NEXT: v_trunc_f32_e32 v2, v2
; SI-NEXT: v_fma_f32 v0, -v2, v1, v0
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
; SI-NEXT: buffer_store_short v0, off, s[8:11], 0
; SI-NEXT: s_endpgm
;
; CI-LABEL: fast_frem_f16:
; CI: ; %bb.0:
; CI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; CI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
; CI-NEXT: s_mov_b32 s11, 0xf000
; CI-NEXT: s_mov_b32 s10, -1
; CI-NEXT: s_mov_b32 s2, s10
; CI-NEXT: s_mov_b32 s3, s11
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: buffer_load_ushort v1, off, s[0:3], 0 offset:8
; CI-NEXT: s_mov_b32 s8, s4
; CI-NEXT: s_mov_b32 s9, s5
; CI-NEXT: s_mov_b32 s4, s6
; CI-NEXT: s_mov_b32 s5, s7
; CI-NEXT: s_mov_b32 s6, s10
; CI-NEXT: s_mov_b32 s7, s11
; CI-NEXT: buffer_load_ushort v0, off, s[4:7], 0
; CI-NEXT: s_waitcnt vmcnt(1)
; CI-NEXT: v_cvt_f32_f16_e32 v1, v1
; CI-NEXT: v_rcp_f32_e32 v2, v1
; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: v_cvt_f32_f16_e32 v0, v0
; CI-NEXT: v_mul_f32_e32 v2, v0, v2
; CI-NEXT: v_trunc_f32_e32 v2, v2
; CI-NEXT: v_fma_f32 v0, -v2, v1, v0
; CI-NEXT: v_cvt_f16_f32_e32 v0, v0
; CI-NEXT: buffer_store_short v0, off, s[8:11], 0
; CI-NEXT: s_endpgm
;
; VI-LABEL: fast_frem_f16:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v2, s6
; VI-NEXT: s_add_u32 s0, s0, 8
; VI-NEXT: v_mov_b32_e32 v3, s7
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: flat_load_ushort v4, v[2:3]
; VI-NEXT: v_mov_b32_e32 v3, s1
; VI-NEXT: v_mov_b32_e32 v2, s0
; VI-NEXT: flat_load_ushort v2, v[2:3]
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_rcp_f16_e32 v3, v2
; VI-NEXT: v_mul_f16_e32 v3, v4, v3
; VI-NEXT: v_trunc_f16_e32 v3, v3
; VI-NEXT: v_fma_f16 v2, -v3, v2, v4
; VI-NEXT: flat_store_short v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: fast_frem_f16:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_load_ushort v1, v0, s[6:7]
; GFX9-NEXT: global_load_ushort v2, v0, s[2:3] offset:8
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_rcp_f16_e32 v3, v2
; GFX9-NEXT: v_mul_f16_e32 v3, v1, v3
; GFX9-NEXT: v_trunc_f16_e32 v3, v3
; GFX9-NEXT: v_fma_f16 v1, -v3, v2, v1
; GFX9-NEXT: global_store_short v0, v1, s[4:5]
; GFX9-NEXT: s_endpgm
;
; GFX10-LABEL: fast_frem_f16:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_clause 0x1
; GFX10-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX10-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
; GFX10-NEXT: v_mov_b32_e32 v0, 0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_clause 0x1
; GFX10-NEXT: global_load_ushort v1, v0, s[6:7]
; GFX10-NEXT: global_load_ushort v2, v0, s[2:3] offset:8
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: v_rcp_f16_e32 v3, v2
; GFX10-NEXT: v_mul_f16_e32 v3, v1, v3
; GFX10-NEXT: v_trunc_f16_e32 v3, v3
; GFX10-NEXT: v_fmac_f16_e64 v1, -v3, v2
; GFX10-NEXT: global_store_short v0, v1, s[4:5]
; GFX10-NEXT: s_endpgm
                    half addrspace(1)* %in2) #0 {
  %gep2 = getelementptr half, half addrspace(1)* %in2, i32 4
  %r0 = load half, half addrspace(1)* %in1, align 4
  %r1 = load half, half addrspace(1)* %gep2, align 4
  %r2 = frem fast half %r0, %r1
  store half %r2, half addrspace(1)* %out, align 4
  ret void
}

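; 'afn' on the frem permits the same rcp-based approximation as 'fast'.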
define amdgpu_kernel void @unsafe_frem_f16(half addrspace(1)* %out, half addrspace(1)* %in1,
; SI-LABEL: unsafe_frem_f16:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
; SI-NEXT: s_mov_b32 s11, 0xf000
; SI-NEXT: s_mov_b32 s10, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s8, s4
; SI-NEXT: s_mov_b32 s9, s5
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s7
; SI-NEXT: s_mov_b32 s6, s10
; SI-NEXT: s_mov_b32 s7, s11
; SI-NEXT: s_mov_b32 s2, s10
; SI-NEXT: s_mov_b32 s3, s11
; SI-NEXT: buffer_load_ushort v0, off, s[4:7], 0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
; SI-NEXT: buffer_load_ushort v1, off, s[0:3], 0 offset:8
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
; SI-NEXT: v_rcp_f32_e32 v2, v1
; SI-NEXT: v_mul_f32_e32 v2, v0, v2
; SI-NEXT: v_trunc_f32_e32 v2, v2
; SI-NEXT: v_fma_f32 v0, -v2, v1, v0
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
; SI-NEXT: buffer_store_short v0, off, s[8:11], 0
; SI-NEXT: s_endpgm
;
; CI-LABEL: unsafe_frem_f16:
; CI: ; %bb.0:
; CI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; CI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
; CI-NEXT: s_mov_b32 s11, 0xf000
; CI-NEXT: s_mov_b32 s10, -1
; CI-NEXT: s_mov_b32 s2, s10
; CI-NEXT: s_mov_b32 s3, s11
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: buffer_load_ushort v1, off, s[0:3], 0 offset:8
; CI-NEXT: s_mov_b32 s8, s4
; CI-NEXT: s_mov_b32 s9, s5
; CI-NEXT: s_mov_b32 s4, s6
; CI-NEXT: s_mov_b32 s5, s7
; CI-NEXT: s_mov_b32 s6, s10
; CI-NEXT: s_mov_b32 s7, s11
; CI-NEXT: buffer_load_ushort v0, off, s[4:7], 0
; CI-NEXT: s_waitcnt vmcnt(1)
; CI-NEXT: v_cvt_f32_f16_e32 v1, v1
; CI-NEXT: v_rcp_f32_e32 v2, v1
; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: v_cvt_f32_f16_e32 v0, v0
; CI-NEXT: v_mul_f32_e32 v2, v0, v2
; CI-NEXT: v_trunc_f32_e32 v2, v2
; CI-NEXT: v_fma_f32 v0, -v2, v1, v0
; CI-NEXT: v_cvt_f16_f32_e32 v0, v0
; CI-NEXT: buffer_store_short v0, off, s[8:11], 0
; CI-NEXT: s_endpgm
;
; VI-LABEL: unsafe_frem_f16:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v2, s6
; VI-NEXT: s_add_u32 s0, s0, 8
; VI-NEXT: v_mov_b32_e32 v3, s7
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: flat_load_ushort v4, v[2:3]
; VI-NEXT: v_mov_b32_e32 v3, s1
; VI-NEXT: v_mov_b32_e32 v2, s0
; VI-NEXT: flat_load_ushort v2, v[2:3]
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_rcp_f16_e32 v3, v2
; VI-NEXT: v_mul_f16_e32 v3, v4, v3
; VI-NEXT: v_trunc_f16_e32 v3, v3
; VI-NEXT: v_fma_f16 v2, -v3, v2, v4
; VI-NEXT: flat_store_short v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: unsafe_frem_f16:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_load_ushort v1, v0, s[6:7]
; GFX9-NEXT: global_load_ushort v2, v0, s[2:3] offset:8
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_rcp_f16_e32 v3, v2
; GFX9-NEXT: v_mul_f16_e32 v3, v1, v3
; GFX9-NEXT: v_trunc_f16_e32 v3, v3
; GFX9-NEXT: v_fma_f16 v1, -v3, v2, v1
; GFX9-NEXT: global_store_short v0, v1, s[4:5]
; GFX9-NEXT: s_endpgm
;
; GFX10-LABEL: unsafe_frem_f16:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_clause 0x1
; GFX10-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX10-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
; GFX10-NEXT: v_mov_b32_e32 v0, 0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_clause 0x1
; GFX10-NEXT: global_load_ushort v1, v0, s[6:7]
; GFX10-NEXT: global_load_ushort v2, v0, s[2:3] offset:8
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: v_rcp_f16_e32 v3, v2
; GFX10-NEXT: v_mul_f16_e32 v3, v1, v3
; GFX10-NEXT: v_trunc_f16_e32 v3, v3
; GFX10-NEXT: v_fmac_f16_e64 v1, -v3, v2
; GFX10-NEXT: global_store_short v0, v1, s[4:5]
; GFX10-NEXT: s_endpgm
                    half addrspace(1)* %in2) #1 {
  %gep2 = getelementptr half, half addrspace(1)* %in2, i32 4
  %r0 = load half, half addrspace(1)* %in1, align 4
  %r1 = load half, half addrspace(1)* %gep2, align 4
  %r2 = frem afn half %r0, %r1
  store half %r2, half addrspace(1)* %out, align 4
  ret void
}

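; f32 frem: the quotient uses the full v_div_scale/v_rcp/v_div_fmas/v_div_fixup
; expansion, with denormal mode toggled around the refinement steps, before the
; trunc and final fma.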
define amdgpu_kernel void @frem_f32(float addrspace(1)* %out, float addrspace(1)* %in1,
; SI-LABEL: frem_f32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
; SI-NEXT: s_mov_b32 s11, 0xf000
; SI-NEXT: s_mov_b32 s10, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s8, s4
; SI-NEXT: s_mov_b32 s9, s5
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s7
; SI-NEXT: s_mov_b32 s6, s10
; SI-NEXT: s_mov_b32 s7, s11
; SI-NEXT: s_mov_b32 s2, s10
; SI-NEXT: s_mov_b32 s3, s11
; SI-NEXT: buffer_load_dword v0, off, s[4:7], 0
; SI-NEXT: buffer_load_dword v1, off, s[0:3], 0 offset:16
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_div_scale_f32 v2, vcc, v0, v1, v0
; SI-NEXT: v_div_scale_f32 v3, s[0:1], v1, v1, v0
; SI-NEXT: v_rcp_f32_e32 v4, v3
; SI-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 3
; SI-NEXT: v_fma_f32 v5, -v3, v4, 1.0
; SI-NEXT: v_fma_f32 v4, v5, v4, v4
; SI-NEXT: v_mul_f32_e32 v5, v2, v4
; SI-NEXT: v_fma_f32 v6, -v3, v5, v2
; SI-NEXT: v_fma_f32 v5, v6, v4, v5
; SI-NEXT: v_fma_f32 v2, -v3, v5, v2
; SI-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 0
; SI-NEXT: v_div_fmas_f32 v2, v2, v4, v5
; SI-NEXT: v_div_fixup_f32 v2, v2, v1, v0
; SI-NEXT: v_trunc_f32_e32 v2, v2
; SI-NEXT: v_fma_f32 v0, -v2, v1, v0
; SI-NEXT: buffer_store_dword v0, off, s[8:11], 0
; SI-NEXT: s_endpgm
;
; CI-LABEL: frem_f32:
; CI: ; %bb.0:
; CI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; CI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
; CI-NEXT: s_mov_b32 s11, 0xf000
; CI-NEXT: s_mov_b32 s10, -1
; CI-NEXT: s_mov_b32 s2, s10
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: s_mov_b32 s8, s4
; CI-NEXT: s_mov_b32 s9, s5
; CI-NEXT: s_mov_b32 s4, s6
; CI-NEXT: s_mov_b32 s5, s7
; CI-NEXT: s_mov_b32 s6, s10
; CI-NEXT: s_mov_b32 s7, s11
; CI-NEXT: s_mov_b32 s3, s11
; CI-NEXT: buffer_load_dword v0, off, s[4:7], 0
; CI-NEXT: buffer_load_dword v1, off, s[0:3], 0 offset:16
; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: v_div_scale_f32 v3, s[0:1], v1, v1, v0
; CI-NEXT: v_div_scale_f32 v2, vcc, v0, v1, v0
; CI-NEXT: v_rcp_f32_e32 v4, v3
; CI-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 3
; CI-NEXT: v_fma_f32 v5, -v3, v4, 1.0
; CI-NEXT: v_fma_f32 v4, v5, v4, v4
; CI-NEXT: v_mul_f32_e32 v5, v2, v4
; CI-NEXT: v_fma_f32 v6, -v3, v5, v2
; CI-NEXT: v_fma_f32 v5, v6, v4, v5
; CI-NEXT: v_fma_f32 v2, -v3, v5, v2
; CI-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 0
; CI-NEXT: v_div_fmas_f32 v2, v2, v4, v5
; CI-NEXT: v_div_fixup_f32 v2, v2, v1, v0
; CI-NEXT: v_trunc_f32_e32 v2, v2
; CI-NEXT: v_fma_f32 v0, -v2, v1, v0
; CI-NEXT: buffer_store_dword v0, off, s[8:11], 0
; CI-NEXT: s_endpgm
;
; VI-LABEL: frem_f32:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v2, s6
; VI-NEXT: s_add_u32 s0, s0, 16
; VI-NEXT: v_mov_b32_e32 v3, s7
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: flat_load_dword v4, v[2:3]
; VI-NEXT: v_mov_b32_e32 v3, s1
; VI-NEXT: v_mov_b32_e32 v2, s0
; VI-NEXT: flat_load_dword v2, v[2:3]
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_div_scale_f32 v5, s[0:1], v2, v2, v4
; VI-NEXT: v_div_scale_f32 v3, vcc, v4, v2, v4
; VI-NEXT: v_rcp_f32_e32 v6, v5
; VI-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 3
; VI-NEXT: v_fma_f32 v7, -v5, v6, 1.0
; VI-NEXT: v_fma_f32 v6, v7, v6, v6
; VI-NEXT: v_mul_f32_e32 v7, v3, v6
; VI-NEXT: v_fma_f32 v8, -v5, v7, v3
; VI-NEXT: v_fma_f32 v7, v8, v6, v7
; VI-NEXT: v_fma_f32 v3, -v5, v7, v3
; VI-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 0
; VI-NEXT: v_div_fmas_f32 v3, v3, v6, v7
; VI-NEXT: v_div_fixup_f32 v3, v3, v2, v4
; VI-NEXT: v_trunc_f32_e32 v3, v3
; VI-NEXT: v_fma_f32 v2, -v3, v2, v4
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: frem_f32:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_load_dword v1, v0, s[6:7]
; GFX9-NEXT: global_load_dword v2, v0, s[2:3] offset:16
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_div_scale_f32 v4, s[0:1], v2, v2, v1
; GFX9-NEXT: v_div_scale_f32 v3, vcc, v1, v2, v1
; GFX9-NEXT: v_rcp_f32_e32 v5, v4
; GFX9-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 3
; GFX9-NEXT: v_fma_f32 v6, -v4, v5, 1.0
; GFX9-NEXT: v_fma_f32 v5, v6, v5, v5
; GFX9-NEXT: v_mul_f32_e32 v6, v3, v5
; GFX9-NEXT: v_fma_f32 v7, -v4, v6, v3
; GFX9-NEXT: v_fma_f32 v6, v7, v5, v6
; GFX9-NEXT: v_fma_f32 v3, -v4, v6, v3
; GFX9-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 0
; GFX9-NEXT: v_div_fmas_f32 v3, v3, v5, v6
; GFX9-NEXT: v_div_fixup_f32 v3, v3, v2, v1
; GFX9-NEXT: v_trunc_f32_e32 v3, v3
; GFX9-NEXT: v_fma_f32 v1, -v3, v2, v1
; GFX9-NEXT: global_store_dword v0, v1, s[4:5]
; GFX9-NEXT: s_endpgm
;
; GFX10-LABEL: frem_f32:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_clause 0x1
; GFX10-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX10-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
; GFX10-NEXT: v_mov_b32_e32 v0, 0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_clause 0x1
; GFX10-NEXT: global_load_dword v1, v0, s[6:7]
; GFX10-NEXT: global_load_dword v2, v0, s[2:3] offset:16
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: v_div_scale_f32 v4, s0, v2, v2, v1
; GFX10-NEXT: v_div_scale_f32 v3, vcc_lo, v1, v2, v1
; GFX10-NEXT: v_rcp_f32_e32 v5, v4
; GFX10-NEXT: s_denorm_mode 15
; GFX10-NEXT: v_fma_f32 v6, -v4, v5, 1.0
; GFX10-NEXT: v_fma_f32 v5, v6, v5, v5
; GFX10-NEXT: v_mul_f32_e32 v6, v3, v5
; GFX10-NEXT: v_fma_f32 v7, -v4, v6, v3
; GFX10-NEXT: v_fma_f32 v6, v7, v5, v6
; GFX10-NEXT: v_fma_f32 v3, -v4, v6, v3
; GFX10-NEXT: s_denorm_mode 12
; GFX10-NEXT: v_div_fmas_f32 v3, v3, v5, v6
; GFX10-NEXT: v_div_fixup_f32 v3, v3, v2, v1
; GFX10-NEXT: v_trunc_f32_e32 v3, v3
; GFX10-NEXT: v_fmac_f32_e64 v1, -v3, v2
; GFX10-NEXT: global_store_dword v0, v1, s[4:5]
; GFX10-NEXT: s_endpgm
                    float addrspace(1)* %in2) #0 {
  %gep2 = getelementptr float, float addrspace(1)* %in2, i32 4
  %r0 = load float, float addrspace(1)* %in1, align 4
  %r1 = load float, float addrspace(1)* %gep2, align 4
  %r2 = frem float %r0, %r1
  store float %r2, float addrspace(1)* %out, align 4
  ret void
}

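; 'fast' f32 frem shortens the division to v_rcp + v_mul before the trunc/fma.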
define amdgpu_kernel void @fast_frem_f32(float addrspace(1)* %out, float addrspace(1)* %in1,
; SI-LABEL: fast_frem_f32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
; SI-NEXT: s_mov_b32 s11, 0xf000
; SI-NEXT: s_mov_b32 s10, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s8, s4
; SI-NEXT: s_mov_b32 s9, s5
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s7
; SI-NEXT: s_mov_b32 s6, s10
; SI-NEXT: s_mov_b32 s7, s11
; SI-NEXT: s_mov_b32 s2, s10
; SI-NEXT: s_mov_b32 s3, s11
; SI-NEXT: buffer_load_dword v0, off, s[4:7], 0
; SI-NEXT: buffer_load_dword v1, off, s[0:3], 0 offset:16
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_rcp_f32_e32 v2, v1
; SI-NEXT: v_mul_f32_e32 v2, v0, v2
; SI-NEXT: v_trunc_f32_e32 v2, v2
; SI-NEXT: v_fma_f32 v0, -v2, v1, v0
; SI-NEXT: buffer_store_dword v0, off, s[8:11], 0
; SI-NEXT: s_endpgm
;
; CI-LABEL: fast_frem_f32:
; CI: ; %bb.0:
; CI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; CI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
; CI-NEXT: s_mov_b32 s11, 0xf000
; CI-NEXT: s_mov_b32 s10, -1
; CI-NEXT: s_mov_b32 s2, s10
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: s_mov_b32 s8, s4
; CI-NEXT: s_mov_b32 s9, s5
; CI-NEXT: s_mov_b32 s4, s6
; CI-NEXT: s_mov_b32 s5, s7
; CI-NEXT: s_mov_b32 s6, s10
; CI-NEXT: s_mov_b32 s7, s11
; CI-NEXT: s_mov_b32 s3, s11
; CI-NEXT: buffer_load_dword v0, off, s[4:7], 0
; CI-NEXT: buffer_load_dword v1, off, s[0:3], 0 offset:16
; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: v_rcp_f32_e32 v2, v1
; CI-NEXT: v_mul_f32_e32 v2, v0, v2
; CI-NEXT: v_trunc_f32_e32 v2, v2
; CI-NEXT: v_fma_f32 v0, -v2, v1, v0
; CI-NEXT: buffer_store_dword v0, off, s[8:11], 0
; CI-NEXT: s_endpgm
;
; VI-LABEL: fast_frem_f32:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v2, s6
; VI-NEXT: s_add_u32 s0, s0, 16
; VI-NEXT: v_mov_b32_e32 v3, s7
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: flat_load_dword v4, v[2:3]
; VI-NEXT: v_mov_b32_e32 v3, s1
; VI-NEXT: v_mov_b32_e32 v2, s0
; VI-NEXT: flat_load_dword v2, v[2:3]
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_rcp_f32_e32 v3, v2
; VI-NEXT: v_mul_f32_e32 v3, v4, v3
; VI-NEXT: v_trunc_f32_e32 v3, v3
; VI-NEXT: v_fma_f32 v2, -v3, v2, v4
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: fast_frem_f32:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_load_dword v1, v0, s[6:7]
; GFX9-NEXT: global_load_dword v2, v0, s[2:3] offset:16
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_rcp_f32_e32 v3, v2
; GFX9-NEXT: v_mul_f32_e32 v3, v1, v3
; GFX9-NEXT: v_trunc_f32_e32 v3, v3
; GFX9-NEXT: v_fma_f32 v1, -v3, v2, v1
; GFX9-NEXT: global_store_dword v0, v1, s[4:5]
; GFX9-NEXT: s_endpgm
;
; GFX10-LABEL: fast_frem_f32:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_clause 0x1
; GFX10-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX10-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
; GFX10-NEXT: v_mov_b32_e32 v0, 0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_clause 0x1
; GFX10-NEXT: global_load_dword v1, v0, s[6:7]
; GFX10-NEXT: global_load_dword v2, v0, s[2:3] offset:16
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: v_rcp_f32_e32 v3, v2
; GFX10-NEXT: v_mul_f32_e32 v3, v1, v3
; GFX10-NEXT: v_trunc_f32_e32 v3, v3
; GFX10-NEXT: v_fmac_f32_e64 v1, -v3, v2
; GFX10-NEXT: global_store_dword v0, v1, s[4:5]
; GFX10-NEXT: s_endpgm
                    float addrspace(1)* %in2) #0 {
  %gep2 = getelementptr float, float addrspace(1)* %in2, i32 4
  %r0 = load float, float addrspace(1)* %in1, align 4
  %r1 = load float, float addrspace(1)* %gep2, align 4
  %r2 = frem fast float %r0, %r1
  store float %r2, float addrspace(1)* %out, align 4
  ret void
}

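; 'afn' f32 frem selects the same shortened rcp-based division.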
define amdgpu_kernel void @unsafe_frem_f32(float addrspace(1)* %out, float addrspace(1)* %in1,
; SI-LABEL: unsafe_frem_f32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
; SI-NEXT: s_mov_b32 s11, 0xf000
; SI-NEXT: s_mov_b32 s10, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s8, s4
; SI-NEXT: s_mov_b32 s9, s5
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s7
; SI-NEXT: s_mov_b32 s6, s10
; SI-NEXT: s_mov_b32 s7, s11
; SI-NEXT: s_mov_b32 s2, s10
; SI-NEXT: s_mov_b32 s3, s11
; SI-NEXT: buffer_load_dword v0, off, s[4:7], 0
; SI-NEXT: buffer_load_dword v1, off, s[0:3], 0 offset:16
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_rcp_f32_e32 v2, v1
; SI-NEXT: v_mul_f32_e32 v2, v0, v2
; SI-NEXT: v_trunc_f32_e32 v2, v2
; SI-NEXT: v_fma_f32 v0, -v2, v1, v0
; SI-NEXT: buffer_store_dword v0, off, s[8:11], 0
; SI-NEXT: s_endpgm
;
; CI-LABEL: unsafe_frem_f32:
; CI: ; %bb.0:
; CI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; CI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
; CI-NEXT: s_mov_b32 s11, 0xf000
; CI-NEXT: s_mov_b32 s10, -1
; CI-NEXT: s_mov_b32 s2, s10
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: s_mov_b32 s8, s4
; CI-NEXT: s_mov_b32 s9, s5
; CI-NEXT: s_mov_b32 s4, s6
; CI-NEXT: s_mov_b32 s5, s7
; CI-NEXT: s_mov_b32 s6, s10
; CI-NEXT: s_mov_b32 s7, s11
; CI-NEXT: s_mov_b32 s3, s11
; CI-NEXT: buffer_load_dword v0, off, s[4:7], 0
; CI-NEXT: buffer_load_dword v1, off, s[0:3], 0 offset:16
; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: v_rcp_f32_e32 v2, v1
; CI-NEXT: v_mul_f32_e32 v2, v0, v2
; CI-NEXT: v_trunc_f32_e32 v2, v2
; CI-NEXT: v_fma_f32 v0, -v2, v1, v0
; CI-NEXT: buffer_store_dword v0, off, s[8:11], 0
; CI-NEXT: s_endpgm
;
; VI-LABEL: unsafe_frem_f32:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v2, s6
; VI-NEXT: s_add_u32 s0, s0, 16
; VI-NEXT: v_mov_b32_e32 v3, s7
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: flat_load_dword v4, v[2:3]
; VI-NEXT: v_mov_b32_e32 v3, s1
; VI-NEXT: v_mov_b32_e32 v2, s0
; VI-NEXT: flat_load_dword v2, v[2:3]
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_rcp_f32_e32 v3, v2
; VI-NEXT: v_mul_f32_e32 v3, v4, v3
; VI-NEXT: v_trunc_f32_e32 v3, v3
; VI-NEXT: v_fma_f32 v2, -v3, v2, v4
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: unsafe_frem_f32:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_load_dword v1, v0, s[6:7]
; GFX9-NEXT: global_load_dword v2, v0, s[2:3] offset:16
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_rcp_f32_e32 v3, v2
; GFX9-NEXT: v_mul_f32_e32 v3, v1, v3
; GFX9-NEXT: v_trunc_f32_e32 v3, v3
; GFX9-NEXT: v_fma_f32 v1, -v3, v2, v1
; GFX9-NEXT: global_store_dword v0, v1, s[4:5]
; GFX9-NEXT: s_endpgm
;
; GFX10-LABEL: unsafe_frem_f32:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_clause 0x1
; GFX10-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX10-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
; GFX10-NEXT: v_mov_b32_e32 v0, 0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_clause 0x1
; GFX10-NEXT: global_load_dword v1, v0, s[6:7]
; GFX10-NEXT: global_load_dword v2, v0, s[2:3] offset:16
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: v_rcp_f32_e32 v3, v2
; GFX10-NEXT: v_mul_f32_e32 v3, v1, v3
; GFX10-NEXT: v_trunc_f32_e32 v3, v3
; GFX10-NEXT: v_fmac_f32_e64 v1, -v3, v2
; GFX10-NEXT: global_store_dword v0, v1, s[4:5]
; GFX10-NEXT: s_endpgm
                    float addrspace(1)* %in2) #1 {
  %gep2 = getelementptr float, float addrspace(1)* %in2, i32 4
  %r0 = load float, float addrspace(1)* %in1, align 4
  %r1 = load float, float addrspace(1)* %gep2, align 4
  %r2 = frem afn float %r0, %r1
  store float %r2, float addrspace(1)* %out, align 4
  ret void
}

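; f64 frem uses the double-precision division expansion; SI has no v_trunc_f64,
; so the truncation itself is expanded with exponent and mantissa masking.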
define amdgpu_kernel void @frem_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
; SI-LABEL: frem_f64:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[8:11], s[0:1], 0x9
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s4, s8
; SI-NEXT: s_mov_b32 s5, s9
; SI-NEXT: s_mov_b32 s8, s10
; SI-NEXT: s_mov_b32 s9, s11
; SI-NEXT: s_mov_b32 s10, s6
; SI-NEXT: s_mov_b32 s11, s7
; SI-NEXT: s_mov_b32 s2, s6
; SI-NEXT: s_mov_b32 s3, s7
; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[8:11], 0
; SI-NEXT: buffer_load_dwordx2 v[2:3], off, s[0:3], 0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_div_scale_f64 v[4:5], s[0:1], v[2:3], v[2:3], v[0:1]
; SI-NEXT: v_rcp_f64_e32 v[6:7], v[4:5]
; SI-NEXT: v_fma_f64 v[8:9], -v[4:5], v[6:7], 1.0
; SI-NEXT: v_fma_f64 v[6:7], v[6:7], v[8:9], v[6:7]
; SI-NEXT: v_fma_f64 v[8:9], -v[4:5], v[6:7], 1.0
; SI-NEXT: v_fma_f64 v[6:7], v[6:7], v[8:9], v[6:7]
; SI-NEXT: v_div_scale_f64 v[8:9], s[0:1], v[0:1], v[2:3], v[0:1]
; SI-NEXT: v_mul_f64 v[10:11], v[8:9], v[6:7]
; SI-NEXT: v_fma_f64 v[12:13], -v[4:5], v[10:11], v[8:9]
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5
; SI-NEXT: v_cmp_eq_u32_e64 s[0:1], v1, v9
; SI-NEXT: s_xor_b64 vcc, s[0:1], vcc
; SI-NEXT: s_nop 1
; SI-NEXT: v_div_fmas_f64 v[4:5], v[12:13], v[6:7], v[10:11]
; SI-NEXT: v_div_fixup_f64 v[4:5], v[4:5], v[2:3], v[0:1]
; SI-NEXT: v_bfe_u32 v6, v5, 20, 11
; SI-NEXT: v_add_i32_e32 v8, vcc, 0xfffffc01, v6
; SI-NEXT: s_mov_b32 s1, 0xfffff
; SI-NEXT: s_mov_b32 s0, s6
; SI-NEXT: v_lshr_b64 v[6:7], s[0:1], v8
; SI-NEXT: v_not_b32_e32 v6, v6
; SI-NEXT: v_and_b32_e32 v6, v4, v6
; SI-NEXT: v_not_b32_e32 v7, v7
; SI-NEXT: v_and_b32_e32 v7, v5, v7
; SI-NEXT: v_and_b32_e32 v9, 0x80000000, v5
; SI-NEXT: v_cmp_gt_i32_e32 vcc, 0, v8
; SI-NEXT: v_cndmask_b32_e32 v7, v7, v9, vcc
; SI-NEXT: v_cmp_lt_i32_e64 s[0:1], 51, v8
; SI-NEXT: v_cndmask_b32_e64 v5, v7, v5, s[0:1]
; SI-NEXT: v_cndmask_b32_e64 v6, v6, 0, vcc
; SI-NEXT: v_cndmask_b32_e64 v4, v6, v4, s[0:1]
; SI-NEXT: v_fma_f64 v[0:1], -v[4:5], v[2:3], v[0:1]
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; SI-NEXT: s_endpgm
;
; CI-LABEL: frem_f64:
; CI: ; %bb.0:
; CI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; CI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
; CI-NEXT: s_mov_b32 s11, 0xf000
; CI-NEXT: s_mov_b32 s10, -1
; CI-NEXT: s_mov_b32 s2, s10
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: s_mov_b32 s8, s4
; CI-NEXT: s_mov_b32 s9, s5
; CI-NEXT: s_mov_b32 s4, s6
; CI-NEXT: s_mov_b32 s5, s7
; CI-NEXT: s_mov_b32 s6, s10
; CI-NEXT: s_mov_b32 s7, s11
; CI-NEXT: s_mov_b32 s3, s11
; CI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0
; CI-NEXT: buffer_load_dwordx2 v[2:3], off, s[0:3], 0
; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: v_div_scale_f64 v[4:5], s[0:1], v[2:3], v[2:3], v[0:1]
; CI-NEXT: v_rcp_f64_e32 v[6:7], v[4:5]
; CI-NEXT: v_fma_f64 v[8:9], -v[4:5], v[6:7], 1.0
; CI-NEXT: v_fma_f64 v[6:7], v[6:7], v[8:9], v[6:7]
; CI-NEXT: v_fma_f64 v[8:9], -v[4:5], v[6:7], 1.0
; CI-NEXT: v_fma_f64 v[6:7], v[6:7], v[8:9], v[6:7]
; CI-NEXT: v_div_scale_f64 v[8:9], vcc, v[0:1], v[2:3], v[0:1]
; CI-NEXT: v_mul_f64 v[10:11], v[8:9], v[6:7]
; CI-NEXT: v_fma_f64 v[4:5], -v[4:5], v[10:11], v[8:9]
; CI-NEXT: s_nop 1
; CI-NEXT: v_div_fmas_f64 v[4:5], v[4:5], v[6:7], v[10:11]
; CI-NEXT: v_div_fixup_f64 v[4:5], v[4:5], v[2:3], v[0:1]
; CI-NEXT: v_trunc_f64_e32 v[4:5], v[4:5]
; CI-NEXT: v_fma_f64 v[0:1], -v[4:5], v[2:3], v[0:1]
; CI-NEXT: buffer_store_dwordx2 v[0:1], off, s[8:11], 0
; CI-NEXT: s_endpgm
;
; VI-LABEL: frem_f64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v2, s6
; VI-NEXT: v_mov_b32_e32 v3, s7
; VI-NEXT: v_mov_b32_e32 v4, s0
; VI-NEXT: v_mov_b32_e32 v5, s1
; VI-NEXT: flat_load_dwordx2 v[2:3], v[2:3]
; VI-NEXT: flat_load_dwordx2 v[4:5], v[4:5]
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_div_scale_f64 v[6:7], s[0:1], v[4:5], v[4:5], v[2:3]
; VI-NEXT: v_rcp_f64_e32 v[8:9], v[6:7]
; VI-NEXT: v_fma_f64 v[10:11], -v[6:7], v[8:9], 1.0
; VI-NEXT: v_fma_f64 v[8:9], v[8:9], v[10:11], v[8:9]
; VI-NEXT: v_fma_f64 v[10:11], -v[6:7], v[8:9], 1.0
; VI-NEXT: v_fma_f64 v[8:9], v[8:9], v[10:11], v[8:9]
; VI-NEXT: v_div_scale_f64 v[10:11], vcc, v[2:3], v[4:5], v[2:3]
; VI-NEXT: v_mul_f64 v[12:13], v[10:11], v[8:9]
; VI-NEXT: v_fma_f64 v[6:7], -v[6:7], v[12:13], v[10:11]
; VI-NEXT: s_nop 1
; VI-NEXT: v_div_fmas_f64 v[6:7], v[6:7], v[8:9], v[12:13]
; VI-NEXT: v_div_fixup_f64 v[6:7], v[6:7], v[4:5], v[2:3]
; VI-NEXT: v_trunc_f64_e32 v[6:7], v[6:7]
; VI-NEXT: v_fma_f64 v[2:3], -v[6:7], v[4:5], v[2:3]
; VI-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: frem_f64:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
; GFX9-NEXT: v_mov_b32_e32 v12, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_load_dwordx2 v[0:1], v12, s[6:7]
; GFX9-NEXT: global_load_dwordx2 v[2:3], v12, s[2:3]
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_div_scale_f64 v[4:5], s[0:1], v[2:3], v[2:3], v[0:1]
; GFX9-NEXT: v_rcp_f64_e32 v[6:7], v[4:5]
; GFX9-NEXT: v_fma_f64 v[8:9], -v[4:5], v[6:7], 1.0
; GFX9-NEXT: v_fma_f64 v[6:7], v[6:7], v[8:9], v[6:7]
; GFX9-NEXT: v_fma_f64 v[8:9], -v[4:5], v[6:7], 1.0
; GFX9-NEXT: v_fma_f64 v[6:7], v[6:7], v[8:9], v[6:7]
; GFX9-NEXT: v_div_scale_f64 v[8:9], vcc, v[0:1], v[2:3], v[0:1]
; GFX9-NEXT: v_mul_f64 v[10:11], v[8:9], v[6:7]
; GFX9-NEXT: v_fma_f64 v[4:5], -v[4:5], v[10:11], v[8:9]
; GFX9-NEXT: s_nop 1
; GFX9-NEXT: v_div_fmas_f64 v[4:5], v[4:5], v[6:7], v[10:11]
; GFX9-NEXT: v_div_fixup_f64 v[4:5], v[4:5], v[2:3], v[0:1]
; GFX9-NEXT: v_trunc_f64_e32 v[4:5], v[4:5]
; GFX9-NEXT: v_fma_f64 v[0:1], -v[4:5], v[2:3], v[0:1]
; GFX9-NEXT: global_store_dwordx2 v12, v[0:1], s[4:5]
; GFX9-NEXT: s_endpgm
;
; GFX10-LABEL: frem_f64:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_clause 0x1
; GFX10-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX10-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
; GFX10-NEXT: v_mov_b32_e32 v12, 0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_clause 0x1
; GFX10-NEXT: global_load_dwordx2 v[0:1], v12, s[6:7]
; GFX10-NEXT: global_load_dwordx2 v[2:3], v12, s[2:3]
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: v_div_scale_f64 v[4:5], s0, v[2:3], v[2:3], v[0:1]
; GFX10-NEXT: v_rcp_f64_e32 v[6:7], v[4:5]
; GFX10-NEXT: v_fma_f64 v[8:9], -v[4:5], v[6:7], 1.0
; GFX10-NEXT: v_fma_f64 v[6:7], v[6:7], v[8:9], v[6:7]
; GFX10-NEXT: v_fma_f64 v[8:9], -v[4:5], v[6:7], 1.0
; GFX10-NEXT: v_fma_f64 v[6:7], v[6:7], v[8:9], v[6:7]
; GFX10-NEXT: v_div_scale_f64 v[8:9], vcc_lo, v[0:1], v[2:3], v[0:1]
; GFX10-NEXT: v_mul_f64 v[10:11], v[8:9], v[6:7]
; GFX10-NEXT: v_fma_f64 v[4:5], -v[4:5], v[10:11], v[8:9]
; GFX10-NEXT: v_div_fmas_f64 v[4:5], v[4:5], v[6:7], v[10:11]
; GFX10-NEXT: v_div_fixup_f64 v[4:5], v[4:5], v[2:3], v[0:1]
; GFX10-NEXT: v_trunc_f64_e32 v[4:5], v[4:5]
; GFX10-NEXT: v_fma_f64 v[0:1], -v[4:5], v[2:3], v[0:1]
; GFX10-NEXT: global_store_dwordx2 v12, v[0:1], s[4:5]
; GFX10-NEXT: s_endpgm
                    double addrspace(1)* %in2) #0 {
  %r0 = load double, double addrspace(1)* %in1, align 8
  %r1 = load double, double addrspace(1)* %in2, align 8
  %r2 = frem double %r0, %r1
  store double %r2, double addrspace(1)* %out, align 8
  ret void
}

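; Even with 'fast', the f64 division keeps the Newton-Raphson refinement around
; v_rcp_f64 before the trunc and final fma.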
define amdgpu_kernel void @fast_frem_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
; SI-LABEL: fast_frem_f64:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
; SI-NEXT: s_mov_b32 s11, 0xf000
; SI-NEXT: s_mov_b32 s10, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s8, s4
; SI-NEXT: s_mov_b32 s9, s5
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s7
; SI-NEXT: s_mov_b32 s6, s10
; SI-NEXT: s_mov_b32 s7, s11
; SI-NEXT: s_mov_b32 s2, s10
; SI-NEXT: s_mov_b32 s3, s11
; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0
; SI-NEXT: buffer_load_dwordx2 v[2:3], off, s[0:3], 0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_rcp_f64_e32 v[4:5], v[2:3]
; SI-NEXT: v_fma_f64 v[6:7], -v[2:3], v[4:5], 1.0
; SI-NEXT: v_fma_f64 v[4:5], v[6:7], v[4:5], v[4:5]
; SI-NEXT: v_fma_f64 v[6:7], -v[2:3], v[4:5], 1.0
; SI-NEXT: v_fma_f64 v[4:5], v[6:7], v[4:5], v[4:5]
; SI-NEXT: v_mul_f64 v[6:7], v[0:1], v[4:5]
; SI-NEXT: v_fma_f64 v[8:9], -v[2:3], v[6:7], v[0:1]
; SI-NEXT: v_fma_f64 v[4:5], v[8:9], v[4:5], v[6:7]
; SI-NEXT: v_bfe_u32 v6, v5, 20, 11
; SI-NEXT: v_add_i32_e32 v8, vcc, 0xfffffc01, v6
; SI-NEXT: s_mov_b32 s1, 0xfffff
; SI-NEXT: s_mov_b32 s0, s10
; SI-NEXT: v_lshr_b64 v[6:7], s[0:1], v8
; SI-NEXT: v_not_b32_e32 v6, v6
; SI-NEXT: v_and_b32_e32 v6, v4, v6
; SI-NEXT: v_not_b32_e32 v7, v7
; SI-NEXT: v_and_b32_e32 v7, v5, v7
; SI-NEXT: v_and_b32_e32 v9, 0x80000000, v5
; SI-NEXT: v_cmp_gt_i32_e32 vcc, 0, v8
; SI-NEXT: v_cndmask_b32_e32 v7, v7, v9, vcc
; SI-NEXT: v_cmp_lt_i32_e64 s[0:1], 51, v8
; SI-NEXT: v_cndmask_b32_e64 v5, v7, v5, s[0:1]
; SI-NEXT: v_cndmask_b32_e64 v6, v6, 0, vcc
; SI-NEXT: v_cndmask_b32_e64 v4, v6, v4, s[0:1]
; SI-NEXT: v_fma_f64 v[0:1], -v[4:5], v[2:3], v[0:1]
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[8:11], 0
; SI-NEXT: s_endpgm
;
; CI-LABEL: fast_frem_f64:
; CI: ; %bb.0:
; CI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; CI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
; CI-NEXT: s_mov_b32 s11, 0xf000
; CI-NEXT: s_mov_b32 s10, -1
; CI-NEXT: s_mov_b32 s2, s10
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: s_mov_b32 s8, s4
; CI-NEXT: s_mov_b32 s9, s5
; CI-NEXT: s_mov_b32 s4, s6
; CI-NEXT: s_mov_b32 s5, s7
; CI-NEXT: s_mov_b32 s6, s10
; CI-NEXT: s_mov_b32 s7, s11
; CI-NEXT: s_mov_b32 s3, s11
; CI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0
; CI-NEXT: buffer_load_dwordx2 v[2:3], off, s[0:3], 0
; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: v_rcp_f64_e32 v[4:5], v[2:3]
; CI-NEXT: v_fma_f64 v[6:7], -v[2:3], v[4:5], 1.0
; CI-NEXT: v_fma_f64 v[4:5], v[6:7], v[4:5], v[4:5]
; CI-NEXT: v_fma_f64 v[6:7], -v[2:3], v[4:5], 1.0
; CI-NEXT: v_fma_f64 v[4:5], v[6:7], v[4:5], v[4:5]
; CI-NEXT: v_mul_f64 v[6:7], v[0:1], v[4:5]
; CI-NEXT: v_fma_f64 v[8:9], -v[2:3], v[6:7], v[0:1]
; CI-NEXT: v_fma_f64 v[4:5], v[8:9], v[4:5], v[6:7]
; CI-NEXT: v_trunc_f64_e32 v[4:5], v[4:5]
; CI-NEXT: v_fma_f64 v[0:1], -v[4:5], v[2:3], v[0:1]
; CI-NEXT: buffer_store_dwordx2 v[0:1], off, s[8:11], 0
; CI-NEXT: s_endpgm
;
; VI-LABEL: fast_frem_f64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v2, s6
; VI-NEXT: v_mov_b32_e32 v3, s7
; VI-NEXT: v_mov_b32_e32 v4, s0
; VI-NEXT: v_mov_b32_e32 v5, s1
; VI-NEXT: flat_load_dwordx2 v[2:3], v[2:3]
; VI-NEXT: flat_load_dwordx2 v[4:5], v[4:5]
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_rcp_f64_e32 v[6:7], v[4:5]
; VI-NEXT: v_fma_f64 v[8:9], -v[4:5], v[6:7], 1.0
; VI-NEXT: v_fma_f64 v[6:7], v[8:9], v[6:7], v[6:7]
; VI-NEXT: v_fma_f64 v[8:9], -v[4:5], v[6:7], 1.0
; VI-NEXT: v_fma_f64 v[6:7], v[8:9], v[6:7], v[6:7]
; VI-NEXT: v_mul_f64 v[8:9], v[2:3], v[6:7]
; VI-NEXT: v_fma_f64 v[10:11], -v[4:5], v[8:9], v[2:3]
; VI-NEXT: v_fma_f64 v[6:7], v[10:11], v[6:7], v[8:9]
; VI-NEXT: v_trunc_f64_e32 v[6:7], v[6:7]
; VI-NEXT: v_fma_f64 v[2:3], -v[6:7], v[4:5], v[2:3]
; VI-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: fast_frem_f64:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
; GFX9-NEXT: v_mov_b32_e32 v10, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_load_dwordx2 v[0:1], v10, s[6:7]
; GFX9-NEXT: global_load_dwordx2 v[2:3], v10, s[2:3]
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_rcp_f64_e32 v[4:5], v[2:3]
; GFX9-NEXT: v_fma_f64 v[6:7], -v[2:3], v[4:5], 1.0
; GFX9-NEXT: v_fma_f64 v[4:5], v[6:7], v[4:5], v[4:5]
; GFX9-NEXT: v_fma_f64 v[6:7], -v[2:3], v[4:5], 1.0
; GFX9-NEXT: v_fma_f64 v[4:5], v[6:7], v[4:5], v[4:5]
; GFX9-NEXT: v_mul_f64 v[6:7], v[0:1], v[4:5]
; GFX9-NEXT: v_fma_f64 v[8:9], -v[2:3], v[6:7], v[0:1]
; GFX9-NEXT: v_fma_f64 v[4:5], v[8:9], v[4:5], v[6:7]
; GFX9-NEXT: v_trunc_f64_e32 v[4:5], v[4:5]
; GFX9-NEXT: v_fma_f64 v[0:1], -v[4:5], v[2:3], v[0:1]
; GFX9-NEXT: global_store_dwordx2 v10, v[0:1], s[4:5]
; GFX9-NEXT: s_endpgm
;
; GFX10-LABEL: fast_frem_f64:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_clause 0x1
; GFX10-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX10-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
; GFX10-NEXT: v_mov_b32_e32 v10, 0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_clause 0x1
; GFX10-NEXT: global_load_dwordx2 v[0:1], v10, s[6:7]
; GFX10-NEXT: global_load_dwordx2 v[2:3], v10, s[2:3]
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: v_rcp_f64_e32 v[4:5], v[2:3]
; GFX10-NEXT: v_fma_f64 v[6:7], -v[2:3], v[4:5], 1.0
; GFX10-NEXT: v_fma_f64 v[4:5], v[6:7], v[4:5], v[4:5]
; GFX10-NEXT: v_fma_f64 v[6:7], -v[2:3], v[4:5], 1.0
; GFX10-NEXT: v_fma_f64 v[4:5], v[6:7], v[4:5], v[4:5]
; GFX10-NEXT: v_mul_f64 v[6:7], v[0:1], v[4:5]
; GFX10-NEXT: v_fma_f64 v[8:9], -v[2:3], v[6:7], v[0:1]
; GFX10-NEXT: v_fma_f64 v[4:5], v[8:9], v[4:5], v[6:7]
; GFX10-NEXT: v_trunc_f64_e32 v[4:5], v[4:5]
; GFX10-NEXT: v_fma_f64 v[0:1], -v[4:5], v[2:3], v[0:1]
; GFX10-NEXT: global_store_dwordx2 v10, v[0:1], s[4:5]
; GFX10-NEXT: s_endpgm
double addrspace(1)* %in2) #0 {
%r0 = load double, double addrspace(1)* %in1, align 8
%r1 = load double, double addrspace(1)* %in2, align 8
%r2 = frem fast double %r0, %r1
store double %r2, double addrspace(1)* %out, align 8
ret void
}

define amdgpu_kernel void @unsafe_frem_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
; SI-LABEL: unsafe_frem_f64:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
; SI-NEXT: s_mov_b32 s11, 0xf000
; SI-NEXT: s_mov_b32 s10, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s8, s4
; SI-NEXT: s_mov_b32 s9, s5
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s7
; SI-NEXT: s_mov_b32 s6, s10
; SI-NEXT: s_mov_b32 s7, s11
; SI-NEXT: s_mov_b32 s2, s10
; SI-NEXT: s_mov_b32 s3, s11
; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0
; SI-NEXT: buffer_load_dwordx2 v[2:3], off, s[0:3], 0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_rcp_f64_e32 v[4:5], v[2:3]
; SI-NEXT: v_fma_f64 v[6:7], -v[2:3], v[4:5], 1.0
; SI-NEXT: v_fma_f64 v[4:5], v[6:7], v[4:5], v[4:5]
; SI-NEXT: v_fma_f64 v[6:7], -v[2:3], v[4:5], 1.0
; SI-NEXT: v_fma_f64 v[4:5], v[6:7], v[4:5], v[4:5]
; SI-NEXT: v_mul_f64 v[6:7], v[0:1], v[4:5]
; SI-NEXT: v_fma_f64 v[8:9], -v[2:3], v[6:7], v[0:1]
; SI-NEXT: v_fma_f64 v[4:5], v[8:9], v[4:5], v[6:7]
; SI-NEXT: v_bfe_u32 v6, v5, 20, 11
; SI-NEXT: v_add_i32_e32 v8, vcc, 0xfffffc01, v6
; SI-NEXT: s_mov_b32 s1, 0xfffff
; SI-NEXT: s_mov_b32 s0, s10
; SI-NEXT: v_lshr_b64 v[6:7], s[0:1], v8
; SI-NEXT: v_not_b32_e32 v6, v6
; SI-NEXT: v_and_b32_e32 v6, v4, v6
; SI-NEXT: v_not_b32_e32 v7, v7
; SI-NEXT: v_and_b32_e32 v7, v5, v7
; SI-NEXT: v_and_b32_e32 v9, 0x80000000, v5
; SI-NEXT: v_cmp_gt_i32_e32 vcc, 0, v8
; SI-NEXT: v_cndmask_b32_e32 v7, v7, v9, vcc
; SI-NEXT: v_cmp_lt_i32_e64 s[0:1], 51, v8
; SI-NEXT: v_cndmask_b32_e64 v5, v7, v5, s[0:1]
; SI-NEXT: v_cndmask_b32_e64 v6, v6, 0, vcc
; SI-NEXT: v_cndmask_b32_e64 v4, v6, v4, s[0:1]
; SI-NEXT: v_fma_f64 v[0:1], -v[4:5], v[2:3], v[0:1]
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[8:11], 0
; SI-NEXT: s_endpgm
;
; CI-LABEL: unsafe_frem_f64:
; CI: ; %bb.0:
; CI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; CI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
; CI-NEXT: s_mov_b32 s11, 0xf000
; CI-NEXT: s_mov_b32 s10, -1
; CI-NEXT: s_mov_b32 s2, s10
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: s_mov_b32 s8, s4
; CI-NEXT: s_mov_b32 s9, s5
; CI-NEXT: s_mov_b32 s4, s6
; CI-NEXT: s_mov_b32 s5, s7
; CI-NEXT: s_mov_b32 s6, s10
; CI-NEXT: s_mov_b32 s7, s11
; CI-NEXT: s_mov_b32 s3, s11
; CI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0
; CI-NEXT: buffer_load_dwordx2 v[2:3], off, s[0:3], 0
; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: v_rcp_f64_e32 v[4:5], v[2:3]
; CI-NEXT: v_fma_f64 v[6:7], -v[2:3], v[4:5], 1.0
; CI-NEXT: v_fma_f64 v[4:5], v[6:7], v[4:5], v[4:5]
; CI-NEXT: v_fma_f64 v[6:7], -v[2:3], v[4:5], 1.0
; CI-NEXT: v_fma_f64 v[4:5], v[6:7], v[4:5], v[4:5]
; CI-NEXT: v_mul_f64 v[6:7], v[0:1], v[4:5]
; CI-NEXT: v_fma_f64 v[8:9], -v[2:3], v[6:7], v[0:1]
; CI-NEXT: v_fma_f64 v[4:5], v[8:9], v[4:5], v[6:7]
; CI-NEXT: v_trunc_f64_e32 v[4:5], v[4:5]
; CI-NEXT: v_fma_f64 v[0:1], -v[4:5], v[2:3], v[0:1]
; CI-NEXT: buffer_store_dwordx2 v[0:1], off, s[8:11], 0
; CI-NEXT: s_endpgm
;
; VI-LABEL: unsafe_frem_f64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v2, s6
; VI-NEXT: v_mov_b32_e32 v3, s7
; VI-NEXT: v_mov_b32_e32 v4, s0
; VI-NEXT: v_mov_b32_e32 v5, s1
; VI-NEXT: flat_load_dwordx2 v[2:3], v[2:3]
; VI-NEXT: flat_load_dwordx2 v[4:5], v[4:5]
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_rcp_f64_e32 v[6:7], v[4:5]
; VI-NEXT: v_fma_f64 v[8:9], -v[4:5], v[6:7], 1.0
; VI-NEXT: v_fma_f64 v[6:7], v[8:9], v[6:7], v[6:7]
; VI-NEXT: v_fma_f64 v[8:9], -v[4:5], v[6:7], 1.0
; VI-NEXT: v_fma_f64 v[6:7], v[8:9], v[6:7], v[6:7]
; VI-NEXT: v_mul_f64 v[8:9], v[2:3], v[6:7]
; VI-NEXT: v_fma_f64 v[10:11], -v[4:5], v[8:9], v[2:3]
; VI-NEXT: v_fma_f64 v[6:7], v[10:11], v[6:7], v[8:9]
; VI-NEXT: v_trunc_f64_e32 v[6:7], v[6:7]
; VI-NEXT: v_fma_f64 v[2:3], -v[6:7], v[4:5], v[2:3]
; VI-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: unsafe_frem_f64:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
; GFX9-NEXT: v_mov_b32_e32 v10, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_load_dwordx2 v[0:1], v10, s[6:7]
; GFX9-NEXT: global_load_dwordx2 v[2:3], v10, s[2:3]
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_rcp_f64_e32 v[4:5], v[2:3]
; GFX9-NEXT: v_fma_f64 v[6:7], -v[2:3], v[4:5], 1.0
; GFX9-NEXT: v_fma_f64 v[4:5], v[6:7], v[4:5], v[4:5]
; GFX9-NEXT: v_fma_f64 v[6:7], -v[2:3], v[4:5], 1.0
; GFX9-NEXT: v_fma_f64 v[4:5], v[6:7], v[4:5], v[4:5]
; GFX9-NEXT: v_mul_f64 v[6:7], v[0:1], v[4:5]
; GFX9-NEXT: v_fma_f64 v[8:9], -v[2:3], v[6:7], v[0:1]
; GFX9-NEXT: v_fma_f64 v[4:5], v[8:9], v[4:5], v[6:7]
; GFX9-NEXT: v_trunc_f64_e32 v[4:5], v[4:5]
; GFX9-NEXT: v_fma_f64 v[0:1], -v[4:5], v[2:3], v[0:1]
; GFX9-NEXT: global_store_dwordx2 v10, v[0:1], s[4:5]
; GFX9-NEXT: s_endpgm
;
; GFX10-LABEL: unsafe_frem_f64:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_clause 0x1
; GFX10-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX10-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
; GFX10-NEXT: v_mov_b32_e32 v10, 0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_clause 0x1
; GFX10-NEXT: global_load_dwordx2 v[0:1], v10, s[6:7]
; GFX10-NEXT: global_load_dwordx2 v[2:3], v10, s[2:3]
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: v_rcp_f64_e32 v[4:5], v[2:3]
; GFX10-NEXT: v_fma_f64 v[6:7], -v[2:3], v[4:5], 1.0
; GFX10-NEXT: v_fma_f64 v[4:5], v[6:7], v[4:5], v[4:5]
; GFX10-NEXT: v_fma_f64 v[6:7], -v[2:3], v[4:5], 1.0
; GFX10-NEXT: v_fma_f64 v[4:5], v[6:7], v[4:5], v[4:5]
; GFX10-NEXT: v_mul_f64 v[6:7], v[0:1], v[4:5]
; GFX10-NEXT: v_fma_f64 v[8:9], -v[2:3], v[6:7], v[0:1]
; GFX10-NEXT: v_fma_f64 v[4:5], v[8:9], v[4:5], v[6:7]
; GFX10-NEXT: v_trunc_f64_e32 v[4:5], v[4:5]
; GFX10-NEXT: v_fma_f64 v[0:1], -v[4:5], v[2:3], v[0:1]
; GFX10-NEXT: global_store_dwordx2 v10, v[0:1], s[4:5]
; GFX10-NEXT: s_endpgm
double addrspace(1)* %in2) #1 {
%r0 = load double, double addrspace(1)* %in1, align 8
%r1 = load double, double addrspace(1)* %in2, align 8
%r2 = frem afn double %r0, %r1
store double %r2, double addrspace(1)* %out, align 8
ret void
}

define amdgpu_kernel void @frem_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %in1,
|
|
|
|
; SI-LABEL: frem_v2f16:
|
|
|
|
; SI: ; %bb.0:
|
|
|
|
; SI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
|
|
|
|
; SI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0xd
|
|
|
|
; SI-NEXT: s_mov_b32 s3, 0xf000
|
|
|
|
; SI-NEXT: s_mov_b32 s2, -1
|
|
|
|
; SI-NEXT: s_waitcnt lgkmcnt(0)
|
|
|
|
; SI-NEXT: s_mov_b32 s0, s4
|
|
|
|
; SI-NEXT: s_mov_b32 s1, s5
|
|
|
|
; SI-NEXT: s_mov_b32 s4, s6
|
|
|
|
; SI-NEXT: s_mov_b32 s5, s7
|
|
|
|
; SI-NEXT: s_mov_b32 s6, s2
|
|
|
|
; SI-NEXT: s_mov_b32 s7, s3
|
|
|
|
; SI-NEXT: s_mov_b32 s10, s2
|
|
|
|
; SI-NEXT: s_mov_b32 s11, s3
|
|
|
|
; SI-NEXT: buffer_load_dword v0, off, s[4:7], 0
|
|
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
|
|
; SI-NEXT: v_cvt_f32_f16_e32 v1, v0
|
|
|
|
; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
|
|
|
|
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
|
|
|
|
; SI-NEXT: buffer_load_dword v2, off, s[8:11], 0 offset:16
|
|
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
|
|
; SI-NEXT: v_cvt_f32_f16_e32 v3, v2
|
|
|
|
; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v2
|
|
|
|
; SI-NEXT: v_cvt_f32_f16_e32 v2, v2
|
|
|
|
; SI-NEXT: v_div_scale_f32 v4, vcc, v0, v2, v0
|
|
|
|
; SI-NEXT: v_div_scale_f32 v5, s[4:5], v2, v2, v0
|
|
|
|
; SI-NEXT: v_rcp_f32_e32 v6, v5
|
|
|
|
; SI-NEXT: s_mov_b32 s6, 3
|
|
|
|
; SI-NEXT: s_mov_b32 s7, 0
|
|
|
|
; SI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s6
|
|
|
|
; SI-NEXT: v_fma_f32 v7, -v5, v6, 1.0
|
|
|
|
; SI-NEXT: v_fma_f32 v6, v7, v6, v6
|
|
|
|
; SI-NEXT: v_mul_f32_e32 v7, v4, v6
|
|
|
|
; SI-NEXT: v_fma_f32 v8, -v5, v7, v4
|
|
|
|
; SI-NEXT: v_fma_f32 v7, v8, v6, v7
|
|
|
|
; SI-NEXT: v_fma_f32 v4, -v5, v7, v4
|
|
|
|
; SI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s7
|
|
|
|
; SI-NEXT: v_div_fmas_f32 v4, v4, v6, v7
|
|
|
|
; SI-NEXT: v_div_fixup_f32 v4, v4, v2, v0
|
|
|
|
; SI-NEXT: v_trunc_f32_e32 v4, v4
|
|
|
|
; SI-NEXT: v_fma_f32 v0, -v4, v2, v0
|
|
|
|
; SI-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 2, 2), 0
|
|
|
|
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
|
|
|
|
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
|
|
|
|
; SI-NEXT: v_div_scale_f32 v2, vcc, v1, v3, v1
|
|
|
|
; SI-NEXT: v_div_scale_f32 v4, s[4:5], v3, v3, v1
|
|
|
|
; SI-NEXT: v_rcp_f32_e32 v5, v4
|
|
|
|
; SI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s6
|
|
|
|
; SI-NEXT: v_fma_f32 v6, -v4, v5, 1.0
|
|
|
|
; SI-NEXT: v_fma_f32 v5, v6, v5, v5
|
|
|
|
; SI-NEXT: v_mul_f32_e32 v6, v2, v5
|
|
|
|
; SI-NEXT: v_fma_f32 v7, -v4, v6, v2
|
|
|
|
; SI-NEXT: v_fma_f32 v6, v7, v5, v6
|
|
|
|
; SI-NEXT: v_fma_f32 v2, -v4, v6, v2
|
|
|
|
; SI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s7
|
|
|
|
; SI-NEXT: v_div_fmas_f32 v2, v2, v5, v6
|
|
|
|
; SI-NEXT: v_div_fixup_f32 v2, v2, v3, v1
|
|
|
|
; SI-NEXT: v_trunc_f32_e32 v2, v2
|
|
|
|
; SI-NEXT: v_fma_f32 v1, -v2, v3, v1
|
|
|
|
; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
|
|
|
|
; SI-NEXT: v_or_b32_e32 v0, v1, v0
|
|
|
|
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
|
|
|
|
; SI-NEXT: s_endpgm
|
|
|
|
;
|
|
|
|
; CI-LABEL: frem_v2f16:
|
|
|
|
; CI: ; %bb.0:
|
|
|
|
; CI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
|
|
|
|
; CI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0xd
|
|
|
|
; CI-NEXT: s_mov_b32 s3, 0xf000
|
|
|
|
; CI-NEXT: s_mov_b32 s2, -1
|
|
|
|
; CI-NEXT: s_mov_b32 s10, s2
|
|
|
|
; CI-NEXT: s_waitcnt lgkmcnt(0)
|
|
|
|
; CI-NEXT: s_mov_b32 s0, s4
|
|
|
|
; CI-NEXT: s_mov_b32 s1, s5
|
|
|
|
; CI-NEXT: s_mov_b32 s4, s6
|
|
|
|
; CI-NEXT: s_mov_b32 s5, s7
|
|
|
|
; CI-NEXT: s_mov_b32 s11, s3
|
|
|
|
; CI-NEXT: s_mov_b32 s6, s2
|
|
|
|
; CI-NEXT: s_mov_b32 s7, s3
|
|
|
|
; CI-NEXT: buffer_load_dword v0, off, s[4:7], 0
|
|
|
|
; CI-NEXT: buffer_load_dword v2, off, s[8:11], 0 offset:16
|
|
|
|
; CI-NEXT: s_mov_b32 s6, 3
|
|
|
|
; CI-NEXT: s_mov_b32 s7, 0
|
|
|
|
; CI-NEXT: s_waitcnt vmcnt(1)
|
|
|
|
; CI-NEXT: v_cvt_f32_f16_e32 v1, v0
|
|
|
|
; CI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
|
|
|
|
; CI-NEXT: s_waitcnt vmcnt(0)
|
|
|
|
; CI-NEXT: v_cvt_f32_f16_e32 v3, v2
|
|
|
|
; CI-NEXT: v_lshrrev_b32_e32 v2, 16, v2
|
|
|
|
; CI-NEXT: v_cvt_f32_f16_e32 v0, v0
|
|
|
|
; CI-NEXT: v_cvt_f32_f16_e32 v2, v2
|
|
|
|
; CI-NEXT: v_div_scale_f32 v5, s[4:5], v2, v2, v0
|
|
|
|
; CI-NEXT: v_div_scale_f32 v4, vcc, v0, v2, v0
|
|
|
|
; CI-NEXT: v_rcp_f32_e32 v6, v5
|
|
|
|
; CI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s6
|
|
|
|
; CI-NEXT: v_fma_f32 v7, -v5, v6, 1.0
|
|
|
|
; CI-NEXT: v_fma_f32 v6, v7, v6, v6
|
|
|
|
; CI-NEXT: v_mul_f32_e32 v7, v4, v6
|
|
|
|
; CI-NEXT: v_fma_f32 v8, -v5, v7, v4
|
|
|
|
; CI-NEXT: v_fma_f32 v7, v8, v6, v7
|
|
|
|
; CI-NEXT: v_fma_f32 v4, -v5, v7, v4
|
|
|
|
; CI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s7
|
|
|
|
; CI-NEXT: v_div_fmas_f32 v4, v4, v6, v7
|
|
|
|
; CI-NEXT: v_div_fixup_f32 v4, v4, v2, v0
|
|
|
|
; CI-NEXT: v_trunc_f32_e32 v4, v4
|
|
|
|
; CI-NEXT: v_fma_f32 v0, -v4, v2, v0
|
|
|
|
; CI-NEXT: v_div_scale_f32 v4, s[4:5], v3, v3, v1
|
2020-09-10 00:21:36 +08:00
|
|
|
; CI-NEXT: v_div_scale_f32 v2, vcc, v1, v3, v1
|
2020-07-24 19:05:46 +08:00
|
|
|
; CI-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 2, 2), 0
|
|
|
|
; CI-NEXT: v_cvt_f16_f32_e32 v0, v0
|
|
|
|
; CI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
|
|
|
|
; CI-NEXT: v_rcp_f32_e32 v5, v4
|
|
|
|
; CI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s6
|
|
|
|
; CI-NEXT: v_fma_f32 v6, -v4, v5, 1.0
|
|
|
|
; CI-NEXT: v_fma_f32 v5, v6, v5, v5
|
|
|
|
; CI-NEXT: v_mul_f32_e32 v6, v2, v5
|
|
|
|
; CI-NEXT: v_fma_f32 v7, -v4, v6, v2
|
|
|
|
; CI-NEXT: v_fma_f32 v6, v7, v5, v6
|
|
|
|
; CI-NEXT: v_fma_f32 v2, -v4, v6, v2
|
|
|
|
; CI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s7
|
|
|
|
; CI-NEXT: v_div_fmas_f32 v2, v2, v5, v6
|
|
|
|
; CI-NEXT: v_div_fixup_f32 v2, v2, v3, v1
|
|
|
|
; CI-NEXT: v_trunc_f32_e32 v2, v2
|
|
|
|
; CI-NEXT: v_fma_f32 v1, -v2, v3, v1
|
|
|
|
; CI-NEXT: v_cvt_f16_f32_e32 v1, v1
|
|
|
|
; CI-NEXT: v_or_b32_e32 v0, v1, v0
|
|
|
|
; CI-NEXT: buffer_store_dword v0, off, s[0:3], 0
|
|
|
|
; CI-NEXT: s_endpgm
|
|
|
|
;
|
|
|
|
; VI-LABEL: frem_v2f16:
|
|
|
|
; VI: ; %bb.0:
|
|
|
|
; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
|
|
|
|
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
|
|
|
|
; VI-NEXT: s_waitcnt lgkmcnt(0)
|
|
|
|
; VI-NEXT: v_mov_b32_e32 v2, s6
|
|
|
|
; VI-NEXT: s_add_u32 s0, s0, 16
|
|
|
|
; VI-NEXT: v_mov_b32_e32 v3, s7
|
|
|
|
; VI-NEXT: s_addc_u32 s1, s1, 0
|
|
|
|
; VI-NEXT: flat_load_dword v4, v[2:3]
|
|
|
|
; VI-NEXT: v_mov_b32_e32 v3, s1
|
|
|
|
; VI-NEXT: v_mov_b32_e32 v2, s0
|
|
|
|
; VI-NEXT: flat_load_dword v2, v[2:3]
|
|
|
|
; VI-NEXT: v_mov_b32_e32 v0, s4
|
|
|
|
; VI-NEXT: v_mov_b32_e32 v1, s5
|
2020-10-16 15:09:38 +08:00
|
|
|
; VI-NEXT: s_waitcnt vmcnt(1)
|
2020-07-24 19:05:46 +08:00
|
|
|
; VI-NEXT: v_lshrrev_b32_e32 v3, 16, v4
|
|
|
|
; VI-NEXT: v_cvt_f32_f16_e32 v5, v3
|
2020-10-16 15:09:38 +08:00
|
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
2020-07-24 19:05:46 +08:00
|
|
|
; VI-NEXT: v_lshrrev_b32_e32 v6, 16, v2
|
|
|
|
; VI-NEXT: v_cvt_f32_f16_e32 v7, v6
|
|
|
|
; VI-NEXT: v_rcp_f32_e32 v7, v7
|
|
|
|
; VI-NEXT: v_mul_f32_e32 v5, v5, v7
|
|
|
|
; VI-NEXT: v_cvt_f16_f32_e32 v5, v5
|
|
|
|
; VI-NEXT: v_div_fixup_f16 v5, v5, v6, v3
|
|
|
|
; VI-NEXT: v_trunc_f16_e32 v5, v5
|
|
|
|
; VI-NEXT: v_fma_f16 v3, -v5, v6, v3
|
|
|
|
; VI-NEXT: v_cvt_f32_f16_e32 v6, v2
|
|
|
|
; VI-NEXT: v_cvt_f32_f16_e32 v5, v4
|
|
|
|
; VI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
|
|
|
|
; VI-NEXT: v_rcp_f32_e32 v6, v6
|
|
|
|
; VI-NEXT: v_mul_f32_e32 v5, v5, v6
|
|
|
|
; VI-NEXT: v_cvt_f16_f32_e32 v5, v5
|
|
|
|
; VI-NEXT: v_div_fixup_f16 v5, v5, v2, v4
|
|
|
|
; VI-NEXT: v_trunc_f16_e32 v5, v5
|
|
|
|
; VI-NEXT: v_fma_f16 v2, -v5, v2, v4
|
|
|
|
; VI-NEXT: v_or_b32_e32 v2, v2, v3
|
|
|
|
; VI-NEXT: flat_store_dword v[0:1], v2
|
|
|
|
; VI-NEXT: s_endpgm
|
2021-03-29 17:12:46 +08:00
|
|
|
;
|
|
|
|
; GFX9-LABEL: frem_v2f16:
|
|
|
|
; GFX9: ; %bb.0:
|
|
|
|
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
|
|
|
|
; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
|
|
|
|
; GFX9-NEXT: v_mov_b32_e32 v0, 0
|
|
|
|
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
|
|
|
|
; GFX9-NEXT: global_load_dword v1, v0, s[6:7]
|
|
|
|
; GFX9-NEXT: global_load_dword v2, v0, s[2:3] offset:16
|
|
|
|
; GFX9-NEXT: s_waitcnt vmcnt(1)
|
|
|
|
; GFX9-NEXT: v_cvt_f32_f16_e32 v3, v1
|
|
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
|
|
; GFX9-NEXT: v_cvt_f32_f16_e32 v4, v2
|
|
|
|
; GFX9-NEXT: v_rcp_f32_e32 v4, v4
|
|
|
|
; GFX9-NEXT: v_mul_f32_e32 v3, v3, v4
|
|
|
|
; GFX9-NEXT: v_cvt_f16_f32_e32 v3, v3
|
|
|
|
; GFX9-NEXT: v_div_fixup_f16 v3, v3, v2, v1
|
|
|
|
; GFX9-NEXT: v_trunc_f16_e32 v3, v3
|
|
|
|
; GFX9-NEXT: v_fma_f16 v3, -v3, v2, v1
|
|
|
|
; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v2
|
|
|
|
; GFX9-NEXT: v_cvt_f32_f16_e32 v5, v2
|
|
|
|
; GFX9-NEXT: v_lshrrev_b32_e32 v1, 16, v1
|
|
|
|
; GFX9-NEXT: v_cvt_f32_f16_e32 v4, v1
|
|
|
|
; GFX9-NEXT: v_rcp_f32_e32 v5, v5
|
|
|
|
; GFX9-NEXT: v_mul_f32_e32 v4, v4, v5
|
|
|
|
; GFX9-NEXT: v_cvt_f16_f32_e32 v4, v4
|
|
|
|
; GFX9-NEXT: v_div_fixup_f16 v4, v4, v2, v1
|
|
|
|
; GFX9-NEXT: v_trunc_f16_e32 v4, v4
|
|
|
|
; GFX9-NEXT: v_fma_f16 v1, -v4, v2, v1
|
|
|
|
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff, v3
|
|
|
|
; GFX9-NEXT: v_lshl_or_b32 v1, v1, 16, v2
|
|
|
|
; GFX9-NEXT: global_store_dword v0, v1, s[4:5]
|
|
|
|
; GFX9-NEXT: s_endpgm
|
|
|
|
;
|
|
|
|
; GFX10-LABEL: frem_v2f16:
|
|
|
|
; GFX10: ; %bb.0:
|
|
|
|
; GFX10-NEXT: s_clause 0x1
|
|
|
|
; GFX10-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
|
|
|
|
; GFX10-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
|
|
|
|
; GFX10-NEXT: v_mov_b32_e32 v0, 0
|
|
|
|
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
|
|
|
|
; GFX10-NEXT: s_clause 0x1
|
|
|
|
; GFX10-NEXT: global_load_dword v1, v0, s[6:7]
|
|
|
|
; GFX10-NEXT: global_load_dword v2, v0, s[2:3] offset:16
|
|
|
|
; GFX10-NEXT: s_waitcnt vmcnt(1)
|
|
|
|
; GFX10-NEXT: v_cvt_f32_f16_e32 v3, v1
|
|
|
|
; GFX10-NEXT: s_waitcnt vmcnt(0)
|
|
|
|
; GFX10-NEXT: v_cvt_f32_f16_e32 v4, v2
|
|
|
|
; GFX10-NEXT: v_rcp_f32_e32 v4, v4
|
|
|
|
; GFX10-NEXT: v_mul_f32_e32 v3, v3, v4
|
|
|
|
; GFX10-NEXT: v_mov_b32_e32 v4, v1
|
|
|
|
; GFX10-NEXT: v_cvt_f16_f32_e32 v3, v3
|
|
|
|
; GFX10-NEXT: v_div_fixup_f16 v3, v3, v2, v1
|
|
|
|
; GFX10-NEXT: v_lshrrev_b32_e32 v1, 16, v1
|
|
|
|
; GFX10-NEXT: v_trunc_f16_e32 v3, v3
|
|
|
|
; GFX10-NEXT: v_fmac_f16_e64 v4, -v3, v2
|
|
|
|
; GFX10-NEXT: v_lshrrev_b32_e32 v2, 16, v2
|
|
|
|
; GFX10-NEXT: v_cvt_f32_f16_e32 v3, v1
|
|
|
|
; GFX10-NEXT: v_cvt_f32_f16_e32 v5, v2
|
|
|
|
; GFX10-NEXT: v_rcp_f32_e32 v5, v5
|
|
|
|
; GFX10-NEXT: v_mul_f32_e32 v3, v3, v5
|
|
|
|
; GFX10-NEXT: v_cvt_f16_f32_e32 v3, v3
|
|
|
|
; GFX10-NEXT: v_div_fixup_f16 v3, v3, v2, v1
|
|
|
|
; GFX10-NEXT: v_trunc_f16_e32 v3, v3
|
|
|
|
; GFX10-NEXT: v_fmac_f16_e64 v1, -v3, v2
|
|
|
|
; GFX10-NEXT: v_and_b32_e32 v2, 0xffff, v4
|
|
|
|
; GFX10-NEXT: v_lshl_or_b32 v1, v1, 16, v2
|
|
|
|
; GFX10-NEXT: global_store_dword v0, v1, s[4:5]
|
|
|
|
; GFX10-NEXT: s_endpgm
<2 x half> addrspace(1)* %in2) #0 {
%gep2 = getelementptr <2 x half>, <2 x half> addrspace(1)* %in2, i32 4
%r0 = load <2 x half>, <2 x half> addrspace(1)* %in1, align 8
%r1 = load <2 x half>, <2 x half> addrspace(1)* %gep2, align 8
%r2 = frem <2 x half> %r0, %r1
store <2 x half> %r2, <2 x half> addrspace(1)* %out, align 8
ret void
}

define amdgpu_kernel void @frem_v4f16(<4 x half> addrspace(1)* %out, <4 x half> addrspace(1)* %in1,
|
|
|
|
; SI-LABEL: frem_v4f16:
|
|
|
|
; SI: ; %bb.0:
|
|
|
|
; SI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
|
|
|
|
; SI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0xd
|
|
|
|
; SI-NEXT: s_mov_b32 s3, 0xf000
|
|
|
|
; SI-NEXT: s_mov_b32 s2, -1
|
|
|
|
; SI-NEXT: s_waitcnt lgkmcnt(0)
|
|
|
|
; SI-NEXT: s_mov_b32 s0, s4
|
|
|
|
; SI-NEXT: s_mov_b32 s1, s5
|
|
|
|
; SI-NEXT: s_mov_b32 s4, s6
|
|
|
|
; SI-NEXT: s_mov_b32 s5, s7
|
|
|
|
; SI-NEXT: s_mov_b32 s6, s2
|
|
|
|
; SI-NEXT: s_mov_b32 s7, s3
|
|
|
|
; SI-NEXT: s_mov_b32 s10, s2
|
|
|
|
; SI-NEXT: s_mov_b32 s11, s3
|
|
|
|
; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0
|
|
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
|
|
; SI-NEXT: v_cvt_f32_f16_e32 v2, v0
|
|
|
|
; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
|
|
|
|
; SI-NEXT: v_cvt_f32_f16_e32 v3, v0
|
|
|
|
; SI-NEXT: v_cvt_f32_f16_e32 v4, v1
|
|
|
|
; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v1
|
|
|
|
; SI-NEXT: v_cvt_f32_f16_e32 v5, v0
|
|
|
|
; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[8:11], 0 offset:32
|
|
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
|
|
; SI-NEXT: v_cvt_f32_f16_e32 v6, v0
|
|
|
|
; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
|
|
|
|
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
|
|
|
|
; SI-NEXT: v_cvt_f32_f16_e32 v7, v1
|
|
|
|
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
|
|
|
|
; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
|
|
|
|
; SI-NEXT: v_div_scale_f32 v8, vcc, v5, v1, v5
|
|
|
|
; SI-NEXT: v_div_scale_f32 v9, s[4:5], v1, v1, v5
|
|
|
|
; SI-NEXT: v_rcp_f32_e32 v10, v9
|
|
|
|
; SI-NEXT: s_mov_b32 s6, 3
|
|
|
|
; SI-NEXT: s_mov_b32 s7, 0
|
|
|
|
; SI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s6
|
|
|
|
; SI-NEXT: v_fma_f32 v11, -v9, v10, 1.0
|
|
|
|
; SI-NEXT: v_fma_f32 v10, v11, v10, v10
|
|
|
|
; SI-NEXT: v_mul_f32_e32 v11, v8, v10
|
|
|
|
; SI-NEXT: v_fma_f32 v12, -v9, v11, v8
|
|
|
|
; SI-NEXT: v_fma_f32 v11, v12, v10, v11
|
|
|
|
; SI-NEXT: v_fma_f32 v8, -v9, v11, v8
|
|
|
|
; SI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s7
|
|
|
|
; SI-NEXT: v_div_fmas_f32 v8, v8, v10, v11
|
|
|
|
; SI-NEXT: v_div_fixup_f32 v8, v8, v1, v5
|
|
|
|
; SI-NEXT: v_trunc_f32_e32 v8, v8
|
|
|
|
; SI-NEXT: v_fma_f32 v1, -v8, v1, v5
|
|
|
|
; SI-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 2, 2), 0
|
|
|
|
; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
|
|
|
|
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
|
|
|
|
; SI-NEXT: v_div_scale_f32 v5, vcc, v4, v7, v4
|
|
|
|
; SI-NEXT: v_div_scale_f32 v8, s[4:5], v7, v7, v4
|
|
|
|
; SI-NEXT: v_rcp_f32_e32 v9, v8
|
|
|
|
; SI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s6
|
|
|
|
; SI-NEXT: v_fma_f32 v10, -v8, v9, 1.0
|
|
|
|
; SI-NEXT: v_fma_f32 v9, v10, v9, v9
|
|
|
|
; SI-NEXT: v_mul_f32_e32 v10, v5, v9
|
|
|
|
; SI-NEXT: v_fma_f32 v11, -v8, v10, v5
|
|
|
|
; SI-NEXT: v_fma_f32 v10, v11, v9, v10
|
|
|
|
; SI-NEXT: v_fma_f32 v5, -v8, v10, v5
|
|
|
|
; SI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s7
|
|
|
|
; SI-NEXT: v_div_fmas_f32 v5, v5, v9, v10
|
|
|
|
; SI-NEXT: v_div_fixup_f32 v5, v5, v7, v4
|
|
|
|
; SI-NEXT: v_trunc_f32_e32 v5, v5
|
|
|
|
; SI-NEXT: v_fma_f32 v4, -v5, v7, v4
|
|
|
|
; SI-NEXT: v_cvt_f16_f32_e32 v4, v4
|
|
|
|
; SI-NEXT: v_or_b32_e32 v1, v4, v1
|
|
|
|
; SI-NEXT: v_div_scale_f32 v4, vcc, v3, v0, v3
|
|
|
|
; SI-NEXT: v_div_scale_f32 v5, s[4:5], v0, v0, v3
|
|
|
|
; SI-NEXT: v_rcp_f32_e32 v7, v5
|
|
|
|
; SI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s6
|
|
|
|
; SI-NEXT: v_fma_f32 v8, -v5, v7, 1.0
|
|
|
|
; SI-NEXT: v_fma_f32 v7, v8, v7, v7
|
|
|
|
; SI-NEXT: v_mul_f32_e32 v8, v4, v7
|
|
|
|
; SI-NEXT: v_fma_f32 v9, -v5, v8, v4
|
|
|
|
; SI-NEXT: v_fma_f32 v8, v9, v7, v8
|
|
|
|
; SI-NEXT: v_fma_f32 v4, -v5, v8, v4
|
|
|
|
; SI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s7
|
|
|
|
; SI-NEXT: v_div_fmas_f32 v4, v4, v7, v8
|
|
|
|
; SI-NEXT: v_div_fixup_f32 v4, v4, v0, v3
|
|
|
|
; SI-NEXT: v_trunc_f32_e32 v4, v4
|
|
|
|
; SI-NEXT: v_fma_f32 v0, -v4, v0, v3
|
|
|
|
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
|
|
|
|
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
|
|
|
|
; SI-NEXT: v_div_scale_f32 v3, vcc, v2, v6, v2
|
|
|
|
; SI-NEXT: v_div_scale_f32 v4, s[4:5], v6, v6, v2
|
|
|
|
; SI-NEXT: v_rcp_f32_e32 v5, v4
|
|
|
|
; SI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s6
|
|
|
|
; SI-NEXT: v_fma_f32 v7, -v4, v5, 1.0
|
|
|
|
; SI-NEXT: v_fma_f32 v5, v7, v5, v5
|
|
|
|
; SI-NEXT: v_mul_f32_e32 v7, v3, v5
|
|
|
|
; SI-NEXT: v_fma_f32 v8, -v4, v7, v3
|
|
|
|
; SI-NEXT: v_fma_f32 v7, v8, v5, v7
|
|
|
|
; SI-NEXT: v_fma_f32 v3, -v4, v7, v3
|
|
|
|
; SI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s7
|
|
|
|
; SI-NEXT: v_div_fmas_f32 v3, v3, v5, v7
|
|
|
|
; SI-NEXT: v_div_fixup_f32 v3, v3, v6, v2
|
|
|
|
; SI-NEXT: v_trunc_f32_e32 v3, v3
|
|
|
|
; SI-NEXT: v_fma_f32 v2, -v3, v6, v2
|
|
|
|
; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
|
|
|
|
; SI-NEXT: v_or_b32_e32 v0, v2, v0
|
|
|
|
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
|
|
|
|
; SI-NEXT: s_endpgm
|
|
|
|
;
|
|
|
|
; CI-LABEL: frem_v4f16:
|
|
|
|
; CI: ; %bb.0:
|
|
|
|
; CI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
|
|
|
|
; CI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0xd
|
|
|
|
; CI-NEXT: s_mov_b32 s3, 0xf000
|
|
|
|
; CI-NEXT: s_mov_b32 s2, -1
|
|
|
|
; CI-NEXT: s_mov_b32 s10, s2
|
|
|
|
; CI-NEXT: s_waitcnt lgkmcnt(0)
|
|
|
|
; CI-NEXT: s_mov_b32 s0, s4
|
|
|
|
; CI-NEXT: s_mov_b32 s1, s5
|
|
|
|
; CI-NEXT: s_mov_b32 s4, s6
|
|
|
|
; CI-NEXT: s_mov_b32 s5, s7
|
|
|
|
; CI-NEXT: s_mov_b32 s6, s2
|
|
|
|
; CI-NEXT: s_mov_b32 s7, s3
|
|
|
|
; CI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0
|
|
|
|
; CI-NEXT: s_mov_b32 s11, s3
|
|
|
|
; CI-NEXT: s_mov_b32 s6, 3
|
|
|
|
; CI-NEXT: s_mov_b32 s7, 0
|
|
|
|
; CI-NEXT: s_waitcnt vmcnt(0)
|
|
|
|
; CI-NEXT: v_cvt_f32_f16_e32 v2, v0
|
|
|
|
; CI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
|
|
|
|
; CI-NEXT: v_cvt_f32_f16_e32 v3, v0
|
|
|
|
; CI-NEXT: v_lshrrev_b32_e32 v0, 16, v1
|
|
|
|
; CI-NEXT: v_cvt_f32_f16_e32 v4, v1
|
|
|
|
; CI-NEXT: v_cvt_f32_f16_e32 v5, v0
|
|
|
|
; CI-NEXT: buffer_load_dwordx2 v[0:1], off, s[8:11], 0 offset:32
|
|
|
|
; CI-NEXT: s_waitcnt vmcnt(0)
|
|
|
|
; CI-NEXT: v_cvt_f32_f16_e32 v7, v1
|
|
|
|
; CI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
|
|
|
|
; CI-NEXT: v_cvt_f32_f16_e32 v1, v1
|
|
|
|
; CI-NEXT: v_cvt_f32_f16_e32 v6, v0
|
|
|
|
; CI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
|
|
|
|
; CI-NEXT: v_cvt_f32_f16_e32 v0, v0
|
|
|
|
; CI-NEXT: v_div_scale_f32 v9, s[4:5], v1, v1, v5
|
|
|
|
; CI-NEXT: v_div_scale_f32 v8, vcc, v5, v1, v5
|
|
|
|
; CI-NEXT: v_rcp_f32_e32 v10, v9
|
|
|
|
; CI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s6
|
|
|
|
; CI-NEXT: v_fma_f32 v11, -v9, v10, 1.0
|
|
|
|
; CI-NEXT: v_fma_f32 v10, v11, v10, v10
|
|
|
|
; CI-NEXT: v_mul_f32_e32 v11, v8, v10
|
|
|
|
; CI-NEXT: v_fma_f32 v12, -v9, v11, v8
|
|
|
|
; CI-NEXT: v_fma_f32 v11, v12, v10, v11
|
|
|
|
; CI-NEXT: v_fma_f32 v8, -v9, v11, v8
|
|
|
|
; CI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s7
|
|
|
|
; CI-NEXT: v_div_fmas_f32 v8, v8, v10, v11
|
|
|
|
; CI-NEXT: v_div_fixup_f32 v8, v8, v1, v5
|
|
|
|
; CI-NEXT: v_trunc_f32_e32 v8, v8
|
|
|
|
; CI-NEXT: v_fma_f32 v1, -v8, v1, v5
|
|
|
|
; CI-NEXT: v_div_scale_f32 v8, s[4:5], v7, v7, v4
|
2020-09-10 00:21:36 +08:00
|
|
|
; CI-NEXT: v_div_scale_f32 v5, vcc, v4, v7, v4
|
2020-07-24 19:05:46 +08:00
|
|
|
; CI-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 2, 2), 0
|
|
|
|
; CI-NEXT: v_cvt_f16_f32_e32 v1, v1
|
|
|
|
; CI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
|
|
|
|
; CI-NEXT: v_rcp_f32_e32 v9, v8
|
|
|
|
; CI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s6
|
|
|
|
; CI-NEXT: v_fma_f32 v10, -v8, v9, 1.0
|
|
|
|
; CI-NEXT: v_fma_f32 v9, v10, v9, v9
|
|
|
|
; CI-NEXT: v_mul_f32_e32 v10, v5, v9
|
|
|
|
; CI-NEXT: v_fma_f32 v11, -v8, v10, v5
|
|
|
|
; CI-NEXT: v_fma_f32 v10, v11, v9, v10
|
|
|
|
; CI-NEXT: v_fma_f32 v5, -v8, v10, v5
|
|
|
|
; CI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s7
|
|
|
|
; CI-NEXT: v_div_fmas_f32 v5, v5, v9, v10
|
|
|
|
; CI-NEXT: v_div_fixup_f32 v5, v5, v7, v4
|
|
|
|
; CI-NEXT: v_trunc_f32_e32 v5, v5
|
|
|
|
; CI-NEXT: v_fma_f32 v4, -v5, v7, v4
|
|
|
|
; CI-NEXT: v_div_scale_f32 v5, s[4:5], v0, v0, v3
|
|
|
|
; CI-NEXT: v_cvt_f16_f32_e32 v4, v4
|
|
|
|
; CI-NEXT: v_or_b32_e32 v1, v4, v1
|
|
|
|
; CI-NEXT: v_div_scale_f32 v4, vcc, v3, v0, v3
|
|
|
|
; CI-NEXT: v_rcp_f32_e32 v7, v5
|
|
|
|
; CI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s6
|
|
|
|
; CI-NEXT: v_fma_f32 v8, -v5, v7, 1.0
|
|
|
|
; CI-NEXT: v_fma_f32 v7, v8, v7, v7
|
|
|
|
; CI-NEXT: v_mul_f32_e32 v8, v4, v7
|
|
|
|
; CI-NEXT: v_fma_f32 v9, -v5, v8, v4
|
|
|
|
; CI-NEXT: v_fma_f32 v8, v9, v7, v8
|
|
|
|
; CI-NEXT: v_fma_f32 v4, -v5, v8, v4
|
|
|
|
; CI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s7
|
|
|
|
; CI-NEXT: v_div_fmas_f32 v4, v4, v7, v8
|
|
|
|
; CI-NEXT: v_div_fixup_f32 v4, v4, v0, v3
|
|
|
|
; CI-NEXT: v_trunc_f32_e32 v4, v4
|
|
|
|
; CI-NEXT: v_fma_f32 v0, -v4, v0, v3
|
|
|
|
; CI-NEXT: v_div_scale_f32 v4, s[4:5], v6, v6, v2
|
|
|
|
; CI-NEXT: v_div_scale_f32 v3, vcc, v2, v6, v2
|
2020-09-10 00:21:36 +08:00
|
|
|
; CI-NEXT: v_cvt_f16_f32_e32 v0, v0
|
2020-07-24 19:05:46 +08:00
|
|
|
; CI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
|
|
|
|
; CI-NEXT: v_rcp_f32_e32 v5, v4
|
|
|
|
; CI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s6
|
|
|
|
; CI-NEXT: v_fma_f32 v7, -v4, v5, 1.0
|
|
|
|
; CI-NEXT: v_fma_f32 v5, v7, v5, v5
|
|
|
|
; CI-NEXT: v_mul_f32_e32 v7, v3, v5
|
|
|
|
; CI-NEXT: v_fma_f32 v8, -v4, v7, v3
|
|
|
|
; CI-NEXT: v_fma_f32 v7, v8, v5, v7
|
|
|
|
; CI-NEXT: v_fma_f32 v3, -v4, v7, v3
|
|
|
|
; CI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s7
|
|
|
|
; CI-NEXT: v_div_fmas_f32 v3, v3, v5, v7
|
|
|
|
; CI-NEXT: v_div_fixup_f32 v3, v3, v6, v2
|
|
|
|
; CI-NEXT: v_trunc_f32_e32 v3, v3
|
|
|
|
; CI-NEXT: v_fma_f32 v2, -v3, v6, v2
|
|
|
|
; CI-NEXT: v_cvt_f16_f32_e32 v2, v2
|
|
|
|
; CI-NEXT: v_or_b32_e32 v0, v2, v0
|
|
|
|
; CI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
|
|
|
|
; CI-NEXT: s_endpgm
|
|
|
|
;
|
|
|
|
; VI-LABEL: frem_v4f16:
|
|
|
|
; VI: ; %bb.0:
|
|
|
|
; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
|
|
|
|
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
|
|
|
|
; VI-NEXT: s_waitcnt lgkmcnt(0)
|
|
|
|
; VI-NEXT: v_mov_b32_e32 v2, s6
|
|
|
|
; VI-NEXT: s_add_u32 s0, s0, 32
|
|
|
|
; VI-NEXT: s_addc_u32 s1, s1, 0
|
|
|
|
; VI-NEXT: v_mov_b32_e32 v5, s1
|
|
|
|
; VI-NEXT: v_mov_b32_e32 v4, s0
|
|
|
|
; VI-NEXT: flat_load_dwordx2 v[4:5], v[4:5]
|
|
|
|
; VI-NEXT: v_mov_b32_e32 v3, s7
|
|
|
|
; VI-NEXT: flat_load_dwordx2 v[2:3], v[2:3]
|
|
|
|
; VI-NEXT: v_mov_b32_e32 v0, s4
|
|
|
|
; VI-NEXT: v_mov_b32_e32 v1, s5
|
2020-10-16 15:09:38 +08:00
|
|
|
; VI-NEXT: s_waitcnt vmcnt(1)
|
2020-07-24 19:05:46 +08:00
|
|
|
; VI-NEXT: v_lshrrev_b32_e32 v8, 16, v5
|
|
|
|
; VI-NEXT: v_cvt_f32_f16_e32 v9, v8
|
2020-10-16 15:09:38 +08:00
|
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
2020-07-24 19:05:46 +08:00
|
|
|
; VI-NEXT: v_lshrrev_b32_e32 v6, 16, v3
|
|
|
|
; VI-NEXT: v_cvt_f32_f16_e32 v7, v6
|
|
|
|
; VI-NEXT: v_rcp_f32_e32 v9, v9
|
|
|
|
; VI-NEXT: v_mul_f32_e32 v7, v7, v9
|
|
|
|
; VI-NEXT: v_cvt_f16_f32_e32 v7, v7
|
|
|
|
; VI-NEXT: v_div_fixup_f16 v7, v7, v8, v6
|
|
|
|
; VI-NEXT: v_trunc_f16_e32 v7, v7
|
|
|
|
; VI-NEXT: v_fma_f16 v6, -v7, v8, v6
|
|
|
|
; VI-NEXT: v_cvt_f32_f16_e32 v8, v5
|
|
|
|
; VI-NEXT: v_cvt_f32_f16_e32 v7, v3
|
|
|
|
; VI-NEXT: v_lshlrev_b32_e32 v6, 16, v6
|
|
|
|
; VI-NEXT: v_rcp_f32_e32 v8, v8
|
|
|
|
; VI-NEXT: v_mul_f32_e32 v7, v7, v8
|
|
|
|
; VI-NEXT: v_cvt_f16_f32_e32 v7, v7
|
|
|
|
; VI-NEXT: v_div_fixup_f16 v7, v7, v5, v3
|
|
|
|
; VI-NEXT: v_trunc_f16_e32 v7, v7
|
|
|
|
; VI-NEXT: v_fma_f16 v3, -v7, v5, v3
|
|
|
|
; VI-NEXT: v_lshrrev_b32_e32 v7, 16, v4
|
|
|
|
; VI-NEXT: v_cvt_f32_f16_e32 v8, v7
|
|
|
|
; VI-NEXT: v_lshrrev_b32_e32 v5, 16, v2
|
|
|
|
; VI-NEXT: v_or_b32_e32 v3, v3, v6
|
|
|
|
; VI-NEXT: v_cvt_f32_f16_e32 v6, v5
|
|
|
|
; VI-NEXT: v_rcp_f32_e32 v8, v8
|
|
|
|
; VI-NEXT: v_mul_f32_e32 v6, v6, v8
|
|
|
|
; VI-NEXT: v_cvt_f16_f32_e32 v6, v6
|
|
|
|
; VI-NEXT: v_div_fixup_f16 v6, v6, v7, v5
|
|
|
|
; VI-NEXT: v_trunc_f16_e32 v6, v6
|
|
|
|
; VI-NEXT: v_fma_f16 v5, -v6, v7, v5
|
|
|
|
; VI-NEXT: v_cvt_f32_f16_e32 v7, v4
|
|
|
|
; VI-NEXT: v_cvt_f32_f16_e32 v6, v2
|
|
|
|
; VI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
|
|
|
|
; VI-NEXT: v_rcp_f32_e32 v7, v7
|
|
|
|
; VI-NEXT: v_mul_f32_e32 v6, v6, v7
|
|
|
|
; VI-NEXT: v_cvt_f16_f32_e32 v6, v6
|
|
|
|
; VI-NEXT: v_div_fixup_f16 v6, v6, v4, v2
|
|
|
|
; VI-NEXT: v_trunc_f16_e32 v6, v6
|
|
|
|
; VI-NEXT: v_fma_f16 v2, -v6, v4, v2
|
|
|
|
; VI-NEXT: v_or_b32_e32 v2, v2, v5
|
|
|
|
; VI-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
|
|
|
|
; VI-NEXT: s_endpgm
|
2021-03-29 17:12:46 +08:00
|
|
|
;
|
|
|
|
; GFX9-LABEL: frem_v4f16:
|
|
|
|
; GFX9: ; %bb.0:
|
|
|
|
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
|
|
|
|
; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
|
|
|
|
; GFX9-NEXT: v_mov_b32_e32 v4, 0
|
|
|
|
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
|
|
|
|
; GFX9-NEXT: global_load_dwordx2 v[0:1], v4, s[6:7]
|
|
|
|
; GFX9-NEXT: global_load_dwordx2 v[2:3], v4, s[2:3] offset:32
|
|
|
|
; GFX9-NEXT: s_waitcnt vmcnt(1)
|
|
|
|
; GFX9-NEXT: v_cvt_f32_f16_e32 v5, v1
|
|
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
|
|
; GFX9-NEXT: v_cvt_f32_f16_e32 v6, v3
|
|
|
|
; GFX9-NEXT: v_rcp_f32_e32 v6, v6
|
|
|
|
; GFX9-NEXT: v_mul_f32_e32 v5, v5, v6
|
|
|
|
; GFX9-NEXT: v_cvt_f16_f32_e32 v5, v5
|
|
|
|
; GFX9-NEXT: v_div_fixup_f16 v5, v5, v3, v1
|
|
|
|
; GFX9-NEXT: v_trunc_f16_e32 v5, v5
|
|
|
|
; GFX9-NEXT: v_fma_f16 v5, -v5, v3, v1
|
|
|
|
; GFX9-NEXT: v_lshrrev_b32_e32 v3, 16, v3
|
|
|
|
; GFX9-NEXT: v_cvt_f32_f16_e32 v7, v3
|
|
|
|
; GFX9-NEXT: v_lshrrev_b32_e32 v1, 16, v1
|
|
|
|
; GFX9-NEXT: v_cvt_f32_f16_e32 v6, v1
|
|
|
|
; GFX9-NEXT: v_rcp_f32_e32 v7, v7
|
|
|
|
; GFX9-NEXT: v_mul_f32_e32 v6, v6, v7
|
|
|
|
; GFX9-NEXT: v_cvt_f16_f32_e32 v6, v6
|
|
|
|
; GFX9-NEXT: v_div_fixup_f16 v6, v6, v3, v1
|
|
|
|
; GFX9-NEXT: v_trunc_f16_e32 v6, v6
|
|
|
|
; GFX9-NEXT: v_fma_f16 v1, -v6, v3, v1
|
|
|
|
; GFX9-NEXT: v_cvt_f32_f16_e32 v6, v2
|
|
|
|
; GFX9-NEXT: v_mov_b32_e32 v3, 0xffff
|
|
|
|
; GFX9-NEXT: v_and_b32_e32 v5, v3, v5
|
|
|
|
; GFX9-NEXT: v_lshl_or_b32 v1, v1, 16, v5
|
|
|
|
; GFX9-NEXT: v_cvt_f32_f16_e32 v5, v0
|
|
|
|
; GFX9-NEXT: v_rcp_f32_e32 v6, v6
|
|
|
|
; GFX9-NEXT: v_mul_f32_e32 v5, v5, v6
|
|
|
|
; GFX9-NEXT: v_cvt_f16_f32_e32 v5, v5
|
|
|
|
; GFX9-NEXT: v_div_fixup_f16 v5, v5, v2, v0
|
|
|
|
; GFX9-NEXT: v_trunc_f16_e32 v5, v5
|
|
|
|
; GFX9-NEXT: v_fma_f16 v5, -v5, v2, v0
|
|
|
|
; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v2
|
|
|
|
; GFX9-NEXT: v_cvt_f32_f16_e32 v7, v2
|
|
|
|
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v0
|
|
|
|
; GFX9-NEXT: v_cvt_f32_f16_e32 v6, v0
|
|
|
|
; GFX9-NEXT: v_rcp_f32_e32 v7, v7
|
|
|
|
; GFX9-NEXT: v_mul_f32_e32 v6, v6, v7
|
|
|
|
; GFX9-NEXT: v_cvt_f16_f32_e32 v6, v6
|
|
|
|
; GFX9-NEXT: v_div_fixup_f16 v6, v6, v2, v0
|
|
|
|
; GFX9-NEXT: v_trunc_f16_e32 v6, v6
|
|
|
|
; GFX9-NEXT: v_fma_f16 v0, -v6, v2, v0
|
|
|
|
; GFX9-NEXT: v_and_b32_e32 v2, v3, v5
|
|
|
|
; GFX9-NEXT: v_lshl_or_b32 v0, v0, 16, v2
|
|
|
|
; GFX9-NEXT: global_store_dwordx2 v4, v[0:1], s[4:5]
|
|
|
|
; GFX9-NEXT: s_endpgm
|
|
|
|
;
|
|
|
|
; GFX10-LABEL: frem_v4f16:
|
|
|
|
; GFX10: ; %bb.0:
|
|
|
|
; GFX10-NEXT: s_clause 0x1
|
|
|
|
; GFX10-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
|
|
|
|
; GFX10-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
|
2021-04-27 03:48:12 +08:00
|
|
|
; GFX10-NEXT: v_mov_b32_e32 v4, 0
|
2021-03-29 17:12:46 +08:00
|
|
|
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
|
|
|
|
; GFX10-NEXT: s_clause 0x1
|
2021-04-27 03:48:12 +08:00
|
|
|
; GFX10-NEXT: global_load_dwordx2 v[0:1], v4, s[6:7]
|
|
|
|
; GFX10-NEXT: global_load_dwordx2 v[2:3], v4, s[2:3] offset:32
|
2021-03-29 17:12:46 +08:00
|
|
|
; GFX10-NEXT: s_waitcnt vmcnt(1)
|
|
|
|
; GFX10-NEXT: v_cvt_f32_f16_e32 v5, v1
|
|
|
|
; GFX10-NEXT: s_waitcnt vmcnt(0)
|
2021-04-27 03:48:12 +08:00
|
|
|
; GFX10-NEXT: v_cvt_f32_f16_e32 v6, v3
|
2021-03-29 17:12:46 +08:00
|
|
|
; GFX10-NEXT: v_rcp_f32_e32 v6, v6
|
|
|
|
; GFX10-NEXT: v_mul_f32_e32 v5, v5, v6
|
|
|
|
; GFX10-NEXT: v_mov_b32_e32 v6, v1
|
2021-04-27 03:48:12 +08:00
|
|
|
; GFX10-NEXT: v_cvt_f16_f32_e32 v5, v5
|
|
|
|
; GFX10-NEXT: v_div_fixup_f16 v5, v5, v3, v1
|
|
|
|
; GFX10-NEXT: v_lshrrev_b32_e32 v1, 16, v1
|
2021-03-29 17:12:46 +08:00
|
|
|
; GFX10-NEXT: v_trunc_f16_e32 v5, v5
|
2021-04-27 03:48:12 +08:00
|
|
|
; GFX10-NEXT: v_fmac_f16_e64 v6, -v5, v3
|
|
|
|
; GFX10-NEXT: v_lshrrev_b32_e32 v3, 16, v3
|
|
|
|
; GFX10-NEXT: v_cvt_f32_f16_e32 v5, v1
|
|
|
|
; GFX10-NEXT: v_cvt_f32_f16_e32 v7, v3
|
2021-03-29 17:12:46 +08:00
|
|
|
; GFX10-NEXT: v_rcp_f32_e32 v7, v7
|
2021-04-27 03:48:12 +08:00
|
|
|
; GFX10-NEXT: v_mul_f32_e32 v5, v5, v7
|
|
|
|
; GFX10-NEXT: v_cvt_f16_f32_e32 v5, v5
|
|
|
|
; GFX10-NEXT: v_div_fixup_f16 v5, v5, v3, v1
|
|
|
|
; GFX10-NEXT: v_trunc_f16_e32 v5, v5
|
|
|
|
; GFX10-NEXT: v_fmac_f16_e64 v1, -v5, v3
|
2021-03-29 17:12:46 +08:00
|
|
|
; GFX10-NEXT: v_mov_b32_e32 v3, 0xffff
|
|
|
|
; GFX10-NEXT: v_and_b32_e32 v5, v3, v6
|
2021-04-27 03:48:12 +08:00
|
|
|
; GFX10-NEXT: v_cvt_f32_f16_e32 v6, v2
|
|
|
|
; GFX10-NEXT: v_lshl_or_b32 v1, v1, 16, v5
|
2021-03-29 17:12:46 +08:00
|
|
|
; GFX10-NEXT: v_rcp_f32_e32 v6, v6
|
|
|
|
; GFX10-NEXT: v_cvt_f32_f16_e32 v5, v0
|
|
|
|
; GFX10-NEXT: v_mul_f32_e32 v5, v5, v6
|
|
|
|
; GFX10-NEXT: v_mov_b32_e32 v6, v0
|
|
|
|
; GFX10-NEXT: v_cvt_f16_f32_e32 v5, v5
|
2021-04-27 03:48:12 +08:00
|
|
|
; GFX10-NEXT: v_div_fixup_f16 v5, v5, v2, v0
|
2021-03-29 17:12:46 +08:00
|
|
|
; GFX10-NEXT: v_lshrrev_b32_e32 v0, 16, v0
|
|
|
|
; GFX10-NEXT: v_trunc_f16_e32 v5, v5
|
2021-04-27 03:48:12 +08:00
|
|
|
; GFX10-NEXT: v_fmac_f16_e64 v6, -v5, v2
|
|
|
|
; GFX10-NEXT: v_lshrrev_b32_e32 v2, 16, v2
|
2021-03-29 17:12:46 +08:00
|
|
|
; GFX10-NEXT: v_cvt_f32_f16_e32 v5, v0
|
2021-04-27 03:48:12 +08:00
|
|
|
; GFX10-NEXT: v_cvt_f32_f16_e32 v7, v2
|
|
|
|
; GFX10-NEXT: v_rcp_f32_e32 v7, v7
|
2021-03-29 17:12:46 +08:00
|
|
|
; GFX10-NEXT: v_mul_f32_e32 v5, v5, v7
|
|
|
|
; GFX10-NEXT: v_cvt_f16_f32_e32 v5, v5
|
|
|
|
; GFX10-NEXT: v_div_fixup_f16 v5, v5, v2, v0
|
|
|
|
; GFX10-NEXT: v_trunc_f16_e32 v5, v5
|
|
|
|
; GFX10-NEXT: v_fmac_f16_e64 v0, -v5, v2
|
|
|
|
; GFX10-NEXT: v_and_b32_e32 v2, v3, v6
|
|
|
|
; GFX10-NEXT: v_lshl_or_b32 v0, v0, 16, v2
|
2021-04-27 03:48:12 +08:00
|
|
|
; GFX10-NEXT: global_store_dwordx2 v4, v[0:1], s[4:5]
|
2021-03-29 17:12:46 +08:00
|
|
|
; GFX10-NEXT: s_endpgm
<4 x half> addrspace(1)* %in2) #0 {
%gep2 = getelementptr <4 x half>, <4 x half> addrspace(1)* %in2, i32 4
%r0 = load <4 x half>, <4 x half> addrspace(1)* %in1, align 16
%r1 = load <4 x half>, <4 x half> addrspace(1)* %gep2, align 16
%r2 = frem <4 x half> %r0, %r1
store <4 x half> %r2, <4 x half> addrspace(1)* %out, align 16
ret void
}

define amdgpu_kernel void @frem_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %in1,
|
2020-07-24 18:41:30 +08:00
|
|
|
; SI-LABEL: frem_v2f32:
|
|
|
|
; SI: ; %bb.0:
|
|
|
|
; SI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
|
|
|
|
; SI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0xd
|
|
|
|
; SI-NEXT: s_mov_b32 s3, 0xf000
|
|
|
|
; SI-NEXT: s_mov_b32 s2, -1
|
|
|
|
; SI-NEXT: s_waitcnt lgkmcnt(0)
|
|
|
|
; SI-NEXT: s_mov_b32 s0, s4
|
|
|
|
; SI-NEXT: s_mov_b32 s1, s5
|
|
|
|
; SI-NEXT: s_mov_b32 s4, s6
|
|
|
|
; SI-NEXT: s_mov_b32 s5, s7
|
|
|
|
; SI-NEXT: s_mov_b32 s6, s2
|
|
|
|
; SI-NEXT: s_mov_b32 s7, s3
|
|
|
|
; SI-NEXT: s_mov_b32 s10, s2
|
|
|
|
; SI-NEXT: s_mov_b32 s11, s3
|
|
|
|
; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0
|
|
|
|
; SI-NEXT: buffer_load_dwordx2 v[2:3], off, s[8:11], 0 offset:32
|
|
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
|
|
; SI-NEXT: v_div_scale_f32 v4, vcc, v1, v3, v1
|
|
|
|
; SI-NEXT: v_div_scale_f32 v5, s[4:5], v3, v3, v1
|
|
|
|
; SI-NEXT: v_rcp_f32_e32 v6, v5
|
|
|
|
; SI-NEXT: s_mov_b32 s6, 3
|
|
|
|
; SI-NEXT: s_mov_b32 s7, 0
|
|
|
|
; SI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s6
|
|
|
|
; SI-NEXT: v_fma_f32 v7, -v5, v6, 1.0
|
|
|
|
; SI-NEXT: v_fma_f32 v6, v7, v6, v6
|
|
|
|
; SI-NEXT: v_mul_f32_e32 v7, v4, v6
|
|
|
|
; SI-NEXT: v_fma_f32 v8, -v5, v7, v4
|
|
|
|
; SI-NEXT: v_fma_f32 v7, v8, v6, v7
|
|
|
|
; SI-NEXT: v_fma_f32 v4, -v5, v7, v4
|
|
|
|
; SI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s7
|
|
|
|
; SI-NEXT: v_div_fmas_f32 v4, v4, v6, v7
|
|
|
|
; SI-NEXT: v_div_fixup_f32 v4, v4, v3, v1
|
|
|
|
; SI-NEXT: v_trunc_f32_e32 v4, v4
|
2020-07-24 18:41:57 +08:00
|
|
|
; SI-NEXT: v_fma_f32 v1, -v4, v3, v1
|
2020-07-24 18:41:30 +08:00
|
|
|
; SI-NEXT: v_div_scale_f32 v3, vcc, v0, v2, v0
|
|
|
|
; SI-NEXT: v_div_scale_f32 v4, s[4:5], v2, v2, v0
|
|
|
|
; SI-NEXT: v_rcp_f32_e32 v5, v4
|
|
|
|
; SI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s6
|
|
|
|
; SI-NEXT: v_fma_f32 v6, -v4, v5, 1.0
|
|
|
|
; SI-NEXT: v_fma_f32 v5, v6, v5, v5
|
|
|
|
; SI-NEXT: v_mul_f32_e32 v6, v3, v5
|
|
|
|
; SI-NEXT: v_fma_f32 v7, -v4, v6, v3
|
|
|
|
; SI-NEXT: v_fma_f32 v6, v7, v5, v6
|
|
|
|
; SI-NEXT: v_fma_f32 v3, -v4, v6, v3
|
|
|
|
; SI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s7
|
|
|
|
; SI-NEXT: v_div_fmas_f32 v3, v3, v5, v6
|
|
|
|
; SI-NEXT: v_div_fixup_f32 v3, v3, v2, v0
|
|
|
|
; SI-NEXT: v_trunc_f32_e32 v3, v3
|
2020-07-24 18:41:57 +08:00
|
|
|
; SI-NEXT: v_fma_f32 v0, -v3, v2, v0
|
2020-07-24 18:41:30 +08:00
|
|
|
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
|
|
|
|
; SI-NEXT: s_endpgm
|
|
|
|
;
|
|
|
|
; CI-LABEL: frem_v2f32:
|
|
|
|
; CI: ; %bb.0:
|
|
|
|
; CI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
|
|
|
|
; CI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0xd
|
|
|
|
; CI-NEXT: s_mov_b32 s3, 0xf000
|
|
|
|
; CI-NEXT: s_mov_b32 s2, -1
|
|
|
|
; CI-NEXT: s_mov_b32 s10, s2
|
|
|
|
; CI-NEXT: s_waitcnt lgkmcnt(0)
|
|
|
|
; CI-NEXT: s_mov_b32 s0, s4
|
|
|
|
; CI-NEXT: s_mov_b32 s1, s5
|
|
|
|
; CI-NEXT: s_mov_b32 s4, s6
|
|
|
|
; CI-NEXT: s_mov_b32 s5, s7
|
|
|
|
; CI-NEXT: s_mov_b32 s6, s2
|
|
|
|
; CI-NEXT: s_mov_b32 s7, s3
|
|
|
|
; CI-NEXT: s_mov_b32 s11, s3
|
|
|
|
; CI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0
|
|
|
|
; CI-NEXT: buffer_load_dwordx2 v[2:3], off, s[8:11], 0 offset:32
|
|
|
|
; CI-NEXT: s_mov_b32 s6, 3
|
|
|
|
; CI-NEXT: s_mov_b32 s7, 0
|
|
|
|
; CI-NEXT: s_waitcnt vmcnt(0)
|
|
|
|
; CI-NEXT: v_div_scale_f32 v5, s[4:5], v3, v3, v1
|
|
|
|
; CI-NEXT: v_div_scale_f32 v4, vcc, v1, v3, v1
|
|
|
|
; CI-NEXT: v_rcp_f32_e32 v6, v5
|
|
|
|
; CI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s6
|
|
|
|
; CI-NEXT: v_fma_f32 v7, -v5, v6, 1.0
|
|
|
|
; CI-NEXT: v_fma_f32 v6, v7, v6, v6
|
|
|
|
; CI-NEXT: v_mul_f32_e32 v7, v4, v6
|
|
|
|
; CI-NEXT: v_fma_f32 v8, -v5, v7, v4
|
|
|
|
; CI-NEXT: v_fma_f32 v7, v8, v6, v7
|
|
|
|
; CI-NEXT: v_fma_f32 v4, -v5, v7, v4
|
|
|
|
; CI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s7
|
|
|
|
; CI-NEXT: v_div_fmas_f32 v4, v4, v6, v7
|
|
|
|
; CI-NEXT: v_div_fixup_f32 v4, v4, v3, v1
|
|
|
|
; CI-NEXT: v_trunc_f32_e32 v4, v4
|
2020-07-24 18:41:57 +08:00
|
|
|
; CI-NEXT: v_fma_f32 v1, -v4, v3, v1
|
2020-07-24 18:41:30 +08:00
|
|
|
; CI-NEXT: v_div_scale_f32 v4, s[4:5], v2, v2, v0
|
|
|
|
; CI-NEXT: v_div_scale_f32 v3, vcc, v0, v2, v0
|
|
|
|
; CI-NEXT: v_rcp_f32_e32 v5, v4
|
|
|
|
; CI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s6
|
|
|
|
; CI-NEXT: v_fma_f32 v6, -v4, v5, 1.0
|
|
|
|
; CI-NEXT: v_fma_f32 v5, v6, v5, v5
|
|
|
|
; CI-NEXT: v_mul_f32_e32 v6, v3, v5
|
|
|
|
; CI-NEXT: v_fma_f32 v7, -v4, v6, v3
|
|
|
|
; CI-NEXT: v_fma_f32 v6, v7, v5, v6
|
|
|
|
; CI-NEXT: v_fma_f32 v3, -v4, v6, v3
|
|
|
|
; CI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s7
|
|
|
|
; CI-NEXT: v_div_fmas_f32 v3, v3, v5, v6
|
|
|
|
; CI-NEXT: v_div_fixup_f32 v3, v3, v2, v0
|
|
|
|
; CI-NEXT: v_trunc_f32_e32 v3, v3
|
2020-07-24 18:41:57 +08:00
|
|
|
; CI-NEXT: v_fma_f32 v0, -v3, v2, v0
|
2020-07-24 18:41:30 +08:00
|
|
|
; CI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
|
|
|
|
; CI-NEXT: s_endpgm
|
|
|
|
;
|
|
|
|
; VI-LABEL: frem_v2f32:
|
|
|
|
; VI: ; %bb.0:
|
|
|
|
; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
|
|
|
|
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
|
|
|
|
; VI-NEXT: s_mov_b32 s2, 3
|
|
|
|
; VI-NEXT: s_mov_b32 s3, 0
|
|
|
|
; VI-NEXT: s_waitcnt lgkmcnt(0)
|
|
|
|
; VI-NEXT: v_mov_b32_e32 v2, s6
|
|
|
|
; VI-NEXT: s_add_u32 s0, s0, 32
|
|
|
|
; VI-NEXT: s_addc_u32 s1, s1, 0
|
|
|
|
; VI-NEXT: v_mov_b32_e32 v5, s1
|
|
|
|
; VI-NEXT: v_mov_b32_e32 v3, s7
|
|
|
|
; VI-NEXT: v_mov_b32_e32 v4, s0
|
|
|
|
; VI-NEXT: flat_load_dwordx2 v[2:3], v[2:3]
|
|
|
|
; VI-NEXT: flat_load_dwordx2 v[4:5], v[4:5]
|
|
|
|
; VI-NEXT: v_mov_b32_e32 v0, s4
|
|
|
|
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: s_waitcnt vmcnt(0)
|
2020-07-24 18:41:30 +08:00
|
|
|
; VI-NEXT: v_div_scale_f32 v7, s[0:1], v5, v5, v3
|
|
|
|
; VI-NEXT: v_div_scale_f32 v6, vcc, v3, v5, v3
|
|
|
|
; VI-NEXT: v_rcp_f32_e32 v8, v7
|
|
|
|
; VI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s2
|
|
|
|
; VI-NEXT: v_fma_f32 v9, -v7, v8, 1.0
|
|
|
|
; VI-NEXT: v_fma_f32 v8, v9, v8, v8
|
|
|
|
; VI-NEXT: v_mul_f32_e32 v9, v6, v8
|
|
|
|
; VI-NEXT: v_fma_f32 v10, -v7, v9, v6
|
|
|
|
; VI-NEXT: v_fma_f32 v9, v10, v8, v9
|
|
|
|
; VI-NEXT: v_fma_f32 v6, -v7, v9, v6
|
|
|
|
; VI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s3
|
|
|
|
; VI-NEXT: v_div_fmas_f32 v6, v6, v8, v9
|
|
|
|
; VI-NEXT: v_div_fixup_f32 v6, v6, v5, v3
|
|
|
|
; VI-NEXT: v_trunc_f32_e32 v6, v6
|
2020-07-24 18:41:57 +08:00
|
|
|
; VI-NEXT: v_fma_f32 v3, -v6, v5, v3
|
2020-07-24 18:41:30 +08:00
|
|
|
; VI-NEXT: v_div_scale_f32 v6, s[0:1], v4, v4, v2
|
|
|
|
; VI-NEXT: v_div_scale_f32 v5, vcc, v2, v4, v2
|
|
|
|
; VI-NEXT: v_rcp_f32_e32 v7, v6
|
|
|
|
; VI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s2
|
|
|
|
; VI-NEXT: v_fma_f32 v8, -v6, v7, 1.0
|
|
|
|
; VI-NEXT: v_fma_f32 v7, v8, v7, v7
|
|
|
|
; VI-NEXT: v_mul_f32_e32 v8, v5, v7
|
|
|
|
; VI-NEXT: v_fma_f32 v9, -v6, v8, v5
|
|
|
|
; VI-NEXT: v_fma_f32 v8, v9, v7, v8
|
|
|
|
; VI-NEXT: v_fma_f32 v5, -v6, v8, v5
|
|
|
|
; VI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s3
|
|
|
|
; VI-NEXT: v_div_fmas_f32 v5, v5, v7, v8
|
|
|
|
; VI-NEXT: v_div_fixup_f32 v5, v5, v4, v2
|
|
|
|
; VI-NEXT: v_trunc_f32_e32 v5, v5
|
2020-07-24 18:41:57 +08:00
|
|
|
; VI-NEXT: v_fma_f32 v2, -v5, v4, v2
|
2020-07-24 18:41:30 +08:00
|
|
|
; VI-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
|
|
|
|
; VI-NEXT: s_endpgm
|
2021-03-29 17:12:46 +08:00
|
|
|
;
|
|
|
|
; GFX9-LABEL: frem_v2f32:
|
|
|
|
; GFX9: ; %bb.0:
|
|
|
|
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
|
|
|
|
; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
|
|
|
|
; GFX9-NEXT: v_mov_b32_e32 v4, 0
|
|
|
|
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
|
|
|
|
; GFX9-NEXT: global_load_dwordx2 v[0:1], v4, s[6:7]
|
|
|
|
; GFX9-NEXT: global_load_dwordx2 v[2:3], v4, s[2:3] offset:32
|
|
|
|
; GFX9-NEXT: s_mov_b32 s2, 3
|
|
|
|
; GFX9-NEXT: s_mov_b32 s3, 0
|
|
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
|
|
; GFX9-NEXT: v_div_scale_f32 v6, s[0:1], v3, v3, v1
|
|
|
|
; GFX9-NEXT: v_div_scale_f32 v5, vcc, v1, v3, v1
|
|
|
|
; GFX9-NEXT: v_rcp_f32_e32 v7, v6
|
|
|
|
; GFX9-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s2
|
|
|
|
; GFX9-NEXT: v_fma_f32 v8, -v6, v7, 1.0
|
|
|
|
; GFX9-NEXT: v_fma_f32 v7, v8, v7, v7
|
|
|
|
; GFX9-NEXT: v_mul_f32_e32 v8, v5, v7
|
|
|
|
; GFX9-NEXT: v_fma_f32 v9, -v6, v8, v5
|
|
|
|
; GFX9-NEXT: v_fma_f32 v8, v9, v7, v8
|
|
|
|
; GFX9-NEXT: v_fma_f32 v5, -v6, v8, v5
|
|
|
|
; GFX9-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s3
|
|
|
|
; GFX9-NEXT: v_div_fmas_f32 v5, v5, v7, v8
|
|
|
|
; GFX9-NEXT: v_div_fixup_f32 v5, v5, v3, v1
|
|
|
|
; GFX9-NEXT: v_trunc_f32_e32 v5, v5
|
|
|
|
; GFX9-NEXT: v_fma_f32 v1, -v5, v3, v1
|
|
|
|
; GFX9-NEXT: v_div_scale_f32 v5, s[0:1], v2, v2, v0
|
|
|
|
; GFX9-NEXT: v_div_scale_f32 v3, vcc, v0, v2, v0
|
|
|
|
; GFX9-NEXT: v_rcp_f32_e32 v6, v5
|
|
|
|
; GFX9-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s2
|
|
|
|
; GFX9-NEXT: v_fma_f32 v7, -v5, v6, 1.0
|
|
|
|
; GFX9-NEXT: v_fma_f32 v6, v7, v6, v6
|
|
|
|
; GFX9-NEXT: v_mul_f32_e32 v7, v3, v6
|
|
|
|
; GFX9-NEXT: v_fma_f32 v8, -v5, v7, v3
|
|
|
|
; GFX9-NEXT: v_fma_f32 v7, v8, v6, v7
|
|
|
|
; GFX9-NEXT: v_fma_f32 v3, -v5, v7, v3
|
|
|
|
; GFX9-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s3
|
|
|
|
; GFX9-NEXT: v_div_fmas_f32 v3, v3, v6, v7
|
|
|
|
; GFX9-NEXT: v_div_fixup_f32 v3, v3, v2, v0
|
|
|
|
; GFX9-NEXT: v_trunc_f32_e32 v3, v3
|
|
|
|
; GFX9-NEXT: v_fma_f32 v0, -v3, v2, v0
|
|
|
|
; GFX9-NEXT: global_store_dwordx2 v4, v[0:1], s[4:5]
|
|
|
|
; GFX9-NEXT: s_endpgm
|
|
|
|
;
|
|
|
|
; GFX10-LABEL: frem_v2f32:
|
|
|
|
; GFX10: ; %bb.0:
|
|
|
|
; GFX10-NEXT: s_clause 0x1
|
|
|
|
; GFX10-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
|
|
|
|
; GFX10-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
|
2021-04-27 03:48:12 +08:00
|
|
|
; GFX10-NEXT: v_mov_b32_e32 v4, 0
|
2021-03-29 17:12:46 +08:00
|
|
|
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
|
|
|
|
; GFX10-NEXT: s_clause 0x1
|
2021-04-27 03:48:12 +08:00
|
|
|
; GFX10-NEXT: global_load_dwordx2 v[0:1], v4, s[6:7]
|
|
|
|
; GFX10-NEXT: global_load_dwordx2 v[2:3], v4, s[2:3] offset:32
|
2021-03-29 17:12:46 +08:00
|
|
|
; GFX10-NEXT: s_waitcnt vmcnt(0)
|
|
|
|
; GFX10-NEXT: v_div_scale_f32 v6, s0, v3, v3, v1
|
|
|
|
; GFX10-NEXT: v_div_scale_f32 v5, vcc_lo, v1, v3, v1
|
|
|
|
; GFX10-NEXT: v_rcp_f32_e32 v7, v6
|
|
|
|
; GFX10-NEXT: s_denorm_mode 15
|
|
|
|
; GFX10-NEXT: v_fma_f32 v8, -v6, v7, 1.0
|
|
|
|
; GFX10-NEXT: v_fma_f32 v7, v8, v7, v7
|
|
|
|
; GFX10-NEXT: v_mul_f32_e32 v8, v5, v7
|
|
|
|
; GFX10-NEXT: v_fma_f32 v9, -v6, v8, v5
|
|
|
|
; GFX10-NEXT: v_fma_f32 v8, v9, v7, v8
|
|
|
|
; GFX10-NEXT: v_fma_f32 v5, -v6, v8, v5
|
|
|
|
; GFX10-NEXT: s_denorm_mode 12
|
2021-04-27 03:48:12 +08:00
|
|
|
; GFX10-NEXT: v_div_fmas_f32 v5, v5, v7, v8
|
|
|
|
; GFX10-NEXT: v_div_fixup_f32 v5, v5, v3, v1
|
|
|
|
; GFX10-NEXT: v_trunc_f32_e32 v5, v5
|
|
|
|
; GFX10-NEXT: v_fma_f32 v1, v3, -v5, v1
|
2021-03-29 17:12:46 +08:00
|
|
|
; GFX10-NEXT: v_div_scale_f32 v5, s0, v2, v2, v0
|
2021-04-27 03:48:12 +08:00
|
|
|
; GFX10-NEXT: v_div_scale_f32 v3, vcc_lo, v0, v2, v0
|
2021-03-29 17:12:46 +08:00
|
|
|
; GFX10-NEXT: v_rcp_f32_e32 v6, v5
|
|
|
|
; GFX10-NEXT: s_denorm_mode 15
|
|
|
|
; GFX10-NEXT: v_fma_f32 v7, -v5, v6, 1.0
|
|
|
|
; GFX10-NEXT: v_fma_f32 v6, v7, v6, v6
|
2021-04-27 03:48:12 +08:00
|
|
|
; GFX10-NEXT: v_mul_f32_e32 v7, v3, v6
|
|
|
|
; GFX10-NEXT: v_fma_f32 v8, -v5, v7, v3
|
2021-03-29 17:12:46 +08:00
|
|
|
; GFX10-NEXT: v_fma_f32 v7, v8, v6, v7
|
2021-04-27 03:48:12 +08:00
|
|
|
; GFX10-NEXT: v_fma_f32 v3, -v5, v7, v3
|
2021-03-29 17:12:46 +08:00
|
|
|
; GFX10-NEXT: s_denorm_mode 12
|
2021-04-27 03:48:12 +08:00
|
|
|
; GFX10-NEXT: v_div_fmas_f32 v3, v3, v6, v7
|
2021-03-29 17:12:46 +08:00
|
|
|
; GFX10-NEXT: v_div_fixup_f32 v3, v3, v2, v0
|
|
|
|
; GFX10-NEXT: v_trunc_f32_e32 v3, v3
|
|
|
|
; GFX10-NEXT: v_fmac_f32_e64 v0, -v3, v2
|
2021-04-27 03:48:12 +08:00
|
|
|
; GFX10-NEXT: global_store_dwordx2 v4, v[0:1], s[4:5]
|
2021-03-29 17:12:46 +08:00
|
|
|
; GFX10-NEXT: s_endpgm
|
2014-09-11 05:44:27 +08:00
|
|
|
<2 x float> addrspace(1)* %in2) #0 {
|
[opaque pointer type] Add textual IR support for explicit type parameter to getelementptr instruction
One of several parallel first steps to remove the target type of pointers,
replacing them with a single opaque pointer type.
This adds an explicit type parameter to the gep instruction so that when the
first parameter becomes an opaque pointer type, the type to gep through is
still available to the instructions.
* This doesn't modify gep operators, only instructions (operators will be
handled separately)
* Textual IR changes only. Bitcode (including upgrade) and changing the
in-memory representation will be in separate changes.
* geps of vectors are transformed as:
getelementptr <4 x float*> %x, ...
->getelementptr float, <4 x float*> %x, ...
Then, once the opaque pointer type is introduced, this will ultimately look
like:
getelementptr float, <4 x ptr> %x
with the unambiguous interpretation that it is a vector of pointers to float.
* address spaces remain on the pointer, not the type:
getelementptr float addrspace(1)* %x
->getelementptr float, float addrspace(1)* %x
Then, eventually:
getelementptr float, ptr addrspace(1) %x
Importantly, the massive amount of test case churn has been automated by
same crappy python code. I had to manually update a few test cases that
wouldn't fit the script's model (r228970,r229196,r229197,r229198). The
python script just massages stdin and writes the result to stdout, I
then wrapped that in a shell script to handle replacing files, then
using the usual find+xargs to migrate all the files.
update.py:
import fileinput
import sys
import re
ibrep = re.compile(r"(^.*?[^%\w]getelementptr inbounds )(((?:<\d* x )?)(.*?)(| addrspace\(\d\)) *\*(|>)(?:$| *(?:%|@|null|undef|blockaddress|getelementptr|addrspacecast|bitcast|inttoptr|\[\[[a-zA-Z]|\{\{).*$))")
normrep = re.compile( r"(^.*?[^%\w]getelementptr )(((?:<\d* x )?)(.*?)(| addrspace\(\d\)) *\*(|>)(?:$| *(?:%|@|null|undef|blockaddress|getelementptr|addrspacecast|bitcast|inttoptr|\[\[[a-zA-Z]|\{\{).*$))")
def conv(match, line):
if not match:
return line
line = match.groups()[0]
if len(match.groups()[5]) == 0:
line += match.groups()[2]
line += match.groups()[3]
line += ", "
line += match.groups()[1]
line += "\n"
return line
for line in sys.stdin:
if line.find("getelementptr ") == line.find("getelementptr inbounds"):
if line.find("getelementptr inbounds") != line.find("getelementptr inbounds ("):
line = conv(re.match(ibrep, line), line)
elif line.find("getelementptr ") != line.find("getelementptr ("):
line = conv(re.match(normrep, line), line)
sys.stdout.write(line)
apply.sh:
for name in "$@"
do
python3 `dirname "$0"`/update.py < "$name" > "$name.tmp" && mv "$name.tmp" "$name"
rm -f "$name.tmp"
done
The actual commands:
From llvm/src:
find test/ -name *.ll | xargs ./apply.sh
From llvm/src/tools/clang:
find test/ -name *.mm -o -name *.m -o -name *.cpp -o -name *.c | xargs -I '{}' ../../apply.sh "{}"
From llvm/src/tools/polly:
find test/ -name *.ll | xargs ./apply.sh
After that, check-all (with llvm, clang, clang-tools-extra, lld,
compiler-rt, and polly all checked out).
The extra 'rm' in the apply.sh script is due to a few files in clang's test
suite using interesting unicode stuff that my python script was throwing
exceptions on. None of those files needed to be migrated, so it seemed
sufficient to ignore those cases.
Reviewers: rafael, dexonsmith, grosser
Differential Revision: http://reviews.llvm.org/D7636
llvm-svn: 230786
2015-02-28 03:29:02 +08:00
|
|
|
%gep2 = getelementptr <2 x float>, <2 x float> addrspace(1)* %in2, i32 4
|
2015-02-28 05:17:42 +08:00
|
|
|
%r0 = load <2 x float>, <2 x float> addrspace(1)* %in1, align 8
|
|
|
|
%r1 = load <2 x float>, <2 x float> addrspace(1)* %gep2, align 8
|
2014-09-11 05:44:27 +08:00
|
|
|
%r2 = frem <2 x float> %r0, %r1
|
|
|
|
store <2 x float> %r2, <2 x float> addrspace(1)* %out, align 8
|
|
|
|
ret void
|
|
|
|
}
|
|
|
|
|
2017-03-22 05:39:51 +08:00
|
|
|
define amdgpu_kernel void @frem_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in1,
|
2020-07-24 18:41:30 +08:00
|
|
|
; SI-LABEL: frem_v4f32:
|
|
|
|
; SI: ; %bb.0:
|
|
|
|
; SI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
|
|
|
|
; SI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0xd
|
|
|
|
; SI-NEXT: s_mov_b32 s3, 0xf000
|
|
|
|
; SI-NEXT: s_mov_b32 s2, -1
|
|
|
|
; SI-NEXT: s_waitcnt lgkmcnt(0)
|
|
|
|
; SI-NEXT: s_mov_b32 s0, s4
|
|
|
|
; SI-NEXT: s_mov_b32 s1, s5
|
|
|
|
; SI-NEXT: s_mov_b32 s4, s6
|
|
|
|
; SI-NEXT: s_mov_b32 s5, s7
|
|
|
|
; SI-NEXT: s_mov_b32 s6, s2
|
|
|
|
; SI-NEXT: s_mov_b32 s7, s3
|
|
|
|
; SI-NEXT: s_mov_b32 s10, s2
|
|
|
|
; SI-NEXT: s_mov_b32 s11, s3
|
|
|
|
; SI-NEXT: buffer_load_dwordx4 v[0:3], off, s[4:7], 0
|
|
|
|
; SI-NEXT: buffer_load_dwordx4 v[4:7], off, s[8:11], 0 offset:64
|
|
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
|
|
; SI-NEXT: v_div_scale_f32 v8, vcc, v3, v7, v3
|
|
|
|
; SI-NEXT: v_div_scale_f32 v9, s[4:5], v7, v7, v3
|
|
|
|
; SI-NEXT: v_rcp_f32_e32 v10, v9
|
|
|
|
; SI-NEXT: s_mov_b32 s6, 3
|
|
|
|
; SI-NEXT: s_mov_b32 s7, 0
|
|
|
|
; SI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s6
|
|
|
|
; SI-NEXT: v_fma_f32 v11, -v9, v10, 1.0
|
|
|
|
; SI-NEXT: v_fma_f32 v10, v11, v10, v10
|
|
|
|
; SI-NEXT: v_mul_f32_e32 v11, v8, v10
|
|
|
|
; SI-NEXT: v_fma_f32 v12, -v9, v11, v8
|
|
|
|
; SI-NEXT: v_fma_f32 v11, v12, v10, v11
|
|
|
|
; SI-NEXT: v_fma_f32 v8, -v9, v11, v8
|
|
|
|
; SI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s7
|
|
|
|
; SI-NEXT: v_div_fmas_f32 v8, v8, v10, v11
|
|
|
|
; SI-NEXT: v_div_fixup_f32 v8, v8, v7, v3
|
|
|
|
; SI-NEXT: v_trunc_f32_e32 v8, v8
|
2020-07-24 18:41:57 +08:00
|
|
|
; SI-NEXT: v_fma_f32 v3, -v8, v7, v3
|
2020-07-24 18:41:30 +08:00
|
|
|
; SI-NEXT: v_div_scale_f32 v7, vcc, v2, v6, v2
|
|
|
|
; SI-NEXT: v_div_scale_f32 v8, s[4:5], v6, v6, v2
|
|
|
|
; SI-NEXT: v_rcp_f32_e32 v9, v8
|
|
|
|
; SI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s6
|
|
|
|
; SI-NEXT: v_fma_f32 v10, -v8, v9, 1.0
|
|
|
|
; SI-NEXT: v_fma_f32 v9, v10, v9, v9
|
|
|
|
; SI-NEXT: v_mul_f32_e32 v10, v7, v9
|
|
|
|
; SI-NEXT: v_fma_f32 v11, -v8, v10, v7
|
|
|
|
; SI-NEXT: v_fma_f32 v10, v11, v9, v10
|
|
|
|
; SI-NEXT: v_fma_f32 v7, -v8, v10, v7
|
|
|
|
; SI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s7
|
|
|
|
; SI-NEXT: v_div_fmas_f32 v7, v7, v9, v10
|
|
|
|
; SI-NEXT: v_div_fixup_f32 v7, v7, v6, v2
|
|
|
|
; SI-NEXT: v_trunc_f32_e32 v7, v7
|
2020-07-24 18:41:57 +08:00
|
|
|
; SI-NEXT: v_fma_f32 v2, -v7, v6, v2
|
2020-07-24 18:41:30 +08:00
|
|
|
; SI-NEXT: v_div_scale_f32 v6, vcc, v1, v5, v1
|
|
|
|
; SI-NEXT: v_div_scale_f32 v7, s[4:5], v5, v5, v1
|
|
|
|
; SI-NEXT: v_rcp_f32_e32 v8, v7
|
|
|
|
; SI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s6
|
|
|
|
; SI-NEXT: v_fma_f32 v9, -v7, v8, 1.0
|
|
|
|
; SI-NEXT: v_fma_f32 v8, v9, v8, v8
|
|
|
|
; SI-NEXT: v_mul_f32_e32 v9, v6, v8
|
|
|
|
; SI-NEXT: v_fma_f32 v10, -v7, v9, v6
|
|
|
|
; SI-NEXT: v_fma_f32 v9, v10, v8, v9
|
|
|
|
; SI-NEXT: v_fma_f32 v6, -v7, v9, v6
|
|
|
|
; SI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s7
|
|
|
|
; SI-NEXT: v_div_fmas_f32 v6, v6, v8, v9
|
|
|
|
; SI-NEXT: v_div_fixup_f32 v6, v6, v5, v1
|
|
|
|
; SI-NEXT: v_trunc_f32_e32 v6, v6
|
2020-07-24 18:41:57 +08:00
|
|
|
; SI-NEXT: v_fma_f32 v1, -v6, v5, v1
|
2020-07-24 18:41:30 +08:00
|
|
|
; SI-NEXT: v_div_scale_f32 v5, vcc, v0, v4, v0
|
|
|
|
; SI-NEXT: v_div_scale_f32 v6, s[4:5], v4, v4, v0
|
|
|
|
; SI-NEXT: v_rcp_f32_e32 v7, v6
|
|
|
|
; SI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s6
|
|
|
|
; SI-NEXT: v_fma_f32 v8, -v6, v7, 1.0
|
|
|
|
; SI-NEXT: v_fma_f32 v7, v8, v7, v7
|
|
|
|
; SI-NEXT: v_mul_f32_e32 v8, v5, v7
|
|
|
|
; SI-NEXT: v_fma_f32 v9, -v6, v8, v5
|
|
|
|
; SI-NEXT: v_fma_f32 v8, v9, v7, v8
|
|
|
|
; SI-NEXT: v_fma_f32 v5, -v6, v8, v5
|
|
|
|
; SI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s7
|
|
|
|
; SI-NEXT: v_div_fmas_f32 v5, v5, v7, v8
|
|
|
|
; SI-NEXT: v_div_fixup_f32 v5, v5, v4, v0
|
|
|
|
; SI-NEXT: v_trunc_f32_e32 v5, v5
|
2020-07-24 18:41:57 +08:00
|
|
|
; SI-NEXT: v_fma_f32 v0, -v5, v4, v0
|
2020-07-24 18:41:30 +08:00
|
|
|
; SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0
|
|
|
|
; SI-NEXT: s_endpgm
|
|
|
|
;
|
|
|
|
; CI-LABEL: frem_v4f32:
|
|
|
|
; CI: ; %bb.0:
|
|
|
|
; CI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
|
|
|
|
; CI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0xd
|
|
|
|
; CI-NEXT: s_mov_b32 s3, 0xf000
|
|
|
|
; CI-NEXT: s_mov_b32 s2, -1
|
|
|
|
; CI-NEXT: s_mov_b32 s10, s2
|
|
|
|
; CI-NEXT: s_waitcnt lgkmcnt(0)
|
|
|
|
; CI-NEXT: s_mov_b32 s0, s4
|
|
|
|
; CI-NEXT: s_mov_b32 s1, s5
|
|
|
|
; CI-NEXT: s_mov_b32 s4, s6
|
|
|
|
; CI-NEXT: s_mov_b32 s5, s7
|
|
|
|
; CI-NEXT: s_mov_b32 s6, s2
|
|
|
|
; CI-NEXT: s_mov_b32 s7, s3
|
|
|
|
; CI-NEXT: s_mov_b32 s11, s3
|
|
|
|
; CI-NEXT: buffer_load_dwordx4 v[0:3], off, s[4:7], 0
|
|
|
|
; CI-NEXT: buffer_load_dwordx4 v[4:7], off, s[8:11], 0 offset:64
|
|
|
|
; CI-NEXT: s_mov_b32 s6, 3
|
|
|
|
; CI-NEXT: s_mov_b32 s7, 0
|
|
|
|
; CI-NEXT: s_waitcnt vmcnt(0)
|
|
|
|
; CI-NEXT: v_div_scale_f32 v9, s[4:5], v7, v7, v3
|
|
|
|
; CI-NEXT: v_div_scale_f32 v8, vcc, v3, v7, v3
|
|
|
|
; CI-NEXT: v_rcp_f32_e32 v10, v9
|
|
|
|
; CI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s6
|
|
|
|
; CI-NEXT: v_fma_f32 v11, -v9, v10, 1.0
|
|
|
|
; CI-NEXT: v_fma_f32 v10, v11, v10, v10
|
|
|
|
; CI-NEXT: v_mul_f32_e32 v11, v8, v10
|
|
|
|
; CI-NEXT: v_fma_f32 v12, -v9, v11, v8
|
|
|
|
; CI-NEXT: v_fma_f32 v11, v12, v10, v11
|
|
|
|
; CI-NEXT: v_fma_f32 v8, -v9, v11, v8
|
|
|
|
; CI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s7
|
|
|
|
; CI-NEXT: v_div_fmas_f32 v8, v8, v10, v11
|
|
|
|
; CI-NEXT: v_div_fixup_f32 v8, v8, v7, v3
|
|
|
|
; CI-NEXT: v_trunc_f32_e32 v8, v8
|
2020-07-24 18:41:57 +08:00
|
|
|
; CI-NEXT: v_fma_f32 v3, -v8, v7, v3
|
2020-07-24 18:41:30 +08:00
|
|
|
; CI-NEXT: v_div_scale_f32 v8, s[4:5], v6, v6, v2
|
|
|
|
; CI-NEXT: v_div_scale_f32 v7, vcc, v2, v6, v2
|
|
|
|
; CI-NEXT: v_rcp_f32_e32 v9, v8
|
|
|
|
; CI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s6
|
|
|
|
; CI-NEXT: v_fma_f32 v10, -v8, v9, 1.0
|
|
|
|
; CI-NEXT: v_fma_f32 v9, v10, v9, v9
|
|
|
|
; CI-NEXT: v_mul_f32_e32 v10, v7, v9
|
|
|
|
; CI-NEXT: v_fma_f32 v11, -v8, v10, v7
|
|
|
|
; CI-NEXT: v_fma_f32 v10, v11, v9, v10
|
|
|
|
; CI-NEXT: v_fma_f32 v7, -v8, v10, v7
|
|
|
|
; CI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s7
|
|
|
|
; CI-NEXT: v_div_fmas_f32 v7, v7, v9, v10
|
|
|
|
; CI-NEXT: v_div_fixup_f32 v7, v7, v6, v2
|
|
|
|
; CI-NEXT: v_trunc_f32_e32 v7, v7
|
2020-07-24 18:41:57 +08:00
|
|
|
; CI-NEXT: v_fma_f32 v2, -v7, v6, v2
|
2020-07-24 18:41:30 +08:00
|
|
|
; CI-NEXT: v_div_scale_f32 v7, s[4:5], v5, v5, v1
|
|
|
|
; CI-NEXT: v_div_scale_f32 v6, vcc, v1, v5, v1
|
|
|
|
; CI-NEXT: v_rcp_f32_e32 v8, v7
|
|
|
|
; CI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s6
|
|
|
|
; CI-NEXT: v_fma_f32 v9, -v7, v8, 1.0
|
|
|
|
; CI-NEXT: v_fma_f32 v8, v9, v8, v8
|
|
|
|
; CI-NEXT: v_mul_f32_e32 v9, v6, v8
|
|
|
|
; CI-NEXT: v_fma_f32 v10, -v7, v9, v6
|
|
|
|
; CI-NEXT: v_fma_f32 v9, v10, v8, v9
|
|
|
|
; CI-NEXT: v_fma_f32 v6, -v7, v9, v6
|
|
|
|
; CI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s7
|
|
|
|
; CI-NEXT: v_div_fmas_f32 v6, v6, v8, v9
|
|
|
|
; CI-NEXT: v_div_fixup_f32 v6, v6, v5, v1
|
|
|
|
; CI-NEXT: v_trunc_f32_e32 v6, v6
|
2020-07-24 18:41:57 +08:00
|
|
|
; CI-NEXT: v_fma_f32 v1, -v6, v5, v1
|
2020-07-24 18:41:30 +08:00
|
|
|
; CI-NEXT: v_div_scale_f32 v6, s[4:5], v4, v4, v0
|
|
|
|
; CI-NEXT: v_div_scale_f32 v5, vcc, v0, v4, v0
|
|
|
|
; CI-NEXT: v_rcp_f32_e32 v7, v6
|
|
|
|
; CI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s6
|
|
|
|
; CI-NEXT: v_fma_f32 v8, -v6, v7, 1.0
|
|
|
|
; CI-NEXT: v_fma_f32 v7, v8, v7, v7
|
|
|
|
; CI-NEXT: v_mul_f32_e32 v8, v5, v7
|
|
|
|
; CI-NEXT: v_fma_f32 v9, -v6, v8, v5
|
|
|
|
; CI-NEXT: v_fma_f32 v8, v9, v7, v8
|
|
|
|
; CI-NEXT: v_fma_f32 v5, -v6, v8, v5
|
|
|
|
; CI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s7
|
|
|
|
; CI-NEXT: v_div_fmas_f32 v5, v5, v7, v8
|
|
|
|
; CI-NEXT: v_div_fixup_f32 v5, v5, v4, v0
|
|
|
|
; CI-NEXT: v_trunc_f32_e32 v5, v5
|
2020-07-24 18:41:57 +08:00
|
|
|
; CI-NEXT: v_fma_f32 v0, -v5, v4, v0
|
2020-07-24 18:41:30 +08:00
|
|
|
; CI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0
|
|
|
|
; CI-NEXT: s_endpgm
|
|
|
|
;
|
|
|
|
; VI-LABEL: frem_v4f32:
|
|
|
|
; VI: ; %bb.0:
|
|
|
|
; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
|
|
|
|
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
|
|
|
|
; VI-NEXT: s_mov_b32 s2, 3
|
|
|
|
; VI-NEXT: s_mov_b32 s3, 0
|
|
|
|
; VI-NEXT: s_waitcnt lgkmcnt(0)
|
|
|
|
; VI-NEXT: v_mov_b32_e32 v0, s6
|
|
|
|
; VI-NEXT: s_add_u32 s0, s0, 64
|
|
|
|
; VI-NEXT: s_addc_u32 s1, s1, 0
|
|
|
|
; VI-NEXT: v_mov_b32_e32 v5, s1
|
|
|
|
; VI-NEXT: v_mov_b32_e32 v1, s7
|
|
|
|
; VI-NEXT: v_mov_b32_e32 v4, s0
|
|
|
|
; VI-NEXT: flat_load_dwordx4 v[0:3], v[0:1]
|
|
|
|
; VI-NEXT: flat_load_dwordx4 v[4:7], v[4:5]
|
|
|
|
; VI-NEXT: v_mov_b32_e32 v8, s4
|
|
|
|
; VI-NEXT: v_mov_b32_e32 v9, s5
|
2020-10-16 15:09:38 +08:00
|
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
2020-07-24 18:41:30 +08:00
|
|
|
; VI-NEXT: v_div_scale_f32 v11, s[0:1], v7, v7, v3
|
|
|
|
; VI-NEXT: v_div_scale_f32 v10, vcc, v3, v7, v3
|
|
|
|
; VI-NEXT: v_rcp_f32_e32 v12, v11
|
|
|
|
; VI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s2
|
|
|
|
; VI-NEXT: v_fma_f32 v13, -v11, v12, 1.0
|
|
|
|
; VI-NEXT: v_fma_f32 v12, v13, v12, v12
|
|
|
|
; VI-NEXT: v_mul_f32_e32 v13, v10, v12
|
|
|
|
; VI-NEXT: v_fma_f32 v14, -v11, v13, v10
|
|
|
|
; VI-NEXT: v_fma_f32 v13, v14, v12, v13
|
|
|
|
; VI-NEXT: v_fma_f32 v10, -v11, v13, v10
|
|
|
|
; VI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s3
|
|
|
|
; VI-NEXT: v_div_fmas_f32 v10, v10, v12, v13
|
|
|
|
; VI-NEXT: v_div_fixup_f32 v10, v10, v7, v3
|
|
|
|
; VI-NEXT: v_trunc_f32_e32 v10, v10
|
2020-07-24 18:41:57 +08:00
|
|
|
; VI-NEXT: v_fma_f32 v3, -v10, v7, v3
|
2020-07-24 18:41:30 +08:00
|
|
|
; VI-NEXT: v_div_scale_f32 v10, s[0:1], v6, v6, v2
|
|
|
|
; VI-NEXT: v_div_scale_f32 v7, vcc, v2, v6, v2
|
|
|
|
; VI-NEXT: v_rcp_f32_e32 v11, v10
|
|
|
|
; VI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s2
|
|
|
|
; VI-NEXT: v_fma_f32 v12, -v10, v11, 1.0
|
|
|
|
; VI-NEXT: v_fma_f32 v11, v12, v11, v11
|
|
|
|
; VI-NEXT: v_mul_f32_e32 v12, v7, v11
|
|
|
|
; VI-NEXT: v_fma_f32 v13, -v10, v12, v7
|
|
|
|
; VI-NEXT: v_fma_f32 v12, v13, v11, v12
|
|
|
|
; VI-NEXT: v_fma_f32 v7, -v10, v12, v7
|
|
|
|
; VI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s3
|
|
|
|
; VI-NEXT: v_div_fmas_f32 v7, v7, v11, v12
|
|
|
|
; VI-NEXT: v_div_fixup_f32 v7, v7, v6, v2
|
|
|
|
; VI-NEXT: v_trunc_f32_e32 v7, v7
|
2020-07-24 18:41:57 +08:00
|
|
|
; VI-NEXT: v_fma_f32 v2, -v7, v6, v2
|
2020-07-24 18:41:30 +08:00
|
|
|
; VI-NEXT: v_div_scale_f32 v7, s[0:1], v5, v5, v1
|
|
|
|
; VI-NEXT: v_div_scale_f32 v6, vcc, v1, v5, v1
|
|
|
|
; VI-NEXT: v_rcp_f32_e32 v10, v7
|
|
|
|
; VI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s2
|
|
|
|
; VI-NEXT: v_fma_f32 v11, -v7, v10, 1.0
|
|
|
|
; VI-NEXT: v_fma_f32 v10, v11, v10, v10
|
|
|
|
; VI-NEXT: v_mul_f32_e32 v11, v6, v10
|
|
|
|
; VI-NEXT: v_fma_f32 v12, -v7, v11, v6
|
|
|
|
; VI-NEXT: v_fma_f32 v11, v12, v10, v11
|
|
|
|
; VI-NEXT: v_fma_f32 v6, -v7, v11, v6
|
|
|
|
; VI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s3
|
|
|
|
; VI-NEXT: v_div_fmas_f32 v6, v6, v10, v11
|
|
|
|
; VI-NEXT: v_div_fixup_f32 v6, v6, v5, v1
|
|
|
|
; VI-NEXT: v_trunc_f32_e32 v6, v6
|
2020-07-24 18:41:57 +08:00
|
|
|
; VI-NEXT: v_fma_f32 v1, -v6, v5, v1
|
2020-07-24 18:41:30 +08:00
|
|
|
; VI-NEXT: v_div_scale_f32 v6, s[0:1], v4, v4, v0
|
|
|
|
; VI-NEXT: v_div_scale_f32 v5, vcc, v0, v4, v0
|
|
|
|
; VI-NEXT: v_rcp_f32_e32 v7, v6
|
|
|
|
; VI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s2
|
|
|
|
; VI-NEXT: v_fma_f32 v10, -v6, v7, 1.0
|
|
|
|
; VI-NEXT: v_fma_f32 v7, v10, v7, v7
|
|
|
|
; VI-NEXT: v_mul_f32_e32 v10, v5, v7
|
|
|
|
; VI-NEXT: v_fma_f32 v11, -v6, v10, v5
|
|
|
|
; VI-NEXT: v_fma_f32 v10, v11, v7, v10
|
|
|
|
; VI-NEXT: v_fma_f32 v5, -v6, v10, v5
|
|
|
|
; VI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s3
|
|
|
|
; VI-NEXT: v_div_fmas_f32 v5, v5, v7, v10
|
|
|
|
; VI-NEXT: v_div_fixup_f32 v5, v5, v4, v0
|
|
|
|
; VI-NEXT: v_trunc_f32_e32 v5, v5
|
2020-07-24 18:41:57 +08:00
|
|
|
; VI-NEXT: v_fma_f32 v0, -v5, v4, v0
|
2020-07-24 18:41:30 +08:00
|
|
|
; VI-NEXT: flat_store_dwordx4 v[8:9], v[0:3]
|
|
|
|
; VI-NEXT: s_endpgm
|
2021-03-29 17:12:46 +08:00
|
|
|
;
|
|
|
|
; GFX9-LABEL: frem_v4f32:
|
|
|
|
; GFX9: ; %bb.0:
|
|
|
|
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
|
|
|
|
; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
|
|
|
|
; GFX9-NEXT: v_mov_b32_e32 v8, 0
|
|
|
|
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
|
|
|
|
; GFX9-NEXT: global_load_dwordx4 v[0:3], v8, s[6:7]
|
|
|
|
; GFX9-NEXT: global_load_dwordx4 v[4:7], v8, s[2:3] offset:64
|
|
|
|
; GFX9-NEXT: s_mov_b32 s2, 3
|
|
|
|
; GFX9-NEXT: s_mov_b32 s3, 0
|
|
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
|
|
; GFX9-NEXT: v_div_scale_f32 v10, s[0:1], v7, v7, v3
|
|
|
|
; GFX9-NEXT: v_div_scale_f32 v9, vcc, v3, v7, v3
|
|
|
|
; GFX9-NEXT: v_rcp_f32_e32 v11, v10
|
|
|
|
; GFX9-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s2
|
|
|
|
; GFX9-NEXT: v_fma_f32 v12, -v10, v11, 1.0
|
|
|
|
; GFX9-NEXT: v_fma_f32 v11, v12, v11, v11
|
|
|
|
; GFX9-NEXT: v_mul_f32_e32 v12, v9, v11
|
|
|
|
; GFX9-NEXT: v_fma_f32 v13, -v10, v12, v9
|
|
|
|
; GFX9-NEXT: v_fma_f32 v12, v13, v11, v12
|
|
|
|
; GFX9-NEXT: v_fma_f32 v9, -v10, v12, v9
|
|
|
|
; GFX9-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s3
|
|
|
|
; GFX9-NEXT: v_div_fmas_f32 v9, v9, v11, v12
|
|
|
|
; GFX9-NEXT: v_div_fixup_f32 v9, v9, v7, v3
|
|
|
|
; GFX9-NEXT: v_trunc_f32_e32 v9, v9
|
|
|
|
; GFX9-NEXT: v_fma_f32 v3, -v9, v7, v3
|
|
|
|
; GFX9-NEXT: v_div_scale_f32 v9, s[0:1], v6, v6, v2
|
|
|
|
; GFX9-NEXT: v_div_scale_f32 v7, vcc, v2, v6, v2
|
|
|
|
; GFX9-NEXT: v_rcp_f32_e32 v10, v9
|
|
|
|
; GFX9-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s2
|
|
|
|
; GFX9-NEXT: v_fma_f32 v11, -v9, v10, 1.0
|
|
|
|
; GFX9-NEXT: v_fma_f32 v10, v11, v10, v10
|
|
|
|
; GFX9-NEXT: v_mul_f32_e32 v11, v7, v10
|
|
|
|
; GFX9-NEXT: v_fma_f32 v12, -v9, v11, v7
|
|
|
|
; GFX9-NEXT: v_fma_f32 v11, v12, v10, v11
|
|
|
|
; GFX9-NEXT: v_fma_f32 v7, -v9, v11, v7
|
|
|
|
; GFX9-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s3
|
|
|
|
; GFX9-NEXT: v_div_fmas_f32 v7, v7, v10, v11
|
|
|
|
; GFX9-NEXT: v_div_fixup_f32 v7, v7, v6, v2
|
|
|
|
; GFX9-NEXT: v_trunc_f32_e32 v7, v7
|
|
|
|
; GFX9-NEXT: v_fma_f32 v2, -v7, v6, v2
|
|
|
|
; GFX9-NEXT: v_div_scale_f32 v7, s[0:1], v5, v5, v1
|
|
|
|
; GFX9-NEXT: v_div_scale_f32 v6, vcc, v1, v5, v1
|
|
|
|
; GFX9-NEXT: v_rcp_f32_e32 v9, v7
|
|
|
|
; GFX9-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s2
|
|
|
|
; GFX9-NEXT: v_fma_f32 v10, -v7, v9, 1.0
|
|
|
|
; GFX9-NEXT: v_fma_f32 v9, v10, v9, v9
|
|
|
|
; GFX9-NEXT: v_mul_f32_e32 v10, v6, v9
|
|
|
|
; GFX9-NEXT: v_fma_f32 v11, -v7, v10, v6
|
|
|
|
; GFX9-NEXT: v_fma_f32 v10, v11, v9, v10
|
|
|
|
; GFX9-NEXT: v_fma_f32 v6, -v7, v10, v6
|
|
|
|
; GFX9-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s3
|
|
|
|
; GFX9-NEXT: v_div_fmas_f32 v6, v6, v9, v10
|
|
|
|
; GFX9-NEXT: v_div_fixup_f32 v6, v6, v5, v1
|
|
|
|
; GFX9-NEXT: v_trunc_f32_e32 v6, v6
|
|
|
|
; GFX9-NEXT: v_fma_f32 v1, -v6, v5, v1
|
|
|
|
; GFX9-NEXT: v_div_scale_f32 v6, s[0:1], v4, v4, v0
|
|
|
|
; GFX9-NEXT: v_div_scale_f32 v5, vcc, v0, v4, v0
|
|
|
|
; GFX9-NEXT: v_rcp_f32_e32 v7, v6
|
|
|
|
; GFX9-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s2
|
|
|
|
; GFX9-NEXT: v_fma_f32 v9, -v6, v7, 1.0
|
|
|
|
; GFX9-NEXT: v_fma_f32 v7, v9, v7, v7
|
|
|
|
; GFX9-NEXT: v_mul_f32_e32 v9, v5, v7
|
|
|
|
; GFX9-NEXT: v_fma_f32 v10, -v6, v9, v5
|
|
|
|
; GFX9-NEXT: v_fma_f32 v9, v10, v7, v9
|
|
|
|
; GFX9-NEXT: v_fma_f32 v5, -v6, v9, v5
|
|
|
|
; GFX9-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s3
|
|
|
|
; GFX9-NEXT: v_div_fmas_f32 v5, v5, v7, v9
|
|
|
|
; GFX9-NEXT: v_div_fixup_f32 v5, v5, v4, v0
|
|
|
|
; GFX9-NEXT: v_trunc_f32_e32 v5, v5
|
|
|
|
; GFX9-NEXT: v_fma_f32 v0, -v5, v4, v0
|
|
|
|
; GFX9-NEXT: global_store_dwordx4 v8, v[0:3], s[4:5]
|
|
|
|
; GFX9-NEXT: s_endpgm
|
|
|
|
;
|
|
|
|
; GFX10-LABEL: frem_v4f32:
|
|
|
|
; GFX10: ; %bb.0:
|
|
|
|
; GFX10-NEXT: s_clause 0x1
|
|
|
|
; GFX10-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
|
|
|
|
; GFX10-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
|
|
|
|
; GFX10-NEXT: v_mov_b32_e32 v8, 0
|
|
|
|
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
|
|
|
|
; GFX10-NEXT: s_clause 0x1
|
2021-04-27 03:48:12 +08:00
|
|
|
; GFX10-NEXT: global_load_dwordx4 v[0:3], v8, s[6:7]
|
2021-03-29 17:12:46 +08:00
|
|
|
; GFX10-NEXT: global_load_dwordx4 v[4:7], v8, s[2:3] offset:64
|
|
|
|
; GFX10-NEXT: s_waitcnt vmcnt(0)
|
2021-04-27 03:48:12 +08:00
|
|
|
; GFX10-NEXT: v_div_scale_f32 v10, s0, v7, v7, v3
|
|
|
|
; GFX10-NEXT: v_div_scale_f32 v9, vcc_lo, v3, v7, v3
|
2021-03-29 17:12:46 +08:00
|
|
|
; GFX10-NEXT: v_rcp_f32_e32 v11, v10
|
|
|
|
; GFX10-NEXT: s_denorm_mode 15
|
|
|
|
; GFX10-NEXT: v_fma_f32 v12, -v10, v11, 1.0
|
|
|
|
; GFX10-NEXT: v_fma_f32 v11, v12, v11, v11
|
|
|
|
; GFX10-NEXT: v_mul_f32_e32 v12, v9, v11
|
|
|
|
; GFX10-NEXT: v_fma_f32 v13, -v10, v12, v9
|
|
|
|
; GFX10-NEXT: v_fma_f32 v12, v13, v11, v12
|
|
|
|
; GFX10-NEXT: v_fma_f32 v9, -v10, v12, v9
|
|
|
|
; GFX10-NEXT: s_denorm_mode 12
|
|
|
|
; GFX10-NEXT: v_div_fmas_f32 v9, v9, v11, v12
|
2021-04-27 03:48:12 +08:00
|
|
|
; GFX10-NEXT: v_div_fixup_f32 v9, v9, v7, v3
|
2021-03-29 17:12:46 +08:00
|
|
|
; GFX10-NEXT: v_trunc_f32_e32 v9, v9
|
2021-04-27 03:48:12 +08:00
|
|
|
; GFX10-NEXT: v_fma_f32 v3, v7, -v9, v3
|
|
|
|
; GFX10-NEXT: v_div_scale_f32 v9, s0, v6, v6, v2
|
|
|
|
; GFX10-NEXT: v_div_scale_f32 v7, vcc_lo, v2, v6, v2
|
2021-03-29 17:12:46 +08:00
|
|
|
; GFX10-NEXT: v_rcp_f32_e32 v10, v9
|
|
|
|
; GFX10-NEXT: s_denorm_mode 15
|
|
|
|
; GFX10-NEXT: v_fma_f32 v11, -v9, v10, 1.0
|
|
|
|
; GFX10-NEXT: v_fma_f32 v10, v11, v10, v10
|
2021-04-27 03:48:12 +08:00
|
|
|
; GFX10-NEXT: v_mul_f32_e32 v11, v7, v10
|
|
|
|
; GFX10-NEXT: v_fma_f32 v12, -v9, v11, v7
|
2021-03-29 17:12:46 +08:00
|
|
|
; GFX10-NEXT: v_fma_f32 v11, v12, v10, v11
|
2021-04-27 03:48:12 +08:00
|
|
|
; GFX10-NEXT: v_fma_f32 v7, -v9, v11, v7
|
2021-03-29 17:12:46 +08:00
|
|
|
; GFX10-NEXT: s_denorm_mode 12
|
2021-04-27 03:48:12 +08:00
|
|
|
; GFX10-NEXT: v_div_fmas_f32 v7, v7, v10, v11
|
|
|
|
; GFX10-NEXT: v_div_fixup_f32 v7, v7, v6, v2
|
2021-03-29 17:12:46 +08:00
|
|
|
; GFX10-NEXT: v_trunc_f32_e32 v7, v7
|
2021-04-27 03:48:12 +08:00
|
|
|
; GFX10-NEXT: v_fma_f32 v2, v6, -v7, v2
|
|
|
|
; GFX10-NEXT: v_div_scale_f32 v7, s0, v5, v5, v1
|
|
|
|
; GFX10-NEXT: v_div_scale_f32 v6, vcc_lo, v1, v5, v1
|
2021-03-29 17:12:46 +08:00
|
|
|
; GFX10-NEXT: v_rcp_f32_e32 v9, v7
|
|
|
|
; GFX10-NEXT: s_denorm_mode 15
|
|
|
|
; GFX10-NEXT: v_fma_f32 v10, -v7, v9, 1.0
|
|
|
|
; GFX10-NEXT: v_fma_f32 v9, v10, v9, v9
|
2021-04-27 03:48:12 +08:00
|
|
|
; GFX10-NEXT: v_mul_f32_e32 v10, v6, v9
|
|
|
|
; GFX10-NEXT: v_fma_f32 v11, -v7, v10, v6
|
|
|
|
; GFX10-NEXT: v_fma_f32 v10, v11, v9, v10
|
|
|
|
; GFX10-NEXT: v_fma_f32 v6, -v7, v10, v6
|
2021-03-29 17:12:46 +08:00
|
|
|
; GFX10-NEXT: s_denorm_mode 12
|
2021-04-27 03:48:12 +08:00
|
|
|
; GFX10-NEXT: v_div_fmas_f32 v6, v6, v9, v10
|
|
|
|
; GFX10-NEXT: v_div_fixup_f32 v6, v6, v5, v1
|
2021-03-29 17:12:46 +08:00
|
|
|
; GFX10-NEXT: v_trunc_f32_e32 v6, v6
|
2021-04-27 03:48:12 +08:00
|
|
|
; GFX10-NEXT: v_fma_f32 v1, v5, -v6, v1
|
|
|
|
; GFX10-NEXT: v_div_scale_f32 v6, s0, v4, v4, v0
|
|
|
|
; GFX10-NEXT: v_div_scale_f32 v5, vcc_lo, v0, v4, v0
|
2021-03-29 17:12:46 +08:00
|
|
|
; GFX10-NEXT: v_rcp_f32_e32 v7, v6
|
|
|
|
; GFX10-NEXT: s_denorm_mode 15
|
|
|
|
; GFX10-NEXT: v_fma_f32 v9, -v6, v7, 1.0
|
|
|
|
; GFX10-NEXT: v_fma_f32 v7, v9, v7, v7
|
2021-04-27 03:48:12 +08:00
|
|
|
; GFX10-NEXT: v_mul_f32_e32 v9, v5, v7
|
|
|
|
; GFX10-NEXT: v_fma_f32 v10, -v6, v9, v5
|
|
|
|
; GFX10-NEXT: v_fma_f32 v9, v10, v7, v9
|
|
|
|
; GFX10-NEXT: v_fma_f32 v5, -v6, v9, v5
|
2021-03-29 17:12:46 +08:00
|
|
|
; GFX10-NEXT: s_denorm_mode 12
|
2021-04-27 03:48:12 +08:00
|
|
|
; GFX10-NEXT: v_div_fmas_f32 v5, v5, v7, v9
|
|
|
|
; GFX10-NEXT: v_div_fixup_f32 v5, v5, v4, v0
|
2021-03-29 17:12:46 +08:00
|
|
|
; GFX10-NEXT: v_trunc_f32_e32 v5, v5
|
2021-04-27 03:48:12 +08:00
|
|
|
; GFX10-NEXT: v_fmac_f32_e64 v0, -v5, v4
|
|
|
|
; GFX10-NEXT: global_store_dwordx4 v8, v[0:3], s[4:5]
|
2021-03-29 17:12:46 +08:00
|
|
|
; GFX10-NEXT: s_endpgm
|
2014-09-11 05:44:27 +08:00
|
|
|
<4 x float> addrspace(1)* %in2) #0 {
|
[opaque pointer type] Add textual IR support for explicit type parameter to getelementptr instruction
One of several parallel first steps to remove the target type of pointers,
replacing them with a single opaque pointer type.
This adds an explicit type parameter to the gep instruction so that when the
first parameter becomes an opaque pointer type, the type to gep through is
still available to the instructions.
* This doesn't modify gep operators, only instructions (operators will be
handled separately)
* Textual IR changes only. Bitcode (including upgrade) and changing the
in-memory representation will be in separate changes.
* geps of vectors are transformed as:
getelementptr <4 x float*> %x, ...
->getelementptr float, <4 x float*> %x, ...
Then, once the opaque pointer type is introduced, this will ultimately look
like:
getelementptr float, <4 x ptr> %x
with the unambiguous interpretation that it is a vector of pointers to float.
* address spaces remain on the pointer, not the type:
getelementptr float addrspace(1)* %x
->getelementptr float, float addrspace(1)* %x
Then, eventually:
getelementptr float, ptr addrspace(1) %x
Importantly, the massive amount of test case churn has been automated by
same crappy python code. I had to manually update a few test cases that
wouldn't fit the script's model (r228970,r229196,r229197,r229198). The
python script just massages stdin and writes the result to stdout, I
then wrapped that in a shell script to handle replacing files, then
using the usual find+xargs to migrate all the files.
update.py:
import fileinput
import sys
import re
ibrep = re.compile(r"(^.*?[^%\w]getelementptr inbounds )(((?:<\d* x )?)(.*?)(| addrspace\(\d\)) *\*(|>)(?:$| *(?:%|@|null|undef|blockaddress|getelementptr|addrspacecast|bitcast|inttoptr|\[\[[a-zA-Z]|\{\{).*$))")
normrep = re.compile( r"(^.*?[^%\w]getelementptr )(((?:<\d* x )?)(.*?)(| addrspace\(\d\)) *\*(|>)(?:$| *(?:%|@|null|undef|blockaddress|getelementptr|addrspacecast|bitcast|inttoptr|\[\[[a-zA-Z]|\{\{).*$))")
def conv(match, line):
if not match:
return line
line = match.groups()[0]
if len(match.groups()[5]) == 0:
line += match.groups()[2]
line += match.groups()[3]
line += ", "
line += match.groups()[1]
line += "\n"
return line
for line in sys.stdin:
if line.find("getelementptr ") == line.find("getelementptr inbounds"):
if line.find("getelementptr inbounds") != line.find("getelementptr inbounds ("):
line = conv(re.match(ibrep, line), line)
elif line.find("getelementptr ") != line.find("getelementptr ("):
line = conv(re.match(normrep, line), line)
sys.stdout.write(line)
apply.sh:
for name in "$@"
do
python3 `dirname "$0"`/update.py < "$name" > "$name.tmp" && mv "$name.tmp" "$name"
rm -f "$name.tmp"
done
The actual commands:
From llvm/src:
find test/ -name *.ll | xargs ./apply.sh
From llvm/src/tools/clang:
find test/ -name *.mm -o -name *.m -o -name *.cpp -o -name *.c | xargs -I '{}' ../../apply.sh "{}"
From llvm/src/tools/polly:
find test/ -name *.ll | xargs ./apply.sh
After that, check-all (with llvm, clang, clang-tools-extra, lld,
compiler-rt, and polly all checked out).
The extra 'rm' in the apply.sh script is due to a few files in clang's test
suite using interesting unicode stuff that my python script was throwing
exceptions on. None of those files needed to be migrated, so it seemed
sufficient to ignore those cases.
Reviewers: rafael, dexonsmith, grosser
Differential Revision: http://reviews.llvm.org/D7636
llvm-svn: 230786
2015-02-28 03:29:02 +08:00
|
|
|
%gep2 = getelementptr <4 x float>, <4 x float> addrspace(1)* %in2, i32 4
|
2015-02-28 05:17:42 +08:00
|
|
|
%r0 = load <4 x float>, <4 x float> addrspace(1)* %in1, align 16
|
|
|
|
%r1 = load <4 x float>, <4 x float> addrspace(1)* %gep2, align 16
|
2014-09-11 05:44:27 +08:00
|
|
|
%r2 = frem <4 x float> %r0, %r1
|
|
|
|
store <4 x float> %r2, <4 x float> addrspace(1)* %out, align 16
|
|
|
|
ret void
|
|
|
|
}
|
|
|
|
|
2017-03-22 05:39:51 +08:00
|
|
|
define amdgpu_kernel void @frem_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %in1,
|
2020-07-24 18:41:30 +08:00
|
|
|
; SI-LABEL: frem_v2f64:
|
|
|
|
; SI: ; %bb.0:
|
|
|
|
; SI-NEXT: s_load_dwordx4 s[8:11], s[0:1], 0x9
|
2020-12-01 01:06:35 +08:00
|
|
|
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
|
2020-07-24 18:41:30 +08:00
|
|
|
; SI-NEXT: s_mov_b32 s7, 0xf000
|
|
|
|
; SI-NEXT: s_mov_b32 s6, -1
|
|
|
|
; SI-NEXT: s_waitcnt lgkmcnt(0)
|
|
|
|
; SI-NEXT: s_mov_b32 s4, s8
|
|
|
|
; SI-NEXT: s_mov_b32 s5, s9
|
2020-12-01 01:06:35 +08:00
|
|
|
; SI-NEXT: s_mov_b32 s8, s10
|
|
|
|
; SI-NEXT: s_mov_b32 s9, s11
|
|
|
|
; SI-NEXT: s_mov_b32 s10, s6
|
|
|
|
; SI-NEXT: s_mov_b32 s11, s7
|
2020-07-24 18:41:30 +08:00
|
|
|
; SI-NEXT: s_mov_b32 s2, s6
|
|
|
|
; SI-NEXT: s_mov_b32 s3, s7
|
2020-12-01 01:06:35 +08:00
|
|
|
; SI-NEXT: buffer_load_dwordx4 v[0:3], off, s[8:11], 0
|
|
|
|
; SI-NEXT: buffer_load_dwordx4 v[4:7], off, s[0:3], 0 offset:64
|
2020-07-24 18:41:30 +08:00
|
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
|
|
; SI-NEXT: v_div_scale_f64 v[8:9], s[0:1], v[6:7], v[6:7], v[2:3]
|
|
|
|
; SI-NEXT: v_rcp_f64_e32 v[10:11], v[8:9]
|
|
|
|
; SI-NEXT: v_fma_f64 v[12:13], -v[8:9], v[10:11], 1.0
|
|
|
|
; SI-NEXT: v_fma_f64 v[10:11], v[10:11], v[12:13], v[10:11]
|
|
|
|
; SI-NEXT: v_fma_f64 v[12:13], -v[8:9], v[10:11], 1.0
|
|
|
|
; SI-NEXT: v_fma_f64 v[10:11], v[10:11], v[12:13], v[10:11]
|
|
|
|
; SI-NEXT: v_div_scale_f64 v[12:13], s[0:1], v[2:3], v[6:7], v[2:3]
|
|
|
|
; SI-NEXT: v_mul_f64 v[14:15], v[12:13], v[10:11]
|
|
|
|
; SI-NEXT: v_fma_f64 v[16:17], -v[8:9], v[14:15], v[12:13]
|
|
|
|
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v7, v9
|
|
|
|
; SI-NEXT: v_cmp_eq_u32_e64 s[0:1], v3, v13
|
|
|
|
; SI-NEXT: s_xor_b64 vcc, s[0:1], vcc
|
2020-10-20 05:38:02 +08:00
|
|
|
; SI-NEXT: s_nop 1
|
2020-07-24 18:41:30 +08:00
|
|
|
; SI-NEXT: v_div_fmas_f64 v[8:9], v[16:17], v[10:11], v[14:15]
|
|
|
|
; SI-NEXT: v_div_fixup_f64 v[8:9], v[8:9], v[6:7], v[2:3]
|
|
|
|
; SI-NEXT: v_bfe_u32 v10, v9, 20, 11
|
|
|
|
; SI-NEXT: s_movk_i32 s8, 0xfc01
|
|
|
|
; SI-NEXT: v_add_i32_e32 v12, vcc, s8, v10
|
|
|
|
; SI-NEXT: s_mov_b32 s3, 0xfffff
|
|
|
|
; SI-NEXT: v_lshr_b64 v[10:11], s[2:3], v12
|
|
|
|
; SI-NEXT: v_not_b32_e32 v10, v10
|
|
|
|
; SI-NEXT: v_and_b32_e32 v10, v8, v10
|
|
|
|
; SI-NEXT: v_not_b32_e32 v11, v11
|
|
|
|
; SI-NEXT: v_and_b32_e32 v11, v9, v11
|
|
|
|
; SI-NEXT: s_brev_b32 s9, 1
|
|
|
|
; SI-NEXT: v_and_b32_e32 v13, s9, v9
|
|
|
|
; SI-NEXT: v_cmp_gt_i32_e32 vcc, 0, v12
|
|
|
|
; SI-NEXT: v_cndmask_b32_e32 v11, v11, v13, vcc
|
|
|
|
; SI-NEXT: v_cmp_lt_i32_e64 s[0:1], 51, v12
|
|
|
|
; SI-NEXT: v_cndmask_b32_e64 v9, v11, v9, s[0:1]
|
|
|
|
; SI-NEXT: v_cndmask_b32_e64 v10, v10, 0, vcc
|
|
|
|
; SI-NEXT: v_cndmask_b32_e64 v8, v10, v8, s[0:1]
|
2020-07-24 18:41:57 +08:00
|
|
|
; SI-NEXT: v_fma_f64 v[2:3], -v[8:9], v[6:7], v[2:3]
|
2020-07-24 18:41:30 +08:00
|
|
|
; SI-NEXT: v_div_scale_f64 v[6:7], s[0:1], v[4:5], v[4:5], v[0:1]
|
|
|
|
; SI-NEXT: v_rcp_f64_e32 v[8:9], v[6:7]
|
|
|
|
; SI-NEXT: v_fma_f64 v[10:11], -v[6:7], v[8:9], 1.0
|
|
|
|
; SI-NEXT: v_fma_f64 v[8:9], v[8:9], v[10:11], v[8:9]
|
|
|
|
; SI-NEXT: v_fma_f64 v[10:11], -v[6:7], v[8:9], 1.0
|
|
|
|
; SI-NEXT: v_fma_f64 v[8:9], v[8:9], v[10:11], v[8:9]
|
|
|
|
; SI-NEXT: v_div_scale_f64 v[10:11], s[0:1], v[0:1], v[4:5], v[0:1]
|
|
|
|
; SI-NEXT: v_mul_f64 v[12:13], v[10:11], v[8:9]
|
|
|
|
; SI-NEXT: v_fma_f64 v[14:15], -v[6:7], v[12:13], v[10:11]
|
|
|
|
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v5, v7
|
|
|
|
; SI-NEXT: v_cmp_eq_u32_e64 s[0:1], v1, v11
|
|
|
|
; SI-NEXT: s_xor_b64 vcc, s[0:1], vcc
|
2020-10-20 05:38:02 +08:00
|
|
|
; SI-NEXT: s_nop 1
|
2020-07-24 18:41:30 +08:00
|
|
|
; SI-NEXT: v_div_fmas_f64 v[6:7], v[14:15], v[8:9], v[12:13]
|
|
|
|
; SI-NEXT: v_div_fixup_f64 v[6:7], v[6:7], v[4:5], v[0:1]
|
|
|
|
; SI-NEXT: v_bfe_u32 v8, v7, 20, 11
|
|
|
|
; SI-NEXT: v_add_i32_e32 v10, vcc, s8, v8
|
|
|
|
; SI-NEXT: v_lshr_b64 v[8:9], s[2:3], v10
|
|
|
|
; SI-NEXT: v_not_b32_e32 v8, v8
|
|
|
|
; SI-NEXT: v_and_b32_e32 v8, v6, v8
|
|
|
|
; SI-NEXT: v_not_b32_e32 v9, v9
|
|
|
|
; SI-NEXT: v_and_b32_e32 v9, v7, v9
|
|
|
|
; SI-NEXT: v_and_b32_e32 v11, s9, v7
|
|
|
|
; SI-NEXT: v_cmp_gt_i32_e32 vcc, 0, v10
|
|
|
|
; SI-NEXT: v_cndmask_b32_e32 v9, v9, v11, vcc
|
|
|
|
; SI-NEXT: v_cmp_lt_i32_e64 s[0:1], 51, v10
|
|
|
|
; SI-NEXT: v_cndmask_b32_e64 v7, v9, v7, s[0:1]
|
|
|
|
; SI-NEXT: v_cndmask_b32_e64 v8, v8, 0, vcc
|
|
|
|
; SI-NEXT: v_cndmask_b32_e64 v6, v8, v6, s[0:1]
|
2020-07-24 18:41:57 +08:00
|
|
|
; SI-NEXT: v_fma_f64 v[0:1], -v[6:7], v[4:5], v[0:1]
|
2020-07-24 18:41:30 +08:00
|
|
|
; SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[4:7], 0
|
|
|
|
; SI-NEXT: s_endpgm
|
|
|
|
;
|
|
|
|
; CI-LABEL: frem_v2f64:
|
|
|
|
; CI: ; %bb.0:
|
|
|
|
; CI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
|
|
|
|
; CI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0xd
|
|
|
|
; CI-NEXT: s_mov_b32 s3, 0xf000
|
|
|
|
; CI-NEXT: s_mov_b32 s2, -1
|
|
|
|
; CI-NEXT: s_mov_b32 s10, s2
|
|
|
|
; CI-NEXT: s_waitcnt lgkmcnt(0)
|
|
|
|
; CI-NEXT: s_mov_b32 s0, s4
|
|
|
|
; CI-NEXT: s_mov_b32 s1, s5
|
|
|
|
; CI-NEXT: s_mov_b32 s4, s6
|
|
|
|
; CI-NEXT: s_mov_b32 s5, s7
|
|
|
|
; CI-NEXT: s_mov_b32 s6, s2
|
|
|
|
; CI-NEXT: s_mov_b32 s7, s3
|
|
|
|
; CI-NEXT: s_mov_b32 s11, s3
|
|
|
|
; CI-NEXT: buffer_load_dwordx4 v[0:3], off, s[4:7], 0
|
|
|
|
; CI-NEXT: buffer_load_dwordx4 v[4:7], off, s[8:11], 0 offset:64
|
|
|
|
; CI-NEXT: s_waitcnt vmcnt(0)
|
|
|
|
; CI-NEXT: v_div_scale_f64 v[8:9], s[4:5], v[6:7], v[6:7], v[2:3]
|
|
|
|
; CI-NEXT: v_rcp_f64_e32 v[10:11], v[8:9]
|
|
|
|
; CI-NEXT: v_fma_f64 v[12:13], -v[8:9], v[10:11], 1.0
|
|
|
|
; CI-NEXT: v_fma_f64 v[10:11], v[10:11], v[12:13], v[10:11]
|
|
|
|
; CI-NEXT: v_fma_f64 v[12:13], -v[8:9], v[10:11], 1.0
|
|
|
|
; CI-NEXT: v_fma_f64 v[10:11], v[10:11], v[12:13], v[10:11]
|
|
|
|
; CI-NEXT: v_div_scale_f64 v[12:13], vcc, v[2:3], v[6:7], v[2:3]
|
|
|
|
; CI-NEXT: v_mul_f64 v[14:15], v[12:13], v[10:11]
|
|
|
|
; CI-NEXT: v_fma_f64 v[8:9], -v[8:9], v[14:15], v[12:13]
|
|
|
|
; CI-NEXT: s_nop 1
|
|
|
|
; CI-NEXT: v_div_fmas_f64 v[8:9], v[8:9], v[10:11], v[14:15]
|
|
|
|
; CI-NEXT: v_div_fixup_f64 v[8:9], v[8:9], v[6:7], v[2:3]
|
|
|
|
; CI-NEXT: v_trunc_f64_e32 v[8:9], v[8:9]
|
2020-07-24 18:41:57 +08:00
|
|
|
; CI-NEXT: v_fma_f64 v[2:3], -v[8:9], v[6:7], v[2:3]
|
2020-07-24 18:41:30 +08:00
|
|
|
; CI-NEXT: v_div_scale_f64 v[6:7], s[4:5], v[4:5], v[4:5], v[0:1]
|
|
|
|
; CI-NEXT: v_rcp_f64_e32 v[8:9], v[6:7]
|
|
|
|
; CI-NEXT: v_fma_f64 v[10:11], -v[6:7], v[8:9], 1.0
|
|
|
|
; CI-NEXT: v_fma_f64 v[8:9], v[8:9], v[10:11], v[8:9]
|
|
|
|
; CI-NEXT: v_fma_f64 v[10:11], -v[6:7], v[8:9], 1.0
|
|
|
|
; CI-NEXT: v_fma_f64 v[8:9], v[8:9], v[10:11], v[8:9]
|
|
|
|
; CI-NEXT: v_div_scale_f64 v[10:11], vcc, v[0:1], v[4:5], v[0:1]
|
|
|
|
; CI-NEXT: v_mul_f64 v[12:13], v[10:11], v[8:9]
|
|
|
|
; CI-NEXT: v_fma_f64 v[6:7], -v[6:7], v[12:13], v[10:11]
|
|
|
|
; CI-NEXT: s_nop 1
|
|
|
|
; CI-NEXT: v_div_fmas_f64 v[6:7], v[6:7], v[8:9], v[12:13]
|
|
|
|
; CI-NEXT: v_div_fixup_f64 v[6:7], v[6:7], v[4:5], v[0:1]
|
|
|
|
; CI-NEXT: v_trunc_f64_e32 v[6:7], v[6:7]
|
2020-07-24 18:41:57 +08:00
|
|
|
; CI-NEXT: v_fma_f64 v[0:1], -v[6:7], v[4:5], v[0:1]
|
2020-07-24 18:41:30 +08:00
|
|
|
; CI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0
|
|
|
|
; CI-NEXT: s_endpgm
|
|
|
|
;
|
|
|
|
; VI-LABEL: frem_v2f64:
|
|
|
|
; VI: ; %bb.0:
|
|
|
|
; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
|
|
|
|
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
|
|
|
|
; VI-NEXT: s_waitcnt lgkmcnt(0)
|
|
|
|
; VI-NEXT: v_mov_b32_e32 v0, s6
|
|
|
|
; VI-NEXT: s_add_u32 s0, s0, 64
|
|
|
|
; VI-NEXT: s_addc_u32 s1, s1, 0
|
|
|
|
; VI-NEXT: v_mov_b32_e32 v5, s1
|
|
|
|
; VI-NEXT: v_mov_b32_e32 v1, s7
|
|
|
|
; VI-NEXT: v_mov_b32_e32 v4, s0
|
|
|
|
; VI-NEXT: flat_load_dwordx4 v[0:3], v[0:1]
|
|
|
|
; VI-NEXT: flat_load_dwordx4 v[4:7], v[4:5]
|
|
|
|
; VI-NEXT: v_mov_b32_e32 v8, s4
|
|
|
|
; VI-NEXT: v_mov_b32_e32 v9, s5
|
2020-10-16 15:09:38 +08:00
|
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
2020-07-24 18:41:30 +08:00
|
|
|
; VI-NEXT: v_div_scale_f64 v[10:11], s[0:1], v[6:7], v[6:7], v[2:3]
|
|
|
|
; VI-NEXT: v_rcp_f64_e32 v[12:13], v[10:11]
|
|
|
|
; VI-NEXT: v_fma_f64 v[14:15], -v[10:11], v[12:13], 1.0
|
|
|
|
; VI-NEXT: v_fma_f64 v[12:13], v[12:13], v[14:15], v[12:13]
|
|
|
|
; VI-NEXT: v_fma_f64 v[14:15], -v[10:11], v[12:13], 1.0
|
|
|
|
; VI-NEXT: v_fma_f64 v[12:13], v[12:13], v[14:15], v[12:13]
|
|
|
|
; VI-NEXT: v_div_scale_f64 v[14:15], vcc, v[2:3], v[6:7], v[2:3]
|
|
|
|
; VI-NEXT: v_mul_f64 v[16:17], v[14:15], v[12:13]
|
|
|
|
; VI-NEXT: v_fma_f64 v[10:11], -v[10:11], v[16:17], v[14:15]
|
|
|
|
; VI-NEXT: s_nop 1
|
|
|
|
; VI-NEXT: v_div_fmas_f64 v[10:11], v[10:11], v[12:13], v[16:17]
|
|
|
|
; VI-NEXT: v_div_fixup_f64 v[10:11], v[10:11], v[6:7], v[2:3]
|
|
|
|
; VI-NEXT: v_trunc_f64_e32 v[10:11], v[10:11]
|
2020-07-24 18:41:57 +08:00
|
|
|
; VI-NEXT: v_fma_f64 v[2:3], -v[10:11], v[6:7], v[2:3]
|
2020-07-24 18:41:30 +08:00
|
|
|
; VI-NEXT: v_div_scale_f64 v[6:7], s[0:1], v[4:5], v[4:5], v[0:1]
|
|
|
|
; VI-NEXT: v_rcp_f64_e32 v[10:11], v[6:7]
|
|
|
|
; VI-NEXT: v_fma_f64 v[12:13], -v[6:7], v[10:11], 1.0
|
|
|
|
; VI-NEXT: v_fma_f64 v[10:11], v[10:11], v[12:13], v[10:11]
|
|
|
|
; VI-NEXT: v_fma_f64 v[12:13], -v[6:7], v[10:11], 1.0
|
|
|
|
; VI-NEXT: v_fma_f64 v[10:11], v[10:11], v[12:13], v[10:11]
|
|
|
|
; VI-NEXT: v_div_scale_f64 v[12:13], vcc, v[0:1], v[4:5], v[0:1]
|
|
|
|
; VI-NEXT: v_mul_f64 v[14:15], v[12:13], v[10:11]
|
|
|
|
; VI-NEXT: v_fma_f64 v[6:7], -v[6:7], v[14:15], v[12:13]
|
|
|
|
; VI-NEXT: s_nop 1
|
|
|
|
; VI-NEXT: v_div_fmas_f64 v[6:7], v[6:7], v[10:11], v[14:15]
|
|
|
|
; VI-NEXT: v_div_fixup_f64 v[6:7], v[6:7], v[4:5], v[0:1]
|
|
|
|
; VI-NEXT: v_trunc_f64_e32 v[6:7], v[6:7]
|
2020-07-24 18:41:57 +08:00
|
|
|
; VI-NEXT: v_fma_f64 v[0:1], -v[6:7], v[4:5], v[0:1]
|
2020-07-24 18:41:30 +08:00
|
|
|
; VI-NEXT: flat_store_dwordx4 v[8:9], v[0:3]
|
|
|
|
; VI-NEXT: s_endpgm
|
2021-03-29 17:12:46 +08:00
|
|
|
;
|
|
|
|
; GFX9-LABEL: frem_v2f64:
|
|
|
|
; GFX9: ; %bb.0:
|
|
|
|
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
|
|
|
|
; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
|
|
|
|
; GFX9-NEXT: v_mov_b32_e32 v16, 0
|
|
|
|
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
|
|
|
|
; GFX9-NEXT: global_load_dwordx4 v[0:3], v16, s[6:7]
|
|
|
|
; GFX9-NEXT: global_load_dwordx4 v[4:7], v16, s[2:3] offset:64
|
|
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
|
|
; GFX9-NEXT: v_div_scale_f64 v[8:9], s[0:1], v[6:7], v[6:7], v[2:3]
|
|
|
|
; GFX9-NEXT: v_rcp_f64_e32 v[10:11], v[8:9]
|
|
|
|
; GFX9-NEXT: v_fma_f64 v[12:13], -v[8:9], v[10:11], 1.0
|
|
|
|
; GFX9-NEXT: v_fma_f64 v[10:11], v[10:11], v[12:13], v[10:11]
|
|
|
|
; GFX9-NEXT: v_fma_f64 v[12:13], -v[8:9], v[10:11], 1.0
|
|
|
|
; GFX9-NEXT: v_fma_f64 v[10:11], v[10:11], v[12:13], v[10:11]
|
|
|
|
; GFX9-NEXT: v_div_scale_f64 v[12:13], vcc, v[2:3], v[6:7], v[2:3]
|
|
|
|
; GFX9-NEXT: v_mul_f64 v[14:15], v[12:13], v[10:11]
|
|
|
|
; GFX9-NEXT: v_fma_f64 v[8:9], -v[8:9], v[14:15], v[12:13]
|
|
|
|
; GFX9-NEXT: s_nop 1
|
|
|
|
; GFX9-NEXT: v_div_fmas_f64 v[8:9], v[8:9], v[10:11], v[14:15]
|
|
|
|
; GFX9-NEXT: v_div_fixup_f64 v[8:9], v[8:9], v[6:7], v[2:3]
|
|
|
|
; GFX9-NEXT: v_trunc_f64_e32 v[8:9], v[8:9]
|
|
|
|
; GFX9-NEXT: v_fma_f64 v[2:3], -v[8:9], v[6:7], v[2:3]
|
|
|
|
; GFX9-NEXT: v_div_scale_f64 v[6:7], s[0:1], v[4:5], v[4:5], v[0:1]
|
|
|
|
; GFX9-NEXT: v_rcp_f64_e32 v[8:9], v[6:7]
|
|
|
|
; GFX9-NEXT: v_fma_f64 v[10:11], -v[6:7], v[8:9], 1.0
|
|
|
|
; GFX9-NEXT: v_fma_f64 v[8:9], v[8:9], v[10:11], v[8:9]
|
|
|
|
; GFX9-NEXT: v_fma_f64 v[10:11], -v[6:7], v[8:9], 1.0
|
|
|
|
; GFX9-NEXT: v_fma_f64 v[8:9], v[8:9], v[10:11], v[8:9]
|
|
|
|
; GFX9-NEXT: v_div_scale_f64 v[10:11], vcc, v[0:1], v[4:5], v[0:1]
|
|
|
|
; GFX9-NEXT: v_mul_f64 v[12:13], v[10:11], v[8:9]
|
|
|
|
; GFX9-NEXT: v_fma_f64 v[6:7], -v[6:7], v[12:13], v[10:11]
|
|
|
|
; GFX9-NEXT: s_nop 1
|
|
|
|
; GFX9-NEXT: v_div_fmas_f64 v[6:7], v[6:7], v[8:9], v[12:13]
|
|
|
|
; GFX9-NEXT: v_div_fixup_f64 v[6:7], v[6:7], v[4:5], v[0:1]
|
|
|
|
; GFX9-NEXT: v_trunc_f64_e32 v[6:7], v[6:7]
|
|
|
|
; GFX9-NEXT: v_fma_f64 v[0:1], -v[6:7], v[4:5], v[0:1]
|
|
|
|
; GFX9-NEXT: global_store_dwordx4 v16, v[0:3], s[4:5]
|
|
|
|
; GFX9-NEXT: s_endpgm
|
|
|
|
;
|
|
|
|
; GFX10-LABEL: frem_v2f64:
|
|
|
|
; GFX10: ; %bb.0:
|
|
|
|
; GFX10-NEXT: s_clause 0x1
|
|
|
|
; GFX10-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
|
|
|
|
; GFX10-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
|
|
|
|
; GFX10-NEXT: v_mov_b32_e32 v16, 0
|
|
|
|
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
|
|
|
|
; GFX10-NEXT: s_clause 0x1
|
|
|
|
; GFX10-NEXT: global_load_dwordx4 v[0:3], v16, s[6:7]
|
2021-04-27 03:48:12 +08:00
|
|
|
; GFX10-NEXT: global_load_dwordx4 v[4:7], v16, s[2:3] offset:64
|
2021-03-29 17:12:46 +08:00
|
|
|
; GFX10-NEXT: s_waitcnt vmcnt(0)
|
2021-04-27 03:48:12 +08:00
|
|
|
; GFX10-NEXT: v_div_scale_f64 v[8:9], s0, v[6:7], v[6:7], v[2:3]
|
2021-03-29 17:12:46 +08:00
|
|
|
; GFX10-NEXT: v_rcp_f64_e32 v[10:11], v[8:9]
|
|
|
|
; GFX10-NEXT: v_fma_f64 v[12:13], -v[8:9], v[10:11], 1.0
|
|
|
|
; GFX10-NEXT: v_fma_f64 v[10:11], v[10:11], v[12:13], v[10:11]
|
|
|
|
; GFX10-NEXT: v_fma_f64 v[12:13], -v[8:9], v[10:11], 1.0
|
|
|
|
; GFX10-NEXT: v_fma_f64 v[10:11], v[10:11], v[12:13], v[10:11]
|
2021-04-27 03:48:12 +08:00
|
|
|
; GFX10-NEXT: v_div_scale_f64 v[12:13], vcc_lo, v[2:3], v[6:7], v[2:3]
|
2021-03-29 17:12:46 +08:00
|
|
|
; GFX10-NEXT: v_mul_f64 v[14:15], v[12:13], v[10:11]
|
|
|
|
; GFX10-NEXT: v_fma_f64 v[8:9], -v[8:9], v[14:15], v[12:13]
|
|
|
|
; GFX10-NEXT: v_div_fmas_f64 v[8:9], v[8:9], v[10:11], v[14:15]
|
2021-04-27 03:48:12 +08:00
|
|
|
; GFX10-NEXT: v_div_fixup_f64 v[8:9], v[8:9], v[6:7], v[2:3]
|
2021-03-29 17:12:46 +08:00
|
|
|
; GFX10-NEXT: v_trunc_f64_e32 v[8:9], v[8:9]
|
2021-04-27 03:48:12 +08:00
|
|
|
; GFX10-NEXT: v_fma_f64 v[2:3], -v[8:9], v[6:7], v[2:3]
|
|
|
|
; GFX10-NEXT: v_div_scale_f64 v[6:7], s0, v[4:5], v[4:5], v[0:1]
|
2021-03-29 17:12:46 +08:00
|
|
|
; GFX10-NEXT: v_rcp_f64_e32 v[8:9], v[6:7]
|
|
|
|
; GFX10-NEXT: v_fma_f64 v[10:11], -v[6:7], v[8:9], 1.0
|
|
|
|
; GFX10-NEXT: v_fma_f64 v[8:9], v[8:9], v[10:11], v[8:9]
|
|
|
|
; GFX10-NEXT: v_fma_f64 v[10:11], -v[6:7], v[8:9], 1.0
|
|
|
|
; GFX10-NEXT: v_fma_f64 v[8:9], v[8:9], v[10:11], v[8:9]
|
2021-04-27 03:48:12 +08:00
|
|
|
; GFX10-NEXT: v_div_scale_f64 v[10:11], vcc_lo, v[0:1], v[4:5], v[0:1]
|
2021-03-29 17:12:46 +08:00
|
|
|
; GFX10-NEXT: v_mul_f64 v[12:13], v[10:11], v[8:9]
|
|
|
|
; GFX10-NEXT: v_fma_f64 v[6:7], -v[6:7], v[12:13], v[10:11]
|
|
|
|
; GFX10-NEXT: v_div_fmas_f64 v[6:7], v[6:7], v[8:9], v[12:13]
|
2021-04-27 03:48:12 +08:00
|
|
|
; GFX10-NEXT: v_div_fixup_f64 v[6:7], v[6:7], v[4:5], v[0:1]
|
2021-03-29 17:12:46 +08:00
|
|
|
; GFX10-NEXT: v_trunc_f64_e32 v[6:7], v[6:7]
|
2021-04-27 03:48:12 +08:00
|
|
|
; GFX10-NEXT: v_fma_f64 v[0:1], -v[6:7], v[4:5], v[0:1]
|
2021-03-29 17:12:46 +08:00
|
|
|
; GFX10-NEXT: global_store_dwordx4 v16, v[0:3], s[4:5]
|
|
|
|
; GFX10-NEXT: s_endpgm
|
2014-09-11 05:44:27 +08:00
|
|
|
<2 x double> addrspace(1)* %in2) #0 {
|
[opaque pointer type] Add textual IR support for explicit type parameter to getelementptr instruction
One of several parallel first steps to remove the target type of pointers,
replacing them with a single opaque pointer type.
This adds an explicit type parameter to the gep instruction so that when the
first parameter becomes an opaque pointer type, the type to gep through is
still available to the instructions.
* This doesn't modify gep operators, only instructions (operators will be
handled separately)
* Textual IR changes only. Bitcode (including upgrade) and changing the
in-memory representation will be in separate changes.
* geps of vectors are transformed as:
getelementptr <4 x float*> %x, ...
->getelementptr float, <4 x float*> %x, ...
Then, once the opaque pointer type is introduced, this will ultimately look
like:
getelementptr float, <4 x ptr> %x
with the unambiguous interpretation that it is a vector of pointers to float.
* address spaces remain on the pointer, not the type:
getelementptr float addrspace(1)* %x
->getelementptr float, float addrspace(1)* %x
Then, eventually:
getelementptr float, ptr addrspace(1) %x
Importantly, the massive amount of test case churn has been automated by
same crappy python code. I had to manually update a few test cases that
wouldn't fit the script's model (r228970,r229196,r229197,r229198). The
python script just massages stdin and writes the result to stdout, I
then wrapped that in a shell script to handle replacing files, then
using the usual find+xargs to migrate all the files.
update.py:
import fileinput
import sys
import re
ibrep = re.compile(r"(^.*?[^%\w]getelementptr inbounds )(((?:<\d* x )?)(.*?)(| addrspace\(\d\)) *\*(|>)(?:$| *(?:%|@|null|undef|blockaddress|getelementptr|addrspacecast|bitcast|inttoptr|\[\[[a-zA-Z]|\{\{).*$))")
normrep = re.compile( r"(^.*?[^%\w]getelementptr )(((?:<\d* x )?)(.*?)(| addrspace\(\d\)) *\*(|>)(?:$| *(?:%|@|null|undef|blockaddress|getelementptr|addrspacecast|bitcast|inttoptr|\[\[[a-zA-Z]|\{\{).*$))")
def conv(match, line):
if not match:
return line
line = match.groups()[0]
if len(match.groups()[5]) == 0:
line += match.groups()[2]
line += match.groups()[3]
line += ", "
line += match.groups()[1]
line += "\n"
return line
for line in sys.stdin:
if line.find("getelementptr ") == line.find("getelementptr inbounds"):
if line.find("getelementptr inbounds") != line.find("getelementptr inbounds ("):
line = conv(re.match(ibrep, line), line)
elif line.find("getelementptr ") != line.find("getelementptr ("):
line = conv(re.match(normrep, line), line)
sys.stdout.write(line)
apply.sh:
for name in "$@"
do
python3 `dirname "$0"`/update.py < "$name" > "$name.tmp" && mv "$name.tmp" "$name"
rm -f "$name.tmp"
done
The actual commands:
From llvm/src:
find test/ -name *.ll | xargs ./apply.sh
From llvm/src/tools/clang:
find test/ -name *.mm -o -name *.m -o -name *.cpp -o -name *.c | xargs -I '{}' ../../apply.sh "{}"
From llvm/src/tools/polly:
find test/ -name *.ll | xargs ./apply.sh
After that, check-all (with llvm, clang, clang-tools-extra, lld,
compiler-rt, and polly all checked out).
The extra 'rm' in the apply.sh script is due to a few files in clang's test
suite using interesting unicode stuff that my python script was throwing
exceptions on. None of those files needed to be migrated, so it seemed
sufficient to ignore those cases.
Reviewers: rafael, dexonsmith, grosser
Differential Revision: http://reviews.llvm.org/D7636
llvm-svn: 230786
2015-02-28 03:29:02 +08:00
|
|
|
%gep2 = getelementptr <2 x double>, <2 x double> addrspace(1)* %in2, i32 4
|
2015-02-28 05:17:42 +08:00
|
|
|
%r0 = load <2 x double>, <2 x double> addrspace(1)* %in1, align 16
|
|
|
|
%r1 = load <2 x double>, <2 x double> addrspace(1)* %gep2, align 16
|
2014-09-11 05:44:27 +08:00
|
|
|
%r2 = frem <2 x double> %r0, %r1
|
|
|
|
store <2 x double> %r2, <2 x double> addrspace(1)* %out, align 16
|
|
|
|
ret void
|
|
|
|
}
|
|
|
|
|
2019-11-18 19:18:07 +08:00
|
|
|
attributes #0 = { nounwind "unsafe-fp-math"="false" "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
|
|
|
|
attributes #1 = { nounwind "unsafe-fp-math"="true" "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
|