; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -amdgpu-scalarize-global-loads=false -march=amdgcn | FileCheck %s -check-prefixes=FUNC,SI,GCN
; RUN: llc < %s -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -mattr=-flat-for-global | FileCheck %s -check-prefixes=FUNC,SI,TONGA
; RUN: llc < %s -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=gfx900 -mattr=-flat-for-global | FileCheck %s -check-prefixes=FUNC,SI,GFX9
; RUN: llc < %s -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=redwood | FileCheck %s -check-prefixes=FUNC,EG

; The code generated by sdiv is long and complex and may frequently change.
; The goal of this test is to make sure the ISel doesn't fail.
;
; This program was previously failing to compile when one of the selectcc
; opcodes generated by the sdiv lowering was being legalized and optimized to:
; selectcc Remainder -1, 0, -1, SETGT
; This was fixed by adding an additional pattern in R600Instructions.td to
; match this pattern with a CNDGE_INT.

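; A sketch of the generic expansion asserted below (descriptive note only):
; both operands are made non-negative (ashr/add/xor), an unsigned quotient is
; formed with the reciprocal sequence built around v_rcp_iflag_f32, the result
; is refined and clamped with compares and cndmasks, and the sign is restored
; at the end with xor/sub.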
define amdgpu_kernel void @sdiv_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
; GCN-LABEL: sdiv_i32:
|
|
|
|
; GCN: ; %bb.0:
|
|
|
|
; GCN-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
|
|
|
|
; GCN-NEXT: s_mov_b32 s7, 0xf000
|
|
|
|
; GCN-NEXT: s_mov_b32 s6, -1
|
|
|
|
; GCN-NEXT: s_mov_b32 s10, s6
|
|
|
|
; GCN-NEXT: s_mov_b32 s11, s7
|
|
|
|
; GCN-NEXT: s_waitcnt lgkmcnt(0)
|
|
|
|
; GCN-NEXT: s_mov_b32 s8, s2
|
|
|
|
; GCN-NEXT: s_mov_b32 s9, s3
|
|
|
|
; GCN-NEXT: buffer_load_dwordx2 v[0:1], off, s[8:11], 0
|
|
|
|
; GCN-NEXT: s_mov_b32 s4, s0
|
|
|
|
; GCN-NEXT: s_mov_b32 s5, s1
|
|
|
|
; GCN-NEXT: s_waitcnt vmcnt(0)
|
|
|
|
; GCN-NEXT: v_ashrrev_i32_e32 v2, 31, v0
|
|
|
|
; GCN-NEXT: v_ashrrev_i32_e32 v3, 31, v1
|
|
|
|
; GCN-NEXT: v_xor_b32_e32 v4, v2, v3
|
|
|
|
; GCN-NEXT: v_add_i32_e32 v0, vcc, v2, v0
|
|
|
|
; GCN-NEXT: v_add_i32_e32 v1, vcc, v3, v1
|
|
|
|
; GCN-NEXT: v_xor_b32_e32 v0, v0, v2
|
|
|
|
; GCN-NEXT: v_xor_b32_e32 v1, v1, v3
|
|
|
|
; GCN-NEXT: v_cvt_f32_u32_e32 v2, v1
|
|
|
|
; GCN-NEXT: v_rcp_iflag_f32_e32 v2, v2
|
|
|
|
; GCN-NEXT: v_mul_f32_e32 v2, 0x4f800000, v2
|
|
|
|
; GCN-NEXT: v_cvt_u32_f32_e32 v2, v2
|
|
|
|
; GCN-NEXT: v_mul_hi_u32 v3, v2, v1
|
|
|
|
; GCN-NEXT: v_mul_lo_u32 v5, v2, v1
|
|
|
|
; GCN-NEXT: v_sub_i32_e32 v6, vcc, 0, v5
|
|
|
|
; GCN-NEXT: v_cmp_eq_u32_e64 s[0:1], 0, v3
|
|
|
|
; GCN-NEXT: v_cndmask_b32_e64 v3, v5, v6, s[0:1]
|
|
|
|
; GCN-NEXT: v_mul_hi_u32 v3, v3, v2
|
|
|
|
; GCN-NEXT: v_add_i32_e32 v5, vcc, v3, v2
|
|
|
|
; GCN-NEXT: v_subrev_i32_e32 v2, vcc, v3, v2
|
|
|
|
; GCN-NEXT: v_cndmask_b32_e64 v2, v2, v5, s[0:1]
|
|
|
|
; GCN-NEXT: v_mul_hi_u32 v2, v2, v0
|
|
|
|
; GCN-NEXT: v_mul_lo_u32 v3, v2, v1
|
|
|
|
; GCN-NEXT: v_add_i32_e32 v5, vcc, 1, v2
|
|
|
|
; GCN-NEXT: v_add_i32_e32 v6, vcc, -1, v2
|
|
|
|
; GCN-NEXT: v_subrev_i32_e32 v7, vcc, v3, v0
|
|
|
|
; GCN-NEXT: v_cmp_ge_u32_e32 vcc, v0, v3
|
|
|
|
; GCN-NEXT: v_cmp_ge_u32_e64 s[0:1], v7, v1
|
|
|
|
; GCN-NEXT: s_and_b64 s[0:1], s[0:1], vcc
|
|
|
|
; GCN-NEXT: v_cndmask_b32_e64 v0, v2, v5, s[0:1]
|
|
|
|
; GCN-NEXT: v_cndmask_b32_e32 v0, v6, v0, vcc
|
|
|
|
; GCN-NEXT: v_xor_b32_e32 v0, v0, v4
|
|
|
|
; GCN-NEXT: v_sub_i32_e32 v0, vcc, v0, v4
|
|
|
|
; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0
|
|
|
|
; GCN-NEXT: s_endpgm
|
|
|
|
;
|
|
|
|
; TONGA-LABEL: sdiv_i32:
|
|
|
|
; TONGA: ; %bb.0:
|
|
|
|
; TONGA-NEXT: s_load_dwordx4 s[8:11], s[0:1], 0x24
|
|
|
|
; TONGA-NEXT: s_mov_b32 s7, 0xf000
|
|
|
|
; TONGA-NEXT: s_mov_b32 s6, -1
|
|
|
|
; TONGA-NEXT: s_mov_b32 s2, s6
|
|
|
|
; TONGA-NEXT: s_mov_b32 s3, s7
|
|
|
|
; TONGA-NEXT: s_waitcnt lgkmcnt(0)
|
|
|
|
; TONGA-NEXT: s_mov_b32 s0, s10
|
|
|
|
; TONGA-NEXT: s_mov_b32 s1, s11
|
|
|
|
; TONGA-NEXT: buffer_load_dwordx2 v[0:1], off, s[0:3], 0
|
|
|
|
; TONGA-NEXT: s_mov_b32 s4, s8
|
|
|
|
; TONGA-NEXT: s_mov_b32 s5, s9
|
|
|
|
; TONGA-NEXT: s_waitcnt vmcnt(0)
|
|
|
|
; TONGA-NEXT: v_ashrrev_i32_e32 v2, 31, v1
|
|
|
|
; TONGA-NEXT: v_add_u32_e32 v1, vcc, v2, v1
|
|
|
|
; TONGA-NEXT: v_xor_b32_e32 v1, v1, v2
|
|
|
|
; TONGA-NEXT: v_cvt_f32_u32_e32 v3, v1
|
|
|
|
; TONGA-NEXT: v_ashrrev_i32_e32 v6, 31, v0
|
|
|
|
; TONGA-NEXT: v_add_u32_e32 v0, vcc, v6, v0
|
|
|
|
; TONGA-NEXT: v_xor_b32_e32 v0, v0, v6
|
|
|
|
; TONGA-NEXT: v_rcp_iflag_f32_e32 v3, v3
|
|
|
|
; TONGA-NEXT: v_xor_b32_e32 v2, v6, v2
|
|
|
|
; TONGA-NEXT: v_mul_f32_e32 v3, 0x4f800000, v3
|
|
|
|
; TONGA-NEXT: v_cvt_u32_f32_e32 v3, v3
|
|
|
|
; TONGA-NEXT: v_mul_lo_u32 v4, v3, v1
|
|
|
|
; TONGA-NEXT: v_mul_hi_u32 v5, v3, v1
|
|
|
|
; TONGA-NEXT: v_sub_u32_e32 v7, vcc, 0, v4
|
|
|
|
; TONGA-NEXT: v_cmp_eq_u32_e64 s[0:1], 0, v5
|
|
|
|
; TONGA-NEXT: v_cndmask_b32_e64 v4, v4, v7, s[0:1]
|
|
|
|
; TONGA-NEXT: v_mul_hi_u32 v4, v4, v3
|
|
|
|
; TONGA-NEXT: v_add_u32_e32 v5, vcc, v4, v3
|
|
|
|
; TONGA-NEXT: v_subrev_u32_e32 v3, vcc, v4, v3
|
|
|
|
; TONGA-NEXT: v_cndmask_b32_e64 v3, v3, v5, s[0:1]
|
|
|
|
; TONGA-NEXT: v_mul_hi_u32 v3, v3, v0
|
|
|
|
; TONGA-NEXT: v_mul_lo_u32 v4, v3, v1
|
|
|
|
; TONGA-NEXT: v_add_u32_e32 v5, vcc, 1, v3
|
|
|
|
; TONGA-NEXT: v_add_u32_e32 v6, vcc, -1, v3
|
|
|
|
; TONGA-NEXT: v_subrev_u32_e32 v7, vcc, v4, v0
|
|
|
|
; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v0, v4
|
|
|
|
; TONGA-NEXT: v_cmp_ge_u32_e64 s[0:1], v7, v1
|
|
|
|
; TONGA-NEXT: s_and_b64 s[0:1], s[0:1], vcc
|
|
|
|
; TONGA-NEXT: v_cndmask_b32_e64 v0, v3, v5, s[0:1]
|
|
|
|
; TONGA-NEXT: v_cndmask_b32_e32 v0, v6, v0, vcc
|
|
|
|
; TONGA-NEXT: v_xor_b32_e32 v0, v0, v2
|
|
|
|
; TONGA-NEXT: v_sub_u32_e32 v0, vcc, v0, v2
|
|
|
|
; TONGA-NEXT: buffer_store_dword v0, off, s[4:7], 0
|
|
|
|
; TONGA-NEXT: s_endpgm
|
|
|
|
;
|
|
|
|
; GFX9-LABEL: sdiv_i32:
|
|
|
|
; GFX9: ; %bb.0:
|
|
|
|
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
|
|
|
|
; GFX9-NEXT: s_mov_b32 s7, 0xf000
|
|
|
|
; GFX9-NEXT: s_mov_b32 s6, -1
|
|
|
|
; GFX9-NEXT: s_mov_b32 s10, s6
|
|
|
|
; GFX9-NEXT: s_mov_b32 s11, s7
|
|
|
|
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
|
|
|
|
; GFX9-NEXT: s_mov_b32 s8, s2
|
|
|
|
; GFX9-NEXT: s_mov_b32 s9, s3
|
|
|
|
; GFX9-NEXT: buffer_load_dwordx2 v[0:1], off, s[8:11], 0
|
|
|
|
; GFX9-NEXT: s_mov_b32 s4, s0
|
|
|
|
; GFX9-NEXT: s_mov_b32 s5, s1
|
|
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
|
|
; GFX9-NEXT: v_ashrrev_i32_e32 v2, 31, v1
|
|
|
|
; GFX9-NEXT: v_add_u32_e32 v1, v1, v2
|
|
|
|
; GFX9-NEXT: v_xor_b32_e32 v1, v1, v2
|
|
|
|
; GFX9-NEXT: v_cvt_f32_u32_e32 v3, v1
|
|
|
|
; GFX9-NEXT: v_rcp_iflag_f32_e32 v3, v3
|
|
|
|
; GFX9-NEXT: v_mul_f32_e32 v3, 0x4f800000, v3
|
|
|
|
; GFX9-NEXT: v_cvt_u32_f32_e32 v3, v3
|
|
|
|
; GFX9-NEXT: v_mul_lo_u32 v4, v3, v1
|
|
|
|
; GFX9-NEXT: v_mul_hi_u32 v5, v3, v1
|
|
|
|
; GFX9-NEXT: v_sub_u32_e32 v6, 0, v4
|
|
|
|
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v5
|
|
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc
|
|
|
|
; GFX9-NEXT: v_mul_hi_u32 v4, v4, v3
|
|
|
|
; GFX9-NEXT: v_ashrrev_i32_e32 v5, 31, v0
|
|
|
|
; GFX9-NEXT: v_add_u32_e32 v0, v0, v5
|
|
|
|
; GFX9-NEXT: v_xor_b32_e32 v0, v0, v5
|
|
|
|
; GFX9-NEXT: v_add_u32_e32 v6, v3, v4
|
|
|
|
; GFX9-NEXT: v_sub_u32_e32 v3, v3, v4
|
|
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v6, vcc
|
|
|
|
; GFX9-NEXT: v_mul_hi_u32 v3, v3, v0
|
|
|
|
; GFX9-NEXT: v_xor_b32_e32 v2, v5, v2
|
|
|
|
; GFX9-NEXT: v_mul_lo_u32 v4, v3, v1
|
|
|
|
; GFX9-NEXT: v_add_u32_e32 v5, 1, v3
|
|
|
|
; GFX9-NEXT: v_add_u32_e32 v6, -1, v3
|
|
|
|
; GFX9-NEXT: v_sub_u32_e32 v7, v0, v4
|
|
|
|
; GFX9-NEXT: v_cmp_ge_u32_e32 vcc, v0, v4
|
|
|
|
; GFX9-NEXT: v_cmp_ge_u32_e64 s[0:1], v7, v1
|
|
|
|
; GFX9-NEXT: s_and_b64 s[0:1], s[0:1], vcc
|
|
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v0, v3, v5, s[0:1]
|
|
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v0, v6, v0, vcc
|
|
|
|
; GFX9-NEXT: v_xor_b32_e32 v0, v0, v2
|
|
|
|
; GFX9-NEXT: v_sub_u32_e32 v0, v0, v2
|
|
|
|
; GFX9-NEXT: buffer_store_dword v0, off, s[4:7], 0
|
|
|
|
; GFX9-NEXT: s_endpgm
|
|
|
|
;
|
|
|
|
; EG-LABEL: sdiv_i32:
|
|
|
|
; EG: ; %bb.0:
|
|
|
|
; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
|
|
|
|
; EG-NEXT: TEX 0 @6
|
|
|
|
; EG-NEXT: ALU 30, @9, KC0[CB0:0-32], KC1[]
|
|
|
|
; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
|
|
|
|
; EG-NEXT: CF_END
|
|
|
|
; EG-NEXT: PAD
|
|
|
|
; EG-NEXT: Fetch clause starting at 6:
|
|
|
|
; EG-NEXT: VTX_READ_64 T0.XY, T0.X, 0, #1
|
|
|
|
; EG-NEXT: ALU clause starting at 8:
|
|
|
|
; EG-NEXT: MOV * T0.X, KC0[2].Z,
|
|
|
|
; EG-NEXT: ALU clause starting at 9:
|
|
|
|
; EG-NEXT: SETGT_INT * T0.W, 0.0, T0.Y,
|
|
|
|
; EG-NEXT: ADD_INT * T1.W, T0.Y, PV.W,
|
|
|
|
; EG-NEXT: XOR_INT * T1.W, PV.W, T0.W,
|
|
|
|
; EG-NEXT: RECIP_UINT * T0.Y, PV.W,
|
|
|
|
; EG-NEXT: MULLO_INT * T0.Z, PS, T1.W,
|
|
|
|
; EG-NEXT: SUB_INT T2.W, 0.0, PS,
|
|
|
|
; EG-NEXT: MULHI * T1.X, T0.Y, T1.W,
|
|
|
|
; EG-NEXT: CNDE_INT T2.W, PS, PV.W, T0.Z,
|
|
|
|
; EG-NEXT: SETGT_INT * T3.W, 0.0, T0.X,
|
|
|
|
; EG-NEXT: MULHI * T0.Z, PV.W, T0.Y,
|
|
|
|
; EG-NEXT: ADD_INT T1.Z, T0.X, T3.W,
|
|
|
|
; EG-NEXT: ADD_INT T2.W, T0.Y, PS,
|
|
|
|
; EG-NEXT: SUB_INT * T4.W, T0.Y, PS,
|
|
|
|
; EG-NEXT: CNDE_INT T2.W, T1.X, PV.W, PS,
|
|
|
|
; EG-NEXT: XOR_INT * T4.W, PV.Z, T3.W,
|
|
|
|
; EG-NEXT: MULHI * T0.X, PV.W, PS,
|
|
|
|
; EG-NEXT: MULLO_INT * T0.Y, PS, T1.W,
|
|
|
|
; EG-NEXT: SUB_INT * T2.W, T4.W, PS,
|
|
|
|
; EG-NEXT: SETGE_UINT T1.W, PV.W, T1.W,
|
|
|
|
; EG-NEXT: SETGE_UINT * T2.W, T4.W, T0.Y,
|
|
|
|
; EG-NEXT: AND_INT T1.W, PV.W, PS,
|
|
|
|
; EG-NEXT: ADD_INT * T4.W, T0.X, 1,
|
|
|
|
; EG-NEXT: CNDE_INT T1.W, PV.W, T0.X, PS,
|
|
|
|
; EG-NEXT: ADD_INT * T4.W, T0.X, literal.x,
|
|
|
|
; EG-NEXT: -1(nan), 0(0.000000e+00)
|
|
|
|
; EG-NEXT: CNDE_INT T1.W, T2.W, PS, PV.W,
|
|
|
|
; EG-NEXT: XOR_INT * T0.W, T3.W, T0.W,
|
|
|
|
; EG-NEXT: XOR_INT * T1.W, PV.W, PS,
|
|
|
|
; EG-NEXT: SUB_INT T0.X, PV.W, T0.W,
|
|
|
|
; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
|
|
|
|
; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
  %den_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
  %num = load i32, i32 addrspace(1) * %in
  %den = load i32, i32 addrspace(1) * %den_ptr
  %result = sdiv i32 %num, %den
  store i32 %result, i32 addrspace(1)* %out
  ret void
}
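
; A power-of-two signed divide should not need the full expansion above; the
; expected pattern (reflected in the checks below) is: take the sign bit with
; an arithmetic shift, shift it right to form the rounding bias, add it to the
; numerator, then arithmetic-shift-right by log2(4) = 2.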
define amdgpu_kernel void @sdiv_i32_4(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
; GCN-LABEL: sdiv_i32_4:
|
|
|
|
; GCN: ; %bb.0:
|
|
|
|
; GCN-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
|
|
|
|
; GCN-NEXT: s_mov_b32 s7, 0xf000
|
|
|
|
; GCN-NEXT: s_mov_b32 s6, -1
|
|
|
|
; GCN-NEXT: s_mov_b32 s10, s6
|
|
|
|
; GCN-NEXT: s_mov_b32 s11, s7
|
|
|
|
; GCN-NEXT: s_waitcnt lgkmcnt(0)
|
|
|
|
; GCN-NEXT: s_mov_b32 s8, s2
|
|
|
|
; GCN-NEXT: s_mov_b32 s9, s3
|
|
|
|
; GCN-NEXT: buffer_load_dword v0, off, s[8:11], 0
|
|
|
|
; GCN-NEXT: s_mov_b32 s4, s0
|
|
|
|
; GCN-NEXT: s_mov_b32 s5, s1
|
|
|
|
; GCN-NEXT: s_waitcnt vmcnt(0)
|
|
|
|
; GCN-NEXT: v_ashrrev_i32_e32 v1, 31, v0
|
|
|
|
; GCN-NEXT: v_lshrrev_b32_e32 v1, 30, v1
|
|
|
|
; GCN-NEXT: v_add_i32_e32 v0, vcc, v0, v1
|
|
|
|
; GCN-NEXT: v_ashrrev_i32_e32 v0, 2, v0
|
|
|
|
; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0
|
|
|
|
; GCN-NEXT: s_endpgm
|
|
|
|
;
|
|
|
|
; TONGA-LABEL: sdiv_i32_4:
|
|
|
|
; TONGA: ; %bb.0:
|
|
|
|
; TONGA-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
|
|
|
|
; TONGA-NEXT: s_mov_b32 s3, 0xf000
|
|
|
|
; TONGA-NEXT: s_mov_b32 s2, -1
|
|
|
|
; TONGA-NEXT: s_waitcnt lgkmcnt(0)
|
|
|
|
; TONGA-NEXT: s_mov_b32 s0, s4
|
|
|
|
; TONGA-NEXT: s_mov_b32 s1, s5
|
|
|
|
; TONGA-NEXT: s_mov_b32 s4, s6
|
|
|
|
; TONGA-NEXT: s_mov_b32 s5, s7
|
|
|
|
; TONGA-NEXT: s_mov_b32 s6, s2
|
|
|
|
; TONGA-NEXT: s_mov_b32 s7, s3
|
|
|
|
; TONGA-NEXT: buffer_load_dword v0, off, s[4:7], 0
|
|
|
|
; TONGA-NEXT: s_waitcnt vmcnt(0)
|
|
|
|
; TONGA-NEXT: v_ashrrev_i32_e32 v1, 31, v0
|
|
|
|
; TONGA-NEXT: v_lshrrev_b32_e32 v1, 30, v1
|
|
|
|
; TONGA-NEXT: v_add_u32_e32 v0, vcc, v0, v1
|
|
|
|
; TONGA-NEXT: v_ashrrev_i32_e32 v0, 2, v0
|
|
|
|
; TONGA-NEXT: buffer_store_dword v0, off, s[0:3], 0
|
|
|
|
; TONGA-NEXT: s_endpgm
|
|
|
|
;
|
|
|
|
; GFX9-LABEL: sdiv_i32_4:
|
|
|
|
; GFX9: ; %bb.0:
|
|
|
|
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
|
|
|
|
; GFX9-NEXT: s_mov_b32 s3, 0xf000
|
|
|
|
; GFX9-NEXT: s_mov_b32 s2, -1
|
|
|
|
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
|
|
|
|
; GFX9-NEXT: s_mov_b32 s0, s4
|
|
|
|
; GFX9-NEXT: s_mov_b32 s1, s5
|
|
|
|
; GFX9-NEXT: s_mov_b32 s4, s6
|
|
|
|
; GFX9-NEXT: s_mov_b32 s5, s7
|
|
|
|
; GFX9-NEXT: s_mov_b32 s6, s2
|
|
|
|
; GFX9-NEXT: s_mov_b32 s7, s3
|
|
|
|
; GFX9-NEXT: buffer_load_dword v0, off, s[4:7], 0
|
|
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
|
|
; GFX9-NEXT: v_ashrrev_i32_e32 v1, 31, v0
|
|
|
|
; GFX9-NEXT: v_lshrrev_b32_e32 v1, 30, v1
|
|
|
|
; GFX9-NEXT: v_add_u32_e32 v0, v0, v1
|
|
|
|
; GFX9-NEXT: v_ashrrev_i32_e32 v0, 2, v0
|
|
|
|
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], 0
|
|
|
|
; GFX9-NEXT: s_endpgm
|
|
|
|
;
|
|
|
|
; EG-LABEL: sdiv_i32_4:
|
|
|
|
; EG: ; %bb.0:
|
|
|
|
; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
|
|
|
|
; EG-NEXT: TEX 0 @6
|
|
|
|
; EG-NEXT: ALU 7, @9, KC0[CB0:0-32], KC1[]
|
|
|
|
; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
|
|
|
|
; EG-NEXT: CF_END
|
|
|
|
; EG-NEXT: PAD
|
|
|
|
; EG-NEXT: Fetch clause starting at 6:
|
|
|
|
; EG-NEXT: VTX_READ_32 T0.X, T0.X, 0, #1
|
|
|
|
; EG-NEXT: ALU clause starting at 8:
|
|
|
|
; EG-NEXT: MOV * T0.X, KC0[2].Z,
|
|
|
|
; EG-NEXT: ALU clause starting at 9:
|
|
|
|
; EG-NEXT: ASHR * T0.W, T0.X, literal.x,
|
|
|
|
; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
|
|
|
|
; EG-NEXT: LSHR * T0.W, PV.W, literal.x,
|
|
|
|
; EG-NEXT: 30(4.203895e-44), 0(0.000000e+00)
|
|
|
|
; EG-NEXT: ADD_INT * T0.W, T0.X, PV.W,
|
|
|
|
; EG-NEXT: ASHR T0.X, PV.W, literal.x,
|
|
|
|
; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
|
|
|
|
; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
  %num = load i32, i32 addrspace(1) * %in
  %result = sdiv i32 %num, 4
  store i32 %result, i32 addrspace(1)* %out
  ret void
}

; Divide by a weird constant to make sure setIntDivIsCheap is working.
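; The expected lowering, visible in the checks below, replaces the divide with
; a signed multiply-high by the magic constant 0x98a1930b, followed by an add
; and a pair of shifts that fix up the rounding toward zero.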
define amdgpu_kernel void @slow_sdiv_i32_3435(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
; GCN-LABEL: slow_sdiv_i32_3435:
|
|
|
|
; GCN: ; %bb.0:
|
|
|
|
; GCN-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
|
|
|
|
; GCN-NEXT: s_mov_b32 s7, 0xf000
|
|
|
|
; GCN-NEXT: s_mov_b32 s6, -1
|
|
|
|
; GCN-NEXT: s_mov_b32 s10, s6
|
|
|
|
; GCN-NEXT: s_mov_b32 s11, s7
|
|
|
|
; GCN-NEXT: s_waitcnt lgkmcnt(0)
|
|
|
|
; GCN-NEXT: s_mov_b32 s8, s2
|
|
|
|
; GCN-NEXT: s_mov_b32 s9, s3
|
|
|
|
; GCN-NEXT: buffer_load_dword v0, off, s[8:11], 0
|
|
|
|
; GCN-NEXT: s_mov_b32 s2, 0x98a1930b
|
|
|
|
; GCN-NEXT: s_mov_b32 s4, s0
|
|
|
|
; GCN-NEXT: s_mov_b32 s5, s1
|
|
|
|
; GCN-NEXT: s_waitcnt vmcnt(0)
|
|
|
|
; GCN-NEXT: v_mul_hi_i32 v1, v0, s2
|
|
|
|
; GCN-NEXT: v_add_i32_e32 v0, vcc, v0, v1
|
|
|
|
; GCN-NEXT: v_lshrrev_b32_e32 v1, 31, v0
|
|
|
|
; GCN-NEXT: v_ashrrev_i32_e32 v0, 11, v0
|
|
|
|
; GCN-NEXT: v_add_i32_e32 v0, vcc, v1, v0
|
|
|
|
; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0
|
|
|
|
; GCN-NEXT: s_endpgm
|
|
|
|
;
|
|
|
|
; TONGA-LABEL: slow_sdiv_i32_3435:
|
|
|
|
; TONGA: ; %bb.0:
|
|
|
|
; TONGA-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
|
|
|
|
; TONGA-NEXT: s_mov_b32 s3, 0xf000
|
|
|
|
; TONGA-NEXT: s_mov_b32 s2, -1
|
|
|
|
; TONGA-NEXT: s_mov_b32 s10, s2
|
|
|
|
; TONGA-NEXT: s_mov_b32 s11, s3
|
|
|
|
; TONGA-NEXT: s_waitcnt lgkmcnt(0)
|
|
|
|
; TONGA-NEXT: s_mov_b32 s8, s6
|
|
|
|
; TONGA-NEXT: s_mov_b32 s9, s7
|
|
|
|
; TONGA-NEXT: buffer_load_dword v0, off, s[8:11], 0
|
|
|
|
; TONGA-NEXT: s_mov_b32 s0, 0x98a1930b
|
|
|
|
; TONGA-NEXT: s_mov_b32 s1, s5
|
|
|
|
; TONGA-NEXT: s_waitcnt vmcnt(0)
|
|
|
|
; TONGA-NEXT: v_mul_hi_i32 v1, v0, s0
|
|
|
|
; TONGA-NEXT: s_mov_b32 s0, s4
|
|
|
|
; TONGA-NEXT: v_add_u32_e32 v0, vcc, v0, v1
|
|
|
|
; TONGA-NEXT: v_lshrrev_b32_e32 v1, 31, v0
|
|
|
|
; TONGA-NEXT: v_ashrrev_i32_e32 v0, 11, v0
|
|
|
|
; TONGA-NEXT: v_add_u32_e32 v0, vcc, v1, v0
|
|
|
|
; TONGA-NEXT: buffer_store_dword v0, off, s[0:3], 0
|
|
|
|
; TONGA-NEXT: s_endpgm
|
|
|
|
;
|
|
|
|
; GFX9-LABEL: slow_sdiv_i32_3435:
|
|
|
|
; GFX9: ; %bb.0:
|
|
|
|
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
|
|
|
|
; GFX9-NEXT: s_mov_b32 s3, 0xf000
|
|
|
|
; GFX9-NEXT: s_mov_b32 s2, -1
|
|
|
|
; GFX9-NEXT: s_mov_b32 s10, s2
|
|
|
|
; GFX9-NEXT: s_mov_b32 s11, s3
|
|
|
|
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
|
|
|
|
; GFX9-NEXT: s_mov_b32 s8, s6
|
|
|
|
; GFX9-NEXT: s_mov_b32 s9, s7
|
|
|
|
; GFX9-NEXT: buffer_load_dword v0, off, s[8:11], 0
|
|
|
|
; GFX9-NEXT: s_mov_b32 s0, 0x98a1930b
|
|
|
|
; GFX9-NEXT: s_mov_b32 s1, s5
|
|
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
|
|
; GFX9-NEXT: v_mul_hi_i32 v1, v0, s0
|
|
|
|
; GFX9-NEXT: s_mov_b32 s0, s4
|
|
|
|
; GFX9-NEXT: v_add_u32_e32 v0, v1, v0
|
|
|
|
; GFX9-NEXT: v_lshrrev_b32_e32 v1, 31, v0
|
|
|
|
; GFX9-NEXT: v_ashrrev_i32_e32 v0, 11, v0
|
|
|
|
; GFX9-NEXT: v_add_u32_e32 v0, v0, v1
|
|
|
|
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], 0
|
|
|
|
; GFX9-NEXT: s_endpgm
|
|
|
|
;
|
|
|
|
; EG-LABEL: slow_sdiv_i32_3435:
|
|
|
|
; EG: ; %bb.0:
|
|
|
|
; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
|
|
|
|
; EG-NEXT: TEX 0 @6
|
|
|
|
; EG-NEXT: ALU 8, @9, KC0[CB0:0-32], KC1[]
|
|
|
|
; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
|
|
|
|
; EG-NEXT: CF_END
|
|
|
|
; EG-NEXT: PAD
|
|
|
|
; EG-NEXT: Fetch clause starting at 6:
|
|
|
|
; EG-NEXT: VTX_READ_32 T0.X, T0.X, 0, #1
|
|
|
|
; EG-NEXT: ALU clause starting at 8:
|
|
|
|
; EG-NEXT: MOV * T0.X, KC0[2].Z,
|
|
|
|
; EG-NEXT: ALU clause starting at 9:
|
|
|
|
; EG-NEXT: MULHI_INT * T0.Y, T0.X, literal.x,
|
|
|
|
; EG-NEXT: -1734241525(-4.176600e-24), 0(0.000000e+00)
|
|
|
|
; EG-NEXT: ADD_INT * T0.W, PS, T0.X,
|
|
|
|
; EG-NEXT: ASHR T1.W, PV.W, literal.x,
|
|
|
|
; EG-NEXT: LSHR * T0.W, PV.W, literal.y,
|
|
|
|
; EG-NEXT: 11(1.541428e-44), 31(4.344025e-44)
|
|
|
|
; EG-NEXT: ADD_INT T0.X, PV.W, PS,
|
|
|
|
; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
|
|
|
|
; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
  %num = load i32, i32 addrspace(1) * %in
  %result = sdiv i32 %num, 3435
  store i32 %result, i32 addrspace(1)* %out
  ret void
}
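
; The <2 x i32> case is expected to scalarize: the checks below show the same
; reciprocal-based expansion as @sdiv_i32 repeated once per vector element.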
define amdgpu_kernel void @sdiv_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
; GCN-LABEL: sdiv_v2i32:
|
|
|
|
; GCN: ; %bb.0:
|
|
|
|
; GCN-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
|
|
|
|
; GCN-NEXT: s_mov_b32 s11, 0xf000
|
|
|
|
; GCN-NEXT: s_mov_b32 s10, -1
|
|
|
|
; GCN-NEXT: s_mov_b32 s6, s10
|
|
|
|
; GCN-NEXT: s_mov_b32 s7, s11
|
|
|
|
; GCN-NEXT: s_waitcnt lgkmcnt(0)
|
|
|
|
; GCN-NEXT: s_mov_b32 s4, s2
|
|
|
|
; GCN-NEXT: s_mov_b32 s5, s3
|
|
|
|
; GCN-NEXT: buffer_load_dwordx4 v[0:3], off, s[4:7], 0
|
|
|
|
; GCN-NEXT: s_mov_b32 s2, 0x4f800000
|
|
|
|
; GCN-NEXT: s_mov_b32 s8, s0
|
|
|
|
; GCN-NEXT: s_mov_b32 s9, s1
|
|
|
|
; GCN-NEXT: s_waitcnt vmcnt(0)
|
|
|
|
; GCN-NEXT: v_ashrrev_i32_e32 v4, 31, v0
|
|
|
|
; GCN-NEXT: v_ashrrev_i32_e32 v5, 31, v2
|
|
|
|
; GCN-NEXT: v_ashrrev_i32_e32 v6, 31, v1
|
|
|
|
; GCN-NEXT: v_ashrrev_i32_e32 v7, 31, v3
|
|
|
|
; GCN-NEXT: v_xor_b32_e32 v8, v4, v5
|
|
|
|
; GCN-NEXT: v_add_i32_e32 v0, vcc, v4, v0
|
|
|
|
; GCN-NEXT: v_add_i32_e32 v2, vcc, v5, v2
|
|
|
|
; GCN-NEXT: v_xor_b32_e32 v9, v6, v7
|
|
|
|
; GCN-NEXT: v_add_i32_e32 v1, vcc, v6, v1
|
|
|
|
; GCN-NEXT: v_add_i32_e32 v3, vcc, v7, v3
|
|
|
|
; GCN-NEXT: v_xor_b32_e32 v0, v0, v4
|
|
|
|
; GCN-NEXT: v_xor_b32_e32 v2, v2, v5
|
|
|
|
; GCN-NEXT: v_xor_b32_e32 v1, v1, v6
|
|
|
|
; GCN-NEXT: v_xor_b32_e32 v3, v3, v7
|
|
|
|
; GCN-NEXT: v_cvt_f32_u32_e32 v4, v2
|
|
|
|
; GCN-NEXT: v_cvt_f32_u32_e32 v5, v3
|
|
|
|
; GCN-NEXT: v_rcp_iflag_f32_e32 v4, v4
|
|
|
|
; GCN-NEXT: v_rcp_iflag_f32_e32 v5, v5
|
|
|
|
; GCN-NEXT: v_mul_f32_e32 v4, s2, v4
|
|
|
|
; GCN-NEXT: v_mul_f32_e32 v5, s2, v5
|
|
|
|
; GCN-NEXT: v_cvt_u32_f32_e32 v4, v4
|
|
|
|
; GCN-NEXT: v_cvt_u32_f32_e32 v5, v5
|
|
|
|
; GCN-NEXT: v_mul_hi_u32 v6, v4, v2
|
|
|
|
; GCN-NEXT: v_mul_lo_u32 v7, v4, v2
|
|
|
|
; GCN-NEXT: v_mul_hi_u32 v10, v5, v3
|
|
|
|
; GCN-NEXT: v_mul_lo_u32 v11, v5, v3
|
|
|
|
; GCN-NEXT: v_sub_i32_e32 v12, vcc, 0, v7
|
|
|
|
; GCN-NEXT: v_sub_i32_e32 v13, vcc, 0, v11
|
|
|
|
; GCN-NEXT: v_cmp_eq_u32_e64 s[0:1], 0, v6
|
|
|
|
; GCN-NEXT: v_cndmask_b32_e64 v6, v7, v12, s[0:1]
|
|
|
|
; GCN-NEXT: v_cmp_eq_u32_e64 s[2:3], 0, v10
|
|
|
|
; GCN-NEXT: v_cndmask_b32_e64 v7, v11, v13, s[2:3]
|
|
|
|
; GCN-NEXT: v_mul_hi_u32 v6, v6, v4
|
|
|
|
; GCN-NEXT: v_mul_hi_u32 v7, v7, v5
|
|
|
|
; GCN-NEXT: v_add_i32_e32 v10, vcc, v6, v4
|
|
|
|
; GCN-NEXT: v_subrev_i32_e32 v4, vcc, v6, v4
|
|
|
|
; GCN-NEXT: v_add_i32_e32 v6, vcc, v7, v5
|
|
|
|
; GCN-NEXT: v_subrev_i32_e32 v5, vcc, v7, v5
|
|
|
|
; GCN-NEXT: v_cndmask_b32_e64 v4, v4, v10, s[0:1]
|
|
|
|
; GCN-NEXT: v_cndmask_b32_e64 v5, v5, v6, s[2:3]
|
|
|
|
; GCN-NEXT: v_mul_hi_u32 v4, v4, v0
|
|
|
|
; GCN-NEXT: v_mul_hi_u32 v5, v5, v1
|
|
|
|
; GCN-NEXT: v_mul_lo_u32 v6, v4, v2
|
|
|
|
; GCN-NEXT: v_add_i32_e32 v7, vcc, 1, v4
|
|
|
|
; GCN-NEXT: v_add_i32_e32 v10, vcc, -1, v4
|
|
|
|
; GCN-NEXT: v_mul_lo_u32 v11, v5, v3
|
|
|
|
; GCN-NEXT: v_add_i32_e32 v12, vcc, 1, v5
|
|
|
|
; GCN-NEXT: v_add_i32_e32 v13, vcc, -1, v5
|
|
|
|
; GCN-NEXT: v_subrev_i32_e32 v14, vcc, v6, v0
|
|
|
|
; GCN-NEXT: v_cmp_ge_u32_e64 s[0:1], v0, v6
|
|
|
|
; GCN-NEXT: v_subrev_i32_e32 v0, vcc, v11, v1
|
|
|
|
; GCN-NEXT: v_cmp_ge_u32_e32 vcc, v1, v11
|
|
|
|
; GCN-NEXT: v_cmp_ge_u32_e64 s[2:3], v14, v2
|
|
|
|
; GCN-NEXT: v_cmp_ge_u32_e64 s[4:5], v0, v3
|
|
|
|
; GCN-NEXT: s_and_b64 s[2:3], s[2:3], s[0:1]
|
|
|
|
; GCN-NEXT: v_cndmask_b32_e64 v0, v4, v7, s[2:3]
|
|
|
|
; GCN-NEXT: s_and_b64 s[2:3], s[4:5], vcc
|
|
|
|
; GCN-NEXT: v_cndmask_b32_e64 v1, v5, v12, s[2:3]
|
|
|
|
; GCN-NEXT: v_cndmask_b32_e64 v0, v10, v0, s[0:1]
|
|
|
|
; GCN-NEXT: v_cndmask_b32_e32 v1, v13, v1, vcc
|
|
|
|
; GCN-NEXT: v_xor_b32_e32 v0, v0, v8
|
|
|
|
; GCN-NEXT: v_xor_b32_e32 v1, v1, v9
|
|
|
|
; GCN-NEXT: v_sub_i32_e32 v0, vcc, v0, v8
|
|
|
|
; GCN-NEXT: v_sub_i32_e32 v1, vcc, v1, v9
|
|
|
|
; GCN-NEXT: buffer_store_dwordx2 v[0:1], off, s[8:11], 0
|
|
|
|
; GCN-NEXT: s_endpgm
|
|
|
|
;
|
|
|
|
; TONGA-LABEL: sdiv_v2i32:
|
|
|
|
; TONGA: ; %bb.0:
|
|
|
|
; TONGA-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
|
|
|
|
; TONGA-NEXT: s_mov_b32 s11, 0xf000
|
|
|
|
; TONGA-NEXT: s_mov_b32 s10, -1
|
|
|
|
; TONGA-NEXT: s_mov_b32 s4, 0x4f800000
|
|
|
|
; TONGA-NEXT: s_waitcnt lgkmcnt(0)
|
|
|
|
; TONGA-NEXT: s_mov_b32 s8, s0
|
|
|
|
; TONGA-NEXT: s_mov_b32 s9, s1
|
|
|
|
; TONGA-NEXT: s_mov_b32 s0, s2
|
|
|
|
; TONGA-NEXT: s_mov_b32 s1, s3
|
|
|
|
; TONGA-NEXT: s_mov_b32 s2, s10
|
|
|
|
; TONGA-NEXT: s_mov_b32 s3, s11
|
|
|
|
; TONGA-NEXT: buffer_load_dwordx4 v[0:3], off, s[0:3], 0
|
|
|
|
; TONGA-NEXT: s_waitcnt vmcnt(0)
|
|
|
|
; TONGA-NEXT: v_ashrrev_i32_e32 v5, 31, v2
|
|
|
|
; TONGA-NEXT: v_ashrrev_i32_e32 v7, 31, v3
|
|
|
|
; TONGA-NEXT: v_add_u32_e32 v2, vcc, v5, v2
|
|
|
|
; TONGA-NEXT: v_ashrrev_i32_e32 v4, 31, v0
|
|
|
|
; TONGA-NEXT: v_add_u32_e32 v3, vcc, v7, v3
|
|
|
|
; TONGA-NEXT: v_xor_b32_e32 v2, v2, v5
|
|
|
|
; TONGA-NEXT: v_ashrrev_i32_e32 v6, 31, v1
|
|
|
|
; TONGA-NEXT: v_xor_b32_e32 v8, v4, v5
|
|
|
|
; TONGA-NEXT: v_cvt_f32_u32_e32 v5, v2
|
|
|
|
; TONGA-NEXT: v_xor_b32_e32 v3, v3, v7
|
|
|
|
; TONGA-NEXT: v_xor_b32_e32 v9, v6, v7
|
|
|
|
; TONGA-NEXT: v_cvt_f32_u32_e32 v7, v3
|
|
|
|
; TONGA-NEXT: v_rcp_iflag_f32_e32 v5, v5
|
|
|
|
; TONGA-NEXT: v_add_u32_e32 v0, vcc, v4, v0
|
|
|
|
; TONGA-NEXT: v_xor_b32_e32 v0, v0, v4
|
|
|
|
; TONGA-NEXT: v_rcp_iflag_f32_e32 v7, v7
|
|
|
|
; TONGA-NEXT: v_mul_f32_e32 v4, s4, v5
|
|
|
|
; TONGA-NEXT: v_cvt_u32_f32_e32 v4, v4
|
|
|
|
; TONGA-NEXT: v_add_u32_e32 v1, vcc, v6, v1
|
|
|
|
; TONGA-NEXT: v_mul_f32_e32 v5, s4, v7
|
|
|
|
; TONGA-NEXT: v_cvt_u32_f32_e32 v5, v5
|
|
|
|
; TONGA-NEXT: v_xor_b32_e32 v1, v1, v6
|
|
|
|
; TONGA-NEXT: v_mul_hi_u32 v6, v4, v2
|
|
|
|
; TONGA-NEXT: v_mul_lo_u32 v7, v4, v2
|
|
|
|
; TONGA-NEXT: v_mul_hi_u32 v10, v5, v3
|
|
|
|
; TONGA-NEXT: v_mul_lo_u32 v11, v5, v3
|
|
|
|
; TONGA-NEXT: v_cmp_eq_u32_e64 s[0:1], 0, v6
|
|
|
|
; TONGA-NEXT: v_sub_u32_e32 v12, vcc, 0, v7
|
|
|
|
; TONGA-NEXT: v_cndmask_b32_e64 v6, v7, v12, s[0:1]
|
|
|
|
; TONGA-NEXT: v_sub_u32_e32 v13, vcc, 0, v11
|
|
|
|
; TONGA-NEXT: v_cmp_eq_u32_e64 s[2:3], 0, v10
|
|
|
|
; TONGA-NEXT: v_cndmask_b32_e64 v7, v11, v13, s[2:3]
|
|
|
|
; TONGA-NEXT: v_mul_hi_u32 v6, v6, v4
|
|
|
|
; TONGA-NEXT: v_mul_hi_u32 v7, v7, v5
|
|
|
|
; TONGA-NEXT: v_add_u32_e32 v10, vcc, v6, v4
|
|
|
|
; TONGA-NEXT: v_subrev_u32_e32 v4, vcc, v6, v4
|
|
|
|
; TONGA-NEXT: v_cndmask_b32_e64 v4, v4, v10, s[0:1]
|
|
|
|
; TONGA-NEXT: v_add_u32_e32 v6, vcc, v7, v5
|
|
|
|
; TONGA-NEXT: v_subrev_u32_e32 v5, vcc, v7, v5
|
|
|
|
; TONGA-NEXT: v_mul_hi_u32 v4, v4, v0
|
|
|
|
; TONGA-NEXT: v_cndmask_b32_e64 v5, v5, v6, s[2:3]
|
|
|
|
; TONGA-NEXT: v_mul_hi_u32 v5, v5, v1
|
|
|
|
; TONGA-NEXT: v_mul_lo_u32 v6, v4, v2
|
|
|
|
; TONGA-NEXT: v_add_u32_e32 v7, vcc, 1, v4
|
|
|
|
; TONGA-NEXT: v_mul_lo_u32 v11, v5, v3
|
|
|
|
; TONGA-NEXT: v_add_u32_e32 v10, vcc, -1, v4
|
|
|
|
; TONGA-NEXT: v_subrev_u32_e32 v14, vcc, v6, v0
|
|
|
|
; TONGA-NEXT: v_cmp_ge_u32_e64 s[0:1], v0, v6
|
|
|
|
; TONGA-NEXT: v_cmp_ge_u32_e64 s[2:3], v14, v2
|
|
|
|
; TONGA-NEXT: v_subrev_u32_e32 v0, vcc, v11, v1
|
|
|
|
; TONGA-NEXT: v_add_u32_e32 v12, vcc, 1, v5
|
|
|
|
; TONGA-NEXT: v_add_u32_e32 v13, vcc, -1, v5
|
|
|
|
; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v1, v11
|
|
|
|
; TONGA-NEXT: v_cmp_ge_u32_e64 s[4:5], v0, v3
|
|
|
|
; TONGA-NEXT: s_and_b64 s[2:3], s[2:3], s[0:1]
|
|
|
|
; TONGA-NEXT: v_cndmask_b32_e64 v0, v4, v7, s[2:3]
|
|
|
|
; TONGA-NEXT: s_and_b64 s[2:3], s[4:5], vcc
|
|
|
|
; TONGA-NEXT: v_cndmask_b32_e64 v1, v5, v12, s[2:3]
|
|
|
|
; TONGA-NEXT: v_cndmask_b32_e64 v0, v10, v0, s[0:1]
|
|
|
|
; TONGA-NEXT: v_cndmask_b32_e32 v1, v13, v1, vcc
|
|
|
|
; TONGA-NEXT: v_xor_b32_e32 v0, v0, v8
|
|
|
|
; TONGA-NEXT: v_xor_b32_e32 v1, v1, v9
|
|
|
|
; TONGA-NEXT: v_sub_u32_e32 v0, vcc, v0, v8
|
|
|
|
; TONGA-NEXT: v_sub_u32_e32 v1, vcc, v1, v9
|
|
|
|
; TONGA-NEXT: buffer_store_dwordx2 v[0:1], off, s[8:11], 0
|
|
|
|
; TONGA-NEXT: s_endpgm
|
|
|
|
;
|
|
|
|
; GFX9-LABEL: sdiv_v2i32:
|
|
|
|
; GFX9: ; %bb.0:
|
|
|
|
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
|
|
|
|
; GFX9-NEXT: s_mov_b32 s11, 0xf000
|
|
|
|
; GFX9-NEXT: s_mov_b32 s10, -1
|
|
|
|
; GFX9-NEXT: s_mov_b32 s4, 0x4f800000
|
|
|
|
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
|
|
|
|
; GFX9-NEXT: s_mov_b32 s8, s0
|
|
|
|
; GFX9-NEXT: s_mov_b32 s9, s1
|
|
|
|
; GFX9-NEXT: s_mov_b32 s0, s2
|
|
|
|
; GFX9-NEXT: s_mov_b32 s1, s3
|
|
|
|
; GFX9-NEXT: s_mov_b32 s2, s10
|
|
|
|
; GFX9-NEXT: s_mov_b32 s3, s11
|
|
|
|
; GFX9-NEXT: buffer_load_dwordx4 v[0:3], off, s[0:3], 0
|
|
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
|
|
; GFX9-NEXT: v_ashrrev_i32_e32 v5, 31, v2
|
|
|
|
; GFX9-NEXT: v_ashrrev_i32_e32 v6, 31, v3
|
|
|
|
; GFX9-NEXT: v_add_u32_e32 v2, v2, v5
|
|
|
|
; GFX9-NEXT: v_add_u32_e32 v3, v3, v6
|
|
|
|
; GFX9-NEXT: v_xor_b32_e32 v2, v2, v5
|
|
|
|
; GFX9-NEXT: v_cvt_f32_u32_e32 v7, v2
|
|
|
|
; GFX9-NEXT: v_xor_b32_e32 v3, v3, v6
|
|
|
|
; GFX9-NEXT: v_cvt_f32_u32_e32 v8, v3
|
|
|
|
; GFX9-NEXT: v_ashrrev_i32_e32 v4, 31, v0
|
|
|
|
; GFX9-NEXT: v_rcp_iflag_f32_e32 v7, v7
|
|
|
|
; GFX9-NEXT: v_add_u32_e32 v0, v0, v4
|
|
|
|
; GFX9-NEXT: v_rcp_iflag_f32_e32 v8, v8
|
|
|
|
; GFX9-NEXT: v_xor_b32_e32 v5, v4, v5
|
|
|
|
; GFX9-NEXT: v_mul_f32_e32 v7, s4, v7
|
|
|
|
; GFX9-NEXT: v_cvt_u32_f32_e32 v7, v7
|
|
|
|
; GFX9-NEXT: v_mul_f32_e32 v8, s4, v8
|
|
|
|
; GFX9-NEXT: v_cvt_u32_f32_e32 v8, v8
|
|
|
|
; GFX9-NEXT: v_xor_b32_e32 v0, v0, v4
|
|
|
|
; GFX9-NEXT: v_mul_lo_u32 v4, v7, v2
|
|
|
|
; GFX9-NEXT: v_mul_hi_u32 v11, v7, v2
|
|
|
|
; GFX9-NEXT: v_mul_lo_u32 v10, v8, v3
|
|
|
|
; GFX9-NEXT: v_mul_hi_u32 v12, v8, v3
|
|
|
|
; GFX9-NEXT: v_sub_u32_e32 v13, 0, v4
|
|
|
|
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v11
|
|
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v4, v4, v13, vcc
|
|
|
|
; GFX9-NEXT: v_sub_u32_e32 v14, 0, v10
|
|
|
|
; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], 0, v12
|
|
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v10, v10, v14, s[0:1]
|
|
|
|
; GFX9-NEXT: v_mul_hi_u32 v4, v4, v7
|
|
|
|
; GFX9-NEXT: v_mul_hi_u32 v10, v10, v8
|
|
|
|
; GFX9-NEXT: v_ashrrev_i32_e32 v9, 31, v1
|
|
|
|
; GFX9-NEXT: v_add_u32_e32 v1, v1, v9
|
|
|
|
; GFX9-NEXT: v_xor_b32_e32 v6, v9, v6
|
|
|
|
; GFX9-NEXT: v_xor_b32_e32 v1, v1, v9
|
|
|
|
; GFX9-NEXT: v_add_u32_e32 v9, v7, v4
|
|
|
|
; GFX9-NEXT: v_sub_u32_e32 v4, v7, v4
|
|
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v4, v4, v9, vcc
|
|
|
|
; GFX9-NEXT: v_add_u32_e32 v7, v8, v10
|
|
|
|
; GFX9-NEXT: v_sub_u32_e32 v8, v8, v10
|
|
|
|
; GFX9-NEXT: v_mul_hi_u32 v4, v4, v0
|
|
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v7, v8, v7, s[0:1]
|
|
|
|
; GFX9-NEXT: v_mul_hi_u32 v7, v7, v1
|
|
|
|
; GFX9-NEXT: v_mul_lo_u32 v8, v4, v2
|
|
|
|
; GFX9-NEXT: v_add_u32_e32 v9, 1, v4
|
|
|
|
; GFX9-NEXT: v_mul_lo_u32 v11, v7, v3
|
|
|
|
; GFX9-NEXT: v_add_u32_e32 v12, 1, v7
|
|
|
|
; GFX9-NEXT: v_sub_u32_e32 v14, v0, v8
|
|
|
|
; GFX9-NEXT: v_cmp_ge_u32_e32 vcc, v0, v8
|
|
|
|
; GFX9-NEXT: v_cmp_ge_u32_e64 s[2:3], v14, v2
|
|
|
|
; GFX9-NEXT: v_sub_u32_e32 v0, v1, v11
|
|
|
|
; GFX9-NEXT: v_cmp_ge_u32_e64 s[0:1], v1, v11
|
|
|
|
; GFX9-NEXT: v_cmp_ge_u32_e64 s[4:5], v0, v3
|
|
|
|
; GFX9-NEXT: s_and_b64 s[2:3], s[2:3], vcc
|
|
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v0, v4, v9, s[2:3]
|
|
|
|
; GFX9-NEXT: s_and_b64 s[2:3], s[4:5], s[0:1]
|
|
|
|
; GFX9-NEXT: v_add_u32_e32 v10, -1, v4
|
|
|
|
; GFX9-NEXT: v_add_u32_e32 v13, -1, v7
|
|
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v1, v7, v12, s[2:3]
|
|
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v0, v10, v0, vcc
|
|
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v1, v13, v1, s[0:1]
|
|
|
|
; GFX9-NEXT: v_xor_b32_e32 v0, v0, v5
|
|
|
|
; GFX9-NEXT: v_xor_b32_e32 v1, v1, v6
|
|
|
|
; GFX9-NEXT: v_sub_u32_e32 v0, v0, v5
|
|
|
|
; GFX9-NEXT: v_sub_u32_e32 v1, v1, v6
|
|
|
|
; GFX9-NEXT: buffer_store_dwordx2 v[0:1], off, s[8:11], 0
|
|
|
|
; GFX9-NEXT: s_endpgm
|
|
|
|
;
|
|
|
|
; EG-LABEL: sdiv_v2i32:
|
|
|
|
; EG: ; %bb.0:
|
|
|
|
; EG-NEXT: ALU 0, @10, KC0[CB0:0-32], KC1[]
|
|
|
|
; EG-NEXT: TEX 1 @6
|
|
|
|
; EG-NEXT: ALU 59, @11, KC0[CB0:0-32], KC1[]
|
|
|
|
; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.XY, T1.X, 1
|
|
|
|
; EG-NEXT: CF_END
|
|
|
|
; EG-NEXT: PAD
|
|
|
|
; EG-NEXT: Fetch clause starting at 6:
|
|
|
|
; EG-NEXT: VTX_READ_64 T1.XY, T0.X, 8, #1
|
|
|
|
; EG-NEXT: VTX_READ_64 T0.XY, T0.X, 0, #1
|
|
|
|
; EG-NEXT: ALU clause starting at 10:
|
|
|
|
; EG-NEXT: MOV * T0.X, KC0[2].Z,
|
|
|
|
; EG-NEXT: ALU clause starting at 11:
|
|
|
|
; EG-NEXT: SETGT_INT * T0.W, 0.0, T1.Y,
|
|
|
|
; EG-NEXT: ADD_INT * T1.W, T1.Y, PV.W,
|
|
|
|
; EG-NEXT: XOR_INT T1.W, PV.W, T0.W,
|
|
|
|
; EG-NEXT: SETGT_INT * T2.W, 0.0, T1.X,
|
|
|
|
; EG-NEXT: ADD_INT T3.W, T1.X, PS,
|
|
|
|
; EG-NEXT: RECIP_UINT * T0.Z, PV.W,
|
|
|
|
; EG-NEXT: XOR_INT T3.W, PV.W, T2.W, BS:VEC_021/SCL_122
|
|
|
|
; EG-NEXT: MULLO_INT * T1.X, PS, T1.W,
|
|
|
|
; EG-NEXT: RECIP_UINT * T1.Y, PV.W,
|
|
|
|
; EG-NEXT: MULLO_INT * T1.Z, PS, T3.W,
|
|
|
|
; EG-NEXT: SUB_INT T4.W, 0.0, PS,
|
|
|
|
; EG-NEXT: MULHI * T2.X, T1.Y, T3.W,
|
|
|
|
; EG-NEXT: CNDE_INT T1.Z, PS, PV.W, T1.Z, BS:VEC_021/SCL_122
|
|
|
|
; EG-NEXT: SUB_INT T4.W, 0.0, T1.X,
|
|
|
|
; EG-NEXT: MULHI * T2.Y, T0.Z, T1.W,
|
|
|
|
; EG-NEXT: CNDE_INT T2.Z, PS, PV.W, T1.X,
|
|
|
|
; EG-NEXT: SETGT_INT T4.W, 0.0, T0.X,
|
|
|
|
; EG-NEXT: MULHI * T1.X, PV.Z, T1.Y,
|
|
|
|
; EG-NEXT: SETGT_INT T3.X, 0.0, T0.Y,
|
|
|
|
; EG-NEXT: ADD_INT T3.Y, T0.X, PV.W,
|
|
|
|
; EG-NEXT: ADD_INT T1.Z, T1.Y, PS,
|
|
|
|
; EG-NEXT: SUB_INT T5.W, T1.Y, PS,
|
|
|
|
; EG-NEXT: MULHI * T0.X, PV.Z, T0.Z,
|
|
|
|
; EG-NEXT: CNDE_INT T1.X, T2.X, PV.Z, PV.W,
|
|
|
|
; EG-NEXT: XOR_INT T1.Y, PV.Y, T4.W,
|
|
|
|
; EG-NEXT: ADD_INT T1.Z, T0.Y, PV.X,
|
|
|
|
; EG-NEXT: ADD_INT T5.W, T0.Z, PS,
|
|
|
|
; EG-NEXT: SUB_INT * T6.W, T0.Z, PS,
|
|
|
|
; EG-NEXT: CNDE_INT T0.Z, T2.Y, PV.W, PS,
|
|
|
|
; EG-NEXT: XOR_INT T5.W, PV.Z, T3.X,
|
|
|
|
; EG-NEXT: MULHI * T0.X, PV.X, PV.Y,
|
|
|
|
; EG-NEXT: MULHI * T0.Y, PV.Z, PV.W,
|
|
|
|
; EG-NEXT: MULLO_INT * T0.Z, PS, T1.W,
|
|
|
|
; EG-NEXT: SUB_INT T6.W, T5.W, PS,
|
|
|
|
; EG-NEXT: MULLO_INT * T1.X, T0.X, T3.W,
|
|
|
|
; EG-NEXT: SUB_INT T1.Z, T1.Y, PS,
|
|
|
|
; EG-NEXT: SETGE_UINT T1.W, PV.W, T1.W,
|
|
|
|
; EG-NEXT: SETGE_UINT * T5.W, T5.W, T0.Z,
|
|
|
|
; EG-NEXT: AND_INT T2.Y, PV.W, PS,
|
|
|
|
; EG-NEXT: ADD_INT T0.Z, T0.Y, 1,
|
|
|
|
; EG-NEXT: SETGE_UINT T1.W, PV.Z, T3.W,
|
|
|
|
; EG-NEXT: SETGE_UINT * T3.W, T1.Y, T1.X,
|
|
|
|
; EG-NEXT: AND_INT T1.Y, PV.W, PS,
|
|
|
|
; EG-NEXT: ADD_INT T1.Z, T0.X, 1,
|
|
|
|
; EG-NEXT: CNDE_INT T1.W, PV.Y, T0.Y, PV.Z,
|
|
|
|
; EG-NEXT: ADD_INT * T6.W, T0.Y, literal.x,
|
|
|
|
; EG-NEXT: -1(nan), 0(0.000000e+00)
|
|
|
|
; EG-NEXT: CNDE_INT T0.Y, T5.W, PS, PV.W,
|
|
|
|
; EG-NEXT: XOR_INT T0.Z, T3.X, T0.W,
|
|
|
|
; EG-NEXT: CNDE_INT T0.W, PV.Y, T0.X, PV.Z,
|
|
|
|
; EG-NEXT: ADD_INT * T1.W, T0.X, literal.x,
|
|
|
|
; EG-NEXT: -1(nan), 0(0.000000e+00)
|
|
|
|
; EG-NEXT: CNDE_INT T1.Z, T3.W, PS, PV.W,
|
|
|
|
; EG-NEXT: XOR_INT T0.W, T4.W, T2.W, BS:VEC_120/SCL_212
|
|
|
|
; EG-NEXT: XOR_INT * T1.W, PV.Y, PV.Z,
|
|
|
|
; EG-NEXT: SUB_INT T0.Y, PS, T0.Z,
|
|
|
|
; EG-NEXT: XOR_INT * T1.W, PV.Z, PV.W,
|
|
|
|
; EG-NEXT: SUB_INT T0.X, PV.W, T0.W,
|
|
|
|
; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
|
|
|
|
; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
  %den_ptr = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %in, i32 1
  %num = load <2 x i32>, <2 x i32> addrspace(1) * %in
  %den = load <2 x i32>, <2 x i32> addrspace(1) * %den_ptr
  %result = sdiv <2 x i32> %num, %den
  store <2 x i32> %result, <2 x i32> addrspace(1)* %out
  ret void
}

define amdgpu_kernel void @sdiv_v2i32_4(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
; GCN-LABEL: sdiv_v2i32_4:
|
|
|
|
; GCN: ; %bb.0:
|
|
|
|
; GCN-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
|
|
|
|
; GCN-NEXT: s_mov_b32 s7, 0xf000
|
|
|
|
; GCN-NEXT: s_mov_b32 s6, -1
|
|
|
|
; GCN-NEXT: s_mov_b32 s10, s6
|
|
|
|
; GCN-NEXT: s_mov_b32 s11, s7
|
|
|
|
; GCN-NEXT: s_waitcnt lgkmcnt(0)
|
|
|
|
; GCN-NEXT: s_mov_b32 s8, s2
|
|
|
|
; GCN-NEXT: s_mov_b32 s9, s3
|
|
|
|
; GCN-NEXT: buffer_load_dwordx2 v[0:1], off, s[8:11], 0
|
|
|
|
; GCN-NEXT: s_mov_b32 s4, s0
|
|
|
|
; GCN-NEXT: s_mov_b32 s5, s1
|
|
|
|
; GCN-NEXT: s_waitcnt vmcnt(0)
|
|
|
|
; GCN-NEXT: v_ashrrev_i32_e32 v2, 31, v0
|
|
|
|
; GCN-NEXT: v_ashrrev_i32_e32 v3, 31, v1
|
|
|
|
; GCN-NEXT: v_lshrrev_b32_e32 v2, 30, v2
|
|
|
|
; GCN-NEXT: v_lshrrev_b32_e32 v3, 30, v3
|
|
|
|
; GCN-NEXT: v_add_i32_e32 v0, vcc, v2, v0
|
|
|
|
; GCN-NEXT: v_add_i32_e32 v1, vcc, v3, v1
|
|
|
|
; GCN-NEXT: v_ashrrev_i32_e32 v0, 2, v0
|
|
|
|
; GCN-NEXT: v_ashrrev_i32_e32 v1, 2, v1
|
|
|
|
; GCN-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
|
|
|
|
; GCN-NEXT: s_endpgm
|
|
|
|
;
|
|
|
|
; TONGA-LABEL: sdiv_v2i32_4:
|
|
|
|
; TONGA: ; %bb.0:
|
|
|
|
; TONGA-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
|
|
|
|
; TONGA-NEXT: s_mov_b32 s3, 0xf000
|
|
|
|
; TONGA-NEXT: s_mov_b32 s2, -1
|
|
|
|
; TONGA-NEXT: s_waitcnt lgkmcnt(0)
|
|
|
|
; TONGA-NEXT: s_mov_b32 s0, s4
|
|
|
|
; TONGA-NEXT: s_mov_b32 s1, s5
|
|
|
|
; TONGA-NEXT: s_mov_b32 s4, s6
|
|
|
|
; TONGA-NEXT: s_mov_b32 s5, s7
|
|
|
|
; TONGA-NEXT: s_mov_b32 s6, s2
|
|
|
|
; TONGA-NEXT: s_mov_b32 s7, s3
|
|
|
|
; TONGA-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0
|
|
|
|
; TONGA-NEXT: s_waitcnt vmcnt(0)
|
|
|
|
; TONGA-NEXT: v_ashrrev_i32_e32 v2, 31, v0
|
|
|
|
; TONGA-NEXT: v_ashrrev_i32_e32 v3, 31, v1
|
|
|
|
; TONGA-NEXT: v_lshrrev_b32_e32 v2, 30, v2
|
|
|
|
; TONGA-NEXT: v_lshrrev_b32_e32 v3, 30, v3
|
|
|
|
; TONGA-NEXT: v_add_u32_e32 v0, vcc, v2, v0
|
|
|
|
; TONGA-NEXT: v_add_u32_e32 v1, vcc, v3, v1
|
|
|
|
; TONGA-NEXT: v_ashrrev_i32_e32 v0, 2, v0
|
|
|
|
; TONGA-NEXT: v_ashrrev_i32_e32 v1, 2, v1
|
|
|
|
; TONGA-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
|
|
|
|
; TONGA-NEXT: s_endpgm
|
|
|
|
;
|
|
|
|
; GFX9-LABEL: sdiv_v2i32_4:
|
|
|
|
; GFX9: ; %bb.0:
|
|
|
|
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
|
|
|
|
; GFX9-NEXT: s_mov_b32 s3, 0xf000
|
|
|
|
; GFX9-NEXT: s_mov_b32 s2, -1
|
|
|
|
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
|
|
|
|
; GFX9-NEXT: s_mov_b32 s0, s4
|
|
|
|
; GFX9-NEXT: s_mov_b32 s1, s5
|
|
|
|
; GFX9-NEXT: s_mov_b32 s4, s6
|
|
|
|
; GFX9-NEXT: s_mov_b32 s5, s7
|
|
|
|
; GFX9-NEXT: s_mov_b32 s6, s2
|
|
|
|
; GFX9-NEXT: s_mov_b32 s7, s3
|
|
|
|
; GFX9-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0
|
|
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
|
|
; GFX9-NEXT: v_ashrrev_i32_e32 v2, 31, v0
|
|
|
|
; GFX9-NEXT: v_ashrrev_i32_e32 v3, 31, v1
|
|
|
|
; GFX9-NEXT: v_lshrrev_b32_e32 v2, 30, v2
|
|
|
|
; GFX9-NEXT: v_lshrrev_b32_e32 v3, 30, v3
|
|
|
|
; GFX9-NEXT: v_add_u32_e32 v0, v0, v2
|
|
|
|
; GFX9-NEXT: v_add_u32_e32 v1, v1, v3
|
|
|
|
; GFX9-NEXT: v_ashrrev_i32_e32 v0, 2, v0
|
|
|
|
; GFX9-NEXT: v_ashrrev_i32_e32 v1, 2, v1
|
|
|
|
; GFX9-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
|
|
|
|
; GFX9-NEXT: s_endpgm
|
|
|
|
;
|
|
|
|
; EG-LABEL: sdiv_v2i32_4:
|
|
|
|
; EG: ; %bb.0:
|
|
|
|
; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
|
|
|
|
; EG-NEXT: TEX 0 @6
|
|
|
|
; EG-NEXT: ALU 13, @9, KC0[CB0:0-32], KC1[]
|
|
|
|
; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.XY, T1.X, 1
|
|
|
|
; EG-NEXT: CF_END
|
|
|
|
; EG-NEXT: PAD
|
|
|
|
; EG-NEXT: Fetch clause starting at 6:
|
|
|
|
; EG-NEXT: VTX_READ_64 T0.XY, T0.X, 0, #1
|
|
|
|
; EG-NEXT: ALU clause starting at 8:
|
|
|
|
; EG-NEXT: MOV * T0.X, KC0[2].Z,
|
|
|
|
; EG-NEXT: ALU clause starting at 9:
|
|
|
|
; EG-NEXT: ASHR * T0.W, T0.Y, literal.x,
|
|
|
|
; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
|
|
|
|
; EG-NEXT: LSHR T0.W, PV.W, literal.x,
|
|
|
|
; EG-NEXT: ASHR * T1.W, T0.X, literal.y,
|
|
|
|
; EG-NEXT: 30(4.203895e-44), 31(4.344025e-44)
|
|
|
|
; EG-NEXT: LSHR T1.W, PS, literal.x,
|
|
|
|
; EG-NEXT: ADD_INT * T0.W, T0.Y, PV.W,
|
|
|
|
; EG-NEXT: 30(4.203895e-44), 0(0.000000e+00)
|
|
|
|
; EG-NEXT: ASHR T0.Y, PS, literal.x,
|
|
|
|
; EG-NEXT: ADD_INT * T0.W, T0.X, PV.W,
|
|
|
|
; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
|
|
|
|
; EG-NEXT: ASHR T0.X, PV.W, literal.x,
|
|
|
|
; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
|
|
|
|
; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
  %num = load <2 x i32>, <2 x i32> addrspace(1) * %in
  %result = sdiv <2 x i32> %num, <i32 4, i32 4>
  store <2 x i32> %result, <2 x i32> addrspace(1)* %out
  ret void
}

define amdgpu_kernel void @sdiv_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
; GCN-LABEL: sdiv_v4i32:
|
|
|
|
; GCN: ; %bb.0:
|
|
|
|
; GCN-NEXT: s_load_dwordx4 s[8:11], s[0:1], 0x9
|
|
|
|
; GCN-NEXT: s_mov_b32 s19, 0xf000
|
|
|
|
; GCN-NEXT: s_mov_b32 s18, -1
|
|
|
|
; GCN-NEXT: s_mov_b32 s2, s18
|
|
|
|
; GCN-NEXT: s_mov_b32 s3, s19
|
|
|
|
; GCN-NEXT: s_waitcnt lgkmcnt(0)
|
|
|
|
; GCN-NEXT: s_mov_b32 s0, s10
|
|
|
|
; GCN-NEXT: s_mov_b32 s1, s11
|
|
|
|
; GCN-NEXT: buffer_load_dwordx4 v[0:3], off, s[0:3], 0
|
|
|
|
; GCN-NEXT: buffer_load_dwordx4 v[4:7], off, s[0:3], 0 offset:16
|
|
|
|
; GCN-NEXT: s_mov_b32 s6, 0x4f800000
|
|
|
|
; GCN-NEXT: s_waitcnt vmcnt(1)
|
|
|
|
; GCN-NEXT: v_ashrrev_i32_e32 v8, 31, v0
|
|
|
|
; GCN-NEXT: s_waitcnt vmcnt(0)
|
|
|
|
; GCN-NEXT: v_ashrrev_i32_e32 v9, 31, v4
|
|
|
|
; GCN-NEXT: v_ashrrev_i32_e32 v10, 31, v1
|
|
|
|
; GCN-NEXT: v_ashrrev_i32_e32 v11, 31, v5
|
|
|
|
; GCN-NEXT: v_ashrrev_i32_e32 v12, 31, v2
|
|
|
|
; GCN-NEXT: v_ashrrev_i32_e32 v13, 31, v6
|
|
|
|
; GCN-NEXT: v_ashrrev_i32_e32 v14, 31, v3
|
|
|
|
; GCN-NEXT: v_ashrrev_i32_e32 v15, 31, v7
|
|
|
|
; GCN-NEXT: v_xor_b32_e32 v16, v8, v9
|
|
|
|
; GCN-NEXT: v_add_i32_e32 v0, vcc, v8, v0
|
|
|
|
; GCN-NEXT: v_add_i32_e32 v1, vcc, v10, v1
|
|
|
|
; GCN-NEXT: v_add_i32_e32 v2, vcc, v12, v2
|
|
|
|
; GCN-NEXT: v_add_i32_e32 v3, vcc, v14, v3
|
|
|
|
; GCN-NEXT: v_add_i32_e32 v4, vcc, v9, v4
|
|
|
|
; GCN-NEXT: v_add_i32_e32 v5, vcc, v11, v5
|
|
|
|
; GCN-NEXT: v_add_i32_e32 v6, vcc, v13, v6
|
|
|
|
; GCN-NEXT: v_add_i32_e32 v7, vcc, v15, v7
|
|
|
|
; GCN-NEXT: v_xor_b32_e32 v17, v10, v11
|
|
|
|
; GCN-NEXT: v_xor_b32_e32 v18, v12, v13
|
|
|
|
; GCN-NEXT: v_xor_b32_e32 v19, v14, v15
|
|
|
|
; GCN-NEXT: v_xor_b32_e32 v0, v0, v8
|
|
|
|
; GCN-NEXT: v_xor_b32_e32 v4, v4, v9
|
|
|
|
; GCN-NEXT: v_xor_b32_e32 v1, v1, v10
|
|
|
|
; GCN-NEXT: v_xor_b32_e32 v5, v5, v11
|
|
|
|
; GCN-NEXT: v_xor_b32_e32 v2, v2, v12
|
|
|
|
; GCN-NEXT: v_xor_b32_e32 v6, v6, v13
|
|
|
|
; GCN-NEXT: v_xor_b32_e32 v3, v3, v14
|
|
|
|
; GCN-NEXT: v_xor_b32_e32 v7, v7, v15
|
|
|
|
; GCN-NEXT: v_cvt_f32_u32_e32 v8, v4
|
|
|
|
; GCN-NEXT: v_cvt_f32_u32_e32 v9, v5
|
|
|
|
; GCN-NEXT: v_cvt_f32_u32_e32 v10, v6
|
|
|
|
; GCN-NEXT: v_rcp_iflag_f32_e32 v8, v8
|
|
|
|
; GCN-NEXT: v_rcp_iflag_f32_e32 v9, v9
|
|
|
|
; GCN-NEXT: v_rcp_iflag_f32_e32 v10, v10
|
|
|
|
; GCN-NEXT: v_mul_f32_e32 v8, s6, v8
|
|
|
|
; GCN-NEXT: v_mul_f32_e32 v9, s6, v9
|
|
|
|
; GCN-NEXT: v_mul_f32_e32 v10, s6, v10
|
|
|
|
; GCN-NEXT: v_cvt_u32_f32_e32 v8, v8
|
|
|
|
; GCN-NEXT: v_cvt_u32_f32_e32 v9, v9
|
|
|
|
; GCN-NEXT: v_cvt_u32_f32_e32 v10, v10
|
|
|
|
; GCN-NEXT: v_mul_hi_u32 v11, v8, v4
|
|
|
|
; GCN-NEXT: v_mul_lo_u32 v12, v8, v4
|
|
|
|
; GCN-NEXT: v_mul_hi_u32 v13, v9, v5
|
|
|
|
; GCN-NEXT: v_mul_lo_u32 v14, v9, v5
|
|
|
|
; GCN-NEXT: v_sub_i32_e32 v15, vcc, 0, v12
|
|
|
|
; GCN-NEXT: v_cmp_eq_u32_e64 s[0:1], 0, v11
|
|
|
|
; GCN-NEXT: v_mul_hi_u32 v11, v10, v6
|
|
|
|
; GCN-NEXT: v_cndmask_b32_e64 v12, v12, v15, s[0:1]
|
|
|
|
; GCN-NEXT: v_sub_i32_e32 v15, vcc, 0, v14
|
|
|
|
; GCN-NEXT: v_cmp_eq_u32_e64 s[2:3], 0, v13
|
|
|
|
; GCN-NEXT: v_mul_lo_u32 v13, v10, v6
|
|
|
|
; GCN-NEXT: v_cndmask_b32_e64 v14, v14, v15, s[2:3]
|
|
|
|
; GCN-NEXT: v_sub_i32_e32 v15, vcc, 0, v13
|
|
|
|
; GCN-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v11
|
|
|
|
; GCN-NEXT: v_cvt_f32_u32_e32 v11, v7
|
|
|
|
; GCN-NEXT: v_rcp_iflag_f32_e32 v11, v11
|
|
|
|
; GCN-NEXT: v_mul_f32_e32 v11, s6, v11
|
|
|
|
; GCN-NEXT: v_cvt_u32_f32_e32 v11, v11
|
|
|
|
; GCN-NEXT: v_cndmask_b32_e64 v13, v13, v15, s[4:5]
|
|
|
|
; GCN-NEXT: v_mul_hi_u32 v15, v11, v7
|
|
|
|
; GCN-NEXT: v_mul_lo_u32 v20, v11, v7
|
|
|
|
; GCN-NEXT: v_sub_i32_e32 v21, vcc, 0, v20
|
|
|
|
; GCN-NEXT: v_cmp_eq_u32_e64 s[6:7], 0, v15
|
|
|
|
; GCN-NEXT: v_cndmask_b32_e64 v15, v20, v21, s[6:7]
|
|
|
|
; GCN-NEXT: v_mul_hi_u32 v12, v12, v8
|
|
|
|
; GCN-NEXT: v_add_i32_e32 v20, vcc, v12, v8
|
|
|
|
; GCN-NEXT: v_subrev_i32_e32 v8, vcc, v12, v8
|
|
|
|
; GCN-NEXT: v_mul_hi_u32 v12, v14, v9
|
|
|
|
; GCN-NEXT: v_add_i32_e32 v14, vcc, v12, v9
|
|
|
|
; GCN-NEXT: v_subrev_i32_e32 v9, vcc, v12, v9
|
|
|
|
; GCN-NEXT: v_mul_hi_u32 v12, v13, v10
|
|
|
|
; GCN-NEXT: v_add_i32_e32 v13, vcc, v12, v10
|
|
|
|
; GCN-NEXT: v_subrev_i32_e32 v10, vcc, v12, v10
|
|
|
|
; GCN-NEXT: v_mul_hi_u32 v12, v15, v11
|
|
|
|
; GCN-NEXT: v_add_i32_e32 v15, vcc, v12, v11
|
|
|
|
; GCN-NEXT: v_subrev_i32_e32 v11, vcc, v12, v11
|
|
|
|
; GCN-NEXT: s_mov_b32 s16, s8
|
|
|
|
; GCN-NEXT: s_mov_b32 s17, s9
|
|
|
|
; GCN-NEXT: v_cndmask_b32_e64 v8, v8, v20, s[0:1]
|
|
|
|
; GCN-NEXT: v_cndmask_b32_e64 v9, v9, v14, s[2:3]
|
|
|
|
; GCN-NEXT: v_cndmask_b32_e64 v10, v10, v13, s[4:5]
|
|
|
|
; GCN-NEXT: v_cndmask_b32_e64 v11, v11, v15, s[6:7]
|
|
|
|
; GCN-NEXT: v_mul_hi_u32 v8, v8, v0
|
|
|
|
; GCN-NEXT: v_mul_hi_u32 v9, v9, v1
|
|
|
|
; GCN-NEXT: v_mul_hi_u32 v10, v10, v2
|
|
|
|
; GCN-NEXT: v_mul_hi_u32 v11, v11, v3
|
|
|
|
; GCN-NEXT: v_mul_lo_u32 v12, v8, v4
|
|
|
|
; GCN-NEXT: v_add_i32_e32 v13, vcc, 1, v8
|
|
|
|
; GCN-NEXT: v_add_i32_e32 v14, vcc, -1, v8
|
|
|
|
; GCN-NEXT: v_mul_lo_u32 v15, v9, v5
|
|
|
|
; GCN-NEXT: v_cmp_ge_u32_e64 s[0:1], v0, v12
|
|
|
|
; GCN-NEXT: v_sub_i32_e32 v0, vcc, v0, v12
|
|
|
|
; GCN-NEXT: v_add_i32_e32 v12, vcc, 1, v9
|
|
|
|
; GCN-NEXT: v_cmp_ge_u32_e64 s[2:3], v0, v4
|
|
|
|
; GCN-NEXT: v_add_i32_e32 v0, vcc, -1, v9
|
|
|
|
; GCN-NEXT: v_mul_lo_u32 v4, v10, v6
|
|
|
|
; GCN-NEXT: v_cmp_ge_u32_e64 s[4:5], v1, v15
|
|
|
|
; GCN-NEXT: v_sub_i32_e32 v1, vcc, v1, v15
|
|
|
|
; GCN-NEXT: v_add_i32_e32 v15, vcc, 1, v10
|
|
|
|
; GCN-NEXT: v_cmp_ge_u32_e64 s[6:7], v1, v5
|
|
|
|
; GCN-NEXT: v_add_i32_e32 v1, vcc, -1, v10
|
|
|
|
; GCN-NEXT: v_mul_lo_u32 v5, v11, v7
|
|
|
|
; GCN-NEXT: v_cmp_ge_u32_e64 s[8:9], v2, v4
|
|
|
|
; GCN-NEXT: v_sub_i32_e32 v2, vcc, v2, v4
|
|
|
|
; GCN-NEXT: v_add_i32_e32 v4, vcc, -1, v11
|
|
|
|
; GCN-NEXT: v_cmp_ge_u32_e64 s[10:11], v3, v5
|
|
|
|
; GCN-NEXT: v_sub_i32_e32 v3, vcc, v3, v5
|
|
|
|
; GCN-NEXT: v_add_i32_e32 v5, vcc, 1, v11
|
|
|
|
; GCN-NEXT: v_cmp_ge_u32_e32 vcc, v2, v6
|
|
|
|
; GCN-NEXT: v_cmp_ge_u32_e64 s[12:13], v3, v7
|
|
|
|
; GCN-NEXT: s_and_b64 s[2:3], s[2:3], s[0:1]
|
|
|
|
; GCN-NEXT: v_cndmask_b32_e64 v2, v8, v13, s[2:3]
|
|
|
|
; GCN-NEXT: s_and_b64 s[2:3], s[6:7], s[4:5]
|
|
|
|
; GCN-NEXT: v_cndmask_b32_e64 v3, v9, v12, s[2:3]
|
|
|
|
; GCN-NEXT: s_and_b64 vcc, vcc, s[8:9]
|
|
|
|
; GCN-NEXT: v_cndmask_b32_e32 v6, v10, v15, vcc
|
|
|
|
; GCN-NEXT: s_and_b64 vcc, s[12:13], s[10:11]
|
|
|
|
; GCN-NEXT: v_cndmask_b32_e32 v5, v11, v5, vcc
|
|
|
|
; GCN-NEXT: v_cndmask_b32_e64 v2, v14, v2, s[0:1]
|
|
|
|
; GCN-NEXT: v_cndmask_b32_e64 v0, v0, v3, s[4:5]
|
|
|
|
; GCN-NEXT: v_cndmask_b32_e64 v1, v1, v6, s[8:9]
|
|
|
|
; GCN-NEXT: v_cndmask_b32_e64 v3, v4, v5, s[10:11]
|
|
|
|
; GCN-NEXT: v_xor_b32_e32 v2, v2, v16
|
|
|
|
; GCN-NEXT: v_xor_b32_e32 v4, v0, v17
|
|
|
|
; GCN-NEXT: v_xor_b32_e32 v5, v1, v18
|
|
|
|
; GCN-NEXT: v_xor_b32_e32 v3, v3, v19
|
|
|
|
; GCN-NEXT: v_sub_i32_e32 v0, vcc, v2, v16
|
|
|
|
; GCN-NEXT: v_sub_i32_e32 v1, vcc, v4, v17
|
|
|
|
; GCN-NEXT: v_sub_i32_e32 v2, vcc, v5, v18
|
|
|
|
; GCN-NEXT: v_sub_i32_e32 v3, vcc, v3, v19
|
|
|
|
; GCN-NEXT: buffer_store_dwordx4 v[0:3], off, s[16:19], 0
|
|
|
|
; GCN-NEXT: s_endpgm
|
|
|
|
;
|
|
|
|
; TONGA-LABEL: sdiv_v4i32:
|
|
|
|
; TONGA: ; %bb.0:
|
|
|
|
; TONGA-NEXT: s_load_dwordx4 s[12:15], s[0:1], 0x24
|
|
|
|
; TONGA-NEXT: s_mov_b32 s11, 0xf000
|
|
|
|
; TONGA-NEXT: s_mov_b32 s10, -1
|
|
|
|
; TONGA-NEXT: s_mov_b32 s2, s10
|
|
|
|
; TONGA-NEXT: s_mov_b32 s3, s11
|
|
|
|
; TONGA-NEXT: s_waitcnt lgkmcnt(0)
|
|
|
|
; TONGA-NEXT: s_mov_b32 s0, s14
|
|
|
|
; TONGA-NEXT: s_mov_b32 s1, s15
|
|
|
|
; TONGA-NEXT: buffer_load_dwordx4 v[4:7], off, s[0:3], 0 offset:16
|
|
|
|
; TONGA-NEXT: buffer_load_dwordx4 v[0:3], off, s[0:3], 0
|
|
|
|
; TONGA-NEXT: s_mov_b32 s14, 0x4f800000
|
|
|
|
; TONGA-NEXT: s_mov_b32 s8, s12
|
|
|
|
; TONGA-NEXT: s_mov_b32 s9, s13
|
|
|
|
; TONGA-NEXT: s_waitcnt vmcnt(1)
|
|
|
|
; TONGA-NEXT: v_ashrrev_i32_e32 v9, 31, v4
|
|
|
|
; TONGA-NEXT: v_add_u32_e32 v4, vcc, v9, v4
|
|
|
|
; TONGA-NEXT: s_waitcnt vmcnt(0)
|
|
|
|
; TONGA-NEXT: v_ashrrev_i32_e32 v8, 31, v0
|
|
|
|
; TONGA-NEXT: v_xor_b32_e32 v4, v4, v9
|
|
|
|
; TONGA-NEXT: v_xor_b32_e32 v15, v8, v9
|
|
|
|
; TONGA-NEXT: v_cvt_f32_u32_e32 v9, v4
|
|
|
|
; TONGA-NEXT: v_ashrrev_i32_e32 v11, 31, v5
|
|
|
|
; TONGA-NEXT: v_add_u32_e32 v5, vcc, v11, v5
|
|
|
|
; TONGA-NEXT: v_add_u32_e32 v0, vcc, v8, v0
|
|
|
|
; TONGA-NEXT: v_rcp_iflag_f32_e32 v9, v9
|
|
|
|
; TONGA-NEXT: v_xor_b32_e32 v5, v5, v11
|
|
|
|
; TONGA-NEXT: v_xor_b32_e32 v0, v0, v8
|
|
|
|
; TONGA-NEXT: v_cvt_f32_u32_e32 v8, v5
|
|
|
|
; TONGA-NEXT: v_mul_f32_e32 v9, s14, v9
|
|
|
|
; TONGA-NEXT: v_cvt_u32_f32_e32 v9, v9
|
|
|
|
; TONGA-NEXT: v_ashrrev_i32_e32 v10, 31, v1
|
|
|
|
; TONGA-NEXT: v_rcp_iflag_f32_e32 v8, v8
|
|
|
|
; TONGA-NEXT: v_add_u32_e32 v1, vcc, v10, v1
|
|
|
|
; TONGA-NEXT: v_xor_b32_e32 v16, v10, v11
|
|
|
|
; TONGA-NEXT: v_xor_b32_e32 v1, v1, v10
|
|
|
|
; TONGA-NEXT: v_mul_f32_e32 v8, s14, v8
|
|
|
|
; TONGA-NEXT: v_mul_hi_u32 v11, v9, v4
|
|
|
|
; TONGA-NEXT: v_mul_lo_u32 v10, v9, v4
|
|
|
|
; TONGA-NEXT: v_cvt_u32_f32_e32 v8, v8
|
|
|
|
; TONGA-NEXT: v_ashrrev_i32_e32 v12, 31, v2
|
|
|
|
; TONGA-NEXT: v_ashrrev_i32_e32 v13, 31, v6
|
|
|
|
; TONGA-NEXT: v_add_u32_e32 v2, vcc, v12, v2
|
|
|
|
; TONGA-NEXT: v_cmp_eq_u32_e64 s[0:1], 0, v11
|
|
|
|
; TONGA-NEXT: v_xor_b32_e32 v17, v12, v13
|
|
|
|
; TONGA-NEXT: v_xor_b32_e32 v2, v2, v12
|
|
|
|
; TONGA-NEXT: v_sub_u32_e32 v12, vcc, 0, v10
|
|
|
|
; TONGA-NEXT: v_cndmask_b32_e64 v10, v10, v12, s[0:1]
|
|
|
|
; TONGA-NEXT: v_mul_hi_u32 v12, v8, v5
|
|
|
|
; TONGA-NEXT: v_add_u32_e32 v6, vcc, v13, v6
|
|
|
|
; TONGA-NEXT: v_xor_b32_e32 v6, v6, v13
|
|
|
|
; TONGA-NEXT: v_mul_lo_u32 v11, v8, v5
|
|
|
|
; TONGA-NEXT: v_cmp_eq_u32_e64 s[2:3], 0, v12
|
|
|
|
; TONGA-NEXT: v_cvt_f32_u32_e32 v12, v6
|
|
|
|
; TONGA-NEXT: v_mul_hi_u32 v10, v10, v9
|
|
|
|
; TONGA-NEXT: v_sub_u32_e32 v13, vcc, 0, v11
|
|
|
|
; TONGA-NEXT: v_cndmask_b32_e64 v11, v11, v13, s[2:3]
|
|
|
|
; TONGA-NEXT: v_rcp_iflag_f32_e32 v12, v12
|
|
|
|
; TONGA-NEXT: v_ashrrev_i32_e32 v14, 31, v7
|
|
|
|
; TONGA-NEXT: v_add_u32_e32 v7, vcc, v14, v7
|
|
|
|
; TONGA-NEXT: v_xor_b32_e32 v7, v7, v14
|
|
|
|
; TONGA-NEXT: v_mul_f32_e32 v12, s14, v12
|
|
|
|
; TONGA-NEXT: v_cvt_u32_f32_e32 v12, v12
|
|
|
|
; TONGA-NEXT: v_mul_hi_u32 v18, v12, v6
|
|
|
|
; TONGA-NEXT: v_mul_lo_u32 v13, v12, v6
|
|
|
|
; TONGA-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v18
|
|
|
|
; TONGA-NEXT: v_add_u32_e32 v18, vcc, v10, v9
|
|
|
|
; TONGA-NEXT: v_subrev_u32_e32 v9, vcc, v10, v9
|
|
|
|
; TONGA-NEXT: v_mul_hi_u32 v10, v11, v8
|
|
|
|
; TONGA-NEXT: v_cndmask_b32_e64 v9, v9, v18, s[0:1]
|
|
|
|
; TONGA-NEXT: v_mul_hi_u32 v9, v9, v0
|
|
|
|
; TONGA-NEXT: v_sub_u32_e32 v19, vcc, 0, v13
|
|
|
|
; TONGA-NEXT: v_add_u32_e32 v11, vcc, v10, v8
|
|
|
|
; TONGA-NEXT: v_subrev_u32_e32 v8, vcc, v10, v8
|
|
|
|
; TONGA-NEXT: v_cndmask_b32_e64 v13, v13, v19, s[4:5]
|
|
|
|
; TONGA-NEXT: v_cndmask_b32_e64 v8, v8, v11, s[2:3]
|
|
|
|
; TONGA-NEXT: v_mul_hi_u32 v10, v13, v12
|
|
|
|
; TONGA-NEXT: v_mul_lo_u32 v11, v9, v4
|
|
|
|
; TONGA-NEXT: v_mul_hi_u32 v8, v8, v1
|
|
|
|
; TONGA-NEXT: v_add_u32_e32 v13, vcc, v10, v12
|
|
|
|
; TONGA-NEXT: v_subrev_u32_e32 v10, vcc, v10, v12
|
|
|
|
; TONGA-NEXT: v_cmp_ge_u32_e64 s[0:1], v0, v11
|
|
|
|
; TONGA-NEXT: v_sub_u32_e32 v0, vcc, v0, v11
|
|
|
|
; TONGA-NEXT: v_cmp_ge_u32_e64 s[2:3], v0, v4
|
|
|
|
; TONGA-NEXT: v_cndmask_b32_e64 v10, v10, v13, s[4:5]
|
|
|
|
; TONGA-NEXT: v_mul_lo_u32 v0, v8, v5
|
|
|
|
; TONGA-NEXT: v_mul_hi_u32 v4, v10, v2
|
|
|
|
; TONGA-NEXT: v_add_u32_e32 v12, vcc, -1, v9
|
|
|
|
; TONGA-NEXT: v_add_u32_e32 v10, vcc, -1, v8
|
|
|
|
; TONGA-NEXT: v_cmp_ge_u32_e64 s[4:5], v1, v0
|
|
|
|
; TONGA-NEXT: v_sub_u32_e32 v0, vcc, v1, v0
|
|
|
|
; TONGA-NEXT: v_cmp_ge_u32_e64 s[6:7], v0, v5
|
|
|
|
; TONGA-NEXT: v_mul_lo_u32 v5, v4, v6
|
|
|
|
; TONGA-NEXT: v_add_u32_e32 v1, vcc, 1, v9
|
|
|
|
; TONGA-NEXT: v_add_u32_e32 v0, vcc, 1, v8
|
|
|
|
; TONGA-NEXT: s_and_b64 vcc, s[2:3], s[0:1]
|
|
|
|
; TONGA-NEXT: v_cndmask_b32_e32 v1, v9, v1, vcc
|
|
|
|
; TONGA-NEXT: v_sub_u32_e32 v9, vcc, v2, v5
|
|
|
|
; TONGA-NEXT: s_and_b64 vcc, s[6:7], s[4:5]
|
|
|
|
; TONGA-NEXT: v_cvt_f32_u32_e32 v11, v7
|
|
|
|
; TONGA-NEXT: v_cndmask_b32_e32 v0, v8, v0, vcc
|
|
|
|
; TONGA-NEXT: v_cndmask_b32_e64 v1, v12, v1, s[0:1]
|
|
|
|
; TONGA-NEXT: v_cndmask_b32_e64 v0, v10, v0, s[4:5]
|
|
|
|
; TONGA-NEXT: v_xor_b32_e32 v1, v1, v15
|
|
|
|
; TONGA-NEXT: v_xor_b32_e32 v8, v0, v16
|
|
|
|
; TONGA-NEXT: v_sub_u32_e32 v0, vcc, v1, v15
|
|
|
|
; TONGA-NEXT: v_sub_u32_e32 v1, vcc, v8, v16
|
|
|
|
; TONGA-NEXT: v_rcp_iflag_f32_e32 v8, v11
|
|
|
|
; TONGA-NEXT: v_cmp_ge_u32_e64 s[0:1], v9, v6
|
|
|
|
; TONGA-NEXT: v_cmp_ge_u32_e64 s[2:3], v2, v5
|
|
|
|
; TONGA-NEXT: v_ashrrev_i32_e32 v10, 31, v3
|
|
|
|
; TONGA-NEXT: v_mul_f32_e32 v8, s14, v8
|
|
|
|
; TONGA-NEXT: v_cvt_u32_f32_e32 v8, v8
|
|
|
|
; TONGA-NEXT: v_add_u32_e32 v3, vcc, v10, v3
|
|
|
|
; TONGA-NEXT: v_xor_b32_e32 v3, v3, v10
|
|
|
|
; TONGA-NEXT: v_add_u32_e32 v6, vcc, -1, v4
|
|
|
|
; TONGA-NEXT: v_mul_lo_u32 v5, v8, v7
|
|
|
|
; TONGA-NEXT: v_mul_hi_u32 v9, v8, v7
|
|
|
|
; TONGA-NEXT: v_add_u32_e32 v2, vcc, 1, v4
|
|
|
|
; TONGA-NEXT: v_sub_u32_e32 v11, vcc, 0, v5
|
|
|
|
; TONGA-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v9
|
|
|
|
; TONGA-NEXT: v_cndmask_b32_e64 v5, v5, v11, s[4:5]
|
|
|
|
; TONGA-NEXT: v_mul_hi_u32 v5, v5, v8
|
|
|
|
; TONGA-NEXT: v_add_u32_e32 v9, vcc, v5, v8
|
|
|
|
; TONGA-NEXT: v_subrev_u32_e32 v5, vcc, v5, v8
|
|
|
|
; TONGA-NEXT: v_cndmask_b32_e64 v5, v5, v9, s[4:5]
|
|
|
|
; TONGA-NEXT: v_mul_hi_u32 v5, v5, v3
|
|
|
|
; TONGA-NEXT: s_and_b64 vcc, s[0:1], s[2:3]
|
|
|
|
; TONGA-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
|
|
|
|
; TONGA-NEXT: v_cndmask_b32_e64 v2, v6, v2, s[2:3]
|
|
|
|
; TONGA-NEXT: v_mul_lo_u32 v4, v5, v7
|
|
|
|
; TONGA-NEXT: v_xor_b32_e32 v2, v2, v17
|
|
|
|
; TONGA-NEXT: v_sub_u32_e32 v2, vcc, v2, v17
|
|
|
|
; TONGA-NEXT: v_xor_b32_e32 v6, v10, v14
|
|
|
|
; TONGA-NEXT: v_sub_u32_e32 v8, vcc, v3, v4
|
|
|
|
; TONGA-NEXT: v_cmp_ge_u32_e64 s[0:1], v8, v7
|
|
|
|
; TONGA-NEXT: v_cmp_ge_u32_e64 s[2:3], v3, v4
|
|
|
|
; TONGA-NEXT: v_add_u32_e32 v7, vcc, -1, v5
|
|
|
|
; TONGA-NEXT: v_add_u32_e32 v3, vcc, 1, v5
|
|
|
|
; TONGA-NEXT: s_and_b64 vcc, s[0:1], s[2:3]
|
|
|
|
; TONGA-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc
|
|
|
|
; TONGA-NEXT: v_cndmask_b32_e64 v3, v7, v3, s[2:3]
|
|
|
|
; TONGA-NEXT: v_xor_b32_e32 v3, v3, v6
|
|
|
|
; TONGA-NEXT: v_sub_u32_e32 v3, vcc, v3, v6
|
|
|
|
; TONGA-NEXT: buffer_store_dwordx4 v[0:3], off, s[8:11], 0
|
|
|
|
; TONGA-NEXT: s_endpgm
|
|
|
|
;
|
|
|
|
; GFX9-LABEL: sdiv_v4i32:
|
|
|
|
; GFX9: ; %bb.0:
|
|
|
|
; GFX9-NEXT: s_load_dwordx4 s[8:11], s[0:1], 0x24
|
|
|
|
; GFX9-NEXT: s_mov_b32 s15, 0xf000
|
|
|
|
; GFX9-NEXT: s_mov_b32 s14, -1
|
|
|
|
; GFX9-NEXT: s_mov_b32 s2, s14
|
|
|
|
; GFX9-NEXT: s_mov_b32 s3, s15
|
|
|
|
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
|
|
|
|
; GFX9-NEXT: s_mov_b32 s0, s10
|
|
|
|
; GFX9-NEXT: s_mov_b32 s1, s11
|
|
|
|
; GFX9-NEXT: buffer_load_dwordx4 v[4:7], off, s[0:3], 0 offset:16
|
|
|
|
; GFX9-NEXT: buffer_load_dwordx4 v[0:3], off, s[0:3], 0
|
|
|
|
; GFX9-NEXT: s_mov_b32 s4, 0x4f800000
|
|
|
|
; GFX9-NEXT: s_mov_b32 s12, s8
|
|
|
|
; GFX9-NEXT: s_mov_b32 s13, s9
|
|
|
|
; GFX9-NEXT: s_waitcnt vmcnt(1)
|
|
|
|
; GFX9-NEXT: v_ashrrev_i32_e32 v9, 31, v4
|
|
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
|
|
; GFX9-NEXT: v_ashrrev_i32_e32 v8, 31, v0
|
|
|
|
; GFX9-NEXT: v_add_u32_e32 v4, v4, v9
|
|
|
|
; GFX9-NEXT: v_add_u32_e32 v0, v0, v8
|
|
|
|
; GFX9-NEXT: v_xor_b32_e32 v4, v4, v9
|
|
|
|
; GFX9-NEXT: v_xor_b32_e32 v16, v8, v9
|
|
|
|
; GFX9-NEXT: v_xor_b32_e32 v0, v0, v8
|
|
|
|
; GFX9-NEXT: v_cvt_f32_u32_e32 v8, v4
|
|
|
|
; GFX9-NEXT: v_ashrrev_i32_e32 v11, 31, v5
|
|
|
|
; GFX9-NEXT: v_add_u32_e32 v5, v5, v11
|
|
|
|
; GFX9-NEXT: v_xor_b32_e32 v5, v5, v11
|
|
|
|
; GFX9-NEXT: v_cvt_f32_u32_e32 v9, v5
|
|
|
|
; GFX9-NEXT: v_rcp_iflag_f32_e32 v8, v8
|
|
|
|
; GFX9-NEXT: v_ashrrev_i32_e32 v13, 31, v6
|
|
|
|
; GFX9-NEXT: v_ashrrev_i32_e32 v10, 31, v1
|
|
|
|
; GFX9-NEXT: v_add_u32_e32 v6, v6, v13
|
|
|
|
; GFX9-NEXT: v_add_u32_e32 v1, v1, v10
|
|
|
|
; GFX9-NEXT: v_rcp_iflag_f32_e32 v9, v9
|
|
|
|
; GFX9-NEXT: v_mul_f32_e32 v8, s4, v8
|
|
|
|
; GFX9-NEXT: v_xor_b32_e32 v6, v6, v13
|
|
|
|
; GFX9-NEXT: v_xor_b32_e32 v17, v10, v11
|
|
|
|
; GFX9-NEXT: v_xor_b32_e32 v1, v1, v10
|
|
|
|
; GFX9-NEXT: v_cvt_f32_u32_e32 v10, v6
|
|
|
|
; GFX9-NEXT: v_cvt_u32_f32_e32 v8, v8
|
|
|
|
; GFX9-NEXT: v_ashrrev_i32_e32 v12, 31, v2
|
|
|
|
; GFX9-NEXT: v_add_u32_e32 v2, v2, v12
|
|
|
|
; GFX9-NEXT: v_mul_f32_e32 v9, s4, v9
|
|
|
|
; GFX9-NEXT: v_rcp_iflag_f32_e32 v10, v10
|
|
|
|
; GFX9-NEXT: v_xor_b32_e32 v18, v12, v13
|
|
|
|
; GFX9-NEXT: v_xor_b32_e32 v2, v2, v12
|
|
|
|
; GFX9-NEXT: v_cvt_u32_f32_e32 v9, v9
|
|
|
|
; GFX9-NEXT: v_mul_hi_u32 v12, v8, v4
|
|
|
|
; GFX9-NEXT: v_mul_lo_u32 v11, v8, v4
|
|
|
|
; GFX9-NEXT: v_mul_f32_e32 v10, s4, v10
|
|
|
|
; GFX9-NEXT: v_mul_lo_u32 v13, v9, v5
|
|
|
|
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v12
|
|
|
|
; GFX9-NEXT: v_mul_hi_u32 v12, v9, v5
|
|
|
|
; GFX9-NEXT: v_cvt_u32_f32_e32 v10, v10
|
|
|
|
; GFX9-NEXT: v_sub_u32_e32 v19, 0, v11
|
|
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v11, v11, v19, vcc
|
|
|
|
; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], 0, v12
|
|
|
|
; GFX9-NEXT: v_sub_u32_e32 v19, 0, v13
|
|
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v13, v13, v19, s[0:1]
|
|
|
|
; GFX9-NEXT: v_mul_hi_u32 v19, v10, v6
|
|
|
|
; GFX9-NEXT: v_ashrrev_i32_e32 v15, 31, v7
|
|
|
|
; GFX9-NEXT: v_add_u32_e32 v7, v7, v15
|
|
|
|
; GFX9-NEXT: v_xor_b32_e32 v7, v7, v15
|
|
|
|
; GFX9-NEXT: v_cmp_eq_u32_e64 s[2:3], 0, v19
|
|
|
|
; GFX9-NEXT: v_cvt_f32_u32_e32 v19, v7
|
|
|
|
; GFX9-NEXT: v_mul_hi_u32 v11, v11, v8
|
|
|
|
; GFX9-NEXT: v_mul_lo_u32 v12, v10, v6
|
|
|
|
; GFX9-NEXT: v_ashrrev_i32_e32 v14, 31, v3
|
|
|
|
; GFX9-NEXT: v_rcp_iflag_f32_e32 v19, v19
|
|
|
|
; GFX9-NEXT: v_add_u32_e32 v3, v3, v14
|
|
|
|
; GFX9-NEXT: v_sub_u32_e32 v20, 0, v12
|
|
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v12, v12, v20, s[2:3]
|
|
|
|
; GFX9-NEXT: v_mul_f32_e32 v19, s4, v19
|
|
|
|
; GFX9-NEXT: v_cvt_u32_f32_e32 v19, v19
|
|
|
|
; GFX9-NEXT: v_xor_b32_e32 v3, v3, v14
|
|
|
|
; GFX9-NEXT: v_mul_hi_u32 v21, v19, v7
|
|
|
|
; GFX9-NEXT: v_mul_lo_u32 v20, v19, v7
|
|
|
|
; GFX9-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v21
|
|
|
|
; GFX9-NEXT: v_add_u32_e32 v21, v8, v11
|
|
|
|
; GFX9-NEXT: v_sub_u32_e32 v8, v8, v11
|
|
|
|
; GFX9-NEXT: v_mul_hi_u32 v11, v13, v9
|
|
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v8, v8, v21, vcc
|
|
|
|
; GFX9-NEXT: v_mul_hi_u32 v8, v8, v0
|
|
|
|
; GFX9-NEXT: v_sub_u32_e32 v22, 0, v20
|
|
|
|
; GFX9-NEXT: v_add_u32_e32 v13, v9, v11
|
|
|
|
; GFX9-NEXT: v_sub_u32_e32 v9, v9, v11
|
|
|
|
; GFX9-NEXT: v_mul_hi_u32 v11, v12, v10
|
|
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v9, v9, v13, s[0:1]
|
|
|
|
; GFX9-NEXT: v_mul_hi_u32 v9, v9, v1
|
|
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v20, v20, v22, s[4:5]
|
|
|
|
; GFX9-NEXT: v_add_u32_e32 v12, v10, v11
|
|
|
|
; GFX9-NEXT: v_sub_u32_e32 v10, v10, v11
|
|
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v10, v10, v12, s[2:3]
|
|
|
|
; GFX9-NEXT: v_mul_lo_u32 v12, v8, v4
|
|
|
|
; GFX9-NEXT: v_mul_hi_u32 v11, v20, v19
|
|
|
|
; GFX9-NEXT: v_mul_hi_u32 v10, v10, v2
|
|
|
|
; GFX9-NEXT: v_add_u32_e32 v13, 1, v8
|
|
|
|
; GFX9-NEXT: v_cmp_ge_u32_e32 vcc, v0, v12
|
|
|
|
; GFX9-NEXT: v_sub_u32_e32 v0, v0, v12
|
|
|
|
; GFX9-NEXT: v_mul_lo_u32 v12, v9, v5
|
|
|
|
; GFX9-NEXT: v_add_u32_e32 v20, v19, v11
|
|
|
|
; GFX9-NEXT: v_sub_u32_e32 v11, v19, v11
|
|
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v11, v11, v20, s[4:5]
|
|
|
|
; GFX9-NEXT: v_cmp_ge_u32_e64 s[2:3], v1, v12
|
|
|
|
; GFX9-NEXT: v_sub_u32_e32 v1, v1, v12
|
|
|
|
; GFX9-NEXT: v_mul_lo_u32 v12, v10, v6
|
|
|
|
; GFX9-NEXT: v_mul_hi_u32 v11, v11, v3
|
|
|
|
; GFX9-NEXT: v_cmp_ge_u32_e64 s[0:1], v0, v4
|
|
|
|
; GFX9-NEXT: v_cmp_ge_u32_e64 s[4:5], v1, v5
|
|
|
|
; GFX9-NEXT: v_cmp_ge_u32_e64 s[6:7], v2, v12
|
|
|
|
; GFX9-NEXT: v_sub_u32_e32 v2, v2, v12
|
|
|
|
; GFX9-NEXT: s_and_b64 s[0:1], s[0:1], vcc
|
|
|
|
; GFX9-NEXT: v_cmp_ge_u32_e64 s[8:9], v2, v6
|
|
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v2, v8, v13, s[0:1]
|
|
|
|
; GFX9-NEXT: v_add_u32_e32 v0, 1, v9
|
|
|
|
; GFX9-NEXT: s_and_b64 s[0:1], s[4:5], s[2:3]
|
|
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v0, v9, v0, s[0:1]
|
|
|
|
; GFX9-NEXT: v_add_u32_e32 v1, 1, v10
|
|
|
|
; GFX9-NEXT: s_and_b64 s[0:1], s[8:9], s[6:7]
|
|
|
|
; GFX9-NEXT: v_mul_lo_u32 v12, v11, v7
|
|
|
|
; GFX9-NEXT: v_add_u32_e32 v19, -1, v8
|
|
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v1, v10, v1, s[0:1]
|
|
|
|
; GFX9-NEXT: v_add_u32_e32 v5, -1, v10
|
|
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v2, v19, v2, vcc
|
|
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v1, v5, v1, s[6:7]
|
|
|
|
; GFX9-NEXT: v_add_u32_e32 v4, -1, v9
|
|
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v0, v4, v0, s[2:3]
|
|
|
|
; GFX9-NEXT: v_xor_b32_e32 v2, v2, v16
|
|
|
|
; GFX9-NEXT: v_xor_b32_e32 v5, v1, v18
|
|
|
|
; GFX9-NEXT: v_xor_b32_e32 v4, v0, v17
|
|
|
|
; GFX9-NEXT: v_sub_u32_e32 v0, v2, v16
|
|
|
|
; GFX9-NEXT: v_sub_u32_e32 v2, v5, v18
|
|
|
|
; GFX9-NEXT: v_sub_u32_e32 v5, v3, v12
|
|
|
|
; GFX9-NEXT: v_cmp_ge_u32_e32 vcc, v5, v7
|
|
|
|
; GFX9-NEXT: v_cmp_ge_u32_e64 s[0:1], v3, v12
|
|
|
|
; GFX9-NEXT: v_add_u32_e32 v3, 1, v11
|
|
|
|
; GFX9-NEXT: s_and_b64 vcc, vcc, s[0:1]
|
|
|
|
; GFX9-NEXT: v_add_u32_e32 v5, -1, v11
|
|
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v3, v11, v3, vcc
|
|
|
|
; GFX9-NEXT: v_sub_u32_e32 v1, v4, v17
|
|
|
|
; GFX9-NEXT: v_xor_b32_e32 v4, v14, v15
|
|
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v3, v5, v3, s[0:1]
|
|
|
|
; GFX9-NEXT: v_xor_b32_e32 v3, v3, v4
|
|
|
|
; GFX9-NEXT: v_sub_u32_e32 v3, v3, v4
|
|
|
|
; GFX9-NEXT: buffer_store_dwordx4 v[0:3], off, s[12:15], 0
|
|
|
|
; GFX9-NEXT: s_endpgm
|
|
|
|
;
|
|
|
|
; EG-LABEL: sdiv_v4i32:
|
|
|
|
; EG: ; %bb.0:
|
|
|
|
; EG-NEXT: ALU 0, @12, KC0[CB0:0-32], KC1[]
|
|
|
|
; EG-NEXT: TEX 0 @8
|
|
|
|
; EG-NEXT: ALU 2, @13, KC0[], KC1[]
|
|
|
|
; EG-NEXT: TEX 0 @10
|
|
|
|
; EG-NEXT: ALU 114, @16, KC0[CB0:0-32], KC1[]
|
|
|
|
; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T3.XYZW, T0.X, 1
|
|
|
|
; EG-NEXT: CF_END
|
|
|
|
; EG-NEXT: PAD
|
|
|
|
; EG-NEXT: Fetch clause starting at 8:
|
|
|
|
; EG-NEXT: VTX_READ_128 T1.XYZW, T0.X, 16, #1
|
|
|
|
; EG-NEXT: Fetch clause starting at 10:
|
|
|
|
; EG-NEXT: VTX_READ_128 T3.XYZW, T0.X, 0, #1
|
|
|
|
; EG-NEXT: ALU clause starting at 12:
|
|
|
|
; EG-NEXT: MOV * T0.X, KC0[2].Z,
|
|
|
|
; EG-NEXT: ALU clause starting at 13:
|
|
|
|
; EG-NEXT: SETGT_INT * T0.W, 0.0, T1.Z,
|
|
|
|
; EG-NEXT: ADD_INT * T2.W, T1.Z, PV.W,
|
|
|
|
; EG-NEXT: XOR_INT * T2.W, PV.W, T0.W,
|
|
|
|
; EG-NEXT: ALU clause starting at 16:
|
|
|
|
; EG-NEXT: RECIP_UINT * T0.X, T2.W,
|
|
|
|
; EG-NEXT: MULLO_INT * T0.Y, PS, T2.W,
|
|
|
|
; EG-NEXT: SUB_INT T4.W, 0.0, PS,
|
|
|
|
; EG-NEXT: MULHI * T0.Z, T0.X, T2.W,
|
|
|
|
; EG-NEXT: CNDE_INT T4.W, PS, PV.W, T0.Y,
|
|
|
|
; EG-NEXT: SETGT_INT * T5.W, 0.0, T3.Z,
|
|
|
|
; EG-NEXT: MULHI * T0.Y, PV.W, T0.X,
|
|
|
|
; EG-NEXT: SETGT_INT T2.Y, 0.0, T1.W,
|
|
|
|
; EG-NEXT: ADD_INT T1.Z, T3.Z, T5.W, BS:VEC_021/SCL_122
|
|
|
|
; EG-NEXT: ADD_INT T4.W, T0.X, PS,
|
|
|
|
; EG-NEXT: SUB_INT * T6.W, T0.X, PS,
|
|
|
|
; EG-NEXT: CNDE_INT T0.Z, T0.Z, PV.W, PS,
|
|
|
|
; EG-NEXT: XOR_INT T4.W, PV.Z, T5.W,
|
|
|
|
; EG-NEXT: ADD_INT * T1.W, T1.W, PV.Y,
|
|
|
|
; EG-NEXT: XOR_INT T1.W, PS, T2.Y,
|
|
|
|
; EG-NEXT: MULHI * T0.X, PV.Z, PV.W,
|
|
|
|
; EG-NEXT: SETGT_INT T6.W, 0.0, T1.Y,
|
|
|
|
; EG-NEXT: RECIP_UINT * T0.Y, PV.W,
|
|
|
|
; EG-NEXT: ADD_INT T7.W, T1.Y, PV.W,
|
|
|
|
; EG-NEXT: MULLO_INT * T0.Z, PS, T1.W,
|
|
|
|
; EG-NEXT: XOR_INT T1.Z, PV.W, T6.W, BS:VEC_021/SCL_122
|
|
|
|
; EG-NEXT: SUB_INT T7.W, 0.0, PS,
|
|
|
|
; EG-NEXT: MULHI * T1.Y, T0.Y, T1.W,
|
|
|
|
; EG-NEXT: CNDE_INT T7.W, PS, PV.W, T0.Z,
|
|
|
|
; EG-NEXT: RECIP_UINT * T0.Z, PV.Z,
|
|
|
|
; EG-NEXT: SETGT_INT T8.W, 0.0, T3.W,
|
|
|
|
; EG-NEXT: MULHI * T2.X, PV.W, T0.Y,
|
|
|
|
; EG-NEXT: ADD_INT T4.Y, T3.W, PV.W,
|
|
|
|
; EG-NEXT: ADD_INT T2.Z, T0.Y, PS,
|
|
|
|
; EG-NEXT: SUB_INT T3.W, T0.Y, PS,
|
|
|
|
; EG-NEXT: MULLO_INT * T0.Y, T0.Z, T1.Z,
|
|
|
|
; EG-NEXT: CNDE_INT T2.X, T1.Y, PV.Z, PV.W,
|
|
|
|
; EG-NEXT: XOR_INT T1.Y, PV.Y, T8.W,
|
|
|
|
; EG-NEXT: SETGT_INT T2.Z, 0.0, T1.X,
|
|
|
|
; EG-NEXT: SUB_INT T3.W, 0.0, PS,
|
|
|
|
; EG-NEXT: MULHI * T3.Z, T0.Z, T1.Z,
|
|
|
|
; EG-NEXT: CNDE_INT T4.Z, PS, PV.W, T0.Y,
|
|
|
|
; EG-NEXT: ADD_INT T3.W, T1.X, PV.Z,
|
|
|
|
; EG-NEXT: MULHI * T0.Y, PV.X, PV.Y,
|
|
|
|
; EG-NEXT: XOR_INT T3.W, PV.W, T2.Z, BS:VEC_021/SCL_122
|
|
|
|
; EG-NEXT: MULHI * T1.X, PV.Z, T0.Z,
|
|
|
|
; EG-NEXT: RECIP_UINT * T2.X, PV.W,
|
|
|
|
; EG-NEXT: MULLO_INT * T4.X, PS, T3.W,
|
|
|
|
; EG-NEXT: SETGT_INT T4.Z, 0.0, T3.Y,
|
|
|
|
; EG-NEXT: SUB_INT T7.W, 0.0, PS,
|
|
|
|
; EG-NEXT: MULHI * T4.Y, T2.X, T3.W,
|
|
|
|
; EG-NEXT: CNDE_INT T4.X, PS, PV.W, T4.X,
|
|
|
|
; EG-NEXT: ADD_INT T3.Y, T3.Y, PV.Z,
|
|
|
|
; EG-NEXT: ADD_INT T5.Z, T0.Z, T1.X,
|
|
|
|
; EG-NEXT: SUB_INT T7.W, T0.Z, T1.X,
|
|
|
|
; EG-NEXT: MULLO_INT * T0.Z, T0.Y, T1.W,
|
|
|
|
; EG-NEXT: CNDE_INT T5.Y, T3.Z, PV.Z, PV.W,
|
|
|
|
; EG-NEXT: XOR_INT T3.Z, PV.Y, T4.Z,
|
|
|
|
; EG-NEXT: SUB_INT T7.W, T1.Y, PS,
|
|
|
|
; EG-NEXT: MULHI * T1.X, PV.X, T2.X,
|
|
|
|
; EG-NEXT: SETGE_UINT T5.Z, PV.W, T1.W,
|
|
|
|
; EG-NEXT: SETGE_UINT T1.W, T1.Y, T0.Z,
|
|
|
|
; EG-NEXT: MULHI * T0.Z, PV.Y, PV.Z,
|
|
|
|
; EG-NEXT: AND_INT T1.Y, PV.Z, PV.W,
|
|
|
|
; EG-NEXT: ADD_INT T5.Z, T0.Y, 1,
|
|
|
|
; EG-NEXT: SETGT_INT T7.W, 0.0, T3.X,
|
|
|
|
; EG-NEXT: MULLO_INT * T3.Y, PS, T1.Z,
|
|
|
|
; EG-NEXT: SUB_INT T4.X, T3.Z, PS,
|
|
|
|
; EG-NEXT: ADD_INT T5.Y, T3.X, PV.W,
|
|
|
|
; EG-NEXT: ADD_INT T6.Z, T2.X, T1.X, BS:VEC_120/SCL_212
|
|
|
|
; EG-NEXT: SUB_INT * T9.W, T2.X, T1.X, BS:VEC_120/SCL_212
|
|
|
|
; EG-NEXT: MULLO_INT * T1.X, T0.X, T2.W,
|
|
|
|
; EG-NEXT: CNDE_INT T2.X, T4.Y, T6.Z, T9.W,
|
|
|
|
; EG-NEXT: XOR_INT T4.Y, T5.Y, T7.W, BS:VEC_201
|
|
|
|
; EG-NEXT: SUB_INT T6.Z, T4.W, PS, BS:VEC_120/SCL_212
|
|
|
|
; EG-NEXT: SETGE_UINT T9.W, T4.X, T1.Z, BS:VEC_102/SCL_221
|
|
|
|
; EG-NEXT: SETGE_UINT * T10.W, T3.Z, T3.Y,
|
|
|
|
; EG-NEXT: AND_INT T3.X, PV.W, PS,
|
|
|
|
; EG-NEXT: ADD_INT T3.Y, T0.Z, 1,
|
|
|
|
; EG-NEXT: SETGE_UINT T1.Z, PV.Z, T2.W,
|
|
|
|
; EG-NEXT: SETGE_UINT T2.W, T4.W, T1.X,
|
|
|
|
; EG-NEXT: MULHI * T1.X, PV.X, PV.Y,
|
|
|
|
; EG-NEXT: AND_INT T2.X, PV.Z, PV.W,
|
|
|
|
; EG-NEXT: ADD_INT T5.Y, T0.X, 1,
|
|
|
|
; EG-NEXT: CNDE_INT T1.Z, PV.X, T0.Z, PV.Y,
|
|
|
|
; EG-NEXT: ADD_INT T4.W, T0.Z, literal.x,
|
|
|
|
; EG-NEXT: MULLO_INT * T0.Z, PS, T3.W,
|
|
|
|
; EG-NEXT: -1(nan), 0(0.000000e+00)
|
|
|
|
; EG-NEXT: CNDE_INT T3.X, T10.W, PV.W, PV.Z,
|
|
|
|
; EG-NEXT: CNDE_INT T3.Y, PV.X, T0.X, PV.Y,
|
|
|
|
; EG-NEXT: CNDE_INT T1.Z, T1.Y, T0.Y, T5.Z,
|
|
|
|
; EG-NEXT: ADD_INT T4.W, T0.Y, literal.x, BS:VEC_120/SCL_212
|
|
|
|
; EG-NEXT: SUB_INT * T9.W, T4.Y, PS,
|
|
|
|
; EG-NEXT: -1(nan), 0(0.000000e+00)
|
|
|
|
; EG-NEXT: ADD_INT T0.X, T0.X, literal.x,
|
|
|
|
; EG-NEXT: SETGE_UINT T0.Y, PS, T3.W,
|
|
|
|
; EG-NEXT: SETGE_UINT T0.Z, T4.Y, T0.Z,
|
|
|
|
; EG-NEXT: CNDE_INT T1.W, T1.W, PV.W, PV.Z,
|
|
|
|
; EG-NEXT: XOR_INT * T3.W, T8.W, T2.Y,
|
|
|
|
; EG-NEXT: -1(nan), 0(0.000000e+00)
|
|
|
|
; EG-NEXT: XOR_INT T2.X, PV.W, PS,
|
|
|
|
; EG-NEXT: AND_INT T0.Y, PV.Y, PV.Z,
|
|
|
|
; EG-NEXT: ADD_INT T1.Z, T1.X, 1,
|
|
|
|
; EG-NEXT: CNDE_INT T1.W, T2.W, PV.X, T3.Y,
|
|
|
|
; EG-NEXT: XOR_INT * T0.W, T5.W, T0.W,
|
|
|
|
; EG-NEXT: XOR_INT T0.X, T4.Z, T6.W, BS:VEC_021/SCL_122
|
|
|
|
; EG-NEXT: XOR_INT T1.Y, PV.W, PS,
|
|
|
|
; EG-NEXT: CNDE_INT T1.Z, PV.Y, T1.X, PV.Z,
|
|
|
|
; EG-NEXT: ADD_INT T1.W, T1.X, literal.x,
|
|
|
|
; EG-NEXT: SUB_INT * T3.W, PV.X, T3.W,
|
|
|
|
; EG-NEXT: -1(nan), 0(0.000000e+00)
|
|
|
|
; EG-NEXT: CNDE_INT T0.Y, T0.Z, PV.W, PV.Z,
|
|
|
|
; EG-NEXT: SUB_INT T3.Z, PV.Y, T0.W,
|
|
|
|
; EG-NEXT: XOR_INT T0.W, T7.W, T2.Z,
|
|
|
|
; EG-NEXT: XOR_INT * T1.W, T3.X, PV.X,
|
|
|
|
; EG-NEXT: SUB_INT T3.Y, PS, T0.X,
|
|
|
|
; EG-NEXT: XOR_INT * T1.W, PV.Y, PV.W,
|
|
|
|
; EG-NEXT: SUB_INT T3.X, PV.W, T0.W,
|
|
|
|
; EG-NEXT: LSHR * T0.X, KC0[2].Y, literal.x,
|
|
|
|
; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
  %den_ptr = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 1
  %num = load <4 x i32>, <4 x i32> addrspace(1) * %in
  %den = load <4 x i32>, <4 x i32> addrspace(1) * %den_ptr
  %result = sdiv <4 x i32> %num, %den
  store <4 x i32> %result, <4 x i32> addrspace(1)* %out
  ret void
}
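
; The divisor here is a splat of the constant 4, so the lowering should use the
; shift-based power-of-two sequence (ashr 31, lshr 30, add, ashr 2) checked
; below instead of the full reciprocal expansion.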
define amdgpu_kernel void @sdiv_v4i32_4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
; GCN-LABEL: sdiv_v4i32_4:
|
|
|
|
; GCN: ; %bb.0:
|
|
|
|
; GCN-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
|
|
|
|
; GCN-NEXT: s_mov_b32 s7, 0xf000
|
|
|
|
; GCN-NEXT: s_mov_b32 s6, -1
|
|
|
|
; GCN-NEXT: s_mov_b32 s10, s6
|
|
|
|
; GCN-NEXT: s_mov_b32 s11, s7
|
|
|
|
; GCN-NEXT: s_waitcnt lgkmcnt(0)
|
|
|
|
; GCN-NEXT: s_mov_b32 s8, s2
|
|
|
|
; GCN-NEXT: s_mov_b32 s9, s3
|
|
|
|
; GCN-NEXT: buffer_load_dwordx4 v[0:3], off, s[8:11], 0
|
|
|
|
; GCN-NEXT: s_mov_b32 s4, s0
|
|
|
|
; GCN-NEXT: s_mov_b32 s5, s1
|
|
|
|
; GCN-NEXT: s_waitcnt vmcnt(0)
|
|
|
|
; GCN-NEXT: v_ashrrev_i32_e32 v4, 31, v0
|
|
|
|
; GCN-NEXT: v_ashrrev_i32_e32 v5, 31, v1
|
|
|
|
; GCN-NEXT: v_ashrrev_i32_e32 v6, 31, v2
|
|
|
|
; GCN-NEXT: v_ashrrev_i32_e32 v7, 31, v3
|
|
|
|
; GCN-NEXT: v_lshrrev_b32_e32 v4, 30, v4
|
|
|
|
; GCN-NEXT: v_lshrrev_b32_e32 v5, 30, v5
|
|
|
|
; GCN-NEXT: v_lshrrev_b32_e32 v6, 30, v6
|
|
|
|
; GCN-NEXT: v_lshrrev_b32_e32 v7, 30, v7
|
|
|
|
; GCN-NEXT: v_add_i32_e32 v0, vcc, v4, v0
|
|
|
|
; GCN-NEXT: v_add_i32_e32 v1, vcc, v5, v1
|
|
|
|
; GCN-NEXT: v_add_i32_e32 v2, vcc, v6, v2
|
|
|
|
; GCN-NEXT: v_add_i32_e32 v3, vcc, v7, v3
|
|
|
|
; GCN-NEXT: v_ashrrev_i32_e32 v0, 2, v0
|
|
|
|
; GCN-NEXT: v_ashrrev_i32_e32 v1, 2, v1
|
|
|
|
; GCN-NEXT: v_ashrrev_i32_e32 v2, 2, v2
|
|
|
|
; GCN-NEXT: v_ashrrev_i32_e32 v3, 2, v3
|
|
|
|
; GCN-NEXT: buffer_store_dwordx4 v[0:3], off, s[4:7], 0
|
|
|
|
; GCN-NEXT: s_endpgm
|
|
|
|
;
|
|
|
|
; TONGA-LABEL: sdiv_v4i32_4:
|
|
|
|
; TONGA: ; %bb.0:
|
|
|
|
; TONGA-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
|
|
|
|
; TONGA-NEXT: s_mov_b32 s3, 0xf000
|
|
|
|
; TONGA-NEXT: s_mov_b32 s2, -1
|
|
|
|
; TONGA-NEXT: s_waitcnt lgkmcnt(0)
|
|
|
|
; TONGA-NEXT: s_mov_b32 s0, s4
|
|
|
|
; TONGA-NEXT: s_mov_b32 s1, s5
|
|
|
|
; TONGA-NEXT: s_mov_b32 s4, s6
|
|
|
|
; TONGA-NEXT: s_mov_b32 s5, s7
|
|
|
|
; TONGA-NEXT: s_mov_b32 s6, s2
|
|
|
|
; TONGA-NEXT: s_mov_b32 s7, s3
|
|
|
|
; TONGA-NEXT: buffer_load_dwordx4 v[0:3], off, s[4:7], 0
|
|
|
|
; TONGA-NEXT: s_waitcnt vmcnt(0)
|
|
|
|
; TONGA-NEXT: v_ashrrev_i32_e32 v4, 31, v0
|
|
|
|
; TONGA-NEXT: v_ashrrev_i32_e32 v5, 31, v1
|
|
|
|
; TONGA-NEXT: v_ashrrev_i32_e32 v6, 31, v2
|
|
|
|
; TONGA-NEXT: v_ashrrev_i32_e32 v7, 31, v3
|
|
|
|
; TONGA-NEXT: v_lshrrev_b32_e32 v4, 30, v4
|
|
|
|
; TONGA-NEXT: v_lshrrev_b32_e32 v5, 30, v5
|
|
|
|
; TONGA-NEXT: v_lshrrev_b32_e32 v6, 30, v6
|
|
|
|
; TONGA-NEXT: v_lshrrev_b32_e32 v7, 30, v7
|
|
|
|
; TONGA-NEXT: v_add_u32_e32 v0, vcc, v4, v0
|
|
|
|
; TONGA-NEXT: v_add_u32_e32 v1, vcc, v5, v1
|
|
|
|
; TONGA-NEXT: v_add_u32_e32 v2, vcc, v6, v2
|
|
|
|
; TONGA-NEXT: v_add_u32_e32 v3, vcc, v7, v3
|
|
|
|
; TONGA-NEXT: v_ashrrev_i32_e32 v0, 2, v0
|
|
|
|
; TONGA-NEXT: v_ashrrev_i32_e32 v1, 2, v1
|
|
|
|
; TONGA-NEXT: v_ashrrev_i32_e32 v2, 2, v2
|
|
|
|
; TONGA-NEXT: v_ashrrev_i32_e32 v3, 2, v3
|
|
|
|
; TONGA-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0
|
|
|
|
; TONGA-NEXT: s_endpgm
|
|
|
|
;
|
|
|
|
; GFX9-LABEL: sdiv_v4i32_4:
|
|
|
|
; GFX9: ; %bb.0:
|
|
|
|
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
|
|
|
|
; GFX9-NEXT: s_mov_b32 s3, 0xf000
|
|
|
|
; GFX9-NEXT: s_mov_b32 s2, -1
|
|
|
|
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
|
|
|
|
; GFX9-NEXT: s_mov_b32 s0, s4
|
|
|
|
; GFX9-NEXT: s_mov_b32 s1, s5
|
|
|
|
; GFX9-NEXT: s_mov_b32 s4, s6
|
|
|
|
; GFX9-NEXT: s_mov_b32 s5, s7
|
|
|
|
; GFX9-NEXT: s_mov_b32 s6, s2
|
|
|
|
; GFX9-NEXT: s_mov_b32 s7, s3
|
|
|
|
; GFX9-NEXT: buffer_load_dwordx4 v[0:3], off, s[4:7], 0
|
|
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
|
|
; GFX9-NEXT: v_ashrrev_i32_e32 v4, 31, v0
|
|
|
|
; GFX9-NEXT: v_ashrrev_i32_e32 v5, 31, v1
|
|
|
|
; GFX9-NEXT: v_ashrrev_i32_e32 v6, 31, v2
|
|
|
|
; GFX9-NEXT: v_ashrrev_i32_e32 v7, 31, v3
|
|
|
|
; GFX9-NEXT: v_lshrrev_b32_e32 v4, 30, v4
|
|
|
|
; GFX9-NEXT: v_lshrrev_b32_e32 v5, 30, v5
|
|
|
|
; GFX9-NEXT: v_lshrrev_b32_e32 v6, 30, v6
|
|
|
|
; GFX9-NEXT: v_lshrrev_b32_e32 v7, 30, v7
|
|
|
|
; GFX9-NEXT: v_add_u32_e32 v0, v0, v4
|
|
|
|
; GFX9-NEXT: v_add_u32_e32 v1, v1, v5
|
|
|
|
; GFX9-NEXT: v_add_u32_e32 v2, v2, v6
|
|
|
|
; GFX9-NEXT: v_add_u32_e32 v3, v3, v7
|
|
|
|
; GFX9-NEXT: v_ashrrev_i32_e32 v0, 2, v0
|
|
|
|
; GFX9-NEXT: v_ashrrev_i32_e32 v1, 2, v1
|
|
|
|
; GFX9-NEXT: v_ashrrev_i32_e32 v2, 2, v2
|
|
|
|
; GFX9-NEXT: v_ashrrev_i32_e32 v3, 2, v3
|
|
|
|
; GFX9-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0
|
|
|
|
; GFX9-NEXT: s_endpgm
|
|
|
|
;
|
|
|
|
; EG-LABEL: sdiv_v4i32_4:
|
|
|
|
; EG: ; %bb.0:
|
|
|
|
; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
|
|
|
|
; EG-NEXT: TEX 0 @6
|
|
|
|
; EG-NEXT: ALU 24, @9, KC0[CB0:0-32], KC1[]
|
|
|
|
; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T1.XYZW, T0.X, 1
|
|
|
|
; EG-NEXT: CF_END
|
|
|
|
; EG-NEXT: PAD
|
|
|
|
; EG-NEXT: Fetch clause starting at 6:
|
|
|
|
; EG-NEXT: VTX_READ_128 T0.XYZW, T0.X, 0, #1
|
|
|
|
; EG-NEXT: ALU clause starting at 8:
|
|
|
|
; EG-NEXT: MOV * T0.X, KC0[2].Z,
|
|
|
|
; EG-NEXT: ALU clause starting at 9:
|
|
|
|
; EG-NEXT: ASHR T1.W, T0.W, literal.x,
|
|
|
|
; EG-NEXT: ASHR * T2.W, T0.Z, literal.x,
|
|
|
|
; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
|
|
|
|
; EG-NEXT: LSHR * T1.W, PV.W, literal.x,
|
|
|
|
; EG-NEXT: 30(4.203895e-44), 0(0.000000e+00)
|
|
|
|
; EG-NEXT: ADD_INT T1.Z, T0.W, PV.W,
|
|
|
|
; EG-NEXT: LSHR T0.W, T2.W, literal.x, BS:VEC_120/SCL_212
|
|
|
|
; EG-NEXT: ASHR * T1.W, T0.Y, literal.y,
|
|
|
|
; EG-NEXT: 30(4.203895e-44), 31(4.344025e-44)
|
|
|
|
; EG-NEXT: LSHR T1.Y, PS, literal.x,
|
|
|
|
; EG-NEXT: ASHR T2.Z, T0.X, literal.y,
|
|
|
|
; EG-NEXT: ADD_INT T0.W, T0.Z, PV.W,
|
|
|
|
; EG-NEXT: ASHR * T1.W, PV.Z, literal.z,
|
|
|
|
; EG-NEXT: 30(4.203895e-44), 31(4.344025e-44)
|
|
|
|
; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
|
|
|
|
; EG-NEXT: ASHR T1.Z, PV.W, literal.x,
|
|
|
|
; EG-NEXT: LSHR T0.W, PV.Z, literal.y,
|
|
|
|
; EG-NEXT: ADD_INT * T2.W, T0.Y, PV.Y,
|
|
|
|
; EG-NEXT: 2(2.802597e-45), 30(4.203895e-44)
|
|
|
|
; EG-NEXT: ASHR T1.Y, PS, literal.x,
|
|
|
|
; EG-NEXT: ADD_INT * T0.W, T0.X, PV.W,
|
|
|
|
; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
|
|
|
|
; EG-NEXT: ASHR T1.X, PV.W, literal.x,
|
|
|
|
; EG-NEXT: LSHR * T0.X, KC0[2].Y, literal.x,
|
|
|
|
; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
  %num = load <4 x i32>, <4 x i32> addrspace(1) * %in
  %result = sdiv <4 x i32> %num, <i32 4, i32 4, i32 4, i32 4>
  store <4 x i32> %result, <4 x i32> addrspace(1)* %out
  ret void
}
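
; i8 operands fit in the f32 mantissa, so the GCN targets should select the
; float reciprocal path (cvt_f32_i32 + rcp_iflag) and sign-extend the result
; back to i32 with a bfe.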
define amdgpu_kernel void @v_sdiv_i8(i32 addrspace(1)* %out, i8 addrspace(1)* %in) {
; GCN-LABEL: v_sdiv_i8:
|
|
|
|
; GCN: ; %bb.0:
|
|
|
|
; GCN-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
|
|
|
|
; GCN-NEXT: s_mov_b32 s7, 0xf000
|
|
|
|
; GCN-NEXT: s_mov_b32 s6, -1
|
|
|
|
; GCN-NEXT: s_mov_b32 s10, s6
|
|
|
|
; GCN-NEXT: s_mov_b32 s11, s7
|
|
|
|
; GCN-NEXT: s_waitcnt lgkmcnt(0)
|
|
|
|
; GCN-NEXT: s_mov_b32 s8, s2
|
|
|
|
; GCN-NEXT: s_mov_b32 s9, s3
|
|
|
|
; GCN-NEXT: buffer_load_sbyte v0, off, s[8:11], 0
|
|
|
|
; GCN-NEXT: buffer_load_sbyte v1, off, s[8:11], 0 offset:1
|
|
|
|
; GCN-NEXT: s_mov_b32 s4, s0
|
|
|
|
; GCN-NEXT: s_mov_b32 s5, s1
|
|
|
|
; GCN-NEXT: s_waitcnt vmcnt(0)
|
|
|
|
; GCN-NEXT: v_xor_b32_e32 v2, v0, v1
|
|
|
|
; GCN-NEXT: v_cvt_f32_i32_e32 v0, v0
|
|
|
|
; GCN-NEXT: v_cvt_f32_i32_e32 v1, v1
|
|
|
|
; GCN-NEXT: v_ashrrev_i32_e32 v2, 30, v2
|
|
|
|
; GCN-NEXT: v_rcp_iflag_f32_e32 v3, v1
|
|
|
|
; GCN-NEXT: v_or_b32_e32 v2, 1, v2
|
|
|
|
; GCN-NEXT: v_mul_f32_e32 v3, v0, v3
|
|
|
|
; GCN-NEXT: v_trunc_f32_e32 v3, v3
|
|
|
|
; GCN-NEXT: v_mad_f32 v0, -v3, v1, v0
|
|
|
|
; GCN-NEXT: v_cvt_i32_f32_e32 v3, v3
|
|
|
|
; GCN-NEXT: v_cmp_ge_f32_e64 vcc, |v0|, |v1|
|
|
|
|
; GCN-NEXT: v_cndmask_b32_e32 v0, 0, v2, vcc
|
|
|
|
; GCN-NEXT: v_add_i32_e32 v0, vcc, v0, v3
|
|
|
|
; GCN-NEXT: v_bfe_i32 v0, v0, 0, 8
|
|
|
|
; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0
|
|
|
|
; GCN-NEXT: s_endpgm
|
|
|
|
;
|
|
|
|
; TONGA-LABEL: v_sdiv_i8:
|
|
|
|
; TONGA: ; %bb.0:
|
|
|
|
; TONGA-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
|
|
|
|
; TONGA-NEXT: s_mov_b32 s3, 0xf000
|
|
|
|
; TONGA-NEXT: s_mov_b32 s2, -1
|
|
|
|
; TONGA-NEXT: s_waitcnt lgkmcnt(0)
|
|
|
|
; TONGA-NEXT: s_mov_b32 s0, s4
|
|
|
|
; TONGA-NEXT: s_mov_b32 s1, s5
|
|
|
|
; TONGA-NEXT: s_mov_b32 s4, s6
|
|
|
|
; TONGA-NEXT: s_mov_b32 s5, s7
|
|
|
|
; TONGA-NEXT: s_mov_b32 s6, s2
|
|
|
|
; TONGA-NEXT: s_mov_b32 s7, s3
|
|
|
|
; TONGA-NEXT: buffer_load_sbyte v0, off, s[4:7], 0 offset:1
|
|
|
|
; TONGA-NEXT: buffer_load_sbyte v2, off, s[4:7], 0
|
|
|
|
; TONGA-NEXT: s_waitcnt vmcnt(1)
|
|
|
|
; TONGA-NEXT: v_cvt_f32_i32_e32 v1, v0
|
|
|
|
; TONGA-NEXT: s_waitcnt vmcnt(0)
|
|
|
|
; TONGA-NEXT: v_cvt_f32_i32_e32 v3, v2
|
|
|
|
; TONGA-NEXT: v_xor_b32_e32 v0, v2, v0
|
|
|
|
; TONGA-NEXT: v_ashrrev_i32_e32 v0, 30, v0
|
|
|
|
; TONGA-NEXT: v_rcp_iflag_f32_e32 v4, v1
|
|
|
|
; TONGA-NEXT: v_or_b32_e32 v0, 1, v0
|
|
|
|
; TONGA-NEXT: v_mul_f32_e32 v2, v3, v4
|
|
|
|
; TONGA-NEXT: v_trunc_f32_e32 v2, v2
|
|
|
|
; TONGA-NEXT: v_mad_f32 v3, -v2, v1, v3
|
|
|
|
; TONGA-NEXT: v_cvt_i32_f32_e32 v2, v2
|
|
|
|
; TONGA-NEXT: v_cmp_ge_f32_e64 vcc, |v3|, |v1|
|
|
|
|
; TONGA-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc
|
|
|
|
; TONGA-NEXT: v_add_u32_e32 v0, vcc, v0, v2
|
|
|
|
; TONGA-NEXT: v_bfe_i32 v0, v0, 0, 8
|
|
|
|
; TONGA-NEXT: buffer_store_dword v0, off, s[0:3], 0
|
|
|
|
; TONGA-NEXT: s_endpgm
|
|
|
|
;
|
|
|
|
; GFX9-LABEL: v_sdiv_i8:
|
|
|
|
; GFX9: ; %bb.0:
|
|
|
|
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
|
|
|
|
; GFX9-NEXT: s_mov_b32 s3, 0xf000
|
|
|
|
; GFX9-NEXT: s_mov_b32 s2, -1
|
|
|
|
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
|
|
|
|
; GFX9-NEXT: s_mov_b32 s0, s4
|
|
|
|
; GFX9-NEXT: s_mov_b32 s1, s5
|
|
|
|
; GFX9-NEXT: s_mov_b32 s4, s6
|
|
|
|
; GFX9-NEXT: s_mov_b32 s5, s7
|
|
|
|
; GFX9-NEXT: s_mov_b32 s6, s2
|
|
|
|
; GFX9-NEXT: s_mov_b32 s7, s3
|
|
|
|
; GFX9-NEXT: buffer_load_sbyte v0, off, s[4:7], 0 offset:1
|
|
|
|
; GFX9-NEXT: buffer_load_sbyte v2, off, s[4:7], 0
|
|
|
|
; GFX9-NEXT: s_waitcnt vmcnt(1)
|
|
|
|
; GFX9-NEXT: v_cvt_f32_i32_e32 v1, v0
|
|
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
|
|
; GFX9-NEXT: v_cvt_f32_i32_e32 v3, v2
|
|
|
|
; GFX9-NEXT: v_xor_b32_e32 v0, v2, v0
|
|
|
|
; GFX9-NEXT: v_ashrrev_i32_e32 v0, 30, v0
|
|
|
|
; GFX9-NEXT: v_rcp_iflag_f32_e32 v4, v1
|
|
|
|
; GFX9-NEXT: v_or_b32_e32 v0, 1, v0
|
|
|
|
; GFX9-NEXT: v_mul_f32_e32 v2, v3, v4
|
|
|
|
; GFX9-NEXT: v_trunc_f32_e32 v2, v2
|
|
|
|
; GFX9-NEXT: v_cvt_i32_f32_e32 v4, v2
|
|
|
|
; GFX9-NEXT: v_mad_f32 v2, -v2, v1, v3
|
|
|
|
; GFX9-NEXT: v_cmp_ge_f32_e64 vcc, |v2|, |v1|
|
|
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc
|
|
|
|
; GFX9-NEXT: v_add_u32_e32 v0, v4, v0
|
|
|
|
; GFX9-NEXT: v_bfe_i32 v0, v0, 0, 8
|
|
|
|
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], 0
|
|
|
|
; GFX9-NEXT: s_endpgm
|
|
|
|
;
|
|
|
|
; EG-LABEL: v_sdiv_i8:
|
|
|
|
; EG: ; %bb.0:
|
|
|
|
; EG-NEXT: ALU 0, @10, KC0[CB0:0-32], KC1[]
|
|
|
|
; EG-NEXT: TEX 1 @6
|
|
|
|
; EG-NEXT: ALU 21, @11, KC0[CB0:0-32], KC1[]
|
|
|
|
; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
|
|
|
|
; EG-NEXT: CF_END
|
|
|
|
; EG-NEXT: PAD
|
|
|
|
; EG-NEXT: Fetch clause starting at 6:
|
|
|
|
; EG-NEXT: VTX_READ_8 T1.X, T0.X, 1, #1
|
|
|
|
; EG-NEXT: VTX_READ_8 T0.X, T0.X, 0, #1
|
|
|
|
; EG-NEXT: ALU clause starting at 10:
|
|
|
|
; EG-NEXT: MOV * T0.X, KC0[2].Z,
|
|
|
|
; EG-NEXT: ALU clause starting at 11:
|
|
|
|
; EG-NEXT: BFE_INT * T0.W, T1.X, 0.0, literal.x,
|
|
|
|
; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00)
|
|
|
|
; EG-NEXT: INT_TO_FLT * T0.Y, PV.W,
|
|
|
|
; EG-NEXT: BFE_INT T1.W, T0.X, 0.0, literal.x,
|
|
|
|
; EG-NEXT: RECIP_IEEE * T0.X, PS,
|
|
|
|
; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00)
|
|
|
|
; EG-NEXT: INT_TO_FLT * T0.Z, PV.W,
|
|
|
|
; EG-NEXT: MUL_IEEE * T2.W, PS, T0.X,
|
|
|
|
; EG-NEXT: TRUNC T2.W, PV.W,
|
|
|
|
; EG-NEXT: XOR_INT * T0.W, T1.W, T0.W,
|
|
|
|
; EG-NEXT: ASHR T0.W, PS, literal.x,
|
|
|
|
; EG-NEXT: MULADD_IEEE * T1.W, -PV.W, T0.Y, T0.Z,
|
|
|
|
; EG-NEXT: 30(4.203895e-44), 0(0.000000e+00)
|
|
|
|
; EG-NEXT: TRUNC T0.Z, T2.W,
|
|
|
|
; EG-NEXT: SETGE T1.W, |PS|, |T0.Y|,
|
|
|
|
; EG-NEXT: OR_INT * T0.W, PV.W, 1,
|
|
|
|
; EG-NEXT: CNDE T0.W, PV.W, 0.0, PS,
|
|
|
|
; EG-NEXT: FLT_TO_INT * T1.W, PV.Z,
|
|
|
|
; EG-NEXT: ADD_INT * T0.W, PS, PV.W,
|
|
|
|
; EG-NEXT: BFE_INT T0.X, PV.W, 0.0, literal.x,
|
|
|
|
; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.y,
|
|
|
|
; EG-NEXT: 8(1.121039e-44), 2(2.802597e-45)
  %den_ptr = getelementptr i8, i8 addrspace(1)* %in, i8 1
  %num = load i8, i8 addrspace(1) * %in
  %den = load i8, i8 addrspace(1) * %den_ptr
  %result = sdiv i8 %num, %den
  %result.ext = sext i8 %result to i32
  store i32 %result.ext, i32 addrspace(1)* %out
  ret void
}
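
; i23 still fits in the f32 mantissa, so the same float reciprocal path should
; be used after the operands are sign-extended with a 23-bit bfe.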
define amdgpu_kernel void @v_sdiv_i23(i32 addrspace(1)* %out, i23 addrspace(1)* %in) {
; GCN-LABEL: v_sdiv_i23:
|
|
|
|
; GCN: ; %bb.0:
|
|
|
|
; GCN-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
|
|
|
|
; GCN-NEXT: s_mov_b32 s7, 0xf000
|
|
|
|
; GCN-NEXT: s_mov_b32 s6, -1
|
|
|
|
; GCN-NEXT: s_mov_b32 s10, s6
|
|
|
|
; GCN-NEXT: s_mov_b32 s11, s7
|
|
|
|
; GCN-NEXT: s_waitcnt lgkmcnt(0)
|
|
|
|
; GCN-NEXT: s_mov_b32 s4, s0
|
|
|
|
; GCN-NEXT: s_mov_b32 s5, s1
|
|
|
|
; GCN-NEXT: s_mov_b32 s8, s2
|
|
|
|
; GCN-NEXT: s_mov_b32 s9, s3
|
|
|
|
; GCN-NEXT: buffer_load_ubyte v1, off, s[8:11], 0 offset:2
|
|
|
|
; GCN-NEXT: buffer_load_ubyte v3, off, s[8:11], 0 offset:6
|
|
|
|
; GCN-NEXT: buffer_load_ushort v0, off, s[8:11], 0
|
|
|
|
; GCN-NEXT: buffer_load_ushort v2, off, s[8:11], 0 offset:4
|
|
|
|
; GCN-NEXT: s_waitcnt vmcnt(3)
|
|
|
|
; GCN-NEXT: v_lshlrev_b32_e32 v1, 16, v1
|
|
|
|
; GCN-NEXT: s_waitcnt vmcnt(2)
|
|
|
|
; GCN-NEXT: v_lshlrev_b32_e32 v3, 16, v3
|
|
|
|
; GCN-NEXT: s_waitcnt vmcnt(1)
|
|
|
|
; GCN-NEXT: v_or_b32_e32 v0, v0, v1
|
|
|
|
; GCN-NEXT: s_waitcnt vmcnt(0)
|
|
|
|
; GCN-NEXT: v_or_b32_e32 v1, v2, v3
|
|
|
|
; GCN-NEXT: v_bfe_i32 v0, v0, 0, 23
|
|
|
|
; GCN-NEXT: v_bfe_i32 v1, v1, 0, 23
|
|
|
|
; GCN-NEXT: v_xor_b32_e32 v2, v0, v1
|
|
|
|
; GCN-NEXT: v_cvt_f32_i32_e32 v0, v0
|
|
|
|
; GCN-NEXT: v_cvt_f32_i32_e32 v1, v1
|
|
|
|
; GCN-NEXT: v_ashrrev_i32_e32 v2, 30, v2
|
|
|
|
; GCN-NEXT: v_rcp_iflag_f32_e32 v3, v1
|
|
|
|
; GCN-NEXT: v_or_b32_e32 v2, 1, v2
|
|
|
|
; GCN-NEXT: v_mul_f32_e32 v3, v0, v3
|
|
|
|
; GCN-NEXT: v_trunc_f32_e32 v3, v3
|
|
|
|
; GCN-NEXT: v_mad_f32 v0, -v3, v1, v0
|
|
|
|
; GCN-NEXT: v_cvt_i32_f32_e32 v3, v3
|
|
|
|
; GCN-NEXT: v_cmp_ge_f32_e64 vcc, |v0|, |v1|
|
|
|
|
; GCN-NEXT: v_cndmask_b32_e32 v0, 0, v2, vcc
|
|
|
|
; GCN-NEXT: v_add_i32_e32 v0, vcc, v0, v3
|
|
|
|
; GCN-NEXT: v_bfe_i32 v0, v0, 0, 23
|
|
|
|
; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0
|
|
|
|
; GCN-NEXT: s_endpgm
|
|
|
|
;
|
|
|
|
; TONGA-LABEL: v_sdiv_i23:
|
|
|
|
; TONGA: ; %bb.0:
|
|
|
|
; TONGA-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
|
|
|
|
; TONGA-NEXT: s_mov_b32 s3, 0xf000
|
|
|
|
; TONGA-NEXT: s_mov_b32 s2, -1
|
|
|
|
; TONGA-NEXT: s_waitcnt lgkmcnt(0)
|
|
|
|
; TONGA-NEXT: s_mov_b32 s0, s4
|
|
|
|
; TONGA-NEXT: s_mov_b32 s1, s5
|
|
|
|
; TONGA-NEXT: s_mov_b32 s4, s6
|
|
|
|
; TONGA-NEXT: s_mov_b32 s5, s7
|
|
|
|
; TONGA-NEXT: s_mov_b32 s6, s2
|
|
|
|
; TONGA-NEXT: s_mov_b32 s7, s3
|
|
|
|
; TONGA-NEXT: buffer_load_ubyte v0, off, s[4:7], 0 offset:2
|
|
|
|
; TONGA-NEXT: buffer_load_ushort v1, off, s[4:7], 0 offset:4
|
|
|
|
; TONGA-NEXT: buffer_load_ubyte v2, off, s[4:7], 0 offset:6
|
|
|
|
; TONGA-NEXT: buffer_load_ushort v3, off, s[4:7], 0
|
|
|
|
; TONGA-NEXT: s_waitcnt vmcnt(3)
|
|
|
|
; TONGA-NEXT: v_lshlrev_b32_e32 v0, 16, v0
|
|
|
|
; TONGA-NEXT: s_waitcnt vmcnt(1)
|
|
|
|
; TONGA-NEXT: v_lshlrev_b32_e32 v2, 16, v2
|
|
|
|
; TONGA-NEXT: v_or_b32_e32 v1, v1, v2
|
|
|
|
; TONGA-NEXT: v_bfe_i32 v1, v1, 0, 23
|
|
|
|
; TONGA-NEXT: v_cvt_f32_i32_e32 v2, v1
|
|
|
|
; TONGA-NEXT: s_waitcnt vmcnt(0)
|
|
|
|
; TONGA-NEXT: v_or_b32_e32 v0, v3, v0
|
|
|
|
; TONGA-NEXT: v_bfe_i32 v0, v0, 0, 23
|
|
|
|
; TONGA-NEXT: v_cvt_f32_i32_e32 v3, v0
|
|
|
|
; TONGA-NEXT: v_rcp_iflag_f32_e32 v4, v2
|
|
|
|
; TONGA-NEXT: v_xor_b32_e32 v0, v0, v1
|
|
|
|
; TONGA-NEXT: v_ashrrev_i32_e32 v0, 30, v0
|
|
|
|
; TONGA-NEXT: v_or_b32_e32 v0, 1, v0
|
|
|
|
; TONGA-NEXT: v_mul_f32_e32 v1, v3, v4
|
|
|
|
; TONGA-NEXT: v_trunc_f32_e32 v1, v1
|
|
|
|
; TONGA-NEXT: v_mad_f32 v3, -v1, v2, v3
|
|
|
|
; TONGA-NEXT: v_cvt_i32_f32_e32 v1, v1
|
|
|
|
; TONGA-NEXT: v_cmp_ge_f32_e64 vcc, |v3|, |v2|
|
|
|
|
; TONGA-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc
|
|
|
|
; TONGA-NEXT: v_add_u32_e32 v0, vcc, v0, v1
|
|
|
|
; TONGA-NEXT: v_bfe_i32 v0, v0, 0, 23
|
|
|
|
; TONGA-NEXT: buffer_store_dword v0, off, s[0:3], 0
|
|
|
|
; TONGA-NEXT: s_endpgm
|
|
|
|
;
|
|
|
|
; GFX9-LABEL: v_sdiv_i23:
|
|
|
|
; GFX9: ; %bb.0:
|
|
|
|
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
|
|
|
|
; GFX9-NEXT: s_mov_b32 s3, 0xf000
|
|
|
|
; GFX9-NEXT: s_mov_b32 s2, -1
|
|
|
|
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
|
|
|
|
; GFX9-NEXT: s_mov_b32 s0, s4
|
|
|
|
; GFX9-NEXT: s_mov_b32 s1, s5
|
|
|
|
; GFX9-NEXT: s_mov_b32 s4, s6
|
|
|
|
; GFX9-NEXT: s_mov_b32 s5, s7
|
|
|
|
; GFX9-NEXT: s_mov_b32 s6, s2
|
|
|
|
; GFX9-NEXT: s_mov_b32 s7, s3
|
|
|
|
; GFX9-NEXT: buffer_load_ubyte v0, off, s[4:7], 0 offset:2
|
|
|
|
; GFX9-NEXT: buffer_load_ushort v1, off, s[4:7], 0 offset:4
|
|
|
|
; GFX9-NEXT: buffer_load_ubyte v2, off, s[4:7], 0 offset:6
|
|
|
|
; GFX9-NEXT: buffer_load_ushort v3, off, s[4:7], 0
|
|
|
|
; GFX9-NEXT: s_waitcnt vmcnt(3)
|
|
|
|
; GFX9-NEXT: v_lshlrev_b32_e32 v0, 16, v0
|
|
|
|
; GFX9-NEXT: s_waitcnt vmcnt(1)
|
|
|
|
; GFX9-NEXT: v_lshlrev_b32_e32 v2, 16, v2
|
|
|
|
; GFX9-NEXT: v_or_b32_e32 v1, v1, v2
|
|
|
|
; GFX9-NEXT: v_bfe_i32 v1, v1, 0, 23
|
|
|
|
; GFX9-NEXT: v_cvt_f32_i32_e32 v2, v1
|
|
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
|
|
; GFX9-NEXT: v_or_b32_e32 v0, v3, v0
|
|
|
|
; GFX9-NEXT: v_bfe_i32 v0, v0, 0, 23
|
|
|
|
; GFX9-NEXT: v_cvt_f32_i32_e32 v3, v0
|
|
|
|
; GFX9-NEXT: v_rcp_iflag_f32_e32 v4, v2
|
|
|
|
; GFX9-NEXT: v_xor_b32_e32 v0, v0, v1
|
|
|
|
; GFX9-NEXT: v_ashrrev_i32_e32 v0, 30, v0
|
|
|
|
; GFX9-NEXT: v_or_b32_e32 v0, 1, v0
|
|
|
|
; GFX9-NEXT: v_mul_f32_e32 v1, v3, v4
|
|
|
|
; GFX9-NEXT: v_trunc_f32_e32 v1, v1
|
|
|
|
; GFX9-NEXT: v_cvt_i32_f32_e32 v4, v1
|
|
|
|
; GFX9-NEXT: v_mad_f32 v1, -v1, v2, v3
|
|
|
|
; GFX9-NEXT: v_cmp_ge_f32_e64 vcc, |v1|, |v2|
|
|
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc
|
|
|
|
; GFX9-NEXT: v_add_u32_e32 v0, v4, v0
|
|
|
|
; GFX9-NEXT: v_bfe_i32 v0, v0, 0, 23
|
|
|
|
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], 0
|
|
|
|
; GFX9-NEXT: s_endpgm
|
|
|
|
;
|
|
|
|
; EG-LABEL: v_sdiv_i23:
|
|
|
|
; EG: ; %bb.0:
|
|
|
|
; EG-NEXT: ALU 0, @14, KC0[CB0:0-32], KC1[]
|
|
|
|
; EG-NEXT: TEX 3 @6
|
|
|
|
; EG-NEXT: ALU 33, @15, KC0[CB0:0-32], KC1[]
|
|
|
|
; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
|
|
|
|
; EG-NEXT: CF_END
|
|
|
|
; EG-NEXT: PAD
|
|
|
|
; EG-NEXT: Fetch clause starting at 6:
|
|
|
|
; EG-NEXT: VTX_READ_8 T1.X, T0.X, 6, #1
|
|
|
|
; EG-NEXT: VTX_READ_16 T2.X, T0.X, 0, #1
|
|
|
|
; EG-NEXT: VTX_READ_8 T3.X, T0.X, 2, #1
|
|
|
|
; EG-NEXT: VTX_READ_16 T0.X, T0.X, 4, #1
|
|
|
|
; EG-NEXT: ALU clause starting at 14:
|
|
|
|
; EG-NEXT: MOV * T0.X, KC0[2].Z,
|
|
|
|
; EG-NEXT: ALU clause starting at 15:
|
|
|
|
; EG-NEXT: LSHL * T0.W, T1.X, literal.x,
|
|
|
|
; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00)
|
|
|
|
; EG-NEXT: OR_INT T0.W, T0.X, PV.W,
|
|
|
|
; EG-NEXT: LSHL * T1.W, T3.X, literal.x,
|
|
|
|
; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00)
|
|
|
|
; EG-NEXT: LSHL * T0.W, PV.W, literal.x,
|
|
|
|
; EG-NEXT: 9(1.261169e-44), 0(0.000000e+00)
|
|
|
|
; EG-NEXT: ASHR T0.W, PV.W, literal.x,
|
|
|
|
; EG-NEXT: OR_INT * T1.W, T2.X, T1.W,
|
|
|
|
; EG-NEXT: 9(1.261169e-44), 0(0.000000e+00)
|
|
|
|
; EG-NEXT: LSHL T1.W, PS, literal.x,
|
|
|
|
; EG-NEXT: INT_TO_FLT * T0.X, PV.W,
|
|
|
|
; EG-NEXT: 9(1.261169e-44), 0(0.000000e+00)
|
|
|
|
; EG-NEXT: ASHR T1.W, PV.W, literal.x,
|
|
|
|
; EG-NEXT: RECIP_IEEE * T0.Y, PS,
|
|
|
|
; EG-NEXT: 9(1.261169e-44), 0(0.000000e+00)
|
|
|
|
; EG-NEXT: INT_TO_FLT * T0.Z, PV.W,
|
|
|
|
; EG-NEXT: MUL_IEEE * T2.W, PS, T0.Y,
|
|
|
|
; EG-NEXT: TRUNC T2.W, PV.W,
|
|
|
|
; EG-NEXT: XOR_INT * T0.W, T1.W, T0.W,
|
|
|
|
; EG-NEXT: ASHR T0.W, PS, literal.x,
|
|
|
|
; EG-NEXT: MULADD_IEEE * T1.W, -PV.W, T0.X, T0.Z,
|
|
|
|
; EG-NEXT: 30(4.203895e-44), 0(0.000000e+00)
|
|
|
|
; EG-NEXT: TRUNC T0.Z, T2.W,
|
|
|
|
; EG-NEXT: SETGE T1.W, |PS|, |T0.X|,
|
|
|
|
; EG-NEXT: OR_INT * T0.W, PV.W, 1,
|
|
|
|
; EG-NEXT: CNDE T0.W, PV.W, 0.0, PS,
|
|
|
|
; EG-NEXT: FLT_TO_INT * T1.W, PV.Z,
|
|
|
|
; EG-NEXT: ADD_INT * T0.W, PS, PV.W,
|
|
|
|
; EG-NEXT: LSHL * T0.W, PV.W, literal.x,
|
|
|
|
; EG-NEXT: 9(1.261169e-44), 0(0.000000e+00)
|
|
|
|
; EG-NEXT: ASHR T0.X, PV.W, literal.x,
|
|
|
|
; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.y,
|
|
|
|
; EG-NEXT: 9(1.261169e-44), 2(2.802597e-45)
  %den_ptr = getelementptr i23, i23 addrspace(1)* %in, i23 1
  %num = load i23, i23 addrspace(1) * %in
  %den = load i23, i23 addrspace(1) * %den_ptr
  %result = sdiv i23 %num, %den
  %result.ext = sext i23 %result to i32
  store i32 %result.ext, i32 addrspace(1)* %out
  ret void
}
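
; i24 is the widest type that still fits in the f32 mantissa, so the GCN
; targets should keep using the float reciprocal path here.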
define amdgpu_kernel void @v_sdiv_i24(i32 addrspace(1)* %out, i24 addrspace(1)* %in) {
; GCN-LABEL: v_sdiv_i24:
|
|
|
|
; GCN: ; %bb.0:
|
|
|
|
; GCN-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
|
|
|
|
; GCN-NEXT: s_mov_b32 s7, 0xf000
|
|
|
|
; GCN-NEXT: s_mov_b32 s6, -1
|
|
|
|
; GCN-NEXT: s_mov_b32 s10, s6
|
|
|
|
; GCN-NEXT: s_mov_b32 s11, s7
|
|
|
|
; GCN-NEXT: s_waitcnt lgkmcnt(0)
|
|
|
|
; GCN-NEXT: s_mov_b32 s4, s0
|
|
|
|
; GCN-NEXT: s_mov_b32 s5, s1
|
|
|
|
; GCN-NEXT: s_mov_b32 s8, s2
|
|
|
|
; GCN-NEXT: s_mov_b32 s9, s3
|
|
|
|
; GCN-NEXT: buffer_load_sbyte v1, off, s[8:11], 0 offset:2
|
|
|
|
; GCN-NEXT: buffer_load_sbyte v3, off, s[8:11], 0 offset:6
|
|
|
|
; GCN-NEXT: buffer_load_ushort v0, off, s[8:11], 0
|
|
|
|
; GCN-NEXT: buffer_load_ushort v2, off, s[8:11], 0 offset:4
|
|
|
|
; GCN-NEXT: s_waitcnt vmcnt(3)
|
|
|
|
; GCN-NEXT: v_lshlrev_b32_e32 v1, 16, v1
|
|
|
|
; GCN-NEXT: s_waitcnt vmcnt(2)
|
|
|
|
; GCN-NEXT: v_lshlrev_b32_e32 v3, 16, v3
|
|
|
|
; GCN-NEXT: s_waitcnt vmcnt(1)
|
|
|
|
; GCN-NEXT: v_or_b32_e32 v0, v0, v1
|
|
|
|
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT:    v_or_b32_e32 v2, v2, v3
; GCN-NEXT:    v_xor_b32_e32 v1, v1, v3
; GCN-NEXT:    v_ashrrev_i32_e32 v1, 30, v1
; GCN-NEXT:    v_cvt_f32_i32_e32 v0, v0
; GCN-NEXT:    v_cvt_f32_i32_e32 v2, v2
; GCN-NEXT:    v_or_b32_e32 v1, 1, v1
; GCN-NEXT:    v_rcp_iflag_f32_e32 v3, v2
; GCN-NEXT:    v_mul_f32_e32 v3, v0, v3
; GCN-NEXT:    v_trunc_f32_e32 v3, v3
; GCN-NEXT:    v_mad_f32 v0, -v3, v2, v0
; GCN-NEXT:    v_cvt_i32_f32_e32 v3, v3
; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v0|, |v2|
; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v1, vcc
; GCN-NEXT: v_add_i32_e32 v0, vcc, v0, v3
|
|
|
|
; GCN-NEXT: v_bfe_i32 v0, v0, 0, 24
|
|
|
|
; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0
|
|
|
|
; GCN-NEXT: s_endpgm
|
|
|
|
;
|
|
|
|
; TONGA-LABEL: v_sdiv_i24:
|
|
|
|
; TONGA: ; %bb.0:
|
|
|
|
; TONGA-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
|
|
|
|
; TONGA-NEXT: s_mov_b32 s3, 0xf000
|
|
|
|
; TONGA-NEXT: s_mov_b32 s2, -1
|
|
|
|
; TONGA-NEXT: s_waitcnt lgkmcnt(0)
|
|
|
|
; TONGA-NEXT: s_mov_b32 s0, s4
|
|
|
|
; TONGA-NEXT: s_mov_b32 s1, s5
|
|
|
|
; TONGA-NEXT: s_mov_b32 s4, s6
|
|
|
|
; TONGA-NEXT: s_mov_b32 s5, s7
|
|
|
|
; TONGA-NEXT: s_mov_b32 s6, s2
|
|
|
|
; TONGA-NEXT: s_mov_b32 s7, s3
|
|
|
|
; TONGA-NEXT: buffer_load_sbyte v0, off, s[4:7], 0 offset:2
|
|
|
|
; TONGA-NEXT: buffer_load_ushort v1, off, s[4:7], 0 offset:4
|
|
|
|
; TONGA-NEXT: buffer_load_sbyte v2, off, s[4:7], 0 offset:6
|
|
|
|
; TONGA-NEXT: buffer_load_ushort v3, off, s[4:7], 0
|
|
|
|
; TONGA-NEXT: s_waitcnt vmcnt(3)
|
|
|
|
; TONGA-NEXT: v_lshlrev_b32_e32 v0, 16, v0
|
|
|
|
; TONGA-NEXT: s_waitcnt vmcnt(1)
|
|
|
|
; TONGA-NEXT: v_lshlrev_b32_e32 v2, 16, v2
|
|
|
|
; TONGA-NEXT: v_or_b32_e32 v1, v1, v2
; TONGA-NEXT:    v_cvt_f32_i32_e32 v1, v1
; TONGA-NEXT:    s_waitcnt vmcnt(0)
; TONGA-NEXT:    v_or_b32_e32 v3, v3, v0
; TONGA-NEXT:    v_cvt_f32_i32_e32 v3, v3
; TONGA-NEXT:    v_xor_b32_e32 v0, v0, v2
; TONGA-NEXT:    v_rcp_iflag_f32_e32 v4, v1
; TONGA-NEXT:    v_ashrrev_i32_e32 v0, 30, v0
; TONGA-NEXT:    v_or_b32_e32 v0, 1, v0
; TONGA-NEXT:    v_mul_f32_e32 v2, v3, v4
; TONGA-NEXT:    v_trunc_f32_e32 v2, v2
; TONGA-NEXT:    v_mad_f32 v3, -v2, v1, v3
; TONGA-NEXT:    v_cvt_i32_f32_e32 v2, v2
; TONGA-NEXT:    v_cmp_ge_f32_e64 vcc, |v3|, |v1|
; TONGA-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
; TONGA-NEXT:    v_add_u32_e32 v0, vcc, v0, v2
; TONGA-NEXT: v_bfe_i32 v0, v0, 0, 24
|
|
|
|
; TONGA-NEXT: buffer_store_dword v0, off, s[0:3], 0
|
|
|
|
; TONGA-NEXT: s_endpgm
|
|
|
|
;
|
|
|
|
; GFX9-LABEL: v_sdiv_i24:
|
|
|
|
; GFX9: ; %bb.0:
|
|
|
|
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
|
|
|
|
; GFX9-NEXT: s_mov_b32 s3, 0xf000
|
|
|
|
; GFX9-NEXT: s_mov_b32 s2, -1
|
|
|
|
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
|
|
|
|
; GFX9-NEXT: s_mov_b32 s0, s4
|
|
|
|
; GFX9-NEXT: s_mov_b32 s1, s5
|
|
|
|
; GFX9-NEXT: s_mov_b32 s4, s6
|
|
|
|
; GFX9-NEXT: s_mov_b32 s5, s7
|
|
|
|
; GFX9-NEXT: s_mov_b32 s6, s2
|
|
|
|
; GFX9-NEXT: s_mov_b32 s7, s3
|
|
|
|
; GFX9-NEXT: buffer_load_ushort v0, off, s[4:7], 0
|
|
|
|
; GFX9-NEXT: buffer_load_sbyte v1, off, s[4:7], 0 offset:2
|
|
|
|
; GFX9-NEXT: buffer_load_ushort v2, off, s[4:7], 0 offset:4
|
|
|
|
; GFX9-NEXT: buffer_load_sbyte v3, off, s[4:7], 0 offset:6
|
|
|
|
; GFX9-NEXT: s_waitcnt vmcnt(2)
|
|
|
|
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 16, v1
|
|
|
|
; GFX9-NEXT: v_or_b32_e32 v0, v0, v1
|
|
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
|
|
; GFX9-NEXT: v_lshlrev_b32_e32 v3, 16, v3
|
|
|
|
; GFX9-NEXT: v_or_b32_e32 v2, v2, v3
; GFX9-NEXT: v_cvt_f32_i32_e32 v2, v2
|
|
|
|
; GFX9-NEXT: v_cvt_f32_i32_e32 v0, v0
|
|
|
|
; GFX9-NEXT: v_xor_b32_e32 v1, v1, v3
|
|
|
|
; GFX9-NEXT: v_ashrrev_i32_e32 v1, 30, v1
|
|
|
|
; GFX9-NEXT: v_rcp_iflag_f32_e32 v4, v2
|
|
|
|
; GFX9-NEXT: v_or_b32_e32 v1, 1, v1
|
|
|
|
; GFX9-NEXT: v_mul_f32_e32 v3, v0, v4
|
|
|
|
; GFX9-NEXT: v_trunc_f32_e32 v3, v3
|
|
|
|
; GFX9-NEXT: v_cvt_i32_f32_e32 v4, v3
|
|
|
|
; GFX9-NEXT: v_mad_f32 v0, -v3, v2, v0
|
|
|
|
; GFX9-NEXT: v_cmp_ge_f32_e64 vcc, |v0|, |v2|
|
|
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v0, 0, v1, vcc
; GFX9-NEXT: v_add_u32_e32 v0, v4, v0
|
|
|
|
; GFX9-NEXT: v_bfe_i32 v0, v0, 0, 24
|
|
|
|
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], 0
|
|
|
|
; GFX9-NEXT: s_endpgm
|
|
|
|
;
|
|
|
|
; EG-LABEL: v_sdiv_i24:
|
|
|
|
; EG: ; %bb.0:
|
|
|
|
; EG-NEXT: ALU 0, @14, KC0[CB0:0-32], KC1[]
|
|
|
|
; EG-NEXT: TEX 3 @6
|
|
|
|
; EG-NEXT: ALU 43, @15, KC0[CB0:0-32], KC1[]
|
|
|
|
; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
|
|
|
|
; EG-NEXT: CF_END
|
|
|
|
; EG-NEXT: PAD
|
|
|
|
; EG-NEXT: Fetch clause starting at 6:
|
|
|
|
; EG-NEXT: VTX_READ_8 T1.X, T0.X, 6, #1
|
|
|
|
; EG-NEXT: VTX_READ_16 T2.X, T0.X, 0, #1
|
|
|
|
; EG-NEXT: VTX_READ_8 T3.X, T0.X, 2, #1
|
|
|
|
; EG-NEXT: VTX_READ_16 T0.X, T0.X, 4, #1
|
|
|
|
; EG-NEXT: ALU clause starting at 14:
|
|
|
|
; EG-NEXT: MOV * T0.X, KC0[2].Z,
|
|
|
|
; EG-NEXT: ALU clause starting at 15:
|
|
|
|
; EG-NEXT: BFE_INT * T0.W, T1.X, 0.0, literal.x,
|
|
|
|
; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00)
|
|
|
|
; EG-NEXT: LSHL * T0.W, PV.W, literal.x,
|
|
|
|
; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00)
|
|
|
|
; EG-NEXT: OR_INT * T0.W, T0.X, PV.W,
|
|
|
|
; EG-NEXT: SETGT_INT * T1.W, 0.0, PV.W,
|
|
|
|
; EG-NEXT: ADD_INT * T0.W, T0.W, PV.W,
|
|
|
|
; EG-NEXT: XOR_INT * T0.W, PV.W, T1.W,
|
|
|
|
; EG-NEXT: RECIP_UINT * T0.X, PV.W,
|
|
|
|
; EG-NEXT: BFE_INT T2.W, T3.X, 0.0, literal.x,
|
|
|
|
; EG-NEXT: MULLO_INT * T0.Y, PS, T0.W,
|
|
|
|
; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00)
|
|
|
|
; EG-NEXT: LSHL T0.Z, PV.W, literal.x,
|
|
|
|
; EG-NEXT: SUB_INT T2.W, 0.0, PS,
|
|
|
|
; EG-NEXT: MULHI * T1.X, T0.X, T0.W,
|
|
|
|
; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00)
|
|
|
|
; EG-NEXT: CNDE_INT T2.W, PS, PV.W, T0.Y,
|
|
|
|
; EG-NEXT: OR_INT * T3.W, T2.X, PV.Z,
|
|
|
|
; EG-NEXT: SETGT_INT T4.W, 0.0, PS,
|
|
|
|
; EG-NEXT: MULHI * T0.Y, PV.W, T0.X,
|
|
|
|
; EG-NEXT: ADD_INT T0.Z, T3.W, PV.W,
|
|
|
|
; EG-NEXT: ADD_INT T2.W, T0.X, PS,
|
|
|
|
; EG-NEXT: SUB_INT * T3.W, T0.X, PS,
|
|
|
|
; EG-NEXT: CNDE_INT T2.W, T1.X, PV.W, PS,
|
|
|
|
; EG-NEXT: XOR_INT * T3.W, PV.Z, T4.W,
|
|
|
|
; EG-NEXT: MULHI * T0.X, PV.W, PS,
|
|
|
|
; EG-NEXT: MULLO_INT * T0.Y, PS, T0.W,
|
|
|
|
; EG-NEXT: SUB_INT * T2.W, T3.W, PS,
|
|
|
|
; EG-NEXT: SETGE_UINT T0.W, PV.W, T0.W,
|
|
|
|
; EG-NEXT: SETGE_UINT * T2.W, T3.W, T0.Y,
|
|
|
|
; EG-NEXT: AND_INT T0.W, PV.W, PS,
|
|
|
|
; EG-NEXT: ADD_INT * T3.W, T0.X, 1,
|
|
|
|
; EG-NEXT: CNDE_INT T0.W, PV.W, T0.X, PS,
|
|
|
|
; EG-NEXT: ADD_INT * T3.W, T0.X, literal.x,
|
|
|
|
; EG-NEXT: -1(nan), 0(0.000000e+00)
|
|
|
|
; EG-NEXT: CNDE_INT T0.W, T2.W, PS, PV.W,
|
|
|
|
; EG-NEXT: XOR_INT * T1.W, T4.W, T1.W,
|
|
|
|
; EG-NEXT: XOR_INT * T0.W, PV.W, PS,
|
|
|
|
; EG-NEXT: SUB_INT * T0.W, PV.W, T1.W,
|
|
|
|
; EG-NEXT: LSHL * T0.W, PV.W, literal.x,
|
|
|
|
; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00)
|
|
|
|
; EG-NEXT: ASHR T0.X, PV.W, literal.x,
|
|
|
|
; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.y,
|
|
|
|
; EG-NEXT: 8(1.121039e-44), 2(2.802597e-45)
  %den_ptr = getelementptr i24, i24 addrspace(1)* %in, i24 1
  %num = load i24, i24 addrspace(1) * %in
  %den = load i24, i24 addrspace(1) * %den_ptr
  %result = sdiv i24 %num, %den
  %result.ext = sext i24 %result to i32
  store i32 %result.ext, i32 addrspace(1)* %out
  ret void
}
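
; i25 no longer fits in the f32 mantissa, so this should fall back to the full
; 32-bit expansion (the 0x4f800000 reciprocal sequence) on the sign-extended
; inputs.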
define amdgpu_kernel void @v_sdiv_i25(i32 addrspace(1)* %out, i25 addrspace(1)* %in) {
; GCN-LABEL: v_sdiv_i25:
|
|
|
|
; GCN: ; %bb.0:
|
|
|
|
; GCN-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
|
|
|
|
; GCN-NEXT: s_mov_b32 s7, 0xf000
|
|
|
|
; GCN-NEXT: s_mov_b32 s6, -1
|
|
|
|
; GCN-NEXT: s_mov_b32 s10, s6
|
|
|
|
; GCN-NEXT: s_mov_b32 s11, s7
|
|
|
|
; GCN-NEXT: s_waitcnt lgkmcnt(0)
|
|
|
|
; GCN-NEXT: s_mov_b32 s8, s2
|
|
|
|
; GCN-NEXT: s_mov_b32 s9, s3
|
|
|
|
; GCN-NEXT: buffer_load_dwordx2 v[0:1], off, s[8:11], 0
|
|
|
|
; GCN-NEXT: s_mov_b32 s4, s0
|
|
|
|
; GCN-NEXT: s_mov_b32 s5, s1
|
|
|
|
; GCN-NEXT: s_waitcnt vmcnt(0)
|
|
|
|
; GCN-NEXT: v_bfe_i32 v2, v0, 0, 25
|
|
|
|
; GCN-NEXT: v_bfe_i32 v3, v1, 0, 25
|
|
|
|
; GCN-NEXT: v_bfe_i32 v0, v0, 24, 1
|
|
|
|
; GCN-NEXT: v_bfe_i32 v1, v1, 24, 1
|
|
|
|
; GCN-NEXT: v_xor_b32_e32 v4, v0, v1
|
|
|
|
; GCN-NEXT: v_add_i32_e32 v2, vcc, v0, v2
|
|
|
|
; GCN-NEXT: v_add_i32_e32 v3, vcc, v1, v3
|
|
|
|
; GCN-NEXT: v_xor_b32_e32 v0, v2, v0
|
|
|
|
; GCN-NEXT: v_xor_b32_e32 v1, v3, v1
|
|
|
|
; GCN-NEXT: v_cvt_f32_u32_e32 v2, v1
|
|
|
|
; GCN-NEXT: v_rcp_iflag_f32_e32 v2, v2
|
|
|
|
; GCN-NEXT: v_mul_f32_e32 v2, 0x4f800000, v2
|
|
|
|
; GCN-NEXT: v_cvt_u32_f32_e32 v2, v2
|
|
|
|
; GCN-NEXT: v_mul_hi_u32 v3, v2, v1
|
|
|
|
; GCN-NEXT: v_mul_lo_u32 v5, v2, v1
|
|
|
|
; GCN-NEXT: v_sub_i32_e32 v6, vcc, 0, v5
|
|
|
|
; GCN-NEXT: v_cmp_eq_u32_e64 s[0:1], 0, v3
|
|
|
|
; GCN-NEXT: v_cndmask_b32_e64 v3, v5, v6, s[0:1]
|
|
|
|
; GCN-NEXT: v_mul_hi_u32 v3, v3, v2
|
|
|
|
; GCN-NEXT: v_add_i32_e32 v5, vcc, v3, v2
|
|
|
|
; GCN-NEXT: v_subrev_i32_e32 v2, vcc, v3, v2
|
|
|
|
; GCN-NEXT: v_cndmask_b32_e64 v2, v2, v5, s[0:1]
|
|
|
|
; GCN-NEXT: v_mul_hi_u32 v2, v2, v0
|
|
|
|
; GCN-NEXT: v_mul_lo_u32 v3, v2, v1
|
|
|
|
; GCN-NEXT: v_add_i32_e32 v5, vcc, 1, v2
|
|
|
|
; GCN-NEXT: v_add_i32_e32 v6, vcc, -1, v2
|
|
|
|
; GCN-NEXT: v_subrev_i32_e32 v7, vcc, v3, v0
|
|
|
|
; GCN-NEXT: v_cmp_ge_u32_e32 vcc, v0, v3
|
|
|
|
; GCN-NEXT: v_cmp_ge_u32_e64 s[0:1], v7, v1
|
|
|
|
; GCN-NEXT: s_and_b64 s[0:1], s[0:1], vcc
|
|
|
|
; GCN-NEXT: v_cndmask_b32_e64 v0, v2, v5, s[0:1]
|
|
|
|
; GCN-NEXT: v_cndmask_b32_e32 v0, v6, v0, vcc
|
|
|
|
; GCN-NEXT: v_xor_b32_e32 v0, v0, v4
|
|
|
|
; GCN-NEXT: v_sub_i32_e32 v0, vcc, v0, v4
|
|
|
|
; GCN-NEXT: v_bfe_i32 v0, v0, 0, 25
|
|
|
|
; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0
|
|
|
|
; GCN-NEXT: s_endpgm
|
|
|
|
;
; TONGA-LABEL: v_sdiv_i25:
; TONGA: ; %bb.0:
; TONGA-NEXT: s_load_dwordx4 s[8:11], s[0:1], 0x24
; TONGA-NEXT: s_mov_b32 s7, 0xf000
; TONGA-NEXT: s_mov_b32 s6, -1
; TONGA-NEXT: s_mov_b32 s2, s6
; TONGA-NEXT: s_mov_b32 s3, s7
; TONGA-NEXT: s_waitcnt lgkmcnt(0)
; TONGA-NEXT: s_mov_b32 s0, s10
; TONGA-NEXT: s_mov_b32 s1, s11
; TONGA-NEXT: buffer_load_dwordx2 v[0:1], off, s[0:3], 0
; TONGA-NEXT: s_mov_b32 s4, s8
; TONGA-NEXT: s_mov_b32 s5, s9
; TONGA-NEXT: s_waitcnt vmcnt(0)
; TONGA-NEXT: v_bfe_i32 v2, v1, 0, 25
; TONGA-NEXT: v_bfe_i32 v1, v1, 24, 1
; TONGA-NEXT: v_add_u32_e32 v2, vcc, v1, v2
; TONGA-NEXT: v_xor_b32_e32 v2, v2, v1
; TONGA-NEXT: v_cvt_f32_u32_e32 v3, v2
; TONGA-NEXT: v_bfe_i32 v4, v0, 0, 25
; TONGA-NEXT: v_bfe_i32 v0, v0, 24, 1
; TONGA-NEXT: v_add_u32_e32 v4, vcc, v0, v4
; TONGA-NEXT: v_rcp_iflag_f32_e32 v3, v3
; TONGA-NEXT: v_xor_b32_e32 v4, v4, v0
; TONGA-NEXT: v_xor_b32_e32 v0, v0, v1
; TONGA-NEXT: v_mul_f32_e32 v3, 0x4f800000, v3
; TONGA-NEXT: v_cvt_u32_f32_e32 v3, v3
; TONGA-NEXT: v_mul_lo_u32 v5, v3, v2
; TONGA-NEXT: v_mul_hi_u32 v6, v3, v2
; TONGA-NEXT: v_sub_u32_e32 v7, vcc, 0, v5
; TONGA-NEXT: v_cmp_eq_u32_e64 s[0:1], 0, v6
; TONGA-NEXT: v_cndmask_b32_e64 v5, v5, v7, s[0:1]
; TONGA-NEXT: v_mul_hi_u32 v5, v5, v3
; TONGA-NEXT: v_add_u32_e32 v6, vcc, v5, v3
; TONGA-NEXT: v_subrev_u32_e32 v3, vcc, v5, v3
; TONGA-NEXT: v_cndmask_b32_e64 v3, v3, v6, s[0:1]
; TONGA-NEXT: v_mul_hi_u32 v3, v3, v4
; TONGA-NEXT: v_mul_lo_u32 v1, v3, v2
; TONGA-NEXT: v_add_u32_e32 v5, vcc, 1, v3
; TONGA-NEXT: v_add_u32_e32 v6, vcc, -1, v3
; TONGA-NEXT: v_subrev_u32_e32 v7, vcc, v1, v4
; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v4, v1
; TONGA-NEXT: v_cmp_ge_u32_e64 s[0:1], v7, v2
; TONGA-NEXT: s_and_b64 s[0:1], s[0:1], vcc
; TONGA-NEXT: v_cndmask_b32_e64 v1, v3, v5, s[0:1]
; TONGA-NEXT: v_cndmask_b32_e32 v1, v6, v1, vcc
; TONGA-NEXT: v_xor_b32_e32 v1, v1, v0
; TONGA-NEXT: v_sub_u32_e32 v0, vcc, v1, v0
; TONGA-NEXT: v_bfe_i32 v0, v0, 0, 25
; TONGA-NEXT: buffer_store_dword v0, off, s[4:7], 0
; TONGA-NEXT: s_endpgm
;
; GFX9-LABEL: v_sdiv_i25:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
; GFX9-NEXT: s_mov_b32 s7, 0xf000
; GFX9-NEXT: s_mov_b32 s6, -1
; GFX9-NEXT: s_mov_b32 s10, s6
; GFX9-NEXT: s_mov_b32 s11, s7
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_mov_b32 s8, s2
; GFX9-NEXT: s_mov_b32 s9, s3
; GFX9-NEXT: buffer_load_dwordx2 v[0:1], off, s[8:11], 0
; GFX9-NEXT: s_mov_b32 s4, s0
; GFX9-NEXT: s_mov_b32 s5, s1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_bfe_i32 v2, v1, 0, 25
; GFX9-NEXT: v_bfe_i32 v1, v1, 24, 1
; GFX9-NEXT: v_add_u32_e32 v2, v2, v1
; GFX9-NEXT: v_xor_b32_e32 v2, v2, v1
; GFX9-NEXT: v_cvt_f32_u32_e32 v3, v2
; GFX9-NEXT: v_bfe_i32 v6, v0, 0, 25
; GFX9-NEXT: v_bfe_i32 v0, v0, 24, 1
; GFX9-NEXT: v_rcp_iflag_f32_e32 v3, v3
; GFX9-NEXT: v_mul_f32_e32 v3, 0x4f800000, v3
; GFX9-NEXT: v_cvt_u32_f32_e32 v3, v3
; GFX9-NEXT: v_mul_lo_u32 v4, v3, v2
; GFX9-NEXT: v_mul_hi_u32 v5, v3, v2
; GFX9-NEXT: v_sub_u32_e32 v7, 0, v4
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v5
; GFX9-NEXT: v_cndmask_b32_e32 v4, v4, v7, vcc
; GFX9-NEXT: v_mul_hi_u32 v4, v4, v3
; GFX9-NEXT: v_add_u32_e32 v5, v6, v0
; GFX9-NEXT: v_xor_b32_e32 v5, v5, v0
; GFX9-NEXT: v_xor_b32_e32 v0, v0, v1
; GFX9-NEXT: v_add_u32_e32 v6, v3, v4
; GFX9-NEXT: v_sub_u32_e32 v3, v3, v4
; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v6, vcc
; GFX9-NEXT: v_mul_hi_u32 v3, v3, v5
; GFX9-NEXT: v_mul_lo_u32 v4, v3, v2
; GFX9-NEXT: v_add_u32_e32 v1, 1, v3
; GFX9-NEXT: v_add_u32_e32 v6, -1, v3
; GFX9-NEXT: v_sub_u32_e32 v7, v5, v4
; GFX9-NEXT: v_cmp_ge_u32_e32 vcc, v5, v4
; GFX9-NEXT: v_cmp_ge_u32_e64 s[0:1], v7, v2
; GFX9-NEXT: s_and_b64 s[0:1], s[0:1], vcc
; GFX9-NEXT: v_cndmask_b32_e64 v1, v3, v1, s[0:1]
; GFX9-NEXT: v_cndmask_b32_e32 v1, v6, v1, vcc
; GFX9-NEXT: v_xor_b32_e32 v1, v1, v0
; GFX9-NEXT: v_sub_u32_e32 v0, v1, v0
; GFX9-NEXT: v_bfe_i32 v0, v0, 0, 25
; GFX9-NEXT: buffer_store_dword v0, off, s[4:7], 0
; GFX9-NEXT: s_endpgm
;
; EG-LABEL: v_sdiv_i25:
; EG: ; %bb.0:
; EG-NEXT: ALU 1, @10, KC0[CB0:0-32], KC1[]
; EG-NEXT: TEX 1 @6
; EG-NEXT: ALU 41, @12, KC0[CB0:0-32], KC1[]
; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
; EG-NEXT: CF_END
; EG-NEXT: PAD
; EG-NEXT: Fetch clause starting at 6:
; EG-NEXT: VTX_READ_32 T0.X, T0.X, 4, #1
; EG-NEXT: VTX_READ_32 T1.X, T1.X, 0, #1
; EG-NEXT: ALU clause starting at 10:
; EG-NEXT: MOV * T0.X, KC0[2].Z,
; EG-NEXT: MOV * T1.X, PV.X,
; EG-NEXT: ALU clause starting at 12:
; EG-NEXT: LSHL * T0.W, T0.X, literal.x,
; EG-NEXT: 7(9.809089e-45), 0(0.000000e+00)
; EG-NEXT: ASHR * T0.W, PV.W, literal.x,
; EG-NEXT: 7(9.809089e-45), 0(0.000000e+00)
; EG-NEXT: SETGT_INT * T1.W, 0.0, PV.W,
; EG-NEXT: ADD_INT * T0.W, T0.W, PV.W,
; EG-NEXT: XOR_INT * T0.W, PV.W, T1.W,
; EG-NEXT: RECIP_UINT * T0.X, PV.W,
; EG-NEXT: MULLO_INT * T0.Y, PS, T0.W,
; EG-NEXT: LSHL T0.Z, T1.X, literal.x,
; EG-NEXT: SUB_INT T2.W, 0.0, PS,
; EG-NEXT: MULHI * T1.X, T0.X, T0.W,
; EG-NEXT: 7(9.809089e-45), 0(0.000000e+00)
; EG-NEXT: CNDE_INT T2.W, PS, PV.W, T0.Y,
; EG-NEXT: ASHR * T3.W, PV.Z, literal.x,
; EG-NEXT: 7(9.809089e-45), 0(0.000000e+00)
; EG-NEXT: SETGT_INT T4.W, 0.0, PS,
; EG-NEXT: MULHI * T0.Y, PV.W, T0.X,
; EG-NEXT: ADD_INT T0.Z, T3.W, PV.W,
; EG-NEXT: ADD_INT T2.W, T0.X, PS,
; EG-NEXT: SUB_INT * T3.W, T0.X, PS,
; EG-NEXT: CNDE_INT T2.W, T1.X, PV.W, PS,
; EG-NEXT: XOR_INT * T3.W, PV.Z, T4.W,
; EG-NEXT: MULHI * T0.X, PV.W, PS,
; EG-NEXT: MULLO_INT * T0.Y, PS, T0.W,
; EG-NEXT: SUB_INT * T2.W, T3.W, PS,
; EG-NEXT: SETGE_UINT T0.W, PV.W, T0.W,
; EG-NEXT: SETGE_UINT * T2.W, T3.W, T0.Y,
; EG-NEXT: AND_INT T0.W, PV.W, PS,
; EG-NEXT: ADD_INT * T3.W, T0.X, 1,
; EG-NEXT: CNDE_INT T0.W, PV.W, T0.X, PS,
; EG-NEXT: ADD_INT * T3.W, T0.X, literal.x,
; EG-NEXT: -1(nan), 0(0.000000e+00)
; EG-NEXT: CNDE_INT T0.W, T2.W, PS, PV.W,
; EG-NEXT: XOR_INT * T1.W, T4.W, T1.W,
; EG-NEXT: XOR_INT * T0.W, PV.W, PS,
; EG-NEXT: SUB_INT * T0.W, PV.W, T1.W,
; EG-NEXT: LSHL * T0.W, PV.W, literal.x,
; EG-NEXT: 7(9.809089e-45), 0(0.000000e+00)
; EG-NEXT: ASHR T0.X, PV.W, literal.x,
; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.y,
; EG-NEXT: 7(9.809089e-45), 2(2.802597e-45)
  %den_ptr = getelementptr i25, i25 addrspace(1)* %in, i25 1
  %num = load i25, i25 addrspace(1) * %in
  %den = load i25, i25 addrspace(1) * %den_ptr
  %result = sdiv i25 %num, %den
  %result.ext = sext i25 %result to i32
  store i32 %result.ext, i32 addrspace(1)* %out
  ret void
}
; Tests for 64-bit divide bypass.
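; These are left commented out; if enabled they would exercise the 64-bit
; sdiv/srem lowering, which the divide bypass is presumably meant to replace
; with a cheaper 32-bit division when the operands happen to fit in 32 bits.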
; define amdgpu_kernel void @test_get_quotient(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
; %result = sdiv i64 %a, %b
; store i64 %result, i64 addrspace(1)* %out, align 8
; ret void
; }

; define amdgpu_kernel void @test_get_remainder(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
; %result = srem i64 %a, %b
; store i64 %result, i64 addrspace(1)* %out, align 8
; ret void
; }

; define amdgpu_kernel void @test_get_quotient_and_remainder(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
; %resultdiv = sdiv i64 %a, %b
; %resultrem = srem i64 %a, %b
; %result = add i64 %resultdiv, %resultrem
; store i64 %result, i64 addrspace(1)* %out, align 8
; ret void
; }
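; The divisor below is the compile-time constant 53668, so no runtime
; division is expected: each lane is multiplied by the magic constant
; 0x1389c755 (327796565, roughly 2^44 / 53668) with v_mul_hi_i32 (MULHI_INT
; on EG), the high half is shifted right arithmetically by 12, and the sign
; bit (lshrrev by 31) is added back so the quotient rounds toward zero.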
define amdgpu_kernel void @scalarize_mulhs_4xi32(<4 x i32> addrspace(1)* nocapture readonly %in, <4 x i32> addrspace(1)* nocapture %out) {
; GCN-LABEL: scalarize_mulhs_4xi32:
; GCN: ; %bb.0:
; GCN-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
; GCN-NEXT: s_mov_b32 s7, 0xf000
; GCN-NEXT: s_mov_b32 s6, -1
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_mov_b32 s4, s0
; GCN-NEXT: s_mov_b32 s5, s1
; GCN-NEXT: buffer_load_dwordx4 v[0:3], off, s[4:7], 0
; GCN-NEXT: s_mov_b32 s0, 0x1389c755
; GCN-NEXT: s_mov_b32 s4, s2
; GCN-NEXT: s_mov_b32 s5, s3
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_mul_hi_i32 v0, v0, s0
; GCN-NEXT: v_mul_hi_i32 v1, v1, s0
; GCN-NEXT: v_mul_hi_i32 v2, v2, s0
; GCN-NEXT: v_mul_hi_i32 v3, v3, s0
; GCN-NEXT: v_lshrrev_b32_e32 v4, 31, v0
; GCN-NEXT: v_ashrrev_i32_e32 v0, 12, v0
; GCN-NEXT: v_lshrrev_b32_e32 v5, 31, v1
; GCN-NEXT: v_ashrrev_i32_e32 v1, 12, v1
; GCN-NEXT: v_lshrrev_b32_e32 v6, 31, v2
; GCN-NEXT: v_ashrrev_i32_e32 v2, 12, v2
; GCN-NEXT: v_lshrrev_b32_e32 v7, 31, v3
; GCN-NEXT: v_ashrrev_i32_e32 v3, 12, v3
; GCN-NEXT: v_add_i32_e32 v0, vcc, v0, v4
; GCN-NEXT: v_add_i32_e32 v1, vcc, v1, v5
; GCN-NEXT: v_add_i32_e32 v2, vcc, v2, v6
; GCN-NEXT: v_add_i32_e32 v3, vcc, v3, v7
; GCN-NEXT: buffer_store_dwordx4 v[0:3], off, s[4:7], 0
; GCN-NEXT: s_endpgm
;
; TONGA-LABEL: scalarize_mulhs_4xi32:
; TONGA: ; %bb.0:
; TONGA-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; TONGA-NEXT: s_mov_b32 s3, 0xf000
; TONGA-NEXT: s_mov_b32 s2, -1
; TONGA-NEXT: s_waitcnt lgkmcnt(0)
; TONGA-NEXT: s_mov_b32 s0, s4
; TONGA-NEXT: s_mov_b32 s1, s5
; TONGA-NEXT: buffer_load_dwordx4 v[0:3], off, s[0:3], 0
; TONGA-NEXT: s_mov_b32 s0, 0x1389c755
; TONGA-NEXT: s_mov_b32 s4, s6
; TONGA-NEXT: s_mov_b32 s5, s7
; TONGA-NEXT: s_mov_b32 s6, s2
; TONGA-NEXT: s_mov_b32 s7, s3
; TONGA-NEXT: s_waitcnt vmcnt(0)
; TONGA-NEXT: v_mul_hi_i32 v0, v0, s0
; TONGA-NEXT: v_mul_hi_i32 v1, v1, s0
; TONGA-NEXT: v_mul_hi_i32 v2, v2, s0
; TONGA-NEXT: v_mul_hi_i32 v3, v3, s0
; TONGA-NEXT: v_lshrrev_b32_e32 v4, 31, v0
; TONGA-NEXT: v_ashrrev_i32_e32 v0, 12, v0
; TONGA-NEXT: v_lshrrev_b32_e32 v5, 31, v1
; TONGA-NEXT: v_ashrrev_i32_e32 v1, 12, v1
; TONGA-NEXT: v_lshrrev_b32_e32 v6, 31, v2
; TONGA-NEXT: v_ashrrev_i32_e32 v2, 12, v2
; TONGA-NEXT: v_lshrrev_b32_e32 v7, 31, v3
; TONGA-NEXT: v_ashrrev_i32_e32 v3, 12, v3
; TONGA-NEXT: v_add_u32_e32 v0, vcc, v0, v4
; TONGA-NEXT: v_add_u32_e32 v1, vcc, v1, v5
; TONGA-NEXT: v_add_u32_e32 v2, vcc, v2, v6
; TONGA-NEXT: v_add_u32_e32 v3, vcc, v3, v7
; TONGA-NEXT: buffer_store_dwordx4 v[0:3], off, s[4:7], 0
; TONGA-NEXT: s_endpgm
;
; GFX9-LABEL: scalarize_mulhs_4xi32:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX9-NEXT: s_mov_b32 s3, 0xf000
; GFX9-NEXT: s_mov_b32 s2, -1
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_mov_b32 s0, s4
; GFX9-NEXT: s_mov_b32 s1, s5
; GFX9-NEXT: buffer_load_dwordx4 v[0:3], off, s[0:3], 0
; GFX9-NEXT: s_mov_b32 s0, 0x1389c755
; GFX9-NEXT: s_mov_b32 s4, s6
; GFX9-NEXT: s_mov_b32 s5, s7
; GFX9-NEXT: s_mov_b32 s6, s2
; GFX9-NEXT: s_mov_b32 s7, s3
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mul_hi_i32 v0, v0, s0
; GFX9-NEXT: v_mul_hi_i32 v1, v1, s0
; GFX9-NEXT: v_mul_hi_i32 v2, v2, s0
; GFX9-NEXT: v_mul_hi_i32 v3, v3, s0
; GFX9-NEXT: v_lshrrev_b32_e32 v4, 31, v0
; GFX9-NEXT: v_ashrrev_i32_e32 v0, 12, v0
; GFX9-NEXT: v_lshrrev_b32_e32 v5, 31, v1
; GFX9-NEXT: v_ashrrev_i32_e32 v1, 12, v1
; GFX9-NEXT: v_lshrrev_b32_e32 v6, 31, v2
; GFX9-NEXT: v_ashrrev_i32_e32 v2, 12, v2
; GFX9-NEXT: v_lshrrev_b32_e32 v7, 31, v3
; GFX9-NEXT: v_ashrrev_i32_e32 v3, 12, v3
; GFX9-NEXT: v_add_u32_e32 v0, v0, v4
; GFX9-NEXT: v_add_u32_e32 v1, v1, v5
; GFX9-NEXT: v_add_u32_e32 v2, v2, v6
; GFX9-NEXT: v_add_u32_e32 v3, v3, v7
; GFX9-NEXT: buffer_store_dwordx4 v[0:3], off, s[4:7], 0
; GFX9-NEXT: s_endpgm
;
; EG-LABEL: scalarize_mulhs_4xi32:
; EG: ; %bb.0:
; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
; EG-NEXT: TEX 0 @6
; EG-NEXT: ALU 25, @9, KC0[CB0:0-32], KC1[]
; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.XYZW, T1.X, 1
; EG-NEXT: CF_END
; EG-NEXT: PAD
; EG-NEXT: Fetch clause starting at 6:
; EG-NEXT: VTX_READ_128 T0.XYZW, T0.X, 0, #1
; EG-NEXT: ALU clause starting at 8:
; EG-NEXT: MOV * T0.X, KC0[2].Y,
; EG-NEXT: ALU clause starting at 9:
; EG-NEXT: MULHI_INT * T0.W, T0.W, literal.x,
; EG-NEXT: 327796565(3.478022e-27), 0(0.000000e+00)
; EG-NEXT: ASHR T1.Z, PS, literal.x,
; EG-NEXT: LSHR T0.W, PS, literal.y,
; EG-NEXT: MULHI_INT * T0.Z, T0.Z, literal.z,
; EG-NEXT: 12(1.681558e-44), 31(4.344025e-44)
; EG-NEXT: 327796565(3.478022e-27), 0(0.000000e+00)
; EG-NEXT: ASHR T1.Y, PS, literal.x,
; EG-NEXT: LSHR T0.Z, PS, literal.y,
; EG-NEXT: ADD_INT T0.W, PV.Z, PV.W,
; EG-NEXT: MULHI_INT * T0.Y, T0.Y, literal.z,
; EG-NEXT: 12(1.681558e-44), 31(4.344025e-44)
; EG-NEXT: 327796565(3.478022e-27), 0(0.000000e+00)
; EG-NEXT: ASHR T2.Y, PS, literal.x,
; EG-NEXT: ADD_INT T0.Z, PV.Y, PV.Z,
; EG-NEXT: LSHR T1.W, PS, literal.y,
; EG-NEXT: MULHI_INT * T0.X, T0.X, literal.z,
; EG-NEXT: 12(1.681558e-44), 31(4.344025e-44)
; EG-NEXT: 327796565(3.478022e-27), 0(0.000000e+00)
; EG-NEXT: ADD_INT T0.Y, PV.Y, PV.W,
; EG-NEXT: ASHR T1.W, PS, literal.x,
; EG-NEXT: LSHR * T2.W, PS, literal.y,
; EG-NEXT: 12(1.681558e-44), 31(4.344025e-44)
; EG-NEXT: ADD_INT T0.X, PV.W, PS,
; EG-NEXT: LSHR * T1.X, KC0[2].Z, literal.x,
; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
  %1 = load <4 x i32>, <4 x i32> addrspace(1)* %in, align 16
  %2 = sdiv <4 x i32> %1, <i32 53668, i32 53668, i32 53668, i32 53668>
  store <4 x i32> %2, <4 x i32> addrspace(1)* %out, align 16
  ret void
}