llvm-project/llvm/test/CodeGen/AMDGPU/GlobalISel/bool-legalization.ll


AMDGPU/GlobalISel: Replace handling of boolean values

This solves selection failures with generated selection patterns, which would fail due to inferring the SGPR reg bank for virtual registers with a set register class instead of the VCC bank. Selecting a use instruction would constrain the virtual register to a specific class, so when the def was selected later, the bank was no longer set to VCC.

Remove the SCC reg bank. SCC isn't directly addressable, so it requires a copy from SCC to an allocatable 32-bit register during selection; these values might as well be treated as 32-bit SGPR values. Now any scalar boolean value that will produce an output in SCC should be widened to s32 during RegBankSelect, and any s1 value should be a vector boolean during selection. This makes a VCC-bank value unambiguously distinct from a normal SGPR value during selection.

Summary of how this should now work:

- G_TRUNC is always a no-op, and should never use a VCC-bank result.
- SALU boolean operations should be promoted to s32 in the RegBankSelect apply mapping.
- An s1 value means the VCC bank at selection. The exception is legalization artifacts that use s1, which are never VCC. All other contexts should infer the VCC register classes for s1-typed registers; the LLT for the register is now needed to infer the correct register class. Extensions with VCC sources should be legalized to a select of constants during RegBankSelect.
- A copy from non-VCC to VCC ensures the high bits of the input value are cleared during selection.
- SALU boolean inputs should be ensured to be 0/1. This includes selects, conditional branches, and carry-ins.

There are a few somewhat dirty details. One is that G_TRUNC/G_*EXT selection ignores the usual register-bank-from-register-class functions and can't handle truncates with VCC result banks. I think this is OK, since the artifacts are specially treated anyway, but it does require some care to avoid producing cases with VCC. There is also no fully reliable way to verify this rule is followed during selection in the case of register classes, and violations manifest themselves as invalid copy instructions much later.

Standard phi handling also only considers the bank of the result register and doesn't insert copies to make the source banks match. This doesn't work for VCC, so we have to manually correct phi inputs in this case. We should add a verifier check to make sure there are no phis with mixed VCC and non-VCC register bank inputs.

There's also some duplication with the LegalizerHelper, and some code which should live in the helper. I don't see a good way to share special knowledge about what types to use for intermediate operations depending on the bank, for example. Using the helper to replace extensions with selects also seems somewhat awkward to me.

Another issue is that some contexts calling getRegBankFromRegClass apparently don't have the LLT type for the register, but I haven't yet run into a real issue from this.

This also introduces new unnecessary instructions in most cases, since we don't yet try to optimize out the zext when the source is known to come from a compare.

2019-11-03 00:30:59 +08:00
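A minimal illustration of the two strategies this test exercises, drawn from the generated checks that follow (the exact registers come from those check lines):

    ; uniform boolean consumed by scalar code: the masked bit sets SCC and feeds s_cselect
    s_and_b32     s0, s0, 1
    s_cmp_lg_u32  s0, 0
    s_cselect_b32 s0, s2, s3

    ; boolean feeding a divergent select: the bit is first materialized in vcc
    s_and_b32         s0, 1, s0
    v_cmp_ne_u32_e64  vcc, 0, s0
    v_cndmask_b32_e32 v0, v1, v0, vcc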
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -global-isel -march=amdgcn -mcpu=hawaii -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
; End-to-end tests for scalar vs. vector boolean legalization strategies.
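
; Uniform trunc condition selecting between VGPR values: the bit is masked in
; SALU and moved into vcc with v_cmp_ne_u32 before the v_cndmask.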
define amdgpu_ps float @select_vgpr_sgpr_trunc_cond(i32 inreg %a, i32 %b, i32 %c) {
; GCN-LABEL: select_vgpr_sgpr_trunc_cond:
; GCN: ; %bb.0:
; GCN-NEXT: s_and_b32 s0, 1, s0
; GCN-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
; GCN-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
; GCN-NEXT: ; return to shader part epilog
%cc = trunc i32 %a to i1
%r = select i1 %cc, i32 %b, i32 %c
%r.f = bitcast i32 %r to float
ret float %r.f
}
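
; As above, but the condition is the AND of two truncated uniform values; the
; AND is performed as a 32-bit SALU op before the bit is moved into vcc.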
define amdgpu_ps float @select_vgpr_sgpr_trunc_and_cond(i32 inreg %a.0, i32 inreg %a.1, i32 %b, i32 %c) {
; GCN-LABEL: select_vgpr_sgpr_trunc_and_cond:
; GCN: ; %bb.0:
; GCN-NEXT: s_and_b32 s0, s0, s1
; GCN-NEXT: s_and_b32 s0, 1, s0
; GCN-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
; GCN-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
; GCN-NEXT: ; return to shader part epilog
%cc.0 = trunc i32 %a.0 to i1
%cc.1 = trunc i32 %a.1 to i1
%and = and i1 %cc.0, %cc.1
%r = select i1 %and, i32 %b, i32 %c
%r.f = bitcast i32 %r to float
ret float %r.f
}
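
; All operands are uniform, so the boolean stays scalar: the masked bit sets SCC
; via s_cmp_lg_u32 and the result is chosen with s_cselect_b32.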
define amdgpu_ps i32 @select_sgpr_trunc_and_cond(i32 inreg %a.0, i32 inreg %a.1, i32 inreg %b, i32 inreg %c) {
; GCN-LABEL: select_sgpr_trunc_and_cond:
; GCN: ; %bb.0:
; GCN-NEXT: s_and_b32 s0, s0, s1
; GCN-NEXT: s_and_b32 s0, s0, 1
; GCN-NEXT: s_cmp_lg_u32 s0, 0
; GCN-NEXT: s_cselect_b32 s0, s2, s3
; GCN-NEXT: ; return to shader part epilog
%cc.0 = trunc i32 %a.0 to i1
%cc.1 = trunc i32 %a.1 to i1
%and = and i1 %cc.0, %cc.1
%r = select i1 %and, i32 %b, i32 %c
ret i32 %r
}
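
; Uniform i1 branch condition: the truncated bit is masked with s_and_b32,
; compared against zero with s_cmp_lg_u32, and branched on with s_cbranch_scc0.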
define amdgpu_kernel void @sgpr_trunc_brcond(i32 %cond) {
; GCN-LABEL: sgpr_trunc_brcond:
; GCN: ; %bb.0: ; %entry
; GCN-NEXT: s_load_dword s0, s[0:1], 0x9
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_and_b32 s0, s0, 1
; GCN-NEXT: s_cmp_lg_u32 s0, 0
; GCN-NEXT: s_cbranch_scc0 BB3_2
; GCN-NEXT: ; %bb.1: ; %bb0
; GCN-NEXT: v_mov_b32_e32 v0, 0
; GCN-NEXT: flat_store_dword v[0:1], v0
; GCN-NEXT: BB3_2: ; %bb1
; GCN-NEXT: v_mov_b32_e32 v0, 1
; GCN-NEXT: flat_store_dword v[0:1], v0
entry:
%trunc = trunc i32 %cond to i1
br i1 %trunc, label %bb0, label %bb1
bb0:
store volatile i32 0, i32 addrspace(1)* undef
unreachable
bb1:
store volatile i32 1, i32 addrspace(1)* undef
unreachable
}
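
; As above, with the branch condition formed by ANDing two truncated values in SALU.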
define amdgpu_kernel void @brcond_sgpr_trunc_and(i32 %cond0, i32 %cond1) {
; GCN-LABEL: brcond_sgpr_trunc_and:
; GCN: ; %bb.0: ; %entry
; GCN-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_and_b32 s0, s0, s1
; GCN-NEXT: s_and_b32 s0, s0, 1
; GCN-NEXT: s_cmp_lg_u32 s0, 0
; GCN-NEXT: s_cbranch_scc0 BB4_2
; GCN-NEXT: ; %bb.1: ; %bb0
; GCN-NEXT: v_mov_b32_e32 v0, 0
; GCN-NEXT: flat_store_dword v[0:1], v0
; GCN-NEXT: BB4_2: ; %bb1
; GCN-NEXT: v_mov_b32_e32 v0, 1
; GCN-NEXT: flat_store_dword v[0:1], v0
entry:
%trunc0 = trunc i32 %cond0 to i1
%trunc1 = trunc i32 %cond1 to i1
%and = and i1 %trunc0, %trunc1
br i1 %and, label %bb0, label %bb1
bb0:
store volatile i32 0, i32 addrspace(1)* undef
unreachable
bb1:
store volatile i32 1, i32 addrspace(1)* undef
unreachable
}