AMDGPU/GlobalISel: Adjust branch target when lowering loop intrinsic

This needs to steal the branch target like the other control flow
intrinsics.
Matt Arsenault 2020-02-14 21:47:37 -05:00
parent b807a28787
commit 37c452a289
2 changed files with 61 additions and 3 deletions
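
What the fix does, at the MIR level: the loop intrinsic's result feeds a G_BRCOND, and an unconditional G_BR back to the loop header usually follows it. SI_LOOP is the instruction that must branch back to the header, so it has to steal the G_BR's target, and the surviving G_BR is then pointed at the G_BRCOND's original destination. A rough before/after sketch (block numbers and value names are illustrative, not taken from the commit):

; before legalization: Flow block of a divergent loop
bb.2:
  %brk:_(s1) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.loop), %mask:_(s64)
  G_BRCOND %brk(s1), %bb.3   ; exit block
  G_BR %bb.1                 ; loop header

; after legalization: SI_LOOP stole the G_BR target, G_BR was retargeted
bb.2:
  SI_LOOP %mask, %bb.1       ; loop back to the header while any lane remains
  G_BR %bb.3                 ; now the old G_BRCOND destination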

llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp

@@ -3575,11 +3575,18 @@ bool AMDGPULegalizerInfo::legalizeIntrinsic(MachineInstr &MI,
 
       B.setInstr(*BrCond);
 
-      // FIXME: Need to adjust branch targets based on unconditional branch.
+      MachineBasicBlock *BrTarget = BrCond->getOperand(1).getMBB();
+      if (Br)
+        BrTarget = Br->getOperand(0).getMBB();
+
       Register Reg = MI.getOperand(2).getReg();
       B.buildInstr(AMDGPU::SI_LOOP)
         .addUse(Reg)
-        .addMBB(BrCond->getOperand(1).getMBB());
+        .addMBB(BrTarget);
+
+      if (Br)
+        Br->getOperand(0).setMBB(BrCond->getOperand(1).getMBB());
+
       MI.eraseFromParent();
       BrCond->eraseFromParent();
       MRI.setRegClass(Reg, TRI->getWaveMaskRegClass());
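
This mirrors the lowering already used for the if and else intrinsics earlier in legalizeIntrinsic, which is what "steal the branch target like the other control flow intrinsics" refers to. A sketch of the sibling amdgcn_if case for comparison (paraphrased from the same file, not part of this diff; Def and Use are the intrinsic's destination and condition registers there):

      MachineBasicBlock *BrTarget = BrCond->getOperand(1).getMBB();
      if (Br)
        BrTarget = Br->getOperand(0).getMBB(); // steal the unconditional target

      B.buildInstr(AMDGPU::SI_IF)
        .addDef(Def)
        .addUse(Use)
        .addMBB(BrTarget);

      if (Br)
        Br->getOperand(0).setMBB(BrCond->getOperand(1).getMBB()); // redirect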

llvm/test/CodeGen/AMDGPU/GlobalISel/divergent-control-flow.ll

@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -global-isel -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 < %s | FileCheck %s
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck %s
 
 ; Make sure the branch targets are correct after lowering llvm.amdgcn.if
@@ -198,3 +198,54 @@ bb11:
 bb12:
   ret void
 }
+
+define amdgpu_kernel void @break_loop(i32 %arg) {
+; CHECK-LABEL: break_loop:
+; CHECK: ; %bb.0: ; %bb
+; CHECK-NEXT: s_load_dword s2, s[4:5], 0x0
+; CHECK-NEXT: s_mov_b64 s[0:1], 0
+; CHECK-NEXT: ; implicit-def: $vgpr1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: v_subrev_u32_e32 v0, s2, v0
+; CHECK-NEXT: BB5_1: ; %bb1
+; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: v_add_u32_e32 v1, 1, v1
+; CHECK-NEXT: v_cmp_le_i32_e32 vcc, 0, v1
+; CHECK-NEXT: v_cmp_ne_u32_e64 s[2:3], 0, 1
+; CHECK-NEXT: s_cbranch_vccnz BB5_3
+; CHECK-NEXT: ; %bb.2: ; %bb4
+; CHECK-NEXT: ; in Loop: Header=BB5_1 Depth=1
+; CHECK-NEXT: global_load_dword v2, v[0:1], off
+; CHECK-NEXT: v_cmp_ne_u32_e64 s[2:3], 0, 1
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_cmp_lt_i32_e32 vcc, v0, v2
+; CHECK-NEXT: s_xor_b64 s[2:3], vcc, s[2:3]
+; CHECK-NEXT: BB5_3: ; %Flow
+; CHECK-NEXT: ; in Loop: Header=BB5_1 Depth=1
+; CHECK-NEXT: s_and_b64 s[2:3], exec, s[2:3]
+; CHECK-NEXT: s_or_b64 s[0:1], s[2:3], s[0:1]
+; CHECK-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; CHECK-NEXT: s_cbranch_execnz BB5_1
+; CHECK-NEXT: ; %bb.4: ; %bb9
+; CHECK-NEXT: s_endpgm
+bb:
+  %id = call i32 @llvm.amdgcn.workitem.id.x()
+  %tmp = sub i32 %id, %arg
+  br label %bb1
+
+bb1:
+  %lsr.iv = phi i32 [ undef, %bb ], [ %lsr.iv.next, %bb4 ]
+  %lsr.iv.next = add i32 %lsr.iv, 1
+  %cmp0 = icmp slt i32 %lsr.iv.next, 0
+  br i1 %cmp0, label %bb4, label %bb9
+
+bb4:
+  %load = load volatile i32, i32 addrspace(1)* undef, align 4
+  %cmp1 = icmp slt i32 %tmp, %load
+  br i1 %cmp1, label %bb1, label %bb9
+
+bb9:
+  ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x()
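
The CHECK lines are autogenerated, so they can be refreshed after any codegen change by rerunning the script named in the NOTE line (test path as above, reconstructed from memory):

  llvm/utils/update_llc_test_checks.py llvm/test/CodeGen/AMDGPU/GlobalISel/divergent-control-flow.ll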