[AMDGPU] add generated checks for some LIT tests

This is in preparation for further changes that affect these tests.

Reviewed By: arsenm

Differential Revision: https://reviews.llvm.org/D75403
Sameer Sahasrabuddhe 2020-02-28 22:47:35 +05:30
parent 6f029dadf6
commit 534d8866a1
6 changed files with 1085 additions and 551 deletions
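
The ISA/SI/GCN check lines added in these tests were generated rather than written by hand. As a minimal sketch of how such checks are typically regenerated (the test path is a placeholder, and the scripts are assumed to find llc/opt on PATH or via their binary-path options), assuming an llvm-project checkout:

  python llvm/utils/update_llc_test_checks.py llvm/test/CodeGen/AMDGPU/<test>.ll
  python llvm/utils/update_test_checks.py llvm/test/CodeGen/AMDGPU/<ir-test>.ll

The scripts read the RUN lines in each test to decide which tool invocations to re-run and which check prefixes to emit.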


@@ -1,3 +1,10 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; NOTE: The checks for opt are NOT added by the update script. Those
; checks are looking for the absence of specific metadata, which
; cannot be expressed reliably by the generated checks.
; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck %s -check-prefix=ISA
; RUN: opt --amdgpu-annotate-uniform -S %s | FileCheck %s -check-prefix=UNIFORM
; RUN: opt --amdgpu-annotate-uniform --si-annotate-control-flow -S %s | FileCheck %s -check-prefix=CONTROLFLOW
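
As a rough sketch of the kind of hand-written absence check the NOTE above refers to (the metadata name is an assumption here, illustrating the uniformity annotation the pass would otherwise attach, and is not copied from this test), FileCheck's -NOT suffix expresses "this string must not appear":

  ; UNIFORM-NOT: !amdgpu.uniform

The update scripts only emit positive CHECK/CHECK-NEXT matches, so a negative check like this has to be kept by hand.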
@@ -9,6 +16,56 @@
target triple = "amdgcn-mesa-mesa3d"
define amdgpu_ps void @main(i32 %0, float %1) {
; ISA-LABEL: main:
; ISA: ; %bb.0: ; %start
; ISA-NEXT: v_readfirstlane_b32 s0, v0
; ISA-NEXT: s_mov_b32 m0, s0
; ISA-NEXT: s_mov_b32 s0, 0
; ISA-NEXT: v_interp_p1_f32_e32 v0, v1, attr0.x
; ISA-NEXT: v_cmp_nlt_f32_e32 vcc, 0, v0
; ISA-NEXT: s_mov_b64 s[2:3], 0
; ISA-NEXT: ; implicit-def: $sgpr6_sgpr7
; ISA-NEXT: ; implicit-def: $sgpr4_sgpr5
; ISA-NEXT: s_branch BB0_3
; ISA-NEXT: BB0_1: ; %Flow1
; ISA-NEXT: ; in Loop: Header=BB0_3 Depth=1
; ISA-NEXT: s_or_b64 exec, exec, s[8:9]
; ISA-NEXT: s_add_i32 s0, s0, 1
; ISA-NEXT: s_mov_b64 s[8:9], 0
; ISA-NEXT: BB0_2: ; %Flow
; ISA-NEXT: ; in Loop: Header=BB0_3 Depth=1
; ISA-NEXT: s_and_b64 s[10:11], exec, s[6:7]
; ISA-NEXT: s_or_b64 s[2:3], s[10:11], s[2:3]
; ISA-NEXT: s_andn2_b64 s[4:5], s[4:5], exec
; ISA-NEXT: s_and_b64 s[8:9], s[8:9], exec
; ISA-NEXT: s_or_b64 s[4:5], s[4:5], s[8:9]
; ISA-NEXT: s_andn2_b64 exec, exec, s[2:3]
; ISA-NEXT: s_cbranch_execz BB0_6
; ISA-NEXT: BB0_3: ; %loop
; ISA-NEXT: ; =>This Inner Loop Header: Depth=1
; ISA-NEXT: s_or_b64 s[6:7], s[6:7], exec
; ISA-NEXT: s_cmp_lt_u32 s0, 32
; ISA-NEXT: s_mov_b64 s[8:9], -1
; ISA-NEXT: s_cbranch_scc0 BB0_2
; ISA-NEXT: ; %bb.4: ; %endif1
; ISA-NEXT: ; in Loop: Header=BB0_3 Depth=1
; ISA-NEXT: s_mov_b64 s[6:7], -1
; ISA-NEXT: s_and_saveexec_b64 s[8:9], vcc
; ISA-NEXT: s_cbranch_execz BB0_1
; ISA-NEXT: ; %bb.5: ; %endif2
; ISA-NEXT: ; in Loop: Header=BB0_3 Depth=1
; ISA-NEXT: s_xor_b64 s[6:7], exec, -1
; ISA-NEXT: s_branch BB0_1
; ISA-NEXT: BB0_6: ; %Flow2
; ISA-NEXT: s_or_b64 exec, exec, s[2:3]
; ISA-NEXT: v_mov_b32_e32 v1, 0
; ISA-NEXT: s_and_saveexec_b64 s[0:1], s[4:5]
; ISA-NEXT: ; %bb.7: ; %if1
; ISA-NEXT: v_sqrt_f32_e32 v1, v0
; ISA-NEXT: ; %bb.8: ; %endloop
; ISA-NEXT: s_or_b64 exec, exec, s[0:1]
; ISA-NEXT: exp mrt0 v1, v1, v1, v1 done vm
; ISA-NEXT: s_endpgm
start:
%v0 = call float @llvm.amdgcn.interp.p1(float %1, i32 0, i32 0, i32 %0)
br label %loop


@@ -1,13 +1,27 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
; RUN: opt -mtriple=amdgcn-- -S -amdgpu-unify-divergent-exit-nodes -verify %s | FileCheck -check-prefix=IR %s
; SI-LABEL: {{^}}infinite_loop:
; SI: v_mov_b32_e32 [[REG:v[0-9]+]], 0x3e7
; SI: [[LOOP:BB[0-9]+_[0-9]+]]: ; %loop
; SI: s_waitcnt lgkmcnt(0)
; SI: buffer_store_dword [[REG]]
; SI: s_branch [[LOOP]]
define amdgpu_kernel void @infinite_loop(i32 addrspace(1)* %out) {
; SI-LABEL: infinite_loop:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mov_b32_e32 v0, 0x3e7
; SI-NEXT: BB0_1: ; %loop
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_branch BB0_1
; IR-LABEL: @infinite_loop(
; IR-NEXT: entry:
; IR-NEXT: br label [[LOOP:%.*]]
; IR: loop:
; IR-NEXT: store volatile i32 999, i32 addrspace(1)* [[OUT:%.*]], align 4
; IR-NEXT: br label [[LOOP]]
;
entry:
br label %loop
@@ -16,31 +30,36 @@ loop:
br label %loop
}
; IR-LABEL: @infinite_loop_ret(
; IR: br i1 %cond, label %loop, label %UnifiedReturnBlock
; IR: loop:
; IR: store volatile i32 999, i32 addrspace(1)* %out, align 4
; IR: br i1 true, label %loop, label %UnifiedReturnBlock
; IR: UnifiedReturnBlock:
; IR: ret void
; SI-LABEL: {{^}}infinite_loop_ret:
; SI: s_cbranch_execz [[RET:BB[0-9]+_[0-9]+]]
; SI: v_mov_b32_e32 [[REG:v[0-9]+]], 0x3e7
; SI: s_and_b64 vcc, exec, -1
; SI: [[LOOP:BB[0-9]+_[0-9]+]]: ; %loop
; SI: s_waitcnt lgkmcnt(0)
; SI: buffer_store_dword [[REG]]
; SI: s_cbranch_vccnz [[LOOP]]
; SI: [[RET]]: ; %UnifiedReturnBlock
; SI: s_endpgm
define amdgpu_kernel void @infinite_loop_ret(i32 addrspace(1)* %out) {
; SI-LABEL: infinite_loop_ret:
; SI: ; %bb.0: ; %entry
; SI-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
; SI-NEXT: s_and_saveexec_b64 s[2:3], vcc
; SI-NEXT: s_cbranch_execz BB1_3
; SI-NEXT: ; %bb.1: ; %loop.preheader
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mov_b32_e32 v0, 0x3e7
; SI-NEXT: s_and_b64 vcc, exec, -1
; SI-NEXT: BB1_2: ; %loop
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_cbranch_vccnz BB1_2
; SI-NEXT: BB1_3: ; %UnifiedReturnBlock
; SI-NEXT: s_endpgm
; IR-LABEL: @infinite_loop_ret(
; IR-NEXT: entry:
; IR-NEXT: [[TMP:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x()
; IR-NEXT: [[COND:%.*]] = icmp eq i32 [[TMP]], 1
; IR-NEXT: br i1 [[COND]], label [[LOOP:%.*]], label [[UNIFIEDRETURNBLOCK:%.*]]
; IR: loop:
; IR-NEXT: store volatile i32 999, i32 addrspace(1)* [[OUT:%.*]], align 4
; IR-NEXT: br i1 true, label [[LOOP]], label [[UNIFIEDRETURNBLOCK]]
; IR: UnifiedReturnBlock:
; IR-NEXT: ret void
;
entry:
%tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
%cond = icmp eq i32 %tmp, 1
@@ -54,44 +73,44 @@ return:
ret void
}
; IR-LABEL: @infinite_loops(
; IR: br i1 undef, label %loop1, label %loop2
; IR: loop1:
; IR: store volatile i32 999, i32 addrspace(1)* %out, align 4
; IR: br i1 true, label %loop1, label %DummyReturnBlock
; IR: loop2:
; IR: store volatile i32 888, i32 addrspace(1)* %out, align 4
; IR: br i1 true, label %loop2, label %DummyReturnBlock
; IR: DummyReturnBlock:
; IR: ret void
; SI-LABEL: {{^}}infinite_loops:
; SI: v_mov_b32_e32 [[REG1:v[0-9]+]], 0x3e7
; SI: s_and_b64 vcc, exec, -1
; SI: [[LOOP1:BB[0-9]+_[0-9]+]]: ; %loop1
; SI: s_waitcnt lgkmcnt(0)
; SI: buffer_store_dword [[REG1]]
; SI: s_cbranch_vccnz [[LOOP1]]
; SI: s_branch [[RET:BB[0-9]+_[0-9]+]]
; SI: v_mov_b32_e32 [[REG2:v[0-9]+]], 0x378
; SI: s_and_b64 vcc, exec, -1
; SI: [[LOOP2:BB[0-9]+_[0-9]+]]: ; %loop2
; SI: s_waitcnt lgkmcnt(0)
; SI: buffer_store_dword [[REG2]]
; SI: s_cbranch_vccnz [[LOOP2]]
; SI: [[RET]]: ; %DummyReturnBlock
; SI: s_endpgm
define amdgpu_kernel void @infinite_loops(i32 addrspace(1)* %out) {
; SI-LABEL: infinite_loops:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_cbranch_scc0 BB2_3
; SI-NEXT: ; %bb.1: ; %loop1.preheader
; SI-NEXT: v_mov_b32_e32 v0, 0x3e7
; SI-NEXT: s_and_b64 vcc, exec, -1
; SI-NEXT: BB2_2: ; %loop1
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_cbranch_vccnz BB2_2
; SI-NEXT: s_branch BB2_5
; SI-NEXT: BB2_3:
; SI-NEXT: v_mov_b32_e32 v0, 0x378
; SI-NEXT: s_and_b64 vcc, exec, -1
; SI-NEXT: BB2_4: ; %loop2
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_cbranch_vccnz BB2_4
; SI-NEXT: BB2_5: ; %DummyReturnBlock
; SI-NEXT: s_endpgm
; IR-LABEL: @infinite_loops(
; IR-NEXT: entry:
; IR-NEXT: br i1 undef, label [[LOOP1:%.*]], label [[LOOP2:%.*]]
; IR: loop1:
; IR-NEXT: store volatile i32 999, i32 addrspace(1)* [[OUT:%.*]], align 4
; IR-NEXT: br i1 true, label [[LOOP1]], label [[DUMMYRETURNBLOCK:%.*]]
; IR: loop2:
; IR-NEXT: store volatile i32 888, i32 addrspace(1)* [[OUT]], align 4
; IR-NEXT: br i1 true, label [[LOOP2]], label [[DUMMYRETURNBLOCK]]
; IR: DummyReturnBlock:
; IR-NEXT: ret void
;
entry:
br i1 undef, label %loop1, label %loop2
@@ -104,55 +123,68 @@ loop2:
br label %loop2
}
; IR-LABEL: @infinite_loop_nest_ret(
; IR: br i1 %cond1, label %outer_loop, label %UnifiedReturnBlock
; IR: outer_loop:
; IR: br label %inner_loop
; IR: inner_loop:
; IR: store volatile i32 999, i32 addrspace(1)* %out, align 4
; IR: %cond3 = icmp eq i32 %tmp, 3
; IR: br i1 true, label %TransitionBlock, label %UnifiedReturnBlock
; IR: TransitionBlock:
; IR: br i1 %cond3, label %inner_loop, label %outer_loop
; IR: UnifiedReturnBlock:
; IR: ret void
; SI-LABEL: {{^}}infinite_loop_nest_ret:
; SI: s_cbranch_execz [[RET:BB[0-9]+_[0-9]+]]
; SI: s_mov_b32
; SI: [[OUTER_LOOP:BB[0-9]+_[0-9]+]]: ; %outer_loop
; SI: [[INNER_LOOP:BB[0-9]+_[0-9]+]]: ; %inner_loop
; SI: s_waitcnt expcnt(0)
; SI: v_mov_b32_e32 [[REG:v[0-9]+]], 0x3e7
; SI: s_waitcnt lgkmcnt(0)
; SI: buffer_store_dword [[REG]]
; SI: s_andn2_b64 exec
; SI: s_cbranch_execnz [[INNER_LOOP]]
; SI: s_andn2_b64 exec
; SI: s_cbranch_execnz [[OUTER_LOOP]]
; SI: [[RET]]: ; %UnifiedReturnBlock
; SI: s_endpgm
define amdgpu_kernel void @infinite_loop_nest_ret(i32 addrspace(1)* %out) {
; SI-LABEL: infinite_loop_nest_ret:
; SI: ; %bb.0: ; %entry
; SI-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
; SI-NEXT: s_and_saveexec_b64 s[2:3], vcc
; SI-NEXT: s_cbranch_execz BB3_5
; SI-NEXT: ; %bb.1: ; %outer_loop.preheader
; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9
; SI-NEXT: v_cmp_eq_u32_e32 vcc, 3, v0
; SI-NEXT: v_cmp_ne_u32_e64 s[0:1], 3, v0
; SI-NEXT: s_mov_b64 s[2:3], 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: BB3_2: ; %outer_loop
; SI-NEXT: ; =>This Loop Header: Depth=1
; SI-NEXT: ; Child Loop BB3_3 Depth 2
; SI-NEXT: s_and_b64 s[8:9], exec, vcc
; SI-NEXT: s_or_b64 s[2:3], s[8:9], s[2:3]
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: BB3_3: ; %inner_loop
; SI-NEXT: ; Parent Loop BB3_2 Depth=1
; SI-NEXT: ; => This Inner Loop Header: Depth=2
; SI-NEXT: s_and_b64 s[10:11], exec, s[0:1]
; SI-NEXT: s_or_b64 s[8:9], s[10:11], s[8:9]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, 0x3e7
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz BB3_3
; SI-NEXT: ; %bb.4: ; %Flow
; SI-NEXT: ; in Loop: Header=BB3_2 Depth=1
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: s_andn2_b64 exec, exec, s[2:3]
; SI-NEXT: s_cbranch_execnz BB3_2
; SI-NEXT: BB3_5: ; %UnifiedReturnBlock
; SI-NEXT: s_endpgm
; IR-LABEL: @infinite_loop_nest_ret(
; IR-NEXT: entry:
; IR-NEXT: [[TMP:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x()
; IR-NEXT: [[COND1:%.*]] = icmp eq i32 [[TMP]], 1
; IR-NEXT: br i1 [[COND1]], label [[OUTER_LOOP:%.*]], label [[UNIFIEDRETURNBLOCK:%.*]]
; IR: outer_loop:
; IR-NEXT: br label [[INNER_LOOP:%.*]]
; IR: inner_loop:
; IR-NEXT: store volatile i32 999, i32 addrspace(1)* [[OUT:%.*]], align 4
; IR-NEXT: [[COND3:%.*]] = icmp eq i32 [[TMP]], 3
; IR-NEXT: br i1 true, label [[TRANSITIONBLOCK:%.*]], label [[UNIFIEDRETURNBLOCK]]
; IR: TransitionBlock:
; IR-NEXT: br i1 [[COND3]], label [[INNER_LOOP]], label [[OUTER_LOOP]]
; IR: UnifiedReturnBlock:
; IR-NEXT: ret void
;
entry:
%tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
%cond1 = icmp eq i32 %tmp, 1
br i1 %cond1, label %outer_loop, label %return
outer_loop:
; %cond2 = icmp eq i32 %tmp, 2
; br i1 %cond2, label %outer_loop, label %inner_loop
br label %inner_loop
; %cond2 = icmp eq i32 %tmp, 2
; br i1 %cond2, label %outer_loop, label %inner_loop
br label %inner_loop
inner_loop: ; preds = %LeafBlock, %LeafBlock1
store volatile i32 999, i32 addrspace(1)* %out, align 4


@@ -1,56 +1,71 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: opt -mtriple=amdgcn-- -S -structurizecfg -si-annotate-control-flow %s | FileCheck -check-prefix=OPT %s
; RUN: llc -march=amdgcn -verify-machineinstrs -disable-block-placement < %s | FileCheck -check-prefix=GCN %s
; Uses llvm.amdgcn.break
; OPT-LABEL: @break_loop(
; OPT: bb1:
; OPT: icmp slt i32
; OPT-NEXT: br i1 %cmp0, label %bb4, label %Flow
; OPT: bb4:
; OPT: load volatile
; OPT: icmp slt i32
; OPT: xor i1 %cmp1
; OPT: br label %Flow
; OPT: Flow:
; OPT: call i64 @llvm.amdgcn.if.break.i64(
; OPT: call i1 @llvm.amdgcn.loop.i64(i64
; OPT: br i1 %{{[0-9]+}}, label %bb9, label %bb1
; OPT: bb9:
; OPT: call void @llvm.amdgcn.end.cf.i64(i64
; GCN-LABEL: {{^}}break_loop:
; GCN: s_mov_b64 [[ACCUM_MASK:s\[[0-9]+:[0-9]+\]]], 0{{$}}
; GCN: [[LOOP_ENTRY:BB[0-9]+_[0-9]+]]: ; %bb1
; GCN: s_add_i32 s6, s6, 1
; GCN: s_or_b64 [[INNER_MASK:s\[[0-9]+:[0-9]+\]]], [[INNER_MASK]], exec
; GCN: s_cmp_gt_i32 s6, -1
; GCN: s_cbranch_scc1 [[FLOW:BB[0-9]+_[0-9]+]]
; GCN: ; %bb4
; GCN: buffer_load_dword
; GCN: v_cmp_ge_i32_e32 vcc
; GCN: s_andn2_b64 [[INNER_MASK]], [[INNER_MASK]], exec
; GCN: s_and_b64 [[BROKEN_MASK:s\[[0-9]+:[0-9]+\]]], vcc, exec
; GCN: s_or_b64 [[INNER_MASK]], [[INNER_MASK]], [[BROKEN_MASK]]
; GCN: [[FLOW]]: ; %Flow
; GCN: ; in Loop: Header=BB0_1 Depth=1
; GCN: s_and_b64 [[AND_MASK:s\[[0-9]+:[0-9]+\]]], exec, [[INNER_MASK]]
; GCN-NEXT: s_or_b64 [[ACCUM_MASK]], [[AND_MASK]], [[ACCUM_MASK]]
; GCN-NEXT: s_andn2_b64 exec, exec, [[ACCUM_MASK]]
; GCN-NEXT: s_cbranch_execnz [[LOOP_ENTRY]]
; GCN: ; %bb.4: ; %bb9
; GCN-NEXT: s_endpgm
define amdgpu_kernel void @break_loop(i32 %arg) #0 {
; OPT-LABEL: @break_loop(
; OPT-NEXT: bb:
; OPT-NEXT: [[ID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
; OPT-NEXT: [[MY_TMP:%.*]] = sub i32 [[ID]], [[ARG:%.*]]
; OPT-NEXT: br label [[BB1:%.*]]
; OPT: bb1:
; OPT-NEXT: [[PHI_BROKEN:%.*]] = phi i64 [ [[TMP2:%.*]], [[FLOW:%.*]] ], [ 0, [[BB:%.*]] ]
; OPT-NEXT: [[LSR_IV:%.*]] = phi i32 [ undef, [[BB]] ], [ [[LSR_IV_NEXT:%.*]], [[FLOW]] ]
; OPT-NEXT: [[LSR_IV_NEXT]] = add i32 [[LSR_IV]], 1
; OPT-NEXT: [[CMP0:%.*]] = icmp slt i32 [[LSR_IV_NEXT]], 0
; OPT-NEXT: br i1 [[CMP0]], label [[BB4:%.*]], label [[FLOW]]
; OPT: bb4:
; OPT-NEXT: [[LOAD:%.*]] = load volatile i32, i32 addrspace(1)* undef, align 4
; OPT-NEXT: [[CMP1:%.*]] = icmp slt i32 [[MY_TMP]], [[LOAD]]
; OPT-NEXT: [[TMP0:%.*]] = xor i1 [[CMP1]], true
; OPT-NEXT: br label [[FLOW]]
; OPT: Flow:
; OPT-NEXT: [[TMP1:%.*]] = phi i1 [ [[TMP0]], [[BB4]] ], [ true, [[BB1]] ]
; OPT-NEXT: [[TMP2]] = call i64 @llvm.amdgcn.if.break.i64(i1 [[TMP1]], i64 [[PHI_BROKEN]])
; OPT-NEXT: [[TMP3:%.*]] = call i1 @llvm.amdgcn.loop.i64(i64 [[TMP2]])
; OPT-NEXT: br i1 [[TMP3]], label [[BB9:%.*]], label [[BB1]]
; OPT: bb9:
; OPT-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 [[TMP2]])
; OPT-NEXT: ret void
;
; GCN-LABEL: break_loop:
; GCN: ; %bb.0: ; %bb
; GCN-NEXT: s_load_dword s3, s[0:1], 0x9
; GCN-NEXT: s_mov_b64 s[0:1], 0
; GCN-NEXT: s_mov_b32 s2, -1
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_subrev_i32_e32 v0, vcc, s3, v0
; GCN-NEXT: s_mov_b32 s3, 0xf000
; GCN-NEXT: ; implicit-def: $sgpr4_sgpr5
; GCN-NEXT: ; implicit-def: $sgpr6
; GCN-NEXT: BB0_1: ; %bb1
; GCN-NEXT: ; =>This Inner Loop Header: Depth=1
; GCN-NEXT: s_add_i32 s6, s6, 1
; GCN-NEXT: s_or_b64 s[4:5], s[4:5], exec
; GCN-NEXT: s_cmp_gt_i32 s6, -1
; GCN-NEXT: s_cbranch_scc1 BB0_3
; GCN-NEXT: ; %bb.2: ; %bb4
; GCN-NEXT: ; in Loop: Header=BB0_1 Depth=1
; GCN-NEXT: buffer_load_dword v1, off, s[0:3], 0
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_cmp_ge_i32_e32 vcc, v0, v1
; GCN-NEXT: s_andn2_b64 s[4:5], s[4:5], exec
; GCN-NEXT: s_and_b64 s[8:9], vcc, exec
; GCN-NEXT: s_or_b64 s[4:5], s[4:5], s[8:9]
; GCN-NEXT: BB0_3: ; %Flow
; GCN-NEXT: ; in Loop: Header=BB0_1 Depth=1
; GCN-NEXT: s_and_b64 s[8:9], exec, s[4:5]
; GCN-NEXT: s_or_b64 s[0:1], s[8:9], s[0:1]
; GCN-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GCN-NEXT: s_cbranch_execnz BB0_1
; GCN-NEXT: ; %bb.4: ; %bb9
; GCN-NEXT: s_endpgm
bb:
%id = call i32 @llvm.amdgcn.workitem.id.x()
%tmp = sub i32 %id, %arg
%my.tmp = sub i32 %id, %arg
br label %bb1
bb1:
@@ -61,58 +76,98 @@ bb1:
bb4:
%load = load volatile i32, i32 addrspace(1)* undef, align 4
%cmp1 = icmp slt i32 %tmp, %load
%cmp1 = icmp slt i32 %my.tmp, %load
br i1 %cmp1, label %bb1, label %bb9
bb9:
ret void
}
; OPT-LABEL: @undef_phi_cond_break_loop(
; OPT: bb1:
; OPT-NEXT: %phi.broken = phi i64 [ %0, %Flow ], [ 0, %bb ]
; OPT-NEXT: %lsr.iv = phi i32 [ undef, %bb ], [ %tmp2, %Flow ]
; OPT-NEXT: %lsr.iv.next = add i32 %lsr.iv, 1
; OPT-NEXT: %cmp0 = icmp slt i32 %lsr.iv.next, 0
; OPT-NEXT: br i1 %cmp0, label %bb4, label %Flow
; OPT: bb4:
; OPT-NEXT: %load = load volatile i32, i32 addrspace(1)* undef, align 4
; OPT-NEXT: %cmp1 = icmp sge i32 %tmp, %load
; OPT-NEXT: br label %Flow
; OPT: Flow:
; OPT-NEXT: %tmp2 = phi i32 [ %lsr.iv.next, %bb4 ], [ undef, %bb1 ]
; OPT-NEXT: %tmp3 = phi i1 [ %cmp1, %bb4 ], [ undef, %bb1 ]
; OPT-NEXT: %0 = call i64 @llvm.amdgcn.if.break.i64(i1 %tmp3, i64 %phi.broken)
; OPT-NEXT: %1 = call i1 @llvm.amdgcn.loop.i64(i64 %0)
; OPT-NEXT: br i1 %1, label %bb9, label %bb1
; OPT: bb9: ; preds = %Flow
; OPT-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 %0)
; OPT-NEXT: store volatile i32 7
; OPT-NEXT: ret void
define amdgpu_kernel void @undef_phi_cond_break_loop(i32 %arg) #0 {
; OPT-LABEL: @undef_phi_cond_break_loop(
; OPT-NEXT: bb:
; OPT-NEXT: [[ID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
; OPT-NEXT: [[MY_TMP:%.*]] = sub i32 [[ID]], [[ARG:%.*]]
; OPT-NEXT: br label [[BB1:%.*]]
; OPT: bb1:
; OPT-NEXT: [[PHI_BROKEN:%.*]] = phi i64 [ [[TMP0:%.*]], [[FLOW:%.*]] ], [ 0, [[BB:%.*]] ]
; OPT-NEXT: [[LSR_IV:%.*]] = phi i32 [ undef, [[BB]] ], [ [[MY_TMP2:%.*]], [[FLOW]] ]
; OPT-NEXT: [[LSR_IV_NEXT:%.*]] = add i32 [[LSR_IV]], 1
; OPT-NEXT: [[CMP0:%.*]] = icmp slt i32 [[LSR_IV_NEXT]], 0
; OPT-NEXT: br i1 [[CMP0]], label [[BB4:%.*]], label [[FLOW]]
; OPT: bb4:
; OPT-NEXT: [[LOAD:%.*]] = load volatile i32, i32 addrspace(1)* undef, align 4
; OPT-NEXT: [[CMP1:%.*]] = icmp sge i32 [[MY_TMP]], [[LOAD]]
; OPT-NEXT: br label [[FLOW]]
; OPT: Flow:
; OPT-NEXT: [[MY_TMP2]] = phi i32 [ [[LSR_IV_NEXT]], [[BB4]] ], [ undef, [[BB1]] ]
; OPT-NEXT: [[MY_TMP3:%.*]] = phi i1 [ [[CMP1]], [[BB4]] ], [ undef, [[BB1]] ]
; OPT-NEXT: [[TMP0]] = call i64 @llvm.amdgcn.if.break.i64(i1 [[MY_TMP3]], i64 [[PHI_BROKEN]])
; OPT-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.loop.i64(i64 [[TMP0]])
; OPT-NEXT: br i1 [[TMP1]], label [[BB9:%.*]], label [[BB1]]
; OPT: bb9:
; OPT-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 [[TMP0]])
; OPT-NEXT: store volatile i32 7, i32 addrspace(3)* undef
; OPT-NEXT: ret void
;
; GCN-LABEL: undef_phi_cond_break_loop:
; GCN: ; %bb.0: ; %bb
; GCN-NEXT: s_load_dword s3, s[0:1], 0x9
; GCN-NEXT: s_mov_b64 s[0:1], 0
; GCN-NEXT: s_mov_b32 s2, -1
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_subrev_i32_e32 v0, vcc, s3, v0
; GCN-NEXT: s_mov_b32 s3, 0xf000
; GCN-NEXT: ; implicit-def: $sgpr6_sgpr7
; GCN-NEXT: ; implicit-def: $sgpr4
; GCN-NEXT: BB1_1: ; %bb1
; GCN-NEXT: ; =>This Inner Loop Header: Depth=1
; GCN-NEXT: s_andn2_b64 s[6:7], s[6:7], exec
; GCN-NEXT: s_and_b64 s[8:9], s[0:1], exec
; GCN-NEXT: s_or_b64 s[6:7], s[6:7], s[8:9]
; GCN-NEXT: s_cmp_gt_i32 s4, -1
; GCN-NEXT: s_cbranch_scc1 BB1_3
; GCN-NEXT: ; %bb.2: ; %bb4
; GCN-NEXT: ; in Loop: Header=BB1_1 Depth=1
; GCN-NEXT: buffer_load_dword v1, off, s[0:3], 0
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_cmp_ge_i32_e32 vcc, v0, v1
; GCN-NEXT: s_andn2_b64 s[6:7], s[6:7], exec
; GCN-NEXT: s_and_b64 s[8:9], vcc, exec
; GCN-NEXT: s_or_b64 s[6:7], s[6:7], s[8:9]
; GCN-NEXT: BB1_3: ; %Flow
; GCN-NEXT: ; in Loop: Header=BB1_1 Depth=1
; GCN-NEXT: s_add_i32 s4, s4, 1
; GCN-NEXT: s_and_b64 s[8:9], exec, s[6:7]
; GCN-NEXT: s_or_b64 s[0:1], s[8:9], s[0:1]
; GCN-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GCN-NEXT: s_cbranch_execnz BB1_1
; GCN-NEXT: ; %bb.4: ; %bb9
; GCN-NEXT: s_or_b64 exec, exec, s[0:1]
; GCN-NEXT: v_mov_b32_e32 v0, 7
; GCN-NEXT: s_mov_b32 m0, -1
; GCN-NEXT: ds_write_b32 v0, v0
; GCN-NEXT: s_endpgm
bb:
%id = call i32 @llvm.amdgcn.workitem.id.x()
%tmp = sub i32 %id, %arg
%my.tmp = sub i32 %id, %arg
br label %bb1
bb1: ; preds = %Flow, %bb
%lsr.iv = phi i32 [ undef, %bb ], [ %tmp2, %Flow ]
%lsr.iv = phi i32 [ undef, %bb ], [ %my.tmp2, %Flow ]
%lsr.iv.next = add i32 %lsr.iv, 1
%cmp0 = icmp slt i32 %lsr.iv.next, 0
br i1 %cmp0, label %bb4, label %Flow
bb4: ; preds = %bb1
%load = load volatile i32, i32 addrspace(1)* undef, align 4
%cmp1 = icmp sge i32 %tmp, %load
%cmp1 = icmp sge i32 %my.tmp, %load
br label %Flow
Flow: ; preds = %bb4, %bb1
%tmp2 = phi i32 [ %lsr.iv.next, %bb4 ], [ undef, %bb1 ]
%tmp3 = phi i1 [ %cmp1, %bb4 ], [ undef, %bb1 ]
br i1 %tmp3, label %bb9, label %bb1
%my.tmp2 = phi i32 [ %lsr.iv.next, %bb4 ], [ undef, %bb1 ]
%my.tmp3 = phi i1 [ %cmp1, %bb4 ], [ undef, %bb1 ]
br i1 %my.tmp3, label %bb9, label %bb1
bb9: ; preds = %Flow
store volatile i32 7, i32 addrspace(3)* undef
@@ -122,152 +177,271 @@ bb9: ; preds = %Flow
; FIXME: ConstantExpr compare of address to null folds away
@lds = addrspace(3) global i32 undef
; OPT-LABEL: @constexpr_phi_cond_break_loop(
; OPT: bb1:
; OPT-NEXT: %phi.broken = phi i64 [ %0, %Flow ], [ 0, %bb ]
; OPT-NEXT: %lsr.iv = phi i32 [ undef, %bb ], [ %tmp2, %Flow ]
; OPT-NEXT: %lsr.iv.next = add i32 %lsr.iv, 1
; OPT-NEXT: %cmp0 = icmp slt i32 %lsr.iv.next, 0
; OPT-NEXT: br i1 %cmp0, label %bb4, label %Flow
; OPT: bb4:
; OPT-NEXT: %load = load volatile i32, i32 addrspace(1)* undef, align 4
; OPT-NEXT: %cmp1 = icmp sge i32 %tmp, %load
; OPT-NEXT: br label %Flow
; OPT: Flow:
; OPT-NEXT: %tmp2 = phi i32 [ %lsr.iv.next, %bb4 ], [ undef, %bb1 ]
; OPT-NEXT: %tmp3 = phi i1 [ %cmp1, %bb4 ], [ icmp ne (i32 addrspace(3)* inttoptr (i32 4 to i32 addrspace(3)*), i32 addrspace(3)* @lds), %bb1 ]
; OPT-NEXT: %0 = call i64 @llvm.amdgcn.if.break.i64(i1 %tmp3, i64 %phi.broken)
; OPT-NEXT: %1 = call i1 @llvm.amdgcn.loop.i64(i64 %0)
; OPT-NEXT: br i1 %1, label %bb9, label %bb1
; OPT: bb9: ; preds = %Flow
; OPT-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 %0)
; OPT-NEXT: store volatile i32 7
; OPT-NEXT: ret void
define amdgpu_kernel void @constexpr_phi_cond_break_loop(i32 %arg) #0 {
; OPT-LABEL: @constexpr_phi_cond_break_loop(
; OPT-NEXT: bb:
; OPT-NEXT: [[ID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
; OPT-NEXT: [[MY_TMP:%.*]] = sub i32 [[ID]], [[ARG:%.*]]
; OPT-NEXT: br label [[BB1:%.*]]
; OPT: bb1:
; OPT-NEXT: [[PHI_BROKEN:%.*]] = phi i64 [ [[TMP0:%.*]], [[FLOW:%.*]] ], [ 0, [[BB:%.*]] ]
; OPT-NEXT: [[LSR_IV:%.*]] = phi i32 [ undef, [[BB]] ], [ [[MY_TMP2:%.*]], [[FLOW]] ]
; OPT-NEXT: [[LSR_IV_NEXT:%.*]] = add i32 [[LSR_IV]], 1
; OPT-NEXT: [[CMP0:%.*]] = icmp slt i32 [[LSR_IV_NEXT]], 0
; OPT-NEXT: br i1 [[CMP0]], label [[BB4:%.*]], label [[FLOW]]
; OPT: bb4:
; OPT-NEXT: [[LOAD:%.*]] = load volatile i32, i32 addrspace(1)* undef, align 4
; OPT-NEXT: [[CMP1:%.*]] = icmp sge i32 [[MY_TMP]], [[LOAD]]
; OPT-NEXT: br label [[FLOW]]
; OPT: Flow:
; OPT-NEXT: [[MY_TMP2]] = phi i32 [ [[LSR_IV_NEXT]], [[BB4]] ], [ undef, [[BB1]] ]
; OPT-NEXT: [[MY_TMP3:%.*]] = phi i1 [ [[CMP1]], [[BB4]] ], [ icmp ne (i32 addrspace(3)* inttoptr (i32 4 to i32 addrspace(3)*), i32 addrspace(3)* @lds), [[BB1]] ]
; OPT-NEXT: [[TMP0]] = call i64 @llvm.amdgcn.if.break.i64(i1 [[MY_TMP3]], i64 [[PHI_BROKEN]])
; OPT-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.loop.i64(i64 [[TMP0]])
; OPT-NEXT: br i1 [[TMP1]], label [[BB9:%.*]], label [[BB1]]
; OPT: bb9:
; OPT-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 [[TMP0]])
; OPT-NEXT: store volatile i32 7, i32 addrspace(3)* undef
; OPT-NEXT: ret void
;
; GCN-LABEL: constexpr_phi_cond_break_loop:
; GCN: ; %bb.0: ; %bb
; GCN-NEXT: s_load_dword s3, s[0:1], 0x9
; GCN-NEXT: s_mov_b64 s[0:1], 0
; GCN-NEXT: s_mov_b32 s2, lds@abs32@lo
; GCN-NEXT: s_mov_b32 s6, -1
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_subrev_i32_e32 v0, vcc, s3, v0
; GCN-NEXT: s_mov_b32 s7, 0xf000
; GCN-NEXT: ; implicit-def: $sgpr4_sgpr5
; GCN-NEXT: ; implicit-def: $sgpr3
; GCN-NEXT: BB2_1: ; %bb1
; GCN-NEXT: ; =>This Inner Loop Header: Depth=1
; GCN-NEXT: v_cmp_ne_u32_e64 s[8:9], s2, 4
; GCN-NEXT: s_andn2_b64 s[4:5], s[4:5], exec
; GCN-NEXT: s_and_b64 s[8:9], s[8:9], exec
; GCN-NEXT: s_or_b64 s[4:5], s[4:5], s[8:9]
; GCN-NEXT: s_cmp_gt_i32 s3, -1
; GCN-NEXT: s_cbranch_scc1 BB2_3
; GCN-NEXT: ; %bb.2: ; %bb4
; GCN-NEXT: ; in Loop: Header=BB2_1 Depth=1
; GCN-NEXT: buffer_load_dword v1, off, s[4:7], 0
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_cmp_ge_i32_e32 vcc, v0, v1
; GCN-NEXT: s_andn2_b64 s[4:5], s[4:5], exec
; GCN-NEXT: s_and_b64 s[8:9], vcc, exec
; GCN-NEXT: s_or_b64 s[4:5], s[4:5], s[8:9]
; GCN-NEXT: BB2_3: ; %Flow
; GCN-NEXT: ; in Loop: Header=BB2_1 Depth=1
; GCN-NEXT: s_add_i32 s3, s3, 1
; GCN-NEXT: s_and_b64 s[8:9], exec, s[4:5]
; GCN-NEXT: s_or_b64 s[0:1], s[8:9], s[0:1]
; GCN-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GCN-NEXT: s_cbranch_execnz BB2_1
; GCN-NEXT: ; %bb.4: ; %bb9
; GCN-NEXT: s_or_b64 exec, exec, s[0:1]
; GCN-NEXT: v_mov_b32_e32 v0, 7
; GCN-NEXT: s_mov_b32 m0, -1
; GCN-NEXT: ds_write_b32 v0, v0
; GCN-NEXT: s_endpgm
bb:
%id = call i32 @llvm.amdgcn.workitem.id.x()
%tmp = sub i32 %id, %arg
%my.tmp = sub i32 %id, %arg
br label %bb1
bb1: ; preds = %Flow, %bb
%lsr.iv = phi i32 [ undef, %bb ], [ %tmp2, %Flow ]
%lsr.iv = phi i32 [ undef, %bb ], [ %my.tmp2, %Flow ]
%lsr.iv.next = add i32 %lsr.iv, 1
%cmp0 = icmp slt i32 %lsr.iv.next, 0
br i1 %cmp0, label %bb4, label %Flow
bb4: ; preds = %bb1
%load = load volatile i32, i32 addrspace(1)* undef, align 4
%cmp1 = icmp sge i32 %tmp, %load
%cmp1 = icmp sge i32 %my.tmp, %load
br label %Flow
Flow: ; preds = %bb4, %bb1
%tmp2 = phi i32 [ %lsr.iv.next, %bb4 ], [ undef, %bb1 ]
%tmp3 = phi i1 [ %cmp1, %bb4 ], [ icmp ne (i32 addrspace(3)* inttoptr (i32 4 to i32 addrspace(3)*), i32 addrspace(3)* @lds), %bb1 ]
br i1 %tmp3, label %bb9, label %bb1
%my.tmp2 = phi i32 [ %lsr.iv.next, %bb4 ], [ undef, %bb1 ]
%my.tmp3 = phi i1 [ %cmp1, %bb4 ], [ icmp ne (i32 addrspace(3)* inttoptr (i32 4 to i32 addrspace(3)*), i32 addrspace(3)* @lds), %bb1 ]
br i1 %my.tmp3, label %bb9, label %bb1
bb9: ; preds = %Flow
store volatile i32 7, i32 addrspace(3)* undef
ret void
}
; OPT-LABEL: @true_phi_cond_break_loop(
; OPT: bb1:
; OPT-NEXT: %phi.broken = phi i64 [ %0, %Flow ], [ 0, %bb ]
; OPT-NEXT: %lsr.iv = phi i32 [ undef, %bb ], [ %tmp2, %Flow ]
; OPT-NEXT: %lsr.iv.next = add i32 %lsr.iv, 1
; OPT-NEXT: %cmp0 = icmp slt i32 %lsr.iv.next, 0
; OPT-NEXT: br i1 %cmp0, label %bb4, label %Flow
; OPT: bb4:
; OPT-NEXT: %load = load volatile i32, i32 addrspace(1)* undef, align 4
; OPT-NEXT: %cmp1 = icmp sge i32 %tmp, %load
; OPT-NEXT: br label %Flow
; OPT: Flow:
; OPT-NEXT: %tmp2 = phi i32 [ %lsr.iv.next, %bb4 ], [ undef, %bb1 ]
; OPT-NEXT: %tmp3 = phi i1 [ %cmp1, %bb4 ], [ true, %bb1 ]
; OPT-NEXT: %0 = call i64 @llvm.amdgcn.if.break.i64(i1 %tmp3, i64 %phi.broken)
; OPT-NEXT: %1 = call i1 @llvm.amdgcn.loop.i64(i64 %0)
; OPT-NEXT: br i1 %1, label %bb9, label %bb1
; OPT: bb9: ; preds = %Flow
; OPT-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 %0)
; OPT-NEXT: store volatile i32 7
; OPT-NEXT: ret void
define amdgpu_kernel void @true_phi_cond_break_loop(i32 %arg) #0 {
; OPT-LABEL: @true_phi_cond_break_loop(
; OPT-NEXT: bb:
; OPT-NEXT: [[ID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
; OPT-NEXT: [[MY_TMP:%.*]] = sub i32 [[ID]], [[ARG:%.*]]
; OPT-NEXT: br label [[BB1:%.*]]
; OPT: bb1:
; OPT-NEXT: [[PHI_BROKEN:%.*]] = phi i64 [ [[TMP0:%.*]], [[FLOW:%.*]] ], [ 0, [[BB:%.*]] ]
; OPT-NEXT: [[LSR_IV:%.*]] = phi i32 [ undef, [[BB]] ], [ [[MY_TMP2:%.*]], [[FLOW]] ]
; OPT-NEXT: [[LSR_IV_NEXT:%.*]] = add i32 [[LSR_IV]], 1
; OPT-NEXT: [[CMP0:%.*]] = icmp slt i32 [[LSR_IV_NEXT]], 0
; OPT-NEXT: br i1 [[CMP0]], label [[BB4:%.*]], label [[FLOW]]
; OPT: bb4:
; OPT-NEXT: [[LOAD:%.*]] = load volatile i32, i32 addrspace(1)* undef, align 4
; OPT-NEXT: [[CMP1:%.*]] = icmp sge i32 [[MY_TMP]], [[LOAD]]
; OPT-NEXT: br label [[FLOW]]
; OPT: Flow:
; OPT-NEXT: [[MY_TMP2]] = phi i32 [ [[LSR_IV_NEXT]], [[BB4]] ], [ undef, [[BB1]] ]
; OPT-NEXT: [[MY_TMP3:%.*]] = phi i1 [ [[CMP1]], [[BB4]] ], [ true, [[BB1]] ]
; OPT-NEXT: [[TMP0]] = call i64 @llvm.amdgcn.if.break.i64(i1 [[MY_TMP3]], i64 [[PHI_BROKEN]])
; OPT-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.loop.i64(i64 [[TMP0]])
; OPT-NEXT: br i1 [[TMP1]], label [[BB9:%.*]], label [[BB1]]
; OPT: bb9:
; OPT-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 [[TMP0]])
; OPT-NEXT: store volatile i32 7, i32 addrspace(3)* undef
; OPT-NEXT: ret void
;
; GCN-LABEL: true_phi_cond_break_loop:
; GCN: ; %bb.0: ; %bb
; GCN-NEXT: s_load_dword s3, s[0:1], 0x9
; GCN-NEXT: s_mov_b64 s[0:1], 0
; GCN-NEXT: s_mov_b32 s2, -1
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_subrev_i32_e32 v0, vcc, s3, v0
; GCN-NEXT: s_mov_b32 s3, 0xf000
; GCN-NEXT: ; implicit-def: $sgpr4_sgpr5
; GCN-NEXT: ; implicit-def: $sgpr6
; GCN-NEXT: BB3_1: ; %bb1
; GCN-NEXT: ; =>This Inner Loop Header: Depth=1
; GCN-NEXT: s_or_b64 s[4:5], s[4:5], exec
; GCN-NEXT: s_cmp_gt_i32 s6, -1
; GCN-NEXT: s_cbranch_scc1 BB3_3
; GCN-NEXT: ; %bb.2: ; %bb4
; GCN-NEXT: ; in Loop: Header=BB3_1 Depth=1
; GCN-NEXT: buffer_load_dword v1, off, s[0:3], 0
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_cmp_ge_i32_e32 vcc, v0, v1
; GCN-NEXT: s_andn2_b64 s[4:5], s[4:5], exec
; GCN-NEXT: s_and_b64 s[8:9], vcc, exec
; GCN-NEXT: s_or_b64 s[4:5], s[4:5], s[8:9]
; GCN-NEXT: BB3_3: ; %Flow
; GCN-NEXT: ; in Loop: Header=BB3_1 Depth=1
; GCN-NEXT: s_add_i32 s6, s6, 1
; GCN-NEXT: s_and_b64 s[8:9], exec, s[4:5]
; GCN-NEXT: s_or_b64 s[0:1], s[8:9], s[0:1]
; GCN-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GCN-NEXT: s_cbranch_execnz BB3_1
; GCN-NEXT: ; %bb.4: ; %bb9
; GCN-NEXT: s_or_b64 exec, exec, s[0:1]
; GCN-NEXT: v_mov_b32_e32 v0, 7
; GCN-NEXT: s_mov_b32 m0, -1
; GCN-NEXT: ds_write_b32 v0, v0
; GCN-NEXT: s_endpgm
bb:
%id = call i32 @llvm.amdgcn.workitem.id.x()
%tmp = sub i32 %id, %arg
%my.tmp = sub i32 %id, %arg
br label %bb1
bb1: ; preds = %Flow, %bb
%lsr.iv = phi i32 [ undef, %bb ], [ %tmp2, %Flow ]
%lsr.iv = phi i32 [ undef, %bb ], [ %my.tmp2, %Flow ]
%lsr.iv.next = add i32 %lsr.iv, 1
%cmp0 = icmp slt i32 %lsr.iv.next, 0
br i1 %cmp0, label %bb4, label %Flow
bb4: ; preds = %bb1
%load = load volatile i32, i32 addrspace(1)* undef, align 4
%cmp1 = icmp sge i32 %tmp, %load
%cmp1 = icmp sge i32 %my.tmp, %load
br label %Flow
Flow: ; preds = %bb4, %bb1
%tmp2 = phi i32 [ %lsr.iv.next, %bb4 ], [ undef, %bb1 ]
%tmp3 = phi i1 [ %cmp1, %bb4 ], [ true, %bb1 ]
br i1 %tmp3, label %bb9, label %bb1
%my.tmp2 = phi i32 [ %lsr.iv.next, %bb4 ], [ undef, %bb1 ]
%my.tmp3 = phi i1 [ %cmp1, %bb4 ], [ true, %bb1 ]
br i1 %my.tmp3, label %bb9, label %bb1
bb9: ; preds = %Flow
store volatile i32 7, i32 addrspace(3)* undef
ret void
}
; OPT-LABEL: @false_phi_cond_break_loop(
; OPT: bb1:
; OPT-NEXT: %phi.broken = phi i64 [ %0, %Flow ], [ 0, %bb ]
; OPT-NEXT: %lsr.iv = phi i32 [ undef, %bb ], [ %tmp2, %Flow ]
; OPT-NOT: call
; OPT: br i1 %cmp0, label %bb4, label %Flow
; OPT: bb4:
; OPT-NEXT: %load = load volatile i32, i32 addrspace(1)* undef, align 4
; OPT-NEXT: %cmp1 = icmp sge i32 %tmp, %load
; OPT-NEXT: br label %Flow
; OPT: Flow:
; OPT-NEXT: %tmp2 = phi i32 [ %lsr.iv.next, %bb4 ], [ undef, %bb1 ]
; OPT-NEXT: %tmp3 = phi i1 [ %cmp1, %bb4 ], [ false, %bb1 ]
; OPT-NEXT: %0 = call i64 @llvm.amdgcn.if.break.i64(i1 %tmp3, i64 %phi.broken)
; OPT-NEXT: %1 = call i1 @llvm.amdgcn.loop.i64(i64 %0)
; OPT-NEXT: br i1 %1, label %bb9, label %bb1
; OPT: bb9: ; preds = %Flow
; OPT-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 %0)
; OPT-NEXT: store volatile i32 7
; OPT-NEXT: ret void
define amdgpu_kernel void @false_phi_cond_break_loop(i32 %arg) #0 {
; OPT-LABEL: @false_phi_cond_break_loop(
; OPT-NEXT: bb:
; OPT-NEXT: [[ID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
; OPT-NEXT: [[MY_TMP:%.*]] = sub i32 [[ID]], [[ARG:%.*]]
; OPT-NEXT: br label [[BB1:%.*]]
; OPT: bb1:
; OPT-NEXT: [[PHI_BROKEN:%.*]] = phi i64 [ [[TMP0:%.*]], [[FLOW:%.*]] ], [ 0, [[BB:%.*]] ]
; OPT-NEXT: [[LSR_IV:%.*]] = phi i32 [ undef, [[BB]] ], [ [[MY_TMP2:%.*]], [[FLOW]] ]
; OPT-NEXT: [[LSR_IV_NEXT:%.*]] = add i32 [[LSR_IV]], 1
; OPT-NEXT: [[CMP0:%.*]] = icmp slt i32 [[LSR_IV_NEXT]], 0
; OPT-NEXT: br i1 [[CMP0]], label [[BB4:%.*]], label [[FLOW]]
; OPT: bb4:
; OPT-NEXT: [[LOAD:%.*]] = load volatile i32, i32 addrspace(1)* undef, align 4
; OPT-NEXT: [[CMP1:%.*]] = icmp sge i32 [[MY_TMP]], [[LOAD]]
; OPT-NEXT: br label [[FLOW]]
; OPT: Flow:
; OPT-NEXT: [[MY_TMP2]] = phi i32 [ [[LSR_IV_NEXT]], [[BB4]] ], [ undef, [[BB1]] ]
; OPT-NEXT: [[MY_TMP3:%.*]] = phi i1 [ [[CMP1]], [[BB4]] ], [ false, [[BB1]] ]
; OPT-NEXT: [[TMP0]] = call i64 @llvm.amdgcn.if.break.i64(i1 [[MY_TMP3]], i64 [[PHI_BROKEN]])
; OPT-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.loop.i64(i64 [[TMP0]])
; OPT-NEXT: br i1 [[TMP1]], label [[BB9:%.*]], label [[BB1]]
; OPT: bb9:
; OPT-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 [[TMP0]])
; OPT-NEXT: store volatile i32 7, i32 addrspace(3)* undef
; OPT-NEXT: ret void
;
; GCN-LABEL: false_phi_cond_break_loop:
; GCN: ; %bb.0: ; %bb
; GCN-NEXT: s_load_dword s3, s[0:1], 0x9
; GCN-NEXT: s_mov_b64 s[0:1], 0
; GCN-NEXT: s_mov_b32 s2, -1
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_subrev_i32_e32 v0, vcc, s3, v0
; GCN-NEXT: s_mov_b32 s3, 0xf000
; GCN-NEXT: ; implicit-def: $sgpr4_sgpr5
; GCN-NEXT: ; implicit-def: $sgpr6
; GCN-NEXT: BB4_1: ; %bb1
; GCN-NEXT: ; =>This Inner Loop Header: Depth=1
; GCN-NEXT: s_andn2_b64 s[4:5], s[4:5], exec
; GCN-NEXT: s_cmp_gt_i32 s6, -1
; GCN-NEXT: s_cbranch_scc1 BB4_3
; GCN-NEXT: ; %bb.2: ; %bb4
; GCN-NEXT: ; in Loop: Header=BB4_1 Depth=1
; GCN-NEXT: buffer_load_dword v1, off, s[0:3], 0
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_cmp_ge_i32_e32 vcc, v0, v1
; GCN-NEXT: s_andn2_b64 s[4:5], s[4:5], exec
; GCN-NEXT: s_and_b64 s[8:9], vcc, exec
; GCN-NEXT: s_or_b64 s[4:5], s[4:5], s[8:9]
; GCN-NEXT: BB4_3: ; %Flow
; GCN-NEXT: ; in Loop: Header=BB4_1 Depth=1
; GCN-NEXT: s_add_i32 s6, s6, 1
; GCN-NEXT: s_and_b64 s[8:9], exec, s[4:5]
; GCN-NEXT: s_or_b64 s[0:1], s[8:9], s[0:1]
; GCN-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GCN-NEXT: s_cbranch_execnz BB4_1
; GCN-NEXT: ; %bb.4: ; %bb9
; GCN-NEXT: s_or_b64 exec, exec, s[0:1]
; GCN-NEXT: v_mov_b32_e32 v0, 7
; GCN-NEXT: s_mov_b32 m0, -1
; GCN-NEXT: ds_write_b32 v0, v0
; GCN-NEXT: s_endpgm
bb:
%id = call i32 @llvm.amdgcn.workitem.id.x()
%tmp = sub i32 %id, %arg
%my.tmp = sub i32 %id, %arg
br label %bb1
bb1: ; preds = %Flow, %bb
%lsr.iv = phi i32 [ undef, %bb ], [ %tmp2, %Flow ]
%lsr.iv = phi i32 [ undef, %bb ], [ %my.tmp2, %Flow ]
%lsr.iv.next = add i32 %lsr.iv, 1
%cmp0 = icmp slt i32 %lsr.iv.next, 0
br i1 %cmp0, label %bb4, label %Flow
bb4: ; preds = %bb1
%load = load volatile i32, i32 addrspace(1)* undef, align 4
%cmp1 = icmp sge i32 %tmp, %load
%cmp1 = icmp sge i32 %my.tmp, %load
br label %Flow
Flow: ; preds = %bb4, %bb1
%tmp2 = phi i32 [ %lsr.iv.next, %bb4 ], [ undef, %bb1 ]
%tmp3 = phi i1 [ %cmp1, %bb4 ], [ false, %bb1 ]
br i1 %tmp3, label %bb9, label %bb1
%my.tmp2 = phi i32 [ %lsr.iv.next, %bb4 ], [ undef, %bb1 ]
%my.tmp3 = phi i1 [ %cmp1, %bb4 ], [ false, %bb1 ]
br i1 %my.tmp3, label %bb9, label %bb1
bb9: ; preds = %Flow
store volatile i32 7, i32 addrspace(3)* undef
@@ -277,52 +451,91 @@ bb9: ; preds = %Flow
; Swap order of branches in flow block so that the true phi is
; continue.
; OPT-LABEL: @invert_true_phi_cond_break_loop(
; OPT: bb1:
; OPT-NEXT: %phi.broken = phi i64 [ %1, %Flow ], [ 0, %bb ]
; OPT-NEXT: %lsr.iv = phi i32 [ undef, %bb ], [ %tmp2, %Flow ]
; OPT-NEXT: %lsr.iv.next = add i32 %lsr.iv, 1
; OPT-NEXT: %cmp0 = icmp slt i32 %lsr.iv.next, 0
; OPT-NEXT: br i1 %cmp0, label %bb4, label %Flow
; OPT: bb4:
; OPT-NEXT: %load = load volatile i32, i32 addrspace(1)* undef, align 4
; OPT-NEXT: %cmp1 = icmp sge i32 %tmp, %load
; OPT-NEXT: br label %Flow
; OPT: Flow:
; OPT-NEXT: %tmp2 = phi i32 [ %lsr.iv.next, %bb4 ], [ undef, %bb1 ]
; OPT-NEXT: %tmp3 = phi i1 [ %cmp1, %bb4 ], [ true, %bb1 ]
; OPT-NEXT: %0 = xor i1 %tmp3, true
; OPT-NEXT: %1 = call i64 @llvm.amdgcn.if.break.i64(i1 %0, i64 %phi.broken)
; OPT-NEXT: %2 = call i1 @llvm.amdgcn.loop.i64(i64 %1)
; OPT-NEXT: br i1 %2, label %bb9, label %bb1
; OPT: bb9:
; OPT-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 %1)
; OPT-NEXT: store volatile i32 7, i32 addrspace(3)* undef
; OPT-NEXT: ret void
define amdgpu_kernel void @invert_true_phi_cond_break_loop(i32 %arg) #0 {
; OPT-LABEL: @invert_true_phi_cond_break_loop(
; OPT-NEXT: bb:
; OPT-NEXT: [[ID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
; OPT-NEXT: [[MY_TMP:%.*]] = sub i32 [[ID]], [[ARG:%.*]]
; OPT-NEXT: br label [[BB1:%.*]]
; OPT: bb1:
; OPT-NEXT: [[PHI_BROKEN:%.*]] = phi i64 [ [[TMP1:%.*]], [[FLOW:%.*]] ], [ 0, [[BB:%.*]] ]
; OPT-NEXT: [[LSR_IV:%.*]] = phi i32 [ undef, [[BB]] ], [ [[MY_TMP2:%.*]], [[FLOW]] ]
; OPT-NEXT: [[LSR_IV_NEXT:%.*]] = add i32 [[LSR_IV]], 1
; OPT-NEXT: [[CMP0:%.*]] = icmp slt i32 [[LSR_IV_NEXT]], 0
; OPT-NEXT: br i1 [[CMP0]], label [[BB4:%.*]], label [[FLOW]]
; OPT: bb4:
; OPT-NEXT: [[LOAD:%.*]] = load volatile i32, i32 addrspace(1)* undef, align 4
; OPT-NEXT: [[CMP1:%.*]] = icmp sge i32 [[MY_TMP]], [[LOAD]]
; OPT-NEXT: br label [[FLOW]]
; OPT: Flow:
; OPT-NEXT: [[MY_TMP2]] = phi i32 [ [[LSR_IV_NEXT]], [[BB4]] ], [ undef, [[BB1]] ]
; OPT-NEXT: [[MY_TMP3:%.*]] = phi i1 [ [[CMP1]], [[BB4]] ], [ true, [[BB1]] ]
; OPT-NEXT: [[TMP0:%.*]] = xor i1 [[MY_TMP3]], true
; OPT-NEXT: [[TMP1]] = call i64 @llvm.amdgcn.if.break.i64(i1 [[TMP0]], i64 [[PHI_BROKEN]])
; OPT-NEXT: [[TMP2:%.*]] = call i1 @llvm.amdgcn.loop.i64(i64 [[TMP1]])
; OPT-NEXT: br i1 [[TMP2]], label [[BB9:%.*]], label [[BB1]]
; OPT: bb9:
; OPT-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 [[TMP1]])
; OPT-NEXT: store volatile i32 7, i32 addrspace(3)* undef
; OPT-NEXT: ret void
;
; GCN-LABEL: invert_true_phi_cond_break_loop:
; GCN: ; %bb.0: ; %bb
; GCN-NEXT: s_load_dword s3, s[0:1], 0x9
; GCN-NEXT: s_mov_b64 s[0:1], 0
; GCN-NEXT: s_mov_b32 s2, -1
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_subrev_i32_e32 v0, vcc, s3, v0
; GCN-NEXT: s_mov_b32 s3, 0xf000
; GCN-NEXT: ; implicit-def: $sgpr4_sgpr5
; GCN-NEXT: ; implicit-def: $sgpr6
; GCN-NEXT: BB5_1: ; %bb1
; GCN-NEXT: ; =>This Inner Loop Header: Depth=1
; GCN-NEXT: s_or_b64 s[4:5], s[4:5], exec
; GCN-NEXT: s_cmp_gt_i32 s6, -1
; GCN-NEXT: s_cbranch_scc1 BB5_3
; GCN-NEXT: ; %bb.2: ; %bb4
; GCN-NEXT: ; in Loop: Header=BB5_1 Depth=1
; GCN-NEXT: buffer_load_dword v1, off, s[0:3], 0
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_cmp_ge_i32_e32 vcc, v0, v1
; GCN-NEXT: s_andn2_b64 s[4:5], s[4:5], exec
; GCN-NEXT: s_and_b64 s[8:9], vcc, exec
; GCN-NEXT: s_or_b64 s[4:5], s[4:5], s[8:9]
; GCN-NEXT: BB5_3: ; %Flow
; GCN-NEXT: ; in Loop: Header=BB5_1 Depth=1
; GCN-NEXT: s_add_i32 s6, s6, 1
; GCN-NEXT: s_xor_b64 s[8:9], s[4:5], -1
; GCN-NEXT: s_and_b64 s[8:9], exec, s[8:9]
; GCN-NEXT: s_or_b64 s[0:1], s[8:9], s[0:1]
; GCN-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GCN-NEXT: s_cbranch_execnz BB5_1
; GCN-NEXT: ; %bb.4: ; %bb9
; GCN-NEXT: s_or_b64 exec, exec, s[0:1]
; GCN-NEXT: v_mov_b32_e32 v0, 7
; GCN-NEXT: s_mov_b32 m0, -1
; GCN-NEXT: ds_write_b32 v0, v0
; GCN-NEXT: s_endpgm
bb:
%id = call i32 @llvm.amdgcn.workitem.id.x()
%tmp = sub i32 %id, %arg
%my.tmp = sub i32 %id, %arg
br label %bb1
bb1: ; preds = %Flow, %bb
%lsr.iv = phi i32 [ undef, %bb ], [ %tmp2, %Flow ]
%lsr.iv = phi i32 [ undef, %bb ], [ %my.tmp2, %Flow ]
%lsr.iv.next = add i32 %lsr.iv, 1
%cmp0 = icmp slt i32 %lsr.iv.next, 0
br i1 %cmp0, label %bb4, label %Flow
bb4: ; preds = %bb1
%load = load volatile i32, i32 addrspace(1)* undef, align 4
%cmp1 = icmp sge i32 %tmp, %load
%cmp1 = icmp sge i32 %my.tmp, %load
br label %Flow
Flow: ; preds = %bb4, %bb1
%tmp2 = phi i32 [ %lsr.iv.next, %bb4 ], [ undef, %bb1 ]
%tmp3 = phi i1 [ %cmp1, %bb4 ], [ true, %bb1 ]
br i1 %tmp3, label %bb1, label %bb9
%my.tmp2 = phi i32 [ %lsr.iv.next, %bb4 ], [ undef, %bb1 ]
%my.tmp3 = phi i1 [ %cmp1, %bb4 ], [ true, %bb1 ]
br i1 %my.tmp3, label %bb1, label %bb9
bb9: ; preds = %Flow
store volatile i32 7, i32 addrspace(3)* undef


@@ -1,3 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: opt -mtriple=amdgcn-- -S -structurizecfg -si-annotate-control-flow %s | FileCheck -check-prefix=IR %s
; RUN: llc -march=amdgcn -mcpu=hawaii -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
@@ -6,73 +8,89 @@
; the condition that appears to have no uses until the loop is
; completely processed.
; IR-LABEL: @reduced_nested_loop_conditions(
; IR: bb5:
; IR-NEXT: %phi.broken = phi i64 [ %3, %bb10 ], [ 0, %bb ]
; IR-NEXT: %tmp6 = phi i32 [ 0, %bb ], [ %tmp11, %bb10 ]
; IR-NEXT: %tmp7 = icmp eq i32 %tmp6, 1
; IR-NEXT: %0 = call { i1, i64 } @llvm.amdgcn.if.i64(i1 %tmp7)
; IR-NEXT: %1 = extractvalue { i1, i64 } %0, 0
; IR-NEXT: %2 = extractvalue { i1, i64 } %0, 1
; IR-NEXT: br i1 %1, label %bb8, label %Flow
; IR: bb8:
; IR-NEXT: br label %bb13
; IR: bb10:
; IR-NEXT: %tmp11 = phi i32 [ %6, %Flow ]
; IR-NEXT: %tmp12 = phi i1 [ %5, %Flow ]
; IR-NEXT: %3 = call i64 @llvm.amdgcn.if.break.i64(i1 %tmp12, i64 %phi.broken)
; IR-NEXT: %4 = call i1 @llvm.amdgcn.loop.i64(i64 %3)
; IR-NEXT: br i1 %4, label %bb23, label %bb5
; IR: Flow:
; IR-NEXT: %5 = phi i1 [ %tmp22, %bb4 ], [ true, %bb5 ]
; IR-NEXT: %6 = phi i32 [ %tmp21, %bb4 ], [ undef, %bb5 ]
; IR-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 %2)
; IR-NEXT: br label %bb10
; IR: bb13:
; IR-NEXT: %tmp14 = phi i1 [ %tmp22, %bb3 ], [ true, %bb8 ]
; IR-NEXT: %tmp15 = bitcast i64 %tmp2 to <2 x i32>
; IR-NEXT: br i1 %tmp14, label %bb16, label %bb20
; IR: bb16:
; IR-NEXT: %tmp17 = extractelement <2 x i32> %tmp15, i64 1
; IR-NEXT: %tmp18 = getelementptr inbounds i32, i32 addrspace(3)* undef, i32 %tmp17
; IR-NEXT: %tmp19 = load volatile i32, i32 addrspace(3)* %tmp18
; IR-NEXT: br label %bb20
; IR: bb20:
; IR-NEXT: %tmp21 = phi i32 [ %tmp19, %bb16 ], [ 0, %bb13 ]
; IR-NEXT: %tmp22 = phi i1 [ false, %bb16 ], [ %tmp14, %bb13 ]
; IR-NEXT: br label %bb9
; IR: bb23:
; IR-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 %3)
; IR-NEXT: ret void
; GCN-LABEL: {{^}}reduced_nested_loop_conditions:
; GCN: s_cmp_lg_u32 s{{[0-9]+}}, 1
; GCN-NEXT: s_cbranch_scc0
; FIXME: Should fold to unconditional branch?
; GCN: ; implicit-def
; GCN: s_cbranch_vccnz
; GCN: ds_read_b32
; GCN: [[BB9:BB[0-9]+_[0-9]+]]: ; %bb9
; GCN-NEXT: ; =>This Inner Loop Header: Depth=1
; GCN-NEXT: s_cbranch_vccnz [[BB9]]
define amdgpu_kernel void @reduced_nested_loop_conditions(i64 addrspace(3)* nocapture %arg) #0 {
; GCN-LABEL: reduced_nested_loop_conditions:
; GCN: ; %bb.0: ; %bb
; GCN-NEXT: s_load_dword s0, s[0:1], 0x9
; GCN-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GCN-NEXT: s_mov_b32 m0, -1
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_add_i32_e32 v0, vcc, s0, v0
; GCN-NEXT: ds_read_b64 v[0:1], v0
; GCN-NEXT: s_mov_b32 s0, 0
; GCN-NEXT: s_and_b64 vcc, exec, 0
; GCN-NEXT: BB0_1: ; %bb5
; GCN-NEXT: ; =>This Inner Loop Header: Depth=1
; GCN-NEXT: s_cmp_lg_u32 s0, 1
; GCN-NEXT: s_cbranch_scc0 BB0_3
; GCN-NEXT: ; %bb.2: ; %bb10
; GCN-NEXT: ; in Loop: Header=BB0_1 Depth=1
; GCN-NEXT: ; implicit-def: $sgpr0
; GCN-NEXT: s_cbranch_vccnz BB0_1
; GCN-NEXT: s_branch BB0_5
; GCN-NEXT: BB0_3: ; %bb8
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: ds_read_b32 v0, v0
; GCN-NEXT: s_and_b64 vcc, exec, -1
; GCN-NEXT: BB0_4: ; %bb9
; GCN-NEXT: ; =>This Inner Loop Header: Depth=1
; GCN-NEXT: s_cbranch_vccnz BB0_4
; GCN-NEXT: BB0_5: ; %DummyReturnBlock
; GCN-NEXT: s_endpgm
; IR-LABEL: @reduced_nested_loop_conditions(
; IR-NEXT: bb:
; IR-NEXT: [[MY_TMP:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x() #4
; IR-NEXT: [[MY_TMP1:%.*]] = getelementptr inbounds i64, i64 addrspace(3)* [[ARG:%.*]], i32 [[MY_TMP]]
; IR-NEXT: [[MY_TMP2:%.*]] = load volatile i64, i64 addrspace(3)* [[MY_TMP1]]
; IR-NEXT: br label [[BB5:%.*]]
; IR: bb3:
; IR-NEXT: br i1 true, label [[BB4:%.*]], label [[BB13:%.*]]
; IR: bb4:
; IR-NEXT: br label [[FLOW:%.*]]
; IR: bb5:
; IR-NEXT: [[PHI_BROKEN:%.*]] = phi i64 [ [[TMP3:%.*]], [[BB10:%.*]] ], [ 0, [[BB:%.*]] ]
; IR-NEXT: [[MY_TMP6:%.*]] = phi i32 [ 0, [[BB]] ], [ [[MY_TMP11:%.*]], [[BB10]] ]
; IR-NEXT: [[MY_TMP7:%.*]] = icmp eq i32 [[MY_TMP6]], 1
; IR-NEXT: [[TMP0:%.*]] = call { i1, i64 } @llvm.amdgcn.if.i64(i1 [[MY_TMP7]])
; IR-NEXT: [[TMP1:%.*]] = extractvalue { i1, i64 } [[TMP0]], 0
; IR-NEXT: [[TMP2:%.*]] = extractvalue { i1, i64 } [[TMP0]], 1
; IR-NEXT: br i1 [[TMP1]], label [[BB8:%.*]], label [[FLOW]]
; IR: bb8:
; IR-NEXT: br label [[BB13]]
; IR: bb9:
; IR-NEXT: br i1 false, label [[BB3:%.*]], label [[BB9:%.*]]
; IR: bb10:
; IR-NEXT: [[MY_TMP11]] = phi i32 [ [[TMP6:%.*]], [[FLOW]] ]
; IR-NEXT: [[MY_TMP12:%.*]] = phi i1 [ [[TMP5:%.*]], [[FLOW]] ]
; IR-NEXT: [[TMP3]] = call i64 @llvm.amdgcn.if.break.i64(i1 [[MY_TMP12]], i64 [[PHI_BROKEN]])
; IR-NEXT: [[TMP4:%.*]] = call i1 @llvm.amdgcn.loop.i64(i64 [[TMP3]])
; IR-NEXT: br i1 [[TMP4]], label [[BB23:%.*]], label [[BB5]]
; IR: Flow:
; IR-NEXT: [[TMP5]] = phi i1 [ [[MY_TMP22:%.*]], [[BB4]] ], [ true, [[BB5]] ]
; IR-NEXT: [[TMP6]] = phi i32 [ [[MY_TMP21:%.*]], [[BB4]] ], [ undef, [[BB5]] ]
; IR-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 [[TMP2]])
; IR-NEXT: br label [[BB10]]
; IR: bb13:
; IR-NEXT: [[MY_TMP14:%.*]] = phi i1 [ [[MY_TMP22]], [[BB3]] ], [ true, [[BB8]] ]
; IR-NEXT: [[MY_TMP15:%.*]] = bitcast i64 [[MY_TMP2]] to <2 x i32>
; IR-NEXT: br i1 [[MY_TMP14]], label [[BB16:%.*]], label [[BB20:%.*]]
; IR: bb16:
; IR-NEXT: [[MY_TMP17:%.*]] = extractelement <2 x i32> [[MY_TMP15]], i64 1
; IR-NEXT: [[MY_TMP18:%.*]] = getelementptr inbounds i32, i32 addrspace(3)* undef, i32 [[MY_TMP17]]
; IR-NEXT: [[MY_TMP19:%.*]] = load volatile i32, i32 addrspace(3)* [[MY_TMP18]]
; IR-NEXT: br label [[BB20]]
; IR: bb20:
; IR-NEXT: [[MY_TMP21]] = phi i32 [ [[MY_TMP19]], [[BB16]] ], [ 0, [[BB13]] ]
; IR-NEXT: [[MY_TMP22]] = phi i1 [ false, [[BB16]] ], [ [[MY_TMP14]], [[BB13]] ]
; IR-NEXT: br label [[BB9]]
; IR: bb23:
; IR-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 [[TMP3]])
; IR-NEXT: ret void
;
bb:
%tmp = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%tmp1 = getelementptr inbounds i64, i64 addrspace(3)* %arg, i32 %tmp
%tmp2 = load volatile i64, i64 addrspace(3)* %tmp1
%my.tmp = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%my.tmp1 = getelementptr inbounds i64, i64 addrspace(3)* %arg, i32 %my.tmp
%my.tmp2 = load volatile i64, i64 addrspace(3)* %my.tmp1
br label %bb5
bb3: ; preds = %bb9
@@ -82,9 +100,9 @@ bb4: ; preds = %bb3
br label %bb10
bb5: ; preds = %bb10, %bb
%tmp6 = phi i32 [ 0, %bb ], [ %tmp11, %bb10 ]
%tmp7 = icmp eq i32 %tmp6, 1
br i1 %tmp7, label %bb8, label %bb10
%my.tmp6 = phi i32 [ 0, %bb ], [ %my.tmp11, %bb10 ]
%my.tmp7 = icmp eq i32 %my.tmp6, 1
br i1 %my.tmp7, label %bb8, label %bb10
bb8: ; preds = %bb5
br label %bb13
@@ -93,24 +111,24 @@ bb9: ; preds = %bb20, %bb9
br i1 false, label %bb3, label %bb9
bb10: ; preds = %bb5, %bb4
%tmp11 = phi i32 [ %tmp21, %bb4 ], [ undef, %bb5 ]
%tmp12 = phi i1 [ %tmp22, %bb4 ], [ true, %bb5 ]
br i1 %tmp12, label %bb23, label %bb5
%my.tmp11 = phi i32 [ %my.tmp21, %bb4 ], [ undef, %bb5 ]
%my.tmp12 = phi i1 [ %my.tmp22, %bb4 ], [ true, %bb5 ]
br i1 %my.tmp12, label %bb23, label %bb5
bb13: ; preds = %bb8, %bb3
%tmp14 = phi i1 [ %tmp22, %bb3 ], [ true, %bb8 ]
%tmp15 = bitcast i64 %tmp2 to <2 x i32>
br i1 %tmp14, label %bb16, label %bb20
%my.tmp14 = phi i1 [ %my.tmp22, %bb3 ], [ true, %bb8 ]
%my.tmp15 = bitcast i64 %my.tmp2 to <2 x i32>
br i1 %my.tmp14, label %bb16, label %bb20
bb16: ; preds = %bb13
%tmp17 = extractelement <2 x i32> %tmp15, i64 1
%tmp18 = getelementptr inbounds i32, i32 addrspace(3)* undef, i32 %tmp17
%tmp19 = load volatile i32, i32 addrspace(3)* %tmp18
%my.tmp17 = extractelement <2 x i32> %my.tmp15, i64 1
%my.tmp18 = getelementptr inbounds i32, i32 addrspace(3)* undef, i32 %my.tmp17
%my.tmp19 = load volatile i32, i32 addrspace(3)* %my.tmp18
br label %bb20
bb20: ; preds = %bb16, %bb13
%tmp21 = phi i32 [ %tmp19, %bb16 ], [ 0, %bb13 ]
%tmp22 = phi i1 [ false, %bb16 ], [ %tmp14, %bb13 ]
%my.tmp21 = phi i32 [ %my.tmp19, %bb16 ], [ 0, %bb13 ]
%my.tmp22 = phi i1 [ false, %bb16 ], [ %my.tmp14, %bb13 ]
br label %bb9
bb23: ; preds = %bb10
@@ -118,97 +136,146 @@ bb23: ; preds = %bb10
}
; Earlier version of above, before a run of the structurizer.
; IR-LABEL: @nested_loop_conditions(
; IR: Flow3:
; IR-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 %21)
; IR-NEXT: %0 = call { i1, i64 } @llvm.amdgcn.if.i64(i1 %14)
; IR-NEXT: %1 = extractvalue { i1, i64 } %0, 0
; IR-NEXT: %2 = extractvalue { i1, i64 } %0, 1
; IR-NEXT: br i1 %1, label %bb4.bb13_crit_edge, label %Flow4
; IR: Flow4:
; IR-NEXT: %3 = phi i1 [ true, %bb4.bb13_crit_edge ], [ false, %Flow3 ]
; IR-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 %2)
; IR-NEXT: br label %Flow
; IR: Flow:
; IR-NEXT: %4 = phi i1 [ %3, %Flow4 ], [ true, %bb ]
; IR-NEXT: %5 = call { i1, i64 } @llvm.amdgcn.if.i64(i1 %4)
; IR-NEXT: %6 = extractvalue { i1, i64 } %5, 0
; IR-NEXT: %7 = extractvalue { i1, i64 } %5, 1
; IR-NEXT: br i1 %6, label %bb13, label %bb31
; IR: bb14:
; IR: %tmp15 = icmp eq i32 %tmp1037, 1
; IR-NEXT: %8 = call { i1, i64 } @llvm.amdgcn.if.i64(i1 %tmp15)
; IR: Flow1:
; IR-NEXT: %11 = phi <4 x i32> [ %tmp9, %bb21 ], [ undef, %bb14 ]
; IR-NEXT: %12 = phi i32 [ %tmp10, %bb21 ], [ undef, %bb14 ]
; IR-NEXT: %13 = phi i1 [ %18, %bb21 ], [ true, %bb14 ]
; IR-NEXT: %14 = phi i1 [ %18, %bb21 ], [ false, %bb14 ]
; IR-NEXT: %15 = phi i1 [ false, %bb21 ], [ true, %bb14 ]
; IR-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 %10)
; IR-NEXT: %16 = call i64 @llvm.amdgcn.if.break.i64(i1 %13, i64 %phi.broken)
; IR-NEXT: %17 = call i1 @llvm.amdgcn.loop.i64(i64 %16)
; IR-NEXT: br i1 %17, label %Flow2, label %bb14
; IR: bb21:
; IR: %tmp12 = icmp slt i32 %tmp11, 9
; IR-NEXT: %18 = xor i1 %tmp12, true
; IR-NEXT: br label %Flow1
; IR: Flow2:
; IR-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 %16)
; IR-NEXT: %19 = call { i1, i64 } @llvm.amdgcn.if.i64(i1 %15)
; IR-NEXT: %20 = extractvalue { i1, i64 } %19, 0
; IR-NEXT: %21 = extractvalue { i1, i64 } %19, 1
; IR-NEXT: br i1 %20, label %bb31.loopexit, label %Flow3
; IR: bb31:
; IR-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 %7)
; IR-NEXT: store volatile i32 0, i32 addrspace(1)* undef
; IR-NEXT: ret void
; GCN-LABEL: {{^}}nested_loop_conditions:
; GCN: v_cmp_lt_i32_e32 vcc, 8, v
; GCN: s_and_b64 vcc, exec, vcc
; GCN: s_cbranch_vccnz [[BB31:BB[0-9]+_[0-9]+]]
; GCN: [[BB14:BB[0-9]+_[0-9]+]]: ; %bb14
; GCN: v_cmp_ne_u32_e32 vcc, 1, v
; GCN-NEXT: s_and_b64 vcc, exec, vcc
; GCN-NEXT: s_cbranch_vccnz [[BB31]]
; GCN: [[BB18:BB[0-9]+_[0-9]+]]: ; %bb18
; GCN: buffer_load_dword
; GCN: v_cmp_lt_i32_e32 vcc, 8, v
; GCN-NEXT: s_and_b64 vcc, exec, vcc
; GCN-NEXT: s_cbranch_vccnz [[BB18]]
; GCN: buffer_load_dword
; GCN: buffer_load_dword
; GCN: v_cmp_gt_i32_e32 vcc, 9
; GCN-NEXT: s_and_b64 vcc, exec, vcc
; GCN-NEXT: s_cbranch_vccnz [[BB14]]
; GCN: [[BB31]]:
; GCN: buffer_store_dword
; GCN: s_endpgm
define amdgpu_kernel void @nested_loop_conditions(i64 addrspace(1)* nocapture %arg) #0 {
; GCN-LABEL: nested_loop_conditions:
; GCN: ; %bb.0: ; %bb
; GCN-NEXT: s_mov_b32 s3, 0xf000
; GCN-NEXT: s_mov_b32 s2, -1
; GCN-NEXT: buffer_load_dword v0, off, s[0:3], 0
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_cmp_lt_i32_e32 vcc, 8, v0
; GCN-NEXT: s_and_b64 vcc, exec, vcc
; GCN-NEXT: s_cbranch_vccnz BB1_5
; GCN-NEXT: ; %bb.1: ; %bb14.lr.ph
; GCN-NEXT: buffer_load_dword v0, off, s[0:3], 0
; GCN-NEXT: BB1_2: ; %bb14
; GCN-NEXT: ; =>This Loop Header: Depth=1
; GCN-NEXT: ; Child Loop BB1_3 Depth 2
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_cmp_ne_u32_e32 vcc, 1, v0
; GCN-NEXT: s_and_b64 vcc, exec, vcc
; GCN-NEXT: s_cbranch_vccnz BB1_5
; GCN-NEXT: BB1_3: ; %bb18
; GCN-NEXT: ; Parent Loop BB1_2 Depth=1
; GCN-NEXT: ; => This Inner Loop Header: Depth=2
; GCN-NEXT: buffer_load_dword v0, off, s[0:3], 0
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_cmp_lt_i32_e32 vcc, 8, v0
; GCN-NEXT: s_and_b64 vcc, exec, vcc
; GCN-NEXT: s_cbranch_vccnz BB1_3
; GCN-NEXT: ; %bb.4: ; %bb21
; GCN-NEXT: ; in Loop: Header=BB1_2 Depth=1
; GCN-NEXT: buffer_load_dword v0, off, s[0:3], 0
; GCN-NEXT: buffer_load_dword v1, off, s[0:3], 0
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_cmp_gt_i32_e32 vcc, 9, v1
; GCN-NEXT: s_and_b64 vcc, exec, vcc
; GCN-NEXT: s_cbranch_vccnz BB1_2
; GCN-NEXT: BB1_5: ; %bb31
; GCN-NEXT: v_mov_b32_e32 v0, 0
; GCN-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GCN-NEXT: s_endpgm
; IR-LABEL: @nested_loop_conditions(
; IR-NEXT: bb:
; IR-NEXT: [[MY_TMP:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x() #4
; IR-NEXT: [[MY_TMP1:%.*]] = zext i32 [[MY_TMP]] to i64
; IR-NEXT: [[MY_TMP2:%.*]] = getelementptr inbounds i64, i64 addrspace(1)* [[ARG:%.*]], i64 [[MY_TMP1]]
; IR-NEXT: [[MY_TMP3:%.*]] = load i64, i64 addrspace(1)* [[MY_TMP2]], align 16
; IR-NEXT: [[MY_TMP932:%.*]] = load <4 x i32>, <4 x i32> addrspace(1)* undef, align 16
; IR-NEXT: [[MY_TMP1033:%.*]] = extractelement <4 x i32> [[MY_TMP932]], i64 0
; IR-NEXT: [[MY_TMP1134:%.*]] = load volatile i32, i32 addrspace(1)* undef
; IR-NEXT: [[MY_TMP1235:%.*]] = icmp slt i32 [[MY_TMP1134]], 9
; IR-NEXT: br i1 [[MY_TMP1235]], label [[BB14_LR_PH:%.*]], label [[FLOW:%.*]]
; IR: bb14.lr.ph:
; IR-NEXT: br label [[BB14:%.*]]
; IR: Flow3:
; IR-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 [[TMP21:%.*]])
; IR-NEXT: [[TMP0:%.*]] = call { i1, i64 } @llvm.amdgcn.if.i64(i1 [[TMP14:%.*]])
; IR-NEXT: [[TMP1:%.*]] = extractvalue { i1, i64 } [[TMP0]], 0
; IR-NEXT: [[TMP2:%.*]] = extractvalue { i1, i64 } [[TMP0]], 1
; IR-NEXT: br i1 [[TMP1]], label [[BB4_BB13_CRIT_EDGE:%.*]], label [[FLOW4:%.*]]
; IR: bb4.bb13_crit_edge:
; IR-NEXT: br label [[FLOW4]]
; IR: Flow4:
; IR-NEXT: [[TMP3:%.*]] = phi i1 [ true, [[BB4_BB13_CRIT_EDGE]] ], [ false, [[FLOW3:%.*]] ]
; IR-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 [[TMP2]])
; IR-NEXT: br label [[FLOW]]
; IR: bb13:
; IR-NEXT: br label [[BB31:%.*]]
; IR: Flow:
; IR-NEXT: [[TMP4:%.*]] = phi i1 [ [[TMP3]], [[FLOW4]] ], [ true, [[BB:%.*]] ]
; IR-NEXT: [[TMP5:%.*]] = call { i1, i64 } @llvm.amdgcn.if.i64(i1 [[TMP4]])
; IR-NEXT: [[TMP6:%.*]] = extractvalue { i1, i64 } [[TMP5]], 0
; IR-NEXT: [[TMP7:%.*]] = extractvalue { i1, i64 } [[TMP5]], 1
; IR-NEXT: br i1 [[TMP6]], label [[BB13:%.*]], label [[BB31]]
; IR: bb14:
; IR-NEXT: [[PHI_BROKEN:%.*]] = phi i64 [ [[TMP16:%.*]], [[FLOW1:%.*]] ], [ 0, [[BB14_LR_PH]] ]
; IR-NEXT: [[MY_TMP1037:%.*]] = phi i32 [ [[MY_TMP1033]], [[BB14_LR_PH]] ], [ [[TMP12:%.*]], [[FLOW1]] ]
; IR-NEXT: [[MY_TMP936:%.*]] = phi <4 x i32> [ [[MY_TMP932]], [[BB14_LR_PH]] ], [ [[TMP11:%.*]], [[FLOW1]] ]
; IR-NEXT: [[MY_TMP15:%.*]] = icmp eq i32 [[MY_TMP1037]], 1
; IR-NEXT: [[TMP8:%.*]] = call { i1, i64 } @llvm.amdgcn.if.i64(i1 [[MY_TMP15]])
; IR-NEXT: [[TMP9:%.*]] = extractvalue { i1, i64 } [[TMP8]], 0
; IR-NEXT: [[TMP10:%.*]] = extractvalue { i1, i64 } [[TMP8]], 1
; IR-NEXT: br i1 [[TMP9]], label [[BB16:%.*]], label [[FLOW1]]
; IR: bb16:
; IR-NEXT: [[MY_TMP17:%.*]] = bitcast i64 [[MY_TMP3]] to <2 x i32>
; IR-NEXT: br label [[BB18:%.*]]
; IR: Flow1:
; IR-NEXT: [[TMP11]] = phi <4 x i32> [ [[MY_TMP9:%.*]], [[BB21:%.*]] ], [ undef, [[BB14]] ]
; IR-NEXT: [[TMP12]] = phi i32 [ [[MY_TMP10:%.*]], [[BB21]] ], [ undef, [[BB14]] ]
; IR-NEXT: [[TMP13:%.*]] = phi i1 [ [[TMP18:%.*]], [[BB21]] ], [ true, [[BB14]] ]
; IR-NEXT: [[TMP14]] = phi i1 [ [[TMP18]], [[BB21]] ], [ false, [[BB14]] ]
; IR-NEXT: [[TMP15:%.*]] = phi i1 [ false, [[BB21]] ], [ true, [[BB14]] ]
; IR-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 [[TMP10]])
; IR-NEXT: [[TMP16]] = call i64 @llvm.amdgcn.if.break.i64(i1 [[TMP13]], i64 [[PHI_BROKEN]])
; IR-NEXT: [[TMP17:%.*]] = call i1 @llvm.amdgcn.loop.i64(i64 [[TMP16]])
; IR-NEXT: br i1 [[TMP17]], label [[FLOW2:%.*]], label [[BB14]]
; IR: bb18:
; IR-NEXT: [[MY_TMP19:%.*]] = load volatile i32, i32 addrspace(1)* undef
; IR-NEXT: [[MY_TMP20:%.*]] = icmp slt i32 [[MY_TMP19]], 9
; IR-NEXT: br i1 [[MY_TMP20]], label [[BB21]], label [[BB18]]
; IR: bb21:
; IR-NEXT: [[MY_TMP22:%.*]] = extractelement <2 x i32> [[MY_TMP17]], i64 1
; IR-NEXT: [[MY_TMP23:%.*]] = lshr i32 [[MY_TMP22]], 16
; IR-NEXT: [[MY_TMP24:%.*]] = select i1 undef, i32 undef, i32 [[MY_TMP23]]
; IR-NEXT: [[MY_TMP25:%.*]] = uitofp i32 [[MY_TMP24]] to float
; IR-NEXT: [[MY_TMP26:%.*]] = fmul float [[MY_TMP25]], 0x3EF0001000000000
; IR-NEXT: [[MY_TMP27:%.*]] = fsub float [[MY_TMP26]], undef
; IR-NEXT: [[MY_TMP28:%.*]] = fcmp olt float [[MY_TMP27]], 5.000000e-01
; IR-NEXT: [[MY_TMP29:%.*]] = select i1 [[MY_TMP28]], i64 1, i64 2
; IR-NEXT: [[MY_TMP30:%.*]] = extractelement <4 x i32> [[MY_TMP936]], i64 [[MY_TMP29]]
; IR-NEXT: [[MY_TMP7:%.*]] = zext i32 [[MY_TMP30]] to i64
; IR-NEXT: [[MY_TMP8:%.*]] = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* undef, i64 [[MY_TMP7]]
; IR-NEXT: [[MY_TMP9]] = load <4 x i32>, <4 x i32> addrspace(1)* [[MY_TMP8]], align 16
; IR-NEXT: [[MY_TMP10]] = extractelement <4 x i32> [[MY_TMP9]], i64 0
; IR-NEXT: [[MY_TMP11:%.*]] = load volatile i32, i32 addrspace(1)* undef
; IR-NEXT: [[MY_TMP12:%.*]] = icmp slt i32 [[MY_TMP11]], 9
; IR-NEXT: [[TMP18]] = xor i1 [[MY_TMP12]], true
; IR-NEXT: br label [[FLOW1]]
; IR: Flow2:
; IR-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 [[TMP16]])
; IR-NEXT: [[TMP19:%.*]] = call { i1, i64 } @llvm.amdgcn.if.i64(i1 [[TMP15]])
; IR-NEXT: [[TMP20:%.*]] = extractvalue { i1, i64 } [[TMP19]], 0
; IR-NEXT: [[TMP21]] = extractvalue { i1, i64 } [[TMP19]], 1
; IR-NEXT: br i1 [[TMP20]], label [[BB31_LOOPEXIT:%.*]], label [[FLOW3]]
; IR: bb31.loopexit:
; IR-NEXT: br label [[FLOW3]]
; IR: bb31:
; IR-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 [[TMP7]])
; IR-NEXT: store volatile i32 0, i32 addrspace(1)* undef
; IR-NEXT: ret void
;
bb:
%tmp = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%tmp1 = zext i32 %tmp to i64
%tmp2 = getelementptr inbounds i64, i64 addrspace(1)* %arg, i64 %tmp1
%tmp3 = load i64, i64 addrspace(1)* %tmp2, align 16
%tmp932 = load <4 x i32>, <4 x i32> addrspace(1)* undef, align 16
%tmp1033 = extractelement <4 x i32> %tmp932, i64 0
%tmp1134 = load volatile i32, i32 addrspace(1)* undef
%tmp1235 = icmp slt i32 %tmp1134, 9
br i1 %tmp1235, label %bb14.lr.ph, label %bb13
%my.tmp = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%my.tmp1 = zext i32 %my.tmp to i64
%my.tmp2 = getelementptr inbounds i64, i64 addrspace(1)* %arg, i64 %my.tmp1
%my.tmp3 = load i64, i64 addrspace(1)* %my.tmp2, align 16
%my.tmp932 = load <4 x i32>, <4 x i32> addrspace(1)* undef, align 16
%my.tmp1033 = extractelement <4 x i32> %my.tmp932, i64 0
%my.tmp1134 = load volatile i32, i32 addrspace(1)* undef
%my.tmp1235 = icmp slt i32 %my.tmp1134, 9
br i1 %my.tmp1235, label %bb14.lr.ph, label %bb13
bb14.lr.ph: ; preds = %bb
br label %bb14
@ -220,37 +287,37 @@ bb13: ; preds = %bb4.bb13_crit_edge,
br label %bb31
bb14: ; preds = %bb21, %bb14.lr.ph
%tmp1037 = phi i32 [ %tmp1033, %bb14.lr.ph ], [ %tmp10, %bb21 ]
%tmp936 = phi <4 x i32> [ %tmp932, %bb14.lr.ph ], [ %tmp9, %bb21 ]
%tmp15 = icmp eq i32 %tmp1037, 1
br i1 %tmp15, label %bb16, label %bb31.loopexit
%my.tmp1037 = phi i32 [ %my.tmp1033, %bb14.lr.ph ], [ %my.tmp10, %bb21 ]
%my.tmp936 = phi <4 x i32> [ %my.tmp932, %bb14.lr.ph ], [ %my.tmp9, %bb21 ]
%my.tmp15 = icmp eq i32 %my.tmp1037, 1
br i1 %my.tmp15, label %bb16, label %bb31.loopexit
bb16: ; preds = %bb14
%tmp17 = bitcast i64 %tmp3 to <2 x i32>
%my.tmp17 = bitcast i64 %my.tmp3 to <2 x i32>
br label %bb18
bb18: ; preds = %bb18, %bb16
%tmp19 = load volatile i32, i32 addrspace(1)* undef
%tmp20 = icmp slt i32 %tmp19, 9
br i1 %tmp20, label %bb21, label %bb18
%my.tmp19 = load volatile i32, i32 addrspace(1)* undef
%my.tmp20 = icmp slt i32 %my.tmp19, 9
br i1 %my.tmp20, label %bb21, label %bb18
bb21: ; preds = %bb18
%tmp22 = extractelement <2 x i32> %tmp17, i64 1
%tmp23 = lshr i32 %tmp22, 16
%tmp24 = select i1 undef, i32 undef, i32 %tmp23
%tmp25 = uitofp i32 %tmp24 to float
%tmp26 = fmul float %tmp25, 0x3EF0001000000000
%tmp27 = fsub float %tmp26, undef
%tmp28 = fcmp olt float %tmp27, 5.000000e-01
%tmp29 = select i1 %tmp28, i64 1, i64 2
%tmp30 = extractelement <4 x i32> %tmp936, i64 %tmp29
%tmp7 = zext i32 %tmp30 to i64
%tmp8 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* undef, i64 %tmp7
%tmp9 = load <4 x i32>, <4 x i32> addrspace(1)* %tmp8, align 16
%tmp10 = extractelement <4 x i32> %tmp9, i64 0
%tmp11 = load volatile i32, i32 addrspace(1)* undef
%tmp12 = icmp slt i32 %tmp11, 9
br i1 %tmp12, label %bb14, label %bb4.bb13_crit_edge
%my.tmp22 = extractelement <2 x i32> %my.tmp17, i64 1
%my.tmp23 = lshr i32 %my.tmp22, 16
%my.tmp24 = select i1 undef, i32 undef, i32 %my.tmp23
%my.tmp25 = uitofp i32 %my.tmp24 to float
%my.tmp26 = fmul float %my.tmp25, 0x3EF0001000000000
%my.tmp27 = fsub float %my.tmp26, undef
%my.tmp28 = fcmp olt float %my.tmp27, 5.000000e-01
%my.tmp29 = select i1 %my.tmp28, i64 1, i64 2
%my.tmp30 = extractelement <4 x i32> %my.tmp936, i64 %my.tmp29
%my.tmp7 = zext i32 %my.tmp30 to i64
%my.tmp8 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* undef, i64 %my.tmp7
%my.tmp9 = load <4 x i32>, <4 x i32> addrspace(1)* %my.tmp8, align 16
%my.tmp10 = extractelement <4 x i32> %my.tmp9, i64 0
%my.tmp11 = load volatile i32, i32 addrspace(1)* undef
%my.tmp12 = icmp slt i32 %my.tmp11, 9
br i1 %my.tmp12, label %bb14, label %bb4.bb13_crit_edge
bb31.loopexit: ; preds = %bb14
br label %bb31


@ -1,16 +1,55 @@
; RUN: llc < %s -march=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck --check-prefix=SI --check-prefix=FUNC %s
; RUN: llc < %s -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs | FileCheck --check-prefix=SI --check-prefix=FUNC %s
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -march=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck --check-prefix=SI %s
; RUN: llc < %s -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs | FileCheck --check-prefix=FLAT %s
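; A minimal sketch of the regeneration step for the autogenerated checks in
; this file, assuming the usual in-tree layout (the test path below is an
; assumption) and a built llc on PATH:
;   python3 llvm/utils/update_llc_test_checks.py llvm/test/CodeGen/AMDGPU/si-annotate-cf.ll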
; FUNC-LABEL: {{^}}break_inserted_outside_of_loop:
; SI: [[LOOP_LABEL:[A-Z0-9]+]]:
; Lowered break instruction:
; SI: s_or_b64
; Lowered Loop instruction:
; SI: s_andn2_b64
; s_cbranch_execnz [[LOOP_LABEL]]
; SI: s_endpgm
define amdgpu_kernel void @break_inserted_outside_of_loop(i32 addrspace(1)* %out, i32 %a) {
; SI-LABEL: break_inserted_outside_of_loop:
; SI: ; %bb.0: ; %main_body
; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9
; SI-NEXT: s_load_dword s0, s[0:1], 0xb
; SI-NEXT: v_mbcnt_lo_u32_b32_e64 v0, -1, 0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_and_b32_e32 v0, s0, v0
; SI-NEXT: v_and_b32_e32 v0, 1, v0
; SI-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
; SI-NEXT: s_mov_b64 s[0:1], 0
; SI-NEXT: BB0_1: ; %ENDIF
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_and_b64 s[2:3], exec, vcc
; SI-NEXT: s_or_b64 s[0:1], s[2:3], s[0:1]
; SI-NEXT: s_andn2_b64 exec, exec, s[0:1]
; SI-NEXT: s_cbranch_execnz BB0_1
; SI-NEXT: ; %bb.2: ; %ENDLOOP
; SI-NEXT: s_or_b64 exec, exec, s[0:1]
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: v_mov_b32_e32 v0, 0
; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; SI-NEXT: s_endpgm
;
; FLAT-LABEL: break_inserted_outside_of_loop:
; FLAT: ; %bb.0: ; %main_body
; FLAT-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24
; FLAT-NEXT: s_load_dword s0, s[0:1], 0x2c
; FLAT-NEXT: v_mbcnt_lo_u32_b32 v0, -1, 0
; FLAT-NEXT: s_waitcnt lgkmcnt(0)
; FLAT-NEXT: v_and_b32_e32 v0, s0, v0
; FLAT-NEXT: v_and_b32_e32 v0, 1, v0
; FLAT-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
; FLAT-NEXT: s_mov_b64 s[0:1], 0
; FLAT-NEXT: BB0_1: ; %ENDIF
; FLAT-NEXT: ; =>This Inner Loop Header: Depth=1
; FLAT-NEXT: s_and_b64 s[2:3], exec, vcc
; FLAT-NEXT: s_or_b64 s[0:1], s[2:3], s[0:1]
; FLAT-NEXT: s_andn2_b64 exec, exec, s[0:1]
; FLAT-NEXT: s_cbranch_execnz BB0_1
; FLAT-NEXT: ; %bb.2: ; %ENDLOOP
; FLAT-NEXT: s_or_b64 exec, exec, s[0:1]
; FLAT-NEXT: s_mov_b32 s7, 0xf000
; FLAT-NEXT: s_mov_b32 s6, -1
; FLAT-NEXT: v_mov_b32_e32 v0, 0
; FLAT-NEXT: buffer_store_dword v0, off, s[4:7], 0
; FLAT-NEXT: s_endpgm
main_body:
%tid = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0) #0
%0 = and i32 %a, %tid
@ -25,25 +64,54 @@ ENDIF:
br i1 %1, label %ENDLOOP, label %ENDIF
}
; FUNC-LABEL: {{^}}phi_cond_outside_loop:
; SI: s_mov_b64 [[LEFT:s\[[0-9]+:[0-9]+\]]], 0
; SI: s_mov_b64 [[PHI:s\[[0-9]+:[0-9]+\]]], 0
; SI: ; %else
; SI: v_cmp_eq_u32_e64 [[TMP:s\[[0-9]+:[0-9]+\]]],
; SI: ; %endif
; SI: [[LOOP_LABEL:BB[0-9]+_[0-9]+]]: ; %loop
; SI: s_and_b64 [[TMP1:s\[[0-9]+:[0-9]+\]]], exec, [[PHI]]
; SI: s_or_b64 [[LEFT]], [[TMP1]], [[LEFT]]
; SI: s_andn2_b64 exec, exec, [[LEFT]]
; SI: s_cbranch_execnz [[LOOP_LABEL]]
; SI: s_endpgm
define amdgpu_kernel void @phi_cond_outside_loop(i32 %b) {
; SI-LABEL: phi_cond_outside_loop:
; SI: ; %bb.0: ; %entry
; SI-NEXT: v_mbcnt_lo_u32_b32_e64 v0, -1, 0
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; SI-NEXT: s_mov_b64 s[2:3], 0
; SI-NEXT: s_mov_b64 s[4:5], 0
; SI-NEXT: s_and_saveexec_b64 s[6:7], vcc
; SI-NEXT: s_cbranch_execz BB1_2
; SI-NEXT: ; %bb.1: ; %else
; SI-NEXT: s_load_dword s0, s[0:1], 0x9
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_cmp_eq_u32_e64 s[0:1], s0, 0
; SI-NEXT: s_and_b64 s[4:5], s[0:1], exec
; SI-NEXT: BB1_2: ; %endif
; SI-NEXT: s_or_b64 exec, exec, s[6:7]
; SI-NEXT: BB1_3: ; %loop
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_and_b64 s[0:1], exec, s[4:5]
; SI-NEXT: s_or_b64 s[2:3], s[0:1], s[2:3]
; SI-NEXT: s_andn2_b64 exec, exec, s[2:3]
; SI-NEXT: s_cbranch_execnz BB1_3
; SI-NEXT: ; %bb.4: ; %exit
; SI-NEXT: s_endpgm
;
; FLAT-LABEL: phi_cond_outside_loop:
; FLAT: ; %bb.0: ; %entry
; FLAT-NEXT: v_mbcnt_lo_u32_b32 v0, -1, 0
; FLAT-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; FLAT-NEXT: s_mov_b64 s[2:3], 0
; FLAT-NEXT: s_mov_b64 s[4:5], 0
; FLAT-NEXT: s_and_saveexec_b64 s[6:7], vcc
; FLAT-NEXT: s_cbranch_execz BB1_2
; FLAT-NEXT: ; %bb.1: ; %else
; FLAT-NEXT: s_load_dword s0, s[0:1], 0x24
; FLAT-NEXT: s_waitcnt lgkmcnt(0)
; FLAT-NEXT: v_cmp_eq_u32_e64 s[0:1], s0, 0
; FLAT-NEXT: s_and_b64 s[4:5], s[0:1], exec
; FLAT-NEXT: BB1_2: ; %endif
; FLAT-NEXT: s_or_b64 exec, exec, s[6:7]
; FLAT-NEXT: BB1_3: ; %loop
; FLAT-NEXT: ; =>This Inner Loop Header: Depth=1
; FLAT-NEXT: s_and_b64 s[0:1], exec, s[4:5]
; FLAT-NEXT: s_or_b64 s[2:3], s[0:1], s[2:3]
; FLAT-NEXT: s_andn2_b64 exec, exec, s[2:3]
; FLAT-NEXT: s_cbranch_execnz BB1_3
; FLAT-NEXT: ; %bb.4: ; %exit
; FLAT-NEXT: s_endpgm
entry:
%tid = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0) #0
%0 = icmp eq i32 %tid , 0
@ -67,11 +135,12 @@ exit:
ret void
}
; FIXME: should emit s_endpgm
; CHECK-LABEL: {{^}}switch_unreachable:
; CHECK-NOT: s_endpgm
; CHECK: .Lfunc_end2
define amdgpu_kernel void @switch_unreachable(i32 addrspace(1)* %g, i8 addrspace(3)* %l, i32 %x) nounwind {
; SI-LABEL: switch_unreachable:
; SI: ; %bb.0: ; %centry
;
; FLAT-LABEL: switch_unreachable:
; FLAT: ; %bb.0: ; %centry
centry:
switch i32 %x, label %sw.default [
i32 0, label %sw.bb
@ -90,29 +159,99 @@ sw.epilog:
declare float @llvm.fabs.f32(float) nounwind readnone
; This broke the old AMDIL cfg structurizer
; FUNC-LABEL: {{^}}loop_land_info_assert:
; SI: v_cmp_lt_i32_e64 [[CMP4:s\[[0-9:]+\]]], s{{[0-9]+}}, 4{{$}}
; SI: s_and_b64 [[CMP4M:s\[[0-9]+:[0-9]+\]]], exec, [[CMP4]]
; SI: [[WHILELOOP:BB[0-9]+_[0-9]+]]: ; %while.cond
; SI: s_cbranch_vccz [[FOR_COND_PH:BB[0-9]+_[0-9]+]]
; SI: [[CONVEX_EXIT:BB[0-9_]+]]
; SI: s_mov_b64 vcc,
; SI-NEXT: s_cbranch_vccnz [[ENDPGM:BB[0-9]+_[0-9]+]]
; SI: s_cbranch_vccnz [[WHILELOOP]]
; SI: ; %if.else
; SI: buffer_store_dword
; SI: [[FOR_COND_PH]]: ; %for.cond.preheader
; SI: s_cbranch_vccz [[ENDPGM]]
; SI: [[ENDPGM]]:
; SI-NEXT: s_endpgm
define amdgpu_kernel void @loop_land_info_assert(i32 %c0, i32 %c1, i32 %c2, i32 %c3, i32 %x, i32 %y, i1 %arg) nounwind {
; SI-LABEL: loop_land_info_assert:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: buffer_load_dword v0, off, s[4:7], 0
; SI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x9
; SI-NEXT: s_load_dword s4, s[0:1], 0xc
; SI-NEXT: s_brev_b32 s5, 44
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_cmp_gt_i32_e64 s[0:1], s2, 0
; SI-NEXT: v_cmp_lt_i32_e64 s[2:3], s3, 4
; SI-NEXT: s_or_b64 s[8:9], s[0:1], s[2:3]
; SI-NEXT: s_and_b64 s[0:1], exec, s[2:3]
; SI-NEXT: s_and_b64 s[2:3], exec, s[8:9]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cmp_lt_f32_e64 s[8:9], |v0|, s5
; SI-NEXT: v_mov_b32_e32 v0, 3
; SI-NEXT: BB3_1: ; %while.cond
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_mov_b64 vcc, s[0:1]
; SI-NEXT: s_cbranch_vccz BB3_5
; SI-NEXT: ; %bb.2: ; %convex.exit
; SI-NEXT: ; in Loop: Header=BB3_1 Depth=1
; SI-NEXT: s_mov_b64 vcc, s[2:3]
; SI-NEXT: s_cbranch_vccnz BB3_8
; SI-NEXT: ; %bb.3: ; %if.end
; SI-NEXT: ; in Loop: Header=BB3_1 Depth=1
; SI-NEXT: s_andn2_b64 vcc, exec, s[8:9]
; SI-NEXT: s_cbranch_vccnz BB3_1
; SI-NEXT: ; %bb.4: ; %if.else
; SI-NEXT: ; in Loop: Header=BB3_1 Depth=1
; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; SI-NEXT: s_branch BB3_1
; SI-NEXT: BB3_5: ; %for.cond.preheader
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, 0x3e8
; SI-NEXT: v_cmp_lt_i32_e32 vcc, s4, v0
; SI-NEXT: s_and_b64 vcc, exec, vcc
; SI-NEXT: s_cbranch_vccz BB3_8
; SI-NEXT: ; %bb.6: ; %for.body
; SI-NEXT: s_and_b64 vcc, exec, -1
; SI-NEXT: BB3_7: ; %self.loop
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_cbranch_vccnz BB3_7
; SI-NEXT: BB3_8: ; %DummyReturnBlock
; SI-NEXT: s_endpgm
;
; FLAT-LABEL: loop_land_info_assert:
; FLAT: ; %bb.0: ; %entry
; FLAT-NEXT: s_mov_b32 s7, 0xf000
; FLAT-NEXT: s_mov_b32 s6, -1
; FLAT-NEXT: buffer_load_dword v0, off, s[4:7], 0
; FLAT-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
; FLAT-NEXT: s_load_dword s4, s[0:1], 0x30
; FLAT-NEXT: s_brev_b32 s5, 44
; FLAT-NEXT: s_waitcnt lgkmcnt(0)
; FLAT-NEXT: v_cmp_gt_i32_e64 s[0:1], s2, 0
; FLAT-NEXT: v_cmp_lt_i32_e64 s[2:3], s3, 4
; FLAT-NEXT: s_or_b64 s[8:9], s[0:1], s[2:3]
; FLAT-NEXT: s_and_b64 s[0:1], exec, s[2:3]
; FLAT-NEXT: s_and_b64 s[2:3], exec, s[8:9]
; FLAT-NEXT: s_waitcnt vmcnt(0)
; FLAT-NEXT: v_cmp_lt_f32_e64 s[8:9], |v0|, s5
; FLAT-NEXT: v_mov_b32_e32 v0, 3
; FLAT-NEXT: BB3_1: ; %while.cond
; FLAT-NEXT: ; =>This Inner Loop Header: Depth=1
; FLAT-NEXT: s_mov_b64 vcc, s[0:1]
; FLAT-NEXT: s_cbranch_vccz BB3_5
; FLAT-NEXT: ; %bb.2: ; %convex.exit
; FLAT-NEXT: ; in Loop: Header=BB3_1 Depth=1
; FLAT-NEXT: s_mov_b64 vcc, s[2:3]
; FLAT-NEXT: s_cbranch_vccnz BB3_8
; FLAT-NEXT: ; %bb.3: ; %if.end
; FLAT-NEXT: ; in Loop: Header=BB3_1 Depth=1
; FLAT-NEXT: s_andn2_b64 vcc, exec, s[8:9]
; FLAT-NEXT: s_cbranch_vccnz BB3_1
; FLAT-NEXT: ; %bb.4: ; %if.else
; FLAT-NEXT: ; in Loop: Header=BB3_1 Depth=1
; FLAT-NEXT: buffer_store_dword v0, off, s[4:7], 0
; FLAT-NEXT: s_branch BB3_1
; FLAT-NEXT: BB3_5: ; %for.cond.preheader
; FLAT-NEXT: v_mov_b32_e32 v0, 0x3e8
; FLAT-NEXT: v_cmp_lt_i32_e32 vcc, s4, v0
; FLAT-NEXT: s_and_b64 vcc, exec, vcc
; FLAT-NEXT: s_cbranch_vccz BB3_8
; FLAT-NEXT: ; %bb.6: ; %for.body
; FLAT-NEXT: s_and_b64 vcc, exec, -1
; FLAT-NEXT: BB3_7: ; %self.loop
; FLAT-NEXT: ; =>This Inner Loop Header: Depth=1
; FLAT-NEXT: s_cbranch_vccnz BB3_7
; FLAT-NEXT: BB3_8: ; %DummyReturnBlock
; FLAT-NEXT: s_endpgm
entry:
%cmp = icmp sgt i32 %c0, 0
br label %while.cond.outer


@ -0,0 +1,26 @@
; RUN: llc < %s -march=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck %s
; This testcase was discovered in si-annotate-cf.ll, where none of the
; RUN lines was actually exercising it. See that file's git log for its
; history.
; FIXME: should emit s_endpgm
; CHECK-LABEL: {{^}}switch_unreachable:
; CHECK-NOT: s_endpgm
; CHECK: .Lfunc_end
define amdgpu_kernel void @switch_unreachable(i32 addrspace(1)* %g, i8 addrspace(3)* %l, i32 %x) nounwind {
centry:
switch i32 %x, label %sw.default [
i32 0, label %sw.bb
i32 60, label %sw.bb
]
sw.bb:
unreachable
sw.default:
unreachable
sw.epilog:
ret void
}