; RUN: opt -S -mtriple=amdgcn-- -structurizecfg -si-annotate-control-flow < %s | FileCheck -check-prefix=OPT %s
; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
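;
; The OPT prefix checks the IR after StructurizeCFG and SIAnnotateControlFlow;
; the GCN prefix checks the ISA finally emitted by llc.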
; OPT-LABEL: {{^}}define amdgpu_vs void @multi_else_break(
; OPT: main_body:
; OPT: LOOP.outer:
; OPT: LOOP:
; OPT: [[if:%[0-9]+]] = call { i1, i64 } @llvm.amdgcn.if.i64(
; OPT: [[if_exec:%[0-9]+]] = extractvalue { i1, i64 } [[if]], 1
;
; OPT: Flow:
;
; Ensure two if.break calls, for both the inner and outer loops
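; (breaking out of the inner loop must also update the outer loop's exit
; mask, so each nesting level gets its own if.break call)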
; OPT: call void @llvm.amdgcn.end.cf
; OPT-NEXT: call i64 @llvm.amdgcn.if.break.i64.i64(i1
; OPT-NEXT: call i1 @llvm.amdgcn.loop.i64(i64
; OPT-NEXT: call i64 @llvm.amdgcn.if.break.i64.i64(i1
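;
; A rough sketch of the annotated IR these checks match (value names are
; illustrative, and the exact end.cf type suffix may differ):
;   call void @llvm.amdgcn.end.cf.i64(i64 %if.mask)
;   %inner.mask = call i64 @llvm.amdgcn.if.break.i64.i64(i1 %brk.inner, i64 %inner.phi)
;   %loop.exit = call i1 @llvm.amdgcn.loop.i64(i64 %inner.mask)
;   %outer.mask = call i64 @llvm.amdgcn.if.break.i64.i64(i1 %brk.outer, i64 %outer.phi)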
;
; OPT: Flow1:

; GCN-LABEL: {{^}}multi_else_break:
; GCN: ; %main_body
; GCN: s_mov_b64 [[LEFT_OUTER:s\[[0-9]+:[0-9]+\]]], 0{{$}}

; GCN: [[OUTER_LOOP:BB[0-9]+_[0-9]+]]: ; %LOOP.outer{{$}}
; GCN: s_mov_b64 [[LEFT_INNER:s\[[0-9]+:[0-9]+\]]], 0{{$}}

; GCN: [[INNER_LOOP:BB[0-9]+_[0-9]+]]: ; %LOOP{{$}}
; GCN: s_or_b64 [[BREAK_OUTER:s\[[0-9]+:[0-9]+\]]], [[BREAK_OUTER]], exec
; GCN: s_or_b64 [[BREAK_INNER:s\[[0-9]+:[0-9]+\]]], [[BREAK_INNER]], exec
; GCN: s_and_saveexec_b64 [[SAVE_EXEC:s\[[0-9]+:[0-9]+\]]], vcc
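;
; (s_and_saveexec_b64 saves the old exec mask into [[SAVE_EXEC]] and ands
; exec with vcc, so only lanes taking %ENDIF stay active; the old mask is
; restored at the %Flow join below with s_or_b64)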
; FIXME: duplicate comparison
; GCN: ; %ENDIF
; GCN-DAG: v_cmp_eq_u32_e32 vcc,
; GCN-DAG: v_cmp_ne_u32_e64 [[TMP51NEG:s\[[0-9]+:[0-9]+\]]],
; GCN-DAG: s_andn2_b64 [[BREAK_OUTER]], [[BREAK_OUTER]], exec
; GCN-DAG: s_andn2_b64 [[BREAK_INNER]], [[BREAK_INNER]], exec
; GCN-DAG: s_and_b64 [[TMP_EQ:s\[[0-9]+:[0-9]+\]]], vcc, exec
; GCN-DAG: s_and_b64 [[TMP_NE:s\[[0-9]+:[0-9]+\]]], [[TMP51NEG]], exec
; GCN-DAG: s_or_b64 [[BREAK_OUTER]], [[BREAK_OUTER]], [[TMP_EQ]]
; GCN-DAG: s_or_b64 [[BREAK_INNER]], [[BREAK_INNER]], [[TMP_NE]]

; GCN: ; %Flow
; GCN: s_or_b64 exec, exec, [[SAVE_EXEC]]
; GCN: s_and_b64 [[TMP0:s\[[0-9]+:[0-9]+\]]], exec, [[BREAK_INNER]]
; GCN: s_or_b64 [[TMP0]], [[TMP0]], [[LEFT_INNER]]
; GCN: s_mov_b64 [[LEFT_INNER]], [[TMP0]]
; GCN: s_andn2_b64 exec, exec, [[TMP0]]
; GCN: s_cbranch_execnz [[INNER_LOOP]]
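;
; (the and/or/mov/andn2 sequence above accumulates the lanes that have
; left the inner loop into [[LEFT_INNER]], strips them from exec, and
; branches back while any lane is still active)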
; GCN: ; %Flow2
; GCN: s_or_b64 exec, exec, [[TMP0]]
; GCN: s_and_b64 [[TMP1:s\[[0-9]+:[0-9]+\]]], exec, [[BREAK_OUTER]]
; GCN: s_or_b64 [[TMP1]], [[TMP1]], [[LEFT_OUTER]]
; GCN: s_mov_b64 [[LEFT_OUTER]], [[TMP1]]
; GCN: s_andn2_b64 exec, exec, [[TMP1]]
; GCN: s_cbranch_execnz [[OUTER_LOOP]]
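;
; (the same mask rotation repeats one level up, accumulating into
; [[LEFT_OUTER]] for the outer loop)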
; GCN: ; %IF
; GCN-NEXT: s_endpgm
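;
; In the IR below, %LOOP either exits the whole nest to %IF or continues
; to %ENDIF, which in turn either loops back to %LOOP or breaks to
; %LOOP.outer, so both break levels originate inside the inner loop body.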
define amdgpu_vs void @multi_else_break(<4 x float> %vec, i32 %ub, i32 %cont) {
main_body:
  br label %LOOP.outer

LOOP.outer:                                       ; preds = %ENDIF, %main_body
  %tmp43 = phi i32 [ 0, %main_body ], [ %tmp47, %ENDIF ]
  br label %LOOP

LOOP:                                             ; preds = %ENDIF, %LOOP.outer
  %tmp45 = phi i32 [ %tmp43, %LOOP.outer ], [ %tmp47, %ENDIF ]
  %tmp47 = add i32 %tmp45, 1
  %tmp48 = icmp slt i32 %tmp45, %ub
  br i1 %tmp48, label %ENDIF, label %IF

IF:                                               ; preds = %LOOP
  ret void

ENDIF:                                            ; preds = %LOOP
  %tmp51 = icmp eq i32 %tmp47, %cont
  br i1 %tmp51, label %LOOP, label %LOOP.outer
}

; OPT-LABEL: define amdgpu_kernel void @multi_if_break_loop(
; OPT: llvm.amdgcn.if.break
; OPT: llvm.amdgcn.loop
; OPT: llvm.amdgcn.if.break
; OPT: llvm.amdgcn.end.cf
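;
; (each path out of the switch can break out of the loop, so more than one
; if.break call has to feed the mask tested by llvm.amdgcn.loop)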
; GCN-LABEL: {{^}}multi_if_break_loop:
; GCN: s_mov_b64 [[LEFT:s\[[0-9]+:[0-9]+\]]], 0{{$}}

; GCN: [[LOOP:BB[0-9]+_[0-9]+]]: ; %bb1{{$}}
; GCN: s_mov_b64 [[OLD_LEFT:s\[[0-9]+:[0-9]+\]]], [[LEFT]]

; GCN: ; %LeafBlock1
; GCN: s_mov_b64
; GCN: s_mov_b64 [[BREAK:s\[[0-9]+:[0-9]+\]]], -1{{$}}

; GCN: ; %case1
; GCN: buffer_load_dword [[LOAD2:v[0-9]+]],
; GCN: v_cmp_ge_i32_e32 vcc, {{v[0-9]+}}, [[LOAD2]]
; GCN: s_orn2_b64 [[BREAK]], vcc, exec

; GCN: ; %Flow3
; GCN: s_branch [[FLOW:BB[0-9]+_[0-9]+]]

; GCN: s_mov_b64 [[BREAK]], -1{{$}}

; GCN: [[FLOW]]: ; %Flow

; GCN: ; %case0
; GCN: buffer_load_dword [[LOAD1:v[0-9]+]],
; GCN-DAG: s_andn2_b64 [[BREAK]], [[BREAK]], exec
; GCN-DAG: v_cmp_ge_i32_e32 vcc, {{v[0-9]+}}, [[LOAD1]]
; GCN-DAG: s_and_b64 [[TMP:s\[[0-9]+:[0-9]+\]]], vcc, exec
; GCN: s_or_b64 [[BREAK]], [[BREAK]], [[TMP]]

; GCN: ; %Flow4
; GCN: s_and_b64 [[BREAK]], exec, [[BREAK]]
; GCN: s_or_b64 [[LEFT]], [[BREAK]], [[OLD_LEFT]]
; GCN: s_andn2_b64 exec, exec, [[LEFT]]
; GCN-NEXT: s_cbranch_execnz
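;
; The loop below has three exits to %bb9: the switch default and the
; false edges out of %case0 and %case1.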
define amdgpu_kernel void @multi_if_break_loop(i32 %arg) #0 {
bb:
  %id = call i32 @llvm.amdgcn.workitem.id.x()
  %tmp = sub i32 %id, %arg
  br label %bb1

bb1:
  %lsr.iv = phi i32 [ undef, %bb ], [ %lsr.iv.next, %case0 ], [ %lsr.iv.next, %case1 ]
  %lsr.iv.next = add i32 %lsr.iv, 1
  %cmp0 = icmp slt i32 %lsr.iv.next, 0
  %load0 = load volatile i32, i32 addrspace(1)* undef, align 4
  switch i32 %load0, label %bb9 [
    i32 0, label %case0
    i32 1, label %case1
  ]

case0:
  %load1 = load volatile i32, i32 addrspace(1)* undef, align 4
  %cmp1 = icmp slt i32 %tmp, %load1
  br i1 %cmp1, label %bb1, label %bb9

case1:
  %load2 = load volatile i32, i32 addrspace(1)* undef, align 4
  %cmp2 = icmp slt i32 %tmp, %load2
  br i1 %cmp2, label %bb1, label %bb9

bb9:
  ret void
}
declare i32 @llvm.amdgcn.workitem.id.x() #1

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }