; RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs -amdgpu-s-branch-bits=4 < %s | FileCheck -check-prefix=GCN %s

; FIXME: We should use llvm-mc for this, but we can't even parse our own output.
; See PR33579.
; RUN: llc -march=amdgcn -verify-machineinstrs -amdgpu-s-branch-bits=4 -o %t.o -filetype=obj %s
; RUN: llvm-readobj -r %t.o | FileCheck --check-prefix=OBJ %s

; OBJ: Relocations [
; OBJ-NEXT: ]
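
; The empty relocation list checks that the label-difference expressions used
; by the expanded long branches are resolved at assembly time rather than
; being emitted as relocations.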

; Restrict maximum branch to between +7 and -8 dwords

; Used to emit an always 4 byte instruction. Inline asm always assumes
; each instruction is the maximum size.
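; As a rough sketch of the arithmetic behind these tests: with
; -amdgpu-s-branch-bits=4 the branch offset is a 4-bit signed dword count,
; which gives the +7/-8 dword reach above, and each v_nop_e64 in the inline
; asm blocks below uses the 8-byte (2 dword) VOP3 encoding, so three nops
; account for 24 bytes and four nops for 32 bytes.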
declare void @llvm.amdgcn.s.sleep(i32) #0

declare i32 @llvm.amdgcn.workitem.id.x() #1

; GCN-LABEL: {{^}}uniform_conditional_max_short_forward_branch:
; GCN: s_load_dword [[CND:s[0-9]+]]
; GCN: s_cmp_eq_u32 [[CND]], 0
; GCN-NEXT: s_cbranch_scc1 [[BB3:BB[0-9]+_[0-9]+]]

; GCN-NEXT: ; %bb.1: ; %bb2
; GCN-NEXT: ;;#ASMSTART
; GCN-NEXT: v_nop_e64
; GCN-NEXT: v_nop_e64
; GCN-NEXT: v_nop_e64
; GCN-NEXT: ;;#ASMEND
; GCN-NEXT: s_sleep 0

; GCN-NEXT: [[BB3]]: ; %bb3
; GCN: v_mov_b32_e32 [[V_CND:v[0-9]+]], [[CND]]
; GCN: buffer_store_dword [[V_CND]]
; GCN: s_endpgm
define amdgpu_kernel void @uniform_conditional_max_short_forward_branch(i32 addrspace(1)* %arg, i32 %cnd) #0 {
bb:
  %cmp = icmp eq i32 %cnd, 0
  br i1 %cmp, label %bb3, label %bb2 ; +8 dword branch

bb2:
; 24 bytes
  call void asm sideeffect
   "v_nop_e64
    v_nop_e64
    v_nop_e64", ""() #0
  call void @llvm.amdgcn.s.sleep(i32 0)
  br label %bb3

bb3:
  store volatile i32 %cnd, i32 addrspace(1)* %arg
  ret void
}

; GCN-LABEL: {{^}}uniform_conditional_min_long_forward_branch:
; GCN: s_load_dword [[CND:s[0-9]+]]
; GCN: s_cmp_eq_u32 [[CND]], 0
; GCN-NEXT: s_cbranch_scc0 [[LONGBB:BB[0-9]+_[0-9]+]]

; GCN-NEXT: [[LONG_JUMP:BB[0-9]+_[0-9]+]]: ; %bb0
; GCN-NEXT: s_getpc_b64 vcc
; GCN-NEXT: s_add_u32 vcc_lo, vcc_lo, [[ENDBB:BB[0-9]+_[0-9]+]]-([[LONG_JUMP]]+4)
; GCN-NEXT: s_addc_u32 vcc_hi, vcc_hi, 0
; GCN-NEXT: s_setpc_b64 vcc
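
; The block above is the relaxed form of the out-of-range forward branch: the
; compare is branched on with the inverted condition (s_cbranch_scc0) so the
; short conditional branch only has to reach the nearby %bb2, while the far
; %bb3 is reached through the inserted s_getpc_b64/s_add_u32/s_addc_u32/
; s_setpc_b64 sequence. In the offset expression, s_getpc_b64 is 4 bytes and
; returns the address of the following instruction, so [[LONG_JUMP]]+4 is the
; value it writes to vcc and adding [[ENDBB]]-([[LONG_JUMP]]+4) retargets it
; at the destination block.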

; GCN-NEXT: [[LONGBB]]:
; GCN-NEXT: ;;#ASMSTART
; GCN: v_nop_e64
; GCN: v_nop_e64
; GCN: v_nop_e64
; GCN: v_nop_e64
; GCN-NEXT: ;;#ASMEND

; GCN-NEXT: [[ENDBB]]:
; GCN: v_mov_b32_e32 [[V_CND:v[0-9]+]], [[CND]]
; GCN: buffer_store_dword [[V_CND]]
; GCN: s_endpgm
define amdgpu_kernel void @uniform_conditional_min_long_forward_branch(i32 addrspace(1)* %arg, i32 %cnd) #0 {
bb0:
  %cmp = icmp eq i32 %cnd, 0
  br i1 %cmp, label %bb3, label %bb2 ; +9 dword branch

bb2:
; 32 bytes
  call void asm sideeffect
   "v_nop_e64
    v_nop_e64
    v_nop_e64
    v_nop_e64", ""() #0
  br label %bb3

bb3:
  store volatile i32 %cnd, i32 addrspace(1)* %arg
  ret void
}

; GCN-LABEL: {{^}}uniform_conditional_min_long_forward_vcnd_branch:
; GCN: s_load_dword [[CND:s[0-9]+]]
; GCN-DAG: v_mov_b32_e32 [[V_CND:v[0-9]+]], [[CND]]
; GCN-DAG: v_cmp_eq_f32_e64 [[UNMASKED:s\[[0-9]+:[0-9]+\]]], [[CND]], 0
; GCN-DAG: s_and_b64 vcc, exec, [[UNMASKED]]
; GCN: s_cbranch_vccz [[LONGBB:BB[0-9]+_[0-9]+]]
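
; The s_and_b64 with exec matters because the v_cmp result can have bits set
; for inactive lanes; masking with exec first ensures the scalar
; s_cbranch_vccz only sees the lanes that are actually executing.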

; GCN-NEXT: [[LONG_JUMP:BB[0-9]+_[0-9]+]]: ; %bb0
; GCN-NEXT: s_getpc_b64 vcc
; GCN-NEXT: s_add_u32 vcc_lo, vcc_lo, [[ENDBB:BB[0-9]+_[0-9]+]]-([[LONG_JUMP]]+4)
; GCN-NEXT: s_addc_u32 vcc_hi, vcc_hi, 0
; GCN-NEXT: s_setpc_b64 vcc

; GCN-NEXT: [[LONGBB]]:
; GCN: v_nop_e64
; GCN: v_nop_e64
; GCN: v_nop_e64
; GCN: v_nop_e64

; GCN: [[ENDBB]]:
; GCN: buffer_store_dword [[V_CND]]
; GCN: s_endpgm
define amdgpu_kernel void @uniform_conditional_min_long_forward_vcnd_branch(float addrspace(1)* %arg, float %cnd) #0 {
bb0:
  %cmp = fcmp oeq float %cnd, 0.0
  br i1 %cmp, label %bb3, label %bb2 ; + 8 dword branch

bb2:
  call void asm sideeffect " ; 32 bytes
    v_nop_e64
    v_nop_e64
    v_nop_e64
    v_nop_e64", ""() #0
  br label %bb3

bb3:
  store volatile float %cnd, float addrspace(1)* %arg
  ret void
}

; GCN-LABEL: {{^}}min_long_forward_vbranch:

; GCN: buffer_load_dword
; GCN: v_cmp_ne_u32_e32 vcc, 0, v{{[0-9]+}}
; GCN: s_and_saveexec_b64 [[SAVE:s\[[0-9]+:[0-9]+\]]], vcc

; GCN: v_nop_e64
; GCN: v_nop_e64
; GCN: v_nop_e64
; GCN: v_nop_e64

; GCN: s_or_b64 exec, exec, [[SAVE]]
; GCN: buffer_store_dword
; GCN: s_endpgm
define amdgpu_kernel void @min_long_forward_vbranch(i32 addrspace(1)* %arg) #0 {
bb:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = zext i32 %tid to i64
  %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 %tid.ext
  %load = load volatile i32, i32 addrspace(1)* %gep
  %cmp = icmp eq i32 %load, 0
  br i1 %cmp, label %bb3, label %bb2 ; + 8 dword branch

bb2:
  call void asm sideeffect " ; 32 bytes
    v_nop_e64
    v_nop_e64
    v_nop_e64
    v_nop_e64", ""() #0
  br label %bb3

bb3:
  store volatile i32 %load, i32 addrspace(1)* %gep
  ret void
}

; GCN-LABEL: {{^}}long_backward_sbranch:
; GCN: s_mov_b32 [[LOOPIDX:s[0-9]+]], 0{{$}}

; GCN: [[LOOPBB:BB[0-9]+_[0-9]+]]: ; %bb2
; GCN-NEXT: ; =>This Inner Loop Header: Depth=1
; GCN-NEXT: s_add_i32 [[INC:s[0-9]+]], [[LOOPIDX]], 1
; GCN-NEXT: s_cmp_lt_i32 [[INC]], 10

; GCN-NEXT: ;;#ASMSTART
; GCN-NEXT: v_nop_e64
; GCN-NEXT: v_nop_e64
; GCN-NEXT: v_nop_e64
; GCN-NEXT: ;;#ASMEND

; GCN-NEXT: s_cbranch_scc0 [[ENDBB:BB[0-9]+_[0-9]+]]

; GCN-NEXT: [[LONG_JUMP:BB[0-9]+_[0-9]+]]: ; %bb2
; GCN-NEXT: ; in Loop: Header=[[LOOPBB]] Depth=1
; GCN-NEXT: s_getpc_b64 vcc
; GCN-NEXT: s_sub_u32 vcc_lo, vcc_lo, ([[LONG_JUMP]]+4)-[[LOOPBB]]
; GCN-NEXT: s_subb_u32 vcc_hi, vcc_hi, 0
; GCN-NEXT: s_setpc_b64 vcc
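
; Same expansion as the forward cases, but with a negative displacement: vcc
; gets ([[LONG_JUMP]]+4) from s_getpc_b64 and is then reduced by
; ([[LONG_JUMP]]+4)-[[LOOPBB]], leaving the loop header address for
; s_setpc_b64.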

; GCN-NEXT: [[ENDBB]]:
; GCN-NEXT: s_endpgm
define amdgpu_kernel void @long_backward_sbranch(i32 addrspace(1)* %arg) #0 {
bb:
  br label %bb2

bb2:
  %loop.idx = phi i32 [ 0, %bb ], [ %inc, %bb2 ]
; 24 bytes
  call void asm sideeffect
   "v_nop_e64
    v_nop_e64
    v_nop_e64", ""() #0
  %inc = add nsw i32 %loop.idx, 1 ; add cost 4
  %cmp = icmp slt i32 %inc, 10 ; condition cost = 8
  br i1 %cmp, label %bb2, label %bb3 ; -

bb3:
  ret void
}

; Requires expansion of the unconditional branch from %bb2 to %bb4 (and
; expansion of the conditional branch from %bb0 to %bb3).

; GCN-LABEL: {{^}}uniform_unconditional_min_long_forward_branch:
; GCN: s_cmp_eq_u32
; GCN-NEXT: s_cbranch_scc0 [[BB2:BB[0-9]+_[0-9]+]]

; GCN-NEXT: [[LONG_JUMP0:BB[0-9]+_[0-9]+]]: ; %bb0
; GCN-NEXT: s_getpc_b64 vcc
; GCN-NEXT: s_add_u32 vcc_lo, vcc_lo, [[BB3:BB[0-9]_[0-9]+]]-([[LONG_JUMP0]]+4)
; GCN-NEXT: s_addc_u32 vcc_hi, vcc_hi, 0{{$}}
; GCN-NEXT: s_setpc_b64 vcc

; GCN-NEXT: [[BB2]]: ; %bb2
; GCN: v_mov_b32_e32 [[BB2_K:v[0-9]+]], 17
; GCN: buffer_store_dword [[BB2_K]]

; GCN-NEXT: [[LONG_JUMP1:BB[0-9]+_[0-9]+]]: ; %bb2
; GCN-NEXT: s_getpc_b64 vcc
; GCN-NEXT: s_add_u32 vcc_lo, vcc_lo, [[BB4:BB[0-9]_[0-9]+]]-([[LONG_JUMP1]]+4)
; GCN-NEXT: s_addc_u32 vcc_hi, vcc_hi, 0{{$}}
; GCN-NEXT: s_setpc_b64 vcc

; GCN: [[BB3]]: ; %bb3
; GCN: v_nop_e64
; GCN: v_nop_e64
; GCN: v_nop_e64
; GCN: v_nop_e64
; GCN: ;;#ASMEND

; GCN-NEXT: [[BB4]]: ; %bb4
; GCN: v_mov_b32_e32 [[BB4_K:v[0-9]+]], 63
; GCN: buffer_store_dword [[BB4_K]]
; GCN-NEXT: s_endpgm
; GCN-NEXT: .Lfunc_end{{[0-9]+}}:
define amdgpu_kernel void @uniform_unconditional_min_long_forward_branch(i32 addrspace(1)* %arg, i32 %arg1) {
bb0:
  %tmp = icmp ne i32 %arg1, 0
  br i1 %tmp, label %bb2, label %bb3

bb2:
  store volatile i32 17, i32 addrspace(1)* undef
  br label %bb4

bb3:
; 32 byte asm
  call void asm sideeffect
   "v_nop_e64
    v_nop_e64
    v_nop_e64
    v_nop_e64", ""() #0
  br label %bb4

bb4:
  store volatile i32 63, i32 addrspace(1)* %arg
  ret void
}

; GCN-LABEL: {{^}}uniform_unconditional_min_long_backward_branch:
; GCN-NEXT: ; %bb.0: ; %entry

; GCN-NEXT: [[LOOP:BB[0-9]_[0-9]+]]: ; %loop
; GCN-NEXT: ; =>This Inner Loop Header: Depth=1
; GCN-NEXT: ;;#ASMSTART
; GCN-NEXT: v_nop_e64
; GCN-NEXT: v_nop_e64
; GCN-NEXT: v_nop_e64
; GCN-NEXT: v_nop_e64
; GCN-NEXT: ;;#ASMEND

; GCN-NEXT: [[LONGBB:BB[0-9]+_[0-9]+]]: ; %loop
; GCN-NEXT: ; in Loop: Header=[[LOOP]] Depth=1
; GCN-NEXT: s_getpc_b64 vcc
; GCN-NEXT: s_sub_u32 vcc_lo, vcc_lo, ([[LONGBB]]+4)-[[LOOP]]
; GCN-NEXT: s_subb_u32 vcc_hi, vcc_hi, 0{{$}}
; GCN-NEXT: s_setpc_b64 vcc
; GCN-NEXT: .Lfunc_end{{[0-9]+}}:
define amdgpu_kernel void @uniform_unconditional_min_long_backward_branch(i32 addrspace(1)* %arg, i32 %arg1) {
entry:
  br label %loop

loop:
; 32 byte asm
  call void asm sideeffect
   "v_nop_e64
    v_nop_e64
    v_nop_e64
    v_nop_e64", ""() #0
  br label %loop
}

; Expansion of the branch from %bb1 to %bb3 introduces the need to expand the
; branch from %bb0 to %bb2 as well.

; GCN-LABEL: {{^}}expand_requires_expand:
; GCN-NEXT: ; %bb.0: ; %bb0
; GCN: s_load_dword
; GCN: s_cmp_lt_i32 s{{[0-9]+}}, 0{{$}}
; GCN-NEXT: s_cbranch_scc0 [[BB1:BB[0-9]+_[0-9]+]]

; GCN-NEXT: [[LONGBB0:BB[0-9]+_[0-9]+]]: ; %bb0
; GCN-NEXT: s_getpc_b64 vcc
; GCN-NEXT: s_add_u32 vcc_lo, vcc_lo, [[BB2:BB[0-9]_[0-9]+]]-([[LONGBB0]]+4)
; GCN-NEXT: s_addc_u32 vcc_hi, vcc_hi, 0{{$}}
; GCN-NEXT: s_setpc_b64 vcc

; GCN-NEXT: [[BB1]]: ; %bb1
; GCN-NEXT: s_load_dword
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_cmp_eq_u32 s{{[0-9]+}}, 3{{$}}
; GCN-NEXT: s_cbranch_scc0 [[BB2:BB[0-9]_[0-9]+]]

; GCN-NEXT: [[LONGBB1:BB[0-9]+_[0-9]+]]: ; %bb1
; GCN-NEXT: s_getpc_b64 vcc
; GCN-NEXT: s_add_u32 vcc_lo, vcc_lo, [[BB3:BB[0-9]+_[0-9]+]]-([[LONGBB1]]+4)
; GCN-NEXT: s_addc_u32 vcc_hi, vcc_hi, 0{{$}}
; GCN-NEXT: s_setpc_b64 vcc

; GCN-NEXT: [[BB2]]: ; %bb2
; GCN-NEXT: ;;#ASMSTART
; GCN-NEXT: v_nop_e64
; GCN-NEXT: v_nop_e64
; GCN-NEXT: v_nop_e64
; GCN-NEXT: v_nop_e64
; GCN-NEXT: ;;#ASMEND

; GCN-NEXT: [[BB3]]: ; %bb3
; GCN-NEXT: ;;#ASMSTART
; GCN-NEXT: v_nop_e64
; GCN-NEXT: ;;#ASMEND
; GCN-NEXT: ;;#ASMSTART
; GCN-NEXT: v_nop_e64
; GCN-NEXT: ;;#ASMEND
; GCN-NEXT: s_endpgm
define amdgpu_kernel void @expand_requires_expand(i32 %cond0) #0 {
bb0:
  %tmp = tail call i32 @llvm.amdgcn.workitem.id.x() #0
  %cmp0 = icmp slt i32 %cond0, 0
  br i1 %cmp0, label %bb2, label %bb1

bb1:
  %val = load volatile i32, i32 addrspace(4)* undef
  %cmp1 = icmp eq i32 %val, 3
  br i1 %cmp1, label %bb3, label %bb2

bb2:
  call void asm sideeffect
   "v_nop_e64
    v_nop_e64
    v_nop_e64
    v_nop_e64", ""() #0
  br label %bb3

bb3:
; These NOPs prevent tail-duplication-based outlining
; from firing, which would remove the need to expand the branches and defeat
; this test.
  call void asm sideeffect
   "v_nop_e64", ""() #0
  call void asm sideeffect
   "v_nop_e64", ""() #0
  ret void
}

; Requires expansion of the required skip branch.

; GCN-LABEL: {{^}}uniform_inside_divergent:
; GCN: v_cmp_gt_u32_e32 vcc, 16, v{{[0-9]+}}
; GCN-NEXT: s_and_saveexec_b64 [[MASK:s\[[0-9]+:[0-9]+\]]], vcc
; GCN-NEXT: ; mask branch [[ENDIF:BB[0-9]+_[0-9]+]]
; GCN-NEXT: s_cbranch_execnz [[IF:BB[0-9]+_[0-9]+]]

; GCN-NEXT: [[LONGBB:BB[0-9]+_[0-9]+]]: ; %entry
; GCN-NEXT: s_getpc_b64 vcc
; GCN-NEXT: s_add_u32 vcc_lo, vcc_lo, [[BB2:BB[0-9]_[0-9]+]]-([[LONGBB]]+4)
; GCN-NEXT: s_addc_u32 vcc_hi, vcc_hi, 0{{$}}
; GCN-NEXT: s_setpc_b64 vcc
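
; Here it is the skip branch of the mask branch that needs relaxing:
; s_cbranch_execnz takes the %if block while any lanes are still active, and
; the fall-through long-jump block above covers the far skip to %endif.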

; GCN-NEXT: [[IF]]: ; %if
; GCN: buffer_store_dword
; GCN: s_cmp_lg_u32
; GCN: s_cbranch_scc1 [[ENDIF]]

; GCN-NEXT: ; %bb.2: ; %if_uniform
; GCN: buffer_store_dword

; GCN-NEXT: [[ENDIF]]: ; %endif
; GCN-NEXT: s_or_b64 exec, exec, [[MASK]]
; GCN-NEXT: s_sleep 5
; GCN-NEXT: s_endpgm
define amdgpu_kernel void @uniform_inside_divergent(i32 addrspace(1)* %out, i32 %cond) #0 {
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %d_cmp = icmp ult i32 %tid, 16
  br i1 %d_cmp, label %if, label %endif

if:
  store i32 0, i32 addrspace(1)* %out
  %u_cmp = icmp eq i32 %cond, 0
  br i1 %u_cmp, label %if_uniform, label %endif

if_uniform:
  store i32 1, i32 addrspace(1)* %out
  br label %endif

endif:
; layout can remove the split branch if it can copy the return block.
; This call makes the return block long enough that it doesn't get copied.
  call void @llvm.amdgcn.s.sleep(i32 5)
  ret void
}

; si_mask_branch

; GCN-LABEL: {{^}}analyze_mask_branch:
; GCN: v_cmp_lt_f32_e32 vcc
; GCN-NEXT: s_and_saveexec_b64 [[MASK:s\[[0-9]+:[0-9]+\]]], vcc
; GCN-NEXT: ; mask branch [[RET:BB[0-9]+_[0-9]+]]

; GCN-NEXT: [[LOOP_BODY:BB[0-9]+_[0-9]+]]: ; %loop_body
; GCN: ;;#ASMSTART
; GCN: v_nop_e64
; GCN: v_nop_e64
; GCN: v_nop_e64
; GCN: v_nop_e64
; GCN: v_nop_e64
; GCN: v_nop_e64
; GCN: ;;#ASMEND

; GCN-NEXT: [[LONGBB:BB[0-9]+_[0-9]+]]: ; %loop_body
; GCN-NEXT: ; in Loop: Header=[[LOOP_BODY]] Depth=1
; GCN-NEXT: s_getpc_b64 vcc
; GCN-NEXT: s_sub_u32 vcc_lo, vcc_lo, ([[LONGBB]]+4)-[[LOOP_BODY]]
; GCN-NEXT: s_subb_u32 vcc_hi, vcc_hi, 0
; GCN-NEXT: s_setpc_b64 vcc

; GCN-NEXT: [[RET]]: ; %ret
; GCN-NEXT: s_or_b64 exec, exec, [[MASK]]
; GCN: buffer_store_dword
; GCN-NEXT: s_endpgm
define amdgpu_kernel void @analyze_mask_branch() #0 {
entry:
  %reg = call float asm sideeffect "v_mov_b32_e64 $0, 0", "=v"()
  %cmp0 = fcmp ogt float %reg, 0.000000e+00
  br i1 %cmp0, label %loop, label %ret

loop:
  %phi = phi float [ 0.000000e+00, %loop_body ], [ 1.000000e+00, %entry ]
  call void asm sideeffect
   "v_nop_e64
    v_nop_e64", ""() #0
  %cmp1 = fcmp olt float %phi, 8.0
  br i1 %cmp1, label %loop_body, label %ret

loop_body:
  call void asm sideeffect
   "v_nop_e64
    v_nop_e64
    v_nop_e64
    v_nop_e64", ""() #0
  br label %loop

ret:
  store volatile i32 7, i32 addrspace(1)* undef
  ret void
}

; GCN-LABEL: {{^}}long_branch_hang:
; GCN: s_cmp_lt_i32 s{{[0-9]+}}, 6
; GCN-NEXT: s_cbranch_scc1 {{BB[0-9]+_[0-9]+}}
; GCN-NEXT: s_branch [[LONG_BR_0:BB[0-9]+_[0-9]+]]
; GCN-NEXT: BB{{[0-9]+_[0-9]+}}:

; GCN: s_add_u32 vcc_lo, vcc_lo, [[LONG_BR_DEST0:BB[0-9]+_[0-9]+]]-(
; GCN: s_setpc_b64

; GCN-NEXT: [[LONG_BR_0]]:
; GCN-DAG: v_cmp_lt_i32
; GCN-DAG: v_cmp_gt_i32
; GCN: s_cbranch_vccnz

; GCN: s_setpc_b64
; GCN: s_setpc_b64

; GCN: [[LONG_BR_DEST0]]
; GCN: s_cbranch_vccz
; GCN: s_setpc_b64

; GCN: s_endpgm
define amdgpu_kernel void @long_branch_hang(i32 addrspace(1)* nocapture %arg, i32 %arg1, i32 %arg2, i32 %arg3, i32 %arg4, i64 %arg5) #0 {
bb:
  %tmp = icmp slt i32 %arg2, 9
  %tmp6 = icmp eq i32 %arg1, 0
  %tmp7 = icmp sgt i32 %arg4, 0
  %tmp8 = icmp sgt i32 %arg4, 5
  br i1 %tmp8, label %bb9, label %bb13

bb9: ; preds = %bb
  %tmp10 = and i1 %tmp7, %tmp
  %tmp11 = icmp slt i32 %arg3, %arg4
  %tmp12 = or i1 %tmp11, %tmp7
  br i1 %tmp12, label %bb19, label %bb14

bb13: ; preds = %bb
  call void asm sideeffect
   "v_nop_e64
    v_nop_e64
    v_nop_e64
    v_nop_e64", ""() #0
  br i1 %tmp6, label %bb19, label %bb14

bb14: ; preds = %bb13, %bb9
  %tmp15 = icmp slt i32 %arg3, %arg4
  %tmp16 = or i1 %tmp15, %tmp
  %tmp17 = and i1 %tmp6, %tmp16
  %tmp18 = zext i1 %tmp17 to i32
  br label %bb19

bb19: ; preds = %bb14, %bb13, %bb9
  %tmp20 = phi i32 [ undef, %bb9 ], [ undef, %bb13 ], [ %tmp18, %bb14 ]
  %tmp21 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 %arg5
  store i32 %tmp20, i32 addrspace(1)* %tmp21, align 4
  ret void
}

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }