; RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs -amdgpu-s-branch-bits=4 < %s | FileCheck -enable-var-scope -check-prefix=GCN %s

; FIXME: We should use llvm-mc for this, but we can't even parse our own output.
; See PR33579.
; RUN: llc -march=amdgcn -verify-machineinstrs -amdgpu-s-branch-bits=4 -o %t.o -filetype=obj %s
; RUN: llvm-readobj -r %t.o | FileCheck --check-prefix=OBJ %s

; OBJ: Relocations [
; OBJ-NEXT: ]

; Restrict maximum branch to between +7 and -8 dwords
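; Note (editorial): s_cbranch_* offsets are signed dword counts relative to the
; instruction after the branch, so with -amdgpu-s-branch-bits=4 any target
; outside that window has to be reached through the
; s_getpc_b64 / s_add_u32 / s_addc_u32 / s_setpc_b64 sequence the checks below
; look for.
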
; Used to emit an always 4 byte instruction. Inline asm always assumes
; each instruction is the maximum size.
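; Note (editorial): s_sleep is a single 4-byte SOPP instruction, while every
; instruction inside an inline asm block is conservatively counted at the
; maximum size (8 bytes on this target), which is how the "24 bytes" /
; "32 bytes" block sizes noted below are arrived at.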
declare void @llvm.amdgcn.s.sleep(i32) #0

declare i32 @llvm.amdgcn.workitem.id.x() #1

; GCN-LABEL: {{^}}uniform_conditional_max_short_forward_branch:
; GCN: s_load_dword [[CND:s[0-9]+]]
; GCN: s_cmp_eq_u32 [[CND]], 0
; GCN-NEXT: s_cbranch_scc1 [[BB3:BB[0-9]+_[0-9]+]]

; GCN-NEXT: ; %bb.1: ; %bb2
; GCN-NEXT: ;;#ASMSTART
; GCN-NEXT: v_nop_e64
; GCN-NEXT: v_nop_e64
; GCN-NEXT: v_nop_e64
; GCN-NEXT: ;;#ASMEND
; GCN-NEXT: s_sleep 0

; GCN-NEXT: [[BB3]]: ; %bb3
; GCN: v_mov_b32_e32 [[V_CND:v[0-9]+]], [[CND]]
; GCN: buffer_store_dword [[V_CND]]
; GCN: s_endpgm
define amdgpu_kernel void @uniform_conditional_max_short_forward_branch(i32 addrspace(1)* %arg, i32 %cnd) #0 {
bb:
  %cmp = icmp eq i32 %cnd, 0
  br i1 %cmp, label %bb3, label %bb2 ; +8 dword branch

bb2:
  ; 24 bytes
  call void asm sideeffect
    "v_nop_e64
     v_nop_e64
     v_nop_e64", ""() #0
  call void @llvm.amdgcn.s.sleep(i32 0)
  br label %bb3

bb3:
  store volatile i32 %cnd, i32 addrspace(1)* %arg
  ret void
}

; GCN-LABEL: {{^}}uniform_conditional_min_long_forward_branch:
; GCN: s_load_dword [[CND:s[0-9]+]]
; GCN: s_cmp_eq_u32 [[CND]], 0
; GCN-NEXT: s_cbranch_scc0 [[LONGBB:BB[0-9]+_[0-9]+]]

; GCN-NEXT: [[LONG_JUMP:BB[0-9]+_[0-9]+]]: ; %bb0
; GCN-NEXT: s_getpc_b64 s{{\[}}[[PC_LO:[0-9]+]]:[[PC_HI:[0-9]+]]{{\]}}
; GCN-NEXT: s_add_u32 s[[PC_LO]], s[[PC_LO]], [[ENDBB:BB[0-9]+_[0-9]+]]-([[LONG_JUMP]]+4)
; GCN-NEXT: s_addc_u32 s[[PC_HI]], s[[PC_HI]], 0
; GCN-NEXT: s_setpc_b64 s{{\[}}[[PC_LO]]:[[PC_HI]]{{\]}}

; GCN-NEXT: [[LONGBB]]:
; GCN-NEXT: ;;#ASMSTART
; GCN: v_nop_e64
; GCN: v_nop_e64
; GCN: v_nop_e64
; GCN: v_nop_e64
; GCN-NEXT: ;;#ASMEND

; GCN-NEXT: [[ENDBB]]:
; GCN: v_mov_b32_e32 [[V_CND:v[0-9]+]], [[CND]]
; GCN: buffer_store_dword [[V_CND]]
; GCN: s_endpgm
define amdgpu_kernel void @uniform_conditional_min_long_forward_branch(i32 addrspace(1)* %arg, i32 %cnd) #0 {
bb0:
  %cmp = icmp eq i32 %cnd, 0
  br i1 %cmp, label %bb3, label %bb2 ; +9 dword branch

bb2:
  ; 32 bytes
  call void asm sideeffect
    "v_nop_e64
     v_nop_e64
     v_nop_e64
     v_nop_e64", ""() #0
  br label %bb3

bb3:
  store volatile i32 %cnd, i32 addrspace(1)* %arg
  ret void
}

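; Same as above (editorial note), except the condition is a float compare
; selected to v_cmp_eq_f32 writing an SGPR pair, so the relaxed branch is an
; s_cbranch_vccz (after masking vcc with exec) rather than an scc branch.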
; GCN-LABEL: {{^}}uniform_conditional_min_long_forward_vcnd_branch:
; GCN: s_load_dword [[CND:s[0-9]+]]
; GCN-DAG: v_mov_b32_e32 [[V_CND:v[0-9]+]], [[CND]]
; GCN-DAG: v_cmp_eq_f32_e64 [[UNMASKED:s\[[0-9]+:[0-9]+\]]], [[CND]], 0
; GCN-DAG: s_and_b64 vcc, exec, [[UNMASKED]]
; GCN: s_cbranch_vccz [[LONGBB:BB[0-9]+_[0-9]+]]

; GCN-NEXT: [[LONG_JUMP:BB[0-9]+_[0-9]+]]: ; %bb0
; GCN-NEXT: s_getpc_b64 s{{\[}}[[PC_LO:[0-9]+]]:[[PC_HI:[0-9]+]]{{\]}}
; GCN-NEXT: s_add_u32 s[[PC_LO]], s[[PC_LO]], [[ENDBB:BB[0-9]+_[0-9]+]]-([[LONG_JUMP]]+4)
; GCN-NEXT: s_addc_u32 s[[PC_HI]], s[[PC_HI]], 0
; GCN-NEXT: s_setpc_b64 s{{\[}}[[PC_LO]]:[[PC_HI]]{{\]}}

; GCN-NEXT: [[LONGBB]]:
; GCN: v_nop_e64
; GCN: v_nop_e64
; GCN: v_nop_e64
; GCN: v_nop_e64

; GCN: [[ENDBB]]:
; GCN: buffer_store_dword [[V_CND]]
; GCN: s_endpgm
define amdgpu_kernel void @uniform_conditional_min_long_forward_vcnd_branch(float addrspace(1)* %arg, float %cnd) #0 {
bb0:
  %cmp = fcmp oeq float %cnd, 0.0
  br i1 %cmp, label %bb3, label %bb2 ; + 8 dword branch

bb2:
  call void asm sideeffect " ; 32 bytes
    v_nop_e64
    v_nop_e64
    v_nop_e64
    v_nop_e64", ""() #0
  br label %bb3

bb3:
  store volatile float %cnd, float addrspace(1)* %arg
  ret void
}

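; Divergent variant (editorial note): the condition comes from a per-lane load,
; so control flow is handled with exec-mask manipulation, and the checks only
; pin down the s_and_saveexec/s_or_b64 pair around the padded block.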
; GCN-LABEL: {{^}}min_long_forward_vbranch:

; GCN: buffer_load_dword
; GCN: v_cmp_ne_u32_e32 vcc, 0, v{{[0-9]+}}
; GCN: s_and_saveexec_b64 [[SAVE:s\[[0-9]+:[0-9]+\]]], vcc

; GCN: v_nop_e64
; GCN: v_nop_e64
; GCN: v_nop_e64
; GCN: v_nop_e64

; GCN: s_or_b64 exec, exec, [[SAVE]]
; GCN: buffer_store_dword
; GCN: s_endpgm
define amdgpu_kernel void @min_long_forward_vbranch(i32 addrspace(1)* %arg) #0 {
bb:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = zext i32 %tid to i64
  %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 %tid.ext
  %load = load volatile i32, i32 addrspace(1)* %gep
  %cmp = icmp eq i32 %load, 0
  br i1 %cmp, label %bb3, label %bb2 ; + 8 dword branch

bb2:
  call void asm sideeffect " ; 32 bytes
    v_nop_e64
    v_nop_e64
    v_nop_e64
    v_nop_e64", ""() #0
  br label %bb3

bb3:
  store volatile i32 %load, i32 addrspace(1)* %gep
  ret void
}

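; Backward branch of a loop that is out of range: the expansion computes the
; target with s_sub_u32/s_subb_u32 from the PC instead of s_add_u32/s_addc_u32.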
; GCN-LABEL: {{^}}long_backward_sbranch:
; GCN: s_mov_b32 [[LOOPIDX:s[0-9]+]], 0{{$}}

; GCN: [[LOOPBB:BB[0-9]+_[0-9]+]]: ; %bb2
; GCN-NEXT: ; =>This Inner Loop Header: Depth=1
; GCN-NEXT: s_add_i32 [[INC:s[0-9]+]], [[LOOPIDX]], 1
; GCN-NEXT: s_cmp_lt_i32 [[INC]], 10

; GCN-NEXT: ;;#ASMSTART
; GCN-NEXT: v_nop_e64
; GCN-NEXT: v_nop_e64
; GCN-NEXT: v_nop_e64
; GCN-NEXT: ;;#ASMEND

; GCN-NEXT: s_cbranch_scc0 [[ENDBB:BB[0-9]+_[0-9]+]]

; GCN-NEXT: [[LONG_JUMP:BB[0-9]+_[0-9]+]]: ; %bb2
; GCN-NEXT: ; in Loop: Header=[[LOOPBB]] Depth=1

; GCN-NEXT: s_getpc_b64 s{{\[}}[[PC_LO:[0-9]+]]:[[PC_HI:[0-9]+]]{{\]}}
; GCN-NEXT: s_sub_u32 s[[PC_LO]], s[[PC_LO]], ([[LONG_JUMP]]+4)-[[LOOPBB]]
; GCN-NEXT: s_subb_u32 s[[PC_HI]], s[[PC_HI]], 0
; GCN-NEXT: s_setpc_b64 s{{\[}}[[PC_LO]]:[[PC_HI]]{{\]}}

; GCN-NEXT: [[ENDBB]]:
; GCN-NEXT: s_endpgm
define amdgpu_kernel void @long_backward_sbranch(i32 addrspace(1)* %arg) #0 {
bb:
  br label %bb2

bb2:
  %loop.idx = phi i32 [ 0, %bb ], [ %inc, %bb2 ]
  ; 24 bytes
  call void asm sideeffect
    "v_nop_e64
     v_nop_e64
     v_nop_e64", ""() #0
  %inc = add nsw i32 %loop.idx, 1 ; add cost 4
  %cmp = icmp slt i32 %inc, 10 ; condition cost = 8
  br i1 %cmp, label %bb2, label %bb3 ; -

bb3:
  ret void
}

; Requires expansion of unconditional branch from %bb2 to %bb4 (and
; expansion of conditional branch from %bb0 to %bb3).
; GCN-LABEL: {{^}}uniform_unconditional_min_long_forward_branch:
; GCN: s_cmp_eq_u32
; GCN-NEXT: s_cbranch_scc0 [[BB2:BB[0-9]+_[0-9]+]]

; GCN-NEXT: [[LONG_JUMP0:BB[0-9]+_[0-9]+]]: ; %bb0
; GCN-NEXT: s_getpc_b64 s{{\[}}[[PC0_LO:[0-9]+]]:[[PC0_HI:[0-9]+]]{{\]}}
; GCN-NEXT: s_add_u32 s[[PC0_LO]], s[[PC0_LO]], [[BB3:BB[0-9]_[0-9]+]]-([[LONG_JUMP0]]+4)
; GCN-NEXT: s_addc_u32 s[[PC0_HI]], s[[PC0_HI]], 0{{$}}
; GCN-NEXT: s_setpc_b64 s{{\[}}[[PC0_LO]]:[[PC0_HI]]{{\]}}

; GCN-NEXT: [[BB2]]: ; %bb2
; GCN: v_mov_b32_e32 [[BB2_K:v[0-9]+]], 17
; GCN: buffer_store_dword [[BB2_K]]

; GCN-NEXT: [[LONG_JUMP1:BB[0-9]+_[0-9]+]]: ; %bb2
; GCN-NEXT: s_getpc_b64 s{{\[}}[[PC1_LO:[0-9]+]]:[[PC1_HI:[0-9]+]]{{\]}}
; GCN-NEXT: s_add_u32 s[[PC1_LO]], s[[PC1_LO]], [[BB4:BB[0-9]_[0-9]+]]-([[LONG_JUMP1]]+4)
; GCN-NEXT: s_addc_u32 s[[PC1_HI]], s[[PC1_HI]], 0{{$}}
; GCN-NEXT: s_setpc_b64 s{{\[}}[[PC1_LO]]:[[PC1_HI]]{{\]}}

; GCN: [[BB3]]: ; %bb3
; GCN: v_nop_e64
; GCN: v_nop_e64
; GCN: v_nop_e64
; GCN: v_nop_e64
; GCN: ;;#ASMEND

; GCN-NEXT: [[BB4]]: ; %bb4
; GCN: v_mov_b32_e32 [[BB4_K:v[0-9]+]], 63
; GCN: buffer_store_dword [[BB4_K]]
; GCN-NEXT: s_endpgm
; GCN-NEXT: .Lfunc_end{{[0-9]+}}:
define amdgpu_kernel void @uniform_unconditional_min_long_forward_branch(i32 addrspace(1)* %arg, i32 %arg1) {
bb0:
  %tmp = icmp ne i32 %arg1, 0
  br i1 %tmp, label %bb2, label %bb3

bb2:
  store volatile i32 17, i32 addrspace(1)* undef
  br label %bb4

bb3:
  ; 32 byte asm
  call void asm sideeffect
    "v_nop_e64
     v_nop_e64
     v_nop_e64
     v_nop_e64", ""() #0
  br label %bb4

bb4:
  store volatile i32 63, i32 addrspace(1)* %arg
  ret void
}

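; Same expansion for an unconditional backward branch (editorial note): the
; infinite loop below only exists to force the relaxed, subtracting form of
; the PC arithmetic.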
; GCN-LABEL: {{^}}uniform_unconditional_min_long_backward_branch:
; GCN-NEXT: ; %bb.0: ; %entry

; GCN-NEXT: [[LOOP:BB[0-9]_[0-9]+]]: ; %loop
; GCN-NEXT: ; =>This Inner Loop Header: Depth=1
; GCN-NEXT: ;;#ASMSTART
; GCN-NEXT: v_nop_e64
; GCN-NEXT: v_nop_e64
; GCN-NEXT: v_nop_e64
; GCN-NEXT: v_nop_e64
; GCN-NEXT: ;;#ASMEND

; GCN-NEXT: [[LONGBB:BB[0-9]+_[0-9]+]]: ; %loop
; GCN-NEXT: ; in Loop: Header=[[LOOP]] Depth=1

; GCN-NEXT: s_getpc_b64 s{{\[}}[[PC_LO:[0-9]+]]:[[PC_HI:[0-9]+]]{{\]}}
; GCN-NEXT: s_sub_u32 s[[PC_LO]], s[[PC_LO]], ([[LONGBB]]+4)-[[LOOP]]
; GCN-NEXT: s_subb_u32 s[[PC_HI]], s[[PC_HI]], 0{{$}}
; GCN-NEXT: s_setpc_b64 s{{\[}}[[PC_LO]]:[[PC_HI]]{{\]}}
; GCN-NEXT: .Lfunc_end{{[0-9]+}}:
define amdgpu_kernel void @uniform_unconditional_min_long_backward_branch(i32 addrspace(1)* %arg, i32 %arg1) {
entry:
  br label %loop

loop:
  ; 32 byte asm
  call void asm sideeffect
    "v_nop_e64
     v_nop_e64
     v_nop_e64
     v_nop_e64", ""() #0
  br label %loop
}

; Expansion of the branch from %bb1 to %bb3 introduces the need to expand the
; branch from %bb0 to %bb2.
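; Both branches end up as s_getpc_b64/s_setpc_b64 sequences: relaxing the
; branch in %bb1 grows the code enough that %bb2 also falls out of range of
; the branch in %bb0, which is what the two long-jump blocks below check.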
; GCN-LABEL: {{^}}expand_requires_expand:
; GCN-NEXT: ; %bb.0: ; %bb0
; GCN: s_load_dword
; GCN: s_cmp_lt_i32 s{{[0-9]+}}, 0{{$}}
; GCN-NEXT: s_cbranch_scc0 [[BB1:BB[0-9]+_[0-9]+]]

; GCN-NEXT: [[LONGBB0:BB[0-9]+_[0-9]+]]: ; %bb0

; GCN-NEXT: s_getpc_b64 s{{\[}}[[PC0_LO:[0-9]+]]:[[PC0_HI:[0-9]+]]{{\]}}
; GCN-NEXT: s_add_u32 s[[PC0_LO]], s[[PC0_LO]], [[BB2:BB[0-9]_[0-9]+]]-([[LONGBB0]]+4)
; GCN-NEXT: s_addc_u32 s[[PC0_HI]], s[[PC0_HI]], 0{{$}}
; GCN-NEXT: s_setpc_b64 s{{\[}}[[PC0_LO]]:[[PC0_HI]]{{\]}}

; GCN-NEXT: [[BB1]]: ; %bb1
; GCN-NEXT: s_load_dword
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_cmp_eq_u32 s{{[0-9]+}}, 3{{$}}
; GCN-NEXT: s_cbranch_scc0 [[BB2:BB[0-9]_[0-9]+]]

; GCN-NEXT: [[LONGBB1:BB[0-9]+_[0-9]+]]: ; %bb1
; GCN-NEXT: s_getpc_b64 s{{\[}}[[PC1_LO:[0-9]+]]:[[PC1_HI:[0-9]+]]{{\]}}
; GCN-NEXT: s_add_u32 s[[PC1_LO]], s[[PC1_LO]], [[BB3:BB[0-9]+_[0-9]+]]-([[LONGBB1]]+4)
; GCN-NEXT: s_addc_u32 s[[PC1_HI]], s[[PC1_HI]], 0{{$}}
; GCN-NEXT: s_setpc_b64 s{{\[}}[[PC1_LO]]:[[PC1_HI]]{{\]}}

; GCN-NEXT: [[BB2]]: ; %bb2
; GCN-NEXT: ;;#ASMSTART
; GCN-NEXT: v_nop_e64
; GCN-NEXT: v_nop_e64
; GCN-NEXT: v_nop_e64
; GCN-NEXT: v_nop_e64
; GCN-NEXT: ;;#ASMEND

; GCN-NEXT: [[BB3]]: ; %bb3
; GCN-NEXT: ;;#ASMSTART
; GCN-NEXT: v_nop_e64
; GCN-NEXT: ;;#ASMEND
; GCN-NEXT: ;;#ASMSTART
; GCN-NEXT: v_nop_e64
; GCN-NEXT: ;;#ASMEND
; GCN-NEXT: s_endpgm
define amdgpu_kernel void @expand_requires_expand(i32 %cond0) #0 {
bb0:
  %tmp = tail call i32 @llvm.amdgcn.workitem.id.x() #0
  %cmp0 = icmp slt i32 %cond0, 0
  br i1 %cmp0, label %bb2, label %bb1

bb1:
  %val = load volatile i32, i32 addrspace(4)* undef
  %cmp1 = icmp eq i32 %val, 3
  br i1 %cmp1, label %bb3, label %bb2

bb2:
  call void asm sideeffect
    "v_nop_e64
     v_nop_e64
     v_nop_e64
     v_nop_e64", ""() #0
  br label %bb3

bb3:
  ; These NOPs prevent tail-duplication-based outlining
  ; from firing, which defeats the need to expand the branches and this test.
  call void asm sideeffect
    "v_nop_e64", ""() #0
  call void asm sideeffect
    "v_nop_e64", ""() #0
  ret void
}

; Requires expanding of required skip branch.
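; The skip over the divergent %if region (taken when exec is zero) is itself a
; branch that can go out of range; the checks expect it to be rewritten into
; the s_getpc_b64/s_setpc_b64 form while the mask branch annotation stays.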
; GCN-LABEL: {{^}}uniform_inside_divergent:
; GCN: v_cmp_gt_u32_e32 vcc, 16, v{{[0-9]+}}
; GCN-NEXT: s_and_saveexec_b64 [[MASK:s\[[0-9]+:[0-9]+\]]], vcc
; GCN-NEXT: ; mask branch [[ENDIF:BB[0-9]+_[0-9]+]]
; GCN-NEXT: s_cbranch_execnz [[IF:BB[0-9]+_[0-9]+]]

; GCN-NEXT: [[LONGBB:BB[0-9]+_[0-9]+]]: ; %entry
; GCN-NEXT: s_getpc_b64 s{{\[}}[[PC_LO:[0-9]+]]:[[PC_HI:[0-9]+]]{{\]}}
; GCN-NEXT: s_add_u32 s[[PC_LO]], s[[PC_LO]], [[BB2:BB[0-9]_[0-9]+]]-([[LONGBB]]+4)
; GCN-NEXT: s_addc_u32 s[[PC_HI]], s[[PC_HI]], 0{{$}}
; GCN-NEXT: s_setpc_b64 s{{\[}}[[PC_LO]]:[[PC_HI]]{{\]}}

; GCN-NEXT: [[IF]]: ; %if
; GCN: buffer_store_dword
; GCN: s_cmp_lg_u32
; GCN: s_cbranch_scc1 [[ENDIF]]

; GCN-NEXT: ; %bb.2: ; %if_uniform
; GCN: buffer_store_dword

; GCN-NEXT: [[ENDIF]]: ; %endif
; GCN-NEXT: s_or_b64 exec, exec, [[MASK]]
; GCN-NEXT: s_sleep 5
; GCN-NEXT: s_endpgm
define amdgpu_kernel void @uniform_inside_divergent(i32 addrspace(1)* %out, i32 %cond) #0 {
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %d_cmp = icmp ult i32 %tid, 16
  br i1 %d_cmp, label %if, label %endif

if:
  store i32 0, i32 addrspace(1)* %out
  %u_cmp = icmp eq i32 %cond, 0
  br i1 %u_cmp, label %if_uniform, label %endif

if_uniform:
  store i32 1, i32 addrspace(1)* %out
  br label %endif

endif:
  ; layout can remove the split branch if it can copy the return block.
  ; This call makes the return block long enough that it doesn't get copied.
  call void @llvm.amdgcn.s.sleep(i32 5)
  ret void
}

; si_mask_branch
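; The "; mask branch" annotations in the checks below come from the
; SI_MASK_BRANCH pseudo; the backward branch of the loop must still be
; relaxed into the s_getpc_b64/s_setpc_b64 form around it.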
; GCN-LABEL: {{^}}analyze_mask_branch:
; GCN: v_cmp_nlt_f32_e32 vcc
; GCN-NEXT: s_and_saveexec_b64 [[TEMP_MASK:s\[[0-9]+:[0-9]+\]]], vcc
; GCN-NEXT: s_xor_b64 [[MASK:s\[[0-9]+:[0-9]+\]]], exec, [[TEMP_MASK]]
; GCN-NEXT: ; mask branch [[FLOW:BB[0-9]+_[0-9]+]]

; GCN: [[FLOW]]: ; %Flow
; GCN-NEXT: s_or_saveexec_b64 [[TEMP_MASK1:s\[[0-9]+:[0-9]+\]]], [[MASK]]
; GCN-NEXT: s_xor_b64 exec, exec, [[TEMP_MASK1]]
; GCN-NEXT: ; mask branch [[RET:BB[0-9]+_[0-9]+]]

; GCN: [[LOOP_BODY:BB[0-9]+_[0-9]+]]: ; %loop{{$}}
; GCN: ;;#ASMSTART
; GCN: v_nop_e64
; GCN: v_nop_e64
; GCN: v_nop_e64
; GCN: v_nop_e64
; GCN: v_nop_e64
; GCN: v_nop_e64
; GCN: ;;#ASMEND
; GCN: s_cbranch_vccz [[RET]]

; GCN-NEXT: [[LONGBB:BB[0-9]+_[0-9]+]]: ; %loop
; GCN-NEXT: ; in Loop: Header=[[LOOP_BODY]] Depth=1
; GCN-NEXT: s_getpc_b64 s{{\[}}[[PC_LO:[0-9]+]]:[[PC_HI:[0-9]+]]{{\]}}
; GCN-NEXT: s_sub_u32 s[[PC_LO]], s[[PC_LO]], ([[LONGBB]]+4)-[[LOOP_BODY]]
; GCN-NEXT: s_subb_u32 s[[PC_HI]], s[[PC_HI]], 0
; GCN-NEXT: s_setpc_b64 s{{\[}}[[PC_LO]]:[[PC_HI]]{{\]}}

; GCN-NEXT: [[RET]]: ; %UnifiedReturnBlock
; GCN-NEXT: s_endpgm
define amdgpu_kernel void @analyze_mask_branch() #0 {
entry:
  %reg = call float asm sideeffect "v_mov_b32_e64 $0, 0", "=v"()
  %cmp0 = fcmp ogt float %reg, 0.000000e+00
  br i1 %cmp0, label %loop, label %ret

loop:
  %phi = phi float [ 0.000000e+00, %loop_body ], [ 1.000000e+00, %entry ]
  call void asm sideeffect
    "v_nop_e64
     v_nop_e64", ""() #0
  %cmp1 = fcmp olt float %phi, 8.0
  br i1 %cmp1, label %loop_body, label %ret

loop_body:
  call void asm sideeffect
    "v_nop_e64
     v_nop_e64
     v_nop_e64
     v_nop_e64", ""() #0
  br label %loop

ret:
  store volatile i32 7, i32 addrspace(1)* undef
  ret void
}

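; Mixed uniform and VCC conditions with several out-of-range blocks at once;
; the checks only pin down the overall shape (the relaxed unconditional branch
; plus the compare-and-branch blocks), not exact block numbers or offsets.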
; GCN-LABEL: {{^}}long_branch_hang:
; GCN: s_cmp_lt_i32 s{{[0-9]+}}, 6
; GCN: s_cbranch_scc1 {{BB[0-9]+_[0-9]+}}
; GCN-NEXT: s_branch [[LONG_BR_0:BB[0-9]+_[0-9]+]]
; GCN-NEXT: BB{{[0-9]+_[0-9]+}}:

; GCN: s_add_u32 s{{[0-9]+}}, s{{[0-9]+}}, [[LONG_BR_DEST0:BB[0-9]+_[0-9]+]]-(
; GCN-NEXT: s_addc_u32
; GCN-NEXT: s_setpc_b64

; GCN-NEXT: [[LONG_BR_0]]:
; GCN-DAG: v_cmp_lt_i32
; GCN-DAG: v_cmp_gt_i32
; GCN: s_cbranch_vccnz

; GCN: s_setpc_b64
; GCN: s_setpc_b64

; GCN: [[LONG_BR_DEST0]]
; GCN: s_cbranch_vccz
; GCN: s_setpc_b64

; GCN: s_endpgm
define amdgpu_kernel void @long_branch_hang(i32 addrspace(1)* nocapture %arg, i32 %arg1, i32 %arg2, i32 %arg3, i32 %arg4, i64 %arg5) #0 {
bb:
  %tmp = icmp slt i32 %arg2, 9
  %tmp6 = icmp eq i32 %arg1, 0
  %tmp7 = icmp sgt i32 %arg4, 0
  %tmp8 = icmp sgt i32 %arg4, 5
  br i1 %tmp8, label %bb9, label %bb13

bb9: ; preds = %bb
  %tmp10 = and i1 %tmp7, %tmp
  %tmp11 = icmp slt i32 %arg3, %arg4
  %tmp12 = or i1 %tmp11, %tmp7
  br i1 %tmp12, label %bb19, label %bb14

bb13: ; preds = %bb
  call void asm sideeffect
    "v_nop_e64
     v_nop_e64
     v_nop_e64
     v_nop_e64", ""() #0
  br i1 %tmp6, label %bb19, label %bb14

bb14: ; preds = %bb13, %bb9
  %tmp15 = icmp slt i32 %arg3, %arg4
  %tmp16 = or i1 %tmp15, %tmp
  %tmp17 = and i1 %tmp6, %tmp16
  %tmp18 = zext i1 %tmp17 to i32
  br label %bb19

bb19: ; preds = %bb14, %bb13, %bb9
  %tmp20 = phi i32 [ undef, %bb9 ], [ undef, %bb13 ], [ %tmp18, %bb14 ]
  %tmp21 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 %arg5
  store i32 %tmp20, i32 addrspace(1)* %tmp21, align 4
  ret void
}

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }