; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=kaveri -mattr=-promote-alloca -amdgpu-sroa=0 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,CI %s
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -mattr=-promote-alloca -amdgpu-sroa=0 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX9 %s

; Test that non-entry function frame indices are expanded properly to
; give an index relative to the scratch wave offset register

; Materialize into a mov. Make sure there isn't an unnecessary copy.
; GCN-LABEL: {{^}}func_mov_fi_i32:
; GCN: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)

; CI-NEXT: v_lshr_b32_e64 v0, s32, 6

; GFX9-NEXT: v_lshrrev_b32_e64 v0, 6, s32

; GCN-NOT: v_mov

; GCN: ds_write_b32 v0, v0
define void @func_mov_fi_i32() #0 {
  %alloca = alloca i32, addrspace(5)
  store volatile i32 addrspace(5)* %alloca, i32 addrspace(5)* addrspace(3)* undef
  ret void
}

; Offset due to different objects
; GCN-LABEL: {{^}}func_mov_fi_i32_offset:
; GCN: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)

; CI-DAG: v_lshr_b32_e64 v0, s32, 6
; CI-DAG: v_lshr_b32_e64 [[SCALED:v[0-9]+]], s32, 6
; CI-NOT: v_mov
; CI: ds_write_b32 v0, v0
; CI-NEXT: v_add_i32_e{{32|64}} v0, {{s\[[0-9]+:[0-9]+\]|vcc}}, 4, [[SCALED]]
; CI-NEXT: ds_write_b32 v0, v0

; GFX9: v_lshrrev_b32_e64 v0, 6, s32
; GFX9-NEXT: v_lshrrev_b32_e64 [[SCALED:v[0-9]+]], 6, s32
; GFX9-DAG: ds_write_b32 v0, v0
; GFX9-NEXT: v_add_u32_e32 v0, 4, [[SCALED]]
; GFX9-NEXT: ds_write_b32 v0, v0
define void @func_mov_fi_i32_offset() #0 {
  %alloca0 = alloca i32, addrspace(5)
  %alloca1 = alloca i32, addrspace(5)
  store volatile i32 addrspace(5)* %alloca0, i32 addrspace(5)* addrspace(3)* undef
  store volatile i32 addrspace(5)* %alloca1, i32 addrspace(5)* addrspace(3)* undef
  ret void
}

; Materialize into an add of a constant offset from the FI.
; FIXME: Should be able to merge adds

; GCN-LABEL: {{^}}func_add_constant_to_fi_i32:
; GCN: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)

; CI: v_lshr_b32_e64 [[SCALED:v[0-9]+]], s32, 6
; CI-NEXT: v_add_i32_e32 v0, vcc, 4, [[SCALED]]

; GFX9: v_lshrrev_b32_e64 [[SCALED:v[0-9]+]], 6, s32
; GFX9-NEXT: v_add_u32_e32 v0, 4, [[SCALED]]

; GCN-NOT: v_mov

; GCN: ds_write_b32 v0, v0
define void @func_add_constant_to_fi_i32() #0 {
  %alloca = alloca [2 x i32], align 4, addrspace(5)
  %gep0 = getelementptr inbounds [2 x i32], [2 x i32] addrspace(5)* %alloca, i32 0, i32 1
  store volatile i32 addrspace(5)* %gep0, i32 addrspace(5)* addrspace(3)* undef
  ret void
}

; A user the materialized frame index can't be meaningfully folded
; into.

; GCN-LABEL: {{^}}func_other_fi_user_i32:

; CI: v_lshr_b32_e64 v0, s32, 6

; GFX9: v_lshrrev_b32_e64 v0, 6, s32

; GCN-NEXT: v_mul_u32_u24_e32 v0, 9, v0
; GCN-NOT: v_mov

; GCN: ds_write_b32 v0, v0
define void @func_other_fi_user_i32() #0 {
  %alloca = alloca [2 x i32], align 4, addrspace(5)
  %ptrtoint = ptrtoint [2 x i32] addrspace(5)* %alloca to i32
  %mul = mul i32 %ptrtoint, 9
  store volatile i32 %mul, i32 addrspace(3)* undef
  ret void
}

; GCN-LABEL: {{^}}func_store_private_arg_i32_ptr:
; GCN: v_mov_b32_e32 v1, 15{{$}}
; GCN: buffer_store_dword v1, v0, s[0:3], 0 offen{{$}}
define void @func_store_private_arg_i32_ptr(i32 addrspace(5)* %ptr) #0 {
  store volatile i32 15, i32 addrspace(5)* %ptr
  ret void
}

; GCN-LABEL: {{^}}func_load_private_arg_i32_ptr:
; GCN: s_waitcnt
; GCN-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen{{$}}
define void @func_load_private_arg_i32_ptr(i32 addrspace(5)* %ptr) #0 {
  %val = load volatile i32, i32 addrspace(5)* %ptr
  ret void
}

; GCN-LABEL: {{^}}void_func_byval_struct_i8_i32_ptr:
; GCN: s_waitcnt

; CI: v_lshr_b32_e64 [[SHIFT:v[0-9]+]], s32, 6
; CI-NEXT: v_or_b32_e32 v0, 4, [[SHIFT]]

; GFX9: v_lshrrev_b32_e64 [[SHIFT:v[0-9]+]], 6, s32
; GFX9-NEXT: v_or_b32_e32 v0, 4, [[SHIFT]]

; GCN-NOT: v_mov

; GCN: ds_write_b32 v0, v0
define void @void_func_byval_struct_i8_i32_ptr({ i8, i32 } addrspace(5)* byval %arg0) #0 {
  %gep0 = getelementptr inbounds { i8, i32 }, { i8, i32 } addrspace(5)* %arg0, i32 0, i32 0
  %gep1 = getelementptr inbounds { i8, i32 }, { i8, i32 } addrspace(5)* %arg0, i32 0, i32 1
  %load1 = load i32, i32 addrspace(5)* %gep1
  store volatile i32 addrspace(5)* %gep1, i32 addrspace(5)* addrspace(3)* undef
  ret void
}

; GCN-LABEL: {{^}}void_func_byval_struct_i8_i32_ptr_value:
; GCN: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: buffer_load_ubyte v0, off, s[0:3], s32
; FIXME: "GCN_NEXT" below looks like a typo for "GCN-NEXT"; as written it is
; an inert comment and FileCheck never enforces this line — confirm intent
; before enabling it.
; GCN_NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:4
define void @void_func_byval_struct_i8_i32_ptr_value({ i8, i32 } addrspace(5)* byval %arg0) #0 {
  %gep0 = getelementptr inbounds { i8, i32 }, { i8, i32 } addrspace(5)* %arg0, i32 0, i32 0
  %gep1 = getelementptr inbounds { i8, i32 }, { i8, i32 } addrspace(5)* %arg0, i32 0, i32 1
  %load0 = load i8, i8 addrspace(5)* %gep0
  %load1 = load i32, i32 addrspace(5)* %gep1
  store volatile i8 %load0, i8 addrspace(3)* undef
  store volatile i32 %load1, i32 addrspace(3)* undef
  ret void
}

; GCN-LABEL: {{^}}void_func_byval_struct_i8_i32_ptr_nonentry_block:

; CI: v_lshr_b32_e64 [[SHIFT:v[0-9]+]], s32, 6

; GFX9: v_lshrrev_b32_e64 [[SHIFT:v[0-9]+]], 6, s32

; GCN: s_and_saveexec_b64

; CI: v_add_i32_e32 [[GEP:v[0-9]+]], vcc, 4, [[SHIFT]]
; CI: buffer_load_dword v{{[0-9]+}}, off, s[0:3], s32 offset:4{{$}}

; GFX9: v_add_u32_e32 [[GEP:v[0-9]+]], 4, [[SHIFT]]
; GFX9: buffer_load_dword v{{[0-9]+}}, off, s[0:3], s32 offset:4{{$}}

; GCN: ds_write_b32 v{{[0-9]+}}, [[GEP]]
define void @void_func_byval_struct_i8_i32_ptr_nonentry_block({ i8, i32 } addrspace(5)* byval %arg0, i32 %arg2) #0 {
  %cmp = icmp eq i32 %arg2, 0
  br i1 %cmp, label %bb, label %ret

bb:
  %gep0 = getelementptr inbounds { i8, i32 }, { i8, i32 } addrspace(5)* %arg0, i32 0, i32 0
  %gep1 = getelementptr inbounds { i8, i32 }, { i8, i32 } addrspace(5)* %arg0, i32 0, i32 1
  %load1 = load volatile i32, i32 addrspace(5)* %gep1
  store volatile i32 addrspace(5)* %gep1, i32 addrspace(5)* addrspace(3)* undef
  br label %ret

ret:
  ret void
}

; Added offset can't be used with VOP3 add
; GCN-LABEL: {{^}}func_other_fi_user_non_inline_imm_offset_i32:

; CI-DAG: s_movk_i32 [[K:s[0-9]+|vcc_lo|vcc_hi]], 0x200
; CI-DAG: v_lshr_b32_e64 [[SCALED:v[0-9]+]], s32, 6
; CI: v_add_i32_e32 [[VZ:v[0-9]+]], vcc, [[K]], [[SCALED]]

; GFX9-DAG: v_lshrrev_b32_e64 [[SCALED:v[0-9]+]], 6, s32
; GFX9: v_add_u32_e32 [[VZ:v[0-9]+]], 0x200, [[SCALED]]

; GCN: v_mul_u32_u24_e32 [[VZ]], 9, [[VZ]]
; GCN: ds_write_b32 v0, [[VZ]]
define void @func_other_fi_user_non_inline_imm_offset_i32() #0 {
  %alloca0 = alloca [128 x i32], align 4, addrspace(5)
  %alloca1 = alloca [8 x i32], align 4, addrspace(5)
  %gep0 = getelementptr inbounds [128 x i32], [128 x i32] addrspace(5)* %alloca0, i32 0, i32 65
  %gep1 = getelementptr inbounds [8 x i32], [8 x i32] addrspace(5)* %alloca1, i32 0, i32 0
  store volatile i32 7, i32 addrspace(5)* %gep0
  %ptrtoint = ptrtoint i32 addrspace(5)* %gep1 to i32
  %mul = mul i32 %ptrtoint, 9
  store volatile i32 %mul, i32 addrspace(3)* undef
  ret void
}

; GCN-LABEL: {{^}}func_other_fi_user_non_inline_imm_offset_i32_vcc_live:

; CI-DAG: s_movk_i32 [[OFFSET:s[0-9]+]], 0x200
; CI-DAG: v_lshr_b32_e64 [[SCALED:v[0-9]+]], s32, 6
; CI: v_add_i32_e64 [[VZ:v[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, [[OFFSET]], [[SCALED]]

; GFX9-DAG: v_lshrrev_b32_e64 [[SCALED:v[0-9]+]], 6, s32
; GFX9: v_add_u32_e32 [[VZ:v[0-9]+]], 0x200, [[SCALED]]

; GCN: v_mul_u32_u24_e32 [[VZ]], 9, [[VZ]]
; GCN: ds_write_b32 v0, [[VZ]]
define void @func_other_fi_user_non_inline_imm_offset_i32_vcc_live() #0 {
  %alloca0 = alloca [128 x i32], align 4, addrspace(5)
  %alloca1 = alloca [8 x i32], align 4, addrspace(5)
  %vcc = call i64 asm sideeffect "; def $0", "={vcc}"()
  %gep0 = getelementptr inbounds [128 x i32], [128 x i32] addrspace(5)* %alloca0, i32 0, i32 65
  %gep1 = getelementptr inbounds [8 x i32], [8 x i32] addrspace(5)* %alloca1, i32 0, i32 0
  store volatile i32 7, i32 addrspace(5)* %gep0
  call void asm sideeffect "; use $0", "{vcc}"(i64 %vcc)
  %ptrtoint = ptrtoint i32 addrspace(5)* %gep1 to i32
  %mul = mul i32 %ptrtoint, 9
  store volatile i32 %mul, i32 addrspace(3)* undef
  ret void
}

; External callee used by undefined_stack_store_reg below.
declare void @func(<4 x float> addrspace(5)* nocapture) #0

; undef flag not preserved in eliminateFrameIndex when handling the
; stores in the middle block.

; GCN-LABEL: {{^}}undefined_stack_store_reg:
; GCN: s_and_saveexec_b64
; GCN: buffer_store_dword v0, off, s[0:3], s34 offset:
; GCN: buffer_store_dword v0, off, s[0:3], s34 offset:
; GCN: buffer_store_dword v0, off, s[0:3], s34 offset:
; GCN: buffer_store_dword v{{[0-9]+}}, off, s[0:3], s34 offset:
define void @undefined_stack_store_reg(float %arg, i32 %arg1) #0 {
bb:
  %tmp = alloca <4 x float>, align 16, addrspace(5)
  %tmp2 = insertelement <4 x float> undef, float %arg, i32 0
  store <4 x float> %tmp2, <4 x float> addrspace(5)* undef
  %tmp3 = icmp eq i32 %arg1, 0
  br i1 %tmp3, label %bb4, label %bb5

bb4:
  call void @func(<4 x float> addrspace(5)* nonnull undef)
  store <4 x float> %tmp2, <4 x float> addrspace(5)* %tmp, align 16
  call void @func(<4 x float> addrspace(5)* nonnull %tmp)
  br label %bb5

bb5:
  ret void
}

; GCN-LABEL: {{^}}alloca_ptr_nonentry_block:
; GCN: s_and_saveexec_b64
; GCN: buffer_load_dword v{{[0-9]+}}, off, s[0:3], s32 offset:4

; CI: v_lshr_b32_e64 [[SHIFT:v[0-9]+]], s32, 6
; CI-NEXT: v_or_b32_e32 [[PTR:v[0-9]+]], 4, [[SHIFT]]

; GFX9: v_lshrrev_b32_e64 [[SHIFT:v[0-9]+]], 6, s32
; GFX9-NEXT: v_or_b32_e32 [[PTR:v[0-9]+]], 4, [[SHIFT]]

; GCN: ds_write_b32 v{{[0-9]+}}, [[PTR]]
define void @alloca_ptr_nonentry_block(i32 %arg0) #0 {
  %alloca0 = alloca { i8, i32 }, align 4, addrspace(5)
  %cmp = icmp eq i32 %arg0, 0
  br i1 %cmp, label %bb, label %ret

bb:
  %gep0 = getelementptr inbounds { i8, i32 }, { i8, i32 } addrspace(5)* %alloca0, i32 0, i32 0
  %gep1 = getelementptr inbounds { i8, i32 }, { i8, i32 } addrspace(5)* %alloca0, i32 0, i32 1
  %load1 = load volatile i32, i32 addrspace(5)* %gep1
  store volatile i32 addrspace(5)* %gep1, i32 addrspace(5)* addrspace(3)* undef
  br label %ret

ret:
  ret void
}

attributes #0 = { nounwind }