; RUN: llc -march=amdgcn -mcpu=pitcairn < %s | FileCheck -enable-var-scope -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=cypress < %s | FileCheck -enable-var-scope -check-prefix=EG -check-prefix=FUNC %s
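; Check lowering of integer max patterns (icmp + select) to s_max_i32,
; s_max_u32, v_max_i32_e32 and v_max_u32_e32 on SI, and to MAX_INT and
; MAX_UINT on EG.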
; FUNC-LABEL: {{^}}v_test_imax_sge_i32:
; SI: v_max_i32_e32
; EG: MAX_INT
define amdgpu_kernel void @v_test_imax_sge_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) nounwind {
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %gep.in = getelementptr inbounds i32, i32 addrspace(1)* %bptr, i32 %tid
  %a = load i32, i32 addrspace(1)* %aptr, align 4
  %b = load i32, i32 addrspace(1)* %gep.in, align 4
  %cmp = icmp sge i32 %a, %b
  %val = select i1 %cmp, i32 %a, i32 %b
  store i32 %val, i32 addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: {{^}}v_test_imax_sge_v4i32:
; SI: v_max_i32_e32
; SI: v_max_i32_e32
; SI: v_max_i32_e32
; SI: v_max_i32_e32

; These could be merged into one
; EG: MAX_INT
; EG: MAX_INT
; EG: MAX_INT
; EG: MAX_INT
define amdgpu_kernel void @v_test_imax_sge_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %aptr, <4 x i32> addrspace(1)* %bptr) nounwind {
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %gep.in = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* %bptr, i32 %tid
  %a = load <4 x i32>, <4 x i32> addrspace(1)* %aptr, align 4
  %b = load <4 x i32>, <4 x i32> addrspace(1)* %gep.in, align 4
  %cmp = icmp sge <4 x i32> %a, %b
  %val = select <4 x i1> %cmp, <4 x i32> %a, <4 x i32> %b
  store <4 x i32> %val, <4 x i32> addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: @s_test_imax_sge_i32
; SI: s_max_i32
; EG: MAX_INT
define amdgpu_kernel void @s_test_imax_sge_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
  %cmp = icmp sge i32 %a, %b
  %val = select i1 %cmp, i32 %a, i32 %b
  store i32 %val, i32 addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: {{^}}s_test_imax_sge_imm_i32:
; SI: s_max_i32 {{s[0-9]+}}, {{s[0-9]+}}, 9
; EG: MAX_INT {{.*}}literal.{{[xyzw]}}
define amdgpu_kernel void @s_test_imax_sge_imm_i32(i32 addrspace(1)* %out, i32 %a) nounwind {
  %cmp = icmp sge i32 %a, 9
  %val = select i1 %cmp, i32 %a, i32 9
  store i32 %val, i32 addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: {{^}}v_test_imax_sge_i8:
; SI: buffer_load_sbyte
; SI: buffer_load_sbyte
; SI: v_max_i32_e32
; EG: MAX_INT
define amdgpu_kernel void @v_test_imax_sge_i8(i8 addrspace(1)* %out, i8 addrspace(1)* %aptr, i8 addrspace(1)* %bptr) nounwind {
  %a = load i8, i8 addrspace(1)* %aptr, align 1
  %b = load i8, i8 addrspace(1)* %bptr, align 1
  %cmp = icmp sge i8 %a, %b
  %val = select i1 %cmp, i8 %a, i8 %b
  store i8 %val, i8 addrspace(1)* %out, align 1
  ret void
}

; FUNC-LABEL: {{^}}s_test_imax_sgt_imm_i32:
; SI: s_max_i32 {{s[0-9]+}}, {{s[0-9]+}}, 9
; EG: MAX_INT {{.*}}literal.{{[xyzw]}}
define amdgpu_kernel void @s_test_imax_sgt_imm_i32(i32 addrspace(1)* %out, i32 %a) nounwind {
  %cmp = icmp sgt i32 %a, 9
  %val = select i1 %cmp, i32 %a, i32 9
  store i32 %val, i32 addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: {{^}}s_test_imax_sgt_imm_v2i32:
; SI: s_max_i32 {{s[0-9]+}}, {{s[0-9]+}}, 9
; SI: s_max_i32 {{s[0-9]+}}, {{s[0-9]+}}, 9
; EG: MAX_INT {{.*}}literal.{{[xyzw]}}
; EG: MAX_INT {{.*}}literal.{{[xyzw]}}
define amdgpu_kernel void @s_test_imax_sgt_imm_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> %a) nounwind {
  %cmp = icmp sgt <2 x i32> %a, <i32 9, i32 9>
  %val = select <2 x i1> %cmp, <2 x i32> %a, <2 x i32> <i32 9, i32 9>
  store <2 x i32> %val, <2 x i32> addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: @v_test_imax_sgt_i32
; SI: v_max_i32_e32
; EG: MAX_INT
define amdgpu_kernel void @v_test_imax_sgt_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) nounwind {
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %gep.in = getelementptr inbounds i32, i32 addrspace(1)* %bptr, i32 %tid
  %a = load i32, i32 addrspace(1)* %aptr, align 4
  %b = load i32, i32 addrspace(1)* %gep.in, align 4
  %cmp = icmp sgt i32 %a, %b
  %val = select i1 %cmp, i32 %a, i32 %b
  store i32 %val, i32 addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: @s_test_imax_sgt_i32
; SI: s_max_i32
; EG: MAX_INT
define amdgpu_kernel void @s_test_imax_sgt_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
  %cmp = icmp sgt i32 %a, %b
  %val = select i1 %cmp, i32 %a, i32 %b
  store i32 %val, i32 addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: @v_test_umax_uge_i32
; SI: v_max_u32_e32
; EG: MAX_UINT
define amdgpu_kernel void @v_test_umax_uge_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) nounwind {
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %gep.in = getelementptr inbounds i32, i32 addrspace(1)* %bptr, i32 %tid
  %a = load i32, i32 addrspace(1)* %aptr, align 4
  %b = load i32, i32 addrspace(1)* %gep.in, align 4
  %cmp = icmp uge i32 %a, %b
  %val = select i1 %cmp, i32 %a, i32 %b
  store i32 %val, i32 addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: @s_test_umax_uge_i32
; SI: s_max_u32
; EG: MAX_UINT
define amdgpu_kernel void @s_test_umax_uge_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
  %cmp = icmp uge i32 %a, %b
  %val = select i1 %cmp, i32 %a, i32 %b
  store i32 %val, i32 addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: {{^}}s_test_umax_uge_v3i32:
; SI: s_max_u32
; SI: s_max_u32
; SI: s_max_u32
; SI-NOT: s_max_u32
; SI: s_endpgm

; EG: MAX_UINT
; EG: MAX_UINT
; EG: MAX_UINT
; EG-NOT: MAX_UINT
define amdgpu_kernel void @s_test_umax_uge_v3i32(<3 x i32> addrspace(1)* %out, <3 x i32> %a, <3 x i32> %b) nounwind {
  %cmp = icmp uge <3 x i32> %a, %b
  %val = select <3 x i1> %cmp, <3 x i32> %a, <3 x i32> %b
  store <3 x i32> %val, <3 x i32> addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: {{^}}v_test_umax_uge_i8:
; SI: buffer_load_ubyte
; SI: buffer_load_ubyte
; SI: v_max_u32_e32
; EG: MAX_UINT
define amdgpu_kernel void @v_test_umax_uge_i8(i8 addrspace(1)* %out, i8 addrspace(1)* %aptr, i8 addrspace(1)* %bptr) nounwind {
  %a = load i8, i8 addrspace(1)* %aptr, align 1
  %b = load i8, i8 addrspace(1)* %bptr, align 1
  %cmp = icmp uge i8 %a, %b
  %val = select i1 %cmp, i8 %a, i8 %b
  store i8 %val, i8 addrspace(1)* %out, align 1
  ret void
}

; FUNC-LABEL: @v_test_umax_ugt_i32
; SI: v_max_u32_e32
; EG: MAX_UINT
define amdgpu_kernel void @v_test_umax_ugt_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) nounwind {
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %gep.in = getelementptr inbounds i32, i32 addrspace(1)* %bptr, i32 %tid
  %a = load i32, i32 addrspace(1)* %gep.in, align 4
  %b = load i32, i32 addrspace(1)* %bptr, align 4
  %cmp = icmp ugt i32 %a, %b
  %val = select i1 %cmp, i32 %a, i32 %b
  store i32 %val, i32 addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: {{^}}s_test_umax_ugt_i32:
; SI: s_max_u32
; EG: MAX_UINT
define amdgpu_kernel void @s_test_umax_ugt_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
  %cmp = icmp ugt i32 %a, %b
  %val = select i1 %cmp, i32 %a, i32 %b
  store i32 %val, i32 addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: {{^}}s_test_umax_ugt_imm_v2i32:
; SI-DAG: s_max_u32 {{s[0-9]+}}, {{s[0-9]+}}, 15
; SI-DAG: s_max_u32 {{s[0-9]+}}, {{s[0-9]+}}, 23
; EG: MAX_UINT {{.*}}literal.{{[xyzw]}}
; EG: MAX_UINT {{.*}}literal.{{[xyzw]}}
define amdgpu_kernel void @s_test_umax_ugt_imm_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> %a) nounwind {
  %cmp = icmp ugt <2 x i32> %a, <i32 15, i32 23>
  %val = select <2 x i1> %cmp, <2 x i32> %a, <2 x i32> <i32 15, i32 23>
  store <2 x i32> %val, <2 x i32> addrspace(1)* %out, align 4
  ret void
}

; Make sure the redundant 'and' is removed.
; FUNC-LABEL: {{^}}simplify_demanded_bits_test_umax_ugt_i16:
; SI-DAG: s_load_dword [[A:s[0-9]+]], {{s\[[0-9]+:[0-9]+\]}}, 0x13
; SI-DAG: s_load_dword [[B:s[0-9]+]], {{s\[[0-9]+:[0-9]+\]}}, 0x1c
; SI: s_max_u32 [[MAX:s[0-9]+]], [[A]], [[B]]
; SI: v_mov_b32_e32 [[VMAX:v[0-9]+]], [[MAX]]
; SI: buffer_store_dword [[VMAX]]
; EG: MAX_UINT
define amdgpu_kernel void @simplify_demanded_bits_test_umax_ugt_i16(i32 addrspace(1)* %out, [8 x i32], i16 zeroext %a, [8 x i32], i16 zeroext %b) nounwind {
  %a.ext = zext i16 %a to i32
  %b.ext = zext i16 %b to i32
  %cmp = icmp ugt i32 %a.ext, %b.ext
  %val = select i1 %cmp, i32 %a.ext, i32 %b.ext
  %mask = and i32 %val, 65535
  store i32 %mask, i32 addrspace(1)* %out
  ret void
}

; Make sure the redundant sign_extend_inreg is removed.
; FUNC-LABEL: {{^}}simplify_demanded_bits_test_max_slt_i16:
; SI-DAG: s_load_dword [[A:s[0-9]+]], {{s\[[0-9]+:[0-9]+\]}}, 0x13
; SI-DAG: s_load_dword [[B:s[0-9]+]], {{s\[[0-9]+:[0-9]+\]}}, 0x1c
; SI-DAG: s_sext_i32_i16 [[EXT_A:s[0-9]+]], [[A]]
; SI-DAG: s_sext_i32_i16 [[EXT_B:s[0-9]+]], [[B]]
; SI: s_max_i32 [[MAX:s[0-9]+]], [[EXT_A]], [[EXT_B]]
; SI: v_mov_b32_e32 [[VMAX:v[0-9]+]], [[MAX]]
; SI: buffer_store_dword [[VMAX]]
; EG: MAX_INT
define amdgpu_kernel void @simplify_demanded_bits_test_max_slt_i16(i32 addrspace(1)* %out, [8 x i32], i16 signext %a, [8 x i32], i16 signext %b) nounwind {
  %a.ext = sext i16 %a to i32
  %b.ext = sext i16 %b to i32
  %cmp = icmp sgt i32 %a.ext, %b.ext
  %val = select i1 %cmp, i32 %a.ext, i32 %b.ext
  %shl = shl i32 %val, 16
  %sextinreg = ashr i32 %shl, 16
  store i32 %sextinreg, i32 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}s_test_imax_sge_i16:
; SI: s_load_dword
; SI: s_load_dword
; SI: s_sext_i32_i16
; SI: s_sext_i32_i16
; SI: s_max_i32
; EG: MAX_INT
define amdgpu_kernel void @s_test_imax_sge_i16(i16 addrspace(1)* %out, [8 x i32], i16 %a, [8 x i32], i16 %b) nounwind {
  %cmp = icmp sge i16 %a, %b
  %val = select i1 %cmp, i16 %a, i16 %b
  store i16 %val, i16 addrspace(1)* %out
  ret void
}

; 64 bit
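; SI has no single 64-bit integer max instruction, so the SI checks below only
; verify that the kernels compile (s_endpgm); the EG checks expect the 64-bit
; max to be lowered with MAX_UINT/MAX_INT on the 32-bit halves.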
; FUNC-LABEL: {{^}}test_umax_ugt_i64
; SI: s_endpgm
; EG: MAX_UINT
; EG: MAX_UINT
define amdgpu_kernel void @test_umax_ugt_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
  %tmp = icmp ugt i64 %a, %b
  %val = select i1 %tmp, i64 %a, i64 %b
  store i64 %val, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}test_umax_uge_i64
; SI: s_endpgm
; EG: MAX_UINT
; EG: MAX_UINT
define amdgpu_kernel void @test_umax_uge_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
  %tmp = icmp uge i64 %a, %b
  %val = select i1 %tmp, i64 %a, i64 %b
  store i64 %val, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}test_imax_sgt_i64
; SI: s_endpgm
; EG-DAG: MAX_UINT
; EG-DAG: MAX_INT
define amdgpu_kernel void @test_imax_sgt_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
  %tmp = icmp sgt i64 %a, %b
  %val = select i1 %tmp, i64 %a, i64 %b
  store i64 %val, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}test_imax_sge_i64
; SI: s_endpgm
; EG-DAG: MAX_UINT
; EG-DAG: MAX_INT
define amdgpu_kernel void @test_imax_sge_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
  %tmp = icmp sge i64 %a, %b
  %val = select i1 %tmp, i64 %a, i64 %b
  store i64 %val, i64 addrspace(1)* %out, align 8
  ret void
}

declare i32 @llvm.amdgcn.workitem.id.x() #0

attributes #0 = { nounwind readnone }
attributes #1 = { nounwind }