; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -strict-whitespace %s --check-prefix=DEFAULT
; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -strict-whitespace %s --check-prefix=DEFAULT
; RUN: llc -march=amdgcn --misched=ilpmax -verify-machineinstrs < %s | FileCheck -strict-whitespace %s --check-prefix=ILPMAX
; RUN: llc -march=amdgcn --misched=ilpmax -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -strict-whitespace %s --check-prefix=ILPMAX
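; Each scheduler variant is checked on both SI (the default -mcpu) and VI (tonga).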
; The ilpmax scheduler is used for the second test to get the instruction ordering the test expects.
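;
; s_waitcnt can wait on the VM, EXP and LGKM counters independently:
; buffer_load_format_* increments the VM counter and s_load_dword* the LGKM
; counter, so a wait required for one counter should not also drain the
; others. Without independent waits, a sequence like the following would get
; a useless "s_waitcnt vmcnt(1)" before the last load, since s[44:47] only
; depends on the first s_load_dwordx4 (LGKM), not on the outstanding buffer
; loads (VM):
;
;   buffer_load_format_xyzw v[8:11], v0, s[44:47], 0 idxen
;   buffer_load_format_xyzw v[12:15], v0, s[48:51], 0 idxen
;   s_load_dwordx4 s[44:47], s[8:9], 0xc
;   s_waitcnt lgkmcnt(0)
;   buffer_load_format_xyzw v[16:19], v0, s[52:55], 0 idxen
;   s_load_dwordx4 s[48:51], s[8:9], 0x10
;   s_waitcnt vmcnt(1)
;   buffer_load_format_xyzw v[20:23], v0, s[44:47], 0 idxen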
; DEFAULT-LABEL: {{^}}main:
; DEFAULT: s_load_dwordx4
; DEFAULT: s_load_dwordx4
; DEFAULT: s_waitcnt vmcnt(0)
; DEFAULT: exp
; DEFAULT: s_waitcnt lgkmcnt(0)
; DEFAULT: s_endpgm
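; The exp checked above only needs the buffer_load results (VM counter), so a
; vmcnt(0) wait suffices before it; the scalar load of %constptr feeds the
; second exp, which is why an lgkmcnt(0) wait is still expected afterwards.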
define amdgpu_vs void @main(<16 x i8> addrspace(4)* inreg %arg, <16 x i8> addrspace(4)* inreg %arg1, <32 x i8> addrspace(4)* inreg %arg2, <16 x i8> addrspace(4)* inreg %arg3, <16 x i8> addrspace(4)* inreg %arg4, i32 inreg %arg5, i32 %arg6, i32 %arg7, i32 %arg8, i32 %arg9, float addrspace(4)* inreg %constptr) #0 {
main_body:
  %tmp = getelementptr <16 x i8>, <16 x i8> addrspace(4)* %arg3, i32 0
  %tmp10 = load <16 x i8>, <16 x i8> addrspace(4)* %tmp, !tbaa !0
  %tmp10.cast = bitcast <16 x i8> %tmp10 to <4 x i32>
  %tmp11 = call <4 x float> @llvm.amdgcn.buffer.load.format.v4f32(<4 x i32> %tmp10.cast, i32 %arg6, i32 0, i1 false, i1 false)
  %tmp12 = extractelement <4 x float> %tmp11, i32 0
  %tmp13 = extractelement <4 x float> %tmp11, i32 1
  call void @llvm.amdgcn.s.barrier() #1
  %tmp14 = extractelement <4 x float> %tmp11, i32 2
  %tmp15 = load float, float addrspace(4)* %constptr, align 4
  %tmp16 = getelementptr <16 x i8>, <16 x i8> addrspace(4)* %arg3, i32 1
  %tmp17 = load <16 x i8>, <16 x i8> addrspace(4)* %tmp16, !tbaa !0
  %tmp17.cast = bitcast <16 x i8> %tmp17 to <4 x i32>
  %tmp18 = call <4 x float> @llvm.amdgcn.buffer.load.format.v4f32(<4 x i32> %tmp17.cast, i32 %arg6, i32 0, i1 false, i1 false)
  %tmp19 = extractelement <4 x float> %tmp18, i32 0
  %tmp20 = extractelement <4 x float> %tmp18, i32 1
  %tmp21 = extractelement <4 x float> %tmp18, i32 2
  %tmp22 = extractelement <4 x float> %tmp18, i32 3
  call void @llvm.amdgcn.exp.f32(i32 32, i32 15, float %tmp19, float %tmp20, float %tmp21, float %tmp22, i1 false, i1 false) #0
  call void @llvm.amdgcn.exp.f32(i32 12, i32 15, float %tmp12, float %tmp13, float %tmp14, float %tmp15, i1 true, i1 false) #0
  ret void
}
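; With the ilpmax scheduler, each buffer_load in @main2 waits only on the
; s_load_dwordx4 that produced its descriptor (lgkmcnt), and the exports wait
; only on the vector loads (vmcnt), as the ILPMAX checks below require.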
; ILPMAX-LABEL: {{^}}main2:
; ILPMAX: s_load_dwordx4
; ILPMAX: s_waitcnt lgkmcnt(0)
; ILPMAX: buffer_load
; ILPMAX: s_load_dwordx4
; ILPMAX: s_waitcnt lgkmcnt(0)
; ILPMAX: buffer_load
; ILPMAX: s_waitcnt vmcnt(0)
; ILPMAX: exp pos0
; ILPMAX-NEXT: exp param0
; ILPMAX: s_endpgm
define amdgpu_vs void @main2([6 x <16 x i8>] addrspace(4)* byval %arg, [17 x <16 x i8>] addrspace(4)* byval %arg1, [17 x <4 x i32>] addrspace(4)* byval %arg2, [34 x <8 x i32>] addrspace(4)* byval %arg3, [16 x <16 x i8>] addrspace(4)* byval %arg4, i32 inreg %arg5, i32 inreg %arg6, i32 %arg7, i32 %arg8, i32 %arg9, i32 %arg10) #0 {
main_body:
  %tmp = getelementptr [16 x <16 x i8>], [16 x <16 x i8>] addrspace(4)* %arg4, i64 0, i64 0
  %tmp11 = load <16 x i8>, <16 x i8> addrspace(4)* %tmp, align 16, !tbaa !0
  %tmp12 = add i32 %arg5, %arg7
  %tmp11.cast = bitcast <16 x i8> %tmp11 to <4 x i32>
  %tmp13 = call <4 x float> @llvm.amdgcn.buffer.load.format.v4f32(<4 x i32> %tmp11.cast, i32 %tmp12, i32 0, i1 false, i1 false)
  %tmp14 = extractelement <4 x float> %tmp13, i32 0
  %tmp15 = extractelement <4 x float> %tmp13, i32 1
  %tmp16 = extractelement <4 x float> %tmp13, i32 2
  %tmp17 = extractelement <4 x float> %tmp13, i32 3
  %tmp18 = getelementptr [16 x <16 x i8>], [16 x <16 x i8>] addrspace(4)* %arg4, i64 0, i64 1
  %tmp19 = load <16 x i8>, <16 x i8> addrspace(4)* %tmp18, align 16, !tbaa !0
  %tmp20 = add i32 %arg5, %arg7
  %tmp19.cast = bitcast <16 x i8> %tmp19 to <4 x i32>
  %tmp21 = call <4 x float> @llvm.amdgcn.buffer.load.format.v4f32(<4 x i32> %tmp19.cast, i32 %tmp20, i32 0, i1 false, i1 false)
  %tmp22 = extractelement <4 x float> %tmp21, i32 0
  %tmp23 = extractelement <4 x float> %tmp21, i32 1
  %tmp24 = extractelement <4 x float> %tmp21, i32 2
  %tmp25 = extractelement <4 x float> %tmp21, i32 3
  call void @llvm.amdgcn.exp.f32(i32 12, i32 15, float %tmp14, float %tmp15, float %tmp16, float %tmp17, i1 false, i1 false) #0
  call void @llvm.amdgcn.exp.f32(i32 32, i32 15, float %tmp22, float %tmp23, float %tmp24, float %tmp25, i1 true, i1 false) #0
  ret void
}
declare void @llvm.amdgcn.s.barrier() #1
declare <4 x float> @llvm.amdgcn.buffer.load.format.v4f32(<4 x i32>, i32, i32, i1, i1) #2
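; Operands are assumed to be (rsrc, vindex, voffset, glc, slc), the old
; non-struct buffer intrinsic signature.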
declare void @llvm.amdgcn.exp.f32(i32, i32, float, float, float, float, i1, i1) #0
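; Operands are (tgt, en, src0..src3, done, vm); tgt 12 is pos0 and tgt 32 is
; param0, matching the exp pos0/param0 lines checked for @main2.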
attributes #0 = { nounwind }
attributes #1 = { convergent nounwind }
attributes #2 = { nounwind readonly }
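; TBAA tree: !2 is the root, !1 is the "const" type node, and !0 is the access
; tag (base !1, access type !1, offset 0, with what appears to be a trailing
; constant-memory flag).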
!0 = !{!1, !1, i64 0, i32 1}
!1 = !{!"const", !2}
!2 = !{!"tbaa root"}