# RUN: llc --mtriple=amdgcn--amdhsa -mcpu=fiji -verify-machineinstrs -run-pass si-fold-operands,si-shrink-instructions %s -o - | FileCheck %s
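
# si-fold-operands folds materialized immediates into their uses, and
# si-shrink-instructions then rewrites the VOP3 (_e64) adds below into the
# VOP2 (_e32) forms that the CHECK lines expect, where the operands allow it.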

--- |
  define amdgpu_kernel void @add_f32_1.0_one_f16_use() #0 {
    %f16.val0 = load volatile half, half addrspace(1)* undef
    %f16.val1 = load volatile half, half addrspace(1)* undef
    %f32.val = load volatile float, float addrspace(1)* undef
    %f16.add0 = fadd half %f16.val0, 0xH3C00
    %f32.add = fadd float %f32.val, 1.000000e+00
    store volatile half %f16.add0, half addrspace(1)* undef
    store volatile float %f32.add, float addrspace(1)* undef
    ret void
  }

  define amdgpu_kernel void @add_f32_1.0_multi_f16_use() #0 {
    %f16.val0 = load volatile half, half addrspace(1)* undef
    %f16.val1 = load volatile half, half addrspace(1)* undef
    %f32.val = load volatile float, float addrspace(1)* undef
    %f16.add0 = fadd half %f16.val0, 0xH3C00
    %f32.add = fadd float %f32.val, 1.000000e+00
    store volatile half %f16.add0, half addrspace(1)* undef
    store volatile float %f32.add, float addrspace(1)* undef
    ret void
  }

  define amdgpu_kernel void @add_f32_1.0_one_f32_use_one_f16_use () #0 {
    %f16.val0 = load volatile half, half addrspace(1)* undef
    %f16.val1 = load volatile half, half addrspace(1)* undef
    %f32.val = load volatile float, float addrspace(1)* undef
    %f16.add0 = fadd half %f16.val0, 0xH3C00
    %f32.add = fadd float %f32.val, 1.000000e+00
    store volatile half %f16.add0, half addrspace(1)* undef
    store volatile float %f32.add, float addrspace(1)* undef
    ret void
  }

  define amdgpu_kernel void @add_f32_1.0_one_f32_use_multi_f16_use () #0 {
    %f16.val0 = load volatile half, half addrspace(1)* undef
    %f16.val1 = load volatile half, half addrspace(1)* undef
    %f32.val = load volatile float, float addrspace(1)* undef
    %f16.add0 = fadd half %f16.val0, 0xH3C00
    %f16.add1 = fadd half %f16.val1, 0xH3C00
    %f32.add = fadd float %f32.val, 1.000000e+00
    store volatile half %f16.add0, half addrspace(1)* undef
    store volatile half %f16.add1, half addrspace(1)* undef
    store volatile float %f32.add, float addrspace(1)* undef
    ret void
  }

  define amdgpu_kernel void @add_i32_1_multi_f16_use() #0 {
    %f16.val0 = load volatile half, half addrspace(1)* undef
    %f16.val1 = load volatile half, half addrspace(1)* undef
    %f16.add0 = fadd half %f16.val0, 0xH0001
    %f16.add1 = fadd half %f16.val1, 0xH0001
    store volatile half %f16.add0, half addrspace(1)* undef
    store volatile half %f16.add1, half addrspace(1)* undef
    ret void
  }

  define amdgpu_kernel void @add_i32_m2_one_f32_use_multi_f16_use () #0 {
    %f16.val0 = load volatile half, half addrspace(1)* undef
    %f16.val1 = load volatile half, half addrspace(1)* undef
    %f32.val = load volatile float, float addrspace(1)* undef
    %f16.add0 = fadd half %f16.val0, 0xHFFFE
    %f16.add1 = fadd half %f16.val1, 0xHFFFE
    %f32.add = fadd float %f32.val, 0xffffffffc0000000
    store volatile half %f16.add0, half addrspace(1)* undef
    store volatile half %f16.add1, half addrspace(1)* undef
    store volatile float %f32.add, float addrspace(1)* undef
    ret void
  }

  define amdgpu_kernel void @add_f16_1.0_multi_f32_use() #0 {
    %f32.val0 = load volatile float, float addrspace(1)* undef
    %f32.val1 = load volatile float, float addrspace(1)* undef
    %f32.val = load volatile float, float addrspace(1)* undef
    %f32.add0 = fadd float %f32.val0, 1.0
    %f32.add1 = fadd float %f32.val1, 1.0
    store volatile float %f32.add0, float addrspace(1)* undef
    store volatile float %f32.add1, float addrspace(1)* undef
    ret void
  }

  define amdgpu_kernel void @add_f16_1.0_other_high_bits_multi_f16_use() #0 {
    %f16.val0 = load volatile half, half addrspace(1)* undef
    %f16.val1 = load volatile half, half addrspace(1)* undef
    %f32.val = load volatile half, half addrspace(1)* undef
    %f16.add0 = fadd half %f16.val0, 0xH3C00
    %f32.add = fadd half %f32.val, 1.000000e+00
    store volatile half %f16.add0, half addrspace(1)* undef
    store volatile half %f32.add, half addrspace(1)* undef
    ret void
  }

  define amdgpu_kernel void @add_f16_1.0_other_high_bits_use_f16_f32() #0 {
    %f16.val0 = load volatile half, half addrspace(1)* undef
    %f16.val1 = load volatile half, half addrspace(1)* undef
    %f32.val = load volatile half, half addrspace(1)* undef
    %f16.add0 = fadd half %f16.val0, 0xH3C00
    %f32.add = fadd half %f32.val, 1.000000e+00
    store volatile half %f16.add0, half addrspace(1)* undef
    store volatile half %f32.add, half addrspace(1)* undef
    ret void
  }

  attributes #0 = { nounwind }

...

---
# f32 1.0 with a single use should be folded as the low 32-bits of a
# literal constant.

# CHECK-LABEL: name: add_f32_1.0_one_f16_use
# CHECK: %13:vgpr_32 = V_ADD_F16_e32 1065353216, killed %11, implicit $exec
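# (1065353216 is 0x3f800000, the bit pattern of f32 1.0; the single f16 use
# takes it as a 32-bit literal operand.)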

name: add_f32_1.0_one_f16_use
alignment: 1
exposesReturnsTwice: false
legalized: false
regBankSelected: false
selected: false
tracksRegLiveness: true
registers:
  - { id: 0, class: sreg_64 }
  - { id: 1, class: sreg_32 }
  - { id: 2, class: sgpr_32 }
  - { id: 3, class: vgpr_32 }
  - { id: 4, class: sreg_64 }
  - { id: 5, class: sreg_32 }
  - { id: 6, class: sreg_64 }
  - { id: 7, class: sreg_32 }
  - { id: 8, class: sreg_32 }
  - { id: 9, class: sreg_32 }
  - { id: 10, class: sreg_128 }
  - { id: 11, class: vgpr_32 }
  - { id: 12, class: vgpr_32 }
  - { id: 13, class: vgpr_32 }
frameInfo:
  isFrameAddressTaken: false
  isReturnAddressTaken: false
  hasStackMap: false
  hasPatchPoint: false
  stackSize: 0
  offsetAdjustment: 0
  maxAlignment: 0
  adjustsStack: false
  hasCalls: false
  maxCallFrameSize: 0
  hasOpaqueSPAdjustment: false
  hasVAStart: false
  hasMustTailInVarArgFunc: false
body: |
  bb.0 (%ir-block.0):
    %4 = IMPLICIT_DEF
    %5 = COPY %4.sub1
    %6 = IMPLICIT_DEF
    %7 = COPY %6.sub0
    %8 = S_MOV_B32 61440
    %9 = S_MOV_B32 -1
    %10 = REG_SEQUENCE killed %7, 1, killed %5, 2, killed %9, 3, killed %8, 4
    %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 2 from `half addrspace(1)* undef`)
    %12 = V_MOV_B32_e32 1065353216, implicit $exec
    %13 = V_ADD_F16_e64 0, killed %11, 0, %12, 0, 0, implicit $exec
    BUFFER_STORE_SHORT_OFFSET killed %13, %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 2 into `half addrspace(1)* undef`)
    S_ENDPGM 0

...

---
# Materialized f32 inline immediate should not be folded into the f16
# operands

# CHECK-LABEL: name: add_f32_1.0_multi_f16_use
# CHECK: %13:vgpr_32 = V_MOV_B32_e32 1065353216, implicit $exec
# CHECK: %14:vgpr_32 = V_ADD_F16_e32 killed %11, %13, implicit $exec
# CHECK: %15:vgpr_32 = V_ADD_F16_e32 killed %12, killed %13, implicit $exec
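# (With two f16 users the 32-bit immediate is not propagated as a literal into
# each add; both adds keep reading the materialized value from %13.)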

name: add_f32_1.0_multi_f16_use
alignment: 1
exposesReturnsTwice: false
legalized: false
regBankSelected: false
selected: false
tracksRegLiveness: true
registers:
  - { id: 0, class: sreg_64 }
  - { id: 1, class: sreg_32 }
  - { id: 2, class: sgpr_32 }
  - { id: 3, class: vgpr_32 }
  - { id: 4, class: sreg_64 }
  - { id: 5, class: sreg_32 }
  - { id: 6, class: sreg_64 }
  - { id: 7, class: sreg_32 }
  - { id: 8, class: sreg_32 }
  - { id: 9, class: sreg_32 }
  - { id: 10, class: sreg_128 }
  - { id: 11, class: vgpr_32 }
  - { id: 12, class: vgpr_32 }
  - { id: 13, class: vgpr_32 }
  - { id: 14, class: vgpr_32 }
  - { id: 15, class: vgpr_32 }
frameInfo:
  isFrameAddressTaken: false
  isReturnAddressTaken: false
  hasStackMap: false
  hasPatchPoint: false
  stackSize: 0
  offsetAdjustment: 0
  maxAlignment: 0
  adjustsStack: false
  hasCalls: false
  maxCallFrameSize: 0
  hasOpaqueSPAdjustment: false
  hasVAStart: false
  hasMustTailInVarArgFunc: false
body: |
  bb.0 (%ir-block.0):
    %4 = IMPLICIT_DEF
    %5 = COPY %4.sub1
    %6 = IMPLICIT_DEF
    %7 = COPY %6.sub0
    %8 = S_MOV_B32 61440
    %9 = S_MOV_B32 -1
    %10 = REG_SEQUENCE killed %7, 1, killed %5, 2, killed %9, 3, killed %8, 4
    %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 2 from `half addrspace(1)* undef`)
    %12 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `float addrspace(1)* undef`)
    %13 = V_MOV_B32_e32 1065353216, implicit $exec
    %14 = V_ADD_F16_e64 0, killed %11, 0, %13, 0, 0, implicit $exec
    %15 = V_ADD_F16_e64 0, killed %12, 0, killed %13, 0, 0, implicit $exec
    BUFFER_STORE_SHORT_OFFSET killed %14, %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 2 into `half addrspace(1)* undef`)
    BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 2 into `half addrspace(1)* undef`)
    S_ENDPGM 0

...

---
# f32 1.0 should be folded into the single f32 use as an inline
# immediate, and folded into the single f16 use as a literal constant

# CHECK-LABEL: name: add_f32_1.0_one_f32_use_one_f16_use
# CHECK: %15:vgpr_32 = V_ADD_F16_e32 1065353216, %11, implicit $exec
# CHECK: %16:vgpr_32 = V_ADD_F32_e32 1065353216, killed %13, implicit $exec
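# (The f32 add can encode 1.0 directly as an inline constant, while the lone
# f16 use absorbs the same 32-bit value as its one literal operand.)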

name: add_f32_1.0_one_f32_use_one_f16_use
alignment: 1
exposesReturnsTwice: false
legalized: false
regBankSelected: false
selected: false
tracksRegLiveness: true
registers:
  - { id: 0, class: sreg_64 }
  - { id: 1, class: sreg_32 }
  - { id: 2, class: sgpr_32 }
  - { id: 3, class: vgpr_32 }
  - { id: 4, class: sreg_64 }
  - { id: 5, class: sreg_32 }
  - { id: 6, class: sreg_64 }
  - { id: 7, class: sreg_32 }
  - { id: 8, class: sreg_32 }
  - { id: 9, class: sreg_32 }
  - { id: 10, class: sreg_128 }
  - { id: 11, class: vgpr_32 }
  - { id: 12, class: vgpr_32 }
  - { id: 13, class: vgpr_32 }
  - { id: 14, class: vgpr_32 }
  - { id: 15, class: vgpr_32 }
  - { id: 16, class: vgpr_32 }
frameInfo:
  isFrameAddressTaken: false
  isReturnAddressTaken: false
  hasStackMap: false
  hasPatchPoint: false
  stackSize: 0
  offsetAdjustment: 0
  maxAlignment: 0
  adjustsStack: false
  hasCalls: false
  maxCallFrameSize: 0
  hasOpaqueSPAdjustment: false
  hasVAStart: false
  hasMustTailInVarArgFunc: false
body: |
  bb.0 (%ir-block.0):
    %4 = IMPLICIT_DEF
    %5 = COPY %4.sub1
    %6 = IMPLICIT_DEF
    %7 = COPY %6.sub0
    %8 = S_MOV_B32 61440
    %9 = S_MOV_B32 -1
    %10 = REG_SEQUENCE killed %7, 1, killed %5, 2, killed %9, 3, killed %8, 4
    %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 2 from `half addrspace(1)* undef`)
    %12 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 2 from `half addrspace(1)* undef`)
    %13 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `float addrspace(1)* undef`)
    %14 = V_MOV_B32_e32 1065353216, implicit $exec
    %15 = V_ADD_F16_e64 0, %11, 0, %14, 0, 0, implicit $exec
    %16 = V_ADD_F32_e64 0, killed %13, 0, killed %14, 0, 0, implicit $exec
    BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 2 into `half addrspace(1)* undef`)
    BUFFER_STORE_DWORD_OFFSET killed %16, %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 4 into `float addrspace(1)* undef`)
    S_ENDPGM 0

...

---
# f32 1.0 should be folded for the single f32 use as an inline
# constant, and not folded as a multi-use literal for the f16 cases

# CHECK-LABEL: name: add_f32_1.0_one_f32_use_multi_f16_use
# CHECK: %14:vgpr_32 = V_MOV_B32_e32 1065353216, implicit $exec
# CHECK: %15:vgpr_32 = V_ADD_F16_e32 %11, %14, implicit $exec
# CHECK: %16:vgpr_32 = V_ADD_F16_e32 %12, %14, implicit $exec
# CHECK: %17:vgpr_32 = V_ADD_F32_e32 1065353216, killed %13, implicit $exec
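# (Only the f32 add gets the immediate; duplicating the 32-bit literal into
# both f16 adds would be a multi-use literal fold, so the mov is kept.)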

name: add_f32_1.0_one_f32_use_multi_f16_use
alignment: 1
exposesReturnsTwice: false
legalized: false
regBankSelected: false
selected: false
tracksRegLiveness: true
registers:
  - { id: 0, class: sreg_64 }
  - { id: 1, class: sreg_32 }
  - { id: 2, class: sgpr_32 }
  - { id: 3, class: vgpr_32 }
  - { id: 4, class: sreg_64 }
  - { id: 5, class: sreg_32 }
  - { id: 6, class: sreg_64 }
  - { id: 7, class: sreg_32 }
  - { id: 8, class: sreg_32 }
  - { id: 9, class: sreg_32 }
  - { id: 10, class: sreg_128 }
  - { id: 11, class: vgpr_32 }
  - { id: 12, class: vgpr_32 }
  - { id: 13, class: vgpr_32 }
  - { id: 14, class: vgpr_32 }
  - { id: 15, class: vgpr_32 }
  - { id: 16, class: vgpr_32 }
  - { id: 17, class: vgpr_32 }
frameInfo:
  isFrameAddressTaken: false
  isReturnAddressTaken: false
  hasStackMap: false
  hasPatchPoint: false
  stackSize: 0
  offsetAdjustment: 0
  maxAlignment: 0
  adjustsStack: false
  hasCalls: false
  maxCallFrameSize: 0
  hasOpaqueSPAdjustment: false
  hasVAStart: false
  hasMustTailInVarArgFunc: false
body: |
  bb.0 (%ir-block.0):
    %4 = IMPLICIT_DEF
    %5 = COPY %4.sub1
    %6 = IMPLICIT_DEF
    %7 = COPY %6.sub0
    %8 = S_MOV_B32 61440
    %9 = S_MOV_B32 -1
    %10 = REG_SEQUENCE killed %7, 1, killed %5, 2, killed %9, 3, killed %8, 4
    %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 2 from `half addrspace(1)* undef`)
    %12 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 2 from `half addrspace(1)* undef`)
    %13 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `float addrspace(1)* undef`)
    %14 = V_MOV_B32_e32 1065353216, implicit $exec
    %15 = V_ADD_F16_e64 0, %11, 0, %14, 0, 0, implicit $exec
    %16 = V_ADD_F16_e64 0, %12, 0, %14, 0, 0, implicit $exec
    %17 = V_ADD_F32_e64 0, killed %13, 0, killed %14, 0, 0, implicit $exec
    BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 2 into `half addrspace(1)* undef`)
    BUFFER_STORE_SHORT_OFFSET killed %16, %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 2 into `half addrspace(1)* undef`)
    BUFFER_STORE_DWORD_OFFSET killed %17, %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 4 into `float addrspace(1)* undef`)
    S_ENDPGM 0

...

---
# CHECK-LABEL: name: add_i32_1_multi_f16_use
# CHECK: %13:vgpr_32 = V_MOV_B32_e32 1, implicit $exec
# CHECK: %14:vgpr_32 = V_ADD_F16_e32 1, killed %11, implicit $exec
# CHECK: %15:vgpr_32 = V_ADD_F16_e32 1, killed %12, implicit $exec
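# (1 is a hardware inline constant, so it is folded into both f16 adds even
# though the materializing V_MOV_B32 has more than one user.)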

name: add_i32_1_multi_f16_use
alignment: 1
exposesReturnsTwice: false
legalized: false
regBankSelected: false
selected: false
tracksRegLiveness: true
registers:
  - { id: 0, class: sreg_64 }
  - { id: 1, class: sreg_32 }
  - { id: 2, class: sgpr_32 }
  - { id: 3, class: vgpr_32 }
  - { id: 4, class: sreg_64 }
  - { id: 5, class: sreg_32 }
  - { id: 6, class: sreg_64 }
  - { id: 7, class: sreg_32 }
  - { id: 8, class: sreg_32 }
  - { id: 9, class: sreg_32 }
  - { id: 10, class: sreg_128 }
  - { id: 11, class: vgpr_32 }
  - { id: 12, class: vgpr_32 }
  - { id: 13, class: vgpr_32 }
  - { id: 14, class: vgpr_32 }
  - { id: 15, class: vgpr_32 }
frameInfo:
  isFrameAddressTaken: false
  isReturnAddressTaken: false
  hasStackMap: false
  hasPatchPoint: false
  stackSize: 0
  offsetAdjustment: 0
  maxAlignment: 0
  adjustsStack: false
  hasCalls: false
  maxCallFrameSize: 0
  hasOpaqueSPAdjustment: false
  hasVAStart: false
  hasMustTailInVarArgFunc: false
body: |
  bb.0 (%ir-block.0):
    %4 = IMPLICIT_DEF
    %5 = COPY %4.sub1
    %6 = IMPLICIT_DEF
    %7 = COPY %6.sub0
    %8 = S_MOV_B32 61440
    %9 = S_MOV_B32 -1
    %10 = REG_SEQUENCE killed %7, 1, killed %5, 2, killed %9, 3, killed %8, 4
    %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 2 from `half addrspace(1)* undef`)
    %12 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `float addrspace(1)* undef`)
    %13 = V_MOV_B32_e32 1, implicit $exec
    %14 = V_ADD_F16_e64 0, killed %11, 0, %13, 0, 0, implicit $exec
    %15 = V_ADD_F16_e64 0, killed %12, 0, killed %13, 0, 0, implicit $exec
    BUFFER_STORE_SHORT_OFFSET killed %14, %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 2 into `half addrspace(1)* undef`)
    BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 2 into `half addrspace(1)* undef`)
    S_ENDPGM 0

...

---
# CHECK-LABEL: name: add_i32_m2_one_f32_use_multi_f16_use
# CHECK: %14:vgpr_32 = V_MOV_B32_e32 -2, implicit $exec
# CHECK: %15:vgpr_32 = V_ADD_F16_e32 -2, %11, implicit $exec
# CHECK: %16:vgpr_32 = V_ADD_F16_e32 -2, %12, implicit $exec
# CHECK: %17:vgpr_32 = V_ADD_F32_e32 -2, killed %13, implicit $exec
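# (-2 is likewise an inline constant, so it is folded into the two f16 adds
# and the f32 add regardless of the use count.)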

name: add_i32_m2_one_f32_use_multi_f16_use
alignment: 1
exposesReturnsTwice: false
legalized: false
regBankSelected: false
selected: false
tracksRegLiveness: true
registers:
  - { id: 0, class: sreg_64 }
  - { id: 1, class: sreg_32 }
  - { id: 2, class: sgpr_32 }
  - { id: 3, class: vgpr_32 }
  - { id: 4, class: sreg_64 }
  - { id: 5, class: sreg_32 }
  - { id: 6, class: sreg_64 }
  - { id: 7, class: sreg_32 }
  - { id: 8, class: sreg_32 }
  - { id: 9, class: sreg_32 }
  - { id: 10, class: sreg_128 }
  - { id: 11, class: vgpr_32 }
  - { id: 12, class: vgpr_32 }
  - { id: 13, class: vgpr_32 }
  - { id: 14, class: vgpr_32 }
  - { id: 15, class: vgpr_32 }
  - { id: 16, class: vgpr_32 }
  - { id: 17, class: vgpr_32 }
frameInfo:
  isFrameAddressTaken: false
  isReturnAddressTaken: false
  hasStackMap: false
  hasPatchPoint: false
  stackSize: 0
  offsetAdjustment: 0
  maxAlignment: 0
  adjustsStack: false
  hasCalls: false
  maxCallFrameSize: 0
  hasOpaqueSPAdjustment: false
  hasVAStart: false
  hasMustTailInVarArgFunc: false
body: |
  bb.0 (%ir-block.0):
    %4 = IMPLICIT_DEF
    %5 = COPY %4.sub1
    %6 = IMPLICIT_DEF
    %7 = COPY %6.sub0
    %8 = S_MOV_B32 61440
    %9 = S_MOV_B32 -1
    %10 = REG_SEQUENCE killed %7, 1, killed %5, 2, killed %9, 3, killed %8, 4
    %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 2 from `half addrspace(1)* undef`)
    %12 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 2 from `half addrspace(1)* undef`)
    %13 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `float addrspace(1)* undef`)
    %14 = V_MOV_B32_e32 -2, implicit $exec
    %15 = V_ADD_F16_e64 0, %11, 0, %14, 0, 0, implicit $exec
    %16 = V_ADD_F16_e64 0, %12, 0, %14, 0, 0, implicit $exec
    %17 = V_ADD_F32_e64 0, killed %13, 0, killed %14, 0, 0, implicit $exec
    BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 2 into `half addrspace(1)* undef`)
    BUFFER_STORE_SHORT_OFFSET killed %16, %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 2 into `half addrspace(1)* undef`)
    BUFFER_STORE_DWORD_OFFSET killed %17, %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 4 into `float addrspace(1)* undef`)
    S_ENDPGM 0

...

---
# f16 1.0 is not an inline immediate for the f32 operations, and should not
# be folded as a multi-use literal for the f32 cases

# CHECK-LABEL: name: add_f16_1.0_multi_f32_use
# CHECK: %13:vgpr_32 = V_MOV_B32_e32 15360, implicit $exec
# CHECK: %14:vgpr_32 = V_ADD_F32_e32 %11, %13, implicit $exec
# CHECK: %15:vgpr_32 = V_ADD_F32_e32 %12, %13, implicit $exec
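# (15360 is 0x3C00, the f16 1.0 bit pattern; for the f32 adds it is neither an
# inline constant nor a single-use literal, so the mov stays in a register.)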

name: add_f16_1.0_multi_f32_use
alignment: 1
exposesReturnsTwice: false
legalized: false
regBankSelected: false
selected: false
tracksRegLiveness: true
registers:
  - { id: 0, class: sreg_64 }
  - { id: 1, class: sreg_32 }
  - { id: 2, class: sgpr_32 }
  - { id: 3, class: vgpr_32 }
  - { id: 4, class: sreg_64 }
  - { id: 5, class: sreg_32 }
  - { id: 6, class: sreg_64 }
  - { id: 7, class: sreg_32 }
  - { id: 8, class: sreg_32 }
  - { id: 9, class: sreg_32 }
  - { id: 10, class: sreg_128 }
  - { id: 11, class: vgpr_32 }
  - { id: 12, class: vgpr_32 }
  - { id: 13, class: vgpr_32 }
  - { id: 14, class: vgpr_32 }
  - { id: 15, class: vgpr_32 }
frameInfo:
  isFrameAddressTaken: false
  isReturnAddressTaken: false
  hasStackMap: false
  hasPatchPoint: false
  stackSize: 0
  offsetAdjustment: 0
  maxAlignment: 0
  adjustsStack: false
  hasCalls: false
  maxCallFrameSize: 0
  hasOpaqueSPAdjustment: false
  hasVAStart: false
  hasMustTailInVarArgFunc: false
body: |
  bb.0 (%ir-block.0):
    %4 = IMPLICIT_DEF
    %5 = COPY %4.sub1
    %6 = IMPLICIT_DEF
    %7 = COPY %6.sub0
    %8 = S_MOV_B32 61440
    %9 = S_MOV_B32 -1
    %10 = REG_SEQUENCE killed %7, 1, killed %5, 2, killed %9, 3, killed %8, 4
    %11 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `float addrspace(1)* undef`)
    %12 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `float addrspace(1)* undef`)
    %13 = V_MOV_B32_e32 15360, implicit $exec
    %14 = V_ADD_F32_e64 0, %11, 0, %13, 0, 0, implicit $exec
    %15 = V_ADD_F32_e64 0, %12, 0, %13, 0, 0, implicit $exec
    BUFFER_STORE_DWORD_OFFSET killed %14, %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 4 into `float addrspace(1)* undef`)
    BUFFER_STORE_DWORD_OFFSET killed %15, %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 4 into `float addrspace(1)* undef`)
    S_ENDPGM 0

...

---
# The low 16-bits are an inline immediate, but the high bits are junk
# FIXME: Should be able to fold this

# CHECK-LABEL: name: add_f16_1.0_other_high_bits_multi_f16_use
# CHECK: %13:vgpr_32 = V_MOV_B32_e32 80886784, implicit $exec
# CHECK: %14:vgpr_32 = V_ADD_F16_e32 %11, %13, implicit $exec
# CHECK: %15:vgpr_32 = V_ADD_F16_e32 %12, %13, implicit $exec
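# (80886784 is 0x04D23C00: the low 16 bits are the f16 1.0 inline immediate,
# but the nonzero high bits currently keep the fold from happening.)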
|
2016-12-10 08:39:12 +08:00
|
|
|
|
|
|
|
name: add_f16_1.0_other_high_bits_multi_f16_use
|
[Alignment] Use llvm::Align in MachineFunction and TargetLowering - fixes mir parsing
Summary:
This catches malformed mir files which specify alignment as log2 instead of pow2.
See https://reviews.llvm.org/D65945 for reference,
This is patch is part of a series to introduce an Alignment type.
See this thread for context: http://lists.llvm.org/pipermail/llvm-dev/2019-July/133851.html
See this patch for the introduction of the type: https://reviews.llvm.org/D64790
Reviewers: courbet
Subscribers: MatzeB, qcolombet, dschuff, arsenm, sdardis, nemanjai, jvesely, nhaehnle, hiraditya, kbarton, asb, rbar, johnrusso, simoncook, apazos, sabuasal, niosHD, jrtc27, MaskRay, zzheng, edward-jones, atanasyan, rogfer01, MartinMosbeck, brucehoult, the_o, PkmX, jocewei, jsji, Petar.Avramovic, asbirlea, s.egerton, pzheng, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D67433
llvm-svn: 371608
2019-09-11 19:16:48 +08:00
|
|
|
alignment: 1
|
2016-12-10 08:39:12 +08:00
|
|
|
exposesReturnsTwice: false
legalized: false
regBankSelected: false
selected: false
tracksRegLiveness: true
registers:
  - { id: 0, class: sreg_64 }
  - { id: 1, class: sreg_32 }
  - { id: 2, class: sgpr_32 }
  - { id: 3, class: vgpr_32 }
  - { id: 4, class: sreg_64 }
  - { id: 5, class: sreg_32 }
  - { id: 6, class: sreg_64 }
  - { id: 7, class: sreg_32 }
  - { id: 8, class: sreg_32 }
  - { id: 9, class: sreg_32 }
  - { id: 10, class: sreg_128 }
  - { id: 11, class: vgpr_32 }
  - { id: 12, class: vgpr_32 }
  - { id: 13, class: vgpr_32 }
  - { id: 14, class: vgpr_32 }
  - { id: 15, class: vgpr_32 }
frameInfo:
  isFrameAddressTaken: false
  isReturnAddressTaken: false
  hasStackMap: false
  hasPatchPoint: false
  stackSize: 0
  offsetAdjustment: 0
  maxAlignment: 0
  adjustsStack: false
  hasCalls: false
  maxCallFrameSize: 0
  hasOpaqueSPAdjustment: false
  hasVAStart: false
  hasMustTailInVarArgFunc: false
body: |
  bb.0 (%ir-block.0):
    %4 = IMPLICIT_DEF
    %5 = COPY %4.sub1
    %6 = IMPLICIT_DEF
    %7 = COPY %6.sub0
    %8 = S_MOV_B32 61440
    %9 = S_MOV_B32 -1
    %10 = REG_SEQUENCE killed %7, 1, killed %5, 2, killed %9, 3, killed %8, 4
    %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 2 from `half addrspace(1)* undef`)
    %12 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 2 from `half addrspace(1)* undef`)
    %13 = V_MOV_B32_e32 80886784, implicit $exec
    %14 = V_ADD_F16_e64 0, %11, 0, %13, 0, 0, implicit $exec
    %15 = V_ADD_F16_e64 0, %12, 0, %13, 0, 0, implicit $exec
    BUFFER_STORE_SHORT_OFFSET killed %14, %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 2 into `half addrspace(1)* undef`)
    BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 2 into `half addrspace(1)* undef`)
    S_ENDPGM 0

...
---
# FIXME: Should fold inline immediate into f16 and literal use into
# f32 instruction.
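# (For reference: 305413120 = 0x12343C00. The low half, 0x3C00, is the f16 inline immediate 1.0, but the full 32-bit value is not an f32 inline immediate, so the f32 use would need a literal constant.)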
# CHECK-LABEL: name: add_f16_1.0_other_high_bits_use_f16_f32
# CHECK: %13:vgpr_32 = V_MOV_B32_e32 305413120, implicit $exec
# CHECK: %14:vgpr_32 = V_ADD_F32_e32 %11, %13, implicit $exec
# CHECK: %15:vgpr_32 = V_ADD_F16_e32 %12, %13, implicit $exec
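# As the checks above show, the constant is currently kept in %13 and feeds both the f32 add and the f16 add from the register.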
name: add_f16_1.0_other_high_bits_use_f16_f32
alignment: 1
exposesReturnsTwice: false
legalized: false
regBankSelected: false
selected: false
tracksRegLiveness: true
registers:
  - { id: 0, class: sreg_64 }
  - { id: 1, class: sreg_32 }
  - { id: 2, class: sgpr_32 }
  - { id: 3, class: vgpr_32 }
  - { id: 4, class: sreg_64 }
  - { id: 5, class: sreg_32 }
  - { id: 6, class: sreg_64 }
  - { id: 7, class: sreg_32 }
  - { id: 8, class: sreg_32 }
  - { id: 9, class: sreg_32 }
  - { id: 10, class: sreg_128 }
  - { id: 11, class: vgpr_32 }
  - { id: 12, class: vgpr_32 }
  - { id: 13, class: vgpr_32 }
  - { id: 14, class: vgpr_32 }
  - { id: 15, class: vgpr_32 }
frameInfo:
  isFrameAddressTaken: false
  isReturnAddressTaken: false
  hasStackMap: false
  hasPatchPoint: false
  stackSize: 0
  offsetAdjustment: 0
  maxAlignment: 0
  adjustsStack: false
  hasCalls: false
  maxCallFrameSize: 0
  hasOpaqueSPAdjustment: false
  hasVAStart: false
  hasMustTailInVarArgFunc: false
body: |
  bb.0 (%ir-block.0):
    %4 = IMPLICIT_DEF
    %5 = COPY %4.sub1
    %6 = IMPLICIT_DEF
    %7 = COPY %6.sub0
    %8 = S_MOV_B32 61440
    %9 = S_MOV_B32 -1
    %10 = REG_SEQUENCE killed %7, 1, killed %5, 2, killed %9, 3, killed %8, 4
    %11 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `float addrspace(1)* undef`)
    %12 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 2 from `half addrspace(1)* undef`)
    %13 = V_MOV_B32_e32 305413120, implicit $exec
    %14 = V_ADD_F32_e64 0, %11, 0, %13, 0, 0, implicit $exec
    %15 = V_ADD_F16_e64 0, %12, 0, %13, 0, 0, implicit $exec
    BUFFER_STORE_DWORD_OFFSET killed %14, %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 4 into `float addrspace(1)* undef`)
    BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 2 into `half addrspace(1)* undef`)
    S_ENDPGM 0

...