//===-- MIMGInstructions.td - MIMG Instruction Definitions ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

// MIMG-specific encoding families to distinguish between semantically
// equivalent machine instructions with different encoding.
//
// - MIMGEncGfx6: encoding introduced with gfx6 (obsoleted for atomics in gfx8)
// - MIMGEncGfx8: encoding introduced with gfx8 for atomics
class MIMGEncoding;

def MIMGEncGfx6 : MIMGEncoding;
def MIMGEncGfx8 : MIMGEncoding;

def MIMGEncoding : GenericEnum {
  let FilterClass = "MIMGEncoding";
}
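// Note (sketch of intent, not the exact generated code): because MIMGEncoding
// is also a GenericEnum, the searchable-tables backend is expected to emit a
// matching C++ enum, roughly `enum MIMGEncoding { MIMGEncGfx6, MIMGEncGfx8 };`,
// which the MIMG info tables defined below reference by name.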

// Represent an ISA-level opcode, independent of the encoding and the
// vdata/vaddr size.
class MIMGBaseOpcode {
  MIMGBaseOpcode BaseOpcode = !cast<MIMGBaseOpcode>(NAME);
  bit Store = 0;
  bit Atomic = 0;
  bit AtomicX2 = 0; // (f)cmpswap
  bit Sampler = 0;
  bits<8> NumExtraArgs = 0;
  bit Gradients = 0;
  bit Coordinates = 1;
  bit LodOrClampOrMip = 0;
  bit HasD16 = 0;
}

def MIMGBaseOpcode : GenericEnum {
  let FilterClass = "MIMGBaseOpcode";
}

def MIMGBaseOpcodesTable : GenericTable {
  let FilterClass = "MIMGBaseOpcode";
  let CppTypeName = "MIMGBaseOpcodeInfo";
  let Fields = ["BaseOpcode", "Store", "Atomic", "AtomicX2", "Sampler",
                "NumExtraArgs", "Gradients", "Coordinates", "LodOrClampOrMip",
                "HasD16"];
  GenericEnum TypeOf_BaseOpcode = MIMGBaseOpcode;

  let PrimaryKey = ["BaseOpcode"];
  let PrimaryKeyName = "getMIMGBaseOpcodeInfo";
}
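// Illustrative note: each MIMGBaseOpcode def becomes one MIMGBaseOpcodeInfo
// row, and getMIMGBaseOpcodeInfo(BaseOpcode) is the generated lookup that the
// C++ lowering code can use to query per-opcode properties such as Sampler,
// Gradients or HasD16 (a sketch of intended usage, not of the exact generated
// signature).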

def MIMGDim : GenericEnum {
  let FilterClass = "AMDGPUDimProps";
}

def MIMGDimInfoTable : GenericTable {
  let FilterClass = "AMDGPUDimProps";
  let CppTypeName = "MIMGDimInfo";
  let Fields = ["Dim", "NumCoords", "NumGradients", "DA"];
  GenericEnum TypeOf_Dim = MIMGDim;

  let PrimaryKey = ["Dim"];
  let PrimaryKeyName = "getMIMGDimInfo";
}
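// Illustrative note (assumption about usage): AMDGPUDimProps is defined
// elsewhere, and getMIMGDimInfo is presumably how the image-intrinsic lowering
// translates an intrinsic's dimension into its coordinate/gradient counts and
// the DA bit; this describes the intent, not the generated API.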

class MIMGLZMapping<MIMGBaseOpcode l, MIMGBaseOpcode lz> {
  MIMGBaseOpcode L = l;
  MIMGBaseOpcode LZ = lz;
}

def MIMGLZMappingTable : GenericTable {
  let FilterClass = "MIMGLZMapping";
  let CppTypeName = "MIMGLZMappingInfo";
  let Fields = ["L", "LZ"];
  GenericEnum TypeOf_L = MIMGBaseOpcode;
  GenericEnum TypeOf_LZ = MIMGBaseOpcode;

  let PrimaryKey = ["L"];
  let PrimaryKeyName = "getMIMGLZMappingInfo";
}
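// Illustrative example (an assumption about how the table is populated
// elsewhere): a mapping such as MIMGLZMapping<IMAGE_SAMPLE_L, IMAGE_SAMPLE_LZ>
// lets the lowering code rewrite a _L sample whose lod operand is a constant
// zero into the cheaper _LZ form by dropping the lod and switching the base
// opcode via getMIMGLZMappingInfo.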

class mimg <bits<7> si, bits<7> vi = si> {
  field bits<7> SI = si;
  field bits<7> VI = vi;
}
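// For example, IMAGE_ATOMIC_SWAP below uses mimg<0x0f, 0x10>: the same atomic
// takes hardware opcode 0x0f in the SI encoding and 0x10 in the VI encoding,
// while single-argument uses such as mimg<0x14> keep the same opcode for both.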

class MIMG <dag outs, string dns = "">
  : InstSI <outs, (ins), "", []> {

  let VM_CNT = 1;
  let EXP_CNT = 1;
  let MIMG = 1;
  let Uses = [EXEC];
  let mayLoad = 1;
  let mayStore = 0;
  let hasPostISelHook = 1;
  let SchedRW = [WriteVMEM];
  let UseNamedOperandTable = 1;
  let hasSideEffects = 0; // XXX ????

  let SubtargetPredicate = isGCN;
  let DecoderNamespace = dns;
  let isAsmParserOnly = !if(!eq(dns,""), 1, 0);
  let AsmMatchConverter = "cvtMIMG";
  let usesCustomInserter = 1;

  Instruction Opcode = !cast<Instruction>(NAME);
  MIMGBaseOpcode BaseOpcode;
  MIMGEncoding MIMGEncoding = MIMGEncGfx6;
  bits<8> VDataDwords;
  bits<8> VAddrDwords;
}

def MIMGInfoTable : GenericTable {
  let FilterClass = "MIMG";
  let CppTypeName = "MIMGInfo";
  let Fields = ["Opcode", "BaseOpcode", "MIMGEncoding", "VDataDwords", "VAddrDwords"];
  GenericEnum TypeOf_BaseOpcode = MIMGBaseOpcode;
  GenericEnum TypeOf_MIMGEncoding = MIMGEncoding;

  let PrimaryKey = ["BaseOpcode", "MIMGEncoding", "VDataDwords", "VAddrDwords"];
  let PrimaryKeyName = "getMIMGOpcodeHelper";
}

def getMIMGInfo : SearchIndex {
  let Table = MIMGInfoTable;
  let Key = ["Opcode"];
}
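// Illustrative note on the two lookup directions (intent, not the generated
// API): getMIMGOpcodeHelper maps (BaseOpcode, MIMGEncoding, VDataDwords,
// VAddrDwords) to a concrete machine opcode, which is what the manual
// image-intrinsic lowering needs once it knows the data and address sizes;
// getMIMGInfo goes the other way, from a machine opcode back to its MIMGInfo
// row.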

class MIMG_NoSampler_Helper <bits<7> op, string asm,
                             RegisterClass dst_rc,
                             RegisterClass addr_rc,
                             string dns="">
  : MIMG <(outs dst_rc:$vdata), dns>,
    MIMGe<op> {
  let ssamp = 0;
  let d16 = !if(BaseOpcode.HasD16, ?, 0);

  let InOperandList = !con((ins addr_rc:$vaddr, SReg_256:$srsrc,
                                DMask:$dmask, UNorm:$unorm, GLC:$glc, SLC:$slc,
                                R128:$r128, TFE:$tfe, LWE:$lwe, DA:$da),
                           !if(BaseOpcode.HasD16, (ins D16:$d16), (ins)));
  let AsmString = asm#" $vdata, $vaddr, $srsrc$dmask$unorm$glc$slc$r128$tfe$lwe$da"
                      #!if(BaseOpcode.HasD16, "$d16", "");
}

multiclass MIMG_NoSampler_Src_Helper <bits<7> op, string asm,
                                      RegisterClass dst_rc,
                                      bit enableDisasm> {
  let VAddrDwords = 1 in
  def NAME # _V1 : MIMG_NoSampler_Helper <op, asm, dst_rc, VGPR_32,
                                          !if(enableDisasm, "AMDGPU", "")>;
  let VAddrDwords = 2 in
  def NAME # _V2 : MIMG_NoSampler_Helper <op, asm, dst_rc, VReg_64>;
  let VAddrDwords = 3 in
  def NAME # _V3 : MIMG_NoSampler_Helper <op, asm, dst_rc, VReg_96>;
  let VAddrDwords = 4 in
  def NAME # _V4 : MIMG_NoSampler_Helper <op, asm, dst_rc, VReg_128>;
}
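// The _V1.._V4 suffix added here encodes the vaddr size in dwords. Only the
// single-dword variant is placed in the "AMDGPU" decoder namespace (and only
// when enableDisasm is set), mirroring the reasoning spelled out for the
// atomic helpers below: the address size is not encoded in the instruction, so
// only one vaddr variant can be picked by the disassembler.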

multiclass MIMG_NoSampler <bits<7> op, string asm, bit has_d16, bit mip = 0,
                           bit isResInfo = 0> {
  def "" : MIMGBaseOpcode {
    let Coordinates = !if(isResInfo, 0, 1);
    let LodOrClampOrMip = mip;
    let HasD16 = has_d16;
  }

  let BaseOpcode = !cast<MIMGBaseOpcode>(NAME),
      mayLoad = !if(isResInfo, 0, 1) in {
    let VDataDwords = 1 in
    defm _V1 : MIMG_NoSampler_Src_Helper <op, asm, VGPR_32, 1>;
    let VDataDwords = 2 in
    defm _V2 : MIMG_NoSampler_Src_Helper <op, asm, VReg_64, 0>;
    let VDataDwords = 3 in
    defm _V3 : MIMG_NoSampler_Src_Helper <op, asm, VReg_96, 0>;
    let VDataDwords = 4 in
    defm _V4 : MIMG_NoSampler_Src_Helper <op, asm, VReg_128, 0>;
  }
}
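// Naming sketch: `defm IMAGE_LOAD : MIMG_NoSampler <0x00000000, "image_load", 1>;`
// (defined below) first creates the IMAGE_LOAD base opcode, then the outer
// _V1.._V4 (vdata dwords) and inner _V1.._V4 (vaddr dwords) suffixes combine
// into machine instructions such as IMAGE_LOAD_V1_V1 through IMAGE_LOAD_V4_V4.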

class MIMG_Store_Helper <bits<7> op, string asm,
                         RegisterClass data_rc,
                         RegisterClass addr_rc,
                         string dns = "">
  : MIMG <(outs), dns>,
    MIMGe<op> {
  let ssamp = 0;
  let d16 = !if(BaseOpcode.HasD16, ?, 0);

  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let hasPostISelHook = 0;
  let DisableWQM = 1;

  let InOperandList = !con((ins data_rc:$vdata, addr_rc:$vaddr, SReg_256:$srsrc,
                                DMask:$dmask, UNorm:$unorm, GLC:$glc, SLC:$slc,
                                R128:$r128, TFE:$tfe, LWE:$lwe, DA:$da),
                           !if(BaseOpcode.HasD16, (ins D16:$d16), (ins)));
  let AsmString = asm#" $vdata, $vaddr, $srsrc$dmask$unorm$glc$slc$r128$tfe$lwe$da"
                      #!if(BaseOpcode.HasD16, "$d16", "");
}

multiclass MIMG_Store_Addr_Helper <bits<7> op, string asm,
                                   RegisterClass data_rc,
                                   bit enableDisasm> {
  let VAddrDwords = 1 in
  def NAME # _V1 : MIMG_Store_Helper <op, asm, data_rc, VGPR_32,
                                      !if(enableDisasm, "AMDGPU", "")>;
  let VAddrDwords = 2 in
  def NAME # _V2 : MIMG_Store_Helper <op, asm, data_rc, VReg_64>;
  let VAddrDwords = 3 in
  def NAME # _V3 : MIMG_Store_Helper <op, asm, data_rc, VReg_96>;
  let VAddrDwords = 4 in
  def NAME # _V4 : MIMG_Store_Helper <op, asm, data_rc, VReg_128>;
}

multiclass MIMG_Store <bits<7> op, string asm, bit has_d16, bit mip = 0> {
  def "" : MIMGBaseOpcode {
    let Store = 1;
    let LodOrClampOrMip = mip;
    let HasD16 = has_d16;
  }

  let BaseOpcode = !cast<MIMGBaseOpcode>(NAME) in {
    let VDataDwords = 1 in
    defm _V1 : MIMG_Store_Addr_Helper <op, asm, VGPR_32, 1>;
    let VDataDwords = 2 in
    defm _V2 : MIMG_Store_Addr_Helper <op, asm, VReg_64, 0>;
    let VDataDwords = 3 in
    defm _V3 : MIMG_Store_Addr_Helper <op, asm, VReg_96, 0>;
    let VDataDwords = 4 in
    defm _V4 : MIMG_Store_Addr_Helper <op, asm, VReg_128, 0>;
  }
}

class MIMG_Atomic_Helper <string asm, RegisterClass data_rc,
                          RegisterClass addr_rc, string dns="",
                          bit enableDasm = 0>
  : MIMG <(outs data_rc:$vdst), !if(enableDasm, dns, "")> {
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 1; // FIXME: Remove this
  let hasPostISelHook = 0;
  let DisableWQM = 1;
  let Constraints = "$vdst = $vdata";
  let AsmMatchConverter = "cvtMIMGAtomic";

  let InOperandList = (ins data_rc:$vdata, addr_rc:$vaddr, SReg_256:$srsrc,
                           DMask:$dmask, UNorm:$unorm, GLC:$glc, SLC:$slc,
                           R128:$r128, TFE:$tfe, LWE:$lwe, DA:$da);
  let AsmString = asm#" $vdst, $vaddr, $srsrc$dmask$unorm$glc$slc$r128$tfe$lwe$da";
}
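// The "$vdst = $vdata" constraint ties the result register to the data source:
// the atomic both consumes the operand value and returns the value read from
// memory in the same VGPRs, which is also why mayLoad and mayStore are both
// set here.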

multiclass MIMG_Atomic_Helper_m <mimg op, string asm, RegisterClass data_rc,
                                 RegisterClass addr_rc, bit enableDasm = 0> {
  let ssamp = 0, d16 = 0 in {
    def _si : MIMG_Atomic_Helper<asm, data_rc, addr_rc, "SICI", enableDasm>,
              SIMCInstr<NAME, SIEncodingFamily.SI>,
              MIMGe<op.SI> {
      let AssemblerPredicates = [isSICI];
      let DisableDecoder = DisableSIDecoder;
    }

    def _vi : MIMG_Atomic_Helper<asm, data_rc, addr_rc, "VI", enableDasm>,
              SIMCInstr<NAME, SIEncodingFamily.VI>,
              MIMGe<op.VI> {
      let AssemblerPredicates = [isVI];
      let DisableDecoder = DisableVIDecoder;
      let MIMGEncoding = MIMGEncGfx8;
    }
  }
}

multiclass MIMG_Atomic_Addr_Helper_m <mimg op, string asm,
                                      RegisterClass data_rc,
                                      bit enableDasm = 0> {
  // _V* variants have different address size, but the size is not encoded.
  // So only one variant can be disassembled. V1 looks the safest to decode.
  let VAddrDwords = 1 in
  defm _V1 : MIMG_Atomic_Helper_m <op, asm, data_rc, VGPR_32, enableDasm>;
  let VAddrDwords = 2 in
  defm _V2 : MIMG_Atomic_Helper_m <op, asm, data_rc, VReg_64>;
  let VAddrDwords = 3 in
  defm _V3 : MIMG_Atomic_Helper_m <op, asm, data_rc, VReg_96>;
  let VAddrDwords = 4 in
  defm _V4 : MIMG_Atomic_Helper_m <op, asm, data_rc, VReg_128>;
}

multiclass MIMG_Atomic <mimg op, string asm, bit isCmpSwap = 0> { // 64-bit atomics
  def "" : MIMGBaseOpcode {
    let Atomic = 1;
    let AtomicX2 = isCmpSwap;
  }

  let BaseOpcode = !cast<MIMGBaseOpcode>(NAME) in {
    // _V* variants have different dst size, but the size is encoded implicitly,
    // using dmask and tfe. Only 32-bit variant is registered with disassembler.
    // Other variants are reconstructed by disassembler using dmask and tfe.
    let VDataDwords = !if(isCmpSwap, 2, 1) in
    defm _V1 : MIMG_Atomic_Addr_Helper_m <op, asm, !if(isCmpSwap, VReg_64, VGPR_32), 1>;
    let VDataDwords = !if(isCmpSwap, 4, 2) in
    defm _V2 : MIMG_Atomic_Addr_Helper_m <op, asm, !if(isCmpSwap, VReg_128, VReg_64)>;
  }
}
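// Worked example: IMAGE_ATOMIC_CMPSWAP below is instantiated with isCmpSwap = 1,
// so its "_V1" data variant already carries two dwords (compare plus swap value
// in a VReg_64) and its "_V2" variant four, whereas a plain atomic such as
// IMAGE_ATOMIC_ADD uses VGPR_32 / VReg_64 for the same suffixes.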

class MIMG_Sampler_Helper <bits<7> op, string asm, RegisterClass dst_rc,
                           RegisterClass src_rc, string dns="">
  : MIMG <(outs dst_rc:$vdata), dns>,
    MIMGe<op> {
  let d16 = !if(BaseOpcode.HasD16, ?, 0);

  let InOperandList = !con((ins src_rc:$vaddr, SReg_256:$srsrc, SReg_128:$ssamp,
                                DMask:$dmask, UNorm:$unorm, GLC:$glc, SLC:$slc,
                                R128:$r128, TFE:$tfe, LWE:$lwe, DA:$da),
                           !if(BaseOpcode.HasD16, (ins D16:$d16), (ins)));
  let AsmString = asm#" $vdata, $vaddr, $srsrc, $ssamp$dmask$unorm$glc$slc$r128$tfe$lwe$da"
                      #!if(BaseOpcode.HasD16, "$d16", "");
}

class MIMGAddrSize<int dw, bit enable_disasm> {
  int NumWords = dw;

  RegisterClass RegClass = !if(!le(NumWords, 0), ?,
                             !if(!eq(NumWords, 1), VGPR_32,
                               !if(!eq(NumWords, 2), VReg_64,
                                 !if(!eq(NumWords, 3), VReg_96,
                                   !if(!eq(NumWords, 4), VReg_128,
                                     !if(!le(NumWords, 8), VReg_256,
                                       !if(!le(NumWords, 16), VReg_512, ?)))))));

  // Whether the instruction variant with this vaddr size should be enabled for
  // the auto-generated disassembler.
  bit Disassemble = enable_disasm;
}

// Return whether a value inside the range [min, max] (endpoints inclusive)
// is in the given list.
class isRangeInList<int min, int max, list<int> lst> {
  bit ret = !foldl(0, lst, lhs, y, !or(lhs, !and(!le(min, y), !le(y, max))));
}
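// Worked example: isRangeInList<5, 8, [2, 3, 7]>.ret folds to 1 because the
// list element 7 satisfies 5 <= 7 <= 8, while isRangeInList<5, 8, [2, 3]>.ret
// folds to 0.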

class MIMGAddrSizes_tmp<list<MIMGAddrSize> lst, int min> {
  list<MIMGAddrSize> List = lst;
  int Min = min;
}

class MIMG_Sampler_AddrSizes<AMDGPUSampleVariant sample> {
  // List of all possible numbers of address words, taking all combinations of
  // A16 and image dimension into account (note: no MSAA, since this is for
  // sample/gather ops).
  list<int> AllNumAddrWords =
    !foreach(dw, !if(sample.Gradients,
                     !if(!eq(sample.LodOrClamp, ""),
                         [2, 3, 4, 5, 6, 7, 9],
                         [2, 3, 4, 5, 7, 8, 10]),
                     !if(!eq(sample.LodOrClamp, ""),
                         [1, 2, 3],
                         [1, 2, 3, 4])),
             !add(dw, !size(sample.ExtraAddrArgs)));

  // Generate machine instructions based on possible register classes for the
  // required numbers of address words. The disassembler defaults to the
  // smallest register class.
  list<MIMGAddrSize> MachineInstrs =
    !foldl(MIMGAddrSizes_tmp<[], 0>, [1, 2, 3, 4, 8, 16], lhs, dw,
           !if(isRangeInList<lhs.Min, dw, AllNumAddrWords>.ret,
               MIMGAddrSizes_tmp<
                 !listconcat(lhs.List, [MIMGAddrSize<dw, !empty(lhs.List)>]),
                 !if(!eq(dw, 3), 3, !add(dw, 1))>, // we still need _V4 for codegen w/ 3 dwords
               lhs)).List;
}
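// Worked example (assuming the plain AMDGPUSample variant has no gradients, no
// lod/clamp modifier and no extra address arguments): AllNumAddrWords is
// [1, 2, 3], and the fold over [1, 2, 3, 4, 8, 16] keeps the 1-, 2-, 3- and
// 4-dword entries (4 survives because Min stays at 3 after dw == 3), so a
// plain image_sample gets _V1.._V4 address variants with only _V1 registered
// for disassembly.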

multiclass MIMG_Sampler_Src_Helper <bits<7> op, string asm,
                                    AMDGPUSampleVariant sample, RegisterClass dst_rc,
                                    bit enableDisasm = 0> {
  foreach addr = MIMG_Sampler_AddrSizes<sample>.MachineInstrs in {
    let VAddrDwords = addr.NumWords in
    def _V # addr.NumWords
      : MIMG_Sampler_Helper <op, asm, dst_rc, addr.RegClass,
                             !if(!and(enableDisasm, addr.Disassemble), "AMDGPU", "")>;
  }
}

class MIMG_Sampler_BaseOpcode<AMDGPUSampleVariant sample>
  : MIMGBaseOpcode {
  let Sampler = 1;
  let NumExtraArgs = !size(sample.ExtraAddrArgs);
  let Gradients = sample.Gradients;
  let LodOrClampOrMip = !ne(sample.LodOrClamp, "");
}

multiclass MIMG_Sampler <bits<7> op, AMDGPUSampleVariant sample, bit wqm = 0,
                         bit isGetLod = 0,
                         string asm = "image_sample"#sample.LowerCaseMod> {
  def "" : MIMG_Sampler_BaseOpcode<sample> {
    let HasD16 = !if(isGetLod, 0, 1);
  }

  let BaseOpcode = !cast<MIMGBaseOpcode>(NAME), WQM = wqm,
      mayLoad = !if(isGetLod, 0, 1) in {
    let VDataDwords = 1 in
    defm _V1 : MIMG_Sampler_Src_Helper<op, asm, sample, VGPR_32, 1>;
    let VDataDwords = 2 in
    defm _V2 : MIMG_Sampler_Src_Helper<op, asm, sample, VReg_64>;
    let VDataDwords = 3 in
    defm _V3 : MIMG_Sampler_Src_Helper<op, asm, sample, VReg_96>;
    let VDataDwords = 4 in
    defm _V4 : MIMG_Sampler_Src_Helper<op, asm, sample, VReg_128>;
  }
}

multiclass MIMG_Sampler_WQM <bits<7> op, AMDGPUSampleVariant sample>
  : MIMG_Sampler<op, sample, 1>;

multiclass MIMG_Gather <bits<7> op, AMDGPUSampleVariant sample, bit wqm = 0,
                        string asm = "image_gather4"#sample.LowerCaseMod> {
  def "" : MIMG_Sampler_BaseOpcode<sample> {
    let HasD16 = 1;
  }

  let BaseOpcode = !cast<MIMGBaseOpcode>(NAME), WQM = wqm,
      Gather4 = 1, hasPostISelHook = 0 in {
    let VDataDwords = 2 in
    defm _V2 : MIMG_Sampler_Src_Helper<op, asm, sample, VReg_64>; /* for packed D16 only */
    let VDataDwords = 4 in
    defm _V4 : MIMG_Sampler_Src_Helper<op, asm, sample, VReg_128, 1>;
  }
}

multiclass MIMG_Gather_WQM <bits<7> op, AMDGPUSampleVariant sample>
  : MIMG_Gather<op, sample, 1>;

//===----------------------------------------------------------------------===//
// MIMG Instructions
//===----------------------------------------------------------------------===//
defm IMAGE_LOAD : MIMG_NoSampler <0x00000000, "image_load", 1>;
defm IMAGE_LOAD_MIP : MIMG_NoSampler <0x00000001, "image_load_mip", 1, 1>;
defm IMAGE_LOAD_PCK : MIMG_NoSampler <0x00000002, "image_load_pck", 0>;
defm IMAGE_LOAD_PCK_SGN : MIMG_NoSampler <0x00000003, "image_load_pck_sgn", 0>;
defm IMAGE_LOAD_MIP_PCK : MIMG_NoSampler <0x00000004, "image_load_mip_pck", 0, 1>;
defm IMAGE_LOAD_MIP_PCK_SGN : MIMG_NoSampler <0x00000005, "image_load_mip_pck_sgn", 0, 1>;
defm IMAGE_STORE : MIMG_Store <0x00000008, "image_store", 1>;
defm IMAGE_STORE_MIP : MIMG_Store <0x00000009, "image_store_mip", 1, 1>;
defm IMAGE_STORE_PCK : MIMG_Store <0x0000000a, "image_store_pck", 0>;
defm IMAGE_STORE_MIP_PCK : MIMG_Store <0x0000000b, "image_store_mip_pck", 0, 1>;

defm IMAGE_GET_RESINFO : MIMG_NoSampler <0x0000000e, "image_get_resinfo", 0, 1, 1>;

defm IMAGE_ATOMIC_SWAP : MIMG_Atomic <mimg<0x0f, 0x10>, "image_atomic_swap">;
defm IMAGE_ATOMIC_CMPSWAP : MIMG_Atomic <mimg<0x10, 0x11>, "image_atomic_cmpswap", 1>;
defm IMAGE_ATOMIC_ADD : MIMG_Atomic <mimg<0x11, 0x12>, "image_atomic_add">;
defm IMAGE_ATOMIC_SUB : MIMG_Atomic <mimg<0x12, 0x13>, "image_atomic_sub">;
//def IMAGE_ATOMIC_RSUB : MIMG_NoPattern_ <"image_atomic_rsub", 0x00000013>; -- not on VI
defm IMAGE_ATOMIC_SMIN : MIMG_Atomic <mimg<0x14>, "image_atomic_smin">;
defm IMAGE_ATOMIC_UMIN : MIMG_Atomic <mimg<0x15>, "image_atomic_umin">;
defm IMAGE_ATOMIC_SMAX : MIMG_Atomic <mimg<0x16>, "image_atomic_smax">;
defm IMAGE_ATOMIC_UMAX : MIMG_Atomic <mimg<0x17>, "image_atomic_umax">;
defm IMAGE_ATOMIC_AND : MIMG_Atomic <mimg<0x18>, "image_atomic_and">;
defm IMAGE_ATOMIC_OR : MIMG_Atomic <mimg<0x19>, "image_atomic_or">;
defm IMAGE_ATOMIC_XOR : MIMG_Atomic <mimg<0x1a>, "image_atomic_xor">;
defm IMAGE_ATOMIC_INC : MIMG_Atomic <mimg<0x1b>, "image_atomic_inc">;
defm IMAGE_ATOMIC_DEC : MIMG_Atomic <mimg<0x1c>, "image_atomic_dec">;
//def IMAGE_ATOMIC_FCMPSWAP : MIMG_NoPattern_ <"image_atomic_fcmpswap", 0x0000001d, 1>; -- not on VI
//def IMAGE_ATOMIC_FMIN : MIMG_NoPattern_ <"image_atomic_fmin", 0x0000001e>; -- not on VI
//def IMAGE_ATOMIC_FMAX : MIMG_NoPattern_ <"image_atomic_fmax", 0x0000001f>; -- not on VI
|
2018-06-21 21:36:13 +08:00
|
|
|
defm IMAGE_SAMPLE : MIMG_Sampler_WQM <0x00000020, AMDGPUSample>;
|
|
|
|
defm IMAGE_SAMPLE_CL : MIMG_Sampler_WQM <0x00000021, AMDGPUSample_cl>;
|
|
|
|
defm IMAGE_SAMPLE_D : MIMG_Sampler <0x00000022, AMDGPUSample_d>;
|
|
|
|
defm IMAGE_SAMPLE_D_CL : MIMG_Sampler <0x00000023, AMDGPUSample_d_cl>;
|
|
|
|
defm IMAGE_SAMPLE_L : MIMG_Sampler <0x00000024, AMDGPUSample_l>;
|
|
|
|
defm IMAGE_SAMPLE_B : MIMG_Sampler_WQM <0x00000025, AMDGPUSample_b>;
|
|
|
|
defm IMAGE_SAMPLE_B_CL : MIMG_Sampler_WQM <0x00000026, AMDGPUSample_b_cl>;
|
|
|
|
defm IMAGE_SAMPLE_LZ : MIMG_Sampler <0x00000027, AMDGPUSample_lz>;
|
|
|
|
defm IMAGE_SAMPLE_C : MIMG_Sampler_WQM <0x00000028, AMDGPUSample_c>;
|
|
|
|
defm IMAGE_SAMPLE_C_CL : MIMG_Sampler_WQM <0x00000029, AMDGPUSample_c_cl>;
|
|
|
|
defm IMAGE_SAMPLE_C_D : MIMG_Sampler <0x0000002a, AMDGPUSample_c_d>;
|
|
|
|
defm IMAGE_SAMPLE_C_D_CL : MIMG_Sampler <0x0000002b, AMDGPUSample_c_d_cl>;
|
|
|
|
defm IMAGE_SAMPLE_C_L : MIMG_Sampler <0x0000002c, AMDGPUSample_c_l>;
|
|
|
|
defm IMAGE_SAMPLE_C_B : MIMG_Sampler_WQM <0x0000002d, AMDGPUSample_c_b>;
|
|
|
|
defm IMAGE_SAMPLE_C_B_CL : MIMG_Sampler_WQM <0x0000002e, AMDGPUSample_c_b_cl>;
|
|
|
|
defm IMAGE_SAMPLE_C_LZ : MIMG_Sampler <0x0000002f, AMDGPUSample_c_lz>;
|
|
|
|
defm IMAGE_SAMPLE_O : MIMG_Sampler_WQM <0x00000030, AMDGPUSample_o>;
|
|
|
|
defm IMAGE_SAMPLE_CL_O : MIMG_Sampler_WQM <0x00000031, AMDGPUSample_cl_o>;
|
|
|
|
defm IMAGE_SAMPLE_D_O : MIMG_Sampler <0x00000032, AMDGPUSample_d_o>;
|
|
|
|
defm IMAGE_SAMPLE_D_CL_O : MIMG_Sampler <0x00000033, AMDGPUSample_d_cl_o>;
|
|
|
|
defm IMAGE_SAMPLE_L_O : MIMG_Sampler <0x00000034, AMDGPUSample_l_o>;
|
|
|
|
defm IMAGE_SAMPLE_B_O : MIMG_Sampler_WQM <0x00000035, AMDGPUSample_b_o>;
|
|
|
|
defm IMAGE_SAMPLE_B_CL_O : MIMG_Sampler_WQM <0x00000036, AMDGPUSample_b_cl_o>;
|
|
|
|
defm IMAGE_SAMPLE_LZ_O : MIMG_Sampler <0x00000037, AMDGPUSample_lz_o>;
|
|
|
|
defm IMAGE_SAMPLE_C_O : MIMG_Sampler_WQM <0x00000038, AMDGPUSample_c_o>;
|
|
|
|
defm IMAGE_SAMPLE_C_CL_O : MIMG_Sampler_WQM <0x00000039, AMDGPUSample_c_cl_o>;
|
|
|
|
defm IMAGE_SAMPLE_C_D_O : MIMG_Sampler <0x0000003a, AMDGPUSample_c_d_o>;
|
|
|
|
defm IMAGE_SAMPLE_C_D_CL_O : MIMG_Sampler <0x0000003b, AMDGPUSample_c_d_cl_o>;
|
|
|
|
defm IMAGE_SAMPLE_C_L_O : MIMG_Sampler <0x0000003c, AMDGPUSample_c_l_o>;
|
|
|
|
defm IMAGE_SAMPLE_C_B_CL_O : MIMG_Sampler_WQM <0x0000003e, AMDGPUSample_c_b_cl_o>;
|
|
|
|
defm IMAGE_SAMPLE_C_B_O : MIMG_Sampler_WQM <0x0000003d, AMDGPUSample_c_b_o>;
|
|
|
|
defm IMAGE_SAMPLE_C_LZ_O : MIMG_Sampler <0x0000003f, AMDGPUSample_c_lz_o>;
|
|
|
|
defm IMAGE_GATHER4 : MIMG_Gather_WQM <0x00000040, AMDGPUSample>;
|
|
|
|
defm IMAGE_GATHER4_CL : MIMG_Gather_WQM <0x00000041, AMDGPUSample_cl>;
|
|
|
|
defm IMAGE_GATHER4_L : MIMG_Gather <0x00000044, AMDGPUSample_l>;
|
|
|
|
defm IMAGE_GATHER4_B : MIMG_Gather_WQM <0x00000045, AMDGPUSample_b>;
|
|
|
|
defm IMAGE_GATHER4_B_CL : MIMG_Gather_WQM <0x00000046, AMDGPUSample_b_cl>;
|
|
|
|
defm IMAGE_GATHER4_LZ : MIMG_Gather <0x00000047, AMDGPUSample_lz>;
|
|
|
|
defm IMAGE_GATHER4_C : MIMG_Gather_WQM <0x00000048, AMDGPUSample_c>;
|
|
|
|
defm IMAGE_GATHER4_C_CL : MIMG_Gather_WQM <0x00000049, AMDGPUSample_c_cl>;
|
|
|
|
defm IMAGE_GATHER4_C_L : MIMG_Gather <0x0000004c, AMDGPUSample_c_l>;
|
|
|
|
defm IMAGE_GATHER4_C_B : MIMG_Gather_WQM <0x0000004d, AMDGPUSample_c_b>;
|
|
|
|
defm IMAGE_GATHER4_C_B_CL : MIMG_Gather_WQM <0x0000004e, AMDGPUSample_c_b_cl>;
|
|
|
|
defm IMAGE_GATHER4_C_LZ : MIMG_Gather <0x0000004f, AMDGPUSample_c_lz>;
|
|
|
|
defm IMAGE_GATHER4_O : MIMG_Gather_WQM <0x00000050, AMDGPUSample_o>;
|
|
|
|
defm IMAGE_GATHER4_CL_O : MIMG_Gather_WQM <0x00000051, AMDGPUSample_cl_o>;
|
|
|
|
defm IMAGE_GATHER4_L_O : MIMG_Gather <0x00000054, AMDGPUSample_l_o>;
|
|
|
|
defm IMAGE_GATHER4_B_O : MIMG_Gather_WQM <0x00000055, AMDGPUSample_b_o>;
|
|
|
|
defm IMAGE_GATHER4_B_CL_O : MIMG_Gather <0x00000056, AMDGPUSample_b_cl_o>;
|
|
|
|
defm IMAGE_GATHER4_LZ_O : MIMG_Gather <0x00000057, AMDGPUSample_lz_o>;
|
|
|
|
defm IMAGE_GATHER4_C_O : MIMG_Gather_WQM <0x00000058, AMDGPUSample_c_o>;
|
|
|
|
defm IMAGE_GATHER4_C_CL_O : MIMG_Gather_WQM <0x00000059, AMDGPUSample_c_cl_o>;
|
|
|
|
defm IMAGE_GATHER4_C_L_O : MIMG_Gather <0x0000005c, AMDGPUSample_c_l_o>;
|
|
|
|
defm IMAGE_GATHER4_C_B_O : MIMG_Gather_WQM <0x0000005d, AMDGPUSample_c_b_o>;
|
|
|
|
defm IMAGE_GATHER4_C_B_CL_O : MIMG_Gather_WQM <0x0000005e, AMDGPUSample_c_b_cl_o>;
|
|
|
|
defm IMAGE_GATHER4_C_LZ_O : MIMG_Gather <0x0000005f, AMDGPUSample_c_lz_o>;
|
2017-12-09 04:00:57 +08:00
|
|
|
|
2018-06-21 21:36:44 +08:00
|
|
|
defm IMAGE_GET_LOD : MIMG_Sampler <0x00000060, AMDGPUSample, 1, 1, "image_get_lod">;
|
2017-12-09 04:00:57 +08:00
|
|
|
|
2018-06-21 21:36:13 +08:00
|
|
|
defm IMAGE_SAMPLE_CD : MIMG_Sampler <0x00000068, AMDGPUSample_cd>;
|
|
|
|
defm IMAGE_SAMPLE_CD_CL : MIMG_Sampler <0x00000069, AMDGPUSample_cd_cl>;
|
|
|
|
defm IMAGE_SAMPLE_C_CD : MIMG_Sampler <0x0000006a, AMDGPUSample_c_cd>;
|
|
|
|
defm IMAGE_SAMPLE_C_CD_CL : MIMG_Sampler <0x0000006b, AMDGPUSample_c_cd_cl>;
|
|
|
|
defm IMAGE_SAMPLE_CD_O : MIMG_Sampler <0x0000006c, AMDGPUSample_cd_o>;
|
|
|
|
defm IMAGE_SAMPLE_CD_CL_O : MIMG_Sampler <0x0000006d, AMDGPUSample_cd_cl_o>;
|
|
|
|
defm IMAGE_SAMPLE_C_CD_O : MIMG_Sampler <0x0000006e, AMDGPUSample_c_cd_o>;
|
|
|
|
defm IMAGE_SAMPLE_C_CD_CL_O : MIMG_Sampler <0x0000006f, AMDGPUSample_c_cd_cl_o>;
//def IMAGE_RSRC256 : MIMG_NoPattern_RSRC256 <"image_rsrc256", 0x0000007e>;
//def IMAGE_SAMPLER : MIMG_NoPattern_ <"image_sampler", 0x0000007f>;

/********** ========================================= **********/
/********** Table of dimension-aware image intrinsics **********/
/********** ========================================= **********/
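
// Each dimension-aware image intrinsic (e.g. llvm.amdgcn.image.sample.l.2d)
// carries the texture dimension in its name and is mapped here to its MIMG
// base opcode and dimension, so that SITargetLowering::lowerImage can look
// this information up when lowering the intrinsic.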
class ImageDimIntrinsicInfo<AMDGPUImageDimIntrinsic I> {
  Intrinsic Intr = I;
  MIMGBaseOpcode BaseOpcode = !cast<MIMGBaseOpcode>(!strconcat("IMAGE_", I.P.OpMod));
  AMDGPUDimProps Dim = I.P.Dim;
}
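
// For example (assuming the OpMod strings defined alongside the
// dimension-aware intrinsics), llvm.amdgcn.image.sample.c.cl.2d has
// I.P.OpMod = "SAMPLE_C_CL", so its record resolves BaseOpcode to
// IMAGE_SAMPLE_C_CL and Dim to the 2D dimension properties.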

def ImageDimIntrinsicTable : GenericTable {
  let FilterClass = "ImageDimIntrinsicInfo";
  let Fields = ["Intr", "BaseOpcode", "Dim"];
  GenericEnum TypeOf_BaseOpcode = MIMGBaseOpcode;
  GenericEnum TypeOf_Dim = MIMGDim;

  let PrimaryKey = ["Intr"];
  let PrimaryKeyName = "getImageDimIntrinsicInfo";
  let PrimaryKeyEarlyOut = 1;
}
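
// On the C++ side the generated table is queried roughly as follows (a
// sketch; the exact struct and function are emitted into the
// searchable-tables .inc file by TableGen):
//
//   if (const AMDGPU::ImageDimIntrinsicInfo *Info =
//           AMDGPU::getImageDimIntrinsicInfo(IntrinsicID))
//     return lowerImage(Op, Info, DAG); // uses Info->BaseOpcode and Info->Dim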

foreach intr = !listconcat(AMDGPUImageDimIntrinsics,
                           AMDGPUImageDimAtomicIntrinsics) in {
  def : ImageDimIntrinsicInfo<intr>;
}

// L to LZ Optimization Mapping: pair each _L opcode with its _LZ counterpart
// so that a sample/gather with a constant-zero lod can drop the operand and
// use the cheaper _LZ form.
def : MIMGLZMapping<IMAGE_SAMPLE_L, IMAGE_SAMPLE_LZ>;
def : MIMGLZMapping<IMAGE_SAMPLE_C_L, IMAGE_SAMPLE_C_LZ>;
def : MIMGLZMapping<IMAGE_SAMPLE_L_O, IMAGE_SAMPLE_LZ_O>;
def : MIMGLZMapping<IMAGE_SAMPLE_C_L_O, IMAGE_SAMPLE_C_LZ_O>;
def : MIMGLZMapping<IMAGE_GATHER4_L, IMAGE_GATHER4_LZ>;
def : MIMGLZMapping<IMAGE_GATHER4_C_L, IMAGE_GATHER4_C_LZ>;
def : MIMGLZMapping<IMAGE_GATHER4_L_O, IMAGE_GATHER4_LZ_O>;
def : MIMGLZMapping<IMAGE_GATHER4_C_L_O, IMAGE_GATHER4_C_LZ_O>;
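
// A sketch of how these records are consumed (assuming the MIMGLZMapping
// class and its searchable table defined earlier in this file, which emit a
// getMIMGLZMappingInfo lookup): during instruction selection the _L base
// opcode is used as the key, and the returned record supplies the _LZ base
// opcode to switch to.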