//===-- SIInstrInfo.td - SI Instruction Infos -------------*- tablegen -*--===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// Subtarget predicates: CI (Sea Islands) and newer, and CI exactly.
def isCI : Predicate<"Subtarget->getGeneration() "
                     ">= SISubtarget::SEA_ISLANDS">;
def isCIOnly : Predicate<"Subtarget->getGeneration() =="
                         "SISubtarget::SEA_ISLANDS">,
  AssemblerPredicate <"FeatureSeaIslands">;

def DisableInst : Predicate <"false">, AssemblerPredicate<"FeatureDisable">;

// Except for the NONE field, this must be kept in sync with the
// SIEncodingFamily enum in AMDGPUInstrInfo.cpp
def SIEncodingFamily {
  int NONE = -1;
  int SI = 0;
  int VI = 1;
}
//===----------------------------------------------------------------------===//
// SI DAG Nodes
//===----------------------------------------------------------------------===//

def SIload_constant : SDNode<"AMDGPUISD::LOAD_CONSTANT",
  SDTypeProfile<1, 2, [SDTCisVT<0, f32>, SDTCisVT<1, v4i32>, SDTCisVT<2, i32>]>,
  [SDNPMayLoad, SDNPMemOperand]
>;

def SIatomic_inc : SDNode<"AMDGPUISD::ATOMIC_INC", SDTAtomic2,
  [SDNPMayLoad, SDNPMayStore, SDNPMemOperand, SDNPHasChain]
>;

def SIatomic_dec : SDNode<"AMDGPUISD::ATOMIC_DEC", SDTAtomic2,
  [SDNPMayLoad, SDNPMayStore, SDNPMemOperand, SDNPHasChain]
>;

def SItbuffer_store : SDNode<"AMDGPUISD::TBUFFER_STORE_FORMAT",
  SDTypeProfile<0, 13,
    [SDTCisVT<0, v4i32>,   // rsrc(SGPR)
     SDTCisVT<1, iAny>,    // vdata(VGPR)
     SDTCisVT<2, i32>,     // num_channels(imm)
     SDTCisVT<3, i32>,     // vaddr(VGPR)
     SDTCisVT<4, i32>,     // soffset(SGPR)
     SDTCisVT<5, i32>,     // inst_offset(imm)
     SDTCisVT<6, i32>,     // dfmt(imm)
     SDTCisVT<7, i32>,     // nfmt(imm)
     SDTCisVT<8, i32>,     // offen(imm)
     SDTCisVT<9, i32>,     // idxen(imm)
     SDTCisVT<10, i32>,    // glc(imm)
     SDTCisVT<11, i32>,    // slc(imm)
     SDTCisVT<12, i32>     // tfe(imm)
    ]>,
  [SDNPMayStore, SDNPMemOperand, SDNPHasChain]
>;

def SIload_input : SDNode<"AMDGPUISD::LOAD_INPUT",
  SDTypeProfile<1, 3, [SDTCisVT<0, v4f32>, SDTCisVT<1, v4i32>, SDTCisVT<2, i16>,
                       SDTCisVT<3, i32>]>
>;

// Common profile for the image sample node variants below.
class SDSample<string opcode> : SDNode <opcode,
  SDTypeProfile<1, 4, [SDTCisVT<0, v4f32>, SDTCisVT<2, v8i32>,
                       SDTCisVT<3, v4i32>, SDTCisVT<4, i32>]>
>;

def SIsample : SDSample<"AMDGPUISD::SAMPLE">;
def SIsampleb : SDSample<"AMDGPUISD::SAMPLEB">;
def SIsampled : SDSample<"AMDGPUISD::SAMPLED">;
def SIsamplel : SDSample<"AMDGPUISD::SAMPLEL">;

def SIpc_add_rel_offset : SDNode<"AMDGPUISD::PC_ADD_REL_OFFSET",
  SDTypeProfile<1, 2, [SDTCisVT<0, iPTR>, SDTCisSameAs<0,1>, SDTCisSameAs<0,2>]>
>;
//===----------------------------------------------------------------------===//
// PatFrags for global memory operations
//===----------------------------------------------------------------------===//

def atomic_inc_global : global_binary_atomic_op<SIatomic_inc>;
def atomic_dec_global : global_binary_atomic_op<SIatomic_dec>;

//===----------------------------------------------------------------------===//
// SDNodes and PatFrag for local loads and stores to enable s_mov_b32 m0, -1
// to be glued to the memory instructions.
//===----------------------------------------------------------------------===//

def SIld_local : SDNode <"ISD::LOAD", SDTLoad,
  [SDNPHasChain, SDNPMayLoad, SDNPMemOperand, SDNPInGlue]
>;

// Any glued load from the local (LDS) address space.
def si_ld_local : PatFrag <(ops node:$ptr), (SIld_local node:$ptr), [{
  return cast<LoadSDNode>(N)->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
}]>;

// Non-extending, unindexed local load.
def si_load_local : PatFrag <(ops node:$ptr), (si_ld_local node:$ptr), [{
  return cast<LoadSDNode>(N)->getAddressingMode() == ISD::UNINDEXED &&
         cast<LoadSDNode>(N)->getExtensionType() == ISD::NON_EXTLOAD;
}]>;

def si_load_local_align8 : Aligned8Bytes <
  (ops node:$ptr), (si_load_local node:$ptr)
>;

def si_sextload_local : PatFrag <(ops node:$ptr), (si_ld_local node:$ptr), [{
  return cast<LoadSDNode>(N)->getExtensionType() == ISD::SEXTLOAD;
}]>;
def si_az_extload_local : AZExtLoadBase <si_ld_local>;

// Instantiates _i8 and _i16 memory-VT variants of the given extending load.
multiclass SIExtLoadLocal <PatFrag ld_node> {

  def _i8 : PatFrag <(ops node:$ptr), (ld_node node:$ptr),
                     [{return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;}]
  >;

  def _i16 : PatFrag <(ops node:$ptr), (ld_node node:$ptr),
                      [{return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;}]
  >;
}

defm si_sextload_local : SIExtLoadLocal <si_sextload_local>;
defm si_az_extload_local : SIExtLoadLocal <si_az_extload_local>;
def SIst_local : SDNode <"ISD::STORE", SDTStore,
  [SDNPHasChain, SDNPMayStore, SDNPMemOperand, SDNPInGlue]
>;

// Any glued store to the local (LDS) address space.
def si_st_local : PatFrag <
  (ops node:$val, node:$ptr), (SIst_local node:$val, node:$ptr), [{
  return cast<StoreSDNode>(N)->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
}]>;

// Non-truncating, unindexed local store.
def si_store_local : PatFrag <
  (ops node:$val, node:$ptr), (si_st_local node:$val, node:$ptr), [{
  return cast<StoreSDNode>(N)->getAddressingMode() == ISD::UNINDEXED &&
         !cast<StoreSDNode>(N)->isTruncatingStore();
}]>;

def si_store_local_align8 : Aligned8Bytes <
  (ops node:$val, node:$ptr), (si_store_local node:$val, node:$ptr)
>;

def si_truncstore_local : PatFrag <
  (ops node:$val, node:$ptr), (si_st_local node:$val, node:$ptr), [{
  return cast<StoreSDNode>(N)->isTruncatingStore();
}]>;

def si_truncstore_local_i8 : PatFrag <
  (ops node:$val, node:$ptr), (si_truncstore_local node:$val, node:$ptr), [{
  return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i8;
}]>;

def si_truncstore_local_i16 : PatFrag <
  (ops node:$val, node:$ptr), (si_truncstore_local node:$val, node:$ptr), [{
  return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i16;
}]>;
// A setcc whose every user is a CopyToReg of SCC, i.e. a uniform comparison
// that can be selected to an SALU compare.
def si_setcc_uniform : PatFrag <
  (ops node:$lhs, node:$rhs, node:$cond),
  (setcc node:$lhs, node:$rhs, node:$cond), [{
  for (SDNode *Use : N->uses()) {
    if (Use->isMachineOpcode() || Use->getOpcode() != ISD::CopyToReg)
      return false;

    unsigned Reg = cast<RegisterSDNode>(Use->getOperand(1))->getReg();
    if (Reg != AMDGPU::SCC)
      return false;
  }
  return true;
}]>;

def si_uniform_br : PatFrag <
  (ops node:$cond, node:$bb), (brcond node:$cond, node:$bb), [{
  return isUniformBr(N);
}]>;

def si_uniform_br_scc : PatFrag <
  (ops node:$cond, node:$bb), (si_uniform_br node:$cond, node:$bb), [{
  return isCBranchSCC(N);
}]>;
// Defines a glued variant of an atomic SDNode (so the m0 initialization can
// be glued to the memory instruction) plus its LDS PatFrag.
multiclass SIAtomicM0Glue2 <string op_name, bit is_amdgpu = 0> {

  def _glue : SDNode <
    !if(is_amdgpu, "AMDGPUISD", "ISD")#"::ATOMIC_"#op_name, SDTAtomic2,
    [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand, SDNPInGlue]
  >;

  def _local : local_binary_atomic_op <!cast<SDNode>(NAME#"_glue")>;
}

defm si_atomic_load_add : SIAtomicM0Glue2 <"LOAD_ADD">;
defm si_atomic_load_sub : SIAtomicM0Glue2 <"LOAD_SUB">;
defm si_atomic_inc : SIAtomicM0Glue2 <"INC", 1>;
defm si_atomic_dec : SIAtomicM0Glue2 <"DEC", 1>;
defm si_atomic_load_and : SIAtomicM0Glue2 <"LOAD_AND">;
defm si_atomic_load_min : SIAtomicM0Glue2 <"LOAD_MIN">;
defm si_atomic_load_max : SIAtomicM0Glue2 <"LOAD_MAX">;
defm si_atomic_load_or : SIAtomicM0Glue2 <"LOAD_OR">;
defm si_atomic_load_xor : SIAtomicM0Glue2 <"LOAD_XOR">;
defm si_atomic_load_umin : SIAtomicM0Glue2 <"LOAD_UMIN">;
defm si_atomic_load_umax : SIAtomicM0Glue2 <"LOAD_UMAX">;
defm si_atomic_swap : SIAtomicM0Glue2 <"SWAP">;

def si_atomic_cmp_swap_glue : SDNode <"ISD::ATOMIC_CMP_SWAP", SDTAtomic3,
  [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand, SDNPInGlue]
>;

defm si_atomic_cmp_swap : AtomicCmpSwapLocal <si_atomic_cmp_swap_glue>;
// SDNodeXForms converting a generic immediate to a target constant of a
// specific width (zero-extended for i1/i8, sign-extended for i16/i32/i64).
def as_i1imm : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(N->getZExtValue(), SDLoc(N), MVT::i1);
}]>;

def as_i8imm : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(N->getZExtValue(), SDLoc(N), MVT::i8);
}]>;

def as_i16imm : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(N->getSExtValue(), SDLoc(N), MVT::i16);
}]>;

def as_i32imm: SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(N->getSExtValue(), SDLoc(N), MVT::i32);
}]>;

def as_i64imm: SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(N->getSExtValue(), SDLoc(N), MVT::i64);
}]>;

// Copied from the AArch64 backend:
def bitcast_fpimm_to_i32 : SDNodeXForm<fpimm, [{
  return CurDAG->getTargetConstant(
    N->getValueAPF().bitcastToAPInt().getZExtValue(), SDLoc(N), MVT::i32);
}]>;

def frameindex_to_targetframeindex : SDNodeXForm<frameindex, [{
  auto FI = cast<FrameIndexSDNode>(N);
  return CurDAG->getTargetFrameIndex(FI->getIndex(), MVT::i32);
}]>;

// Copied from the AArch64 backend:
def bitcast_fpimm_to_i64 : SDNodeXForm<fpimm, [{
  return CurDAG->getTargetConstant(
    N->getValueAPF().bitcastToAPInt().getZExtValue(), SDLoc(N), MVT::i64);
}]>;
def SIMM16bit : PatLeaf <(imm),
  [{return isInt<16>(N->getSExtValue());}]
>;

def IMM20bit : PatLeaf <(imm),
  [{return isUInt<20>(N->getZExtValue());}]
>;

class InlineImm <ValueType vt> : PatLeaf <(vt imm), [{
  return isInlineImmediate(N);
}]>;

class InlineFPImm <ValueType vt> : PatLeaf <(vt fpimm), [{
  return isInlineImmediate(N);
}]>;

// An immediate whose (at most 10 inspected) users are all VGPR-class
// operands, so it is profitable to materialize it with v_mov_b32.
class VGPRImm <dag frag> : PatLeaf<frag, [{
  if (Subtarget->getGeneration() < SISubtarget::SOUTHERN_ISLANDS) {
    return false;
  }
  const SIRegisterInfo *SIRI =
      static_cast<const SIRegisterInfo *>(Subtarget->getRegisterInfo());
  unsigned Limit = 0;
  for (SDNode::use_iterator U = N->use_begin(), E = SDNode::use_end();
       Limit < 10 && U != E; ++U, ++Limit) {
    const TargetRegisterClass *RC = getOperandRegClass(*U, U.getOperandNo());

    // If the register class is unknown, it could be an unknown
    // register class that needs to be an SGPR, e.g. an inline asm
    // constraint
    if (!RC || SIRI->isSGPRClass(RC))
      return false;
  }

  return Limit < 10;
}]>;
//===----------------------------------------------------------------------===//
// Custom Operands
//===----------------------------------------------------------------------===//

def SoppBrTarget : AsmOperandClass {
  let Name = "SoppBrTarget";
  let ParserMethod = "parseSOppBrTarget";
}

// PC-relative branch target for SOPP instructions.
def sopp_brtarget : Operand<OtherVT> {
  let EncoderMethod = "getSOPPBrEncoding";
  let DecoderMethod = "decodeSoppBrTarget";
  let OperandType = "OPERAND_PCREL";
  let ParserMatchClass = SoppBrTarget;
}

def si_ga : Operand<iPTR>;

def InterpSlot : Operand<i32> {
  let PrintMethod = "printInterpSlot";
}

def SendMsgMatchClass : AsmOperandClass {
  let Name = "SendMsg";
  let PredicateMethod = "isSendMsg";
  let ParserMethod = "parseSendMsgOp";
  let RenderMethod = "addImmOperands";
}

def SendMsgImm : Operand<i32> {
  let PrintMethod = "printSendMsg";
  let ParserMatchClass = SendMsgMatchClass;
}

def SWaitMatchClass : AsmOperandClass {
  let Name = "SWaitCnt";
  let RenderMethod = "addImmOperands";
  let ParserMethod = "parseSWaitCntOps";
}

def WAIT_FLAG : Operand <i32> {
  let ParserMatchClass = SWaitMatchClass;
  let PrintMethod = "printWaitFlag";
}
include "SIInstrFormats.td"
include "VIInstrFormats.td"

// AsmOperandClass for a named (usually optional) instruction modifier,
// wired to the is<CName>/parse<CName>/default<CName> methods of the parser.
class NamedMatchClass<string CName, bit Optional = 1> : AsmOperandClass {
  let Name = "Imm"#CName;
  let PredicateMethod = "is"#CName;
  let ParserMethod = !if(Optional, "parseOptionalOperand", "parse"#CName);
  let RenderMethod = "addImmOperands";
  let IsOptional = Optional;
  let DefaultMethod = !if(Optional, "default"#CName, ?);
}

// Named immediate operands of various widths, printed via print<Name>.
class NamedOperandBit<string Name, AsmOperandClass MatchClass> : Operand<i1> {
  let PrintMethod = "print"#Name;
  let ParserMatchClass = MatchClass;
}

class NamedOperandU8<string Name, AsmOperandClass MatchClass> : Operand<i8> {
  let PrintMethod = "print"#Name;
  let ParserMatchClass = MatchClass;
}

class NamedOperandU16<string Name, AsmOperandClass MatchClass> : Operand<i16> {
  let PrintMethod = "print"#Name;
  let ParserMatchClass = MatchClass;
}

class NamedOperandU32<string Name, AsmOperandClass MatchClass> : Operand<i32> {
  let PrintMethod = "print"#Name;
  let ParserMatchClass = MatchClass;
}
let OperandType = "OPERAND_IMMEDIATE" in {

def offen : NamedOperandBit<"Offen", NamedMatchClass<"Offen">>;
def idxen : NamedOperandBit<"Idxen", NamedMatchClass<"Idxen">>;
def addr64 : NamedOperandBit<"Addr64", NamedMatchClass<"Addr64">>;

def offset : NamedOperandU16<"Offset", NamedMatchClass<"Offset">>;
def offset0 : NamedOperandU8<"Offset0", NamedMatchClass<"Offset0">>;
def offset1 : NamedOperandU8<"Offset1", NamedMatchClass<"Offset1">>;

def gds : NamedOperandBit<"GDS", NamedMatchClass<"GDS">>;

def omod : NamedOperandU32<"OModSI", NamedMatchClass<"OModSI">>;
def clampmod : NamedOperandBit<"ClampSI", NamedMatchClass<"ClampSI">>;

def GLC : NamedOperandBit<"GLC", NamedMatchClass<"GLC">>;
def slc : NamedOperandBit<"SLC", NamedMatchClass<"SLC">>;
def tfe : NamedOperandBit<"TFE", NamedMatchClass<"TFE">>;
def unorm : NamedOperandBit<"UNorm", NamedMatchClass<"UNorm">>;
def da : NamedOperandBit<"DA", NamedMatchClass<"DA">>;
def r128 : NamedOperandBit<"R128", NamedMatchClass<"R128">>;
def lwe : NamedOperandBit<"LWE", NamedMatchClass<"LWE">>;

def dmask : NamedOperandU16<"DMask", NamedMatchClass<"DMask">>;

def dpp_ctrl : NamedOperandU32<"DPPCtrl", NamedMatchClass<"DPPCtrl", 0>>;
def row_mask : NamedOperandU32<"RowMask", NamedMatchClass<"RowMask">>;
def bank_mask : NamedOperandU32<"BankMask", NamedMatchClass<"BankMask">>;
def bound_ctrl : NamedOperandBit<"BoundCtrl", NamedMatchClass<"BoundCtrl">>;

def dst_sel : NamedOperandU32<"SDWADstSel", NamedMatchClass<"SDWADstSel">>;
def src0_sel : NamedOperandU32<"SDWASrc0Sel", NamedMatchClass<"SDWASrc0Sel">>;
def src1_sel : NamedOperandU32<"SDWASrc1Sel", NamedMatchClass<"SDWASrc1Sel">>;
def dst_unused : NamedOperandU32<"SDWADstUnused", NamedMatchClass<"SDWADstUnused">>;

def hwreg : NamedOperandU16<"Hwreg", NamedMatchClass<"Hwreg", 0>>;

} // End OperandType = "OPERAND_IMMEDIATE"
// 32-bit VALU immediate operand that uses the constant bus.
def KImmFP32MatchClass : AsmOperandClass {
  let Name = "KImmFP32";
  let PredicateMethod = "isKImmFP32";
  let ParserMethod = "parseImm";
  let RenderMethod = "addKImmFP32Operands";
}

def f32kimm : Operand<i32> {
  let OperandNamespace = "AMDGPU";
  let OperandType = "OPERAND_KIMM32";
  let PrintMethod = "printU32ImmOperand";
  let ParserMatchClass = KImmFP32MatchClass;
}

def VOPDstS64 : VOPDstOperand <SReg_64>;
// Match classes and operands for source modifiers (abs/neg for FP,
// sext for integer), parameterized on operand size.
class FPInputModsMatchClass <int opSize> : AsmOperandClass {
  let Name = "RegOrImmWithFP"#opSize#"InputMods";
  let ParserMethod = "parseRegOrImmWithFPInputMods";
  let PredicateMethod = "isRegOrImmWithFP"#opSize#"InputMods";
}
def FP32InputModsMatchClass : FPInputModsMatchClass<32>;
def FP64InputModsMatchClass : FPInputModsMatchClass<64>;

class InputMods <AsmOperandClass matchClass> : Operand <i32> {
  let OperandNamespace = "AMDGPU";
  let OperandType = "OPERAND_INPUT_MODS";
  let ParserMatchClass = matchClass;
}

class FPInputMods <FPInputModsMatchClass matchClass> : InputMods <matchClass> {
  let PrintMethod = "printOperandAndFPInputMods";
}
def FP32InputMods : FPInputMods<FP32InputModsMatchClass>;
def FP64InputMods : FPInputMods<FP64InputModsMatchClass>;

class IntInputModsMatchClass <int opSize> : AsmOperandClass {
  let Name = "RegOrImmWithInt"#opSize#"InputMods";
  let ParserMethod = "parseRegOrImmWithIntInputMods";
  let PredicateMethod = "isRegOrImmWithInt"#opSize#"InputMods";
}
def Int32InputModsMatchClass : IntInputModsMatchClass<32>;
def Int64InputModsMatchClass : IntInputModsMatchClass<64>;

class IntInputMods <IntInputModsMatchClass matchClass> : InputMods <matchClass> {
  let PrintMethod = "printOperandAndIntInputMods";
}
def Int32InputMods : IntInputMods<Int32InputModsMatchClass>;
def Int64InputMods : IntInputMods<Int64InputModsMatchClass>;
//===----------------------------------------------------------------------===//
// Complex patterns
//===----------------------------------------------------------------------===//

def DS1Addr1Offset : ComplexPattern<i32, 2, "SelectDS1Addr1Offset">;
def DS64Bit4ByteAligned : ComplexPattern<i32, 3, "SelectDS64Bit4ByteAligned">;

def MOVRELOffset : ComplexPattern<i32, 2, "SelectMOVRELOffset">;

def VOP3Mods0 : ComplexPattern<untyped, 4, "SelectVOP3Mods0">;
def VOP3NoMods0 : ComplexPattern<untyped, 4, "SelectVOP3NoMods0">;
def VOP3Mods0Clamp : ComplexPattern<untyped, 3, "SelectVOP3Mods0Clamp">;
def VOP3Mods0Clamp0OMod : ComplexPattern<untyped, 4, "SelectVOP3Mods0Clamp0OMod">;
def VOP3Mods : ComplexPattern<untyped, 2, "SelectVOP3Mods">;
def VOP3NoMods : ComplexPattern<untyped, 2, "SelectVOP3NoMods">;
//===----------------------------------------------------------------------===//
// SI assembler operands
//===----------------------------------------------------------------------===//

def SIOperand {
  int ZERO = 0x80;
  int VCC = 0x6A;
  int FLAT_SCR = 0x68;
}

def SRCMODS {
  int NONE = 0;
  int NEG = 1;
}

def DSTCLAMP {
  int NONE = 0;
}

def DSTOMOD {
  int NONE = 0;
}

//===----------------------------------------------------------------------===//
//
// SI Instruction multiclass helpers.
//
// Instructions with _32 take 32-bit operands.
// Instructions with _64 take 64-bit operands.
//
// VOP_* instructions can use either a 32-bit or 64-bit encoding. The 32-bit
// encoding is the standard encoding, but instruction that make use of
// any of the instruction modifiers must use the 64-bit encoding.
//
// Instructions with _e32 use the 32-bit encoding.
// Instructions with _e64 use the 64-bit encoding.
//
//===----------------------------------------------------------------------===//

// Ties a real (per-subtarget) instruction back to its pseudo for the
// MCInstr lowering tables; subtarget is a SIEncodingFamily value.
class SIMCInstr <string pseudo, int subtarget> {
  string PseudoInstr = pseudo;
  int Subtarget = subtarget;
}
//===----------------------------------------------------------------------===//
// EXP classes
//===----------------------------------------------------------------------===//

class EXPCommon : InstSI<
  (outs),
  (ins i32imm:$en, i32imm:$tgt, i32imm:$compr, i32imm:$done, i32imm:$vm,
       VGPR_32:$src0, VGPR_32:$src1, VGPR_32:$src2, VGPR_32:$src3),
  "exp $en, $tgt, $compr, $done, $vm, $src0, $src1, $src2, $src3",
  [] > {

  let EXP_CNT = 1;
  let Uses = [EXEC];
  let SchedRW = [WriteExport];
}

// Pseudo plus SI and VI encodings of the export instruction.
multiclass EXP_m {

  let isPseudo = 1, isCodeGenOnly = 1 in {
    def "" : EXPCommon, SIMCInstr <"exp", SIEncodingFamily.NONE> ;
  }

  def _si : EXPCommon, SIMCInstr <"exp", SIEncodingFamily.SI>, EXPe {
    let DecoderNamespace="SICI";
    let DisableDecoder = DisableSIDecoder;
  }

  def _vi : EXPCommon, SIMCInstr <"exp", SIEncodingFamily.VI>, EXPe_vi {
    let DecoderNamespace="VI";
    let DisableDecoder = DisableVIDecoder;
  }
}
//===----------------------------------------------------------------------===//
// Vector ALU classes
//===----------------------------------------------------------------------===//

// Number of source operands implied by which ArgVT slots are untyped.
class getNumSrcArgs<ValueType Src0, ValueType Src1, ValueType Src2> {
  int ret =
    !if (!eq(Src0.Value, untyped.Value),     0,
     !if (!eq(Src1.Value, untyped.Value),    1,   // VOP1
      !if (!eq(Src2.Value, untyped.Value),   2,   // VOP2
                                             3))); // VOP3
}

// Returns the register class to use for the destination of VOP[123C]
// instructions for the given VT.
class getVALUDstForVT<ValueType VT> {
  RegisterOperand ret = !if(!eq(VT.Size, 32), VOPDstOperand<VGPR_32>,
                          !if(!eq(VT.Size, 128), VOPDstOperand<VReg_128>,
                            !if(!eq(VT.Size, 64), VOPDstOperand<VReg_64>,
                              !if(!eq(VT.Size, 16), VOPDstOperand<VGPR_32>,
                                VOPDstOperand<SReg_64>)))); // else VT == i1
}
// Returns the register class to use for source 0 of VOP[12C]
// instructions for the given VT.
class getVOPSrc0ForVT<ValueType VT> {
  bit isFP = !if(!eq(VT.Value, f16.Value), 1,
               !if(!eq(VT.Value, f32.Value), 1,
                 !if(!eq(VT.Value, f64.Value), 1,
                   0)));
  RegisterOperand ret = !if(isFP,
                          !if(!eq(VT.Size, 64), VSrc_f64, VSrc_f32),
                          !if(!eq(VT.Size, 64), VSrc_b64, VSrc_b32));
}

// Returns the vreg register class to use for source operand given VT
class getVregSrcForVT<ValueType VT> {
  RegisterClass ret = !if(!eq(VT.Size, 128), VReg_128,
                        !if(!eq(VT.Size, 64), VReg_64, VGPR_32));
}
// Returns the register class to use for sources of VOP3 instructions for the
// given VT.
class getVOP3SrcForVT<ValueType VT> {
  bit isFP = !if(!eq(VT.Value, f16.Value), 1,
               !if(!eq(VT.Value, f32.Value), 1,
                 !if(!eq(VT.Value, f64.Value), 1,
                   0)));
  RegisterOperand ret =
    !if(!eq(VT.Size, 128),
        VSrc_128,
        !if(!eq(VT.Size, 64),
            !if(isFP,
                VCSrc_f64,
                VCSrc_b64),
            !if(!eq(VT.Value, i1.Value),
                SCSrc_b64,
                !if(isFP,
                    VCSrc_f32,
                    VCSrc_b32)
            )
        )
    );
}
// Returns 1 if the source arguments have modifiers, 0 if they do not.
// XXX - do f16 instructions?
class isFloatType<ValueType SrcVT> {
  bit ret =
    !if(!eq(SrcVT.Value, f16.Value), 1,
    !if(!eq(SrcVT.Value, f32.Value), 1,
    !if(!eq(SrcVT.Value, f64.Value), 1,
    0)));
}

class isIntType<ValueType SrcVT> {
  bit ret =
    !if(!eq(SrcVT.Value, i16.Value), 1,
    !if(!eq(SrcVT.Value, i32.Value), 1,
    !if(!eq(SrcVT.Value, i64.Value), 1,
    0)));
}

// Return type of input modifiers operand for specified input operand
class getSrcMod <ValueType VT> {
  bit isFP = !if(!eq(VT.Value, f16.Value), 1,
               !if(!eq(VT.Value, f32.Value), 1,
                 !if(!eq(VT.Value, f64.Value), 1,
                   0)));
  Operand ret = !if(!eq(VT.Size, 64),
                  !if(isFP, FP64InputMods, Int64InputMods),
                  !if(isFP, FP32InputMods, Int32InputMods));
}
// Returns the input arguments for VOP[12C] instructions for the given SrcVT.
class getIns32 <RegisterOperand Src0RC, RegisterClass Src1RC, int NumSrcArgs> {
  dag ret = !if(!eq(NumSrcArgs, 1), (ins Src0RC:$src0),               // VOP1
            !if(!eq(NumSrcArgs, 2), (ins Src0RC:$src0, Src1RC:$src1), // VOP2
                                    (ins)));
}

// Returns the input arguments for VOP3 instructions for the given SrcVT.
class getIns64 <RegisterOperand Src0RC, RegisterOperand Src1RC,
                RegisterOperand Src2RC, int NumSrcArgs,
                bit HasModifiers, Operand Src0Mod, Operand Src1Mod,
                Operand Src2Mod> {

  dag ret =
    !if (!eq(NumSrcArgs, 0),
      // VOP1 without input operands (V_NOP, V_CLREXCP)
      (ins),
      /* else */
      !if (!eq(NumSrcArgs, 1),
        !if (!eq(HasModifiers, 1),
          // VOP1 with modifiers
          (ins Src0Mod:$src0_modifiers, Src0RC:$src0,
               clampmod:$clamp, omod:$omod)
        /* else */,
          // VOP1 without modifiers
          (ins Src0RC:$src0)
        /* endif */ ),
      !if (!eq(NumSrcArgs, 2),
        !if (!eq(HasModifiers, 1),
          // VOP 2 with modifiers
          (ins Src0Mod:$src0_modifiers, Src0RC:$src0,
               Src1Mod:$src1_modifiers, Src1RC:$src1,
               clampmod:$clamp, omod:$omod)
        /* else */,
          // VOP2 without modifiers
          (ins Src0RC:$src0, Src1RC:$src1)
        /* endif */ )
      /* NumSrcArgs == 3 */,
        !if (!eq(HasModifiers, 1),
          // VOP3 with modifiers
          (ins Src0Mod:$src0_modifiers, Src0RC:$src0,
               Src1Mod:$src1_modifiers, Src1RC:$src1,
               Src2Mod:$src2_modifiers, Src2RC:$src2,
               clampmod:$clamp, omod:$omod)
        /* else */,
          // VOP3 without modifiers
          (ins Src0RC:$src0, Src1RC:$src1, Src2RC:$src2)
        /* endif */ ))));
}
// Returns the input arguments for the DPP form of an instruction.
class getInsDPP <RegisterClass Src0RC, RegisterClass Src1RC, int NumSrcArgs,
                 bit HasModifiers, Operand Src0Mod, Operand Src1Mod> {

  dag ret = !if (!eq(NumSrcArgs, 0),
                // VOP1 without input operands (V_NOP)
                (ins dpp_ctrl:$dpp_ctrl, row_mask:$row_mask,
                     bank_mask:$bank_mask, bound_ctrl:$bound_ctrl),
            !if (!eq(NumSrcArgs, 1),
              !if (!eq(HasModifiers, 1),
                // VOP1_DPP with modifiers
                (ins Src0Mod:$src0_modifiers, Src0RC:$src0,
                     dpp_ctrl:$dpp_ctrl, row_mask:$row_mask,
                     bank_mask:$bank_mask, bound_ctrl:$bound_ctrl)
              /* else */,
                // VOP1_DPP without modifiers
                (ins Src0RC:$src0, dpp_ctrl:$dpp_ctrl, row_mask:$row_mask,
                     bank_mask:$bank_mask, bound_ctrl:$bound_ctrl)
              /* endif */)
            /* NumSrcArgs == 2 */,
              !if (!eq(HasModifiers, 1),
                // VOP2_DPP with modifiers
                (ins Src0Mod:$src0_modifiers, Src0RC:$src0,
                     Src1Mod:$src1_modifiers, Src1RC:$src1,
                     dpp_ctrl:$dpp_ctrl, row_mask:$row_mask,
                     bank_mask:$bank_mask, bound_ctrl:$bound_ctrl)
              /* else */,
                // VOP2_DPP without modifiers
                (ins Src0RC:$src0, Src1RC:$src1, dpp_ctrl:$dpp_ctrl,
                     row_mask:$row_mask, bank_mask:$bank_mask,
                     bound_ctrl:$bound_ctrl)
              /* endif */)));
}
// Returns the input arguments for the SDWA form of an instruction.
class getInsSDWA <RegisterClass Src0RC, RegisterClass Src1RC, int NumSrcArgs,
                  bit HasFloatModifiers, Operand Src0Mod, Operand Src1Mod,
                  ValueType DstVT> {

  dag ret = !if(!eq(NumSrcArgs, 0),
               // VOP1 without input operands (V_NOP)
               (ins),
            !if(!eq(NumSrcArgs, 1),
               (ins Src0Mod:$src0_modifiers, Src0RC:$src0,
                    clampmod:$clamp, dst_sel:$dst_sel, dst_unused:$dst_unused,
                    src0_sel:$src0_sel),
            !if(!eq(NumSrcArgs, 2),
               !if(!eq(DstVT.Size, 1),
                  // VOPC_SDWA: no dst_sel/dst_unused, result goes to vcc
                  (ins Src0Mod:$src0_modifiers, Src0RC:$src0,
                       Src1Mod:$src1_modifiers, Src1RC:$src1,
                       clampmod:$clamp, src0_sel:$src0_sel, src1_sel:$src1_sel),
                  // VOP2_SDWA
                  (ins Src0Mod:$src0_modifiers, Src0RC:$src0,
                       Src1Mod:$src1_modifiers, Src1RC:$src1,
                       clampmod:$clamp, dst_sel:$dst_sel, dst_unused:$dst_unused,
                       src0_sel:$src0_sel, src1_sel:$src1_sel)),
               (ins)/* endif */)));
}

// Outs for DPP and SDWA
class getOutsExt <bit HasDst, ValueType DstVT, RegisterOperand DstRCDPP> {
  dag ret = !if(HasDst,
                !if(!eq(DstVT.Size, 1),
                    (outs), // no dst for VOPC, we use "vcc"-token as dst in SDWA VOPC instructions
                    (outs DstRCDPP:$vdst)),
                (outs)); // V_NOP
}
// Returns the assembly string for the inputs and outputs of a VOP[12C]
// instruction. This does not add the _e32 suffix, so it can be reused
// by getAsm64.
class getAsm32 <bit HasDst, int NumSrcArgs, ValueType DstVT = i32> {
  string dst = !if(!eq(DstVT.Size, 1), "$sdst", "$vdst"); // use $sdst for VOPC
  string src0 = ", $src0";
  string src1 = ", $src1";
  string src2 = ", $src2";
  string ret = !if(HasDst, dst, "") #
               !if(!eq(NumSrcArgs, 1), src0, "") #
               !if(!eq(NumSrcArgs, 2), src0#src1, "") #
               !if(!eq(NumSrcArgs, 3), src0#src1#src2, "");
}

// Returns the assembly string for the inputs and outputs of a VOP3
// instruction.
class getAsm64 <bit HasDst, int NumSrcArgs, bit HasModifiers, ValueType DstVT = i32> {
  string dst = !if(!eq(DstVT.Size, 1), "$sdst", "$vdst"); // use $sdst for VOPC
  string src0 = !if(!eq(NumSrcArgs, 1), "$src0_modifiers", "$src0_modifiers,");
  string src1 = !if(!eq(NumSrcArgs, 1), "",
                   !if(!eq(NumSrcArgs, 2), " $src1_modifiers",
                                           " $src1_modifiers,"));
  string src2 = !if(!eq(NumSrcArgs, 3), " $src2_modifiers", "");
  string ret =
    !if(!eq(HasModifiers, 0),
        getAsm32<HasDst, NumSrcArgs, DstVT>.ret,
        dst#", "#src0#src1#src2#"$clamp"#"$omod");
}
// Returns the assembly string for the DPP form of an instruction.
class getAsmDPP <bit HasDst, int NumSrcArgs, bit HasModifiers, ValueType DstVT = i32> {
  string dst = !if(HasDst,
                   !if(!eq(DstVT.Size, 1),
                       "$sdst",
                       "$vdst"),
                   ""); // use $sdst for VOPC
  string src0 = !if(!eq(NumSrcArgs, 1), "$src0_modifiers", "$src0_modifiers,");
  string src1 = !if(!eq(NumSrcArgs, 1), "",
                   !if(!eq(NumSrcArgs, 2), " $src1_modifiers",
                                           " $src1_modifiers,"));
  string args = !if(!eq(HasModifiers, 0),
                    getAsm32<0, NumSrcArgs, DstVT>.ret,
                    ", "#src0#src1);
  string ret = dst#args#" $dpp_ctrl$row_mask$bank_mask$bound_ctrl";
}

// Returns the assembly string for the SDWA form of an instruction.
class getAsmSDWA <bit HasDst, int NumSrcArgs, bit HasFloatModifiers,
                  ValueType DstVT = i32> {
  string dst = !if(HasDst,
                   !if(!eq(DstVT.Size, 1),
                       " vcc", // use vcc token as dst for VOPC instructions
                       "$vdst"),
                   "");
  string src0 = "$src0_modifiers";
  string src1 = "$src1_modifiers";
  string args = !if(!eq(NumSrcArgs, 0),
                    "",
                    !if(!eq(NumSrcArgs, 1),
                        ", "#src0#"$clamp",
                        ", "#src0#", "#src1#"$clamp"
                    )
                );
  string sdwa = !if(!eq(NumSrcArgs, 0),
                    "",
                    !if(!eq(NumSrcArgs, 1),
                        " $dst_sel $dst_unused $src0_sel",
                        !if(!eq(DstVT.Size, 1),
                            " $src0_sel $src1_sel", // No dst_sel and dst_unused for VOPC
                            " $dst_sel $dst_unused $src0_sel $src1_sel"
                        )
                    )
                );
  string ret = dst#args#sdwa;
}
// Function that checks if instruction supports DPP and SDWA
class getHasExt <int NumSrcArgs, ValueType DstVT = i32, ValueType Src0VT = i32,
                 ValueType Src1VT = i32> {
  bit ret = !if(!eq(NumSrcArgs, 3),
                0, // NumSrcArgs == 3 - No DPP or SDWA for VOP3
                !if(!eq(DstVT.Size, 64),
                    0, // 64-bit dst - No DPP or SDWA for 64-bit operands
                    !if(!eq(Src0VT.Size, 64),
                        0, // 64-bit src0
                        // Fixed: the original tested Src0VT.Size a second
                        // time here, leaving the Src1VT parameter unused and
                        // failing to exclude 64-bit src1 operands.
                        !if(!eq(Src1VT.Size, 64),
                            0, // 64-bit src1
                            1
                        )
                    )
                )
            );
}
// Logical OR/AND of two bits, usable in field initializers.
class BitOr<bit a, bit b> {
  bit ret = !if(a, 1, !if(b, 1, 0));
}

class BitAnd<bit a, bit b> {
  bit ret = !if(a, !if(b, 1, 0), 0);
}
// Describes one VALU operand shape: value types, register classes, source
// modifiers, ins/outs dags and assembly strings for each encoding variant
// (e32, e64, DPP, SDWA).  ArgVT is [DstVT, Src0VT, Src1VT, Src2VT]; an
// unused slot is 'untyped'.
class VOPProfile <list<ValueType> _ArgVT> {

  field list<ValueType> ArgVT = _ArgVT;

  // Value types of the destination and up to three source operands.
  field ValueType DstVT = ArgVT[0];
  field ValueType Src0VT = ArgVT[1];
  field ValueType Src1VT = ArgVT[2];
  field ValueType Src2VT = ArgVT[3];

  // Destination / source register classes per encoding variant.
  field RegisterOperand DstRC = getVALUDstForVT<DstVT>.ret;
  field RegisterOperand DstRCDPP = getVALUDstForVT<DstVT>.ret;
  field RegisterOperand DstRCSDWA = getVALUDstForVT<DstVT>.ret;
  field RegisterOperand Src0RC32 = getVOPSrc0ForVT<Src0VT>.ret;
  field RegisterClass Src1RC32 = getVregSrcForVT<Src1VT>.ret;
  field RegisterOperand Src0RC64 = getVOP3SrcForVT<Src0VT>.ret;
  field RegisterOperand Src1RC64 = getVOP3SrcForVT<Src1VT>.ret;
  field RegisterOperand Src2RC64 = getVOP3SrcForVT<Src2VT>.ret;
  field RegisterClass Src0DPP = getVregSrcForVT<Src0VT>.ret;
  field RegisterClass Src1DPP = getVregSrcForVT<Src1VT>.ret;
  field RegisterClass Src0SDWA = getVregSrcForVT<Src0VT>.ret;
  field RegisterClass Src1SDWA = getVregSrcForVT<Src1VT>.ret;

  // Source-modifier operands, selected by each source's value type.
  field Operand Src0Mod = getSrcMod<Src0VT>.ret;
  field Operand Src1Mod = getSrcMod<Src1VT>.ret;
  field Operand Src2Mod = getSrcMod<Src2VT>.ret;

  // An operand is present iff its ArgVT slot is not 'untyped'.
  field bit HasDst = !if(!eq(DstVT.Value, untyped.Value), 0, 1);
  field bit HasDst32 = HasDst;
  field bit EmitDst = HasDst; // force dst encoding, see v_movreld_b32 special case
  field int NumSrcArgs = getNumSrcArgs<Src0VT, Src1VT, Src2VT>.ret;
  field bit HasSrc0 = !if(!eq(Src0VT.Value, untyped.Value), 0, 1);
  field bit HasSrc1 = !if(!eq(Src1VT.Value, untyped.Value), 0, 1);
  field bit HasSrc2 = !if(!eq(Src2VT.Value, untyped.Value), 0, 1);

  // TODO: Modifiers logic is somewhat adhoc here, to be refined later
  // Modifiers are only enabled at all when src0 is a float type.
  field bit HasModifiers = isFloatType<Src0VT>.ret;

  field bit HasSrc0FloatMods = isFloatType<Src0VT>.ret;
  field bit HasSrc1FloatMods = isFloatType<Src1VT>.ret;
  field bit HasSrc2FloatMods = isFloatType<Src2VT>.ret;

  field bit HasSrc0IntMods = isIntType<Src0VT>.ret;
  field bit HasSrc1IntMods = isIntType<Src1VT>.ret;
  field bit HasSrc2IntMods = isIntType<Src2VT>.ret;

  field bit HasSrc0Mods = HasModifiers;
  field bit HasSrc1Mods = !if(HasModifiers, BitOr<HasSrc1FloatMods, HasSrc1IntMods>.ret, 0);
  field bit HasSrc2Mods = !if(HasModifiers, BitOr<HasSrc2FloatMods, HasSrc2IntMods>.ret, 0);

  field bit HasOMod = HasModifiers;
  field bit HasClamp = HasModifiers;
  field bit HasSDWAClamp = HasSrc0;

  // Whether the extended (DPP/SDWA) forms exist for this operand shape.
  field bit HasExt = getHasExt<NumSrcArgs, DstVT, Src0VT, Src1VT>.ret;

  field dag Outs = !if(HasDst,(outs DstRC:$vdst),(outs));

  // VOP3b instructions are a special case with a second explicit
  // output. This is manually overridden for them.
  field dag Outs32 = Outs;
  field dag Outs64 = Outs;
  field dag OutsDPP = getOutsExt<HasDst, DstVT, DstRCDPP>.ret;
  // Build the SDWA outs from DstRCSDWA, not DstRCDPP.  Both currently
  // resolve to getVALUDstForVT<DstVT>.ret, so the value is unchanged, but
  // subclasses that override DstRCSDWA now take effect here as intended.
  field dag OutsSDWA = getOutsExt<HasDst, DstVT, DstRCSDWA>.ret;

  field dag Ins32 = getIns32<Src0RC32, Src1RC32, NumSrcArgs>.ret;
  field dag Ins64 = getIns64<Src0RC64, Src1RC64, Src2RC64, NumSrcArgs,
                             HasModifiers, Src0Mod, Src1Mod, Src2Mod>.ret;
  field dag InsDPP = getInsDPP<Src0DPP, Src1DPP, NumSrcArgs,
                               HasModifiers, Src0Mod, Src1Mod>.ret;
  field dag InsSDWA = getInsSDWA<Src0SDWA, Src1SDWA, NumSrcArgs,
                                 HasModifiers, Src0Mod, Src1Mod, DstVT>.ret;

  // Assembly strings for each encoding variant.
  field string Asm32 = getAsm32<HasDst, NumSrcArgs, DstVT>.ret;
  field string Asm64 = getAsm64<HasDst, NumSrcArgs, HasModifiers, DstVT>.ret;
  field string AsmDPP = getAsmDPP<HasDst, NumSrcArgs, HasModifiers, DstVT>.ret;
  field string AsmSDWA = getAsmSDWA<HasDst, NumSrcArgs, HasModifiers, DstVT>.ret;
}
|
|
|
|
// A copy of profile p with the extended (DPP/SDWA) forms explicitly
// disabled, regardless of what getHasExt computes for its argument types.
class VOP_NO_EXT <VOPProfile p> : VOPProfile <p.ArgVT> {
  let HasExt = 0;
}
|
|
|
|
// FIXME: I think these F16/I16 profiles will need to use f16/i16 types in order
// for the instruction patterns to work.
// NOTE: per the FIXME above, the "I16" slots below are currently i32.
def VOP_F16_F16 : VOPProfile <[f16, f16, untyped, untyped]>;
def VOP_F16_I16 : VOPProfile <[f16, i32, untyped, untyped]>;
def VOP_I16_F16 : VOPProfile <[i32, f16, untyped, untyped]>;

def VOP_F16_F16_F16 : VOPProfile <[f16, f16, f16, untyped]>;
def VOP_F16_F16_I16 : VOPProfile <[f16, f16, i32, untyped]>;
def VOP_I16_I16_I16 : VOPProfile <[i32, i32, i32, untyped]>;

// NOTE(review): these two lists carry five elements, but VOPProfile only
// reads ArgVT[0..3]; the trailing 'untyped' is ignored.
def VOP_I16_I16_I16_I16 : VOPProfile <[i32, i32, i32, i32, untyped]>;
def VOP_F16_F16_F16_F16 : VOPProfile <[f16, f16, f16, f16, untyped]>;

// Profile with no destination and no sources.
def VOP_NONE : VOPProfile <[untyped, untyped, untyped, untyped]>;
|
|
|
|
// One-source profiles: [DstVT, Src0VT].
def VOP_F32_F32 : VOPProfile <[f32, f32, untyped, untyped]>;
def VOP_F32_F64 : VOPProfile <[f32, f64, untyped, untyped]>;
def VOP_F32_I32 : VOPProfile <[f32, i32, untyped, untyped]>;
def VOP_F64_F32 : VOPProfile <[f64, f32, untyped, untyped]>;
def VOP_F64_F64 : VOPProfile <[f64, f64, untyped, untyped]>;
def VOP_F64_I32 : VOPProfile <[f64, i32, untyped, untyped]>;
def VOP_I32_F32 : VOPProfile <[i32, f32, untyped, untyped]>;
def VOP_I32_F64 : VOPProfile <[i32, f64, untyped, untyped]>;
def VOP_I32_I32 : VOPProfile <[i32, i32, untyped, untyped]>;

// Two-source profiles: [DstVT, Src0VT, Src1VT].
def VOP_F32_F32_F32 : VOPProfile <[f32, f32, f32, untyped]>;
def VOP_F32_F32_I32 : VOPProfile <[f32, f32, i32, untyped]>;
def VOP_F64_F64_F64 : VOPProfile <[f64, f64, f64, untyped]>;
def VOP_F64_F64_I32 : VOPProfile <[f64, f64, i32, untyped]>;
def VOP_I32_F32_F32 : VOPProfile <[i32, f32, f32, untyped]>;
def VOP_I32_F32_I32 : VOPProfile <[i32, f32, i32, untyped]>;
def VOP_I32_I32_I32 : VOPProfile <[i32, i32, i32, untyped]>;

def VOP_I64_I64_I32 : VOPProfile <[i64, i64, i32, untyped]>;
def VOP_I64_I32_I64 : VOPProfile <[i64, i32, i64, untyped]>;
def VOP_I64_I64_I64 : VOPProfile <[i64, i64, i64, untyped]>;

// Three-source profiles: [DstVT, Src0VT, Src1VT, Src2VT].
def VOP_F32_F32_F32_F32 : VOPProfile <[f32, f32, f32, f32]>;
def VOP_F64_F64_F64_F64 : VOPProfile <[f64, f64, f64, f64]>;
def VOP_I32_I32_I32_I32 : VOPProfile <[i32, i32, i32, i32]>;
def VOP_I64_I32_I32_I64 : VOPProfile <[i64, i32, i32, i64]>;
def VOP_I32_F32_I32_I32 : VOPProfile <[i32, f32, i32, i32]>;
def VOP_I64_I64_I32_I64 : VOPProfile <[i64, i64, i32, i64]>;
def VOP_V4I32_I64_I32_V4I32 : VOPProfile <[v4i32, i64, i32, v4i32]>;
|
|
|
|
// Mixin that pairs an instruction with its operand-commuted counterpart.
// Both variants name the same RevOp; IsOrig distinguishes the original (1)
// from the commuted form (0).  Consumed by the getCommuteOrig and
// getCommuteRev instruction mappings.
class Commutable_REV <string revOp, bit isOrig> {
  string RevOp = revOp;
  bit IsOrig = isOrig;
}
|
|
|
|
// Mixin that pairs the returning and non-returning variants of an atomic
// instruction.  Both variants name the same NoRetOp; IsRet is 1 for the
// variant that produces a result.  Consumed by the getAtomicRetOp and
// getAtomicNoRetOp instruction mappings.
class AtomicNoRet <string noRetOp, bit isRet> {
  string NoRetOp = noRetOp;
  bit IsRet = isRet;
}
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
// Interpolation opcodes
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// Subtarget-independent pseudo form of an interpolation instruction.
// Registered under SIEncodingFamily.NONE so getMCOpcodeGen can map it to
// the matching _si/_vi real instruction.
class VINTRP_Pseudo <string opName, dag outs, dag ins, list<dag> pattern> :
  VINTRPCommon <outs, ins, "", pattern>,
  SIMCInstr<opName, SIEncodingFamily.NONE> {
  let isPseudo = 1;
  let isCodeGenOnly = 1;
}
|
|
|
|
// Real (encoded) interpolation instruction for the SI subtarget, decoded
// in the shared "SICI" namespace.
class VINTRP_Real_si <bits <2> op, string opName, dag outs, dag ins,
                      string asm> :
  VINTRPCommon <outs, ins, asm, []>,
  VINTRPe <op>,
  SIMCInstr<opName, SIEncodingFamily.SI> {
  let AssemblerPredicate = SIAssemblerPredicate;
  let DecoderNamespace = "SICI";
  let DisableDecoder = DisableSIDecoder;
}
|
|
|
|
// Real (encoded) interpolation instruction for the VI subtarget, using the
// VI encoding class and decoder namespace.
class VINTRP_Real_vi <bits <2> op, string opName, dag outs, dag ins,
                      string asm> :
  VINTRPCommon <outs, ins, asm, []>,
  VINTRPe_vi <op>,
  SIMCInstr<opName, SIEncodingFamily.VI> {
  let AssemblerPredicate = VIAssemblerPredicate;
  let DecoderNamespace = "VI";
  let DisableDecoder = DisableVIDecoder;
}
|
|
|
|
// Defines the pseudo plus the SI and VI real forms of one interpolation
// instruction, all sharing the same outs/ins/asm.
multiclass VINTRP_m <bits <2> op, dag outs, dag ins, string asm,
                     list<dag> pattern = []> {
  def "" : VINTRP_Pseudo <NAME, outs, ins, pattern>;

  def _si : VINTRP_Real_si <op, NAME, outs, ins, asm>;

  def _vi : VINTRP_Real_vi <op, NAME, outs, ins, asm>;
}
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
// Vector instruction mappings
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// Maps an opcode in e32 form to its e64 equivalent: rows are grouped by
// shared OpName, and the Size 4 (e32) entry maps to the Size 8 (e64) entry.
def getVOPe64 : InstrMapping {
  let FilterClass = "VOP";
  let RowFields = ["OpName"];
  let ColFields = ["Size"];
  let KeyCol = ["4"];
  let ValueCols = [["8"]];
}
|
|
|
|
// Maps an opcode in e64 form to its e32 equivalent; the inverse of
// getVOPe64 (Size 8 entry maps back to the Size 4 entry).
def getVOPe32 : InstrMapping {
  let FilterClass = "VOP";
  let RowFields = ["OpName"];
  let ColFields = ["Size"];
  let KeyCol = ["8"];
  let ValueCols = [["4"]];
}
|
|
|
|
// Maps a 4-channel MIMG opcode to its 1-, 2-, and 3-channel variants
// (one ValueCols entry per channel count, in that order).
def getMaskedMIMGOp : InstrMapping {
  let FilterClass = "MIMG_Mask";
  let RowFields = ["Op"];
  let ColFields = ["Channels"];
  let KeyCol = ["4"];
  let ValueCols = [["1"], ["2"], ["3"] ];
}
|
|
|
|
// Maps a commuted opcode to its original version
|
|
// Rows are grouped by the Commutable_REV RevOp name; the commuted entry
// (IsOrig = 0) maps to the original (IsOrig = 1).
def getCommuteOrig : InstrMapping {
  let FilterClass = "Commutable_REV";
  let RowFields = ["RevOp"];
  let ColFields = ["IsOrig"];
  let KeyCol = ["0"];
  let ValueCols = [["1"]];
}
|
|
|
|
// Maps an original opcode to its commuted version; the inverse of
// getCommuteOrig (IsOrig = 1 entry maps to IsOrig = 0).
def getCommuteRev : InstrMapping {
  let FilterClass = "Commutable_REV";
  let RowFields = ["RevOp"];
  let ColFields = ["IsOrig"];
  let KeyCol = ["1"];
  let ValueCols = [["0"]];
}
|
|
|
|
// Maps a subtarget-independent pseudo (SIEncodingFamily.NONE) to its real
// encoded instruction for each subtarget; the ValueCols order (SI, VI)
// must match the SIEncodingFamily numbering used to index the result.
def getMCOpcodeGen : InstrMapping {
  let FilterClass = "SIMCInstr";
  let RowFields = ["PseudoInstr"];
  let ColFields = ["Subtarget"];
  let KeyCol = [!cast<string>(SIEncodingFamily.NONE)];
  let ValueCols = [[!cast<string>(SIEncodingFamily.SI)],
                   [!cast<string>(SIEncodingFamily.VI)]];
}
|
|
|
|
// Get equivalent SOPK instruction: rows are grouped by the SOPKInstTable
// BaseCmpOp name, and the non-SOPK entry (IsSOPK = 0) maps to the SOPK
// form (IsSOPK = 1).
def getSOPKOp : InstrMapping {
  let FilterClass = "SOPKInstTable";
  let RowFields = ["BaseCmpOp"];
  let ColFields = ["IsSOPK"];
  let KeyCol = ["0"];
  let ValueCols = [["1"]];
}
|
|
|
|
// Maps a MUBUF instruction (keyed by shared OpName) from its non-addr64
// form (IsAddr64 = 0) to its addr64 form (IsAddr64 = 1).
def getAddr64Inst : InstrMapping {
  let FilterClass = "MUBUFAddr64Table";
  let RowFields = ["OpName"];
  let ColFields = ["IsAddr64"];
  let KeyCol = ["0"];
  let ValueCols = [["1"]];
}
|
|
|
|
// Maps an atomic opcode to its version with a return value.
// Rows are grouped by the AtomicNoRet NoRetOp name; the non-returning
// entry (IsRet = 0) maps to the returning one (IsRet = 1).
def getAtomicRetOp : InstrMapping {
  let FilterClass = "AtomicNoRet";
  let RowFields = ["NoRetOp"];
  let ColFields = ["IsRet"];
  let KeyCol = ["0"];
  let ValueCols = [["1"]];
}
|
|
|
|
// Maps an atomic opcode to its returnless version; the inverse of
// getAtomicRetOp (IsRet = 1 entry maps to IsRet = 0).
def getAtomicNoRetOp : InstrMapping {
  let FilterClass = "AtomicNoRet";
  let RowFields = ["NoRetOp"];
  let ColFields = ["IsRet"];
  let KeyCol = ["1"];
  let ValueCols = [["0"]];
}
|
|
|
|
include "SIInstructions.td"
|
|
include "CIInstructions.td"
|
|
|
|
include "DSInstructions.td"
|
|
include "MIMGInstructions.td"
|