[AMDGPU] Assembler: Fix VOP3 only instructions

Separate methods to convert parsed instructions to MCInst (a rough sketch of the selection logic follows this list):

  - VOP3 only instructions (always create modifiers as operands in MCInst)
  - VOP2 instructions with modifiers (create modifiers as operands
    in MCInst when e64 encoding is forced or modifiers are parsed)
  - VOP2 instructions without modifiers (do not create modifiers
    as operands in MCInst)
  - Add VOP3Only flag. Pass HasMods flag to VOP3Common.
  - Simplify code that deals with modifiers (the -1 sentinel has been
    replaced by 0, so the special handling is no longer needed).
  - Add a few tests (more will be added separately).
  - Update a test whose expected error message is now correct.
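
A minimal sketch of the converter selection described above, using the converter
names from the diff below; the enum and the free function here are illustrative
only, not code from the patch:

    enum class InstClass { VOP3Only, VOP2WithMods, VOP2NoMods };

    // Hypothetical illustration: which converter ends up building the MCInst.
    // cvtVOP3 always emits src modifiers plus clamp/omod operands; cvtId copies
    // the parsed operands as-is.
    const char *pickConverter(InstClass C, bool ForcedE64, bool ModifiersParsed) {
      switch (C) {
      case InstClass::VOP3Only:      // cvtVOP3_only
        return "cvtVOP3";
      case InstClass::VOP2WithMods:  // cvtVOP3_2_mod
        return (ForcedE64 || ModifiersParsed) ? "cvtVOP3" : "cvtId";
      case InstClass::VOP2NoMods:    // cvtVOP3_2_nomod
        return ModifiersParsed ? "cvtVOP3" : "cvtId";
      }
      return "cvtId";
    }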

Patch By: Nikolay Haustov

Differential Revision: http://reviews.llvm.org/D16778

llvm-svn: 260483
Tom Stellard 2016-02-11 03:28:15 +00:00
parent ba284b60b8
commit a90b9526df
5 changed files with 193 additions and 97 deletions

View File

@@ -116,8 +116,7 @@ public:
   }

   void addRegWithInputModsOperands(MCInst &Inst, unsigned N) const {
-    Inst.addOperand(MCOperand::createImm(
-        Reg.Modifiers == -1 ? 0 : Reg.Modifiers));
+    Inst.addOperand(MCOperand::createImm(Reg.Modifiers));
     addRegOperands(Inst, N);
   }

@@ -176,11 +175,23 @@ public:
   }

   bool isReg() const override {
-    return Kind == Register && Reg.Modifiers == -1;
+    return Kind == Register && Reg.Modifiers == 0;
   }

   bool isRegWithInputMods() const {
-    return Kind == Register && (Reg.IsForcedVOP3 || Reg.Modifiers != -1);
+    return Kind == Register;
+  }
+
+  bool isClamp() const {
+    return isImm() && Imm.Type == ImmTyClamp;
+  }
+
+  bool isOMod() const {
+    return isImm() && Imm.Type == ImmTyOMod;
+  }
+
+  bool isMod() const {
+    return isClamp() || isOMod();
   }

   void setModifiers(unsigned Mods) {
@@ -190,7 +201,7 @@ public:
   bool hasModifiers() const {
     assert(isRegKind());
-    return Reg.Modifiers != -1;
+    return Reg.Modifiers != 0;
   }

   unsigned getReg() const override {
@@ -202,7 +213,7 @@ public:
   }

   bool isRegClass(unsigned RCID) const {
-    return Reg.TRI->getRegClass(RCID).contains(getReg());
+    return isReg() && Reg.TRI->getRegClass(RCID).contains(getReg());
   }

   bool isSCSrc32() const {
@@ -306,7 +317,7 @@ public:
     Op->Reg.RegNo = RegNo;
     Op->Reg.TRI = TRI;
     Op->Reg.STI = STI;
-    Op->Reg.Modifiers = -1;
+    Op->Reg.Modifiers = 0;
     Op->Reg.IsForcedVOP3 = ForceVOP3;
     Op->StartLoc = S;
     Op->EndLoc = E;
@@ -462,6 +473,10 @@ public:
   OperandMatchResultTy parseUNorm(OperandVector &Operands);
   OperandMatchResultTy parseR128(OperandVector &Operands);

+  void cvtId(MCInst &Inst, const OperandVector &Operands);
+  void cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands);
+  void cvtVOP3_2_nomod(MCInst &Inst, const OperandVector &Operands);
+  void cvtVOP3_only(MCInst &Inst, const OperandVector &Operands);
   void cvtVOP3(MCInst &Inst, const OperandVector &Operands);
   OperandMatchResultTy parseVOP3OptionalOps(OperandVector &Operands);
 };
@@ -1103,7 +1118,7 @@ AMDGPUAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
     // If we are parsing after we reach EndOfStatement then this means we
     // are appending default values to the Operands list. This is only done
     // by custom parser, so we shouldn't continue on to the generic parsing.
-    if (ResTy == MatchOperand_Success || ResTy == MatchOperand_ParseFail ||
+    if (ResTy == MatchOperand_Success || ResTy == MatchOperand_ParseFail||
        getLexer().is(AsmToken::EndOfStatement))
      return ResTy;

@@ -1153,8 +1168,6 @@ AMDGPUAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
     SMLoc S, E;
     unsigned RegNo;
     if (!ParseRegister(RegNo, S, E)) {
-      bool HasModifiers = operandsHaveModifiers(Operands);
-
       unsigned Modifiers = 0;

       if (Negate)
@@ -1167,34 +1180,23 @@ AMDGPUAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
         Modifiers |= 0x2;
       }

-      if (Modifiers && !HasModifiers) {
-        // We are adding a modifier to src1 or src2 and previous sources
-        // don't have modifiers, so we need to go back and empty modifers
-        // for each previous source.
-        for (unsigned PrevRegIdx = Operands.size() - 1; PrevRegIdx > 1;
-             --PrevRegIdx) {
-
-          AMDGPUOperand &RegOp = ((AMDGPUOperand&)*Operands[PrevRegIdx]);
-          RegOp.setModifiers(0);
-        }
-      }
-
       Operands.push_back(AMDGPUOperand::CreateReg(
           RegNo, S, E, getContext().getRegisterInfo(), &getSTI(),
           isForcedVOP3()));

-      if (HasModifiers || Modifiers) {
+      if (Modifiers) {
        AMDGPUOperand &RegOp = ((AMDGPUOperand&)*Operands[Operands.size() - 1]);
        RegOp.setModifiers(Modifiers);
      }
    } else {
-      Operands.push_back(AMDGPUOperand::CreateToken(Parser.getTok().getString(),
-                                                    S));
-      Parser.Lex();
-    }
-    return MatchOperand_Success;
+      ResTy = parseVOP3OptionalOps(Operands);
+      if (ResTy == MatchOperand_NoMatch) {
+        Operands.push_back(AMDGPUOperand::CreateToken(Parser.getTok().getString(),
+                                                      S));
+        Parser.Lex();
+      }
+    }
+    return MatchOperand_Success;
   }
   default:
     return MatchOperand_NoMatch;
@@ -1802,10 +1804,12 @@ static bool isVOP3(OperandVector &Operands) {
   if (operandsHaveModifiers(Operands))
     return true;

-  AMDGPUOperand &DstOp = ((AMDGPUOperand&)*Operands[1]);
-
-  if (DstOp.isReg() && DstOp.isRegClass(AMDGPU::SGPR_64RegClassID))
-    return true;
+  if (Operands.size() >= 2) {
+    AMDGPUOperand &DstOp = ((AMDGPUOperand&)*Operands[1]);
+
+    if (DstOp.isReg() && DstOp.isRegClass(AMDGPU::SGPR_64RegClassID))
+      return true;
+  }

   if (Operands.size() >= 5)
     return true;
@@ -1848,35 +1852,70 @@ AMDGPUAsmParser::parseVOP3OptionalOps(OperandVector &Operands) {
   return MatchOperand_NoMatch;
 }

-void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) {
-
-  unsigned i = 1;
-  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
-  if (Desc.getNumDefs() > 0) {
-    ((AMDGPUOperand &)*Operands[i++]).addRegOperands(Inst, 1);
-  }
-
-  std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
-
-  if (operandsHaveModifiers(Operands)) {
-    for (unsigned e = Operands.size(); i != e; ++i) {
-      AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
-
-      if (Op.isRegWithInputMods()) {
-        ((AMDGPUOperand &)*Operands[i]).addRegWithInputModsOperands(Inst, 2);
-        continue;
-      }
-      OptionalIdx[Op.getImmTy()] = i;
-    }
-
-    unsigned ClampIdx = OptionalIdx[AMDGPUOperand::ImmTyClamp];
-    unsigned OModIdx = OptionalIdx[AMDGPUOperand::ImmTyOMod];
-
-    ((AMDGPUOperand &)*Operands[ClampIdx]).addImmOperands(Inst, 1);
-    ((AMDGPUOperand &)*Operands[OModIdx]).addImmOperands(Inst, 1);
-  } else {
-    for (unsigned e = Operands.size(); i != e; ++i)
-      ((AMDGPUOperand &)*Operands[i]).addRegOrImmOperands(Inst, 1);
-  }
+void AMDGPUAsmParser::cvtId(MCInst &Inst, const OperandVector &Operands) {
+  unsigned I = 1;
+  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
+  if (Desc.getNumDefs() > 0) {
+    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
+  }
+  for (unsigned E = Operands.size(); I != E; ++I)
+    ((AMDGPUOperand &)*Operands[I]).addRegOrImmOperands(Inst, 1);
+}
+
+void AMDGPUAsmParser::cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands) {
+  if (operandsHaveModifiers(Operands) || isForcedVOP3()) {
+    cvtVOP3(Inst, Operands);
+  } else {
+    cvtId(Inst, Operands);
+  }
+}
+
+void AMDGPUAsmParser::cvtVOP3_2_nomod(MCInst &Inst, const OperandVector &Operands) {
+  if (operandsHaveModifiers(Operands)) {
+    cvtVOP3(Inst, Operands);
+  } else {
+    cvtId(Inst, Operands);
+  }
+}
+
+void AMDGPUAsmParser::cvtVOP3_only(MCInst &Inst, const OperandVector &Operands) {
+  cvtVOP3(Inst, Operands);
+}
+
+void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) {
+  unsigned I = 1;
+  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
+  if (Desc.getNumDefs() > 0) {
+    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
+  }
+
+  unsigned ClampIdx = 0, OModIdx = 0;
+  for (unsigned E = Operands.size(); I != E; ++I) {
+    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
+    if (Op.isRegWithInputMods()) {
+      Op.addRegWithInputModsOperands(Inst, 2);
+    } else if (Op.isClamp()) {
+      ClampIdx = I;
+    } else if (Op.isOMod()) {
+      OModIdx = I;
+    } else if (Op.isImm()) {
+      Op.addImmOperands(Inst, 1);
+    } else {
+      assert(false);
+    }
+  }
+
+  if (ClampIdx) {
+    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[ClampIdx]);
+    Op.addImmOperands(Inst, 1);
+  } else {
+    Inst.addOperand(MCOperand::createImm(0));
+  }
+  if (OModIdx) {
+    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[OModIdx]);
+    Op.addImmOperands(Inst, 1);
+  } else {
+    Inst.addOperand(MCOperand::createImm(0));
+  }
 }
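
For a concrete picture of the operand order the new cvtVOP3 emits, here is a
small standalone sketch (not part of the patch) for a case like
"v_add_f64 v[0:1], -v[2:3], v[5:6]" with no clamp or omod written; the register
numbers and the 0x1 neg-bit value are assumptions for illustration:

    #include "llvm/MC/MCInst.h"

    // Hypothetical helper mirroring cvtVOP3 for a two-source instruction whose
    // src0 carries a '-' modifier: each source is preceded by its modifiers
    // immediate, and clamp/omod default immediates are appended at the end.
    static void buildVOP3Operands(llvm::MCInst &Inst, unsigned DstReg,
                                  unsigned Src0Reg, unsigned Src1Reg) {
      using llvm::MCOperand;
      Inst.addOperand(MCOperand::createReg(DstReg));   // vdst
      Inst.addOperand(MCOperand::createImm(0x1));      // src0_modifiers (neg, assumed bit)
      Inst.addOperand(MCOperand::createReg(Src0Reg));  // src0
      Inst.addOperand(MCOperand::createImm(0));        // src1_modifiers (none)
      Inst.addOperand(MCOperand::createReg(Src1Reg));  // src1
      Inst.addOperand(MCOperand::createImm(0));        // clamp default (ClampIdx == 0 path)
      Inst.addOperand(MCOperand::createImm(0));        // omod default (OModIdx == 0 path)
    }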

View File

@@ -123,7 +123,7 @@ class VOP2Common <dag outs, dag ins, string asm, list<dag> pattern> :
   let Size = 4;
 }

-class VOP3Common <dag outs, dag ins, string asm, list<dag> pattern> :
+class VOP3Common <dag outs, dag ins, string asm, list<dag> pattern, bit HasMods = 0, bit VOP3Only = 0> :
   VOPAnyCommon <outs, ins, asm, pattern> {

   // Using complex patterns gives VOP3 patterns a very high complexity rating,
@@ -135,7 +135,10 @@ class VOP3Common <dag outs, dag ins, string asm, list<dag> pattern> :
   let VOP3 = 1;
   let VALU = 1;

-  let AsmMatchConverter = "cvtVOP3";
+  let AsmMatchConverter =
+    !if(!eq(VOP3Only,1),
+        "cvtVOP3_only",
+        !if(!eq(HasMods,1), "cvtVOP3_2_mod", "cvtVOP3_2_nomod"));
   let isCodeGenOnly = 0;

   int Size = 8;

View File

@@ -477,6 +477,7 @@ def OModMatchClass : AsmOperandClass {
   let PredicateMethod = "isImm";
   let ParserMethod = "parseVOP3OptionalOps";
   let RenderMethod = "addImmOperands";
+  let IsOptional = 1;
 }

 def ClampMatchClass : AsmOperandClass {
@@ -484,6 +485,7 @@ def ClampMatchClass : AsmOperandClass {
   let PredicateMethod = "isImm";
   let ParserMethod = "parseVOP3OptionalOps";
   let RenderMethod = "addImmOperands";
+  let IsOptional = 1;
 }

 class SMRDOffsetBaseMatchClass <string predicate> : AsmOperandClass {
@@ -1072,8 +1074,10 @@ class getVOP3SrcForVT<ValueType VT> {
 // Returns 1 if the source arguments have modifiers, 0 if they do not.
 // XXX - do f16 instructions?
 class hasModifiers<ValueType SrcVT> {
-  bit ret = !if(!eq(SrcVT.Value, f32.Value), 1,
-            !if(!eq(SrcVT.Value, f64.Value), 1, 0));
+  bit ret =
+    !if(!eq(SrcVT.Value, f32.Value), 1,
+    !if(!eq(SrcVT.Value, f64.Value), 1,
+    0));
 }

 // Returns the input arguments for VOP[12C] instructions for the given SrcVT.
@@ -1471,8 +1475,9 @@ class VOP3DisableModFields <bit HasSrc0Mods,
   bits<1> clamp = !if(HasOutputMods, ?, 0);
 }

-class VOP3_Pseudo <dag outs, dag ins, list<dag> pattern, string opName> :
-  VOP3Common <outs, ins, "", pattern>,
+class VOP3_Pseudo <dag outs, dag ins, list<dag> pattern, string opName,
+                   bit HasMods = 0, bit VOP3Only = 0> :
+  VOP3Common <outs, ins, "", pattern, HasMods, VOP3Only>,
   VOP <opName>,
   SIMCInstr<opName#"_e64", SISubtarget.NONE>,
   MnemonicAlias<opName#"_e64", opName> {
@@ -1483,44 +1488,48 @@
   field bit src0;
 }

-class VOP3_Real_si <bits<9> op, dag outs, dag ins, string asm, string opName> :
-  VOP3Common <outs, ins, asm, []>,
+class VOP3_Real_si <bits<9> op, dag outs, dag ins, string asm, string opName,
+                    bit HasMods = 0, bit VOP3Only = 0> :
+  VOP3Common <outs, ins, asm, [], HasMods, VOP3Only>,
   VOP3e <op>,
   SIMCInstr<opName#"_e64", SISubtarget.SI> {
   let AssemblerPredicates = [isSICI];
 }

-class VOP3_Real_vi <bits<10> op, dag outs, dag ins, string asm, string opName> :
-  VOP3Common <outs, ins, asm, []>,
+class VOP3_Real_vi <bits<10> op, dag outs, dag ins, string asm, string opName,
+                    bit HasMods = 0, bit VOP3Only = 0> :
+  VOP3Common <outs, ins, asm, [], HasMods, VOP3Only>,
   VOP3e_vi <op>,
   SIMCInstr <opName#"_e64", SISubtarget.VI> {
   let AssemblerPredicates = [isVI];
 }

-class VOP3b_Real_si <bits<9> op, dag outs, dag ins, string asm, string opName> :
-  VOP3Common <outs, ins, asm, []>,
+class VOP3b_Real_si <bits<9> op, dag outs, dag ins, string asm, string opName,
+                     bit HasMods = 0, bit VOP3Only = 0> :
+  VOP3Common <outs, ins, asm, [], HasMods, VOP3Only>,
   VOP3be <op>,
   SIMCInstr<opName#"_e64", SISubtarget.SI> {
   let AssemblerPredicates = [isSICI];
 }

-class VOP3b_Real_vi <bits<10> op, dag outs, dag ins, string asm, string opName> :
-  VOP3Common <outs, ins, asm, []>,
+class VOP3b_Real_vi <bits<10> op, dag outs, dag ins, string asm, string opName,
+                     bit HasMods = 0, bit VOP3Only = 0> :
+  VOP3Common <outs, ins, asm, [], HasMods, VOP3Only>,
   VOP3be_vi <op>,
   SIMCInstr <opName#"_e64", SISubtarget.VI> {
   let AssemblerPredicates = [isVI];
 }

 multiclass VOP3_m <vop op, dag outs, dag ins, string asm, list<dag> pattern,
-                   string opName, int NumSrcArgs, bit HasMods = 1> {
+                   string opName, int NumSrcArgs, bit HasMods = 1, bit VOP3Only = 0> {

   def "" : VOP3_Pseudo <outs, ins, pattern, opName>;

-  def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName>,
+  def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName, HasMods, VOP3Only>,
             VOP3DisableFields<!if(!eq(NumSrcArgs, 1), 0, 1),
                               !if(!eq(NumSrcArgs, 2), 0, 1),
                               HasMods>;
-  def _vi : VOP3_Real_vi <op.VI3, outs, ins, asm, opName>,
+  def _vi : VOP3_Real_vi <op.VI3, outs, ins, asm, opName, HasMods, VOP3Only>,
             VOP3DisableFields<!if(!eq(NumSrcArgs, 1), 0, 1),
                               !if(!eq(NumSrcArgs, 2), 0, 1),
                               HasMods>;
@@ -1529,21 +1538,21 @@ multiclass VOP3_m <vop op, dag outs, dag ins, string asm, list<dag> pattern,
 multiclass VOP3_1_m <vop op, dag outs, dag ins, string asm,
                      list<dag> pattern, string opName, bit HasMods = 1> {

-  def "" : VOP3_Pseudo <outs, ins, pattern, opName>;
+  def "" : VOP3_Pseudo <outs, ins, pattern, opName, HasMods>;

-  def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName>,
+  def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName, HasMods>,
             VOP3DisableFields<0, 0, HasMods>;

-  def _vi : VOP3_Real_vi <op.VI3, outs, ins, asm, opName>,
+  def _vi : VOP3_Real_vi <op.VI3, outs, ins, asm, opName, HasMods>,
             VOP3DisableFields<0, 0, HasMods>;
 }

 multiclass VOP3SI_1_m <vop op, dag outs, dag ins, string asm,
                        list<dag> pattern, string opName, bit HasMods = 1> {

-  def "" : VOP3_Pseudo <outs, ins, pattern, opName>;
+  def "" : VOP3_Pseudo <outs, ins, pattern, opName, HasMods>;

-  def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName>,
+  def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName, HasMods>,
             VOP3DisableFields<0, 0, HasMods>;
   // No VI instruction. This class is for SI only.
 }
@@ -1552,13 +1561,13 @@ multiclass VOP3_2_m <vop op, dag outs, dag ins, string asm,
                      list<dag> pattern, string opName, string revOp,
                      bit HasMods = 1> {

-  def "" : VOP3_Pseudo <outs, ins, pattern, opName>,
+  def "" : VOP3_Pseudo <outs, ins, pattern, opName, HasMods>,
            VOP2_REV<revOp#"_e64", !eq(revOp, opName)>;

-  def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName>,
+  def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName, HasMods>,
             VOP3DisableFields<1, 0, HasMods>;

-  def _vi : VOP3_Real_vi <op.VI3, outs, ins, asm, opName>,
+  def _vi : VOP3_Real_vi <op.VI3, outs, ins, asm, opName, HasMods>,
             VOP3DisableFields<1, 0, HasMods>;
 }
@@ -1566,10 +1575,10 @@ multiclass VOP3SI_2_m <vop op, dag outs, dag ins, string asm,
                        list<dag> pattern, string opName, string revOp,
                        bit HasMods = 1> {

-  def "" : VOP3_Pseudo <outs, ins, pattern, opName>,
+  def "" : VOP3_Pseudo <outs, ins, pattern, opName, HasMods>,
            VOP2_REV<revOp#"_e64", !eq(revOp, opName)>;

-  def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName>,
+  def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName, HasMods>,
             VOP3DisableFields<1, 0, HasMods>;

   // No VI instruction. This class is for SI only.
@@ -1594,19 +1603,19 @@ multiclass VOP3_C_m <vop op, dag outs, dag ins, string asm,
                      bit HasMods, bit defExec,
                      string revOp, list<SchedReadWrite> sched> {

-  def "" : VOP3_Pseudo <outs, ins, pattern, opName>,
+  def "" : VOP3_Pseudo <outs, ins, pattern, opName, HasMods>,
            VOP2_REV<revOp#"_e64", !eq(revOp, opName)> {
     let Defs = !if(defExec, [EXEC], []);
     let SchedRW = sched;
   }

-  def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName>,
+  def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName, HasMods>,
             VOP3DisableFields<1, 0, HasMods> {
     let Defs = !if(defExec, [EXEC], []);
     let SchedRW = sched;
   }

-  def _vi : VOP3_Real_vi <op.VI3, outs, ins, asm, opName>,
+  def _vi : VOP3_Real_vi <op.VI3, outs, ins, asm, opName, HasMods>,
             VOP3DisableFields<1, 0, HasMods> {
     let Defs = !if(defExec, [EXEC], []);
     let SchedRW = sched;
@@ -1900,8 +1909,9 @@ multiclass VOPCX_I64 <vopc op, string opName, string revOp = opName> :
   VOPCX <op, opName, VOPC_I1_I64_I64, COND_NULL, [Write64Bit], revOp>;

 multiclass VOP3_Helper <vop3 op, string opName, dag outs, dag ins, string asm,
-                        list<dag> pat, int NumSrcArgs, bit HasMods> : VOP3_m <
-  op, outs, ins, opName#" "#asm, pat, opName, NumSrcArgs, HasMods
+                        list<dag> pat, int NumSrcArgs, bit HasMods,
+                        bit VOP3Only = 0> : VOP3_m <
+  op, outs, ins, opName#" "#asm, pat, opName, NumSrcArgs, HasMods, VOP3Only
 >;

 multiclass VOPC_CLASS_F32 <vopc op, string opName> :
@@ -1917,7 +1927,8 @@ multiclass VOPCX_CLASS_F64 <vopc op, string opName> :
   VOPCClassInst <op, opName, VOPC_I1_F64_I32, 1, [WriteDoubleAdd]>;

 multiclass VOP3Inst <vop3 op, string opName, VOPProfile P,
-                     SDPatternOperator node = null_frag> : VOP3_Helper <
+                     SDPatternOperator node = null_frag, bit VOP3Only = 0> :
+  VOP3_Helper <
   op, opName, (outs P.DstRC.RegClass:$dst), P.Ins64, P.Asm64,
   !if(!eq(P.NumSrcArgs, 3),
     !if(P.HasModifiers,
@@ -1941,7 +1952,7 @@ multiclass VOP3Inst <vop3 op, string opName, VOPProfile P,
             (node (P.Src0VT (VOP3Mods0 P.Src0VT:$src0, i32:$src0_modifiers,
                              i1:$clamp, i32:$omod))))],
       [(set P.DstVT:$dst, (node P.Src0VT:$src0))]))),
-  P.NumSrcArgs, P.HasModifiers
+  P.NumSrcArgs, P.HasModifiers, VOP3Only
 >;

 // Special case for v_div_fmas_{f32|f64}, since it seems to be the

View File

@@ -1727,23 +1727,23 @@ let SchedRW = [WriteDoubleAdd] in {
 let isCommutable = 1 in {

 defm V_ADD_F64 : VOP3Inst <vop3<0x164, 0x280>, "v_add_f64",
-  VOP_F64_F64_F64, fadd
+  VOP_F64_F64_F64, fadd, 1
 >;

 defm V_MUL_F64 : VOP3Inst <vop3<0x165, 0x281>, "v_mul_f64",
-  VOP_F64_F64_F64, fmul
+  VOP_F64_F64_F64, fmul, 1
 >;

 defm V_MIN_F64 : VOP3Inst <vop3<0x166, 0x282>, "v_min_f64",
-  VOP_F64_F64_F64, fminnum
+  VOP_F64_F64_F64, fminnum, 1
 >;

 defm V_MAX_F64 : VOP3Inst <vop3<0x167, 0x283>, "v_max_f64",
-  VOP_F64_F64_F64, fmaxnum
+  VOP_F64_F64_F64, fmaxnum, 1
 >;
 } // End isCommutable = 1

 defm V_LDEXP_F64 : VOP3Inst <vop3<0x168, 0x284>, "v_ldexp_f64",
-  VOP_F64_F64_I32, AMDGPUldexp
+  VOP_F64_F64_I32, AMDGPUldexp, 1
 >;
 } // End let SchedRW = [WriteDoubleAdd]

View File

@@ -198,8 +198,7 @@ v_subrev_f32 v1, v3, s5

 v_mac_legacy_f32 v1, v3, s5
 // SICI: v_mac_legacy_f32_e64 v1, v3, s5 ; encoding: [0x01,0x00,0x0c,0xd2,0x03,0x0b,0x00,0x00]
-// FIXME: The error message should be: error: instruction not supported on this GPU
-// NOVI: error: invalid operand for instruction
+// NOVI: error: instruction not supported on this GPU

 v_mul_legacy_f32 v1, v3, s5
 // SICI: v_mul_legacy_f32_e64 v1, v3, s5 ; encoding: [0x01,0x00,0x0e,0xd2,0x03,0x0b,0x00,0x00]
@@ -223,7 +222,51 @@ v_mad_legacy_f32 v2, v4, v6, v8
 // SICI: v_mad_legacy_f32 v2, v4, v6, v8 ; encoding: [0x02,0x00,0x80,0xd2,0x04,0x0d,0x22,0x04]
 // VI: v_mad_legacy_f32 v2, v4, v6, v8 ; encoding: [0x02,0x00,0xc0,0xd1,0x04,0x0d,0x22,0x04]
+
+v_add_f64 v[0:1], v[2:3], v[5:6]
+// SICI: v_add_f64 v[0:1], v[2:3], v[5:6] ; encoding: [0x00,0x00,0xc8,0xd2,0x02,0x0b,0x02,0x00]
+// VI: v_add_f64 v[0:1], v[2:3], v[5:6] ; encoding: [0x00,0x00,0x80,0xd2,0x02,0x0b,0x02,0x00]
+
+v_add_f64_e64 v[0:1], v[2:3], v[5:6]
+// SICI: v_add_f64 v[0:1], v[2:3], v[5:6] ; encoding: [0x00,0x00,0xc8,0xd2,0x02,0x0b,0x02,0x00]
+// VI: v_add_f64 v[0:1], v[2:3], v[5:6] ; encoding: [0x00,0x00,0x80,0xd2,0x02,0x0b,0x02,0x00]
+
+v_add_f64 v[0:1], -v[2:3], v[5:6]
+// SICI: v_add_f64 v[0:1], -v[2:3], v[5:6] ; encoding: [0x00,0x00,0xc8,0xd2,0x02,0x0b,0x02,0x20]
+// VI: v_add_f64 v[0:1], -v[2:3], v[5:6] ; encoding: [0x00,0x00,0x80,0xd2,0x02,0x0b,0x02,0x20]
+
+v_add_f64_e64 v[0:1], -v[2:3], v[5:6]
+// SICI: v_add_f64 v[0:1], -v[2:3], v[5:6] ; encoding: [0x00,0x00,0xc8,0xd2,0x02,0x0b,0x02,0x20]
+// VI: v_add_f64 v[0:1], -v[2:3], v[5:6] ; encoding: [0x00,0x00,0x80,0xd2,0x02,0x0b,0x02,0x20]
+
+v_add_f64 v[0:1], v[2:3], -v[5:6]
+// SICI: v_add_f64 v[0:1], v[2:3], -v[5:6] ; encoding: [0x00,0x00,0xc8,0xd2,0x02,0x0b,0x02,0x40]
+// VI: v_add_f64 v[0:1], v[2:3], -v[5:6] ; encoding: [0x00,0x00,0x80,0xd2,0x02,0x0b,0x02,0x40]
+
+v_add_f64_e64 v[0:1], v[2:3], -v[5:6]
+// SICI: v_add_f64 v[0:1], v[2:3], -v[5:6] ; encoding: [0x00,0x00,0xc8,0xd2,0x02,0x0b,0x02,0x40]
+// VI: v_add_f64 v[0:1], v[2:3], -v[5:6] ; encoding: [0x00,0x00,0x80,0xd2,0x02,0x0b,0x02,0x40]
+
+v_add_f64 v[0:1], |v[2:3]|, v[5:6]
+// SICI: v_add_f64 v[0:1], |v[2:3]|, v[5:6] ; encoding: [0x00,0x01,0xc8,0xd2,0x02,0x0b,0x02,0x00]
+// VI: v_add_f64 v[0:1], |v[2:3]|, v[5:6] ; encoding: [0x00,0x01,0x80,0xd2,0x02,0x0b,0x02,0x00]
+
+v_add_f64_e64 v[0:1], |v[2:3]|, v[5:6]
+// SICI: v_add_f64 v[0:1], |v[2:3]|, v[5:6] ; encoding: [0x00,0x01,0xc8,0xd2,0x02,0x0b,0x02,0x00]
+// VI: v_add_f64 v[0:1], |v[2:3]|, v[5:6] ; encoding: [0x00,0x01,0x80,0xd2,0x02,0x0b,0x02,0x00]
+
+v_add_f64 v[0:1], v[2:3], |v[5:6]|
+// SICI: v_add_f64 v[0:1], v[2:3], |v[5:6]| ; encoding: [0x00,0x02,0xc8,0xd2,0x02,0x0b,0x02,0x00]
+// VI: v_add_f64 v[0:1], v[2:3], |v[5:6]| ; encoding: [0x00,0x02,0x80,0xd2,0x02,0x0b,0x02,0x00]
+
+v_add_f64_e64 v[0:1], v[2:3], |v[5:6]|
+// SICI: v_add_f64 v[0:1], v[2:3], |v[5:6]| ; encoding: [0x00,0x02,0xc8,0xd2,0x02,0x0b,0x02,0x00]
+// VI: v_add_f64 v[0:1], v[2:3], |v[5:6]| ; encoding: [0x00,0x02,0x80,0xd2,0x02,0x0b,0x02,0x00]
+
+v_add_f64 v[0:1], -v[2:3], |v[5:6]| clamp mul:4
+// SICI: v_add_f64 v[0:1], -v[2:3], |v[5:6]| clamp mul:4 ; encoding: [0x00,0x0a,0xc8,0xd2,0x02,0x0b,0x02,0x30]
+// VI: v_add_f64 v[0:1], -v[2:3], |v[5:6]| clamp mul:4 ; encoding: [0x00,0x82,0x80,0xd2,0x02,0x0b,0x02,0x30]
+
+v_add_f64_e64 v[0:1], -v[2:3], |v[5:6]| clamp mul:4
+// SICI: v_add_f64 v[0:1], -v[2:3], |v[5:6]| clamp mul:4 ; encoding: [0x00,0x0a,0xc8,0xd2,0x02,0x0b,0x02,0x30]
+// VI: v_add_f64 v[0:1], -v[2:3], |v[5:6]| clamp mul:4 ; encoding: [0x00,0x82,0x80,0xd2,0x02,0x0b,0x02,0x30]