[AArch64][SVE] Asm: Add AsmOperand classes for SVE gather/scatter addressing modes.

This patch adds parsing support for 'vector + shift/extend' and
corresponding asm operand classes, needed for implementing SVE's
gather/scatter addressing modes.

The added combinations of vector (ZPR) and Shift/Extend are:

Unscaled:
  ZPR64ExtLSL8:           signed 64-bit offsets  (z0.d)
  ZPR32ExtUXTW8:        unsigned 32-bit offsets  (z0.s, uxtw)
  ZPR32ExtSXTW8:          signed 32-bit offsets  (z0.s, sxtw)

Unpacked and unscaled:
  ZPR64ExtUXTW8:        unsigned 32-bit offsets  (z0.d, uxtw)
  ZPR64ExtSXTW8:          signed 32-bit offsets  (z0.d, sxtw)

Unpacked and scaled:
  ZPR64ExtUXTW<scale>:  unsigned 32-bit offsets  (z0.d, uxtw #<shift>)
  ZPR64ExtSXTW<scale>:    signed 32-bit offsets  (z0.d, sxtw #<shift>)

Scaled:
  ZPR32ExtUXTW<scale>:  unsigned 32-bit offsets  (z0.s, uxtw #<shift>)
  ZPR32ExtSXTW<scale>:    signed 32-bit offsets  (z0.s, sxtw #<shift>)
  ZPR64ExtLSL<scale>:   unsigned 64-bit offsets  (z0.d,  lsl #<shift>)
  ZPR64ExtLSL<scale>:     signed 64-bit offsets  (z0.d,  lsl #<shift>)
                        (a single ZPR64ExtLSL<scale> operand class covers both
                         the signed and unsigned cases, since 'lsl' does not
                         distinguish signedness)


Patch [1/3] in series to add support for SVE's gather load instructions
that use scalar+vector addressing modes:
- Patch [1/3]: https://reviews.llvm.org/D45951
- Patch [2/3]: https://reviews.llvm.org/D46023
- Patch [3/3]: https://reviews.llvm.org/D45958

Reviewers: fhahn, rengolin, samparker, SjoerdMeijer, t.p.northover, echristo, evandro, javed.absar

Reviewed By: fhahn

Differential Revision: https://reviews.llvm.org/D45951

llvm-svn: 330805
This commit is contained in:
Sander de Smalen 2018-04-25 09:26:47 +00:00
parent bec2a7c4eb
commit eb896b148b
4 changed files with 150 additions and 8 deletions

View File

@ -821,7 +821,7 @@ class ZPRAsmOperand <string name, int Width>: AsmOperandClass {
let PredicateMethod = "isSVEVectorRegOfWidth<"
# Width # ", AArch64::ZPRRegClassID>";
let RenderMethod = "addRegOperands";
let ParserMethod = "tryParseSVEDataVector<"
let ParserMethod = "tryParseSVEDataVector<false, "
# !if(!eq(Width, 0), "false", "true") # ">";
}
@ -934,6 +934,64 @@ def ZZZZ_d : RegisterOperand<ZPR4, "printTypedVectorList<0,'d'>"> {
let ParserMatchClass = ZPRVectorList<64, 4>;
}
// Asm operand class for an SVE data vector with a shift/extend suffix, e.g.
// "z0.s, uxtw #2", as used by gather/scatter addressing modes.
//   ShiftExtend: extend kind as an AArch64_AM enumerator name (UXTW/SXTW/LSL).
//   RegWidth:    element width of the vector (32 or 64).
//   Scale:       element size (in bits) being addressed; the required shift
//                amount is log2(Scale / 8) — see the predicate below.
class ZPRExtendAsmOperand<string ShiftExtend, int RegWidth, int Scale>
: AsmOperandClass {
let Name = "ZPRExtend" # ShiftExtend # RegWidth # Scale;
// Instantiates AArch64Operand::isSVEVectorRegWithShiftExtend, which checks
// the register class/element width and the parsed shift/extend pair.
let PredicateMethod = "isSVEVectorRegWithShiftExtend<"
# RegWidth # ", AArch64::ZPRRegClassID, "
# "AArch64_AM::" # ShiftExtend # ", "
# Scale # ">";
// Diagnostic name, e.g. InvalidZPR32UXTW8; matched by the Match_Invalid*
// cases added in AArch64AsmParser::showMatchError in this same patch.
let DiagnosticType = "InvalidZPR" # RegWidth # ShiftExtend # Scale;
let RenderMethod = "addRegOperands";
// <ParseShiftExtend=true, ParseSuffix=true>: parse both the ".s"/".d"
// element suffix and the trailing ", <extend> [#<shift>]".
let ParserMethod = "tryParseSVEDataVector<true, true>";
}
// RegisterOperand for an SVE data vector printed with its shift/extend, e.g.
// "z0.d, sxtw #3".
//   SignExtend: selects "true"/"false" for printRegWithShiftExtend's
//               SignExtend template argument.
//   IsLSL:      lsl offsets are 64-bit ('x' source-register kind); uxtw/sxtw
//               offsets are 32-bit ('w').
//   Repr:       extend spelling used to locate the matching
//               ZPR<RegWidth>AsmOpndExt<Repr><Scale> asm operand class.
//   RegWidth:   element width; picks the 's' or 'd' suffix when printing.
//   Scale:      element size in bits, forwarded to the print method.
class ZPRExtendRegisterOperand<bit SignExtend, bit IsLSL, string Repr,
int RegWidth, int Scale> : RegisterOperand<ZPR> {
// Look up the AsmOperandClass defined via ZPRExtendAsmOperand by name.
let ParserMatchClass =
!cast<AsmOperandClass>("ZPR" # RegWidth # "AsmOpndExt" # Repr # Scale);
let PrintMethod = "printRegWithShiftExtend<"
# !if(SignExtend, "true", "false") # ", "
# Scale # ", "
# !if(IsLSL, "'x'", "'w'") # ", "
# !if(!eq(RegWidth, 32), "'s'", "'d'") # ">";
}
// Instantiate the uxtw/sxtw operand classes and register operands for both
// 32-bit and 64-bit element vectors, at each supported element size
// (8/16/32/64 bits — i.e. shift amounts #0..#3).
foreach RegWidth = [32, 64] in {
// UXTW(8|16|32|64): zero-extended 32-bit offsets.
def ZPR#RegWidth#AsmOpndExtUXTW8 : ZPRExtendAsmOperand<"UXTW", RegWidth, 8>;
def ZPR#RegWidth#AsmOpndExtUXTW16 : ZPRExtendAsmOperand<"UXTW", RegWidth, 16>;
def ZPR#RegWidth#AsmOpndExtUXTW32 : ZPRExtendAsmOperand<"UXTW", RegWidth, 32>;
def ZPR#RegWidth#AsmOpndExtUXTW64 : ZPRExtendAsmOperand<"UXTW", RegWidth, 64>;
// SignExtend=0, IsLSL=0 for uxtw.
def ZPR#RegWidth#ExtUXTW8 : ZPRExtendRegisterOperand<0b0, 0b0, "UXTW", RegWidth, 8>;
def ZPR#RegWidth#ExtUXTW16 : ZPRExtendRegisterOperand<0b0, 0b0, "UXTW", RegWidth, 16>;
def ZPR#RegWidth#ExtUXTW32 : ZPRExtendRegisterOperand<0b0, 0b0, "UXTW", RegWidth, 32>;
def ZPR#RegWidth#ExtUXTW64 : ZPRExtendRegisterOperand<0b0, 0b0, "UXTW", RegWidth, 64>;
// SXTW(8|16|32|64): sign-extended 32-bit offsets.
def ZPR#RegWidth#AsmOpndExtSXTW8 : ZPRExtendAsmOperand<"SXTW", RegWidth, 8>;
def ZPR#RegWidth#AsmOpndExtSXTW16 : ZPRExtendAsmOperand<"SXTW", RegWidth, 16>;
def ZPR#RegWidth#AsmOpndExtSXTW32 : ZPRExtendAsmOperand<"SXTW", RegWidth, 32>;
def ZPR#RegWidth#AsmOpndExtSXTW64 : ZPRExtendAsmOperand<"SXTW", RegWidth, 64>;
// SignExtend=1, IsLSL=0 for sxtw.
def ZPR#RegWidth#ExtSXTW8 : ZPRExtendRegisterOperand<0b1, 0b0, "SXTW", RegWidth, 8>;
def ZPR#RegWidth#ExtSXTW16 : ZPRExtendRegisterOperand<0b1, 0b0, "SXTW", RegWidth, 16>;
def ZPR#RegWidth#ExtSXTW32 : ZPRExtendRegisterOperand<0b1, 0b0, "SXTW", RegWidth, 32>;
def ZPR#RegWidth#ExtSXTW64 : ZPRExtendRegisterOperand<0b1, 0b0, "SXTW", RegWidth, 64>;
}
// LSL(8|16|32|64): plain (unextended) 64-bit vector offsets, optionally
// scaled by the element size. Only defined for 64-bit element vectors —
// 32-bit offsets always go through uxtw/sxtw above.
def ZPR64AsmOpndExtLSL8 : ZPRExtendAsmOperand<"LSL", 64, 8>;
def ZPR64AsmOpndExtLSL16 : ZPRExtendAsmOperand<"LSL", 64, 16>;
def ZPR64AsmOpndExtLSL32 : ZPRExtendAsmOperand<"LSL", 64, 32>;
def ZPR64AsmOpndExtLSL64 : ZPRExtendAsmOperand<"LSL", 64, 64>;
// SignExtend=0, IsLSL=1: printed as "z<n>.d[, lsl #<shift>]".
def ZPR64ExtLSL8 : ZPRExtendRegisterOperand<0b0, 0b1, "LSL", 64, 8>;
def ZPR64ExtLSL16 : ZPRExtendRegisterOperand<0b0, 0b1, "LSL", 64, 16>;
def ZPR64ExtLSL32 : ZPRExtendRegisterOperand<0b0, 0b1, "LSL", 64, 32>;
def ZPR64ExtLSL64 : ZPRExtendRegisterOperand<0b0, 0b1, "LSL", 64, 64>;
class GPR64ShiftExtendAsmOperand <string AsmOperandName, int Scale, string RegClass> : AsmOperandClass {
let Name = AsmOperandName # Scale;
let PredicateMethod = "isGPR64WithShiftExtend<AArch64::"#RegClass#"RegClassID, " # Scale # ">";
@ -944,7 +1002,7 @@ class GPR64ShiftExtendAsmOperand <string AsmOperandName, int Scale, string RegCl
class GPR64ExtendRegisterOperand<string Name, int Scale, RegisterClass RegClass> : RegisterOperand<RegClass>{
let ParserMatchClass = !cast<AsmOperandClass>(Name);
let PrintMethod = "printRegWithShiftExtend<false, " # Scale # ", 'x'>";
let PrintMethod = "printRegWithShiftExtend<false, " # Scale # ", 'x', 0>";
}
foreach Scale = [8, 16, 32, 64] in {

View File

@ -140,7 +140,7 @@ private:
OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands);
template <bool ParseShiftExtend>
OperandMatchResultTy tryParseGPROperand(OperandVector &Operands);
template <bool ParseSuffix>
template <bool ParseShiftExtend, bool ParseSuffix>
OperandMatchResultTy tryParseSVEDataVector(OperandVector &Operands);
OperandMatchResultTy tryParseSVEPredicateVector(OperandVector &Operands);
template <RegKind VectorKind>
@ -857,6 +857,14 @@ public:
(ElementWidth == 0 || Reg.ElementWidth == ElementWidth);
}
// Predicate instantiated by the ZPRExtendAsmOperand TableGen class: true when
// this operand is an SVE data vector register of the given element width and
// register class, carrying exactly the expected shift/extend.
//   ElementWidth:  required element width of the vector register.
//   Class:         required register class ID (AArch64::ZPRRegClassID).
//   ShiftExtendTy: required extend kind (UXTW/SXTW/LSL).
//   ShiftWidth:    element size in bits; the shift amount must equal
//                  log2(ShiftWidth / 8), i.e. #0 for 8-bit elements up to
//                  #3 for 64-bit elements.
template <int ElementWidth, unsigned Class,
AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth>
bool isSVEVectorRegWithShiftExtend() const {
return Kind == k_Register && isSVEVectorRegOfWidth<ElementWidth, Class>() &&
ShiftExtendTy == getShiftExtendType() &&
getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
}
bool isGPR32as64() const {
return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
@ -3839,6 +3847,38 @@ bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
return Error(Loc, "register must be x0..x30 with required shift 'lsl #2'");
case Match_InvalidGPR64NoXZRshifted64:
return Error(Loc, "register must be x0..x30 with required shift 'lsl #3'");
case Match_InvalidZPR32UXTW8:
case Match_InvalidZPR32SXTW8:
return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw)'");
case Match_InvalidZPR32UXTW16:
case Match_InvalidZPR32SXTW16:
return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #1'");
case Match_InvalidZPR32UXTW32:
case Match_InvalidZPR32SXTW32:
return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #2'");
case Match_InvalidZPR32UXTW64:
case Match_InvalidZPR32SXTW64:
return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #3'");
case Match_InvalidZPR64UXTW8:
case Match_InvalidZPR64SXTW8:
return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (uxtw|sxtw)'");
case Match_InvalidZPR64UXTW16:
case Match_InvalidZPR64SXTW16:
return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #1'");
case Match_InvalidZPR64UXTW32:
case Match_InvalidZPR64SXTW32:
return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #2'");
case Match_InvalidZPR64UXTW64:
case Match_InvalidZPR64SXTW64:
return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #3'");
case Match_InvalidZPR64LSL8:
return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d'");
case Match_InvalidZPR64LSL16:
return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #1'");
case Match_InvalidZPR64LSL32:
return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #2'");
case Match_InvalidZPR64LSL64:
return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #3'");
case Match_InvalidSVEPattern:
return Error(Loc, "invalid predicate pattern");
case Match_InvalidSVEPredicateAnyReg:
@ -4292,6 +4332,26 @@ bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
case Match_InvalidGPR64NoXZRshifted16:
case Match_InvalidGPR64NoXZRshifted32:
case Match_InvalidGPR64NoXZRshifted64:
case Match_InvalidZPR32UXTW8:
case Match_InvalidZPR32UXTW16:
case Match_InvalidZPR32UXTW32:
case Match_InvalidZPR32UXTW64:
case Match_InvalidZPR32SXTW8:
case Match_InvalidZPR32SXTW16:
case Match_InvalidZPR32SXTW32:
case Match_InvalidZPR32SXTW64:
case Match_InvalidZPR64UXTW8:
case Match_InvalidZPR64SXTW8:
case Match_InvalidZPR64UXTW16:
case Match_InvalidZPR64SXTW16:
case Match_InvalidZPR64UXTW32:
case Match_InvalidZPR64SXTW32:
case Match_InvalidZPR64UXTW64:
case Match_InvalidZPR64SXTW64:
case Match_InvalidZPR64LSL8:
case Match_InvalidZPR64LSL16:
case Match_InvalidZPR64LSL32:
case Match_InvalidZPR64LSL64:
case Match_InvalidSVEPredicateAnyReg:
case Match_InvalidSVEPattern:
case Match_InvalidSVEPredicateBReg:
@ -4897,7 +4957,7 @@ AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {
return MatchOperand_Success;
}
template <bool ParseSuffix>
template <bool ParseShiftExtend, bool ParseSuffix>
OperandMatchResultTy
AArch64AsmParser::tryParseSVEDataVector(OperandVector &Operands) {
const SMLoc S = getLoc();
@ -4919,9 +4979,29 @@ AArch64AsmParser::tryParseSVEDataVector(OperandVector &Operands) {
return MatchOperand_NoMatch;
unsigned ElementWidth = KindRes->second;
// No shift/extend is the default.
if (!ParseShiftExtend || getParser().getTok().isNot(AsmToken::Comma)) {
Operands.push_back(AArch64Operand::CreateVectorReg(
RegNum, RegKind::SVEDataVector, ElementWidth, S, S, getContext()));
return MatchOperand_Success;
}
// Eat the comma
getParser().Lex();
// Match the shift
SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
Res = tryParseOptionalShiftExtend(ExtOpnd);
if (Res != MatchOperand_Success)
return Res;
auto Ext = static_cast<AArch64Operand *>(ExtOpnd.back().get());
Operands.push_back(AArch64Operand::CreateVectorReg(
RegNum, RegKind::SVEDataVector, ElementWidth, S, S,
getContext()));
RegNum, RegKind::SVEDataVector, ElementWidth, S, Ext->getEndLoc(),
getContext(), Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
Ext->hasShiftExtendAmount()));
return MatchOperand_Success;
}

View File

@ -991,12 +991,16 @@ void AArch64InstPrinter::printMemExtend(const MCInst *MI, unsigned OpNum,
printMemExtendImpl(SignExtend, DoShift, Width, SrcRegKind, O);
}
template <bool SignExtend, int ExtWidth, char SrcRegKind>
template <bool SignExtend, int ExtWidth, char SrcRegKind, char Suffix>
void AArch64InstPrinter::printRegWithShiftExtend(const MCInst *MI,
unsigned OpNum,
const MCSubtargetInfo &STI,
raw_ostream &O) {
printOperand(MI, OpNum, STI, O);
if (Suffix == 's' || Suffix == 'd')
O << '.' << Suffix;
else
assert(Suffix == 0 && "Unsupported suffix size");
bool DoShift = ExtWidth != 8;
if (SignExtend || DoShift || SrcRegKind == 'w') {

View File

@ -90,7 +90,7 @@ protected:
const MCSubtargetInfo &STI, raw_ostream &O) {
printMemExtend(MI, OpNum, O, SrcRegKind, Width);
}
template <bool SignedExtend, int ExtWidth, char SrcRegKind>
template <bool SignedExtend, int ExtWidth, char SrcRegKind, char Suffix>
void printRegWithShiftExtend(const MCInst *MI, unsigned OpNum,
const MCSubtargetInfo &STI, raw_ostream &O);
void printCondCode(const MCInst *MI, unsigned OpNum,