forked from OSchip/llvm-project
[X86] Fix disassembling of EVEX instructions to stop accidentally decoding the SIB index register as an XMM/YMM/ZMM register.
This introduces a new operand type to encode whether the index register should be XMM/YMM/ZMM. And new code to fixup the results created by readSIB. This has the nice effect of removing a bunch of code that hard coded the name of every GATHER and SCATTER instruction to map the index type. This fixes PR32807. llvm-svn: 316273
This commit is contained in:
parent
cb028c7321
commit
ca2382d809
|
@ -764,102 +764,6 @@ static bool translateRMMemory(MCInst &mcInst, InternalInstruction &insn,
|
|||
baseReg = MCOperand::createReg(0);
|
||||
}
|
||||
|
||||
// Check whether we are handling VSIB addressing mode for GATHER.
|
||||
// If sibIndex was set to SIB_INDEX_NONE, index offset is 4 and
|
||||
// we should use SIB_INDEX_XMM4|YMM4 for VSIB.
|
||||
// I don't see a way to get the correct IndexReg in readSIB:
|
||||
// We can tell whether it is VSIB or SIB after instruction ID is decoded,
|
||||
// but instruction ID may not be decoded yet when calling readSIB.
|
||||
uint32_t Opcode = mcInst.getOpcode();
|
||||
bool IndexIs128 = (Opcode == X86::VGATHERDPDrm ||
|
||||
Opcode == X86::VGATHERDPDYrm ||
|
||||
Opcode == X86::VGATHERQPDrm ||
|
||||
Opcode == X86::VGATHERDPSrm ||
|
||||
Opcode == X86::VGATHERQPSrm ||
|
||||
Opcode == X86::VPGATHERDQrm ||
|
||||
Opcode == X86::VPGATHERDQYrm ||
|
||||
Opcode == X86::VPGATHERQQrm ||
|
||||
Opcode == X86::VPGATHERDDrm ||
|
||||
Opcode == X86::VPGATHERQDrm ||
|
||||
Opcode == X86::VGATHERDPDZ128rm ||
|
||||
Opcode == X86::VGATHERDPDZ256rm ||
|
||||
Opcode == X86::VGATHERDPSZ128rm ||
|
||||
Opcode == X86::VGATHERQPDZ128rm ||
|
||||
Opcode == X86::VGATHERQPSZ128rm ||
|
||||
Opcode == X86::VPGATHERDDZ128rm ||
|
||||
Opcode == X86::VPGATHERDQZ128rm ||
|
||||
Opcode == X86::VPGATHERDQZ256rm ||
|
||||
Opcode == X86::VPGATHERQDZ128rm ||
|
||||
Opcode == X86::VPGATHERQQZ128rm ||
|
||||
Opcode == X86::VSCATTERDPDZ128mr ||
|
||||
Opcode == X86::VSCATTERDPDZ256mr ||
|
||||
Opcode == X86::VSCATTERDPSZ128mr ||
|
||||
Opcode == X86::VSCATTERQPDZ128mr ||
|
||||
Opcode == X86::VSCATTERQPSZ128mr ||
|
||||
Opcode == X86::VPSCATTERDDZ128mr ||
|
||||
Opcode == X86::VPSCATTERDQZ128mr ||
|
||||
Opcode == X86::VPSCATTERDQZ256mr ||
|
||||
Opcode == X86::VPSCATTERQDZ128mr ||
|
||||
Opcode == X86::VPSCATTERQQZ128mr);
|
||||
bool IndexIs256 = (Opcode == X86::VGATHERQPDYrm ||
|
||||
Opcode == X86::VGATHERDPSYrm ||
|
||||
Opcode == X86::VGATHERQPSYrm ||
|
||||
Opcode == X86::VGATHERDPDZrm ||
|
||||
Opcode == X86::VPGATHERDQZrm ||
|
||||
Opcode == X86::VPGATHERQQYrm ||
|
||||
Opcode == X86::VPGATHERDDYrm ||
|
||||
Opcode == X86::VPGATHERQDYrm ||
|
||||
Opcode == X86::VGATHERDPSZ256rm ||
|
||||
Opcode == X86::VGATHERQPDZ256rm ||
|
||||
Opcode == X86::VGATHERQPSZ256rm ||
|
||||
Opcode == X86::VPGATHERDDZ256rm ||
|
||||
Opcode == X86::VPGATHERQQZ256rm ||
|
||||
Opcode == X86::VPGATHERQDZ256rm ||
|
||||
Opcode == X86::VSCATTERDPDZmr ||
|
||||
Opcode == X86::VPSCATTERDQZmr ||
|
||||
Opcode == X86::VSCATTERDPSZ256mr ||
|
||||
Opcode == X86::VSCATTERQPDZ256mr ||
|
||||
Opcode == X86::VSCATTERQPSZ256mr ||
|
||||
Opcode == X86::VPSCATTERDDZ256mr ||
|
||||
Opcode == X86::VPSCATTERQQZ256mr ||
|
||||
Opcode == X86::VPSCATTERQDZ256mr ||
|
||||
Opcode == X86::VGATHERPF0DPDm ||
|
||||
Opcode == X86::VGATHERPF1DPDm ||
|
||||
Opcode == X86::VSCATTERPF0DPDm ||
|
||||
Opcode == X86::VSCATTERPF1DPDm);
|
||||
bool IndexIs512 = (Opcode == X86::VGATHERQPDZrm ||
|
||||
Opcode == X86::VGATHERDPSZrm ||
|
||||
Opcode == X86::VGATHERQPSZrm ||
|
||||
Opcode == X86::VPGATHERQQZrm ||
|
||||
Opcode == X86::VPGATHERDDZrm ||
|
||||
Opcode == X86::VPGATHERQDZrm ||
|
||||
Opcode == X86::VSCATTERQPDZmr ||
|
||||
Opcode == X86::VSCATTERDPSZmr ||
|
||||
Opcode == X86::VSCATTERQPSZmr ||
|
||||
Opcode == X86::VPSCATTERQQZmr ||
|
||||
Opcode == X86::VPSCATTERDDZmr ||
|
||||
Opcode == X86::VPSCATTERQDZmr ||
|
||||
Opcode == X86::VGATHERPF0DPSm ||
|
||||
Opcode == X86::VGATHERPF0QPDm ||
|
||||
Opcode == X86::VGATHERPF0QPSm ||
|
||||
Opcode == X86::VGATHERPF1DPSm ||
|
||||
Opcode == X86::VGATHERPF1QPDm ||
|
||||
Opcode == X86::VGATHERPF1QPSm ||
|
||||
Opcode == X86::VSCATTERPF0DPSm ||
|
||||
Opcode == X86::VSCATTERPF0QPDm ||
|
||||
Opcode == X86::VSCATTERPF0QPSm ||
|
||||
Opcode == X86::VSCATTERPF1DPSm ||
|
||||
Opcode == X86::VSCATTERPF1QPDm ||
|
||||
Opcode == X86::VSCATTERPF1QPSm);
|
||||
if (IndexIs128 || IndexIs256 || IndexIs512) {
|
||||
unsigned IndexOffset = insn.sibIndex -
|
||||
(insn.addressSize == 8 ? SIB_INDEX_RAX:SIB_INDEX_EAX);
|
||||
SIBIndex IndexBase = IndexIs512 ? SIB_INDEX_ZMM0 :
|
||||
IndexIs256 ? SIB_INDEX_YMM0 : SIB_INDEX_XMM0;
|
||||
insn.sibIndex = (SIBIndex)(IndexBase +
|
||||
(insn.sibIndex == SIB_INDEX_NONE ? 4 : IndexOffset));
|
||||
}
|
||||
|
||||
if (insn.sibIndex != SIB_INDEX_NONE) {
|
||||
switch (insn.sibIndex) {
|
||||
default:
|
||||
|
@ -987,6 +891,9 @@ static bool translateRM(MCInst &mcInst, const OperandSpecifier &operand,
|
|||
case TYPE_BNDR:
|
||||
return translateRMRegister(mcInst, insn);
|
||||
case TYPE_M:
|
||||
case TYPE_MVSIBX:
|
||||
case TYPE_MVSIBY:
|
||||
case TYPE_MVSIBZ:
|
||||
return translateRMMemory(mcInst, insn, Dis);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1156,7 +1156,6 @@ static int getID(struct InternalInstruction* insn, const void *miiArg) {
|
|||
* @return - 0 if the SIB byte was successfully read; nonzero otherwise.
|
||||
*/
|
||||
static int readSIB(struct InternalInstruction* insn) {
|
||||
SIBIndex sibIndexBase = SIB_INDEX_NONE;
|
||||
SIBBase sibBaseBase = SIB_BASE_NONE;
|
||||
uint8_t index, base;
|
||||
|
||||
|
@ -1172,11 +1171,11 @@ static int readSIB(struct InternalInstruction* insn) {
|
|||
dbgprintf(insn, "SIB-based addressing doesn't work in 16-bit mode");
|
||||
return -1;
|
||||
case 4:
|
||||
sibIndexBase = SIB_INDEX_EAX;
|
||||
insn->sibIndexBase = SIB_INDEX_EAX;
|
||||
sibBaseBase = SIB_BASE_EAX;
|
||||
break;
|
||||
case 8:
|
||||
sibIndexBase = SIB_INDEX_RAX;
|
||||
insn->sibIndexBase = SIB_INDEX_RAX;
|
||||
sibBaseBase = SIB_BASE_RAX;
|
||||
break;
|
||||
}
|
||||
|
@ -1186,26 +1185,10 @@ static int readSIB(struct InternalInstruction* insn) {
|
|||
|
||||
index = indexFromSIB(insn->sib) | (xFromREX(insn->rexPrefix) << 3);
|
||||
|
||||
// FIXME: The fifth bit (bit index 4) is only to be used for instructions
|
||||
// that understand VSIB indexing. ORing the bit in here is mildly dangerous
|
||||
// because performing math on an 'enum SIBIndex' can produce garbage.
|
||||
// Excluding the "none" value, it should cover 6 spaces of register names:
|
||||
// - 16 possibilities for 16-bit GPR starting at SIB_INDEX_BX_SI
|
||||
// - 16 possibilities for 32-bit GPR starting at SIB_INDEX_EAX
|
||||
// - 16 possibilities for 64-bit GPR starting at SIB_INDEX_RAX
|
||||
// - 32 possibilities for each of XMM, YMM, ZMM registers
|
||||
// When sibIndexBase gets assigned SIB_INDEX_RAX as it does in 64-bit mode,
|
||||
// summing in a fully decoded index between 0 and 31 can end up with a value
|
||||
// that looks like something in the low half of the XMM range.
|
||||
// translateRMMemory() tries to reverse the damage, with only partial success,
|
||||
// as evidenced by known bugs in "test/MC/Disassembler/X86/x86-64.txt"
|
||||
if (insn->vectorExtensionType == TYPE_EVEX)
|
||||
index |= v2FromEVEX4of4(insn->vectorExtensionPrefix[3]) << 4;
|
||||
|
||||
if (index == 0x4) {
|
||||
insn->sibIndex = SIB_INDEX_NONE;
|
||||
} else {
|
||||
insn->sibIndex = (SIBIndex)(sibIndexBase + index);
|
||||
insn->sibIndex = (SIBIndex)(insn->sibIndexBase + index);
|
||||
}
|
||||
|
||||
insn->sibScale = 1 << scaleFromSIB(insn->sib);
|
||||
|
@ -1481,6 +1464,12 @@ static int readModRM(struct InternalInstruction* insn) {
|
|||
if (index > 3) \
|
||||
*valid = 0; \
|
||||
return prefix##_BND0 + index; \
|
||||
case TYPE_MVSIBX: \
|
||||
return prefix##_XMM0 + index; \
|
||||
case TYPE_MVSIBY: \
|
||||
return prefix##_YMM0 + index; \
|
||||
case TYPE_MVSIBZ: \
|
||||
return prefix##_ZMM0 + index; \
|
||||
} \
|
||||
}
|
||||
|
||||
|
@ -1536,7 +1525,6 @@ static int fixupReg(struct InternalInstruction *insn,
|
|||
return -1;
|
||||
break;
|
||||
CASE_ENCODING_RM:
|
||||
CASE_ENCODING_VSIB:
|
||||
if (insn->eaBase >= insn->eaRegBase) {
|
||||
insn->eaBase = (EABase)fixupRMValue(insn,
|
||||
(OperandType)op->type,
|
||||
|
@ -1734,8 +1722,35 @@ static int readOperands(struct InternalInstruction* insn) {
|
|||
needVVVV = hasVVVV & ((insn->vvvv & 0xf) != 0);
|
||||
if (readModRM(insn))
|
||||
return -1;
|
||||
if (fixupReg(insn, &Op))
|
||||
|
||||
// If sibIndex was set to SIB_INDEX_NONE, index offset is 4.
|
||||
if (insn->sibIndex == SIB_INDEX_NONE)
|
||||
insn->sibIndex = (SIBIndex)4;
|
||||
|
||||
// If EVEX.v2 is set this is one of the 16-31 registers.
|
||||
if (insn->vectorExtensionType == TYPE_EVEX &&
|
||||
v2FromEVEX4of4(insn->vectorExtensionPrefix[3]))
|
||||
insn->sibIndex = (SIBIndex)(insn->sibIndex + 16);
|
||||
|
||||
// Adjust the index register to the correct size.
|
||||
switch ((OperandType)Op.type) {
|
||||
default:
|
||||
debug("Unhandled VSIB index type");
|
||||
return -1;
|
||||
case TYPE_MVSIBX:
|
||||
insn->sibIndex = (SIBIndex)(SIB_INDEX_XMM0 +
|
||||
(insn->sibIndex - insn->sibIndexBase));
|
||||
break;
|
||||
case TYPE_MVSIBY:
|
||||
insn->sibIndex = (SIBIndex)(SIB_INDEX_YMM0 +
|
||||
(insn->sibIndex - insn->sibIndexBase));
|
||||
break;
|
||||
case TYPE_MVSIBZ:
|
||||
insn->sibIndex = (SIBIndex)(SIB_INDEX_ZMM0 +
|
||||
(insn->sibIndex - insn->sibIndexBase));
|
||||
break;
|
||||
}
|
||||
|
||||
// Apply the AVX512 compressed displacement scaling factor.
|
||||
if (Op.encoding != ENCODING_REG && insn->eaDisplacement == EA_DISP_8)
|
||||
insn->displacement *= 1 << (Op.encoding - ENCODING_VSIB);
|
||||
|
|
|
@ -639,6 +639,7 @@ struct InternalInstruction {
|
|||
Reg reg;
|
||||
|
||||
// SIB state
|
||||
SIBIndex sibIndexBase;
|
||||
SIBIndex sibIndex;
|
||||
uint8_t sibScale;
|
||||
SIBBase sibBase;
|
||||
|
|
|
@ -410,6 +410,9 @@ enum OperandEncoding {
|
|||
ENUM_ENTRY(TYPE_AVX512ICC, "1-byte immediate operand for AVX512 icmp") \
|
||||
ENUM_ENTRY(TYPE_UIMM8, "1-byte unsigned immediate operand") \
|
||||
ENUM_ENTRY(TYPE_M, "Memory operand") \
|
||||
ENUM_ENTRY(TYPE_MVSIBX, "Memory operand using XMM index") \
|
||||
ENUM_ENTRY(TYPE_MVSIBY, "Memory operand using YMM index") \
|
||||
ENUM_ENTRY(TYPE_MVSIBZ, "Memory operand using ZMM index") \
|
||||
ENUM_ENTRY(TYPE_SRCIDX, "memory at source index") \
|
||||
ENUM_ENTRY(TYPE_DSTIDX, "memory at destination index") \
|
||||
ENUM_ENTRY(TYPE_MOFFS, "memory offset (relative to segment base)") \
|
||||
|
|
|
@ -431,24 +431,16 @@
|
|||
# CHECK: vaddps 287453952(%rip), %zmm20, %zmm15
|
||||
0x62 0x71 0x5c 0x40 0x58 0x3d 0x00 0x33 0x22 0x11
|
||||
|
||||
# Known bugs: these use a SIB byte. The index register is incorrectly
|
||||
# printed as an xmm register. Indeed there are "gather" load instructions
|
||||
# taking a vector of indices, but ONLY those instructions can do that.
|
||||
# The CHECK lines test the current incorrect output; FIXME is desired.
|
||||
# CHECK: vaddps (%r10,%xmm9), %zmm20, %zmm15
|
||||
# FIXME: vaddps (%r10,%r9), %zmm20, %zmm15
|
||||
# CHECK: vaddps (%r10,%r9), %zmm20, %zmm15
|
||||
0x62 0x11 0x5c 0x40 0x58 0x3c 0x0a
|
||||
|
||||
# CHECK: vaddps (%rdx,%xmm9), %zmm20, %zmm15
|
||||
# FIXME: vaddps (%rdx,%r9), %zmm20, %zmm15
|
||||
# CHECK: vaddps (%rdx,%r9), %zmm20, %zmm15
|
||||
0x62 0x31 0x5c 0x40 0x58 0x3c 0x0a
|
||||
|
||||
# CHECK: vaddps (%r10,%xmm1), %zmm20, %zmm15
|
||||
# FIXME: vaddps (%r10,%rcx), %zmm20, %zmm15
|
||||
# CHECK: vaddps (%r10,%rcx), %zmm20, %zmm15
|
||||
0x62 0x51 0x5c 0x40 0x58 0x3c 0x0a
|
||||
|
||||
# CHECK: vaddps (%rdx,%xmm1), %zmm20, %zmm15
|
||||
# FIXME: vaddps (%rdx,%rcx), %zmm20, %zmm15
|
||||
# CHECK: vaddps (%rdx,%rcx), %zmm20, %zmm15
|
||||
0x62 0x71 0x5c 0x40 0x58 0x3c 0x0a
|
||||
|
||||
# CHECK: callq 32767
|
||||
|
|
|
@ -929,19 +929,19 @@ OperandType RecognizableInstr::typeFromString(const std::string &s,
|
|||
TYPE("VK64", TYPE_VK)
|
||||
TYPE("VK64WM", TYPE_VK)
|
||||
TYPE("GR32_NOAX", TYPE_Rv)
|
||||
TYPE("vx64mem", TYPE_M)
|
||||
TYPE("vx128mem", TYPE_M)
|
||||
TYPE("vx256mem", TYPE_M)
|
||||
TYPE("vy128mem", TYPE_M)
|
||||
TYPE("vy256mem", TYPE_M)
|
||||
TYPE("vx64xmem", TYPE_M)
|
||||
TYPE("vx128xmem", TYPE_M)
|
||||
TYPE("vx256xmem", TYPE_M)
|
||||
TYPE("vy128xmem", TYPE_M)
|
||||
TYPE("vy256xmem", TYPE_M)
|
||||
TYPE("vy512mem", TYPE_M)
|
||||
TYPE("vz256xmem", TYPE_M)
|
||||
TYPE("vz512mem", TYPE_M)
|
||||
TYPE("vx64mem", TYPE_MVSIBX)
|
||||
TYPE("vx128mem", TYPE_MVSIBX)
|
||||
TYPE("vx256mem", TYPE_MVSIBX)
|
||||
TYPE("vy128mem", TYPE_MVSIBY)
|
||||
TYPE("vy256mem", TYPE_MVSIBY)
|
||||
TYPE("vx64xmem", TYPE_MVSIBX)
|
||||
TYPE("vx128xmem", TYPE_MVSIBX)
|
||||
TYPE("vx256xmem", TYPE_MVSIBX)
|
||||
TYPE("vy128xmem", TYPE_MVSIBY)
|
||||
TYPE("vy256xmem", TYPE_MVSIBY)
|
||||
TYPE("vy512mem", TYPE_MVSIBY)
|
||||
TYPE("vz256xmem", TYPE_MVSIBZ)
|
||||
TYPE("vz512mem", TYPE_MVSIBZ)
|
||||
TYPE("BNDR", TYPE_BNDR)
|
||||
errs() << "Unhandled type string " << s << "\n";
|
||||
llvm_unreachable("Unhandled type string");
|
||||
|
|
Loading…
Reference in New Issue