//===-- X86MCInstLower.cpp - Convert X86 MachineInstr to an MCInst --------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains code to lower X86 MachineInstrs to their corresponding
// MCInst records.
//
//===----------------------------------------------------------------------===//

#include "X86AsmPrinter.h"
|
2014-06-25 20:41:52 +08:00
|
|
|
#include "X86RegisterInfo.h"
|
2012-03-18 02:46:09 +08:00
|
|
|
#include "InstPrinter/X86ATTInstPrinter.h"
|
2014-03-19 14:53:25 +08:00
|
|
|
#include "MCTargetDesc/X86BaseInfo.h"
|
2014-07-26 07:47:11 +08:00
|
|
|
#include "Utils/X86ShuffleDecode.h"
|
2012-12-04 00:50:05 +08:00
|
|
|
#include "llvm/ADT/SmallString.h"
|
2014-03-19 14:53:25 +08:00
|
|
|
#include "llvm/CodeGen/MachineFunction.h"
|
2014-07-26 07:47:11 +08:00
|
|
|
#include "llvm/CodeGen/MachineConstantPool.h"
|
|
|
|
#include "llvm/CodeGen/MachineOperand.h"
|
2009-09-16 14:25:03 +08:00
|
|
|
#include "llvm/CodeGen/MachineModuleInfoImpls.h"
|
2013-11-01 06:11:56 +08:00
|
|
|
#include "llvm/CodeGen/StackMaps.h"
|
2014-03-19 14:53:25 +08:00
|
|
|
#include "llvm/IR/DataLayout.h"
|
|
|
|
#include "llvm/IR/GlobalValue.h"
|
2014-01-08 05:19:40 +08:00
|
|
|
#include "llvm/IR/Mangler.h"
|
2011-07-15 07:50:31 +08:00
|
|
|
#include "llvm/MC/MCAsmInfo.h"
|
2014-07-25 04:40:55 +08:00
|
|
|
#include "llvm/MC/MCCodeEmitter.h"
|
2009-09-03 01:35:12 +08:00
|
|
|
#include "llvm/MC/MCContext.h"
|
|
|
|
#include "llvm/MC/MCExpr.h"
|
|
|
|
#include "llvm/MC/MCInst.h"
|
2012-11-26 21:34:22 +08:00
|
|
|
#include "llvm/MC/MCInstBuilder.h"
|
2009-09-03 01:35:12 +08:00
|
|
|
#include "llvm/MC/MCStreamer.h"
|
2010-03-13 03:42:40 +08:00
|
|
|
#include "llvm/MC/MCSymbol.h"
|
2014-07-25 04:40:55 +08:00
|
|
|
#include "llvm/Support/TargetRegistry.h"
|
2009-09-03 01:35:12 +08:00
|
|
|
using namespace llvm;
|
|
|
|
|

namespace {

/// X86MCInstLower - This class is used to lower a MachineInstr into an
/// MCInst.
class X86MCInstLower {
  MCContext &Ctx;
  const MachineFunction &MF;
  const TargetMachine &TM;
  const MCAsmInfo &MAI;
  X86AsmPrinter &AsmPrinter;

public:
  X86MCInstLower(const MachineFunction &MF, X86AsmPrinter &asmprinter);

  void Lower(const MachineInstr *MI, MCInst &OutMI) const;

  MCSymbol *GetSymbolFromOperand(const MachineOperand &MO) const;
  MCOperand LowerSymbolOperand(const MachineOperand &MO, MCSymbol *Sym) const;

private:
  MachineModuleInfoMachO &getMachOMMI() const;
  Mangler *getMang() const {
    return AsmPrinter.Mang;
  }
};

} // end anonymous namespace

// Emit a minimal sequence of nops spanning NumBytes bytes.
static void EmitNops(MCStreamer &OS, unsigned NumBytes, bool Is64Bit,
                     const MCSubtargetInfo &STI);

namespace llvm {
X86AsmPrinter::StackMapShadowTracker::StackMapShadowTracker(TargetMachine &TM)
    : TM(TM), InShadow(false), RequiredShadowSize(0), CurrentShadowSize(0) {}

X86AsmPrinter::StackMapShadowTracker::~StackMapShadowTracker() {}

void
X86AsmPrinter::StackMapShadowTracker::startFunction(MachineFunction &F) {
  MF = &F;
  CodeEmitter.reset(TM.getTarget().createMCCodeEmitter(
      *MF->getSubtarget().getInstrInfo(),
      *MF->getSubtarget().getRegisterInfo(), MF->getContext()));
}

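// Account for an instruction emitted while a stackmap/patchpoint shadow is
// still open: the instruction is encoded into a scratch buffer purely to
// learn its size, and tracking stops once the accumulated bytes cover the
// requested shadow.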
void X86AsmPrinter::StackMapShadowTracker::count(MCInst &Inst,
                                                 const MCSubtargetInfo &STI) {
  if (InShadow) {
    SmallString<256> Code;
    SmallVector<MCFixup, 4> Fixups;
    raw_svector_ostream VecOS(Code);
    CodeEmitter->EncodeInstruction(Inst, VecOS, Fixups, STI);
    VecOS.flush();
    CurrentShadowSize += Code.size();
    if (CurrentShadowSize >= RequiredShadowSize)
      InShadow = false; // The shadow is big enough. Stop counting.
  }
}

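// If the current shadow has not been filled by ordinary instructions, top it
// up with nops before the next stackmap/patchpoint label is emitted.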
void X86AsmPrinter::StackMapShadowTracker::emitShadowPadding(
    MCStreamer &OutStreamer, const MCSubtargetInfo &STI) {
  if (InShadow && CurrentShadowSize < RequiredShadowSize) {
    InShadow = false;
    EmitNops(OutStreamer, RequiredShadowSize - CurrentShadowSize,
             MF->getSubtarget<X86Subtarget>().is64Bit(), STI);
  }
}

void X86AsmPrinter::EmitAndCountInstruction(MCInst &Inst) {
  OutStreamer->EmitInstruction(Inst, getSubtargetInfo());
  SMShadowTracker.count(Inst, getSubtargetInfo());
}
} // end llvm namespace

X86MCInstLower::X86MCInstLower(const MachineFunction &mf,
                               X86AsmPrinter &asmprinter)
    : Ctx(mf.getContext()), MF(mf), TM(mf.getTarget()), MAI(*TM.getMCAsmInfo()),
      AsmPrinter(asmprinter) {}

MachineModuleInfoMachO &X86MCInstLower::getMachOMMI() const {
  return MF.getMMI().getObjFileInfo<MachineModuleInfoMachO>();
}

/// GetSymbolFromOperand - Lower an MO_GlobalAddress or MO_ExternalSymbol
/// operand to an MCSymbol.
MCSymbol *X86MCInstLower::
GetSymbolFromOperand(const MachineOperand &MO) const {
  const DataLayout *DL = TM.getDataLayout();
  assert((MO.isGlobal() || MO.isSymbol() || MO.isMBB()) &&
         "Isn't a symbol reference");

  SmallString<128> Name;
  StringRef Suffix;

  switch (MO.getTargetFlags()) {
  case X86II::MO_DLLIMPORT:
    // Handle dllimport linkage.
    Name += "__imp_";
    break;
  case X86II::MO_DARWIN_STUB:
    Suffix = "$stub";
    break;
  case X86II::MO_DARWIN_NONLAZY:
  case X86II::MO_DARWIN_NONLAZY_PIC_BASE:
  case X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE:
    Suffix = "$non_lazy_ptr";
    break;
  }

  if (!Suffix.empty())
    Name += DL->getPrivateGlobalPrefix();

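  // Remember where the prefix ends so the undecorated symbol name can be
  // recovered below (OrigName) when a Darwin $stub entry needs it.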
  unsigned PrefixLen = Name.size();

  if (MO.isGlobal()) {
    const GlobalValue *GV = MO.getGlobal();
    AsmPrinter.getNameWithPrefix(Name, GV);
  } else if (MO.isSymbol()) {
    if (MO.getTargetFlags() == X86II::MO_NOPREFIX)
      Name += MO.getSymbolName();
    else
      getMang()->getNameWithPrefix(Name, MO.getSymbolName());
  } else if (MO.isMBB()) {
    Name += MO.getMBB()->getSymbol()->getName();
  }
  unsigned OrigLen = Name.size() - PrefixLen;

  Name += Suffix;
  MCSymbol *Sym = Ctx.GetOrCreateSymbol(Name);

  StringRef OrigName = StringRef(Name).substr(PrefixLen, OrigLen);

  // If the target flags on the operand change the name of the symbol, do that
  // before we return the symbol.
  switch (MO.getTargetFlags()) {
  default: break;
  case X86II::MO_DARWIN_NONLAZY:
  case X86II::MO_DARWIN_NONLAZY_PIC_BASE: {
    MachineModuleInfoImpl::StubValueTy &StubSym =
      getMachOMMI().getGVStubEntry(Sym);
    if (!StubSym.getPointer()) {
      assert(MO.isGlobal() && "Extern symbol not handled yet");
      StubSym =
        MachineModuleInfoImpl::
        StubValueTy(AsmPrinter.getSymbol(MO.getGlobal()),
                    !MO.getGlobal()->hasInternalLinkage());
    }
    break;
  }
  case X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE: {
    MachineModuleInfoImpl::StubValueTy &StubSym =
      getMachOMMI().getHiddenGVStubEntry(Sym);
    if (!StubSym.getPointer()) {
      assert(MO.isGlobal() && "Extern symbol not handled yet");
      StubSym =
        MachineModuleInfoImpl::
        StubValueTy(AsmPrinter.getSymbol(MO.getGlobal()),
                    !MO.getGlobal()->hasInternalLinkage());
    }
    break;
  }
  case X86II::MO_DARWIN_STUB: {
    MachineModuleInfoImpl::StubValueTy &StubSym =
      getMachOMMI().getFnStubEntry(Sym);
    if (StubSym.getPointer())
      return Sym;

    if (MO.isGlobal()) {
      StubSym =
        MachineModuleInfoImpl::
        StubValueTy(AsmPrinter.getSymbol(MO.getGlobal()),
                    !MO.getGlobal()->hasInternalLinkage());
    } else {
      StubSym =
        MachineModuleInfoImpl::
        StubValueTy(Ctx.GetOrCreateSymbol(OrigName), false);
    }
    break;
  }
  }

  return Sym;
}

MCOperand X86MCInstLower::LowerSymbolOperand(const MachineOperand &MO,
                                             MCSymbol *Sym) const {
  // FIXME: We would like an efficient form for this, so we don't have to do a
  // lot of extra uniquing.
  const MCExpr *Expr = nullptr;
  MCSymbolRefExpr::VariantKind RefKind = MCSymbolRefExpr::VK_None;

  switch (MO.getTargetFlags()) {
  default: llvm_unreachable("Unknown target flag on GV operand");
  case X86II::MO_NO_FLAG: // No flag.
  // These affect the name of the symbol, not any suffix.
  case X86II::MO_DARWIN_NONLAZY:
  case X86II::MO_DLLIMPORT:
  case X86II::MO_DARWIN_STUB:
  case X86II::MO_NOPREFIX:
    break;

  case X86II::MO_TLVP: RefKind = MCSymbolRefExpr::VK_TLVP; break;
  case X86II::MO_TLVP_PIC_BASE:
    Expr = MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_TLVP, Ctx);
    // Subtract the pic base.
    Expr = MCBinaryExpr::CreateSub(
        Expr, MCSymbolRefExpr::Create(MF.getPICBaseSymbol(), Ctx), Ctx);
    break;
  case X86II::MO_SECREL: RefKind = MCSymbolRefExpr::VK_SECREL; break;
  case X86II::MO_TLSGD: RefKind = MCSymbolRefExpr::VK_TLSGD; break;
  case X86II::MO_TLSLD: RefKind = MCSymbolRefExpr::VK_TLSLD; break;
  case X86II::MO_TLSLDM: RefKind = MCSymbolRefExpr::VK_TLSLDM; break;
  case X86II::MO_GOTTPOFF: RefKind = MCSymbolRefExpr::VK_GOTTPOFF; break;
  case X86II::MO_INDNTPOFF: RefKind = MCSymbolRefExpr::VK_INDNTPOFF; break;
  case X86II::MO_TPOFF: RefKind = MCSymbolRefExpr::VK_TPOFF; break;
  case X86II::MO_DTPOFF: RefKind = MCSymbolRefExpr::VK_DTPOFF; break;
  case X86II::MO_NTPOFF: RefKind = MCSymbolRefExpr::VK_NTPOFF; break;
  case X86II::MO_GOTNTPOFF: RefKind = MCSymbolRefExpr::VK_GOTNTPOFF; break;
  case X86II::MO_GOTPCREL: RefKind = MCSymbolRefExpr::VK_GOTPCREL; break;
  case X86II::MO_GOT: RefKind = MCSymbolRefExpr::VK_GOT; break;
  case X86II::MO_GOTOFF: RefKind = MCSymbolRefExpr::VK_GOTOFF; break;
  case X86II::MO_PLT: RefKind = MCSymbolRefExpr::VK_PLT; break;
  case X86II::MO_PIC_BASE_OFFSET:
  case X86II::MO_DARWIN_NONLAZY_PIC_BASE:
  case X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE:
    Expr = MCSymbolRefExpr::Create(Sym, Ctx);
    // Subtract the pic base.
    Expr = MCBinaryExpr::CreateSub(
        Expr, MCSymbolRefExpr::Create(MF.getPICBaseSymbol(), Ctx), Ctx);
    if (MO.isJTI()) {
      assert(MAI.doesSetDirectiveSuppressesReloc());
      // If .set directive is supported, use it to reduce the number of
      // relocations the assembler will generate for differences between
      // local labels. This is only safe when the symbols are in the same
      // section so we are restricting it to jumptable references.
      MCSymbol *Label = Ctx.CreateTempSymbol();
      AsmPrinter.OutStreamer->EmitAssignment(Label, Expr);
      Expr = MCSymbolRefExpr::Create(Label, Ctx);
    }
    break;
  }

  if (!Expr)
    Expr = MCSymbolRefExpr::Create(Sym, RefKind, Ctx);

  if (!MO.isJTI() && !MO.isMBB() && MO.getOffset())
    Expr = MCBinaryExpr::CreateAdd(
        Expr, MCConstantExpr::Create(MO.getOffset(), Ctx), Ctx);
  return MCOperand::createExpr(Expr);
}

/// \brief Simplify FOO $imm, %{al,ax,eax,rax} to FOO $imm, for instructions
/// with a short fixed-register form.
static void SimplifyShortImmForm(MCInst &Inst, unsigned Opcode) {
  unsigned ImmOp = Inst.getNumOperands() - 1;
  assert(Inst.getOperand(0).isReg() &&
         (Inst.getOperand(ImmOp).isImm() || Inst.getOperand(ImmOp).isExpr()) &&
         ((Inst.getNumOperands() == 3 && Inst.getOperand(1).isReg() &&
           Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) ||
          Inst.getNumOperands() == 2) && "Unexpected instruction!");

  // Check whether the destination register can be fixed.
  unsigned Reg = Inst.getOperand(0).getReg();
  if (Reg != X86::AL && Reg != X86::AX && Reg != X86::EAX && Reg != X86::RAX)
    return;

  // If so, rewrite the instruction.
  MCOperand Saved = Inst.getOperand(ImmOp);
  Inst = MCInst();
  Inst.setOpcode(Opcode);
  Inst.addOperand(Saved);
}

/// \brief If a movsx instruction has a shorter encoding for the used register,
/// simplify the instruction to use it instead.
static void SimplifyMOVSX(MCInst &Inst) {
  unsigned NewOpcode = 0;
  unsigned Op0 = Inst.getOperand(0).getReg(), Op1 = Inst.getOperand(1).getReg();
  switch (Inst.getOpcode()) {
  default:
    llvm_unreachable("Unexpected instruction!");
  case X86::MOVSX16rr8:  // movsbw %al, %ax   --> cbtw
    if (Op0 == X86::AX && Op1 == X86::AL)
      NewOpcode = X86::CBW;
    break;
  case X86::MOVSX32rr16: // movswl %ax, %eax  --> cwtl
    if (Op0 == X86::EAX && Op1 == X86::AX)
      NewOpcode = X86::CWDE;
    break;
  case X86::MOVSX64rr32: // movslq %eax, %rax --> cltq
    if (Op0 == X86::RAX && Op1 == X86::EAX)
      NewOpcode = X86::CDQE;
    break;
  }

  if (NewOpcode != 0) {
    Inst = MCInst();
    Inst.setOpcode(NewOpcode);
  }
}

/// \brief Simplify things like MOV32rm to MOV32o32a.
static void SimplifyShortMoveForm(X86AsmPrinter &Printer, MCInst &Inst,
                                  unsigned Opcode) {
  // Don't make these simplifications in 64-bit mode; other assemblers don't
  // perform them because they make the code larger.
  if (Printer.getSubtarget().is64Bit())
    return;

  bool IsStore = Inst.getOperand(0).isReg() && Inst.getOperand(1).isReg();
  unsigned AddrBase = IsStore;
  unsigned RegOp = IsStore ? 0 : 5;
  unsigned AddrOp = AddrBase + 3;
  assert(Inst.getNumOperands() == 6 && Inst.getOperand(RegOp).isReg() &&
         Inst.getOperand(AddrBase + X86::AddrBaseReg).isReg() &&
         Inst.getOperand(AddrBase + X86::AddrScaleAmt).isImm() &&
         Inst.getOperand(AddrBase + X86::AddrIndexReg).isReg() &&
         Inst.getOperand(AddrBase + X86::AddrSegmentReg).isReg() &&
         (Inst.getOperand(AddrOp).isExpr() ||
          Inst.getOperand(AddrOp).isImm()) &&
         "Unexpected instruction!");

  // Check whether the destination register can be fixed.
  unsigned Reg = Inst.getOperand(RegOp).getReg();
  if (Reg != X86::AL && Reg != X86::AX && Reg != X86::EAX && Reg != X86::RAX)
    return;

  // Check whether this is an absolute address.
  // FIXME: We know TLVP symbol refs aren't, but there should be a better way
  // to do this here.
  bool Absolute = true;
  if (Inst.getOperand(AddrOp).isExpr()) {
    const MCExpr *MCE = Inst.getOperand(AddrOp).getExpr();
    if (const MCSymbolRefExpr *SRE = dyn_cast<MCSymbolRefExpr>(MCE))
      if (SRE->getKind() == MCSymbolRefExpr::VK_TLVP)
        Absolute = false;
  }

  if (Absolute &&
      (Inst.getOperand(AddrBase + X86::AddrBaseReg).getReg() != 0 ||
       Inst.getOperand(AddrBase + X86::AddrScaleAmt).getImm() != 1 ||
       Inst.getOperand(AddrBase + X86::AddrIndexReg).getReg() != 0))
    return;

  // If so, rewrite the instruction.
  MCOperand Saved = Inst.getOperand(AddrOp);
  MCOperand Seg = Inst.getOperand(AddrBase + X86::AddrSegmentReg);
  Inst = MCInst();
  Inst.setOpcode(Opcode);
  Inst.addOperand(Saved);
  Inst.addOperand(Seg);
}

static unsigned getRetOpcode(const X86Subtarget &Subtarget) {
  return Subtarget.is64Bit() ? X86::RETQ : X86::RETL;
}

void X86MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
  OutMI.setOpcode(MI->getOpcode());

  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);

    MCOperand MCOp;
    switch (MO.getType()) {
    default:
      MI->dump();
      llvm_unreachable("unknown operand type");
    case MachineOperand::MO_Register:
      // Ignore all implicit register operands.
      if (MO.isImplicit()) continue;
      MCOp = MCOperand::createReg(MO.getReg());
      break;
    case MachineOperand::MO_Immediate:
      MCOp = MCOperand::createImm(MO.getImm());
      break;
    case MachineOperand::MO_MachineBasicBlock:
    case MachineOperand::MO_GlobalAddress:
    case MachineOperand::MO_ExternalSymbol:
      MCOp = LowerSymbolOperand(MO, GetSymbolFromOperand(MO));
      break;
    case MachineOperand::MO_JumpTableIndex:
      MCOp = LowerSymbolOperand(MO, AsmPrinter.GetJTISymbol(MO.getIndex()));
      break;
    case MachineOperand::MO_ConstantPoolIndex:
      MCOp = LowerSymbolOperand(MO, AsmPrinter.GetCPISymbol(MO.getIndex()));
      break;
    case MachineOperand::MO_BlockAddress:
      MCOp = LowerSymbolOperand(
          MO, AsmPrinter.GetBlockAddressSymbol(MO.getBlockAddress()));
      break;
    case MachineOperand::MO_RegisterMask:
      // Ignore call clobbers.
      continue;
    }

    OutMI.addOperand(MCOp);
  }

  // Handle a few special cases to eliminate operand modifiers.
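  // Some of the rewrites below produce an opcode that itself has a shorter
  // form, so those cases jump back to this label to re-run the switch.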
ReSimplify:
  switch (OutMI.getOpcode()) {
  case X86::LEA64_32r:
  case X86::LEA64r:
  case X86::LEA16r:
  case X86::LEA32r:
    // LEA should have a segment register, but it must be empty.
    assert(OutMI.getNumOperands() == 1+X86::AddrNumOperands &&
           "Unexpected # of LEA operands");
    assert(OutMI.getOperand(1+X86::AddrSegmentReg).getReg() == 0 &&
           "LEA has segment specified!");
    break;

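  // MOV32ri64 is a pseudo for materializing a 64-bit immediate whose upper
  // 32 bits are zero; a plain 32-bit move suffices because writes to a 32-bit
  // register implicitly zero the upper half.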
  case X86::MOV32ri64:
    OutMI.setOpcode(X86::MOV32ri);
    break;

  // Commute operands to get a smaller encoding by using VEX.R instead of
  // VEX.B if one of the registers is extended, but the other isn't.
  case X86::VMOVAPDrr:
  case X86::VMOVAPDYrr:
  case X86::VMOVAPSrr:
  case X86::VMOVAPSYrr:
  case X86::VMOVDQArr:
  case X86::VMOVDQAYrr:
  case X86::VMOVDQUrr:
  case X86::VMOVDQUYrr:
  case X86::VMOVUPDrr:
  case X86::VMOVUPDYrr:
  case X86::VMOVUPSrr:
  case X86::VMOVUPSYrr: {
    if (!X86II::isX86_64ExtendedReg(OutMI.getOperand(0).getReg()) &&
        X86II::isX86_64ExtendedReg(OutMI.getOperand(1).getReg())) {
      unsigned NewOpc;
      switch (OutMI.getOpcode()) {
      default: llvm_unreachable("Invalid opcode");
      case X86::VMOVAPDrr: NewOpc = X86::VMOVAPDrr_REV; break;
      case X86::VMOVAPDYrr: NewOpc = X86::VMOVAPDYrr_REV; break;
      case X86::VMOVAPSrr: NewOpc = X86::VMOVAPSrr_REV; break;
      case X86::VMOVAPSYrr: NewOpc = X86::VMOVAPSYrr_REV; break;
      case X86::VMOVDQArr: NewOpc = X86::VMOVDQArr_REV; break;
      case X86::VMOVDQAYrr: NewOpc = X86::VMOVDQAYrr_REV; break;
      case X86::VMOVDQUrr: NewOpc = X86::VMOVDQUrr_REV; break;
      case X86::VMOVDQUYrr: NewOpc = X86::VMOVDQUYrr_REV; break;
      case X86::VMOVUPDrr: NewOpc = X86::VMOVUPDrr_REV; break;
      case X86::VMOVUPDYrr: NewOpc = X86::VMOVUPDYrr_REV; break;
      case X86::VMOVUPSrr: NewOpc = X86::VMOVUPSrr_REV; break;
      case X86::VMOVUPSYrr: NewOpc = X86::VMOVUPSYrr_REV; break;
      }
      OutMI.setOpcode(NewOpc);
    }
    break;
  }
  case X86::VMOVSDrr:
  case X86::VMOVSSrr: {
    if (!X86II::isX86_64ExtendedReg(OutMI.getOperand(0).getReg()) &&
        X86II::isX86_64ExtendedReg(OutMI.getOperand(2).getReg())) {
      unsigned NewOpc;
      switch (OutMI.getOpcode()) {
      default: llvm_unreachable("Invalid opcode");
      case X86::VMOVSDrr: NewOpc = X86::VMOVSDrr_REV; break;
      case X86::VMOVSSrr: NewOpc = X86::VMOVSSrr_REV; break;
      }
      OutMI.setOpcode(NewOpc);
    }
    break;
  }

  // TAILJMPr64, CALL64r, CALL64pcrel32 - These instructions have register
  // inputs modeled as normal uses instead of implicit uses. As such, truncate
  // off all but the first operand (the callee). FIXME: Change isel.
  case X86::TAILJMPr64:
  case X86::TAILJMPr64_REX:
  case X86::CALL64r:
  case X86::CALL64pcrel32: {
    unsigned Opcode = OutMI.getOpcode();
    MCOperand Saved = OutMI.getOperand(0);
    OutMI = MCInst();
    OutMI.setOpcode(Opcode);
    OutMI.addOperand(Saved);
    break;
  }

  case X86::EH_RETURN:
  case X86::EH_RETURN64: {
    OutMI = MCInst();
    OutMI.setOpcode(getRetOpcode(AsmPrinter.getSubtarget()));
    break;
  }

  // TAILJMPd, TAILJMPd64 - Lower to the correct jump instructions.
  case X86::TAILJMPr:
  case X86::TAILJMPd:
  case X86::TAILJMPd64: {
    unsigned Opcode;
    switch (OutMI.getOpcode()) {
    default: llvm_unreachable("Invalid opcode");
    case X86::TAILJMPr: Opcode = X86::JMP32r; break;
    case X86::TAILJMPd:
    case X86::TAILJMPd64: Opcode = X86::JMP_1; break;
    }

    MCOperand Saved = OutMI.getOperand(0);
    OutMI = MCInst();
    OutMI.setOpcode(Opcode);
    OutMI.addOperand(Saved);
    break;
  }

  case X86::DEC16r:
  case X86::DEC32r:
  case X86::INC16r:
  case X86::INC32r:
    // If we aren't in 64-bit mode we can use the 1-byte inc/dec instructions.
    if (!AsmPrinter.getSubtarget().is64Bit()) {
      unsigned Opcode;
      switch (OutMI.getOpcode()) {
      default: llvm_unreachable("Invalid opcode");
      case X86::DEC16r: Opcode = X86::DEC16r_alt; break;
      case X86::DEC32r: Opcode = X86::DEC32r_alt; break;
      case X86::INC16r: Opcode = X86::INC16r_alt; break;
      case X86::INC32r: Opcode = X86::INC32r_alt; break;
      }
      OutMI.setOpcode(Opcode);
    }
    break;

  // These are pseudo-ops for OR to help with the OR->ADD transformation. We do
  // this with an ugly goto in case the resultant OR uses EAX and needs the
  // short form.
  case X86::ADD16rr_DB: OutMI.setOpcode(X86::OR16rr); goto ReSimplify;
  case X86::ADD32rr_DB: OutMI.setOpcode(X86::OR32rr); goto ReSimplify;
  case X86::ADD64rr_DB: OutMI.setOpcode(X86::OR64rr); goto ReSimplify;
  case X86::ADD16ri_DB: OutMI.setOpcode(X86::OR16ri); goto ReSimplify;
  case X86::ADD32ri_DB: OutMI.setOpcode(X86::OR32ri); goto ReSimplify;
  case X86::ADD64ri32_DB: OutMI.setOpcode(X86::OR64ri32); goto ReSimplify;
  case X86::ADD16ri8_DB: OutMI.setOpcode(X86::OR16ri8); goto ReSimplify;
  case X86::ADD32ri8_DB: OutMI.setOpcode(X86::OR32ri8); goto ReSimplify;
  case X86::ADD64ri8_DB: OutMI.setOpcode(X86::OR64ri8); goto ReSimplify;

  // Atomic load and store require a separate pseudo-inst because Acquire
  // implies mayStore and Release implies mayLoad; fix these to regular MOV
  // instructions here
  case X86::ACQUIRE_MOV8rm: OutMI.setOpcode(X86::MOV8rm); goto ReSimplify;
  case X86::ACQUIRE_MOV16rm: OutMI.setOpcode(X86::MOV16rm); goto ReSimplify;
  case X86::ACQUIRE_MOV32rm: OutMI.setOpcode(X86::MOV32rm); goto ReSimplify;
  case X86::ACQUIRE_MOV64rm: OutMI.setOpcode(X86::MOV64rm); goto ReSimplify;
  case X86::RELEASE_MOV8mr: OutMI.setOpcode(X86::MOV8mr); goto ReSimplify;
  case X86::RELEASE_MOV16mr: OutMI.setOpcode(X86::MOV16mr); goto ReSimplify;
  case X86::RELEASE_MOV32mr: OutMI.setOpcode(X86::MOV32mr); goto ReSimplify;
  case X86::RELEASE_MOV64mr: OutMI.setOpcode(X86::MOV64mr); goto ReSimplify;
  case X86::RELEASE_MOV8mi: OutMI.setOpcode(X86::MOV8mi); goto ReSimplify;
  case X86::RELEASE_MOV16mi: OutMI.setOpcode(X86::MOV16mi); goto ReSimplify;
  case X86::RELEASE_MOV32mi: OutMI.setOpcode(X86::MOV32mi); goto ReSimplify;
  case X86::RELEASE_MOV64mi32: OutMI.setOpcode(X86::MOV64mi32); goto ReSimplify;
  case X86::RELEASE_ADD8mi: OutMI.setOpcode(X86::ADD8mi); goto ReSimplify;
  case X86::RELEASE_ADD32mi: OutMI.setOpcode(X86::ADD32mi); goto ReSimplify;
  case X86::RELEASE_ADD64mi32: OutMI.setOpcode(X86::ADD64mi32); goto ReSimplify;
  case X86::RELEASE_AND8mi: OutMI.setOpcode(X86::AND8mi); goto ReSimplify;
  case X86::RELEASE_AND32mi: OutMI.setOpcode(X86::AND32mi); goto ReSimplify;
  case X86::RELEASE_AND64mi32: OutMI.setOpcode(X86::AND64mi32); goto ReSimplify;
  case X86::RELEASE_OR8mi: OutMI.setOpcode(X86::OR8mi); goto ReSimplify;
  case X86::RELEASE_OR32mi: OutMI.setOpcode(X86::OR32mi); goto ReSimplify;
  case X86::RELEASE_OR64mi32: OutMI.setOpcode(X86::OR64mi32); goto ReSimplify;
  case X86::RELEASE_XOR8mi: OutMI.setOpcode(X86::XOR8mi); goto ReSimplify;
  case X86::RELEASE_XOR32mi: OutMI.setOpcode(X86::XOR32mi); goto ReSimplify;
  case X86::RELEASE_XOR64mi32: OutMI.setOpcode(X86::XOR64mi32); goto ReSimplify;
  case X86::RELEASE_INC8m: OutMI.setOpcode(X86::INC8m); goto ReSimplify;
  case X86::RELEASE_INC16m: OutMI.setOpcode(X86::INC16m); goto ReSimplify;
  case X86::RELEASE_INC32m: OutMI.setOpcode(X86::INC32m); goto ReSimplify;
  case X86::RELEASE_INC64m: OutMI.setOpcode(X86::INC64m); goto ReSimplify;
  case X86::RELEASE_DEC8m: OutMI.setOpcode(X86::DEC8m); goto ReSimplify;
  case X86::RELEASE_DEC16m: OutMI.setOpcode(X86::DEC16m); goto ReSimplify;
  case X86::RELEASE_DEC32m: OutMI.setOpcode(X86::DEC32m); goto ReSimplify;
  case X86::RELEASE_DEC64m: OutMI.setOpcode(X86::DEC64m); goto ReSimplify;

  // We don't currently select the correct instruction form for instructions
  // which have a short %eax, etc. form. Handle this by custom lowering, for
  // now.
  //
  // Note, we are currently not handling the following instructions:
  // MOV64ao8, MOV64o8a
  // XCHG16ar, XCHG32ar, XCHG64ar
  case X86::MOV8mr_NOREX:
  case X86::MOV8mr: SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV8o32a); break;
  case X86::MOV8rm_NOREX:
  case X86::MOV8rm: SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV8ao32); break;
  case X86::MOV16mr: SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV16o32a); break;
  case X86::MOV16rm: SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV16ao32); break;
  case X86::MOV32mr: SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV32o32a); break;
  case X86::MOV32rm: SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV32ao32); break;

  case X86::ADC8ri: SimplifyShortImmForm(OutMI, X86::ADC8i8); break;
  case X86::ADC16ri: SimplifyShortImmForm(OutMI, X86::ADC16i16); break;
  case X86::ADC32ri: SimplifyShortImmForm(OutMI, X86::ADC32i32); break;
  case X86::ADC64ri32: SimplifyShortImmForm(OutMI, X86::ADC64i32); break;
  case X86::ADD8ri: SimplifyShortImmForm(OutMI, X86::ADD8i8); break;
  case X86::ADD16ri: SimplifyShortImmForm(OutMI, X86::ADD16i16); break;
  case X86::ADD32ri: SimplifyShortImmForm(OutMI, X86::ADD32i32); break;
  case X86::ADD64ri32: SimplifyShortImmForm(OutMI, X86::ADD64i32); break;
  case X86::AND8ri: SimplifyShortImmForm(OutMI, X86::AND8i8); break;
  case X86::AND16ri: SimplifyShortImmForm(OutMI, X86::AND16i16); break;
  case X86::AND32ri: SimplifyShortImmForm(OutMI, X86::AND32i32); break;
  case X86::AND64ri32: SimplifyShortImmForm(OutMI, X86::AND64i32); break;
  case X86::CMP8ri: SimplifyShortImmForm(OutMI, X86::CMP8i8); break;
  case X86::CMP16ri: SimplifyShortImmForm(OutMI, X86::CMP16i16); break;
  case X86::CMP32ri: SimplifyShortImmForm(OutMI, X86::CMP32i32); break;
  case X86::CMP64ri32: SimplifyShortImmForm(OutMI, X86::CMP64i32); break;
  case X86::OR8ri: SimplifyShortImmForm(OutMI, X86::OR8i8); break;
  case X86::OR16ri: SimplifyShortImmForm(OutMI, X86::OR16i16); break;
  case X86::OR32ri: SimplifyShortImmForm(OutMI, X86::OR32i32); break;
  case X86::OR64ri32: SimplifyShortImmForm(OutMI, X86::OR64i32); break;
  case X86::SBB8ri: SimplifyShortImmForm(OutMI, X86::SBB8i8); break;
  case X86::SBB16ri: SimplifyShortImmForm(OutMI, X86::SBB16i16); break;
  case X86::SBB32ri: SimplifyShortImmForm(OutMI, X86::SBB32i32); break;
  case X86::SBB64ri32: SimplifyShortImmForm(OutMI, X86::SBB64i32); break;
  case X86::SUB8ri: SimplifyShortImmForm(OutMI, X86::SUB8i8); break;
  case X86::SUB16ri: SimplifyShortImmForm(OutMI, X86::SUB16i16); break;
  case X86::SUB32ri: SimplifyShortImmForm(OutMI, X86::SUB32i32); break;
  case X86::SUB64ri32: SimplifyShortImmForm(OutMI, X86::SUB64i32); break;
  case X86::TEST8ri: SimplifyShortImmForm(OutMI, X86::TEST8i8); break;
  case X86::TEST16ri: SimplifyShortImmForm(OutMI, X86::TEST16i16); break;
  case X86::TEST32ri: SimplifyShortImmForm(OutMI, X86::TEST32i32); break;
  case X86::TEST64ri32: SimplifyShortImmForm(OutMI, X86::TEST64i32); break;
  case X86::XOR8ri: SimplifyShortImmForm(OutMI, X86::XOR8i8); break;
  case X86::XOR16ri: SimplifyShortImmForm(OutMI, X86::XOR16i16); break;
  case X86::XOR32ri: SimplifyShortImmForm(OutMI, X86::XOR32i32); break;
  case X86::XOR64ri32: SimplifyShortImmForm(OutMI, X86::XOR64i32); break;

  // Try to shrink some forms of movsx.
  case X86::MOVSX16rr8:
  case X86::MOVSX32rr16:
  case X86::MOVSX64rr32:
    SimplifyMOVSX(OutMI);
    break;
  }
}

void X86AsmPrinter::LowerTlsAddr(X86MCInstLower &MCInstLowering,
                                 const MachineInstr &MI) {

  bool is64Bits = MI.getOpcode() == X86::TLS_addr64 ||
                  MI.getOpcode() == X86::TLS_base_addr64;

  bool needsPadding = MI.getOpcode() == X86::TLS_addr64;

  MCContext &context = OutStreamer->getContext();

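  // (The data16/rex64 prefixes emitted below pad the 64-bit general-dynamic
  // sequence to the fixed length ELF linkers expect when relaxing this TLS
  // access.)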
  if (needsPadding)
    EmitAndCountInstruction(MCInstBuilder(X86::DATA16_PREFIX));

  MCSymbolRefExpr::VariantKind SRVK;
  switch (MI.getOpcode()) {
  case X86::TLS_addr32:
  case X86::TLS_addr64:
    SRVK = MCSymbolRefExpr::VK_TLSGD;
    break;
  case X86::TLS_base_addr32:
    SRVK = MCSymbolRefExpr::VK_TLSLDM;
    break;
  case X86::TLS_base_addr64:
    SRVK = MCSymbolRefExpr::VK_TLSLD;
    break;
  default:
    llvm_unreachable("unexpected opcode");
  }

  MCSymbol *sym = MCInstLowering.GetSymbolFromOperand(MI.getOperand(3));
  const MCSymbolRefExpr *symRef = MCSymbolRefExpr::Create(sym, SRVK, context);

  MCInst LEA;
  if (is64Bits) {
    LEA.setOpcode(X86::LEA64r);
    LEA.addOperand(MCOperand::createReg(X86::RDI)); // dest
    LEA.addOperand(MCOperand::createReg(X86::RIP)); // base
    LEA.addOperand(MCOperand::createImm(1));        // scale
    LEA.addOperand(MCOperand::createReg(0));        // index
    LEA.addOperand(MCOperand::createExpr(symRef));  // disp
    LEA.addOperand(MCOperand::createReg(0));        // seg
  } else if (SRVK == MCSymbolRefExpr::VK_TLSLDM) {
    LEA.setOpcode(X86::LEA32r);
    LEA.addOperand(MCOperand::createReg(X86::EAX)); // dest
    LEA.addOperand(MCOperand::createReg(X86::EBX)); // base
    LEA.addOperand(MCOperand::createImm(1));        // scale
    LEA.addOperand(MCOperand::createReg(0));        // index
    LEA.addOperand(MCOperand::createExpr(symRef));  // disp
    LEA.addOperand(MCOperand::createReg(0));        // seg
  } else {
    LEA.setOpcode(X86::LEA32r);
    LEA.addOperand(MCOperand::createReg(X86::EAX)); // dest
    LEA.addOperand(MCOperand::createReg(0));        // base
    LEA.addOperand(MCOperand::createImm(1));        // scale
    LEA.addOperand(MCOperand::createReg(X86::EBX)); // index
    LEA.addOperand(MCOperand::createExpr(symRef));  // disp
    LEA.addOperand(MCOperand::createReg(0));        // seg
  }
  EmitAndCountInstruction(LEA);

  if (needsPadding) {
    EmitAndCountInstruction(MCInstBuilder(X86::DATA16_PREFIX));
    EmitAndCountInstruction(MCInstBuilder(X86::DATA16_PREFIX));
    EmitAndCountInstruction(MCInstBuilder(X86::REX64_PREFIX));
  }

  StringRef name = is64Bits ? "__tls_get_addr" : "___tls_get_addr";
  MCSymbol *tlsGetAddr = context.GetOrCreateSymbol(name);
  const MCSymbolRefExpr *tlsRef =
    MCSymbolRefExpr::Create(tlsGetAddr,
                            MCSymbolRefExpr::VK_PLT,
                            context);

  EmitAndCountInstruction(MCInstBuilder(is64Bits ? X86::CALL64pcrel32
                                                 : X86::CALLpcrel32)
                            .addExpr(tlsRef));
}

/// \brief Emit the optimal amount of multi-byte nops on X86.
static void EmitNops(MCStreamer &OS, unsigned NumBytes, bool Is64Bit,
                     const MCSubtargetInfo &STI) {
  // This works only for 64bit. For 32bit we have to do additional checking if
  // the CPU supports multi-byte nops.
  assert(Is64Bit && "EmitNops only supports X86-64");
  while (NumBytes) {
    unsigned Opc, BaseReg, ScaleVal, IndexReg, Displacement, SegmentReg;
    Opc = IndexReg = Displacement = SegmentReg = 0;
    BaseReg = X86::RAX; ScaleVal = 1;
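    // Pick the longest standard nop encoding that fits, from the 1-byte 0x90
    // up to a 10-byte 0f 1f (nop r/m) form with displacement, index register
    // and segment-override prefix.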
    switch (NumBytes) {
    case 0: llvm_unreachable("Zero nops?"); break;
    case 1: NumBytes -= 1; Opc = X86::NOOP; break;
    case 2: NumBytes -= 2; Opc = X86::XCHG16ar; break;
    case 3: NumBytes -= 3; Opc = X86::NOOPL; break;
    case 4: NumBytes -= 4; Opc = X86::NOOPL; Displacement = 8; break;
    case 5: NumBytes -= 5; Opc = X86::NOOPL; Displacement = 8;
            IndexReg = X86::RAX; break;
    case 6: NumBytes -= 6; Opc = X86::NOOPW; Displacement = 8;
            IndexReg = X86::RAX; break;
    case 7: NumBytes -= 7; Opc = X86::NOOPL; Displacement = 512; break;
    case 8: NumBytes -= 8; Opc = X86::NOOPL; Displacement = 512;
            IndexReg = X86::RAX; break;
    case 9: NumBytes -= 9; Opc = X86::NOOPW; Displacement = 512;
            IndexReg = X86::RAX; break;
    default: NumBytes -= 10; Opc = X86::NOOPW; Displacement = 512;
             IndexReg = X86::RAX; SegmentReg = X86::CS; break;
    }

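    // Operand-size (0x66) prefixes are harmless on these nops; prepend up to
    // five of them to absorb leftover bytes so fewer filler instructions are
    // needed.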
    unsigned NumPrefixes = std::min(NumBytes, 5U);
    NumBytes -= NumPrefixes;
    for (unsigned i = 0; i != NumPrefixes; ++i)
      OS.EmitBytes("\x66");

    switch (Opc) {
    default: llvm_unreachable("Unexpected opcode"); break;
    case X86::NOOP:
      OS.EmitInstruction(MCInstBuilder(Opc), STI);
      break;
    case X86::XCHG16ar:
      OS.EmitInstruction(MCInstBuilder(Opc).addReg(X86::AX), STI);
      break;
    case X86::NOOPL:
    case X86::NOOPW:
      OS.EmitInstruction(MCInstBuilder(Opc).addReg(BaseReg)
                           .addImm(ScaleVal).addReg(IndexReg)
                           .addImm(Displacement).addReg(SegmentReg), STI);
      break;
    }
  } // while (NumBytes)
}

void X86AsmPrinter::LowerSTATEPOINT(const MachineInstr &MI,
                                    X86MCInstLower &MCIL) {
  assert(Subtarget->is64Bit() && "Statepoint currently only supports X86-64");

  StatepointOpers SOpers(&MI);
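  // A statepoint that requests patch bytes reserves space for later patching
  // instead of emitting a call now, so the region is filled entirely with
  // nops.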
  if (unsigned PatchBytes = SOpers.getNumPatchBytes()) {
    EmitNops(*OutStreamer, PatchBytes, Subtarget->is64Bit(),
             getSubtargetInfo());
  } else {
    // Lower call target and choose correct opcode
    const MachineOperand &CallTarget = SOpers.getCallTarget();
    MCOperand CallTargetMCOp;
    unsigned CallOpcode;
    switch (CallTarget.getType()) {
    case MachineOperand::MO_GlobalAddress:
    case MachineOperand::MO_ExternalSymbol:
      CallTargetMCOp = MCIL.LowerSymbolOperand(
          CallTarget, MCIL.GetSymbolFromOperand(CallTarget));
      CallOpcode = X86::CALL64pcrel32;
      // Currently, we only support relative addressing with statepoints.
      // Otherwise, we'll need a scratch register to hold the target
      // address. You'll fail asserts during load & relocation if this
      // symbol is too far away. (TODO: support non-relative addressing)
      break;
    case MachineOperand::MO_Immediate:
      CallTargetMCOp = MCOperand::createImm(CallTarget.getImm());
      CallOpcode = X86::CALL64pcrel32;
      // Currently, we only support relative addressing with statepoints.
      // Otherwise, we'll need a scratch register to hold the target
      // immediate. You'll fail asserts during load & relocation if this
      // address is too far away. (TODO: support non-relative addressing)
      break;
    case MachineOperand::MO_Register:
      CallTargetMCOp = MCOperand::createReg(CallTarget.getReg());
      CallOpcode = X86::CALL64r;
      break;
    default:
      llvm_unreachable("Unsupported operand type in statepoint call target");
      break;
    }

    // Emit call
    MCInst CallInst;
    CallInst.setOpcode(CallOpcode);
    CallInst.addOperand(CallTargetMCOp);
    OutStreamer->EmitInstruction(CallInst, getSubtargetInfo());
  }

  // Record our statepoint node in the same section used by STACKMAP
  // and PATCHPOINT
  SM.recordStatepoint(MI);
}

// Lower a stackmap of the form:
// <id>, <shadowBytes>, ...
void X86AsmPrinter::LowerSTACKMAP(const MachineInstr &MI) {
  SMShadowTracker.emitShadowPadding(*OutStreamer, getSubtargetInfo());
  SM.recordStackMap(MI);
  unsigned NumShadowBytes = MI.getOperand(1).getImm();
  SMShadowTracker.reset(NumShadowBytes);
}

// Lower a patchpoint of the form:
// [<def>], <id>, <numBytes>, <target>, <numArgs>, <cc>, ...
void X86AsmPrinter::LowerPATCHPOINT(const MachineInstr &MI,
                                    X86MCInstLower &MCIL) {
  assert(Subtarget->is64Bit() && "Patchpoint currently only supports X86-64");

  SMShadowTracker.emitShadowPadding(*OutStreamer, getSubtargetInfo());

  SM.recordPatchPoint(MI);

  PatchPointOpers opers(&MI);
  unsigned ScratchIdx = opers.getNextScratchIdx();
  unsigned EncodedBytes = 0;
  const MachineOperand &CalleeMO =
      opers.getMetaOper(PatchPointOpers::TargetPos);

  // Check for null target. If target is non-null (i.e. is non-zero or is
  // symbolic) then emit a call.
  if (!(CalleeMO.isImm() && !CalleeMO.getImm())) {
    MCOperand CalleeMCOp;
    switch (CalleeMO.getType()) {
    default:
      /// FIXME: Add a verifier check for bad callee types.
      llvm_unreachable("Unrecognized callee operand type.");
    case MachineOperand::MO_Immediate:
      if (CalleeMO.getImm())
        CalleeMCOp = MCOperand::createImm(CalleeMO.getImm());
      break;
    case MachineOperand::MO_ExternalSymbol:
    case MachineOperand::MO_GlobalAddress:
      CalleeMCOp =
        MCIL.LowerSymbolOperand(CalleeMO,
                                MCIL.GetSymbolFromOperand(CalleeMO));
      break;
    }

    // Emit MOV to materialize the target address and the CALL to target.
    // This is encoded with 12-13 bytes, depending on which register is used.
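    // (movabsq imm64 into the scratch register is always 10 bytes; the
    // indirect call is 2 bytes, plus one extra REX prefix byte when the
    // scratch register is r8-r15.)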
    unsigned ScratchReg = MI.getOperand(ScratchIdx).getReg();
    if (X86II::isX86_64ExtendedReg(ScratchReg))
      EncodedBytes = 13;
    else
      EncodedBytes = 12;

    EmitAndCountInstruction(
        MCInstBuilder(X86::MOV64ri).addReg(ScratchReg).addOperand(CalleeMCOp));
    EmitAndCountInstruction(MCInstBuilder(X86::CALL64r).addReg(ScratchReg));
  }

  // Emit padding.
  unsigned NumBytes = opers.getMetaOper(PatchPointOpers::NBytesPos).getImm();
  assert(NumBytes >= EncodedBytes &&
         "Patchpoint can't request size less than the length of a call.");

  EmitNops(*OutStreamer, NumBytes - EncodedBytes, Subtarget->is64Bit(),
           getSubtargetInfo());
}

// Returns instruction preceding MBBI in MachineFunction.
// If MBBI is the first instruction of the first basic block, returns null.
static MachineBasicBlock::const_iterator
PrevCrossBBInst(MachineBasicBlock::const_iterator MBBI) {
  const MachineBasicBlock *MBB = MBBI->getParent();
  while (MBBI == MBB->begin()) {
    if (MBB == MBB->getParent()->begin())
      return nullptr;
    MBB = MBB->getPrevNode();
    MBBI = MBB->end();
  }
  return --MBBI;
}

static const Constant *getConstantFromPool(const MachineInstr &MI,
                                           const MachineOperand &Op) {
  if (!Op.isCPI())
    return nullptr;

  ArrayRef<MachineConstantPoolEntry> Constants =
      MI.getParent()->getParent()->getConstantPool()->getConstants();
  const MachineConstantPoolEntry &ConstantEntry =
      Constants[Op.getIndex()];

  // Bail if this is a machine constant pool entry, we won't be able to dig out
  // anything useful.
  if (ConstantEntry.isMachineConstantPoolEntry())
    return nullptr;

  auto *C = dyn_cast<Constant>(ConstantEntry.Val.ConstVal);
  assert((!C || ConstantEntry.getType() == C->getType()) &&
         "Expected a constant of the same type!");
  return C;
}

static std::string getShuffleComment(const MachineOperand &DstOp,
|
|
|
|
const MachineOperand &SrcOp,
|
|
|
|
ArrayRef<int> Mask) {
|
|
|
|
std::string Comment;
|
2014-09-24 10:16:12 +08:00
|
|
|
|
|
|
|
// Compute the name for a register. This is really goofy because we have
|
|
|
|
// multiple instruction printers that could (in theory) use different
|
|
|
|
// names. Fortunately most people use the ATT style (outside of Windows)
|
|
|
|
// and they actually agree on register naming here. Ultimately, this is
|
|
|
|
// a comment, and so it's OK if it isn't perfect.
|
|
|
|
auto GetRegisterName = [](unsigned RegNum) -> StringRef {
|
|
|
|
return X86ATTInstPrinter::getRegisterName(RegNum);
|
|
|
|
};
|
|
|
|
|
|
|
|
StringRef DstName = DstOp.isReg() ? GetRegisterName(DstOp.getReg()) : "mem";
|
|
|
|
StringRef SrcName = SrcOp.isReg() ? GetRegisterName(SrcOp.getReg()) : "mem";
|
|
|
|
|
|
|
|
raw_string_ostream CS(Comment);
|
|
|
|
CS << DstName << " = ";
|
|
|
|
bool NeedComma = false;
|
|
|
|
bool InSrc = false;
|
|
|
|
for (int M : Mask) {
|
|
|
|
// Wrap up any prior entry...
|
|
|
|
if (M == SM_SentinelZero && InSrc) {
|
|
|
|
InSrc = false;
|
|
|
|
CS << "]";
|
|
|
|
}
|
|
|
|
if (NeedComma)
|
|
|
|
CS << ",";
|
|
|
|
else
|
|
|
|
NeedComma = true;
|
|
|
|
|
|
|
|
// Print this shuffle...
|
|
|
|
if (M == SM_SentinelZero) {
|
|
|
|
CS << "zero";
|
|
|
|
} else {
|
|
|
|
if (!InSrc) {
|
|
|
|
InSrc = true;
|
|
|
|
CS << SrcName << "[";
|
|
|
|
}
|
|
|
|
if (M == SM_SentinelUndef)
|
|
|
|
CS << "u";
|
|
|
|
else
|
|
|
|
CS << M;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (InSrc)
|
|
|
|
CS << "]";
|
|
|
|
CS.flush();
|
|
|
|
|
|
|
|
return Comment;
|
|
|
|
}
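// A minimal worked illustration (hypothetical operands; this adds no lowering
// logic): with DstOp/SrcOp naming xmm0 and xmm1 and
//   Mask = {0, 2, SM_SentinelZero, SM_SentinelUndef}
// the function above returns
//   "xmm0 = xmm1[0,2],zero,xmm1[u]"
// i.e. runs of source elements are wrapped in SrcName[...], zero lanes print
// as "zero" outside any bracket group, and undef lanes print as "u".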
|
|
|
|
|
2010-01-28 09:02:27 +08:00
|
|
|
void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
|
2013-10-30 00:11:22 +08:00
|
|
|
X86MCInstLower MCInstLowering(*MF, *this);
|
2015-02-03 01:38:43 +08:00
|
|
|
const X86RegisterInfo *RI = MF->getSubtarget<X86Subtarget>().getRegisterInfo();
|
2014-06-25 20:41:52 +08:00
|
|
|
|
2009-09-13 04:34:57 +08:00
|
|
|
switch (MI->getOpcode()) {
|
2010-04-07 06:45:26 +08:00
|
|
|
case TargetOpcode::DBG_VALUE:
|
2013-06-17 04:34:27 +08:00
|
|
|
llvm_unreachable("Should be handled target independently");
|
2010-04-07 09:15:14 +08:00
|
|
|
|
2010-08-06 02:34:30 +08:00
|
|
|
// Emit nothing here but a comment if we can.
|
|
|
|
case X86::Int_MemBarrier:
|
2015-04-25 03:11:51 +08:00
|
|
|
OutStreamer->emitRawComment("MEMBARRIER");
|
2010-08-06 02:34:30 +08:00
|
|
|
return;
|
2011-10-05 07:26:17 +08:00
|
|
|
|
2010-10-27 02:09:55 +08:00
|
|
|
|
|
|
|
case X86::EH_RETURN:
|
|
|
|
case X86::EH_RETURN64: {
|
|
|
|
// Lower these as normal, but add some comments.
|
|
|
|
unsigned Reg = MI->getOperand(0).getReg();
|
2015-04-25 03:11:51 +08:00
|
|
|
OutStreamer->AddComment(StringRef("eh_return, addr: %") +
|
|
|
|
X86ATTInstPrinter::getRegisterName(Reg));
|
2010-10-27 02:09:55 +08:00
|
|
|
break;
|
|
|
|
}
|
2010-07-09 08:49:41 +08:00
|
|
|
case X86::TAILJMPr:
|
2015-01-31 05:03:31 +08:00
|
|
|
case X86::TAILJMPm:
|
2010-07-09 08:49:41 +08:00
|
|
|
case X86::TAILJMPd:
|
2015-01-31 05:03:31 +08:00
|
|
|
case X86::TAILJMPr64:
|
|
|
|
case X86::TAILJMPm64:
|
2010-07-09 08:49:41 +08:00
|
|
|
case X86::TAILJMPd64:
|
2015-01-31 05:03:31 +08:00
|
|
|
case X86::TAILJMPr64_REX:
|
|
|
|
case X86::TAILJMPm64_REX:
|
|
|
|
case X86::TAILJMPd64_REX:
|
2010-07-09 08:49:41 +08:00
|
|
|
// Lower these as normal, but add some comments.
|
2015-04-25 03:11:51 +08:00
|
|
|
OutStreamer->AddComment("TAILCALL");
|
2010-07-09 08:49:41 +08:00
|
|
|
break;
|
2010-11-29 05:16:39 +08:00
|
|
|
|
|
|
|
case X86::TLS_addr32:
|
|
|
|
case X86::TLS_addr64:
|
2012-06-02 00:27:21 +08:00
|
|
|
case X86::TLS_base_addr32:
|
|
|
|
case X86::TLS_base_addr64:
|
2014-07-25 04:40:55 +08:00
|
|
|
return LowerTlsAddr(MCInstLowering, *MI);
|
2010-11-29 05:16:39 +08:00
|
|
|
|
2009-09-13 04:34:57 +08:00
|
|
|
case X86::MOVPC32r: {
|
|
|
|
// This is a pseudo op for a two instruction sequence with a label, which
|
|
|
|
// looks like:
|
|
|
|
// call "L1$pb"
|
|
|
|
// "L1$pb":
|
|
|
|
// popl %esi
|
2012-08-02 02:39:17 +08:00
|
|
|
|
2009-09-13 04:34:57 +08:00
|
|
|
// Emit the call.
|
2010-11-15 06:48:15 +08:00
|
|
|
MCSymbol *PICBase = MF->getPICBaseSymbol();
|
2009-09-13 04:34:57 +08:00
|
|
|
// FIXME: We would like an efficient form for this, so we don't have to do a
|
|
|
|
// lot of extra uniquing.
|
2014-07-25 04:40:55 +08:00
|
|
|
EmitAndCountInstruction(MCInstBuilder(X86::CALLpcrel32)
|
2012-11-27 02:05:52 +08:00
|
|
|
.addExpr(MCSymbolRefExpr::Create(PICBase, OutContext)));
|
2012-08-02 02:39:17 +08:00
|
|
|
|
2009-09-13 04:34:57 +08:00
|
|
|
// Emit the label.
|
2015-04-25 03:11:51 +08:00
|
|
|
OutStreamer->EmitLabel(PICBase);
|
2012-08-02 02:39:17 +08:00
|
|
|
|
2009-09-13 04:34:57 +08:00
|
|
|
// popl $reg
|
2014-07-25 04:40:55 +08:00
|
|
|
EmitAndCountInstruction(MCInstBuilder(X86::POP32r)
|
|
|
|
.addReg(MI->getOperand(0).getReg()));
|
2009-09-13 04:34:57 +08:00
|
|
|
return;
|
2009-09-13 05:01:20 +08:00
|
|
|
}
|
2012-08-02 02:39:17 +08:00
|
|
|
|
2009-09-13 05:01:20 +08:00
|
|
|
case X86::ADD32ri: {
|
|
|
|
// Lower the MO_GOT_ABSOLUTE_ADDRESS form of ADD32ri.
|
|
|
|
if (MI->getOperand(2).getTargetFlags() != X86II::MO_GOT_ABSOLUTE_ADDRESS)
|
|
|
|
break;
|
2012-08-02 02:39:17 +08:00
|
|
|
|
2009-09-13 05:01:20 +08:00
|
|
|
// Okay, we have something like:
|
|
|
|
// EAX = ADD32ri EAX, MO_GOT_ABSOLUTE_ADDRESS(@MYGLOBAL)
|
2012-08-02 02:39:17 +08:00
|
|
|
|
2009-09-13 05:01:20 +08:00
|
|
|
// For this, we want to print something like:
|
|
|
|
// MYGLOBAL + (. - PICBASE)
|
|
|
|
// However, we can't generate a ".", so just emit a new label here and refer
|
2010-03-13 02:47:50 +08:00
|
|
|
// to it.
|
2010-03-17 13:41:18 +08:00
|
|
|
MCSymbol *DotSym = OutContext.CreateTempSymbol();
|
2015-04-25 03:11:51 +08:00
|
|
|
OutStreamer->EmitLabel(DotSym);
|
2012-08-02 02:39:17 +08:00
|
|
|
|
2009-09-13 05:01:20 +08:00
|
|
|
// Now that we have emitted the label, lower the complex operand expression.
|
2010-02-09 07:03:41 +08:00
|
|
|
MCSymbol *OpSym = MCInstLowering.GetSymbolFromOperand(MI->getOperand(2));
|
2012-08-02 02:39:17 +08:00
|
|
|
|
2009-09-13 05:01:20 +08:00
|
|
|
const MCExpr *DotExpr = MCSymbolRefExpr::Create(DotSym, OutContext);
|
|
|
|
const MCExpr *PICBase =
|
2010-11-15 06:48:15 +08:00
|
|
|
MCSymbolRefExpr::Create(MF->getPICBaseSymbol(), OutContext);
|
2009-09-13 05:01:20 +08:00
|
|
|
DotExpr = MCBinaryExpr::CreateSub(DotExpr, PICBase, OutContext);
|
2012-08-02 02:39:17 +08:00
|
|
|
|
|
|
|
DotExpr = MCBinaryExpr::CreateAdd(MCSymbolRefExpr::Create(OpSym,OutContext),
|
2009-09-13 05:01:20 +08:00
|
|
|
DotExpr, OutContext);
|
2012-08-02 02:39:17 +08:00
|
|
|
|
2014-07-25 04:40:55 +08:00
|
|
|
EmitAndCountInstruction(MCInstBuilder(X86::ADD32ri)
|
2012-11-26 21:34:22 +08:00
|
|
|
.addReg(MI->getOperand(0).getReg())
|
|
|
|
.addReg(MI->getOperand(1).getReg())
|
2012-11-27 02:05:52 +08:00
|
|
|
.addExpr(DotExpr));
|
2009-09-13 05:01:20 +08:00
|
|
|
return;
|
|
|
|
}
|
2014-12-02 06:52:56 +08:00
|
|
|
case TargetOpcode::STATEPOINT:
|
2015-05-07 07:53:26 +08:00
|
|
|
return LowerSTATEPOINT(*MI, MCInstLowering);
|
2014-12-04 13:20:33 +08:00
|
|
|
|
2013-11-01 06:11:56 +08:00
|
|
|
case TargetOpcode::STACKMAP:
|
2014-07-25 04:40:55 +08:00
|
|
|
return LowerSTACKMAP(*MI);
|
2013-11-01 06:11:56 +08:00
|
|
|
|
|
|
|
case TargetOpcode::PATCHPOINT:
|
2015-04-22 14:02:31 +08:00
|
|
|
return LowerPATCHPOINT(*MI, MCInstLowering);
|
2013-11-12 07:00:41 +08:00
|
|
|
|
|
|
|
case X86::MORESTACK_RET:
|
2014-07-25 04:40:55 +08:00
|
|
|
EmitAndCountInstruction(MCInstBuilder(getRetOpcode(*Subtarget)));
|
2013-11-12 07:00:41 +08:00
|
|
|
return;
|
|
|
|
|
|
|
|
case X86::MORESTACK_RET_RESTORE_R10:
|
|
|
|
// Return, then restore R10.
|
2014-07-25 04:40:55 +08:00
|
|
|
EmitAndCountInstruction(MCInstBuilder(getRetOpcode(*Subtarget)));
|
|
|
|
EmitAndCountInstruction(MCInstBuilder(X86::MOV64rr)
|
|
|
|
.addReg(X86::R10)
|
|
|
|
.addReg(X86::RAX));
|
2013-11-12 07:00:41 +08:00
|
|
|
return;
|
2014-06-25 20:41:52 +08:00
|
|
|
|
|
|
|
case X86::SEH_PushReg:
|
2015-04-25 03:11:51 +08:00
|
|
|
OutStreamer->EmitWinCFIPushReg(RI->getSEHRegNum(MI->getOperand(0).getImm()));
|
2014-06-25 20:41:52 +08:00
|
|
|
return;
|
|
|
|
|
|
|
|
case X86::SEH_SaveReg:
|
2015-04-25 03:11:51 +08:00
|
|
|
OutStreamer->EmitWinCFISaveReg(RI->getSEHRegNum(MI->getOperand(0).getImm()),
|
|
|
|
MI->getOperand(1).getImm());
|
2014-06-25 20:41:52 +08:00
|
|
|
return;
|
|
|
|
|
|
|
|
case X86::SEH_SaveXMM:
|
2015-04-25 03:11:51 +08:00
|
|
|
OutStreamer->EmitWinCFISaveXMM(RI->getSEHRegNum(MI->getOperand(0).getImm()),
|
|
|
|
MI->getOperand(1).getImm());
|
2014-06-25 20:41:52 +08:00
|
|
|
return;
|
|
|
|
|
|
|
|
case X86::SEH_StackAlloc:
|
2015-04-25 03:11:51 +08:00
|
|
|
OutStreamer->EmitWinCFIAllocStack(MI->getOperand(0).getImm());
|
2014-06-25 20:41:52 +08:00
|
|
|
return;
|
|
|
|
|
|
|
|
case X86::SEH_SetFrame:
|
2015-04-25 03:11:51 +08:00
|
|
|
OutStreamer->EmitWinCFISetFrame(RI->getSEHRegNum(MI->getOperand(0).getImm()),
|
|
|
|
MI->getOperand(1).getImm());
|
2014-06-25 20:41:52 +08:00
|
|
|
return;
|
|
|
|
|
|
|
|
case X86::SEH_PushFrame:
|
2015-04-25 03:11:51 +08:00
|
|
|
OutStreamer->EmitWinCFIPushFrame(MI->getOperand(0).getImm());
|
2014-06-25 20:41:52 +08:00
|
|
|
return;
|
|
|
|
|
|
|
|
case X86::SEH_EndPrologue:
|
2015-04-25 03:11:51 +08:00
|
|
|
OutStreamer->EmitWinCFIEndProlog();
|
2014-06-25 20:41:52 +08:00
|
|
|
return;
|
2014-07-26 07:47:11 +08:00
|
|
|
|
2014-08-05 05:05:27 +08:00
|
|
|
case X86::SEH_Epilogue: {
|
|
|
|
MachineBasicBlock::const_iterator MBBI(MI);
|
|
|
|
// Check if preceded by a call and emit nop if so.
|
|
|
|
for (MBBI = PrevCrossBBInst(MBBI); MBBI; MBBI = PrevCrossBBInst(MBBI)) {
|
|
|
|
// Conservatively assume that pseudo instructions don't emit code and keep
|
|
|
|
// looking for a call. We may emit an unnecessary nop in some cases.
|
|
|
|
if (!MBBI->isPseudo()) {
|
|
|
|
if (MBBI->isCall())
|
|
|
|
EmitAndCountInstruction(MCInstBuilder(X86::NOOP));
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2014-09-24 10:24:41 +08:00
|
|
|
// Lower PSHUFB and VPERMILP normally but add a comment if we can find
|
|
|
|
// a constant shuffle mask. We won't be able to do this at the MC layer
|
|
|
|
// because the mask isn't an immediate.
|
2014-07-26 07:47:11 +08:00
|
|
|
case X86::PSHUFBrm:
|
2014-09-25 08:24:19 +08:00
|
|
|
case X86::VPSHUFBrm:
|
|
|
|
case X86::VPSHUFBYrm: {
|
2015-04-25 03:11:51 +08:00
|
|
|
if (!OutStreamer->isVerboseAsm())
|
2014-09-24 11:06:37 +08:00
|
|
|
break;
|
|
|
|
assert(MI->getNumOperands() > 5 &&
|
|
|
|
"We should always have at least 5 operands!");
|
|
|
|
const MachineOperand &DstOp = MI->getOperand(0);
|
|
|
|
const MachineOperand &SrcOp = MI->getOperand(1);
|
|
|
|
const MachineOperand &MaskOp = MI->getOperand(5);
|
|
|
|
|
2014-09-24 17:39:41 +08:00
|
|
|
if (auto *C = getConstantFromPool(*MI, MaskOp)) {
|
2014-09-24 11:06:37 +08:00
|
|
|
SmallVector<int, 16> Mask;
|
2015-01-11 15:29:51 +08:00
|
|
|
DecodePSHUFBMask(C, Mask);
|
2014-09-24 11:06:37 +08:00
|
|
|
if (!Mask.empty())
|
2015-04-25 03:11:51 +08:00
|
|
|
OutStreamer->AddComment(getShuffleComment(DstOp, SrcOp, Mask));
|
2014-09-24 11:06:37 +08:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
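// Sketch of the resulting verbose-asm output (hypothetical constant mask and
// label, not tied to a particular test): if the pooled pshufb mask decodes to
// {0, 1, SM_SentinelZero, SM_SentinelZero, ...}, the printed line looks
// something like
//   pshufb LCPI0_0(%rip), %xmm0    # xmm0 = xmm0[0,1],zero,zero,...
// which is the comment the FileCheck-based shuffle tests key off.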
|
2014-09-23 18:08:29 +08:00
|
|
|
case X86::VPERMILPSrm:
|
|
|
|
case X86::VPERMILPDrm:
|
|
|
|
case X86::VPERMILPSYrm:
|
2014-09-24 10:16:12 +08:00
|
|
|
case X86::VPERMILPDYrm: {
|
2015-04-25 03:11:51 +08:00
|
|
|
if (!OutStreamer->isVerboseAsm())
|
2014-09-24 11:06:34 +08:00
|
|
|
break;
|
2014-09-24 10:24:41 +08:00
|
|
|
assert(MI->getNumOperands() > 5 &&
|
|
|
|
"We should always have at least 5 operands!");
|
|
|
|
const MachineOperand &DstOp = MI->getOperand(0);
|
|
|
|
const MachineOperand &SrcOp = MI->getOperand(1);
|
|
|
|
const MachineOperand &MaskOp = MI->getOperand(5);
|
|
|
|
|
2014-09-24 17:39:41 +08:00
|
|
|
if (auto *C = getConstantFromPool(*MI, MaskOp)) {
|
2014-09-24 11:06:37 +08:00
|
|
|
SmallVector<int, 16> Mask;
|
|
|
|
DecodeVPERMILPMask(C, Mask);
|
|
|
|
if (!Mask.empty())
|
2015-04-25 03:11:51 +08:00
|
|
|
OutStreamer->AddComment(getShuffleComment(DstOp, SrcOp, Mask));
|
2014-09-24 11:06:37 +08:00
|
|
|
}
|
2014-07-26 07:47:11 +08:00
|
|
|
break;
|
2009-09-13 04:34:57 +08:00
|
|
|
}
|
2014-09-24 17:39:41 +08:00
|
|
|
|
|
|
|
// For loads from a constant pool to a vector register, print the constant
|
|
|
|
// loaded.
|
|
|
|
case X86::MOVAPDrm:
|
|
|
|
case X86::VMOVAPDrm:
|
|
|
|
case X86::VMOVAPDYrm:
|
|
|
|
case X86::MOVUPDrm:
|
|
|
|
case X86::VMOVUPDrm:
|
|
|
|
case X86::VMOVUPDYrm:
|
|
|
|
case X86::MOVAPSrm:
|
|
|
|
case X86::VMOVAPSrm:
|
|
|
|
case X86::VMOVAPSYrm:
|
|
|
|
case X86::MOVUPSrm:
|
|
|
|
case X86::VMOVUPSrm:
|
|
|
|
case X86::VMOVUPSYrm:
|
|
|
|
case X86::MOVDQArm:
|
|
|
|
case X86::VMOVDQArm:
|
|
|
|
case X86::VMOVDQAYrm:
|
|
|
|
case X86::MOVDQUrm:
|
|
|
|
case X86::VMOVDQUrm:
|
|
|
|
case X86::VMOVDQUYrm:
|
2015-04-25 03:11:51 +08:00
|
|
|
if (!OutStreamer->isVerboseAsm())
|
2014-09-24 17:39:41 +08:00
|
|
|
break;
|
|
|
|
if (MI->getNumOperands() > 4)
|
|
|
|
if (auto *C = getConstantFromPool(*MI, MI->getOperand(4))) {
|
|
|
|
std::string Comment;
|
|
|
|
raw_string_ostream CS(Comment);
|
|
|
|
const MachineOperand &DstOp = MI->getOperand(0);
|
|
|
|
CS << X86ATTInstPrinter::getRegisterName(DstOp.getReg()) << " = ";
|
|
|
|
if (auto *CDS = dyn_cast<ConstantDataSequential>(C)) {
|
|
|
|
CS << "[";
|
|
|
|
for (int i = 0, NumElements = CDS->getNumElements(); i < NumElements; ++i) {
|
|
|
|
if (i != 0)
|
|
|
|
CS << ",";
|
|
|
|
if (CDS->getElementType()->isIntegerTy())
|
|
|
|
CS << CDS->getElementAsInteger(i);
|
|
|
|
else if (CDS->getElementType()->isFloatTy())
|
|
|
|
CS << CDS->getElementAsFloat(i);
|
|
|
|
else if (CDS->getElementType()->isDoubleTy())
|
|
|
|
CS << CDS->getElementAsDouble(i);
|
|
|
|
else
|
|
|
|
CS << "?";
|
|
|
|
}
|
|
|
|
CS << "]";
|
2015-04-25 03:11:51 +08:00
|
|
|
OutStreamer->AddComment(CS.str());
|
2014-09-24 17:39:41 +08:00
|
|
|
} else if (auto *CV = dyn_cast<ConstantVector>(C)) {
|
|
|
|
CS << "<";
|
|
|
|
for (int i = 0, NumOperands = CV->getNumOperands(); i < NumOperands; ++i) {
|
|
|
|
if (i != 0)
|
|
|
|
CS << ",";
|
|
|
|
Constant *COp = CV->getOperand(i);
|
|
|
|
if (isa<UndefValue>(COp)) {
|
|
|
|
CS << "u";
|
|
|
|
} else if (auto *CI = dyn_cast<ConstantInt>(COp)) {
|
|
|
|
CS << CI->getZExtValue();
|
|
|
|
} else if (auto *CF = dyn_cast<ConstantFP>(COp)) {
|
|
|
|
SmallString<32> Str;
|
|
|
|
CF->getValueAPF().toString(Str);
|
|
|
|
CS << Str;
|
|
|
|
} else {
|
|
|
|
CS << "?";
|
|
|
|
}
|
|
|
|
}
|
|
|
|
CS << ">";
|
2015-04-25 03:11:51 +08:00
|
|
|
OutStreamer->AddComment(CS.str());
|
2014-09-24 17:39:41 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
break;
|
2014-09-24 10:16:12 +08:00
|
|
|
}
|
2012-08-02 02:39:17 +08:00
|
|
|
|
2009-09-13 04:34:57 +08:00
|
|
|
MCInst TmpInst;
|
|
|
|
MCInstLowering.Lower(MI, TmpInst);
|
2014-10-28 03:40:35 +08:00
|
|
|
|
|
|
|
// Stackmap shadows cannot include branch targets, so we can count the bytes
|
2014-10-28 06:38:45 +08:00
|
|
|
// in a call towards the shadow, but must ensure that no thread returns
|
|
|
|
// into the stackmap shadow. The only way to achieve this is if the call
|
|
|
|
// is at the end of the shadow.
|
|
|
|
if (MI->isCall()) {
|
|
|
|
// Count the size of the call towards the shadow.
|
|
|
|
SMShadowTracker.count(TmpInst, getSubtargetInfo());
|
|
|
|
// Then flush the shadow so that we fill with nops before the call, not
|
|
|
|
// after it.
|
2015-04-25 03:11:51 +08:00
|
|
|
SMShadowTracker.emitShadowPadding(*OutStreamer, getSubtargetInfo());
|
2014-10-28 06:38:45 +08:00
|
|
|
// Then emit the call
|
2015-04-25 03:11:51 +08:00
|
|
|
OutStreamer->EmitInstruction(TmpInst, getSubtargetInfo());
|
2014-10-28 06:38:45 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
EmitAndCountInstruction(TmpInst);
|
2009-09-03 01:35:12 +08:00
|
|
|
}
|