2009-09-03 01:35:12 +08:00
|
|
|
//===-- X86MCInstLower.cpp - Convert X86 MachineInstr to an MCInst --------===//
|
|
|
|
//
|
|
|
|
// The LLVM Compiler Infrastructure
|
|
|
|
//
|
|
|
|
// This file is distributed under the University of Illinois Open Source
|
|
|
|
// License. See LICENSE.TXT for details.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//
|
|
|
|
// This file contains code to lower X86 MachineInstrs to their corresponding
|
|
|
|
// MCInst records.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2009-09-20 15:41:30 +08:00
|
|
|
#include "X86AsmPrinter.h"
|
2014-06-25 20:41:52 +08:00
|
|
|
#include "X86RegisterInfo.h"
|
2016-01-01 06:40:45 +08:00
|
|
|
#include "X86ShuffleDecodeConstantPool.h"
|
2012-03-18 02:46:09 +08:00
|
|
|
#include "InstPrinter/X86ATTInstPrinter.h"
|
2016-12-28 18:12:48 +08:00
|
|
|
#include "InstPrinter/X86InstComments.h"
|
2014-03-19 14:53:25 +08:00
|
|
|
#include "MCTargetDesc/X86BaseInfo.h"
|
2014-07-26 07:47:11 +08:00
|
|
|
#include "Utils/X86ShuffleDecode.h"
|
2015-06-16 02:44:01 +08:00
|
|
|
#include "llvm/ADT/Optional.h"
|
2012-12-04 00:50:05 +08:00
|
|
|
#include "llvm/ADT/SmallString.h"
|
2016-04-19 13:24:47 +08:00
|
|
|
#include "llvm/ADT/iterator_range.h"
|
2014-03-19 14:53:25 +08:00
|
|
|
#include "llvm/CodeGen/MachineFunction.h"
|
2014-07-26 07:47:11 +08:00
|
|
|
#include "llvm/CodeGen/MachineConstantPool.h"
|
|
|
|
#include "llvm/CodeGen/MachineOperand.h"
|
2009-09-16 14:25:03 +08:00
|
|
|
#include "llvm/CodeGen/MachineModuleInfoImpls.h"
|
2013-11-01 06:11:56 +08:00
|
|
|
#include "llvm/CodeGen/StackMaps.h"
|
2014-03-19 14:53:25 +08:00
|
|
|
#include "llvm/IR/DataLayout.h"
|
|
|
|
#include "llvm/IR/GlobalValue.h"
|
2014-01-08 05:19:40 +08:00
|
|
|
#include "llvm/IR/Mangler.h"
|
2011-07-15 07:50:31 +08:00
|
|
|
#include "llvm/MC/MCAsmInfo.h"
|
2014-07-25 04:40:55 +08:00
|
|
|
#include "llvm/MC/MCCodeEmitter.h"
|
2009-09-03 01:35:12 +08:00
|
|
|
#include "llvm/MC/MCContext.h"
|
|
|
|
#include "llvm/MC/MCExpr.h"
|
2015-05-16 06:19:42 +08:00
|
|
|
#include "llvm/MC/MCFixup.h"
|
2009-09-03 01:35:12 +08:00
|
|
|
#include "llvm/MC/MCInst.h"
|
2012-11-26 21:34:22 +08:00
|
|
|
#include "llvm/MC/MCInstBuilder.h"
|
XRay: Add entry and exit sleds
Summary:
In this patch we implement the following parts of XRay:
- Supporting a function attribute named 'function-instrument' which currently only supports 'xray-always'. We should be able to use this attribute for other instrumentation approaches.
- Supporting a function attribute named 'xray-instruction-threshold' used to determine whether a function is instrumented with a minimum number of instructions (IR instruction counts).
- X86-specific nop sleds as described in the white paper.
- A machine function pass that adds the different instrumentation marker instructions at a very late stage.
- A way of identifying which return opcode is considered "normal" for each architecture.
There are some caveats here:
1) We don't handle PATCHABLE_RET in platforms other than x86_64 yet -- this means if IR used PATCHABLE_RET directly instead of a normal ret, instruction lowering for that platform might do the wrong thing. We think this should be handled at instruction selection time to by default be unpacked for platforms where XRay is not availble yet.
2) The generated section for X86 is different from what is described from the white paper for the sole reason that LLVM allows us to do this neatly. We're taking the opportunity to deviate from the white paper from this perspective to allow us to get richer information from the runtime library.
Reviewers: sanjoy, eugenis, kcc, pcc, echristo, rnk
Subscribers: niravd, majnemer, atrick, rnk, emaste, bmakam, mcrosier, mehdi_amini, llvm-commits
Differential Revision: http://reviews.llvm.org/D19904
llvm-svn: 275367
2016-07-14 12:06:33 +08:00
|
|
|
#include "llvm/MC/MCSection.h"
|
2009-09-03 01:35:12 +08:00
|
|
|
#include "llvm/MC/MCStreamer.h"
|
2010-03-13 03:42:40 +08:00
|
|
|
#include "llvm/MC/MCSymbol.h"
|
XRay: Add entry and exit sleds
Summary:
In this patch we implement the following parts of XRay:
- Supporting a function attribute named 'function-instrument' which currently only supports 'xray-always'. We should be able to use this attribute for other instrumentation approaches.
- Supporting a function attribute named 'xray-instruction-threshold' used to determine whether a function is instrumented with a minimum number of instructions (IR instruction counts).
- X86-specific nop sleds as described in the white paper.
- A machine function pass that adds the different instrumentation marker instructions at a very late stage.
- A way of identifying which return opcode is considered "normal" for each architecture.
There are some caveats here:
1) We don't handle PATCHABLE_RET in platforms other than x86_64 yet -- this means if IR used PATCHABLE_RET directly instead of a normal ret, instruction lowering for that platform might do the wrong thing. We think this should be handled at instruction selection time to by default be unpacked for platforms where XRay is not availble yet.
2) The generated section for X86 is different from what is described from the white paper for the sole reason that LLVM allows us to do this neatly. We're taking the opportunity to deviate from the white paper from this perspective to allow us to get richer information from the runtime library.
Reviewers: sanjoy, eugenis, kcc, pcc, echristo, rnk
Subscribers: niravd, majnemer, atrick, rnk, emaste, bmakam, mcrosier, mehdi_amini, llvm-commits
Differential Revision: http://reviews.llvm.org/D19904
llvm-svn: 275367
2016-07-14 12:06:33 +08:00
|
|
|
#include "llvm/MC/MCSymbolELF.h"
|
|
|
|
#include "llvm/MC/MCSectionELF.h"
|
2016-11-23 10:07:04 +08:00
|
|
|
#include "llvm/MC/MCSectionMachO.h"
|
2014-07-25 04:40:55 +08:00
|
|
|
#include "llvm/Support/TargetRegistry.h"
|
XRay: Add entry and exit sleds
Summary:
In this patch we implement the following parts of XRay:
- Supporting a function attribute named 'function-instrument' which currently only supports 'xray-always'. We should be able to use this attribute for other instrumentation approaches.
- Supporting a function attribute named 'xray-instruction-threshold' used to determine whether a function is instrumented with a minimum number of instructions (IR instruction counts).
- X86-specific nop sleds as described in the white paper.
- A machine function pass that adds the different instrumentation marker instructions at a very late stage.
- A way of identifying which return opcode is considered "normal" for each architecture.
There are some caveats here:
1) We don't handle PATCHABLE_RET in platforms other than x86_64 yet -- this means if IR used PATCHABLE_RET directly instead of a normal ret, instruction lowering for that platform might do the wrong thing. We think this should be handled at instruction selection time to by default be unpacked for platforms where XRay is not availble yet.
2) The generated section for X86 is different from what is described from the white paper for the sole reason that LLVM allows us to do this neatly. We're taking the opportunity to deviate from the white paper from this perspective to allow us to get richer information from the runtime library.
Reviewers: sanjoy, eugenis, kcc, pcc, echristo, rnk
Subscribers: niravd, majnemer, atrick, rnk, emaste, bmakam, mcrosier, mehdi_amini, llvm-commits
Differential Revision: http://reviews.llvm.org/D19904
llvm-svn: 275367
2016-07-14 12:06:33 +08:00
|
|
|
#include "llvm/Support/ELF.h"
|
|
|
|
#include "llvm/Target/TargetLoweringObjectFile.h"
|
|
|
|
|
2009-09-03 01:35:12 +08:00
|
|
|
using namespace llvm;
|
|
|
|
|
2012-10-16 14:01:50 +08:00
|
|
|
namespace {

/// X86MCInstLower - This class is used to lower an MachineInstr into an MCInst.
///
/// It is constructed per MachineFunction and holds the contexts (MCContext,
/// TargetMachine, MCAsmInfo, and the driving X86AsmPrinter) needed to create
/// MC-level symbols and expressions during lowering.
class X86MCInstLower {
  MCContext &Ctx;            // Used to create MCSymbols and MCExprs.
  const MachineFunction &MF; // Function whose instructions are being lowered.
  const TargetMachine &TM;
  const MCAsmInfo &MAI;
  X86AsmPrinter &AsmPrinter; // Provides symbol lookup and the output streamer.
public:
  X86MCInstLower(const MachineFunction &MF, X86AsmPrinter &asmprinter);

  /// Lower a single MachineOperand; returns None for operands that have no
  /// MC-level representation (implicit registers, register masks).
  Optional<MCOperand> LowerMachineOperand(const MachineInstr *MI,
                                          const MachineOperand &MO) const;
  /// Lower MI into OutMI, translating each operand and applying
  /// X86-specific pseudo-instruction expansions and encoding shrinks.
  void Lower(const MachineInstr *MI, MCInst &OutMI) const;

  /// Map a global-address / external-symbol / basic-block operand to the
  /// MCSymbol it should reference (applying stub/dllimport name mangling).
  MCSymbol *GetSymbolFromOperand(const MachineOperand &MO) const;
  /// Wrap Sym in the MCExpr dictated by MO's target flags (PIC offsets,
  /// TLS variants, etc.) and return it as an MCOperand.
  MCOperand LowerSymbolOperand(const MachineOperand &MO, MCSymbol *Sym) const;

private:
  // Accessor for the Mach-O object-file info (lazily created on first use).
  MachineModuleInfoMachO &getMachOMMI() const;
};

} // end anonymous namespace
|
|
|
|
|
2014-07-25 04:40:55 +08:00
|
|
|
// Emit a minimal sequence of nops spanning NumBytes bytes.
// (Forward declaration; the definition appears later in this file. Declared
// here so the stackmap shadow tracker below can pad with nops.)
static void EmitNops(MCStreamer &OS, unsigned NumBytes, bool Is64Bit,
                     const MCSubtargetInfo &STI);
|
2014-07-25 04:40:55 +08:00
|
|
|
|
2016-04-20 02:48:16 +08:00
|
|
|
/// Account for one emitted instruction while inside a stackmap shadow.
/// Encodes Inst to measure its byte size and stops tracking once the
/// required shadow length has been covered by real instructions.
void X86AsmPrinter::StackMapShadowTracker::count(MCInst &Inst,
                                                 const MCSubtargetInfo &STI,
                                                 MCCodeEmitter *CodeEmitter) {
  if (!InShadow)
    return;

  // Encode the instruction into a scratch buffer purely to learn its size;
  // the bytes and fixups are discarded.
  SmallString<256> EncodedBytes;
  SmallVector<MCFixup, 4> ScratchFixups;
  raw_svector_ostream EncodedOS(EncodedBytes);
  CodeEmitter->encodeInstruction(Inst, EncodedOS, ScratchFixups, STI);

  CurrentShadowSize += EncodedBytes.size();
  if (CurrentShadowSize >= RequiredShadowSize)
    InShadow = false; // The shadow is big enough. Stop counting.
}
|
2014-07-25 04:40:55 +08:00
|
|
|
|
2016-04-20 02:48:16 +08:00
|
|
|
void X86AsmPrinter::StackMapShadowTracker::emitShadowPadding(
|
2014-07-25 04:40:55 +08:00
|
|
|
MCStreamer &OutStreamer, const MCSubtargetInfo &STI) {
|
2016-04-20 02:48:16 +08:00
|
|
|
if (InShadow && CurrentShadowSize < RequiredShadowSize) {
|
|
|
|
InShadow = false;
|
|
|
|
EmitNops(OutStreamer, RequiredShadowSize - CurrentShadowSize,
|
|
|
|
MF->getSubtarget<X86Subtarget>().is64Bit(), STI);
|
2014-07-25 04:40:55 +08:00
|
|
|
}
|
2016-04-20 02:48:16 +08:00
|
|
|
}
|
2014-07-25 04:40:55 +08:00
|
|
|
|
2016-04-20 02:48:16 +08:00
|
|
|
/// Emit Inst to the output streamer and record its size against any active
/// stackmap shadow so later padding knows how many bytes remain.
void X86AsmPrinter::EmitAndCountInstruction(MCInst &Inst) {
  const MCSubtargetInfo &STI = getSubtargetInfo();
  OutStreamer->EmitInstruction(Inst, STI);
  SMShadowTracker.count(Inst, STI, CodeEmitter.get());
}
|
2014-07-25 04:40:55 +08:00
|
|
|
|
2013-10-30 00:11:22 +08:00
|
|
|
/// Bind the lowering helper to one MachineFunction; the MC context, target
/// machine, and asm info are all derived from the function itself.
X86MCInstLower::X86MCInstLower(const MachineFunction &mf,
                               X86AsmPrinter &asmprinter)
    : Ctx(mf.getContext()), MF(mf), TM(mf.getTarget()), MAI(*TM.getMCAsmInfo()),
      AsmPrinter(asmprinter) {}
|
2009-09-13 04:34:57 +08:00
|
|
|
|
2009-09-16 14:25:03 +08:00
|
|
|
/// Return the Mach-O object-file info for this module, used to register
/// non-lazy ($non_lazy_ptr) stub entries.
MachineModuleInfoMachO &X86MCInstLower::getMachOMMI() const {
  return MF.getMMI().getObjFileInfo<MachineModuleInfoMachO>();
}
|
|
|
|
|
2009-09-13 04:34:57 +08:00
|
|
|
|
2010-02-09 07:03:41 +08:00
|
|
|
/// GetSymbolFromOperand - Lower an MO_GlobalAddress or MO_ExternalSymbol
/// operand to an MCSymbol.
///
/// The operand's target flags may alter the symbol name (a "__imp_" prefix
/// for dllimport, a "$non_lazy_ptr" suffix for Darwin non-lazy pointers) and,
/// for the Darwin non-lazy cases, register a stub entry with the Mach-O
/// module info so the pointer is materialized at the end of the file.
MCSymbol *X86MCInstLower::
GetSymbolFromOperand(const MachineOperand &MO) const {
  const DataLayout &DL = MF.getDataLayout();
  assert((MO.isGlobal() || MO.isSymbol() || MO.isMBB()) && "Isn't a symbol reference");

  MCSymbol *Sym = nullptr;
  SmallString<128> Name;
  StringRef Suffix;

  // First pass over the flags: collect any name prefix/suffix. Flags not
  // listed here leave the name unchanged.
  switch (MO.getTargetFlags()) {
  case X86II::MO_DLLIMPORT:
    // Handle dllimport linkage.
    Name += "__imp_";
    break;
  case X86II::MO_DARWIN_NONLAZY:
  case X86II::MO_DARWIN_NONLAZY_PIC_BASE:
    Suffix = "$non_lazy_ptr";
    break;
  }

  // Suffixed (stub) symbols get the private-global prefix so the label stays
  // assembler-local rather than becoming an external symbol.
  if (!Suffix.empty())
    Name += DL.getPrivateGlobalPrefix();

  if (MO.isGlobal()) {
    const GlobalValue *GV = MO.getGlobal();
    AsmPrinter.getNameWithPrefix(Name, GV);
  } else if (MO.isSymbol()) {
    Mangler::getNameWithPrefix(Name, MO.getSymbolName(), DL);
  } else if (MO.isMBB()) {
    // Basic-block labels already have a symbol; no renaming is supported.
    assert(Suffix.empty());
    Sym = MO.getMBB()->getSymbol();
  }

  Name += Suffix;
  if (!Sym)
    Sym = Ctx.getOrCreateSymbol(Name);

  // If the target flags on the operand changes the name of the symbol, do that
  // before we return the symbol.
  switch (MO.getTargetFlags()) {
  default: break;
  case X86II::MO_DARWIN_NONLAZY:
  case X86II::MO_DARWIN_NONLAZY_PIC_BASE: {
    // Record (once) which real global this $non_lazy_ptr stub points at; the
    // boolean marks whether the target symbol is externally visible.
    MachineModuleInfoImpl::StubValueTy &StubSym =
      getMachOMMI().getGVStubEntry(Sym);
    if (!StubSym.getPointer()) {
      assert(MO.isGlobal() && "Extern symbol not handled yet");
      StubSym =
        MachineModuleInfoImpl::
        StubValueTy(AsmPrinter.getSymbol(MO.getGlobal()),
                    !MO.getGlobal()->hasInternalLinkage());
    }
    break;
  }
  }

  return Sym;
}
|
|
|
|
|
2009-09-13 04:34:57 +08:00
|
|
|
/// Wrap Sym in an MCExpr according to MO's target flags and return it as an
/// MCOperand. Most flags map to a MCSymbolRefExpr variant kind (TLS, GOT,
/// PLT, ...); the PIC-base flags instead build a "Sym - picbase" subtraction,
/// and any non-zero operand offset is added at the end.
MCOperand X86MCInstLower::LowerSymbolOperand(const MachineOperand &MO,
                                             MCSymbol *Sym) const {
  // FIXME: We would like an efficient form for this, so we don't have to do a
  // lot of extra uniquing.
  const MCExpr *Expr = nullptr;
  MCSymbolRefExpr::VariantKind RefKind = MCSymbolRefExpr::VK_None;

  switch (MO.getTargetFlags()) {
  default: llvm_unreachable("Unknown target flag on GV operand");
  case X86II::MO_NO_FLAG:    // No flag.
  // These affect the name of the symbol, not any suffix.
  case X86II::MO_DARWIN_NONLAZY:
  case X86II::MO_DLLIMPORT:
    break;

  case X86II::MO_TLVP:      RefKind = MCSymbolRefExpr::VK_TLVP; break;
  case X86II::MO_TLVP_PIC_BASE:
    Expr = MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_TLVP, Ctx);
    // Subtract the pic base.
    Expr = MCBinaryExpr::createSub(Expr,
                                  MCSymbolRefExpr::create(MF.getPICBaseSymbol(),
                                                          Ctx),
                                   Ctx);
    break;
  case X86II::MO_SECREL:    RefKind = MCSymbolRefExpr::VK_SECREL; break;
  case X86II::MO_TLSGD:     RefKind = MCSymbolRefExpr::VK_TLSGD; break;
  case X86II::MO_TLSLD:     RefKind = MCSymbolRefExpr::VK_TLSLD; break;
  case X86II::MO_TLSLDM:    RefKind = MCSymbolRefExpr::VK_TLSLDM; break;
  case X86II::MO_GOTTPOFF:  RefKind = MCSymbolRefExpr::VK_GOTTPOFF; break;
  case X86II::MO_INDNTPOFF: RefKind = MCSymbolRefExpr::VK_INDNTPOFF; break;
  case X86II::MO_TPOFF:     RefKind = MCSymbolRefExpr::VK_TPOFF; break;
  case X86II::MO_DTPOFF:    RefKind = MCSymbolRefExpr::VK_DTPOFF; break;
  case X86II::MO_NTPOFF:    RefKind = MCSymbolRefExpr::VK_NTPOFF; break;
  case X86II::MO_GOTNTPOFF: RefKind = MCSymbolRefExpr::VK_GOTNTPOFF; break;
  case X86II::MO_GOTPCREL:  RefKind = MCSymbolRefExpr::VK_GOTPCREL; break;
  case X86II::MO_GOT:       RefKind = MCSymbolRefExpr::VK_GOT; break;
  case X86II::MO_GOTOFF:    RefKind = MCSymbolRefExpr::VK_GOTOFF; break;
  case X86II::MO_PLT:       RefKind = MCSymbolRefExpr::VK_PLT; break;
  case X86II::MO_ABS8:      RefKind = MCSymbolRefExpr::VK_X86_ABS8; break;
  case X86II::MO_PIC_BASE_OFFSET:
  case X86II::MO_DARWIN_NONLAZY_PIC_BASE:
    Expr = MCSymbolRefExpr::create(Sym, Ctx);
    // Subtract the pic base.
    Expr = MCBinaryExpr::createSub(Expr,
                            MCSymbolRefExpr::create(MF.getPICBaseSymbol(), Ctx),
                                   Ctx);
    if (MO.isJTI()) {
      assert(MAI.doesSetDirectiveSuppressReloc());
      // If .set directive is supported, use it to reduce the number of
      // relocations the assembler will generate for differences between
      // local labels. This is only safe when the symbols are in the same
      // section so we are restricting it to jumptable references.
      MCSymbol *Label = Ctx.createTempSymbol();
      AsmPrinter.OutStreamer->EmitAssignment(Label, Expr);
      Expr = MCSymbolRefExpr::create(Label, Ctx);
    }
    break;
  }

  // Flags that only set RefKind fall through to here: build the plain
  // symbol reference with that variant.
  if (!Expr)
    Expr = MCSymbolRefExpr::create(Sym, RefKind, Ctx);

  // JT indices and basic blocks carry no meaningful offset; for everything
  // else fold a non-zero operand offset into the expression.
  if (!MO.isJTI() && !MO.isMBB() && MO.getOffset())
    Expr = MCBinaryExpr::createAdd(Expr,
                                   MCConstantExpr::create(MO.getOffset(), Ctx),
                                   Ctx);
  return MCOperand::createExpr(Expr);
}
|
|
|
|
|
2009-09-11 12:28:13 +08:00
|
|
|
|
2010-05-19 01:22:24 +08:00
|
|
|
/// \brief Simplify FOO $imm, %{al,ax,eax,rax} to FOO $imm, for instruction with
|
|
|
|
/// a short fixed-register form.
|
|
|
|
static void SimplifyShortImmForm(MCInst &Inst, unsigned Opcode) {
|
|
|
|
unsigned ImmOp = Inst.getNumOperands() - 1;
|
2012-02-12 01:26:53 +08:00
|
|
|
assert(Inst.getOperand(0).isReg() &&
|
|
|
|
(Inst.getOperand(ImmOp).isImm() || Inst.getOperand(ImmOp).isExpr()) &&
|
2010-05-19 01:22:24 +08:00
|
|
|
((Inst.getNumOperands() == 3 && Inst.getOperand(1).isReg() &&
|
|
|
|
Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) ||
|
|
|
|
Inst.getNumOperands() == 2) && "Unexpected instruction!");
|
|
|
|
|
|
|
|
// Check whether the destination register can be fixed.
|
|
|
|
unsigned Reg = Inst.getOperand(0).getReg();
|
|
|
|
if (Reg != X86::AL && Reg != X86::AX && Reg != X86::EAX && Reg != X86::RAX)
|
|
|
|
return;
|
|
|
|
|
|
|
|
// If so, rewrite the instruction.
|
2010-05-19 14:20:44 +08:00
|
|
|
MCOperand Saved = Inst.getOperand(ImmOp);
|
|
|
|
Inst = MCInst();
|
|
|
|
Inst.setOpcode(Opcode);
|
|
|
|
Inst.addOperand(Saved);
|
|
|
|
}
|
|
|
|
|
2013-07-13 02:06:44 +08:00
|
|
|
/// \brief If a movsx instruction has a shorter encoding for the used register
|
|
|
|
/// simplify the instruction to use it instead.
|
|
|
|
static void SimplifyMOVSX(MCInst &Inst) {
|
|
|
|
unsigned NewOpcode = 0;
|
|
|
|
unsigned Op0 = Inst.getOperand(0).getReg(), Op1 = Inst.getOperand(1).getReg();
|
|
|
|
switch (Inst.getOpcode()) {
|
|
|
|
default:
|
|
|
|
llvm_unreachable("Unexpected instruction!");
|
|
|
|
case X86::MOVSX16rr8: // movsbw %al, %ax --> cbtw
|
|
|
|
if (Op0 == X86::AX && Op1 == X86::AL)
|
|
|
|
NewOpcode = X86::CBW;
|
|
|
|
break;
|
|
|
|
case X86::MOVSX32rr16: // movswl %ax, %eax --> cwtl
|
|
|
|
if (Op0 == X86::EAX && Op1 == X86::AX)
|
|
|
|
NewOpcode = X86::CWDE;
|
|
|
|
break;
|
|
|
|
case X86::MOVSX64rr32: // movslq %eax, %rax --> cltq
|
|
|
|
if (Op0 == X86::RAX && Op1 == X86::EAX)
|
|
|
|
NewOpcode = X86::CDQE;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (NewOpcode != 0) {
|
|
|
|
Inst = MCInst();
|
|
|
|
Inst.setOpcode(NewOpcode);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2010-05-19 14:20:44 +08:00
|
|
|
/// \brief Simplify things like MOV32rm to MOV32o32a.
///
/// Rewrites accumulator loads/stores from/to an absolute address into the
/// short "moffs" encoding (opcode + address, no ModRM byte). Only applies in
/// 32-bit mode, only for the accumulator, and only when the memory operand is
/// a plain absolute address (no base/index register, scale 1).
static void SimplifyShortMoveForm(X86AsmPrinter &Printer, MCInst &Inst,
                                  unsigned Opcode) {
  // Don't make these simplifications in 64-bit mode; other assemblers don't
  // perform them because they make the code larger.
  if (Printer.getSubtarget().is64Bit())
    return;

  // A store has (mem, reg) operand order, a load has (reg, mem); detect which
  // by checking whether the first two operands are both registers (the 5-part
  // memory operand starts with a register either way).
  bool IsStore = Inst.getOperand(0).isReg() && Inst.getOperand(1).isReg();
  unsigned AddrBase = IsStore;     // Index where the memory operand begins.
  unsigned RegOp = IsStore ? 0 : 5; // Index of the data register operand.
  unsigned AddrOp = AddrBase + 3;   // Index of the displacement operand.
  assert(Inst.getNumOperands() == 6 && Inst.getOperand(RegOp).isReg() &&
         Inst.getOperand(AddrBase + X86::AddrBaseReg).isReg() &&
         Inst.getOperand(AddrBase + X86::AddrScaleAmt).isImm() &&
         Inst.getOperand(AddrBase + X86::AddrIndexReg).isReg() &&
         Inst.getOperand(AddrBase + X86::AddrSegmentReg).isReg() &&
         (Inst.getOperand(AddrOp).isExpr() ||
          Inst.getOperand(AddrOp).isImm()) &&
         "Unexpected instruction!");

  // Check whether the destination register can be fixed.
  unsigned Reg = Inst.getOperand(RegOp).getReg();
  if (Reg != X86::AL && Reg != X86::AX && Reg != X86::EAX && Reg != X86::RAX)
    return;

  // Check whether this is an absolute address.
  // FIXME: We know TLVP symbol refs aren't, but there should be a better way
  // to do this here.
  bool Absolute = true;
  if (Inst.getOperand(AddrOp).isExpr()) {
    const MCExpr *MCE = Inst.getOperand(AddrOp).getExpr();
    if (const MCSymbolRefExpr *SRE = dyn_cast<MCSymbolRefExpr>(MCE))
      if (SRE->getKind() == MCSymbolRefExpr::VK_TLVP)
        Absolute = false;
  }

  // Bail if any base/index addressing is involved; the short form encodes
  // only a bare displacement.
  if (Absolute &&
      (Inst.getOperand(AddrBase + X86::AddrBaseReg).getReg() != 0 ||
       Inst.getOperand(AddrBase + X86::AddrScaleAmt).getImm() != 1 ||
       Inst.getOperand(AddrBase + X86::AddrIndexReg).getReg() != 0))
    return;

  // If so, rewrite the instruction.
  MCOperand Saved = Inst.getOperand(AddrOp);
  MCOperand Seg = Inst.getOperand(AddrBase + X86::AddrSegmentReg);
  Inst = MCInst();
  Inst.setOpcode(Opcode);
  Inst.addOperand(Saved);
  Inst.addOperand(Seg);
}
|
2009-09-13 04:34:57 +08:00
|
|
|
|
2014-12-04 13:20:33 +08:00
|
|
|
static unsigned getRetOpcode(const X86Subtarget &Subtarget) {
|
|
|
|
return Subtarget.is64Bit() ? X86::RETQ : X86::RETL;
|
2014-01-08 20:58:07 +08:00
|
|
|
}
|
|
|
|
|
2015-06-16 02:44:01 +08:00
|
|
|
/// Translate one MachineOperand into its MCOperand equivalent.
///
/// Registers and immediates map directly; all symbol-like operand kinds
/// (blocks, globals, external symbols, JT/CP indices, block addresses) are
/// routed through LowerSymbolOperand. Returns None for operands that are
/// deliberately dropped from the MCInst (implicit registers, register masks).
Optional<MCOperand>
X86MCInstLower::LowerMachineOperand(const MachineInstr *MI,
                                    const MachineOperand &MO) const {
  switch (MO.getType()) {
  default:
    // Dump the offending instruction before dying to aid debugging.
    MI->print(errs());
    llvm_unreachable("unknown operand type");
  case MachineOperand::MO_Register:
    // Ignore all implicit register operands.
    if (MO.isImplicit())
      return None;
    return MCOperand::createReg(MO.getReg());
  case MachineOperand::MO_Immediate:
    return MCOperand::createImm(MO.getImm());
  case MachineOperand::MO_MachineBasicBlock:
  case MachineOperand::MO_GlobalAddress:
  case MachineOperand::MO_ExternalSymbol:
    return LowerSymbolOperand(MO, GetSymbolFromOperand(MO));
  case MachineOperand::MO_MCSymbol:
    return LowerSymbolOperand(MO, MO.getMCSymbol());
  case MachineOperand::MO_JumpTableIndex:
    return LowerSymbolOperand(MO, AsmPrinter.GetJTISymbol(MO.getIndex()));
  case MachineOperand::MO_ConstantPoolIndex:
    return LowerSymbolOperand(MO, AsmPrinter.GetCPISymbol(MO.getIndex()));
  case MachineOperand::MO_BlockAddress:
    return LowerSymbolOperand(
        MO, AsmPrinter.GetBlockAddressSymbol(MO.getBlockAddress()));
  case MachineOperand::MO_RegisterMask:
    // Ignore call clobbers.
    return None;
  }
}
|
|
|
|
|
2009-09-13 04:34:57 +08:00
|
|
|
void X86MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
|
|
|
|
OutMI.setOpcode(MI->getOpcode());
|
2012-08-02 02:39:17 +08:00
|
|
|
|
2015-06-16 02:44:01 +08:00
|
|
|
for (const MachineOperand &MO : MI->operands())
|
|
|
|
if (auto MaybeMCOp = LowerMachineOperand(MI, MO))
|
|
|
|
OutMI.addOperand(MaybeMCOp.getValue());
|
2012-08-02 02:39:17 +08:00
|
|
|
|
2009-09-11 12:28:13 +08:00
|
|
|
// Handle a few special cases to eliminate operand modifiers.
|
2010-10-08 11:54:52 +08:00
|
|
|
ReSimplify:
|
2009-09-13 04:34:57 +08:00
|
|
|
switch (OutMI.getOpcode()) {
|
2013-06-11 04:43:49 +08:00
|
|
|
case X86::LEA64_32r:
|
2010-07-09 07:46:44 +08:00
|
|
|
case X86::LEA64r:
|
|
|
|
case X86::LEA16r:
|
|
|
|
case X86::LEA32r:
|
|
|
|
// LEA should have a segment register, but it must be empty.
|
|
|
|
assert(OutMI.getNumOperands() == 1+X86::AddrNumOperands &&
|
|
|
|
"Unexpected # of LEA operands");
|
|
|
|
assert(OutMI.getOperand(1+X86::AddrSegmentReg).getReg() == 0 &&
|
|
|
|
"LEA has segment specified!");
|
2009-09-03 01:35:12 +08:00
|
|
|
break;
|
2010-02-06 05:30:49 +08:00
|
|
|
|
2013-03-14 15:09:57 +08:00
|
|
|
// Commute operands to get a smaller encoding by using VEX.R instead of VEX.B
|
|
|
|
// if one of the registers is extended, but other isn't.
|
2015-10-12 12:57:59 +08:00
|
|
|
case X86::VMOVZPQILo2PQIrr:
|
2013-03-14 15:09:57 +08:00
|
|
|
case X86::VMOVAPDrr:
|
|
|
|
case X86::VMOVAPDYrr:
|
|
|
|
case X86::VMOVAPSrr:
|
|
|
|
case X86::VMOVAPSYrr:
|
|
|
|
case X86::VMOVDQArr:
|
|
|
|
case X86::VMOVDQAYrr:
|
|
|
|
case X86::VMOVDQUrr:
|
|
|
|
case X86::VMOVDQUYrr:
|
|
|
|
case X86::VMOVUPDrr:
|
|
|
|
case X86::VMOVUPDYrr:
|
|
|
|
case X86::VMOVUPSrr:
|
|
|
|
case X86::VMOVUPSYrr: {
|
2013-03-16 11:44:31 +08:00
|
|
|
if (!X86II::isX86_64ExtendedReg(OutMI.getOperand(0).getReg()) &&
|
|
|
|
X86II::isX86_64ExtendedReg(OutMI.getOperand(1).getReg())) {
|
|
|
|
unsigned NewOpc;
|
|
|
|
switch (OutMI.getOpcode()) {
|
|
|
|
default: llvm_unreachable("Invalid opcode");
|
2015-10-12 12:57:59 +08:00
|
|
|
case X86::VMOVZPQILo2PQIrr: NewOpc = X86::VMOVPQI2QIrr; break;
|
|
|
|
case X86::VMOVAPDrr: NewOpc = X86::VMOVAPDrr_REV; break;
|
|
|
|
case X86::VMOVAPDYrr: NewOpc = X86::VMOVAPDYrr_REV; break;
|
|
|
|
case X86::VMOVAPSrr: NewOpc = X86::VMOVAPSrr_REV; break;
|
|
|
|
case X86::VMOVAPSYrr: NewOpc = X86::VMOVAPSYrr_REV; break;
|
|
|
|
case X86::VMOVDQArr: NewOpc = X86::VMOVDQArr_REV; break;
|
|
|
|
case X86::VMOVDQAYrr: NewOpc = X86::VMOVDQAYrr_REV; break;
|
|
|
|
case X86::VMOVDQUrr: NewOpc = X86::VMOVDQUrr_REV; break;
|
|
|
|
case X86::VMOVDQUYrr: NewOpc = X86::VMOVDQUYrr_REV; break;
|
|
|
|
case X86::VMOVUPDrr: NewOpc = X86::VMOVUPDrr_REV; break;
|
|
|
|
case X86::VMOVUPDYrr: NewOpc = X86::VMOVUPDYrr_REV; break;
|
|
|
|
case X86::VMOVUPSrr: NewOpc = X86::VMOVUPSrr_REV; break;
|
|
|
|
case X86::VMOVUPSYrr: NewOpc = X86::VMOVUPSYrr_REV; break;
|
2013-03-16 11:44:31 +08:00
|
|
|
}
|
|
|
|
OutMI.setOpcode(NewOpc);
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
case X86::VMOVSDrr:
|
|
|
|
case X86::VMOVSSrr: {
|
|
|
|
if (!X86II::isX86_64ExtendedReg(OutMI.getOperand(0).getReg()) &&
|
|
|
|
X86II::isX86_64ExtendedReg(OutMI.getOperand(2).getReg())) {
|
|
|
|
unsigned NewOpc;
|
|
|
|
switch (OutMI.getOpcode()) {
|
|
|
|
default: llvm_unreachable("Invalid opcode");
|
|
|
|
case X86::VMOVSDrr: NewOpc = X86::VMOVSDrr_REV; break;
|
|
|
|
case X86::VMOVSSrr: NewOpc = X86::VMOVSSrr_REV; break;
|
|
|
|
}
|
|
|
|
OutMI.setOpcode(NewOpc);
|
2013-03-14 15:09:57 +08:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2012-02-17 01:56:02 +08:00
|
|
|
// TAILJMPr64, CALL64r, CALL64pcrel32 - These instructions have register
|
|
|
|
// inputs modeled as normal uses instead of implicit uses. As such, truncate
|
|
|
|
// off all but the first operand (the callee). FIXME: Change isel.
|
2010-05-19 16:07:12 +08:00
|
|
|
case X86::TAILJMPr64:
|
2015-01-31 05:03:31 +08:00
|
|
|
case X86::TAILJMPr64_REX:
|
2010-05-19 12:31:36 +08:00
|
|
|
case X86::CALL64r:
|
2012-02-17 01:56:02 +08:00
|
|
|
case X86::CALL64pcrel32: {
|
2010-05-19 12:31:36 +08:00
|
|
|
unsigned Opcode = OutMI.getOpcode();
|
2010-05-19 05:40:18 +08:00
|
|
|
MCOperand Saved = OutMI.getOperand(0);
|
|
|
|
OutMI = MCInst();
|
2010-05-19 12:31:36 +08:00
|
|
|
OutMI.setOpcode(Opcode);
|
2010-05-19 05:40:18 +08:00
|
|
|
OutMI.addOperand(Saved);
|
|
|
|
break;
|
|
|
|
}
|
2010-05-19 12:31:36 +08:00
|
|
|
|
2010-10-27 02:09:55 +08:00
|
|
|
case X86::EH_RETURN:
|
|
|
|
case X86::EH_RETURN64: {
|
|
|
|
OutMI = MCInst();
|
2014-01-08 20:58:07 +08:00
|
|
|
OutMI.setOpcode(getRetOpcode(AsmPrinter.getSubtarget()));
|
2010-10-27 02:09:55 +08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2015-10-02 02:44:59 +08:00
|
|
|
case X86::CLEANUPRET: {
|
|
|
|
// Replace CATCHRET with the appropriate RET.
|
|
|
|
OutMI = MCInst();
|
|
|
|
OutMI.setOpcode(getRetOpcode(AsmPrinter.getSubtarget()));
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
case X86::CATCHRET: {
|
|
|
|
// Replace CATCHRET with the appropriate RET.
|
|
|
|
const X86Subtarget &Subtarget = AsmPrinter.getSubtarget();
|
|
|
|
unsigned ReturnReg = Subtarget.is64Bit() ? X86::RAX : X86::EAX;
|
|
|
|
OutMI = MCInst();
|
|
|
|
OutMI.setOpcode(getRetOpcode(Subtarget));
|
|
|
|
OutMI.addOperand(MCOperand::createReg(ReturnReg));
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2017-02-08 04:37:45 +08:00
|
|
|
// TAILJMPd, TAILJMPd64 - Lower to the correct jump instruction.
|
X86: Fold tail calls into conditional branches where possible (PR26302)
When branching to a block that immediately tail calls, it is possible to fold
the call directly into the branch if the call is direct and there is no stack
adjustment, saving one byte.
Example:
define void @f(i32 %x, i32 %y) {
entry:
%p = icmp eq i32 %x, %y
br i1 %p, label %bb1, label %bb2
bb1:
tail call void @foo()
ret void
bb2:
tail call void @bar()
ret void
}
before:
f:
movl 4(%esp), %eax
cmpl 8(%esp), %eax
jne .LBB0_2
jmp foo
.LBB0_2:
jmp bar
after:
f:
movl 4(%esp), %eax
cmpl 8(%esp), %eax
jne bar
.LBB0_1:
jmp foo
I don't expect any significant size savings from this (on a Clang bootstrap I
saw 288 bytes), but it does make the code a little tighter.
This patch only does 32-bit, but 64-bit would work similarly.
Differential Revision: https://reviews.llvm.org/D24108
llvm-svn: 280832
2016-09-08 01:52:14 +08:00
|
|
|
{ unsigned Opcode;
|
|
|
|
case X86::TAILJMPr: Opcode = X86::JMP32r; goto SetTailJmpOpcode;
|
2010-05-19 23:26:43 +08:00
|
|
|
case X86::TAILJMPd:
|
X86: Fold tail calls into conditional branches where possible (PR26302)
When branching to a block that immediately tail calls, it is possible to fold
the call directly into the branch if the call is direct and there is no stack
adjustment, saving one byte.
Example:
define void @f(i32 %x, i32 %y) {
entry:
%p = icmp eq i32 %x, %y
br i1 %p, label %bb1, label %bb2
bb1:
tail call void @foo()
ret void
bb2:
tail call void @bar()
ret void
}
before:
f:
movl 4(%esp), %eax
cmpl 8(%esp), %eax
jne .LBB0_2
jmp foo
.LBB0_2:
jmp bar
after:
f:
movl 4(%esp), %eax
cmpl 8(%esp), %eax
jne bar
.LBB0_1:
jmp foo
I don't expect any significant size savings from this (on a Clang bootstrap I
saw 288 bytes), but it does make the code a little tighter.
This patch only does 32-bit, but 64-bit would work similarly.
Differential Revision: https://reviews.llvm.org/D24108
llvm-svn: 280832
2016-09-08 01:52:14 +08:00
|
|
|
case X86::TAILJMPd64: Opcode = X86::JMP_1; goto SetTailJmpOpcode;
|
2012-08-02 02:39:17 +08:00
|
|
|
|
X86: Fold tail calls into conditional branches where possible (PR26302)
When branching to a block that immediately tail calls, it is possible to fold
the call directly into the branch if the call is direct and there is no stack
adjustment, saving one byte.
Example:
define void @f(i32 %x, i32 %y) {
entry:
%p = icmp eq i32 %x, %y
br i1 %p, label %bb1, label %bb2
bb1:
tail call void @foo()
ret void
bb2:
tail call void @bar()
ret void
}
before:
f:
movl 4(%esp), %eax
cmpl 8(%esp), %eax
jne .LBB0_2
jmp foo
.LBB0_2:
jmp bar
after:
f:
movl 4(%esp), %eax
cmpl 8(%esp), %eax
jne bar
.LBB0_1:
jmp foo
I don't expect any significant size savings from this (on a Clang bootstrap I
saw 288 bytes), but it does make the code a little tighter.
This patch only does 32-bit, but 64-bit would work similarly.
Differential Revision: https://reviews.llvm.org/D24108
llvm-svn: 280832
2016-09-08 01:52:14 +08:00
|
|
|
SetTailJmpOpcode:
|
2010-05-19 23:26:43 +08:00
|
|
|
MCOperand Saved = OutMI.getOperand(0);
|
|
|
|
OutMI = MCInst();
|
2010-07-09 08:49:41 +08:00
|
|
|
OutMI.setOpcode(Opcode);
|
2010-05-19 23:26:43 +08:00
|
|
|
OutMI.addOperand(Saved);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2015-01-06 15:35:50 +08:00
|
|
|
case X86::DEC16r:
|
|
|
|
case X86::DEC32r:
|
|
|
|
case X86::INC16r:
|
|
|
|
case X86::INC32r:
|
|
|
|
// If we aren't in 64-bit mode we can use the 1-byte inc/dec instructions.
|
|
|
|
if (!AsmPrinter.getSubtarget().is64Bit()) {
|
|
|
|
unsigned Opcode;
|
|
|
|
switch (OutMI.getOpcode()) {
|
|
|
|
default: llvm_unreachable("Invalid opcode");
|
|
|
|
case X86::DEC16r: Opcode = X86::DEC16r_alt; break;
|
|
|
|
case X86::DEC32r: Opcode = X86::DEC32r_alt; break;
|
|
|
|
case X86::INC16r: Opcode = X86::INC16r_alt; break;
|
|
|
|
case X86::INC32r: Opcode = X86::INC32r_alt; break;
|
|
|
|
}
|
|
|
|
OutMI.setOpcode(Opcode);
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
2010-10-08 11:54:52 +08:00
|
|
|
// These are pseudo-ops for OR to help with the OR->ADD transformation. We do
|
|
|
|
// this with an ugly goto in case the resultant OR uses EAX and needs the
|
|
|
|
// short form.
|
2010-10-08 11:57:25 +08:00
|
|
|
case X86::ADD16rr_DB: OutMI.setOpcode(X86::OR16rr); goto ReSimplify;
|
|
|
|
case X86::ADD32rr_DB: OutMI.setOpcode(X86::OR32rr); goto ReSimplify;
|
|
|
|
case X86::ADD64rr_DB: OutMI.setOpcode(X86::OR64rr); goto ReSimplify;
|
|
|
|
case X86::ADD16ri_DB: OutMI.setOpcode(X86::OR16ri); goto ReSimplify;
|
|
|
|
case X86::ADD32ri_DB: OutMI.setOpcode(X86::OR32ri); goto ReSimplify;
|
|
|
|
case X86::ADD64ri32_DB: OutMI.setOpcode(X86::OR64ri32); goto ReSimplify;
|
|
|
|
case X86::ADD16ri8_DB: OutMI.setOpcode(X86::OR16ri8); goto ReSimplify;
|
|
|
|
case X86::ADD32ri8_DB: OutMI.setOpcode(X86::OR32ri8); goto ReSimplify;
|
|
|
|
case X86::ADD64ri8_DB: OutMI.setOpcode(X86::OR64ri8); goto ReSimplify;
|
2012-08-02 02:39:17 +08:00
|
|
|
|
2011-09-08 02:48:32 +08:00
|
|
|
// Atomic load and store require a separate pseudo-inst because Acquire
|
|
|
|
// implies mayStore and Release implies mayLoad; fix these to regular MOV
|
|
|
|
// instructions here
|
2014-09-03 06:16:29 +08:00
|
|
|
case X86::ACQUIRE_MOV8rm: OutMI.setOpcode(X86::MOV8rm); goto ReSimplify;
|
|
|
|
case X86::ACQUIRE_MOV16rm: OutMI.setOpcode(X86::MOV16rm); goto ReSimplify;
|
|
|
|
case X86::ACQUIRE_MOV32rm: OutMI.setOpcode(X86::MOV32rm); goto ReSimplify;
|
|
|
|
case X86::ACQUIRE_MOV64rm: OutMI.setOpcode(X86::MOV64rm); goto ReSimplify;
|
|
|
|
case X86::RELEASE_MOV8mr: OutMI.setOpcode(X86::MOV8mr); goto ReSimplify;
|
|
|
|
case X86::RELEASE_MOV16mr: OutMI.setOpcode(X86::MOV16mr); goto ReSimplify;
|
|
|
|
case X86::RELEASE_MOV32mr: OutMI.setOpcode(X86::MOV32mr); goto ReSimplify;
|
|
|
|
case X86::RELEASE_MOV64mr: OutMI.setOpcode(X86::MOV64mr); goto ReSimplify;
|
|
|
|
case X86::RELEASE_MOV8mi: OutMI.setOpcode(X86::MOV8mi); goto ReSimplify;
|
|
|
|
case X86::RELEASE_MOV16mi: OutMI.setOpcode(X86::MOV16mi); goto ReSimplify;
|
|
|
|
case X86::RELEASE_MOV32mi: OutMI.setOpcode(X86::MOV32mi); goto ReSimplify;
|
|
|
|
case X86::RELEASE_MOV64mi32: OutMI.setOpcode(X86::MOV64mi32); goto ReSimplify;
|
|
|
|
case X86::RELEASE_ADD8mi: OutMI.setOpcode(X86::ADD8mi); goto ReSimplify;
|
2015-08-06 05:04:59 +08:00
|
|
|
case X86::RELEASE_ADD8mr: OutMI.setOpcode(X86::ADD8mr); goto ReSimplify;
|
2014-09-03 06:16:29 +08:00
|
|
|
case X86::RELEASE_ADD32mi: OutMI.setOpcode(X86::ADD32mi); goto ReSimplify;
|
2015-08-06 05:04:59 +08:00
|
|
|
case X86::RELEASE_ADD32mr: OutMI.setOpcode(X86::ADD32mr); goto ReSimplify;
|
2014-09-03 06:16:29 +08:00
|
|
|
case X86::RELEASE_ADD64mi32: OutMI.setOpcode(X86::ADD64mi32); goto ReSimplify;
|
2015-08-06 05:04:59 +08:00
|
|
|
case X86::RELEASE_ADD64mr: OutMI.setOpcode(X86::ADD64mr); goto ReSimplify;
|
2014-09-03 06:16:29 +08:00
|
|
|
case X86::RELEASE_AND8mi: OutMI.setOpcode(X86::AND8mi); goto ReSimplify;
|
2015-08-06 05:04:59 +08:00
|
|
|
case X86::RELEASE_AND8mr: OutMI.setOpcode(X86::AND8mr); goto ReSimplify;
|
2014-09-03 06:16:29 +08:00
|
|
|
case X86::RELEASE_AND32mi: OutMI.setOpcode(X86::AND32mi); goto ReSimplify;
|
2015-08-06 05:04:59 +08:00
|
|
|
case X86::RELEASE_AND32mr: OutMI.setOpcode(X86::AND32mr); goto ReSimplify;
|
2014-09-03 06:16:29 +08:00
|
|
|
case X86::RELEASE_AND64mi32: OutMI.setOpcode(X86::AND64mi32); goto ReSimplify;
|
2015-08-06 05:04:59 +08:00
|
|
|
case X86::RELEASE_AND64mr: OutMI.setOpcode(X86::AND64mr); goto ReSimplify;
|
2014-09-03 06:16:29 +08:00
|
|
|
case X86::RELEASE_OR8mi: OutMI.setOpcode(X86::OR8mi); goto ReSimplify;
|
2015-08-06 05:04:59 +08:00
|
|
|
case X86::RELEASE_OR8mr: OutMI.setOpcode(X86::OR8mr); goto ReSimplify;
|
2014-09-03 06:16:29 +08:00
|
|
|
case X86::RELEASE_OR32mi: OutMI.setOpcode(X86::OR32mi); goto ReSimplify;
|
2015-08-06 05:04:59 +08:00
|
|
|
case X86::RELEASE_OR32mr: OutMI.setOpcode(X86::OR32mr); goto ReSimplify;
|
2014-09-03 06:16:29 +08:00
|
|
|
case X86::RELEASE_OR64mi32: OutMI.setOpcode(X86::OR64mi32); goto ReSimplify;
|
2015-08-06 05:04:59 +08:00
|
|
|
case X86::RELEASE_OR64mr: OutMI.setOpcode(X86::OR64mr); goto ReSimplify;
|
2014-09-03 06:16:29 +08:00
|
|
|
case X86::RELEASE_XOR8mi: OutMI.setOpcode(X86::XOR8mi); goto ReSimplify;
|
2015-08-06 05:04:59 +08:00
|
|
|
case X86::RELEASE_XOR8mr: OutMI.setOpcode(X86::XOR8mr); goto ReSimplify;
|
2014-09-03 06:16:29 +08:00
|
|
|
case X86::RELEASE_XOR32mi: OutMI.setOpcode(X86::XOR32mi); goto ReSimplify;
|
2015-08-06 05:04:59 +08:00
|
|
|
case X86::RELEASE_XOR32mr: OutMI.setOpcode(X86::XOR32mr); goto ReSimplify;
|
2014-09-03 06:16:29 +08:00
|
|
|
case X86::RELEASE_XOR64mi32: OutMI.setOpcode(X86::XOR64mi32); goto ReSimplify;
|
2015-08-06 05:04:59 +08:00
|
|
|
case X86::RELEASE_XOR64mr: OutMI.setOpcode(X86::XOR64mr); goto ReSimplify;
|
2014-09-03 06:16:29 +08:00
|
|
|
case X86::RELEASE_INC8m: OutMI.setOpcode(X86::INC8m); goto ReSimplify;
|
|
|
|
case X86::RELEASE_INC16m: OutMI.setOpcode(X86::INC16m); goto ReSimplify;
|
|
|
|
case X86::RELEASE_INC32m: OutMI.setOpcode(X86::INC32m); goto ReSimplify;
|
|
|
|
case X86::RELEASE_INC64m: OutMI.setOpcode(X86::INC64m); goto ReSimplify;
|
|
|
|
case X86::RELEASE_DEC8m: OutMI.setOpcode(X86::DEC8m); goto ReSimplify;
|
|
|
|
case X86::RELEASE_DEC16m: OutMI.setOpcode(X86::DEC16m); goto ReSimplify;
|
|
|
|
case X86::RELEASE_DEC32m: OutMI.setOpcode(X86::DEC32m); goto ReSimplify;
|
|
|
|
case X86::RELEASE_DEC64m: OutMI.setOpcode(X86::DEC64m); goto ReSimplify;
|
2011-09-08 02:48:32 +08:00
|
|
|
|
2010-05-19 01:22:24 +08:00
|
|
|
// We don't currently select the correct instruction form for instructions
|
|
|
|
// which have a short %eax, etc. form. Handle this by custom lowering, for
|
|
|
|
// now.
|
|
|
|
//
|
|
|
|
// Note, we are currently not handling the following instructions:
|
2010-05-19 14:20:44 +08:00
|
|
|
// MOV64ao8, MOV64o8a
|
2010-05-19 01:22:24 +08:00
|
|
|
// XCHG16ar, XCHG32ar, XCHG64ar
|
2010-05-19 14:20:44 +08:00
|
|
|
case X86::MOV8mr_NOREX:
|
2016-04-29 08:51:30 +08:00
|
|
|
case X86::MOV8mr:
|
2010-05-19 14:20:44 +08:00
|
|
|
case X86::MOV8rm_NOREX:
|
2016-04-29 08:51:30 +08:00
|
|
|
case X86::MOV8rm:
|
|
|
|
case X86::MOV16mr:
|
|
|
|
case X86::MOV16rm:
|
|
|
|
case X86::MOV32mr:
|
|
|
|
case X86::MOV32rm: {
|
|
|
|
unsigned NewOpc;
|
|
|
|
switch (OutMI.getOpcode()) {
|
|
|
|
default: llvm_unreachable("Invalid opcode");
|
|
|
|
case X86::MOV8mr_NOREX:
|
|
|
|
case X86::MOV8mr: NewOpc = X86::MOV8o32a; break;
|
|
|
|
case X86::MOV8rm_NOREX:
|
|
|
|
case X86::MOV8rm: NewOpc = X86::MOV8ao32; break;
|
|
|
|
case X86::MOV16mr: NewOpc = X86::MOV16o32a; break;
|
|
|
|
case X86::MOV16rm: NewOpc = X86::MOV16ao32; break;
|
|
|
|
case X86::MOV32mr: NewOpc = X86::MOV32o32a; break;
|
|
|
|
case X86::MOV32rm: NewOpc = X86::MOV32ao32; break;
|
|
|
|
}
|
|
|
|
SimplifyShortMoveForm(AsmPrinter, OutMI, NewOpc);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
case X86::ADC8ri: case X86::ADC16ri: case X86::ADC32ri: case X86::ADC64ri32:
|
|
|
|
case X86::ADD8ri: case X86::ADD16ri: case X86::ADD32ri: case X86::ADD64ri32:
|
|
|
|
case X86::AND8ri: case X86::AND16ri: case X86::AND32ri: case X86::AND64ri32:
|
|
|
|
case X86::CMP8ri: case X86::CMP16ri: case X86::CMP32ri: case X86::CMP64ri32:
|
|
|
|
case X86::OR8ri: case X86::OR16ri: case X86::OR32ri: case X86::OR64ri32:
|
|
|
|
case X86::SBB8ri: case X86::SBB16ri: case X86::SBB32ri: case X86::SBB64ri32:
|
|
|
|
case X86::SUB8ri: case X86::SUB16ri: case X86::SUB32ri: case X86::SUB64ri32:
|
|
|
|
case X86::TEST8ri:case X86::TEST16ri:case X86::TEST32ri:case X86::TEST64ri32:
|
|
|
|
case X86::XOR8ri: case X86::XOR16ri: case X86::XOR32ri: case X86::XOR64ri32: {
|
|
|
|
unsigned NewOpc;
|
|
|
|
switch (OutMI.getOpcode()) {
|
|
|
|
default: llvm_unreachable("Invalid opcode");
|
|
|
|
case X86::ADC8ri: NewOpc = X86::ADC8i8; break;
|
|
|
|
case X86::ADC16ri: NewOpc = X86::ADC16i16; break;
|
|
|
|
case X86::ADC32ri: NewOpc = X86::ADC32i32; break;
|
|
|
|
case X86::ADC64ri32: NewOpc = X86::ADC64i32; break;
|
|
|
|
case X86::ADD8ri: NewOpc = X86::ADD8i8; break;
|
|
|
|
case X86::ADD16ri: NewOpc = X86::ADD16i16; break;
|
|
|
|
case X86::ADD32ri: NewOpc = X86::ADD32i32; break;
|
|
|
|
case X86::ADD64ri32: NewOpc = X86::ADD64i32; break;
|
|
|
|
case X86::AND8ri: NewOpc = X86::AND8i8; break;
|
|
|
|
case X86::AND16ri: NewOpc = X86::AND16i16; break;
|
|
|
|
case X86::AND32ri: NewOpc = X86::AND32i32; break;
|
|
|
|
case X86::AND64ri32: NewOpc = X86::AND64i32; break;
|
|
|
|
case X86::CMP8ri: NewOpc = X86::CMP8i8; break;
|
|
|
|
case X86::CMP16ri: NewOpc = X86::CMP16i16; break;
|
|
|
|
case X86::CMP32ri: NewOpc = X86::CMP32i32; break;
|
|
|
|
case X86::CMP64ri32: NewOpc = X86::CMP64i32; break;
|
|
|
|
case X86::OR8ri: NewOpc = X86::OR8i8; break;
|
|
|
|
case X86::OR16ri: NewOpc = X86::OR16i16; break;
|
|
|
|
case X86::OR32ri: NewOpc = X86::OR32i32; break;
|
|
|
|
case X86::OR64ri32: NewOpc = X86::OR64i32; break;
|
|
|
|
case X86::SBB8ri: NewOpc = X86::SBB8i8; break;
|
|
|
|
case X86::SBB16ri: NewOpc = X86::SBB16i16; break;
|
|
|
|
case X86::SBB32ri: NewOpc = X86::SBB32i32; break;
|
|
|
|
case X86::SBB64ri32: NewOpc = X86::SBB64i32; break;
|
|
|
|
case X86::SUB8ri: NewOpc = X86::SUB8i8; break;
|
|
|
|
case X86::SUB16ri: NewOpc = X86::SUB16i16; break;
|
|
|
|
case X86::SUB32ri: NewOpc = X86::SUB32i32; break;
|
|
|
|
case X86::SUB64ri32: NewOpc = X86::SUB64i32; break;
|
|
|
|
case X86::TEST8ri: NewOpc = X86::TEST8i8; break;
|
|
|
|
case X86::TEST16ri: NewOpc = X86::TEST16i16; break;
|
|
|
|
case X86::TEST32ri: NewOpc = X86::TEST32i32; break;
|
|
|
|
case X86::TEST64ri32: NewOpc = X86::TEST64i32; break;
|
|
|
|
case X86::XOR8ri: NewOpc = X86::XOR8i8; break;
|
|
|
|
case X86::XOR16ri: NewOpc = X86::XOR16i16; break;
|
|
|
|
case X86::XOR32ri: NewOpc = X86::XOR32i32; break;
|
|
|
|
case X86::XOR64ri32: NewOpc = X86::XOR64i32; break;
|
|
|
|
}
|
|
|
|
SimplifyShortImmForm(OutMI, NewOpc);
|
|
|
|
break;
|
|
|
|
}
|
2011-10-27 05:12:27 +08:00
|
|
|
|
2013-07-13 02:06:44 +08:00
|
|
|
// Try to shrink some forms of movsx.
|
|
|
|
case X86::MOVSX16rr8:
|
|
|
|
case X86::MOVSX32rr16:
|
|
|
|
case X86::MOVSX64rr32:
|
|
|
|
SimplifyMOVSX(OutMI);
|
|
|
|
break;
|
2011-10-27 05:12:27 +08:00
|
|
|
}
|
2009-09-13 04:34:57 +08:00
|
|
|
}
|
|
|
|
|
2014-07-25 04:40:55 +08:00
|
|
|
/// Lower a TLS_addr32/64 or TLS_base_addr32/64 pseudo into the canonical
/// general-dynamic / local-dynamic TLS access sequence:
/// an LEA materializing the TLS descriptor address, followed by a call to
/// __tls_get_addr (Darwin-style ___tls_get_addr in 32-bit mode).
/// The MI's operand 3 is the TLS symbol being accessed.
void X86AsmPrinter::LowerTlsAddr(X86MCInstLower &MCInstLowering,
                                 const MachineInstr &MI) {
  bool is64Bits = MI.getOpcode() == X86::TLS_addr64 ||
                  MI.getOpcode() == X86::TLS_base_addr64;

  // NOTE(review): only the 64-bit general-dynamic form gets data16/rex
  // padding prefixes — presumably to keep the sequence at the fixed size
  // the linker expects for TLS relaxation; confirm against the x86-64 TLS ABI.
  bool needsPadding = MI.getOpcode() == X86::TLS_addr64;

  MCContext &context = OutStreamer->getContext();

  if (needsPadding)
    EmitAndCountInstruction(MCInstBuilder(X86::DATA16_PREFIX));

  // Pick the symbol-reference variant (relocation kind) from the opcode:
  // general-dynamic (TLSGD) vs. local-dynamic (TLSLDM for 32-bit,
  // TLSLD for 64-bit).
  MCSymbolRefExpr::VariantKind SRVK;
  switch (MI.getOpcode()) {
  case X86::TLS_addr32:
  case X86::TLS_addr64:
    SRVK = MCSymbolRefExpr::VK_TLSGD;
    break;
  case X86::TLS_base_addr32:
    SRVK = MCSymbolRefExpr::VK_TLSLDM;
    break;
  case X86::TLS_base_addr64:
    SRVK = MCSymbolRefExpr::VK_TLSLD;
    break;
  default:
    llvm_unreachable("unexpected opcode");
  }

  MCSymbol *sym = MCInstLowering.GetSymbolFromOperand(MI.getOperand(3));
  const MCSymbolRefExpr *symRef = MCSymbolRefExpr::create(sym, SRVK, context);

  // Build the address-materializing LEA.  The three shapes differ in
  // destination register and addressing mode (RIP-relative for 64-bit,
  // EBX-based for 32-bit).
  MCInst LEA;
  if (is64Bits) {
    // leaq sym@TLSGD/TLSLD(%rip), %rdi
    LEA.setOpcode(X86::LEA64r);
    LEA.addOperand(MCOperand::createReg(X86::RDI)); // dest
    LEA.addOperand(MCOperand::createReg(X86::RIP)); // base
    LEA.addOperand(MCOperand::createImm(1));        // scale
    LEA.addOperand(MCOperand::createReg(0));        // index
    LEA.addOperand(MCOperand::createExpr(symRef));  // disp
    LEA.addOperand(MCOperand::createReg(0));        // seg
  } else if (SRVK == MCSymbolRefExpr::VK_TLSLDM) {
    // leal sym@TLSLDM(%ebx), %eax — symbol in the displacement, GOT base
    // in the base register.
    LEA.setOpcode(X86::LEA32r);
    LEA.addOperand(MCOperand::createReg(X86::EAX)); // dest
    LEA.addOperand(MCOperand::createReg(X86::EBX)); // base
    LEA.addOperand(MCOperand::createImm(1));        // scale
    LEA.addOperand(MCOperand::createReg(0));        // index
    LEA.addOperand(MCOperand::createExpr(symRef));  // disp
    LEA.addOperand(MCOperand::createReg(0));        // seg
  } else {
    // leal sym@TLSGD(,%ebx,1), %eax — note EBX is the *index* here, not
    // the base, which yields the canonical GD encoding.
    LEA.setOpcode(X86::LEA32r);
    LEA.addOperand(MCOperand::createReg(X86::EAX)); // dest
    LEA.addOperand(MCOperand::createReg(0));        // base
    LEA.addOperand(MCOperand::createImm(1));        // scale
    LEA.addOperand(MCOperand::createReg(X86::EBX)); // index
    LEA.addOperand(MCOperand::createExpr(symRef));  // disp
    LEA.addOperand(MCOperand::createReg(0));        // seg
  }
  EmitAndCountInstruction(LEA);

  // Two more data16 prefixes plus a rex64 prefix pad the call so the
  // whole GD sequence has the expected fixed length.
  if (needsPadding) {
    EmitAndCountInstruction(MCInstBuilder(X86::DATA16_PREFIX));
    EmitAndCountInstruction(MCInstBuilder(X86::DATA16_PREFIX));
    EmitAndCountInstruction(MCInstBuilder(X86::REX64_PREFIX));
  }

  StringRef name = is64Bits ? "__tls_get_addr" : "___tls_get_addr";
  MCSymbol *tlsGetAddr = context.getOrCreateSymbol(name);
  const MCSymbolRefExpr *tlsRef =
    MCSymbolRefExpr::create(tlsGetAddr,
                            MCSymbolRefExpr::VK_PLT,
                            context);

  // Call through the PLT so the dynamic linker can resolve it.
  EmitAndCountInstruction(MCInstBuilder(is64Bits ? X86::CALL64pcrel32
                                                 : X86::CALLpcrel32)
                          .addExpr(tlsRef));
}
|
2010-04-28 09:39:28 +08:00
|
|
|
|
2016-04-20 02:48:13 +08:00
|
|
|
/// \brief Emit the largest nop instruction smaller than or equal to \p NumBytes
/// bytes. Return the size of nop emitted.
///
/// Encodings 1-10 bytes map to single recommended multi-byte NOP forms
/// (0x90, xchg %ax,%ax, and NOPL/NOPW with increasingly wide memory
/// operands); anything longer is padded with up to five 0x66 operand-size
/// prefixes prepended to the 10-byte form.
static unsigned EmitNop(MCStreamer &OS, unsigned NumBytes, bool Is64Bit,
                        const MCSubtargetInfo &STI) {
  // This works only for 64bit. For 32bit we have to do additional checking if
  // the CPU supports multi-byte nops.
  assert(Is64Bit && "EmitNops only supports X86-64");

  unsigned NopSize;
  unsigned Opc, BaseReg, ScaleVal, IndexReg, Displacement, SegmentReg;
  Opc = IndexReg = Displacement = SegmentReg = 0;
  BaseReg = X86::RAX;
  ScaleVal = 1;
  // Each case selects an opcode + addressing-mode combination whose
  // encoding is exactly NopSize bytes.  The Displacement values (8 vs 512)
  // force a disp8 vs disp32 encoding; adding an index register and/or a
  // segment override each add one more byte.
  switch (NumBytes) {
  case 0: llvm_unreachable("Zero nops?"); break;
  case 1: NopSize = 1; Opc = X86::NOOP; break;
  case 2: NopSize = 2; Opc = X86::XCHG16ar; break;
  case 3: NopSize = 3; Opc = X86::NOOPL; break;
  case 4: NopSize = 4; Opc = X86::NOOPL; Displacement = 8; break;
  case 5: NopSize = 5; Opc = X86::NOOPL; Displacement = 8;
          IndexReg = X86::RAX; break;
  case 6: NopSize = 6; Opc = X86::NOOPW; Displacement = 8;
          IndexReg = X86::RAX; break;
  case 7: NopSize = 7; Opc = X86::NOOPL; Displacement = 512; break;
  case 8: NopSize = 8; Opc = X86::NOOPL; Displacement = 512;
          IndexReg = X86::RAX; break;
  case 9: NopSize = 9; Opc = X86::NOOPW; Displacement = 512;
          IndexReg = X86::RAX; break;
  default: NopSize = 10; Opc = X86::NOOPW; Displacement = 512;
           IndexReg = X86::RAX; SegmentReg = X86::CS; break;
  }

  // Grow the 10-byte form up to 15 bytes total with 0x66 prefixes (at most
  // five, keeping within the x86 instruction-length limit).
  unsigned NumPrefixes = std::min(NumBytes - NopSize, 5U);
  NopSize += NumPrefixes;
  for (unsigned i = 0; i != NumPrefixes; ++i)
    OS.EmitBytes("\x66");

  switch (Opc) {
  default:
    llvm_unreachable("Unexpected opcode");
    break;
  case X86::NOOP:
    OS.EmitInstruction(MCInstBuilder(Opc), STI);
    break;
  case X86::XCHG16ar:
    OS.EmitInstruction(MCInstBuilder(Opc).addReg(X86::AX), STI);
    break;
  case X86::NOOPL:
  case X86::NOOPW:
    // Memory-form nop: base, scale, index, displacement, segment.
    OS.EmitInstruction(MCInstBuilder(Opc)
                           .addReg(BaseReg)
                           .addImm(ScaleVal)
                           .addReg(IndexReg)
                           .addImm(Displacement)
                           .addReg(SegmentReg),
                       STI);
    break;
  }
  assert(NopSize <= NumBytes && "We overemitted?");
  return NopSize;
}
|
|
|
|
|
|
|
|
/// \brief Emit the optimal amount of multi-byte nops on X86.
|
|
|
|
static void EmitNops(MCStreamer &OS, unsigned NumBytes, bool Is64Bit,
|
|
|
|
const MCSubtargetInfo &STI) {
|
2016-04-21 01:53:21 +08:00
|
|
|
unsigned NopsToEmit = NumBytes;
|
2016-04-21 02:45:31 +08:00
|
|
|
(void)NopsToEmit;
|
2016-04-20 02:48:13 +08:00
|
|
|
while (NumBytes) {
|
|
|
|
NumBytes -= EmitNop(OS, NumBytes, Is64Bit, STI);
|
2016-04-21 01:53:21 +08:00
|
|
|
assert(NopsToEmit >= NumBytes && "Emitted more than I asked for!");
|
2016-04-20 02:48:13 +08:00
|
|
|
}
|
2013-12-04 08:39:08 +08:00
|
|
|
}
|
|
|
|
|
2015-05-07 07:53:26 +08:00
|
|
|
/// Lower a STATEPOINT pseudo-instruction: either emit the requested number
/// of nop patch bytes, or lower and emit the actual call, then record the
/// statepoint in the stackmap section.
void X86AsmPrinter::LowerSTATEPOINT(const MachineInstr &MI,
                                    X86MCInstLower &MCIL) {
  assert(Subtarget->is64Bit() && "Statepoint currently only supports X86-64");

  StatepointOpers SOpers(&MI);
  if (unsigned PatchBytes = SOpers.getNumPatchBytes()) {
    // A non-zero patch-byte count means the call site is a nop sled to be
    // patched later; emit nops instead of a call.
    EmitNops(*OutStreamer, PatchBytes, Subtarget->is64Bit(),
             getSubtargetInfo());
  } else {
    // Lower call target and choose correct opcode
    const MachineOperand &CallTarget = SOpers.getCallTarget();
    MCOperand CallTargetMCOp;
    unsigned CallOpcode;
    switch (CallTarget.getType()) {
    case MachineOperand::MO_GlobalAddress:
    case MachineOperand::MO_ExternalSymbol:
      CallTargetMCOp = MCIL.LowerSymbolOperand(
          CallTarget, MCIL.GetSymbolFromOperand(CallTarget));
      CallOpcode = X86::CALL64pcrel32;
      // Currently, we only support relative addressing with statepoints.
      // Otherwise, we'll need a scratch register to hold the target
      // address.  You'll fail asserts during load & relocation if this
      // symbol is to far away. (TODO: support non-relative addressing)
      break;
    case MachineOperand::MO_Immediate:
      CallTargetMCOp = MCOperand::createImm(CallTarget.getImm());
      CallOpcode = X86::CALL64pcrel32;
      // Currently, we only support relative addressing with statepoints.
      // Otherwise, we'll need a scratch register to hold the target
      // immediate.  You'll fail asserts during load & relocation if this
      // address is to far away. (TODO: support non-relative addressing)
      break;
    case MachineOperand::MO_Register:
      // Indirect call through a register.
      CallTargetMCOp = MCOperand::createReg(CallTarget.getReg());
      CallOpcode = X86::CALL64r;
      break;
    default:
      llvm_unreachable("Unsupported operand type in statepoint call target");
      break;
    }

    // Emit call
    MCInst CallInst;
    CallInst.setOpcode(CallOpcode);
    CallInst.addOperand(CallTargetMCOp);
    OutStreamer->EmitInstruction(CallInst, getSubtargetInfo());
  }

  // Record our statepoint node in the same section used by STACKMAP
  // and PATCHPOINT
  SM.recordStatepoint(MI);
}
|
|
|
|
|
2017-02-08 03:19:49 +08:00
|
|
|
/// Lower a FAULTING_OP pseudo: record the faulting operation (and its fault
/// handler label) in the fault map, then re-emit the wrapped instruction
/// whose opcode and operands are packed into the pseudo's operand list.
void X86AsmPrinter::LowerFAULTING_OP(const MachineInstr &FaultingMI,
                                     X86MCInstLower &MCIL) {
  // FAULTING_LOAD_OP <def>, <faltinf type>, <MBB handler>,
  //                  <opcode>, <operands>

  // Unpack the fixed positional operands of the pseudo.
  unsigned DefRegister = FaultingMI.getOperand(0).getReg();
  FaultMaps::FaultKind FK =
      static_cast<FaultMaps::FaultKind>(FaultingMI.getOperand(1).getImm());
  MCSymbol *HandlerLabel = FaultingMI.getOperand(2).getMBB()->getSymbol();
  unsigned Opcode = FaultingMI.getOperand(3).getImm();
  unsigned OperandsBeginIdx = 4;

  assert(FK < FaultMaps::FaultKindMax && "Invalid Faulting Kind!");
  FM.recordFaultingOp(FK, HandlerLabel);

  // Rebuild the real instruction being guarded.
  MCInst MI;
  MI.setOpcode(Opcode);

  // Operand 0 may be NoRegister for operations with no register def
  // (e.g. stores); only add it when present.
  if (DefRegister != X86::NoRegister)
    MI.addOperand(MCOperand::createReg(DefRegister));

  // Lower and append the remaining (variadic) operands; LowerMachineOperand
  // returns None for operands that have no MC representation.
  for (auto I = FaultingMI.operands_begin() + OperandsBeginIdx,
            E = FaultingMI.operands_end();
       I != E; ++I)
    if (auto MaybeOperand = MCIL.LowerMachineOperand(&FaultingMI, *I))
      MI.addOperand(MaybeOperand.getValue());

  OutStreamer->EmitInstruction(MI, getSubtargetInfo());
}
|
2014-12-02 06:52:56 +08:00
|
|
|
|
2017-02-01 01:00:27 +08:00
|
|
|
void X86AsmPrinter::LowerFENTRY_CALL(const MachineInstr &MI,
|
|
|
|
X86MCInstLower &MCIL) {
|
|
|
|
bool Is64Bits = Subtarget->is64Bit();
|
|
|
|
MCContext &Ctx = OutStreamer->getContext();
|
|
|
|
MCSymbol *fentry = Ctx.getOrCreateSymbol("__fentry__");
|
|
|
|
const MCSymbolRefExpr *Op =
|
|
|
|
MCSymbolRefExpr::create(fentry, MCSymbolRefExpr::VK_None, Ctx);
|
|
|
|
|
|
|
|
EmitAndCountInstruction(
|
|
|
|
MCInstBuilder(Is64Bits ? X86::CALL64pcrel32 : X86::CALLpcrel32)
|
|
|
|
.addExpr(Op));
|
|
|
|
}
|
|
|
|
|
2016-04-19 13:24:47 +08:00
|
|
|
/// Lower a PATCHABLE_OP pseudo: re-emit the wrapped instruction, padding it
/// (or swapping it for a longer-encoding equivalent) so that its encoding
/// occupies at least MinSize bytes and can be safely hot-patched.
void X86AsmPrinter::LowerPATCHABLE_OP(const MachineInstr &MI,
                                      X86MCInstLower &MCIL) {
  // PATCHABLE_OP minsize, opcode, operands

  unsigned MinSize = MI.getOperand(0).getImm();
  unsigned Opcode = MI.getOperand(1).getImm();

  // Rebuild the wrapped instruction from the trailing operands.
  MCInst MCI;
  MCI.setOpcode(Opcode);
  for (auto &MO : make_range(MI.operands_begin() + 2, MI.operands_end()))
    if (auto MaybeOperand = MCIL.LowerMachineOperand(&MI, MO))
      MCI.addOperand(MaybeOperand.getValue());

  // Trial-encode the instruction to learn its actual byte length.
  SmallString<256> Code;
  SmallVector<MCFixup, 4> Fixups;
  raw_svector_ostream VecOS(Code);
  CodeEmitter->encodeInstruction(MCI, VecOS, Fixups, getSubtargetInfo());

  if (Code.size() < MinSize) {
    if (MinSize == 2 && Opcode == X86::PUSH64r) {
      // This is an optimization that lets us get away without emitting a nop in
      // many cases.
      //
      // NB! In some cases the encoding for PUSH64r (e.g. PUSH64r %R9) takes two
      // bytes too, so the check on MinSize is important.
      MCI.setOpcode(X86::PUSH64rmr);
    } else {
      // Otherwise, pre-pad with a single nop of exactly MinSize bytes; the
      // patcher then has a full MinSize-byte window before the instruction.
      unsigned NopSize = EmitNop(*OutStreamer, MinSize, Subtarget->is64Bit(),
                                 getSubtargetInfo());
      assert(NopSize == MinSize && "Could not implement MinSize!");
      (void) NopSize;
    }
  }

  OutStreamer->EmitInstruction(MCI, getSubtargetInfo());
}
|
|
|
|
|
2013-11-19 11:29:56 +08:00
|
|
|
// Lower a stackmap of the form:
|
|
|
|
// <id>, <shadowBytes>, ...
|
2014-07-25 04:40:55 +08:00
|
|
|
void X86AsmPrinter::LowerSTACKMAP(const MachineInstr &MI) {
|
2015-04-25 03:11:51 +08:00
|
|
|
SMShadowTracker.emitShadowPadding(*OutStreamer, getSubtargetInfo());
|
2013-11-19 11:29:56 +08:00
|
|
|
SM.recordStackMap(MI);
|
2014-07-25 04:40:55 +08:00
|
|
|
unsigned NumShadowBytes = MI.getOperand(1).getImm();
|
|
|
|
SMShadowTracker.reset(NumShadowBytes);
|
2013-11-01 06:11:56 +08:00
|
|
|
}
|
|
|
|
|
2013-11-14 14:54:10 +08:00
|
|
|
// Lower a patchpoint of the form:
// [<def>], <id>, <numBytes>, <target>, <numArgs>, <cc>, ...
//
// Emits a MOV64ri+CALL64r sequence for a non-null call target, then pads
// the site with nops up to the requested patchpoint size, and records the
// patchpoint in the stackmap section.
void X86AsmPrinter::LowerPATCHPOINT(const MachineInstr &MI,
                                    X86MCInstLower &MCIL) {
  assert(Subtarget->is64Bit() && "Patchpoint currently only supports X86-64");

  SMShadowTracker.emitShadowPadding(*OutStreamer, getSubtargetInfo());

  SM.recordPatchPoint(MI);

  PatchPointOpers opers(&MI);
  unsigned ScratchIdx = opers.getNextScratchIdx();
  unsigned EncodedBytes = 0;
  const MachineOperand &CalleeMO = opers.getCallTarget();

  // Check for null target. If target is non-null (i.e. is non-zero or is
  // symbolic) then emit a call.
  if (!(CalleeMO.isImm() && !CalleeMO.getImm())) {
    MCOperand CalleeMCOp;
    switch (CalleeMO.getType()) {
    default:
      /// FIXME: Add a verifier check for bad callee types.
      llvm_unreachable("Unrecognized callee operand type.");
    case MachineOperand::MO_Immediate:
      if (CalleeMO.getImm())
        CalleeMCOp = MCOperand::createImm(CalleeMO.getImm());
      break;
    case MachineOperand::MO_ExternalSymbol:
    case MachineOperand::MO_GlobalAddress:
      CalleeMCOp =
        MCIL.LowerSymbolOperand(CalleeMO,
                                MCIL.GetSymbolFromOperand(CalleeMO));
      break;
    }

    // Emit MOV to materialize the target address and the CALL to target.
    // This is encoded with 12-13 bytes, depending on which register is used.
    unsigned ScratchReg = MI.getOperand(ScratchIdx).getReg();
    // Extended registers (r8-r15) cost an extra REX byte in the MOV64ri.
    if (X86II::isX86_64ExtendedReg(ScratchReg))
      EncodedBytes = 13;
    else
      EncodedBytes = 12;

    EmitAndCountInstruction(
        MCInstBuilder(X86::MOV64ri).addReg(ScratchReg).addOperand(CalleeMCOp));
    EmitAndCountInstruction(MCInstBuilder(X86::CALL64r).addReg(ScratchReg));
  }

  // Emit padding.
  unsigned NumBytes = opers.getNumPatchBytes();
  assert(NumBytes >= EncodedBytes &&
         "Patchpoint can't request size less than the length of a call.");

  // Fill the remainder of the requested patch region with nops.
  EmitNops(*OutStreamer, NumBytes - EncodedBytes, Subtarget->is64Bit(),
           getSubtargetInfo());
}
|
|
|
|
|
XRay: Add entry and exit sleds
Summary:
In this patch we implement the following parts of XRay:
- Supporting a function attribute named 'function-instrument' which currently only supports 'xray-always'. We should be able to use this attribute for other instrumentation approaches.
- Supporting a function attribute named 'xray-instruction-threshold' used to determine whether a function is instrumented with a minimum number of instructions (IR instruction counts).
- X86-specific nop sleds as described in the white paper.
- A machine function pass that adds the different instrumentation marker instructions at a very late stage.
- A way of identifying which return opcode is considered "normal" for each architecture.
There are some caveats here:
1) We don't handle PATCHABLE_RET in platforms other than x86_64 yet -- this means if IR used PATCHABLE_RET directly instead of a normal ret, instruction lowering for that platform might do the wrong thing. We think this should be handled at instruction selection time to by default be unpacked for platforms where XRay is not availble yet.
2) The generated section for X86 is different from what is described from the white paper for the sole reason that LLVM allows us to do this neatly. We're taking the opportunity to deviate from the white paper from this perspective to allow us to get richer information from the runtime library.
Reviewers: sanjoy, eugenis, kcc, pcc, echristo, rnk
Subscribers: niravd, majnemer, atrick, rnk, emaste, bmakam, mcrosier, mehdi_amini, llvm-commits
Differential Revision: http://reviews.llvm.org/D19904
llvm-svn: 275367
2016-07-14 12:06:33 +08:00
|
|
|
void X86AsmPrinter::LowerPATCHABLE_FUNCTION_ENTER(const MachineInstr &MI,
|
|
|
|
X86MCInstLower &MCIL) {
|
|
|
|
// We want to emit the following pattern:
|
|
|
|
//
|
2016-08-04 15:37:28 +08:00
|
|
|
// .p2align 1, ...
|
XRay: Add entry and exit sleds
Summary:
In this patch we implement the following parts of XRay:
- Supporting a function attribute named 'function-instrument' which currently only supports 'xray-always'. We should be able to use this attribute for other instrumentation approaches.
- Supporting a function attribute named 'xray-instruction-threshold' used to determine whether a function is instrumented with a minimum number of instructions (IR instruction counts).
- X86-specific nop sleds as described in the white paper.
- A machine function pass that adds the different instrumentation marker instructions at a very late stage.
- A way of identifying which return opcode is considered "normal" for each architecture.
There are some caveats here:
1) We don't handle PATCHABLE_RET in platforms other than x86_64 yet -- this means if IR used PATCHABLE_RET directly instead of a normal ret, instruction lowering for that platform might do the wrong thing. We think this should be handled at instruction selection time to by default be unpacked for platforms where XRay is not availble yet.
2) The generated section for X86 is different from what is described from the white paper for the sole reason that LLVM allows us to do this neatly. We're taking the opportunity to deviate from the white paper from this perspective to allow us to get richer information from the runtime library.
Reviewers: sanjoy, eugenis, kcc, pcc, echristo, rnk
Subscribers: niravd, majnemer, atrick, rnk, emaste, bmakam, mcrosier, mehdi_amini, llvm-commits
Differential Revision: http://reviews.llvm.org/D19904
llvm-svn: 275367
2016-07-14 12:06:33 +08:00
|
|
|
// .Lxray_sled_N:
|
|
|
|
// jmp .tmpN
|
|
|
|
// # 9 bytes worth of noops
|
|
|
|
// .tmpN
|
|
|
|
//
|
|
|
|
// We need the 9 bytes because at runtime, we'd be patching over the full 11
|
|
|
|
// bytes with the following pattern:
|
|
|
|
//
|
|
|
|
// mov %r10, <function id, 32-bit> // 6 bytes
|
|
|
|
// call <relative offset, 32-bits> // 5 bytes
|
|
|
|
//
|
|
|
|
auto CurSled = OutContext.createTempSymbol("xray_sled_", true);
|
2016-08-04 15:37:28 +08:00
|
|
|
OutStreamer->EmitCodeAlignment(2);
|
XRay: Add entry and exit sleds
Summary:
In this patch we implement the following parts of XRay:
- Supporting a function attribute named 'function-instrument' which currently only supports 'xray-always'. We should be able to use this attribute for other instrumentation approaches.
- Supporting a function attribute named 'xray-instruction-threshold' used to determine whether a function is instrumented with a minimum number of instructions (IR instruction counts).
- X86-specific nop sleds as described in the white paper.
- A machine function pass that adds the different instrumentation marker instructions at a very late stage.
- A way of identifying which return opcode is considered "normal" for each architecture.
There are some caveats here:
1) We don't handle PATCHABLE_RET in platforms other than x86_64 yet -- this means if IR used PATCHABLE_RET directly instead of a normal ret, instruction lowering for that platform might do the wrong thing. We think this should be handled at instruction selection time to by default be unpacked for platforms where XRay is not availble yet.
2) The generated section for X86 is different from what is described from the white paper for the sole reason that LLVM allows us to do this neatly. We're taking the opportunity to deviate from the white paper from this perspective to allow us to get richer information from the runtime library.
Reviewers: sanjoy, eugenis, kcc, pcc, echristo, rnk
Subscribers: niravd, majnemer, atrick, rnk, emaste, bmakam, mcrosier, mehdi_amini, llvm-commits
Differential Revision: http://reviews.llvm.org/D19904
llvm-svn: 275367
2016-07-14 12:06:33 +08:00
|
|
|
OutStreamer->EmitLabel(CurSled);
|
|
|
|
auto Target = OutContext.createTempSymbol();
|
|
|
|
|
|
|
|
// Use a two-byte `jmp`. This version of JMP takes an 8-bit relative offset as
|
|
|
|
// an operand (computed as an offset from the jmp instruction).
|
|
|
|
// FIXME: Find another less hacky way do force the relative jump.
|
|
|
|
OutStreamer->EmitBytes("\xeb\x09");
|
|
|
|
EmitNops(*OutStreamer, 9, Subtarget->is64Bit(), getSubtargetInfo());
|
|
|
|
OutStreamer->EmitLabel(Target);
|
|
|
|
recordSled(CurSled, MI, SledKind::FUNCTION_ENTER);
|
|
|
|
}
|
|
|
|
|
|
|
|
void X86AsmPrinter::LowerPATCHABLE_RET(const MachineInstr &MI,
|
|
|
|
X86MCInstLower &MCIL) {
|
|
|
|
// Since PATCHABLE_RET takes the opcode of the return statement as an
|
|
|
|
// argument, we use that to emit the correct form of the RET that we want.
|
|
|
|
// i.e. when we see this:
|
|
|
|
//
|
|
|
|
// PATCHABLE_RET X86::RET ...
|
|
|
|
//
|
|
|
|
// We should emit the RET followed by sleds.
|
|
|
|
//
|
2016-08-04 15:37:28 +08:00
|
|
|
// .p2align 1, ...
|
XRay: Add entry and exit sleds
Summary:
In this patch we implement the following parts of XRay:
- Supporting a function attribute named 'function-instrument' which currently only supports 'xray-always'. We should be able to use this attribute for other instrumentation approaches.
- Supporting a function attribute named 'xray-instruction-threshold' used to determine whether a function is instrumented with a minimum number of instructions (IR instruction counts).
- X86-specific nop sleds as described in the white paper.
- A machine function pass that adds the different instrumentation marker instructions at a very late stage.
- A way of identifying which return opcode is considered "normal" for each architecture.
There are some caveats here:
1) We don't handle PATCHABLE_RET in platforms other than x86_64 yet -- this means if IR used PATCHABLE_RET directly instead of a normal ret, instruction lowering for that platform might do the wrong thing. We think this should be handled at instruction selection time to by default be unpacked for platforms where XRay is not availble yet.
2) The generated section for X86 is different from what is described from the white paper for the sole reason that LLVM allows us to do this neatly. We're taking the opportunity to deviate from the white paper from this perspective to allow us to get richer information from the runtime library.
Reviewers: sanjoy, eugenis, kcc, pcc, echristo, rnk
Subscribers: niravd, majnemer, atrick, rnk, emaste, bmakam, mcrosier, mehdi_amini, llvm-commits
Differential Revision: http://reviews.llvm.org/D19904
llvm-svn: 275367
2016-07-14 12:06:33 +08:00
|
|
|
// .Lxray_sled_N:
|
|
|
|
// ret # or equivalent instruction
|
|
|
|
// # 10 bytes worth of noops
|
|
|
|
//
|
|
|
|
// This just makes sure that the alignment for the next instruction is 2.
|
|
|
|
auto CurSled = OutContext.createTempSymbol("xray_sled_", true);
|
2016-08-04 15:37:28 +08:00
|
|
|
OutStreamer->EmitCodeAlignment(2);
|
XRay: Add entry and exit sleds
Summary:
In this patch we implement the following parts of XRay:
- Supporting a function attribute named 'function-instrument' which currently only supports 'xray-always'. We should be able to use this attribute for other instrumentation approaches.
- Supporting a function attribute named 'xray-instruction-threshold' used to determine whether a function is instrumented with a minimum number of instructions (IR instruction counts).
- X86-specific nop sleds as described in the white paper.
- A machine function pass that adds the different instrumentation marker instructions at a very late stage.
- A way of identifying which return opcode is considered "normal" for each architecture.
There are some caveats here:
1) We don't handle PATCHABLE_RET in platforms other than x86_64 yet -- this means if IR used PATCHABLE_RET directly instead of a normal ret, instruction lowering for that platform might do the wrong thing. We think this should be handled at instruction selection time to by default be unpacked for platforms where XRay is not availble yet.
2) The generated section for X86 is different from what is described from the white paper for the sole reason that LLVM allows us to do this neatly. We're taking the opportunity to deviate from the white paper from this perspective to allow us to get richer information from the runtime library.
Reviewers: sanjoy, eugenis, kcc, pcc, echristo, rnk
Subscribers: niravd, majnemer, atrick, rnk, emaste, bmakam, mcrosier, mehdi_amini, llvm-commits
Differential Revision: http://reviews.llvm.org/D19904
llvm-svn: 275367
2016-07-14 12:06:33 +08:00
|
|
|
OutStreamer->EmitLabel(CurSled);
|
|
|
|
unsigned OpCode = MI.getOperand(0).getImm();
|
|
|
|
MCInst Ret;
|
|
|
|
Ret.setOpcode(OpCode);
|
|
|
|
for (auto &MO : make_range(MI.operands_begin() + 1, MI.operands_end()))
|
|
|
|
if (auto MaybeOperand = MCIL.LowerMachineOperand(&MI, MO))
|
|
|
|
Ret.addOperand(MaybeOperand.getValue());
|
|
|
|
OutStreamer->EmitInstruction(Ret, getSubtargetInfo());
|
|
|
|
EmitNops(*OutStreamer, 10, Subtarget->is64Bit(), getSubtargetInfo());
|
|
|
|
recordSled(CurSled, MI, SledKind::FUNCTION_EXIT);
|
|
|
|
}
|
|
|
|
|
[XRay] Detect and emit sleds for sibling/tail calls
Summary:
This change promotes the 'isTailCall(...)' member function to
TargetInstrInfo as a query interface for determining on a per-target
basis whether a given MachineInstr is a tail call instruction. We build
upon this in the XRay instrumentation pass to emit special sleds for
tail call optimisations, where we emit the correct kind of sled.
The tail call sleds look like a mix between the function entry and
function exit sleds. Form-wise, the sled comes before the "jmp"
instruction that implements the tail call similar to how we do it for
the function entry sled. Functionally, because we know this is a tail
call, it behaves much like an exit sled -- i.e. at runtime we may use
the exit trampolines instead of a different kind of trampoline.
A follow-up change to recognise these sleds will be done in compiler-rt,
so that we can start intercepting these initially as exits, but also
have the option to have different log entries to more accurately reflect
that this is actually a tail call.
Reviewers: echristo, rSerge, majnemer
Subscribers: mehdi_amini, dberris, llvm-commits
Differential Revision: https://reviews.llvm.org/D23986
llvm-svn: 280334
2016-09-01 09:29:13 +08:00
|
|
|
void X86AsmPrinter::LowerPATCHABLE_TAIL_CALL(const MachineInstr &MI, X86MCInstLower &MCIL) {
|
|
|
|
// Like PATCHABLE_RET, we have the actual instruction in the operands to this
|
|
|
|
// instruction so we lower that particular instruction and its operands.
|
|
|
|
// Unlike PATCHABLE_RET though, we put the sled before the JMP, much like how
|
|
|
|
// we do it for PATCHABLE_FUNCTION_ENTER. The sled should be very similar to
|
|
|
|
// the PATCHABLE_FUNCTION_ENTER case, followed by the lowering of the actual
|
|
|
|
// tail call much like how we have it in PATCHABLE_RET.
|
|
|
|
auto CurSled = OutContext.createTempSymbol("xray_sled_", true);
|
|
|
|
OutStreamer->EmitCodeAlignment(2);
|
|
|
|
OutStreamer->EmitLabel(CurSled);
|
|
|
|
auto Target = OutContext.createTempSymbol();
|
|
|
|
|
|
|
|
// Use a two-byte `jmp`. This version of JMP takes an 8-bit relative offset as
|
|
|
|
// an operand (computed as an offset from the jmp instruction).
|
|
|
|
// FIXME: Find another less hacky way do force the relative jump.
|
|
|
|
OutStreamer->EmitBytes("\xeb\x09");
|
|
|
|
EmitNops(*OutStreamer, 9, Subtarget->is64Bit(), getSubtargetInfo());
|
|
|
|
OutStreamer->EmitLabel(Target);
|
|
|
|
recordSled(CurSled, MI, SledKind::TAIL_CALL);
|
|
|
|
|
|
|
|
unsigned OpCode = MI.getOperand(0).getImm();
|
|
|
|
MCInst TC;
|
|
|
|
TC.setOpcode(OpCode);
|
|
|
|
|
|
|
|
// Before emitting the instruction, add a comment to indicate that this is
|
|
|
|
// indeed a tail call.
|
|
|
|
OutStreamer->AddComment("TAILCALL");
|
|
|
|
for (auto &MO : make_range(MI.operands_begin() + 1, MI.operands_end()))
|
|
|
|
if (auto MaybeOperand = MCIL.LowerMachineOperand(&MI, MO))
|
|
|
|
TC.addOperand(MaybeOperand.getValue());
|
|
|
|
OutStreamer->EmitInstruction(TC, getSubtargetInfo());
|
|
|
|
}
|
|
|
|
|
2014-08-05 05:05:27 +08:00
|
|
|
// Returns instruction preceding MBBI in MachineFunction.
|
|
|
|
// If MBBI is the first instruction of the first basic block, returns null.
|
|
|
|
static MachineBasicBlock::const_iterator
|
|
|
|
PrevCrossBBInst(MachineBasicBlock::const_iterator MBBI) {
|
|
|
|
const MachineBasicBlock *MBB = MBBI->getParent();
|
|
|
|
while (MBBI == MBB->begin()) {
|
2016-02-22 04:39:50 +08:00
|
|
|
if (MBB == &MBB->getParent()->front())
|
2016-07-12 11:18:50 +08:00
|
|
|
return MachineBasicBlock::const_iterator();
|
2014-08-05 05:05:27 +08:00
|
|
|
MBB = MBB->getPrevNode();
|
|
|
|
MBBI = MBB->end();
|
|
|
|
}
|
|
|
|
return --MBBI;
|
|
|
|
}
|
|
|
|
|
[x86] Teach the instruction lowering to add comments describing constant
pool data being loaded into a vector register.
The comments take the form of:
# ymm0 = [a,b,c,d,...]
# xmm1 = <x,y,z...>
The []s are used for generic sequential data and the <>s are used for
specifically ConstantVector loads. Undef elements are printed as the
letter 'u', integers in decimal, and floating point values as floating
point values. Suggestions on improving the formatting or other aspects
of the display are very welcome.
My primary use case for this is to be able to FileCheck test masks
passed to vector shuffle instructions in-register. It isn't fantastic
for that (no decoding special zeroing semantics or other tricks), but it
at least puts the mask onto an instruction line that could reasonably be
checked. I've updated many of the new vector shuffle lowering tests to
leverage this in their test cases so that we're actually checking the
shuffle masks remain as expected.
Before implementing this, I tried a *bunch* of different approaches.
I looked into teaching the MCInstLower code to scan up the basic block
and find a definition of a register used in a shuffle instruction and
then decode that, but this seems incredibly brittle and complex.
I talked to Hal a lot about the "right" way to do this: attach the raw
shuffle mask to the instruction itself in some form of unencoded
operands, and then use that to emit the comments. I still think that's
the optimal solution here, but it proved to be beyond what I'm up for
here. In particular, it seems likely best done by completing the
plumbing of metadata through these layers and attaching the shuffle mask
in metadata which could have fully automatic dropping when encoding an
actual instruction.
llvm-svn: 218377
2014-09-24 17:39:41 +08:00
|
|
|
static const Constant *getConstantFromPool(const MachineInstr &MI,
|
|
|
|
const MachineOperand &Op) {
|
|
|
|
if (!Op.isCPI())
|
2014-09-24 11:06:37 +08:00
|
|
|
return nullptr;
|
2014-09-24 10:16:12 +08:00
|
|
|
|
2014-09-24 11:06:37 +08:00
|
|
|
ArrayRef<MachineConstantPoolEntry> Constants =
|
|
|
|
MI.getParent()->getParent()->getConstantPool()->getConstants();
|
[x86] Teach the instruction lowering to add comments describing constant
pool data being loaded into a vector register.
The comments take the form of:
# ymm0 = [a,b,c,d,...]
# xmm1 = <x,y,z...>
The []s are used for generic sequential data and the <>s are used for
specifically ConstantVector loads. Undef elements are printed as the
letter 'u', integers in decimal, and floating point values as floating
point values. Suggestions on improving the formatting or other aspects
of the display are very welcome.
My primary use case for this is to be able to FileCheck test masks
passed to vector shuffle instructions in-register. It isn't fantastic
for that (no decoding special zeroing semantics or other tricks), but it
at least puts the mask onto an instruction line that could reasonably be
checked. I've updated many of the new vector shuffle lowering tests to
leverage this in their test cases so that we're actually checking the
shuffle masks remain as expected.
Before implementing this, I tried a *bunch* of different approaches.
I looked into teaching the MCInstLower code to scan up the basic block
and find a definition of a register used in a shuffle instruction and
then decode that, but this seems incredibly brittle and complex.
I talked to Hal a lot about the "right" way to do this: attach the raw
shuffle mask to the instruction itself in some form of unencoded
operands, and then use that to emit the comments. I still think that's
the optimal solution here, but it proved to be beyond what I'm up for
here. In particular, it seems likely best done by completing the
plumbing of metadata through these layers and attaching the shuffle mask
in metadata which could have fully automatic dropping when encoding an
actual instruction.
llvm-svn: 218377
2014-09-24 17:39:41 +08:00
|
|
|
const MachineConstantPoolEntry &ConstantEntry =
|
|
|
|
Constants[Op.getIndex()];
|
2014-09-24 10:16:12 +08:00
|
|
|
|
|
|
|
// Bail if this is a machine constant pool entry, we won't be able to dig out
|
|
|
|
// anything useful.
|
[x86] Teach the instruction lowering to add comments describing constant
pool data being loaded into a vector register.
The comments take the form of:
# ymm0 = [a,b,c,d,...]
# xmm1 = <x,y,z...>
The []s are used for generic sequential data and the <>s are used for
specifically ConstantVector loads. Undef elements are printed as the
letter 'u', integers in decimal, and floating point values as floating
point values. Suggestions on improving the formatting or other aspects
of the display are very welcome.
My primary use case for this is to be able to FileCheck test masks
passed to vector shuffle instructions in-register. It isn't fantastic
for that (no decoding special zeroing semantics or other tricks), but it
at least puts the mask onto an instruction line that could reasonably be
checked. I've updated many of the new vector shuffle lowering tests to
leverage this in their test cases so that we're actually checking the
shuffle masks remain as expected.
Before implementing this, I tried a *bunch* of different approaches.
I looked into teaching the MCInstLower code to scan up the basic block
and find a definition of a register used in a shuffle instruction and
then decode that, but this seems incredibly brittle and complex.
I talked to Hal a lot about the "right" way to do this: attach the raw
shuffle mask to the instruction itself in some form of unencoded
operands, and then use that to emit the comments. I still think that's
the optimal solution here, but it proved to be beyond what I'm up for
here. In particular, it seems likely best done by completing the
plumbing of metadata through these layers and attaching the shuffle mask
in metadata which could have fully automatic dropping when encoding an
actual instruction.
llvm-svn: 218377
2014-09-24 17:39:41 +08:00
|
|
|
if (ConstantEntry.isMachineConstantPoolEntry())
|
2014-09-24 11:06:37 +08:00
|
|
|
return nullptr;
|
2014-09-24 10:16:12 +08:00
|
|
|
|
[x86] Teach the instruction lowering to add comments describing constant
pool data being loaded into a vector register.
The comments take the form of:
# ymm0 = [a,b,c,d,...]
# xmm1 = <x,y,z...>
The []s are used for generic sequential data and the <>s are used for
specifically ConstantVector loads. Undef elements are printed as the
letter 'u', integers in decimal, and floating point values as floating
point values. Suggestions on improving the formatting or other aspects
of the display are very welcome.
My primary use case for this is to be able to FileCheck test masks
passed to vector shuffle instructions in-register. It isn't fantastic
for that (no decoding special zeroing semantics or other tricks), but it
at least puts the mask onto an instruction line that could reasonably be
checked. I've updated many of the new vector shuffle lowering tests to
leverage this in their test cases so that we're actually checking the
shuffle masks remain as expected.
Before implementing this, I tried a *bunch* of different approaches.
I looked into teaching the MCInstLower code to scan up the basic block
and find a definition of a register used in a shuffle instruction and
then decode that, but this seems incredibly brittle and complex.
I talked to Hal a lot about the "right" way to do this: attach the raw
shuffle mask to the instruction itself in some form of unencoded
operands, and then use that to emit the comments. I still think that's
the optimal solution here, but it proved to be beyond what I'm up for
here. In particular, it seems likely best done by completing the
plumbing of metadata through these layers and attaching the shuffle mask
in metadata which could have fully automatic dropping when encoding an
actual instruction.
llvm-svn: 218377
2014-09-24 17:39:41 +08:00
|
|
|
auto *C = dyn_cast<Constant>(ConstantEntry.Val.ConstVal);
|
|
|
|
assert((!C || ConstantEntry.getType() == C->getType()) &&
|
2014-09-24 10:16:12 +08:00
|
|
|
"Expected a constant of the same type!");
|
2014-09-24 11:06:37 +08:00
|
|
|
return C;
|
|
|
|
}
|
2014-09-24 10:16:12 +08:00
|
|
|
|
2016-10-18 23:45:37 +08:00
|
|
|
// Build a human-readable comment describing a shuffle instruction's effect,
// e.g. "ymm0 = ymm1[0,1],zero,mem[3]". Operand 0 of MI is the destination;
// SrcOp1Idx and SrcOp2Idx index the two source operands (their names compare
// equal for single-source shuffles). Mask entries use SM_SentinelZero for
// zeroed lanes and SM_SentinelUndef for undef lanes; other entries < e refer
// to src1 and entries >= e refer to src2, where e is the mask size.
static std::string getShuffleComment(const MachineInstr *MI,
                                     unsigned SrcOp1Idx,
                                     unsigned SrcOp2Idx,
                                     ArrayRef<int> Mask) {
  std::string Comment;

  // Compute the name for a register. This is really goofy because we have
  // multiple instruction printers that could (in theory) use different
  // names. Fortunately most people use the ATT style (outside of Windows)
  // and they actually agree on register naming here. Ultimately, this is
  // a comment, and so its OK if it isn't perfect.
  auto GetRegisterName = [](unsigned RegNum) -> StringRef {
    return X86ATTInstPrinter::getRegisterName(RegNum);
  };

  const MachineOperand &DstOp = MI->getOperand(0);
  const MachineOperand &SrcOp1 = MI->getOperand(SrcOp1Idx);
  const MachineOperand &SrcOp2 = MI->getOperand(SrcOp2Idx);

  // Non-register operands (e.g. folded loads) are all labelled "mem".
  StringRef DstName = DstOp.isReg() ? GetRegisterName(DstOp.getReg()) : "mem";
  StringRef Src1Name =
      SrcOp1.isReg() ? GetRegisterName(SrcOp1.getReg()) : "mem";
  StringRef Src2Name =
      SrcOp2.isReg() ? GetRegisterName(SrcOp2.getReg()) : "mem";

  // One source operand, fix the mask to print all elements in one span:
  // fold src2 indices ([e, 2e)) down into the src1 range ([0, e)).
  SmallVector<int, 8> ShuffleMask(Mask.begin(), Mask.end());
  if (Src1Name == Src2Name)
    for (int i = 0, e = ShuffleMask.size(); i != e; ++i)
      if (ShuffleMask[i] >= e)
        ShuffleMask[i] -= e;

  raw_string_ostream CS(Comment);
  CS << DstName;

  // Handle AVX512 MASK/MASKZ write mask comments.
  // MASK: zmmX {%kY}
  // MASKZ: zmmX {%kY} {z}
  // When SrcOp1Idx > 1, the operand just before src1 is the write mask
  // register; SrcOp1Idx == 2 is the zeroing (MASKZ) form.
  if (SrcOp1Idx > 1) {
    assert((SrcOp1Idx == 2 || SrcOp1Idx == 3) && "Unexpected writemask");

    const MachineOperand &WriteMaskOp = MI->getOperand(SrcOp1Idx - 1);
    if (WriteMaskOp.isReg()) {
      CS << " {%" << GetRegisterName(WriteMaskOp.getReg()) << "}";

      if (SrcOp1Idx == 2) {
        CS << " {z}";
      }
    }
  }

  CS << " = ";

  // Print the mask, grouping consecutive elements drawn from the same source
  // into one "name[...]" span.
  for (int i = 0, e = ShuffleMask.size(); i != e; ++i) {
    if (i != 0)
      CS << ",";
    if (ShuffleMask[i] == SM_SentinelZero) {
      CS << "zero";
      continue;
    }

    // Otherwise, it must come from src1 or src2. Print the span of elements
    // that comes from this src.
    bool isSrc1 = ShuffleMask[i] < (int)e;
    CS << (isSrc1 ? Src1Name : Src2Name) << '[';

    // Consume every following element that stays within the same source;
    // undef elements print as 'u', others as their index modulo e.
    bool IsFirst = true;
    while (i != e && ShuffleMask[i] != SM_SentinelZero &&
           (ShuffleMask[i] < (int)e) == isSrc1) {
      if (!IsFirst)
        CS << ',';
      else
        IsFirst = false;
      if (ShuffleMask[i] == SM_SentinelUndef)
        CS << "u";
      else
        CS << ShuffleMask[i] % (int)e;
      ++i;
    }
    CS << ']';
    --i; // For loop increments element #.
  }
  CS.flush();

  return Comment;
}
|
|
|
|
|
2010-01-28 09:02:27 +08:00
|
|
|
void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
|
2013-10-30 00:11:22 +08:00
|
|
|
X86MCInstLower MCInstLowering(*MF, *this);
|
2015-02-03 01:38:43 +08:00
|
|
|
const X86RegisterInfo *RI = MF->getSubtarget<X86Subtarget>().getRegisterInfo();
|
2014-06-25 20:41:52 +08:00
|
|
|
|
2016-12-28 18:12:48 +08:00
|
|
|
// Add a comment about EVEX-2-VEX compression for AVX-512 instrs that
|
|
|
|
// are compressed from EVEX encoding to VEX encoding.
|
|
|
|
if (TM.Options.MCOptions.ShowMCEncoding) {
|
|
|
|
if (MI->getAsmPrinterFlags() & AC_EVEX_2_VEX)
|
|
|
|
OutStreamer->AddComment("EVEX TO VEX Compression ", false);
|
|
|
|
}
|
|
|
|
|
2009-09-13 04:34:57 +08:00
|
|
|
switch (MI->getOpcode()) {
|
2010-04-07 06:45:26 +08:00
|
|
|
case TargetOpcode::DBG_VALUE:
|
2013-06-17 04:34:27 +08:00
|
|
|
llvm_unreachable("Should be handled target independently");
|
2010-04-07 09:15:14 +08:00
|
|
|
|
2010-08-06 02:34:30 +08:00
|
|
|
// Emit nothing here but a comment if we can.
|
|
|
|
case X86::Int_MemBarrier:
|
2015-04-25 03:11:51 +08:00
|
|
|
OutStreamer->emitRawComment("MEMBARRIER");
|
2010-08-06 02:34:30 +08:00
|
|
|
return;
|
2011-10-05 07:26:17 +08:00
|
|
|
|
2010-10-27 02:09:55 +08:00
|
|
|
|
|
|
|
case X86::EH_RETURN:
|
|
|
|
case X86::EH_RETURN64: {
|
|
|
|
// Lower these as normal, but add some comments.
|
|
|
|
unsigned Reg = MI->getOperand(0).getReg();
|
2015-04-25 03:11:51 +08:00
|
|
|
OutStreamer->AddComment(StringRef("eh_return, addr: %") +
|
|
|
|
X86ATTInstPrinter::getRegisterName(Reg));
|
2010-10-27 02:09:55 +08:00
|
|
|
break;
|
|
|
|
}
|
2015-10-02 02:44:59 +08:00
|
|
|
case X86::CLEANUPRET: {
|
|
|
|
// Lower these as normal, but add some comments.
|
|
|
|
OutStreamer->AddComment("CLEANUPRET");
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
case X86::CATCHRET: {
|
|
|
|
// Lower these as normal, but add some comments.
|
|
|
|
OutStreamer->AddComment("CATCHRET");
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2010-07-09 08:49:41 +08:00
|
|
|
case X86::TAILJMPr:
|
2015-01-31 05:03:31 +08:00
|
|
|
case X86::TAILJMPm:
|
2010-07-09 08:49:41 +08:00
|
|
|
case X86::TAILJMPd:
|
2015-01-31 05:03:31 +08:00
|
|
|
case X86::TAILJMPr64:
|
|
|
|
case X86::TAILJMPm64:
|
2010-07-09 08:49:41 +08:00
|
|
|
case X86::TAILJMPd64:
|
2015-01-31 05:03:31 +08:00
|
|
|
case X86::TAILJMPr64_REX:
|
|
|
|
case X86::TAILJMPm64_REX:
|
2010-07-09 08:49:41 +08:00
|
|
|
// Lower these as normal, but add some comments.
|
2015-04-25 03:11:51 +08:00
|
|
|
OutStreamer->AddComment("TAILCALL");
|
2010-07-09 08:49:41 +08:00
|
|
|
break;
|
2010-11-29 05:16:39 +08:00
|
|
|
|
|
|
|
case X86::TLS_addr32:
|
|
|
|
case X86::TLS_addr64:
|
2012-06-02 00:27:21 +08:00
|
|
|
case X86::TLS_base_addr32:
|
|
|
|
case X86::TLS_base_addr64:
|
2014-07-25 04:40:55 +08:00
|
|
|
return LowerTlsAddr(MCInstLowering, *MI);
|
2010-11-29 05:16:39 +08:00
|
|
|
|
2009-09-13 04:34:57 +08:00
|
|
|
case X86::MOVPC32r: {
|
|
|
|
// This is a pseudo op for a two instruction sequence with a label, which
|
|
|
|
// looks like:
|
|
|
|
// call "L1$pb"
|
|
|
|
// "L1$pb":
|
|
|
|
// popl %esi
|
2012-08-02 02:39:17 +08:00
|
|
|
|
2009-09-13 04:34:57 +08:00
|
|
|
// Emit the call.
|
2010-11-15 06:48:15 +08:00
|
|
|
MCSymbol *PICBase = MF->getPICBaseSymbol();
|
2009-09-13 04:34:57 +08:00
|
|
|
// FIXME: We would like an efficient form for this, so we don't have to do a
|
|
|
|
// lot of extra uniquing.
|
2014-07-25 04:40:55 +08:00
|
|
|
EmitAndCountInstruction(MCInstBuilder(X86::CALLpcrel32)
|
2015-05-30 09:25:56 +08:00
|
|
|
.addExpr(MCSymbolRefExpr::create(PICBase, OutContext)));
|
2012-08-02 02:39:17 +08:00
|
|
|
|
2015-11-06 01:19:59 +08:00
|
|
|
const X86FrameLowering* FrameLowering =
|
|
|
|
MF->getSubtarget<X86Subtarget>().getFrameLowering();
|
|
|
|
bool hasFP = FrameLowering->hasFP(*MF);
|
2015-12-06 21:06:20 +08:00
|
|
|
|
|
|
|
// TODO: This is needed only if we require precise CFA.
|
2015-12-16 02:50:32 +08:00
|
|
|
bool HasActiveDwarfFrame = OutStreamer->getNumFrameInfos() &&
|
|
|
|
!OutStreamer->getDwarfFrameInfos().back().End;
|
|
|
|
|
2015-11-06 01:19:59 +08:00
|
|
|
int stackGrowth = -RI->getSlotSize();
|
|
|
|
|
2015-12-16 02:50:32 +08:00
|
|
|
if (HasActiveDwarfFrame && !hasFP) {
|
2015-11-06 01:19:59 +08:00
|
|
|
OutStreamer->EmitCFIAdjustCfaOffset(-stackGrowth);
|
|
|
|
}
|
|
|
|
|
2009-09-13 04:34:57 +08:00
|
|
|
// Emit the label.
|
2015-04-25 03:11:51 +08:00
|
|
|
OutStreamer->EmitLabel(PICBase);
|
2012-08-02 02:39:17 +08:00
|
|
|
|
2009-09-13 04:34:57 +08:00
|
|
|
// popl $reg
|
2014-07-25 04:40:55 +08:00
|
|
|
EmitAndCountInstruction(MCInstBuilder(X86::POP32r)
|
|
|
|
.addReg(MI->getOperand(0).getReg()));
|
2015-11-06 01:19:59 +08:00
|
|
|
|
2015-12-16 02:50:32 +08:00
|
|
|
if (HasActiveDwarfFrame && !hasFP) {
|
2015-11-06 01:19:59 +08:00
|
|
|
OutStreamer->EmitCFIAdjustCfaOffset(stackGrowth);
|
|
|
|
}
|
2009-09-13 04:34:57 +08:00
|
|
|
return;
|
2009-09-13 05:01:20 +08:00
|
|
|
}
|
2012-08-02 02:39:17 +08:00
|
|
|
|
2009-09-13 05:01:20 +08:00
|
|
|
case X86::ADD32ri: {
|
|
|
|
// Lower the MO_GOT_ABSOLUTE_ADDRESS form of ADD32ri.
|
|
|
|
if (MI->getOperand(2).getTargetFlags() != X86II::MO_GOT_ABSOLUTE_ADDRESS)
|
|
|
|
break;
|
2012-08-02 02:39:17 +08:00
|
|
|
|
2009-09-13 05:01:20 +08:00
|
|
|
// Okay, we have something like:
|
|
|
|
// EAX = ADD32ri EAX, MO_GOT_ABSOLUTE_ADDRESS(@MYGLOBAL)
|
2012-08-02 02:39:17 +08:00
|
|
|
|
2009-09-13 05:01:20 +08:00
|
|
|
// For this, we want to print something like:
|
|
|
|
// MYGLOBAL + (. - PICBASE)
|
|
|
|
// However, we can't generate a ".", so just emit a new label here and refer
|
2010-03-13 02:47:50 +08:00
|
|
|
// to it.
|
2015-05-19 02:43:14 +08:00
|
|
|
MCSymbol *DotSym = OutContext.createTempSymbol();
|
2015-04-25 03:11:51 +08:00
|
|
|
OutStreamer->EmitLabel(DotSym);
|
2012-08-02 02:39:17 +08:00
|
|
|
|
2009-09-13 05:01:20 +08:00
|
|
|
// Now that we have emitted the label, lower the complex operand expression.
|
2010-02-09 07:03:41 +08:00
|
|
|
MCSymbol *OpSym = MCInstLowering.GetSymbolFromOperand(MI->getOperand(2));
|
2012-08-02 02:39:17 +08:00
|
|
|
|
2015-05-30 09:25:56 +08:00
|
|
|
const MCExpr *DotExpr = MCSymbolRefExpr::create(DotSym, OutContext);
|
2009-09-13 05:01:20 +08:00
|
|
|
const MCExpr *PICBase =
|
2015-05-30 09:25:56 +08:00
|
|
|
MCSymbolRefExpr::create(MF->getPICBaseSymbol(), OutContext);
|
|
|
|
DotExpr = MCBinaryExpr::createSub(DotExpr, PICBase, OutContext);
|
2012-08-02 02:39:17 +08:00
|
|
|
|
2015-05-30 09:25:56 +08:00
|
|
|
DotExpr = MCBinaryExpr::createAdd(MCSymbolRefExpr::create(OpSym,OutContext),
|
2009-09-13 05:01:20 +08:00
|
|
|
DotExpr, OutContext);
|
2012-08-02 02:39:17 +08:00
|
|
|
|
2014-07-25 04:40:55 +08:00
|
|
|
EmitAndCountInstruction(MCInstBuilder(X86::ADD32ri)
|
2012-11-26 21:34:22 +08:00
|
|
|
.addReg(MI->getOperand(0).getReg())
|
|
|
|
.addReg(MI->getOperand(1).getReg())
|
2012-11-27 02:05:52 +08:00
|
|
|
.addExpr(DotExpr));
|
2009-09-13 05:01:20 +08:00
|
|
|
return;
|
|
|
|
}
|
2014-12-02 06:52:56 +08:00
|
|
|
case TargetOpcode::STATEPOINT:
|
2015-05-07 07:53:26 +08:00
|
|
|
return LowerSTATEPOINT(*MI, MCInstLowering);
|
2014-12-04 13:20:33 +08:00
|
|
|
|
2017-02-08 03:19:49 +08:00
|
|
|
case TargetOpcode::FAULTING_OP:
|
|
|
|
return LowerFAULTING_OP(*MI, MCInstLowering);
|
2015-06-16 02:44:08 +08:00
|
|
|
|
2017-02-01 01:00:27 +08:00
|
|
|
case TargetOpcode::FENTRY_CALL:
|
|
|
|
return LowerFENTRY_CALL(*MI, MCInstLowering);
|
|
|
|
|
2016-04-19 13:24:47 +08:00
|
|
|
case TargetOpcode::PATCHABLE_OP:
|
|
|
|
return LowerPATCHABLE_OP(*MI, MCInstLowering);
|
|
|
|
|
2013-11-01 06:11:56 +08:00
|
|
|
case TargetOpcode::STACKMAP:
|
2014-07-25 04:40:55 +08:00
|
|
|
return LowerSTACKMAP(*MI);
|
2013-11-01 06:11:56 +08:00
|
|
|
|
|
|
|
case TargetOpcode::PATCHPOINT:
|
2015-04-22 14:02:31 +08:00
|
|
|
return LowerPATCHPOINT(*MI, MCInstLowering);
|
2013-11-12 07:00:41 +08:00
|
|
|
|
XRay: Add entry and exit sleds
Summary:
In this patch we implement the following parts of XRay:
- Supporting a function attribute named 'function-instrument' which currently only supports 'xray-always'. We should be able to use this attribute for other instrumentation approaches.
- Supporting a function attribute named 'xray-instruction-threshold' used to determine whether a function is instrumented with a minimum number of instructions (IR instruction counts).
- X86-specific nop sleds as described in the white paper.
- A machine function pass that adds the different instrumentation marker instructions at a very late stage.
- A way of identifying which return opcode is considered "normal" for each architecture.
There are some caveats here:
1) We don't handle PATCHABLE_RET on platforms other than x86_64 yet -- this means if IR used PATCHABLE_RET directly instead of a normal ret, instruction lowering for that platform might do the wrong thing. We think this should be handled at instruction selection time so that it is unpacked by default for platforms where XRay is not available yet.
2) The generated section for X86 is different from what is described from the white paper for the sole reason that LLVM allows us to do this neatly. We're taking the opportunity to deviate from the white paper from this perspective to allow us to get richer information from the runtime library.
Reviewers: sanjoy, eugenis, kcc, pcc, echristo, rnk
Subscribers: niravd, majnemer, atrick, rnk, emaste, bmakam, mcrosier, mehdi_amini, llvm-commits
Differential Revision: http://reviews.llvm.org/D19904
llvm-svn: 275367
2016-07-14 12:06:33 +08:00
|
|
|
case TargetOpcode::PATCHABLE_FUNCTION_ENTER:
|
|
|
|
return LowerPATCHABLE_FUNCTION_ENTER(*MI, MCInstLowering);
|
|
|
|
|
|
|
|
case TargetOpcode::PATCHABLE_RET:
|
|
|
|
return LowerPATCHABLE_RET(*MI, MCInstLowering);
|
|
|
|
|
[XRay] Detect and emit sleds for sibling/tail calls
Summary:
This change promotes the 'isTailCall(...)' member function to
TargetInstrInfo as a query interface for determining on a per-target
basis whether a given MachineInstr is a tail call instruction. We build
upon this in the XRay instrumentation pass to emit special sleds for
tail call optimisations, where we emit the correct kind of sled.
The tail call sleds look like a mix between the function entry and
function exit sleds. Form-wise, the sled comes before the "jmp"
instruction that implements the tail call similar to how we do it for
the function entry sled. Functionally, because we know this is a tail
call, it behaves much like an exit sled -- i.e. at runtime we may use
the exit trampolines instead of a different kind of trampoline.
A follow-up change to recognise these sleds will be done in compiler-rt,
so that we can start intercepting these initially as exits, but also
have the option to have different log entries to more accurately reflect
that this is actually a tail call.
Reviewers: echristo, rSerge, majnemer
Subscribers: mehdi_amini, dberris, llvm-commits
Differential Revision: https://reviews.llvm.org/D23986
llvm-svn: 280334
2016-09-01 09:29:13 +08:00
|
|
|
case TargetOpcode::PATCHABLE_TAIL_CALL:
|
|
|
|
return LowerPATCHABLE_TAIL_CALL(*MI, MCInstLowering);
|
|
|
|
|
2013-11-12 07:00:41 +08:00
|
|
|
case X86::MORESTACK_RET:
|
2014-07-25 04:40:55 +08:00
|
|
|
EmitAndCountInstruction(MCInstBuilder(getRetOpcode(*Subtarget)));
|
2013-11-12 07:00:41 +08:00
|
|
|
return;
|
|
|
|
|
|
|
|
case X86::MORESTACK_RET_RESTORE_R10:
|
|
|
|
// Return, then restore R10.
|
2014-07-25 04:40:55 +08:00
|
|
|
EmitAndCountInstruction(MCInstBuilder(getRetOpcode(*Subtarget)));
|
|
|
|
EmitAndCountInstruction(MCInstBuilder(X86::MOV64rr)
|
|
|
|
.addReg(X86::R10)
|
|
|
|
.addReg(X86::RAX));
|
2013-11-12 07:00:41 +08:00
|
|
|
return;
|
2014-06-25 20:41:52 +08:00
|
|
|
|
|
|
|
case X86::SEH_PushReg:
|
2016-09-23 03:50:05 +08:00
|
|
|
assert(MF->hasWinCFI() && "SEH_ instruction in function without WinCFI?");
|
2015-04-25 03:11:51 +08:00
|
|
|
OutStreamer->EmitWinCFIPushReg(RI->getSEHRegNum(MI->getOperand(0).getImm()));
|
2014-06-25 20:41:52 +08:00
|
|
|
return;
|
|
|
|
|
|
|
|
case X86::SEH_SaveReg:
|
2016-09-23 03:50:05 +08:00
|
|
|
assert(MF->hasWinCFI() && "SEH_ instruction in function without WinCFI?");
|
2015-04-25 03:11:51 +08:00
|
|
|
OutStreamer->EmitWinCFISaveReg(RI->getSEHRegNum(MI->getOperand(0).getImm()),
|
|
|
|
MI->getOperand(1).getImm());
|
2014-06-25 20:41:52 +08:00
|
|
|
return;
|
|
|
|
|
|
|
|
case X86::SEH_SaveXMM:
|
2016-09-23 03:50:05 +08:00
|
|
|
assert(MF->hasWinCFI() && "SEH_ instruction in function without WinCFI?");
|
2015-04-25 03:11:51 +08:00
|
|
|
OutStreamer->EmitWinCFISaveXMM(RI->getSEHRegNum(MI->getOperand(0).getImm()),
|
|
|
|
MI->getOperand(1).getImm());
|
2014-06-25 20:41:52 +08:00
|
|
|
return;
|
|
|
|
|
|
|
|
case X86::SEH_StackAlloc:
|
2016-09-23 03:50:05 +08:00
|
|
|
assert(MF->hasWinCFI() && "SEH_ instruction in function without WinCFI?");
|
2015-04-25 03:11:51 +08:00
|
|
|
OutStreamer->EmitWinCFIAllocStack(MI->getOperand(0).getImm());
|
2014-06-25 20:41:52 +08:00
|
|
|
return;
|
|
|
|
|
|
|
|
case X86::SEH_SetFrame:
|
2016-09-23 03:50:05 +08:00
|
|
|
assert(MF->hasWinCFI() && "SEH_ instruction in function without WinCFI?");
|
2015-04-25 03:11:51 +08:00
|
|
|
OutStreamer->EmitWinCFISetFrame(RI->getSEHRegNum(MI->getOperand(0).getImm()),
|
|
|
|
MI->getOperand(1).getImm());
|
2014-06-25 20:41:52 +08:00
|
|
|
return;
|
|
|
|
|
|
|
|
case X86::SEH_PushFrame:
|
2016-09-23 03:50:05 +08:00
|
|
|
assert(MF->hasWinCFI() && "SEH_ instruction in function without WinCFI?");
|
2015-04-25 03:11:51 +08:00
|
|
|
OutStreamer->EmitWinCFIPushFrame(MI->getOperand(0).getImm());
|
2014-06-25 20:41:52 +08:00
|
|
|
return;
|
|
|
|
|
|
|
|
case X86::SEH_EndPrologue:
|
2016-09-23 03:50:05 +08:00
|
|
|
assert(MF->hasWinCFI() && "SEH_ instruction in function without WinCFI?");
|
2015-04-25 03:11:51 +08:00
|
|
|
OutStreamer->EmitWinCFIEndProlog();
|
2014-06-25 20:41:52 +08:00
|
|
|
return;
|
2014-07-26 07:47:11 +08:00
|
|
|
|
2014-08-05 05:05:27 +08:00
|
|
|
case X86::SEH_Epilogue: {
|
2016-09-23 03:50:05 +08:00
|
|
|
assert(MF->hasWinCFI() && "SEH_ instruction in function without WinCFI?");
|
2014-08-05 05:05:27 +08:00
|
|
|
MachineBasicBlock::const_iterator MBBI(MI);
|
|
|
|
// Check if preceded by a call and emit nop if so.
|
2016-07-12 11:18:50 +08:00
|
|
|
for (MBBI = PrevCrossBBInst(MBBI);
|
|
|
|
MBBI != MachineBasicBlock::const_iterator();
|
|
|
|
MBBI = PrevCrossBBInst(MBBI)) {
|
2014-08-05 05:05:27 +08:00
|
|
|
// Conservatively assume that pseudo instructions don't emit code and keep
|
|
|
|
// looking for a call. We may emit an unnecessary nop in some cases.
|
|
|
|
if (!MBBI->isPseudo()) {
|
|
|
|
if (MBBI->isCall())
|
|
|
|
EmitAndCountInstruction(MCInstBuilder(X86::NOOP));
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2015-12-27 03:48:43 +08:00
|
|
|
// Lower PSHUFB and VPERMILP normally but add a comment if we can find
|
|
|
|
// a constant shuffle mask. We won't be able to do this at the MC layer
|
|
|
|
// because the mask isn't an immediate.
|
2014-07-26 07:47:11 +08:00
|
|
|
case X86::PSHUFBrm:
|
2014-09-25 08:24:19 +08:00
|
|
|
case X86::VPSHUFBrm:
|
2015-12-27 03:48:43 +08:00
|
|
|
case X86::VPSHUFBYrm:
|
|
|
|
case X86::VPSHUFBZ128rm:
|
|
|
|
case X86::VPSHUFBZ128rmk:
|
|
|
|
case X86::VPSHUFBZ128rmkz:
|
|
|
|
case X86::VPSHUFBZ256rm:
|
|
|
|
case X86::VPSHUFBZ256rmk:
|
|
|
|
case X86::VPSHUFBZ256rmkz:
|
|
|
|
case X86::VPSHUFBZrm:
|
|
|
|
case X86::VPSHUFBZrmk:
|
|
|
|
case X86::VPSHUFBZrmkz: {
|
2015-04-25 03:11:51 +08:00
|
|
|
if (!OutStreamer->isVerboseAsm())
|
2014-09-24 11:06:37 +08:00
|
|
|
break;
|
2015-12-27 03:48:43 +08:00
|
|
|
unsigned SrcIdx, MaskIdx;
|
|
|
|
switch (MI->getOpcode()) {
|
|
|
|
default: llvm_unreachable("Invalid opcode");
|
|
|
|
case X86::PSHUFBrm:
|
|
|
|
case X86::VPSHUFBrm:
|
|
|
|
case X86::VPSHUFBYrm:
|
|
|
|
case X86::VPSHUFBZ128rm:
|
|
|
|
case X86::VPSHUFBZ256rm:
|
|
|
|
case X86::VPSHUFBZrm:
|
|
|
|
SrcIdx = 1; MaskIdx = 5; break;
|
|
|
|
case X86::VPSHUFBZ128rmkz:
|
|
|
|
case X86::VPSHUFBZ256rmkz:
|
|
|
|
case X86::VPSHUFBZrmkz:
|
|
|
|
SrcIdx = 2; MaskIdx = 6; break;
|
|
|
|
case X86::VPSHUFBZ128rmk:
|
|
|
|
case X86::VPSHUFBZ256rmk:
|
|
|
|
case X86::VPSHUFBZrmk:
|
|
|
|
SrcIdx = 3; MaskIdx = 7; break;
|
|
|
|
}
|
|
|
|
|
|
|
|
assert(MI->getNumOperands() >= 6 &&
|
|
|
|
"We should always have at least 6 operands!");
|
2014-09-24 11:06:37 +08:00
|
|
|
|
2016-10-18 23:45:37 +08:00
|
|
|
const MachineOperand &MaskOp = MI->getOperand(MaskIdx);
|
[x86] Teach the instruction lowering to add comments describing constant
pool data being loaded into a vector register.
The comments take the form of:
# ymm0 = [a,b,c,d,...]
# xmm1 = <x,y,z...>
The []s are used for generic sequential data and the <>s are used for
specifically ConstantVector loads. Undef elements are printed as the
letter 'u', integers in decimal, and floating point values as floating
point values. Suggestions on improving the formatting or other aspects
of the display are very welcome.
My primary use case for this is to be able to FileCheck test masks
passed to vector shuffle instructions in-register. It isn't fantastic
for that (no decoding special zeroing semantics or other tricks), but it
at least puts the mask onto an instruction line that could reasonably be
checked. I've updated many of the new vector shuffle lowering tests to
leverage this in their test cases so that we're actually checking the
shuffle masks remain as expected.
Before implementing this, I tried a *bunch* of different approaches.
I looked into teaching the MCInstLower code to scan up the basic block
and find a definition of a register used in a shuffle instruction and
then decode that, but this seems incredibly brittle and complex.
I talked to Hal a lot about the "right" way to do this: attach the raw
shuffle mask to the instruction itself in some form of unencoded
operands, and then use that to emit the comments. I still think that's
the optimal solution here, but it proved to be beyond what I'm up for
here. In particular, it seems likely best done by completing the
plumbing of metadata through these layers and attaching the shuffle mask
in metadata which could have fully automatic dropping when encoding an
actual instruction.
llvm-svn: 218377
2014-09-24 17:39:41 +08:00
|
|
|
if (auto *C = getConstantFromPool(*MI, MaskOp)) {
|
2016-11-25 10:29:21 +08:00
|
|
|
SmallVector<int, 64> Mask;
|
2015-01-11 15:29:51 +08:00
|
|
|
DecodePSHUFBMask(C, Mask);
|
2014-09-24 11:06:37 +08:00
|
|
|
if (!Mask.empty())
|
2016-10-18 23:45:37 +08:00
|
|
|
OutStreamer->AddComment(getShuffleComment(MI, SrcIdx, SrcIdx, Mask));
|
2014-09-24 11:06:37 +08:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
2016-07-13 23:45:36 +08:00
|
|
|
|
2016-10-18 11:36:52 +08:00
|
|
|
case X86::VPERMILPSrm:
|
|
|
|
case X86::VPERMILPSYrm:
|
|
|
|
case X86::VPERMILPSZ128rm:
|
|
|
|
case X86::VPERMILPSZ128rmk:
|
|
|
|
case X86::VPERMILPSZ128rmkz:
|
|
|
|
case X86::VPERMILPSZ256rm:
|
|
|
|
case X86::VPERMILPSZ256rmk:
|
|
|
|
case X86::VPERMILPSZ256rmkz:
|
|
|
|
case X86::VPERMILPSZrm:
|
|
|
|
case X86::VPERMILPSZrmk:
|
|
|
|
case X86::VPERMILPSZrmkz:
|
2014-09-23 18:08:29 +08:00
|
|
|
case X86::VPERMILPDrm:
|
2016-07-13 23:45:36 +08:00
|
|
|
case X86::VPERMILPDYrm:
|
|
|
|
case X86::VPERMILPDZ128rm:
|
2016-10-18 11:36:52 +08:00
|
|
|
case X86::VPERMILPDZ128rmk:
|
|
|
|
case X86::VPERMILPDZ128rmkz:
|
2016-07-13 23:45:36 +08:00
|
|
|
case X86::VPERMILPDZ256rm:
|
2016-10-18 11:36:52 +08:00
|
|
|
case X86::VPERMILPDZ256rmk:
|
|
|
|
case X86::VPERMILPDZ256rmkz:
|
|
|
|
case X86::VPERMILPDZrm:
|
|
|
|
case X86::VPERMILPDZrmk:
|
|
|
|
case X86::VPERMILPDZrmkz: {
|
2015-04-25 03:11:51 +08:00
|
|
|
if (!OutStreamer->isVerboseAsm())
|
2014-09-24 11:06:34 +08:00
|
|
|
break;
|
2016-10-18 11:36:52 +08:00
|
|
|
unsigned SrcIdx, MaskIdx;
|
|
|
|
unsigned ElSize;
|
|
|
|
switch (MI->getOpcode()) {
|
|
|
|
default: llvm_unreachable("Invalid opcode");
|
|
|
|
case X86::VPERMILPSrm:
|
|
|
|
case X86::VPERMILPSYrm:
|
|
|
|
case X86::VPERMILPSZ128rm:
|
|
|
|
case X86::VPERMILPSZ256rm:
|
|
|
|
case X86::VPERMILPSZrm:
|
|
|
|
SrcIdx = 1; MaskIdx = 5; ElSize = 32; break;
|
|
|
|
case X86::VPERMILPSZ128rmkz:
|
|
|
|
case X86::VPERMILPSZ256rmkz:
|
|
|
|
case X86::VPERMILPSZrmkz:
|
|
|
|
SrcIdx = 2; MaskIdx = 6; ElSize = 32; break;
|
|
|
|
case X86::VPERMILPSZ128rmk:
|
|
|
|
case X86::VPERMILPSZ256rmk:
|
|
|
|
case X86::VPERMILPSZrmk:
|
|
|
|
SrcIdx = 3; MaskIdx = 7; ElSize = 32; break;
|
|
|
|
case X86::VPERMILPDrm:
|
|
|
|
case X86::VPERMILPDYrm:
|
|
|
|
case X86::VPERMILPDZ128rm:
|
|
|
|
case X86::VPERMILPDZ256rm:
|
|
|
|
case X86::VPERMILPDZrm:
|
|
|
|
SrcIdx = 1; MaskIdx = 5; ElSize = 64; break;
|
|
|
|
case X86::VPERMILPDZ128rmkz:
|
|
|
|
case X86::VPERMILPDZ256rmkz:
|
|
|
|
case X86::VPERMILPDZrmkz:
|
|
|
|
SrcIdx = 2; MaskIdx = 6; ElSize = 64; break;
|
|
|
|
case X86::VPERMILPDZ128rmk:
|
|
|
|
case X86::VPERMILPDZ256rmk:
|
|
|
|
case X86::VPERMILPDZrmk:
|
|
|
|
SrcIdx = 3; MaskIdx = 7; ElSize = 64; break;
|
2015-12-26 12:50:07 +08:00
|
|
|
}
|
2016-07-13 23:45:36 +08:00
|
|
|
|
2016-10-17 14:41:18 +08:00
|
|
|
assert(MI->getNumOperands() >= 6 &&
|
|
|
|
"We should always have at least 6 operands!");
|
2015-12-26 12:50:07 +08:00
|
|
|
|
2016-10-18 23:45:37 +08:00
|
|
|
const MachineOperand &MaskOp = MI->getOperand(MaskIdx);
|
[x86] Teach the instruction lowering to add comments describing constant
pool data being loaded into a vector register.
The comments take the form of:
# ymm0 = [a,b,c,d,...]
# xmm1 = <x,y,z...>
The []s are used for generic sequential data and the <>s are used for
specifically ConstantVector loads. Undef elements are printed as the
letter 'u', integers in decimal, and floating point values as floating
point values. Suggestions on improving the formatting or other aspects
of the display are very welcome.
My primary use case for this is to be able to FileCheck test masks
passed to vector shuffle instructions in-register. It isn't fantastic
for that (no decoding special zeroing semantics or other tricks), but it
at least puts the mask onto an instruction line that could reasonably be
checked. I've updated many of the new vector shuffle lowering tests to
leverage this in their test cases so that we're actually checking the
shuffle masks remain as expected.
Before implementing this, I tried a *bunch* of different approaches.
I looked into teaching the MCInstLower code to scan up the basic block
and find a definition of a register used in a shuffle instruction and
then decode that, but this seems incredibly brittle and complex.
I talked to Hal a lot about the "right" way to do this: attach the raw
shuffle mask to the instruction itself in some form of unencoded
operands, and then use that to emit the comments. I still think that's
the optimal solution here, but it proved to be beyond what I'm up for
here. In particular, it seems likely best done by completing the
plumbing of metadata through these layers and attaching the shuffle mask
in metadata which could have fully automatic dropping when encoding an
actual instruction.
llvm-svn: 218377
2014-09-24 17:39:41 +08:00
|
|
|
if (auto *C = getConstantFromPool(*MI, MaskOp)) {
|
2014-09-24 11:06:37 +08:00
|
|
|
SmallVector<int, 16> Mask;
|
2016-10-18 11:36:52 +08:00
|
|
|
DecodeVPERMILPMask(C, ElSize, Mask);
|
2014-09-24 11:06:37 +08:00
|
|
|
if (!Mask.empty())
|
2016-10-18 23:45:37 +08:00
|
|
|
OutStreamer->AddComment(getShuffleComment(MI, SrcIdx, SrcIdx, Mask));
|
2016-04-09 22:51:26 +08:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
2016-06-05 05:44:28 +08:00
|
|
|
|
|
|
|
case X86::VPERMIL2PDrm:
|
|
|
|
case X86::VPERMIL2PSrm:
|
|
|
|
case X86::VPERMIL2PDrmY:
|
|
|
|
case X86::VPERMIL2PSrmY: {
|
|
|
|
if (!OutStreamer->isVerboseAsm())
|
|
|
|
break;
|
2016-10-17 14:41:18 +08:00
|
|
|
assert(MI->getNumOperands() >= 8 &&
|
|
|
|
"We should always have at least 8 operands!");
|
2016-06-05 05:44:28 +08:00
|
|
|
|
2016-10-18 23:45:37 +08:00
|
|
|
const MachineOperand &CtrlOp = MI->getOperand(MI->getNumOperands() - 1);
|
2016-06-05 05:44:28 +08:00
|
|
|
if (!CtrlOp.isImm())
|
|
|
|
break;
|
|
|
|
|
|
|
|
unsigned ElSize;
|
|
|
|
switch (MI->getOpcode()) {
|
|
|
|
default: llvm_unreachable("Invalid opcode");
|
|
|
|
case X86::VPERMIL2PSrm: case X86::VPERMIL2PSrmY: ElSize = 32; break;
|
|
|
|
case X86::VPERMIL2PDrm: case X86::VPERMIL2PDrmY: ElSize = 64; break;
|
|
|
|
}
|
|
|
|
|
2016-10-18 23:45:37 +08:00
|
|
|
const MachineOperand &MaskOp = MI->getOperand(6);
|
2016-06-05 05:44:28 +08:00
|
|
|
if (auto *C = getConstantFromPool(*MI, MaskOp)) {
|
|
|
|
SmallVector<int, 16> Mask;
|
|
|
|
DecodeVPERMIL2PMask(C, (unsigned)CtrlOp.getImm(), ElSize, Mask);
|
|
|
|
if (!Mask.empty())
|
2016-10-18 23:45:37 +08:00
|
|
|
OutStreamer->AddComment(getShuffleComment(MI, 1, 2, Mask));
|
2016-06-05 05:44:28 +08:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2016-04-09 22:51:26 +08:00
|
|
|
case X86::VPPERMrrm: {
|
|
|
|
if (!OutStreamer->isVerboseAsm())
|
|
|
|
break;
|
2016-10-17 14:41:18 +08:00
|
|
|
assert(MI->getNumOperands() >= 7 &&
|
|
|
|
"We should always have at least 7 operands!");
|
2016-04-09 22:51:26 +08:00
|
|
|
|
2016-10-18 23:45:37 +08:00
|
|
|
const MachineOperand &MaskOp = MI->getOperand(6);
|
2016-04-09 22:51:26 +08:00
|
|
|
if (auto *C = getConstantFromPool(*MI, MaskOp)) {
|
|
|
|
SmallVector<int, 16> Mask;
|
|
|
|
DecodeVPPERMMask(C, Mask);
|
|
|
|
if (!Mask.empty())
|
2016-10-18 23:45:37 +08:00
|
|
|
OutStreamer->AddComment(getShuffleComment(MI, 1, 2, Mask));
|
2014-09-24 11:06:37 +08:00
|
|
|
}
|
2014-07-26 07:47:11 +08:00
|
|
|
break;
|
2009-09-13 04:34:57 +08:00
|
|
|
}
|
[x86] Teach the instruction lowering to add comments describing constant
pool data being loaded into a vector register.
The comments take the form of:
# ymm0 = [a,b,c,d,...]
# xmm1 = <x,y,z...>
The []s are used for generic sequential data and the <>s are used for
specifically ConstantVector loads. Undef elements are printed as the
letter 'u', integers in decimal, and floating point values as floating
point values. Suggestions on improving the formatting or other aspects
of the display are very welcome.
My primary use case for this is to be able to FileCheck test masks
passed to vector shuffle instructions in-register. It isn't fantastic
for that (no decoding special zeroing semantics or other tricks), but it
at least puts the mask onto an instruction line that could reasonably be
checked. I've updated many of the new vector shuffle lowering tests to
leverage this in their test cases so that we're actually checking the
shuffle masks remain as expected.
Before implementing this, I tried a *bunch* of different approaches.
I looked into teaching the MCInstLower code to scan up the basic block
and find a definition of a register used in a shuffle instruction and
then decode that, but this seems incredibly brittle and complex.
I talked to Hal a lot about the "right" way to do this: attach the raw
shuffle mask to the instruction itself in some form of unencoded
operands, and then use that to emit the comments. I still think that's
the optimal solution here, but it proved to be beyond what I'm up for
here. In particular, it seems likely best done by completing the
plumbing of metadata through these layers and attaching the shuffle mask
in metadata which could have fully automatic dropping when encoding an
actual instruction.
llvm-svn: 218377
2014-09-24 17:39:41 +08:00
|
|
|
|
2015-09-08 14:38:21 +08:00
|
|
|
#define MOV_CASE(Prefix, Suffix) \
|
|
|
|
case X86::Prefix##MOVAPD##Suffix##rm: \
|
|
|
|
case X86::Prefix##MOVAPS##Suffix##rm: \
|
|
|
|
case X86::Prefix##MOVUPD##Suffix##rm: \
|
|
|
|
case X86::Prefix##MOVUPS##Suffix##rm: \
|
|
|
|
case X86::Prefix##MOVDQA##Suffix##rm: \
|
|
|
|
case X86::Prefix##MOVDQU##Suffix##rm:
|
|
|
|
|
|
|
|
#define MOV_AVX512_CASE(Suffix) \
|
|
|
|
case X86::VMOVDQA64##Suffix##rm: \
|
|
|
|
case X86::VMOVDQA32##Suffix##rm: \
|
|
|
|
case X86::VMOVDQU64##Suffix##rm: \
|
|
|
|
case X86::VMOVDQU32##Suffix##rm: \
|
|
|
|
case X86::VMOVDQU16##Suffix##rm: \
|
|
|
|
case X86::VMOVDQU8##Suffix##rm: \
|
|
|
|
case X86::VMOVAPS##Suffix##rm: \
|
|
|
|
case X86::VMOVAPD##Suffix##rm: \
|
|
|
|
case X86::VMOVUPS##Suffix##rm: \
|
|
|
|
case X86::VMOVUPD##Suffix##rm:
|
|
|
|
|
|
|
|
#define CASE_ALL_MOV_RM() \
|
|
|
|
MOV_CASE(, ) /* SSE */ \
|
|
|
|
MOV_CASE(V, ) /* AVX-128 */ \
|
|
|
|
MOV_CASE(V, Y) /* AVX-256 */ \
|
|
|
|
MOV_AVX512_CASE(Z) \
|
|
|
|
MOV_AVX512_CASE(Z256) \
|
|
|
|
MOV_AVX512_CASE(Z128)
|
|
|
|
|
|
|
|
// For loads from a constant pool to a vector register, print the constant
|
|
|
|
// loaded.
|
|
|
|
CASE_ALL_MOV_RM()
|
2015-04-25 03:11:51 +08:00
|
|
|
if (!OutStreamer->isVerboseAsm())
|
[x86] Teach the instruction lowering to add comments describing constant
pool data being loaded into a vector register.
The comments take the form of:
# ymm0 = [a,b,c,d,...]
# xmm1 = <x,y,z...>
The []s are used for generic sequential data and the <>s are used for
specifically ConstantVector loads. Undef elements are printed as the
letter 'u', integers in decimal, and floating point values as floating
point values. Suggestions on improving the formatting or other aspects
of the display are very welcome.
My primary use case for this is to be able to FileCheck test masks
passed to vector shuffle instructions in-register. It isn't fantastic
for that (no decoding special zeroing semantics or other tricks), but it
at least puts the mask onto an instruction line that could reasonably be
checked. I've updated many of the new vector shuffle lowering tests to
leverage this in their test cases so that we're actually checking the
shuffle masks remain as expected.
Before implementing this, I tried a *bunch* of different approaches.
I looked into teaching the MCInstLower code to scan up the basic block
and find a definition of a register used in a shuffle instruction and
then decode that, but this seems incredibly brittle and complex.
I talked to Hal a lot about the "right" way to do this: attach the raw
shuffle mask to the instruction itself in some form of unencoded
operands, and then use that to emit the comments. I still think that's
the optimal solution here, but it proved to be beyond what I'm up for
here. In particular, it seems likely best done by completing the
plumbing of metadata through these layers and attaching the shuffle mask
in metadata which could have fully automatic dropping when encoding an
actual instruction.
llvm-svn: 218377
2014-09-24 17:39:41 +08:00
|
|
|
break;
|
2016-11-25 10:29:24 +08:00
|
|
|
if (MI->getNumOperands() <= 4)
|
|
|
|
break;
|
[x86] Teach the instruction lowering to add comments describing constant
pool data being loaded into a vector register.
The comments take the form of:
# ymm0 = [a,b,c,d,...]
# xmm1 = <x,y,z...>
The []s are used for generic sequential data and the <>s are used for
specifically ConstantVector loads. Undef elements are printed as the
letter 'u', integers in decimal, and floating point values as floating
point values. Suggestions on improving the formatting or other aspects
of the display are very welcome.
My primary use case for this is to be able to FileCheck test masks
passed to vector shuffle instructions in-register. It isn't fantastic
for that (no decoding special zeroing semantics or other tricks), but it
at least puts the mask onto an instruction line that could reasonably be
checked. I've updated many of the new vector shuffle lowering tests to
leverage this in their test cases so that we're actually checking the
shuffle masks remain as expected.
Before implementing this, I tried a *bunch* of different approaches.
I looked into teaching the MCInstLower code to scan up the basic block
and find a definition of a register used in a shuffle instruction and
then decode that, but this seems incredibly brittle and complex.
I talked to Hal a lot about the "right" way to do this: attach the raw
shuffle mask to the instruction itself in some form of unencoded
operands, and then use that to emit the comments. I still think that's
the optimal solution here, but it proved to be beyond what I'm up for
here. In particular, it seems likely best done by completing the
plumbing of metadata through these layers and attaching the shuffle mask
in metadata which could have fully automatic dropping when encoding an
actual instruction.
llvm-svn: 218377
2014-09-24 17:39:41 +08:00
|
|
|
if (auto *C = getConstantFromPool(*MI, MI->getOperand(4))) {
|
|
|
|
std::string Comment;
|
|
|
|
raw_string_ostream CS(Comment);
|
|
|
|
const MachineOperand &DstOp = MI->getOperand(0);
|
|
|
|
CS << X86ATTInstPrinter::getRegisterName(DstOp.getReg()) << " = ";
|
|
|
|
if (auto *CDS = dyn_cast<ConstantDataSequential>(C)) {
|
|
|
|
CS << "[";
|
|
|
|
for (int i = 0, NumElements = CDS->getNumElements(); i < NumElements; ++i) {
|
|
|
|
if (i != 0)
|
|
|
|
CS << ",";
|
|
|
|
if (CDS->getElementType()->isIntegerTy())
|
|
|
|
CS << CDS->getElementAsInteger(i);
|
|
|
|
else if (CDS->getElementType()->isFloatTy())
|
|
|
|
CS << CDS->getElementAsFloat(i);
|
|
|
|
else if (CDS->getElementType()->isDoubleTy())
|
|
|
|
CS << CDS->getElementAsDouble(i);
|
|
|
|
else
|
|
|
|
CS << "?";
|
|
|
|
}
|
|
|
|
CS << "]";
|
2015-04-25 03:11:51 +08:00
|
|
|
OutStreamer->AddComment(CS.str());
|
[x86] Teach the instruction lowering to add comments describing constant
pool data being loaded into a vector register.
The comments take the form of:
# ymm0 = [a,b,c,d,...]
# xmm1 = <x,y,z...>
The []s are used for generic sequential data and the <>s are used for
specifically ConstantVector loads. Undef elements are printed as the
letter 'u', integers in decimal, and floating point values as floating
point values. Suggestions on improving the formatting or other aspects
of the display are very welcome.
My primary use case for this is to be able to FileCheck test masks
passed to vector shuffle instructions in-register. It isn't fantastic
for that (no decoding special zeroing semantics or other tricks), but it
at least puts the mask onto an instruction line that could reasonably be
checked. I've updated many of the new vector shuffle lowering tests to
leverage this in their test cases so that we're actually checking the
shuffle masks remain as expected.
Before implementing this, I tried a *bunch* of different approaches.
I looked into teaching the MCInstLower code to scan up the basic block
and find a definition of a register used in a shuffle instruction and
then decode that, but this seems incredibly brittle and complex.
I talked to Hal a lot about the "right" way to do this: attach the raw
shuffle mask to the instruction itself in some form of unencoded
operands, and then use that to emit the comments. I still think that's
the optimal solution here, but it proved to be beyond what I'm up for
here. In particular, it seems likely best done by completing the
plumbing of metadata through these layers and attaching the shuffle mask
in metadata which could have fully automatic dropping when encoding an
actual instruction.
llvm-svn: 218377
2014-09-24 17:39:41 +08:00
|
|
|
} else if (auto *CV = dyn_cast<ConstantVector>(C)) {
|
|
|
|
CS << "<";
|
|
|
|
for (int i = 0, NumOperands = CV->getNumOperands(); i < NumOperands; ++i) {
|
|
|
|
if (i != 0)
|
|
|
|
CS << ",";
|
|
|
|
Constant *COp = CV->getOperand(i);
|
|
|
|
if (isa<UndefValue>(COp)) {
|
|
|
|
CS << "u";
|
|
|
|
} else if (auto *CI = dyn_cast<ConstantInt>(COp)) {
|
[X86] Part 1 to fix x86-64 fp128 calling convention.
Almost all these changes are conditioned and only apply to the new
x86-64 f128 type configuration, which will be enabled in a follow up
patch. They are required together to make new f128 work. If there is
any error, we should fix or revert them as a whole.
These changes should have no impact to current configurations.
* Relax type legalization checks to accept new f128 type configuration,
whose TypeAction is TypeSoftenFloat, not TypeLegal, but also has
TLI.isTypeLegal true.
* Relax GetSoftenedFloat to return in some cases f128 type SDValue,
which is TLI.isTypeLegal but not "softened" to i128 node.
* Allow customized FABS, FNEG, FCOPYSIGN on new f128 type configuration,
to generate optimized bitwise operators for libm functions.
* Enhance related Lower* functions to handle f128 type.
* Enhance DAGTypeLegalizer::run, SoftenFloatResult, and related functions
to keep new f128 type in register, and convert f128 operators to library calls.
* Fix Combiner, Emitter, Legalizer routines that did not handle f128 type.
* Add ExpandConstant to handle i128 constants, ExpandNode
to handle ISD::Constant node.
* Add one more parameter to getCommonSubClass and firstCommonClass,
to guarantee that returned common sub class will contain the specified
simple value type.
This extra parameter is used by EmitCopyFromReg in InstrEmitter.cpp.
* Fix infinite loop in getTypeLegalizationCost when f128 is the value type.
* Fix printOperand to handle null operand.
* Enhance ISD::BITCAST node to handle f128 constant.
* Expand new f128 type for BR_CC, SELECT_CC, SELECT, SETCC nodes.
* Enhance X86AsmPrinter to emit f128 values in comments.
Differential Revision: http://reviews.llvm.org/D15134
llvm-svn: 254653
2015-12-04 06:02:40 +08:00
|
|
|
if (CI->getBitWidth() <= 64) {
|
|
|
|
CS << CI->getZExtValue();
|
|
|
|
} else {
|
|
|
|
// print multi-word constant as (w0,w1)
|
2016-06-08 18:01:20 +08:00
|
|
|
const auto &Val = CI->getValue();
|
[X86] Part 1 to fix x86-64 fp128 calling convention.
Almost all these changes are conditioned and only apply to the new
x86-64 f128 type configuration, which will be enabled in a follow up
patch. They are required together to make new f128 work. If there is
any error, we should fix or revert them as a whole.
These changes should have no impact to current configurations.
* Relax type legalization checks to accept new f128 type configuration,
whose TypeAction is TypeSoftenFloat, not TypeLegal, but also has
TLI.isTypeLegal true.
* Relax GetSoftenedFloat to return in some cases f128 type SDValue,
which is TLI.isTypeLegal but not "softened" to i128 node.
* Allow customized FABS, FNEG, FCOPYSIGN on new f128 type configuration,
to generate optimized bitwise operators for libm functions.
* Enhance related Lower* functions to handle f128 type.
* Enhance DAGTypeLegalizer::run, SoftenFloatResult, and related functions
to keep new f128 type in register, and convert f128 operators to library calls.
* Fix Combiner, Emitter, Legalizer routines that did not handle f128 type.
* Add ExpandConstant to handle i128 constants, ExpandNode
to handle ISD::Constant node.
* Add one more parameter to getCommonSubClass and firstCommonClass,
to guarantee that returned common sub class will contain the specified
simple value type.
This extra parameter is used by EmitCopyFromReg in InstrEmitter.cpp.
* Fix infinite loop in getTypeLegalizationCost when f128 is the value type.
* Fix printOperand to handle null operand.
* Enhance ISD::BITCAST node to handle f128 constant.
* Expand new f128 type for BR_CC, SELECT_CC, SELECT, SETCC nodes.
* Enhance X86AsmPrinter to emit f128 values in comments.
Differential Revision: http://reviews.llvm.org/D15134
llvm-svn: 254653
2015-12-04 06:02:40 +08:00
|
|
|
CS << "(";
|
|
|
|
for (int i = 0, N = Val.getNumWords(); i < N; ++i) {
|
|
|
|
if (i > 0)
|
|
|
|
CS << ",";
|
|
|
|
CS << Val.getRawData()[i];
|
|
|
|
}
|
|
|
|
CS << ")";
|
|
|
|
}
|
[x86] Teach the instruction lowering to add comments describing constant
pool data being loaded into a vector register.
The comments take the form of:
# ymm0 = [a,b,c,d,...]
# xmm1 = <x,y,z...>
The []s are used for generic sequential data and the <>s are used for
specifically ConstantVector loads. Undef elements are printed as the
letter 'u', integers in decimal, and floating point values as floating
point values. Suggestions on improving the formatting or other aspects
of the display are very welcome.
My primary use case for this is to be able to FileCheck test masks
passed to vector shuffle instructions in-register. It isn't fantastic
for that (no decoding special zeroing semantics or other tricks), but it
at least puts the mask onto an instruction line that could reasonably be
checked. I've updated many of the new vector shuffle lowering tests to
leverage this in their test cases so that we're actually checking the
shuffle masks remain as expected.
Before implementing this, I tried a *bunch* of different approaches.
I looked into teaching the MCInstLower code to scan up the basic block
and find a definition of a register used in a shuffle instruction and
then decode that, but this seems incredibly brittle and complex.
I talked to Hal a lot about the "right" way to do this: attach the raw
shuffle mask to the instruction itself in some form of unencoded
operands, and then use that to emit the comments. I still think that's
the optimal solution here, but it proved to be beyond what I'm up for
here. In particular, it seems likely best done by completing the
plumbing of metadata through these layers and attaching the shuffle mask
in metadata which could have fully automatic dropping when encoding an
actual instruction.
llvm-svn: 218377
2014-09-24 17:39:41 +08:00
|
|
|
} else if (auto *CF = dyn_cast<ConstantFP>(COp)) {
|
|
|
|
SmallString<32> Str;
|
|
|
|
CF->getValueAPF().toString(Str);
|
|
|
|
CS << Str;
|
|
|
|
} else {
|
|
|
|
CS << "?";
|
|
|
|
}
|
|
|
|
}
|
|
|
|
CS << ">";
|
2015-04-25 03:11:51 +08:00
|
|
|
OutStreamer->AddComment(CS.str());
|
[x86] Teach the instruction lowering to add comments describing constant
pool data being loaded into a vector register.
The comments take the form of:
# ymm0 = [a,b,c,d,...]
# xmm1 = <x,y,z...>
The []s are used for generic sequential data and the <>s are used for
specifically ConstantVector loads. Undef elements are printed as the
letter 'u', integers in decimal, and floating point values as floating
point values. Suggestions on improving the formatting or other aspects
of the display are very welcome.
My primary use case for this is to be able to FileCheck test masks
passed to vector shuffle instructions in-register. It isn't fantastic
for that (no decoding special zeroing semantics or other tricks), but it
at least puts the mask onto an instruction line that could reasonably be
checked. I've updated many of the new vector shuffle lowering tests to
leverage this in their test cases so that we're actually checking the
shuffle masks remain as expected.
Before implementing this, I tried a *bunch* of different approaches.
I looked into teaching the MCInstLower code to scan up the basic block
and find a definition of a register used in a shuffle instruction and
then decode that, but this seems incredibly brittle and complex.
I talked to Hal a lot about the "right" way to do this: attach the raw
shuffle mask to the instruction itself in some form of unencoded
operands, and then use that to emit the comments. I still think that's
the optimal solution here, but it proved to be beyond what I'm up for
here. In particular, it seems likely best done by completing the
plumbing of metadata through these layers and attaching the shuffle mask
in metadata which could have fully automatic dropping when encoding an
actual instruction.
llvm-svn: 218377
2014-09-24 17:39:41 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
break;
|
2014-09-24 10:16:12 +08:00
|
|
|
}
|
2012-08-02 02:39:17 +08:00
|
|
|
|
2009-09-13 04:34:57 +08:00
|
|
|
MCInst TmpInst;
|
|
|
|
MCInstLowering.Lower(MI, TmpInst);
|
2014-10-28 03:40:35 +08:00
|
|
|
|
|
|
|
// Stackmap shadows cannot include branch targets, so we can count the bytes
|
2014-10-28 06:38:45 +08:00
|
|
|
// in a call towards the shadow, but must ensure that no thread returns
|
|
|
|
// into the stackmap shadow. The only way to achieve this is if the call
|
|
|
|
// is at the end of the shadow.
|
|
|
|
if (MI->isCall()) {
|
|
|
|
// Count the size of the call towards the shadow.
|
2016-04-19 13:24:47 +08:00
|
|
|
SMShadowTracker.count(TmpInst, getSubtargetInfo(), CodeEmitter.get());
|
2014-10-28 06:38:45 +08:00
|
|
|
// Then flush the shadow so that we fill with nops before the call, not
|
|
|
|
// after it.
|
2015-04-25 03:11:51 +08:00
|
|
|
SMShadowTracker.emitShadowPadding(*OutStreamer, getSubtargetInfo());
|
2014-10-28 06:38:45 +08:00
|
|
|
// Then emit the call
|
2015-04-25 03:11:51 +08:00
|
|
|
OutStreamer->EmitInstruction(TmpInst, getSubtargetInfo());
|
2014-10-28 06:38:45 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
EmitAndCountInstruction(TmpInst);
|
2009-09-03 01:35:12 +08:00
|
|
|
}
|