2017-07-26 07:51:02 +08:00
|
|
|
//===- AArch64AsmPrinter.cpp - AArch64 LLVM assembly writer ---------------===//
|
2014-03-29 18:18:08 +08:00
|
|
|
//
|
|
|
|
// The LLVM Compiler Infrastructure
|
|
|
|
//
|
|
|
|
// This file is distributed under the University of Illinois Open Source
|
|
|
|
// License. See LICENSE.TXT for details.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//
|
|
|
|
// This file contains a printer that converts from our internal representation
|
2014-05-24 20:50:23 +08:00
|
|
|
// of machine-dependent LLVM code to the AArch64 assembly language.
|
2014-03-29 18:18:08 +08:00
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2014-05-24 20:50:23 +08:00
|
|
|
#include "AArch64.h"
|
|
|
|
#include "AArch64MCInstLower.h"
|
2014-07-25 19:42:14 +08:00
|
|
|
#include "AArch64MachineFunctionInfo.h"
|
2014-05-24 20:50:23 +08:00
|
|
|
#include "AArch64RegisterInfo.h"
|
|
|
|
#include "AArch64Subtarget.h"
|
2017-08-31 16:28:48 +08:00
|
|
|
#include "AArch64TargetObjectFile.h"
|
2014-05-24 20:50:23 +08:00
|
|
|
#include "InstPrinter/AArch64InstPrinter.h"
|
2017-06-06 19:49:48 +08:00
|
|
|
#include "MCTargetDesc/AArch64AddressingModes.h"
|
2017-07-26 07:51:02 +08:00
|
|
|
#include "MCTargetDesc/AArch64MCTargetDesc.h"
|
|
|
|
#include "Utils/AArch64BaseInfo.h"
|
2014-03-29 18:18:08 +08:00
|
|
|
#include "llvm/ADT/SmallString.h"
|
2017-07-26 07:51:02 +08:00
|
|
|
#include "llvm/ADT/SmallVector.h"
|
|
|
|
#include "llvm/ADT/StringRef.h"
|
|
|
|
#include "llvm/ADT/Triple.h"
|
2014-03-29 18:18:08 +08:00
|
|
|
#include "llvm/ADT/Twine.h"
|
|
|
|
#include "llvm/CodeGen/AsmPrinter.h"
|
2017-07-26 07:51:02 +08:00
|
|
|
#include "llvm/CodeGen/MachineBasicBlock.h"
|
|
|
|
#include "llvm/CodeGen/MachineFunction.h"
|
2014-03-29 18:18:08 +08:00
|
|
|
#include "llvm/CodeGen/MachineInstr.h"
|
2018-10-25 04:19:09 +08:00
|
|
|
#include "llvm/CodeGen/MachineJumpTableInfo.h"
|
|
|
|
#include "llvm/CodeGen/MachineModuleInfoImpls.h"
|
2017-07-26 07:51:02 +08:00
|
|
|
#include "llvm/CodeGen/MachineOperand.h"
|
2014-07-25 19:42:14 +08:00
|
|
|
#include "llvm/CodeGen/StackMaps.h"
|
2017-11-17 09:07:10 +08:00
|
|
|
#include "llvm/CodeGen/TargetRegisterInfo.h"
|
2014-03-29 18:18:08 +08:00
|
|
|
#include "llvm/IR/DataLayout.h"
|
2017-07-26 07:51:02 +08:00
|
|
|
#include "llvm/IR/DebugInfoMetadata.h"
|
2014-03-29 18:18:08 +08:00
|
|
|
#include "llvm/MC/MCAsmInfo.h"
|
|
|
|
#include "llvm/MC/MCContext.h"
|
|
|
|
#include "llvm/MC/MCInst.h"
|
|
|
|
#include "llvm/MC/MCInstBuilder.h"
|
|
|
|
#include "llvm/MC/MCStreamer.h"
|
2015-03-06 04:04:21 +08:00
|
|
|
#include "llvm/MC/MCSymbol.h"
|
2017-07-26 07:51:02 +08:00
|
|
|
#include "llvm/Support/Casting.h"
|
|
|
|
#include "llvm/Support/ErrorHandling.h"
|
2014-03-29 18:18:08 +08:00
|
|
|
#include "llvm/Support/TargetRegistry.h"
|
2015-03-24 03:32:43 +08:00
|
|
|
#include "llvm/Support/raw_ostream.h"
|
2017-07-26 07:51:02 +08:00
|
|
|
#include "llvm/Target/TargetMachine.h"
|
|
|
|
#include <algorithm>
|
|
|
|
#include <cassert>
|
|
|
|
#include <cstdint>
|
|
|
|
#include <map>
|
|
|
|
#include <memory>
|
|
|
|
|
2014-03-29 18:18:08 +08:00
|
|
|
using namespace llvm;
|
|
|
|
|
2014-04-22 10:41:26 +08:00
|
|
|
#define DEBUG_TYPE "asm-printer"
|
|
|
|
|
2014-03-29 18:18:08 +08:00
|
|
|
namespace {

/// AArch64-specific AsmPrinter: lowers MachineInstrs to MCInsts and emits
/// them, plus target-specific directives (LOHs, jump tables, stackmaps,
/// XRay sleds).
class AArch64AsmPrinter : public AsmPrinter {
  // Lowers MachineInstr/MachineOperand to the MC layer for this target.
  AArch64MCInstLower MCInstLowering;
  // Accumulates stackmap/patchpoint records; serialized in EmitEndOfAsmFile.
  StackMaps SM;
  // Subtarget of the function currently being printed; set per function in
  // runOnMachineFunction.
  const AArch64Subtarget *STI;

public:
  AArch64AsmPrinter(TargetMachine &TM, std::unique_ptr<MCStreamer> Streamer)
      : AsmPrinter(TM, std::move(Streamer)), MCInstLowering(OutContext, *this),
        SM(*this) {}

  StringRef getPassName() const override { return "AArch64 Assembly Printer"; }

  /// Wrapper for MCInstLowering.lowerOperand() for the
  /// tblgen'erated pseudo lowering.
  bool lowerOperand(const MachineOperand &MO, MCOperand &MCOp) const {
    return MCInstLowering.lowerOperand(MO, MCOp);
  }

  // Emits all jump tables with the per-table entry size chosen by the
  // compression pass (see AArch64FunctionInfo::getJumpTableEntrySize).
  void EmitJumpTableInfo() override;
  void emitJumpTableEntry(const MachineJumpTableInfo *MJTI,
                          const MachineBasicBlock *MBB, unsigned JTI);

  // Expands the JumpTableDest8/16 pseudos (compressed jump-table dispatch).
  void LowerJumpTableDestSmall(MCStreamer &OutStreamer, const MachineInstr &MI);

  void LowerSTACKMAP(MCStreamer &OutStreamer, StackMaps &SM,
                     const MachineInstr &MI);
  void LowerPATCHPOINT(MCStreamer &OutStreamer, StackMaps &SM,
                       const MachineInstr &MI);

  // XRay instrumentation sled pseudos; all three delegate to EmitSled.
  void LowerPATCHABLE_FUNCTION_ENTER(const MachineInstr &MI);
  void LowerPATCHABLE_FUNCTION_EXIT(const MachineInstr &MI);
  void LowerPATCHABLE_TAIL_CALL(const MachineInstr &MI);

  void EmitSled(const MachineInstr &MI, SledKind Kind);

  /// tblgen'erated driver function for lowering simple MI->MC
  /// pseudo instructions.
  bool emitPseudoExpansionLowering(MCStreamer &OutStreamer,
                                   const MachineInstr *MI);

  void EmitInstruction(const MachineInstr *MI) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AsmPrinter::getAnalysisUsage(AU);
    AU.setPreservesAll();
  }

  bool runOnMachineFunction(MachineFunction &F) override {
    // Cache per-function state used by the emission callbacks below.
    AArch64FI = F.getInfo<AArch64FunctionInfo>();
    STI = static_cast<const AArch64Subtarget*>(&F.getSubtarget());
    bool Result = AsmPrinter::runOnMachineFunction(F);
    emitXRayTable();
    return Result;
  }

private:
  void printOperand(const MachineInstr *MI, unsigned OpNum, raw_ostream &O);
  bool printAsmMRegister(const MachineOperand &MO, char Mode, raw_ostream &O);
  bool printAsmRegInClass(const MachineOperand &MO,
                          const TargetRegisterClass *RC, bool isVector,
                          raw_ostream &O);

  // Inline-asm operand printing hooks (modifiers like %w0, %x0, %b0, ...).
  bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
                       unsigned AsmVariant, const char *ExtraCode,
                       raw_ostream &O) override;
  bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNum,
                             unsigned AsmVariant, const char *ExtraCode,
                             raw_ostream &O) override;

  void PrintDebugValueComment(const MachineInstr *MI, raw_ostream &OS);

  void EmitFunctionBodyEnd() override;

  MCSymbol *GetCPISymbol(unsigned CPID) const override;
  void EmitEndOfAsmFile(Module &M) override;

  // Per-function AArch64 info; set in runOnMachineFunction.
  AArch64FunctionInfo *AArch64FI = nullptr;

  /// Emit the LOHs contained in AArch64FI.
  void EmitLOHs();

  /// Emit instruction to set float register to zero.
  void EmitFMov0(const MachineInstr &MI);

  using MInstToMCSymbol = std::map<const MachineInstr *, MCSymbol *>;

  // Maps each LOH-related instruction to the label emitted before it, so
  // EmitLOHs can reference it in .loh directives.
  MInstToMCSymbol LOHInstToLabel;
};

} // end anonymous namespace
|
2014-03-29 18:18:08 +08:00
|
|
|
|
2016-11-17 13:15:37 +08:00
|
|
|
void AArch64AsmPrinter::LowerPATCHABLE_FUNCTION_ENTER(const MachineInstr &MI)
|
|
|
|
{
|
|
|
|
EmitSled(MI, SledKind::FUNCTION_ENTER);
|
|
|
|
}
|
|
|
|
|
|
|
|
void AArch64AsmPrinter::LowerPATCHABLE_FUNCTION_EXIT(const MachineInstr &MI)
|
|
|
|
{
|
|
|
|
EmitSled(MI, SledKind::FUNCTION_EXIT);
|
|
|
|
}
|
|
|
|
|
|
|
|
void AArch64AsmPrinter::LowerPATCHABLE_TAIL_CALL(const MachineInstr &MI)
|
|
|
|
{
|
|
|
|
EmitSled(MI, SledKind::TAIL_CALL);
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Emit an XRay sled of the given kind at the current point in the stream,
/// and record it so the XRay table (emitted in runOnMachineFunction) can
/// reference it. The emitted bytes form a patchable region of exactly
/// 8 instructions (32 bytes).
void AArch64AsmPrinter::EmitSled(const MachineInstr &MI, SledKind Kind)
{
  static const int8_t NoopsInSledCount = 7;
  // We want to emit the following pattern:
  //
  // .Lxray_sled_N:
  //   ALIGN
  //   B #32
  //   ; 7 NOP instructions (28 bytes)
  // .tmpN
  //
  // We need the 28 bytes (7 instructions) because at runtime, we'd be patching
  // over the full 32 bytes (8 instructions) with the following pattern:
  //
  //   STP X0, X30, [SP, #-16]! ; push X0 and the link register to the stack
  //   LDR W0, #12 ; W0 := function ID
  //   LDR X16,#12 ; X16 := addr of __xray_FunctionEntry or __xray_FunctionExit
  //   BLR X16 ; call the tracing trampoline
  //   ;DATA: 32 bits of function ID
  //   ;DATA: lower 32 bits of the address of the trampoline
  //   ;DATA: higher 32 bits of the address of the trampoline
  //   LDP X0, X30, [SP], #16 ; pop X0 and the link register from the stack
  //
  OutStreamer->EmitCodeAlignment(4);
  auto CurSled = OutContext.createTempSymbol("xray_sled_", true);
  OutStreamer->EmitLabel(CurSled);
  auto Target = OutContext.createTempSymbol();

  // Emit "B #32" instruction, which jumps over the next 28 bytes.
  // The operand has to be the number of 4-byte instructions to jump over,
  // including the current instruction.
  EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::B).addImm(8));

  // HINT #0 is the canonical NOP encoding.
  for (int8_t I = 0; I < NoopsInSledCount; I++)
    EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::HINT).addImm(0));

  OutStreamer->EmitLabel(Target);
  recordSled(CurSled, MI, Kind);
}
|
|
|
|
|
2014-05-24 20:50:23 +08:00
|
|
|
/// Per-module epilogue. On Mach-O targets this emits the
/// .subsections_via_symbols directive and serializes the stackmap section;
/// other object formats need nothing here.
void AArch64AsmPrinter::EmitEndOfAsmFile(Module &M) {
  const Triple &TT = TM.getTargetTriple();
  if (!TT.isOSBinFormatMachO())
    return;

  // Funny Darwin hack: This flag tells the linker that no global symbols
  // contain code that falls through to other global symbols (e.g. the obvious
  // implementation of multiple entry points). If this doesn't occur, the
  // linker can safely perform dead code stripping. Since LLVM never
  // generates code that does this, it is always safe to set.
  OutStreamer->EmitAssemblerFlag(MCAF_SubsectionsViaSymbols);
  SM.serializeToStackMapSection();
}
|
|
|
|
|
2014-05-24 20:50:23 +08:00
|
|
|
void AArch64AsmPrinter::EmitLOHs() {
|
2014-03-29 18:18:08 +08:00
|
|
|
SmallVector<MCSymbol *, 3> MCArgs;
|
|
|
|
|
2014-05-24 20:50:23 +08:00
|
|
|
for (const auto &D : AArch64FI->getLOHContainer()) {
|
2014-03-30 03:21:20 +08:00
|
|
|
for (const MachineInstr *MI : D.getArgs()) {
|
|
|
|
MInstToMCSymbol::iterator LabelIt = LOHInstToLabel.find(MI);
|
2014-03-29 18:18:08 +08:00
|
|
|
assert(LabelIt != LOHInstToLabel.end() &&
|
|
|
|
"Label hasn't been inserted for LOH related instruction");
|
|
|
|
MCArgs.push_back(LabelIt->second);
|
|
|
|
}
|
2015-04-25 03:11:51 +08:00
|
|
|
OutStreamer->EmitLOHDirective(D.getKind(), MCArgs);
|
2014-03-29 18:18:08 +08:00
|
|
|
MCArgs.clear();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-05-24 20:50:23 +08:00
|
|
|
void AArch64AsmPrinter::EmitFunctionBodyEnd() {
|
|
|
|
if (!AArch64FI->getLOHRelated().empty())
|
2014-03-29 18:18:08 +08:00
|
|
|
EmitLOHs();
|
|
|
|
}
|
|
|
|
|
|
|
|
/// GetCPISymbol - Return the symbol for the specified constant pool entry.
|
2014-05-24 20:50:23 +08:00
|
|
|
MCSymbol *AArch64AsmPrinter::GetCPISymbol(unsigned CPID) const {
|
2014-03-29 18:18:08 +08:00
|
|
|
// Darwin uses a linker-private symbol name for constant-pools (to
|
|
|
|
// avoid addends on the relocation?), ELF has no such concept and
|
|
|
|
// uses a normal private symbol.
|
2016-10-01 13:57:55 +08:00
|
|
|
if (!getDataLayout().getLinkerPrivateGlobalPrefix().empty())
|
2015-05-19 02:43:14 +08:00
|
|
|
return OutContext.getOrCreateSymbol(
|
2014-03-29 18:18:08 +08:00
|
|
|
Twine(getDataLayout().getLinkerPrivateGlobalPrefix()) + "CPI" +
|
|
|
|
Twine(getFunctionNumber()) + "_" + Twine(CPID));
|
|
|
|
|
2018-07-26 02:35:31 +08:00
|
|
|
return AsmPrinter::GetCPISymbol(CPID);
|
2014-03-29 18:18:08 +08:00
|
|
|
}
|
|
|
|
|
2014-05-24 20:50:23 +08:00
|
|
|
void AArch64AsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNum,
|
|
|
|
raw_ostream &O) {
|
2014-03-29 18:18:08 +08:00
|
|
|
const MachineOperand &MO = MI->getOperand(OpNum);
|
|
|
|
switch (MO.getType()) {
|
|
|
|
default:
|
2014-06-18 13:05:13 +08:00
|
|
|
llvm_unreachable("<unknown operand type>");
|
2014-03-29 18:18:08 +08:00
|
|
|
case MachineOperand::MO_Register: {
|
|
|
|
unsigned Reg = MO.getReg();
|
|
|
|
assert(TargetRegisterInfo::isPhysicalRegister(Reg));
|
|
|
|
assert(!MO.getSubReg() && "Subregs should be eliminated!");
|
2014-05-24 20:50:23 +08:00
|
|
|
O << AArch64InstPrinter::getRegisterName(Reg);
|
2014-03-29 18:18:08 +08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
case MachineOperand::MO_Immediate: {
|
|
|
|
int64_t Imm = MO.getImm();
|
|
|
|
O << '#' << Imm;
|
|
|
|
break;
|
|
|
|
}
|
2015-03-06 04:04:21 +08:00
|
|
|
case MachineOperand::MO_GlobalAddress: {
|
|
|
|
const GlobalValue *GV = MO.getGlobal();
|
|
|
|
MCSymbol *Sym = getSymbol(GV);
|
|
|
|
|
|
|
|
// FIXME: Can we get anything other than a plain symbol here?
|
|
|
|
assert(!MO.getTargetFlags() && "Unknown operand target flag!");
|
|
|
|
|
2015-06-09 08:31:39 +08:00
|
|
|
Sym->print(O, MAI);
|
2015-03-06 04:04:21 +08:00
|
|
|
printOffset(MO.getOffset(), O);
|
|
|
|
break;
|
|
|
|
}
|
2018-05-16 17:33:25 +08:00
|
|
|
case MachineOperand::MO_BlockAddress: {
|
|
|
|
MCSymbol *Sym = GetBlockAddressSymbol(MO.getBlockAddress());
|
|
|
|
Sym->print(O, MAI);
|
|
|
|
break;
|
|
|
|
}
|
2014-03-29 18:18:08 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-05-24 20:50:23 +08:00
|
|
|
bool AArch64AsmPrinter::printAsmMRegister(const MachineOperand &MO, char Mode,
|
|
|
|
raw_ostream &O) {
|
2014-03-29 18:18:08 +08:00
|
|
|
unsigned Reg = MO.getReg();
|
|
|
|
switch (Mode) {
|
|
|
|
default:
|
|
|
|
return true; // Unknown mode.
|
|
|
|
case 'w':
|
|
|
|
Reg = getWRegFromXReg(Reg);
|
|
|
|
break;
|
|
|
|
case 'x':
|
|
|
|
Reg = getXRegFromWReg(Reg);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2014-05-24 20:50:23 +08:00
|
|
|
O << AArch64InstPrinter::getRegisterName(Reg);
|
2014-03-29 18:18:08 +08:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Prints the register in MO using class RC using the offset in the
|
|
|
|
// new register class. This should not be used for cross class
|
|
|
|
// printing.
|
2014-05-24 20:50:23 +08:00
|
|
|
bool AArch64AsmPrinter::printAsmRegInClass(const MachineOperand &MO,
|
|
|
|
const TargetRegisterClass *RC,
|
|
|
|
bool isVector, raw_ostream &O) {
|
2014-03-29 18:18:08 +08:00
|
|
|
assert(MO.isReg() && "Should only get here with a register!");
|
2016-07-07 05:39:33 +08:00
|
|
|
const TargetRegisterInfo *RI = STI->getRegisterInfo();
|
2014-03-29 18:18:08 +08:00
|
|
|
unsigned Reg = MO.getReg();
|
|
|
|
unsigned RegToPrint = RC->getRegister(RI->getEncodingValue(Reg));
|
|
|
|
assert(RI->regsOverlap(RegToPrint, Reg));
|
2014-05-24 20:50:23 +08:00
|
|
|
O << AArch64InstPrinter::getRegisterName(
|
|
|
|
RegToPrint, isVector ? AArch64::vreg : AArch64::NoRegAltName);
|
2014-03-29 18:18:08 +08:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2014-05-24 20:50:23 +08:00
|
|
|
/// Print an inline-asm operand, honoring AArch64-specific modifiers:
/// 'a' (memory form), 'w'/'x' (GPR width), and 'b'/'h'/'s'/'d'/'q'
/// (FP register width). Returns true to signal an unknown modifier
/// (the AsmPrinter error convention), false on success.
bool AArch64AsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
                                        unsigned AsmVariant,
                                        const char *ExtraCode, raw_ostream &O) {
  const MachineOperand &MO = MI->getOperand(OpNum);

  // First try the generic code, which knows about modifiers like 'c' and 'n'.
  if (!AsmPrinter::PrintAsmOperand(MI, OpNum, AsmVariant, ExtraCode, O))
    return false;

  // Does this asm operand have a single letter operand modifier?
  if (ExtraCode && ExtraCode[0]) {
    if (ExtraCode[1] != 0)
      return true; // Unknown modifier.

    switch (ExtraCode[0]) {
    default:
      return true; // Unknown modifier.
    case 'a': // Print 'a' modifier
      PrintAsmMemoryOperand(MI, OpNum, AsmVariant, ExtraCode, O);
      return false;
    case 'w': // Print W register
    case 'x': // Print X register
      if (MO.isReg())
        return printAsmMRegister(MO, ExtraCode[0], O);
      if (MO.isImm() && MO.getImm() == 0) {
        // Immediate zero is printed as the zero register of the
        // requested width.
        unsigned Reg = ExtraCode[0] == 'w' ? AArch64::WZR : AArch64::XZR;
        O << AArch64InstPrinter::getRegisterName(Reg);
        return false;
      }
      printOperand(MI, OpNum, O);
      return false;
    case 'b': // Print B register.
    case 'h': // Print H register.
    case 's': // Print S register.
    case 'd': // Print D register.
    case 'q': // Print Q register.
      if (MO.isReg()) {
        // Choose the FP register class matching the requested width.
        const TargetRegisterClass *RC;
        switch (ExtraCode[0]) {
        case 'b':
          RC = &AArch64::FPR8RegClass;
          break;
        case 'h':
          RC = &AArch64::FPR16RegClass;
          break;
        case 's':
          RC = &AArch64::FPR32RegClass;
          break;
        case 'd':
          RC = &AArch64::FPR64RegClass;
          break;
        case 'q':
          RC = &AArch64::FPR128RegClass;
          break;
        default:
          return true;
        }
        return printAsmRegInClass(MO, RC, false /* vector */, O);
      }
      printOperand(MI, OpNum, O);
      return false;
    }
  }

  // According to ARM, we should emit x and v registers unless we have a
  // modifier.
  if (MO.isReg()) {
    unsigned Reg = MO.getReg();

    // If this is a w or x register, print an x register.
    if (AArch64::GPR32allRegClass.contains(Reg) ||
        AArch64::GPR64allRegClass.contains(Reg))
      return printAsmMRegister(MO, 'x', O);

    // If this is a b, h, s, d, or q register, print it as a v register.
    return printAsmRegInClass(MO, &AArch64::FPR128RegClass, true /* vector */,
                              O);
  }

  printOperand(MI, OpNum, O);
  return false;
}
|
|
|
|
|
2014-05-24 20:50:23 +08:00
|
|
|
bool AArch64AsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI,
|
|
|
|
unsigned OpNum,
|
|
|
|
unsigned AsmVariant,
|
|
|
|
const char *ExtraCode,
|
|
|
|
raw_ostream &O) {
|
2017-05-26 03:07:57 +08:00
|
|
|
if (ExtraCode && ExtraCode[0] && ExtraCode[0] != 'a')
|
2014-03-29 18:18:08 +08:00
|
|
|
return true; // Unknown modifier.
|
|
|
|
|
|
|
|
const MachineOperand &MO = MI->getOperand(OpNum);
|
|
|
|
assert(MO.isReg() && "unexpected inline asm memory operand");
|
2014-05-24 20:50:23 +08:00
|
|
|
O << "[" << AArch64InstPrinter::getRegisterName(MO.getReg()) << "]";
|
2014-03-29 18:18:08 +08:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2014-05-24 20:50:23 +08:00
|
|
|
void AArch64AsmPrinter::PrintDebugValueComment(const MachineInstr *MI,
|
|
|
|
raw_ostream &OS) {
|
2014-03-29 18:18:08 +08:00
|
|
|
unsigned NOps = MI->getNumOperands();
|
|
|
|
assert(NOps == 4);
|
|
|
|
OS << '\t' << MAI->getCommentString() << "DEBUG_VALUE: ";
|
|
|
|
// cast away const; DIetc do not take const operands for some reason.
|
2015-04-30 00:38:44 +08:00
|
|
|
OS << cast<DILocalVariable>(MI->getOperand(NOps - 2).getMetadata())
|
2015-04-14 10:22:36 +08:00
|
|
|
->getName();
|
2014-03-29 18:18:08 +08:00
|
|
|
OS << " <- ";
|
|
|
|
// Frame address. Currently handles register +- offset only.
|
|
|
|
assert(MI->getOperand(0).isReg() && MI->getOperand(1).isImm());
|
|
|
|
OS << '[';
|
|
|
|
printOperand(MI, 0, OS);
|
|
|
|
OS << '+';
|
|
|
|
printOperand(MI, 1, OS);
|
|
|
|
OS << ']';
|
|
|
|
OS << "+";
|
|
|
|
printOperand(MI, NOps - 2, OS);
|
|
|
|
}
|
|
|
|
|
2018-10-25 04:19:09 +08:00
|
|
|
/// Emit all jump tables for the current function into the read-only section,
/// using the per-table entry size recorded by the jump-table compression pass
/// in AArch64FunctionInfo.
void AArch64AsmPrinter::EmitJumpTableInfo() {
  const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
  if (!MJTI) return;

  const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
  if (JT.empty()) return;

  const TargetLoweringObjectFile &TLOF = getObjFileLowering();
  MCSection *ReadOnlySec = TLOF.getSectionForJumpTable(MF->getFunction(), TM);
  OutStreamer->SwitchSection(ReadOnlySec);

  auto AFI = MF->getInfo<AArch64FunctionInfo>();
  for (unsigned JTI = 0, e = JT.size(); JTI != e; ++JTI) {
    const std::vector<MachineBasicBlock*> &JTBBs = JT[JTI].MBBs;

    // If this jump table was deleted, ignore it.
    if (JTBBs.empty()) continue;

    // Align the table to its (power-of-two) entry size, then label it so
    // the dispatch sequence can address it.
    unsigned Size = AFI->getJumpTableEntrySize(JTI);
    EmitAlignment(Log2_32(Size));
    OutStreamer->EmitLabel(GetJTISymbol(JTI));

    for (auto *JTBB : JTBBs)
      emitJumpTableEntry(MJTI, JTBB, JTI);
  }
}
|
|
|
|
|
|
|
|
/// Emit one entry of jump table JTI for destination block MBB. 4-byte
/// entries are a plain offset from the table base; 1- and 2-byte
/// (compressed) entries are offsets from a pass-chosen base symbol,
/// counted in 4-byte instructions (hence the >> 2).
void AArch64AsmPrinter::emitJumpTableEntry(const MachineJumpTableInfo *MJTI,
                                           const MachineBasicBlock *MBB,
                                           unsigned JTI) {
  const MCExpr *Value = MCSymbolRefExpr::create(MBB->getSymbol(), OutContext);
  auto AFI = MF->getInfo<AArch64FunctionInfo>();
  unsigned Size = AFI->getJumpTableEntrySize(JTI);

  if (Size == 4) {
    // .word LBB - LJTI
    const TargetLowering *TLI = MF->getSubtarget().getTargetLowering();
    const MCExpr *Base = TLI->getPICJumpTableRelocBaseExpr(MF, JTI, OutContext);
    Value = MCBinaryExpr::createSub(Value, Base, OutContext);
  } else {
    // .byte (LBB - LBB) >> 2 (or .hword)
    // The base symbol comes from the compression pass (see
    // LowerJumpTableDestSmall, which materializes the same symbol with ADR).
    const MCSymbol *BaseSym = AFI->getJumpTableEntryPCRelSymbol(JTI);
    const MCExpr *Base = MCSymbolRefExpr::create(BaseSym, OutContext);
    Value = MCBinaryExpr::createSub(Value, Base, OutContext);
    Value = MCBinaryExpr::createLShr(
        Value, MCConstantExpr::create(2, OutContext), OutContext);
  }

  OutStreamer->EmitValue(Value, Size);
}
|
|
|
|
|
|
|
|
/// Small jump tables contain an unsigned byte or half, representing the offset
|
|
|
|
/// from the lowest-addressed possible destination to the desired basic
|
|
|
|
/// block. Since all instructions are 4-byte aligned, this is further compressed
|
|
|
|
/// by counting in instructions rather than bytes (i.e. divided by 4). So, to
|
|
|
|
/// materialize the correct destination we need:
|
|
|
|
///
|
|
|
|
/// adr xDest, .LBB0_0
|
|
|
|
/// ldrb wScratch, [xTable, xEntry] (with "lsl #1" for ldrh).
|
|
|
|
/// add xDest, xDest, xScratch, lsl #2
|
|
|
|
void AArch64AsmPrinter::LowerJumpTableDestSmall(llvm::MCStreamer &OutStreamer,
|
|
|
|
const llvm::MachineInstr &MI) {
|
|
|
|
unsigned DestReg = MI.getOperand(0).getReg();
|
|
|
|
unsigned ScratchReg = MI.getOperand(1).getReg();
|
|
|
|
unsigned ScratchRegW =
|
|
|
|
STI->getRegisterInfo()->getSubReg(ScratchReg, AArch64::sub_32);
|
|
|
|
unsigned TableReg = MI.getOperand(2).getReg();
|
|
|
|
unsigned EntryReg = MI.getOperand(3).getReg();
|
|
|
|
int JTIdx = MI.getOperand(4).getIndex();
|
|
|
|
bool IsByteEntry = MI.getOpcode() == AArch64::JumpTableDest8;
|
|
|
|
|
|
|
|
// This has to be first because the compression pass based its reachability
|
|
|
|
// calculations on the start of the JumpTableDest instruction.
|
|
|
|
auto Label =
|
|
|
|
MF->getInfo<AArch64FunctionInfo>()->getJumpTableEntryPCRelSymbol(JTIdx);
|
|
|
|
EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::ADR)
|
|
|
|
.addReg(DestReg)
|
|
|
|
.addExpr(MCSymbolRefExpr::create(
|
|
|
|
Label, MF->getContext())));
|
|
|
|
|
|
|
|
// Load the number of instruction-steps to offset from the label.
|
|
|
|
unsigned LdrOpcode = IsByteEntry ? AArch64::LDRBBroX : AArch64::LDRHHroX;
|
|
|
|
EmitToStreamer(OutStreamer, MCInstBuilder(LdrOpcode)
|
|
|
|
.addReg(ScratchRegW)
|
|
|
|
.addReg(TableReg)
|
|
|
|
.addReg(EntryReg)
|
|
|
|
.addImm(0)
|
|
|
|
.addImm(IsByteEntry ? 0 : 1));
|
|
|
|
|
|
|
|
// Multiply the steps by 4 and add to the already materialized base label
|
|
|
|
// address.
|
|
|
|
EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::ADDXrs)
|
|
|
|
.addReg(DestReg)
|
|
|
|
.addReg(DestReg)
|
|
|
|
.addReg(ScratchReg)
|
|
|
|
.addImm(2));
|
|
|
|
}
|
|
|
|
|
2014-05-24 20:50:23 +08:00
|
|
|
/// Record a STACKMAP instruction and emit its NOP shadow. The shadow is
/// trimmed by the size of any following ordinary instructions, stopping at
/// control-relevant points (calls, DBG_VALUE, other stackmaps/patchpoints).
void AArch64AsmPrinter::LowerSTACKMAP(MCStreamer &OutStreamer, StackMaps &SM,
                                      const MachineInstr &MI) {
  unsigned NumNOPBytes = StackMapOpers(&MI).getNumPatchBytes();

  SM.recordStackMap(MI);
  // All AArch64 instructions are 4 bytes, so the shadow must be too.
  assert(NumNOPBytes % 4 == 0 && "Invalid number of NOP bytes requested!");

  // Scan ahead to trim the shadow.
  const MachineBasicBlock &MBB = *MI.getParent();
  MachineBasicBlock::const_iterator MII(MI);
  ++MII;
  while (NumNOPBytes > 0) {
    if (MII == MBB.end() || MII->isCall() ||
        MII->getOpcode() == AArch64::DBG_VALUE ||
        MII->getOpcode() == TargetOpcode::PATCHPOINT ||
        MII->getOpcode() == TargetOpcode::STACKMAP)
      break;
    ++MII;
    NumNOPBytes -= 4;
  }

  // Emit nops.
  for (unsigned i = 0; i < NumNOPBytes; i += 4)
    EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::HINT).addImm(0));
}
|
|
|
|
|
|
|
|
// Lower a patchpoint of the form:
// [<def>], <id>, <numBytes>, <target>, <numArgs>
//
// If a non-zero call target is present, materialize it into a scratch
// register with MOVZ + 2x MOVK (48 bits) and BLR it (16 bytes total), then
// pad the remainder of the requested patch area with NOPs.
void AArch64AsmPrinter::LowerPATCHPOINT(MCStreamer &OutStreamer, StackMaps &SM,
                                        const MachineInstr &MI) {
  SM.recordPatchPoint(MI);

  PatchPointOpers Opers(&MI);

  int64_t CallTarget = Opers.getCallTarget().getImm();
  unsigned EncodedBytes = 0;
  if (CallTarget) {
    // Only 48 bits of target address are materialized below, so the top
    // 16 bits must be zero.
    assert((CallTarget & 0xFFFFFFFFFFFF) == CallTarget &&
           "High 16 bits of call target should be zero.");
    unsigned ScratchReg = MI.getOperand(Opers.getNextScratchIdx()).getReg();
    EncodedBytes = 16;
    // Materialize the jump address:
    EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::MOVZXi)
                                    .addReg(ScratchReg)
                                    .addImm((CallTarget >> 32) & 0xFFFF)
                                    .addImm(32));
    EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::MOVKXi)
                                    .addReg(ScratchReg)
                                    .addReg(ScratchReg)
                                    .addImm((CallTarget >> 16) & 0xFFFF)
                                    .addImm(16));
    EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::MOVKXi)
                                    .addReg(ScratchReg)
                                    .addReg(ScratchReg)
                                    .addImm(CallTarget & 0xFFFF)
                                    .addImm(0));
    EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::BLR).addReg(ScratchReg));
  }
  // Emit padding.
  unsigned NumBytes = Opers.getNumPatchBytes();
  assert(NumBytes >= EncodedBytes &&
         "Patchpoint can't request size less than the length of a call.");
  assert((NumBytes - EncodedBytes) % 4 == 0 &&
         "Invalid number of NOP bytes requested!");
  for (unsigned i = EncodedBytes; i < NumBytes; i += 4)
    EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::HINT).addImm(0));
}
|
|
|
|
|
2016-07-07 05:39:33 +08:00
|
|
|
/// Expand the FMOVH0/FMOVS0/FMOVD0 pseudos: zero an FP register. On
/// subtargets with zero-cycle FP zeroing (and no workaround needed), zero
/// the whole covering Q register with MOVI; otherwise move the appropriate
/// zero GPR into the FP register with FMOV.
void AArch64AsmPrinter::EmitFMov0(const MachineInstr &MI) {
  unsigned DestReg = MI.getOperand(0).getReg();
  if (STI->hasZeroCycleZeroingFP() && !STI->hasZeroCycleZeroingFPWorkaround()) {
    // Convert H/S/D register to corresponding Q register
    if (AArch64::H0 <= DestReg && DestReg <= AArch64::H31)
      DestReg = AArch64::Q0 + (DestReg - AArch64::H0);
    else if (AArch64::S0 <= DestReg && DestReg <= AArch64::S31)
      DestReg = AArch64::Q0 + (DestReg - AArch64::S0);
    else {
      assert(AArch64::D0 <= DestReg && DestReg <= AArch64::D31);
      DestReg = AArch64::Q0 + (DestReg - AArch64::D0);
    }
    // movi qN.2d, #0
    MCInst MOVI;
    MOVI.setOpcode(AArch64::MOVIv2d_ns);
    MOVI.addOperand(MCOperand::createReg(DestReg));
    MOVI.addOperand(MCOperand::createImm(0));
    EmitToStreamer(*OutStreamer, MOVI);
  } else {
    // fmov from WZR/XZR, sized to the pseudo's width.
    MCInst FMov;
    switch (MI.getOpcode()) {
    default: llvm_unreachable("Unexpected opcode");
    case AArch64::FMOVH0:
      FMov.setOpcode(AArch64::FMOVWHr);
      FMov.addOperand(MCOperand::createReg(DestReg));
      FMov.addOperand(MCOperand::createReg(AArch64::WZR));
      break;
    case AArch64::FMOVS0:
      FMov.setOpcode(AArch64::FMOVWSr);
      FMov.addOperand(MCOperand::createReg(DestReg));
      FMov.addOperand(MCOperand::createReg(AArch64::WZR));
      break;
    case AArch64::FMOVD0:
      FMov.setOpcode(AArch64::FMOVXDr);
      FMov.addOperand(MCOperand::createReg(DestReg));
      FMov.addOperand(MCOperand::createReg(AArch64::XZR));
      break;
    }
    EmitToStreamer(*OutStreamer, FMov);
  }
}
|
|
|
|
|
2014-03-29 18:18:08 +08:00
|
|
|
// Simple pseudo-instructions have their lowering (with expansion to real
|
|
|
|
// instructions) auto-generated.
|
2014-05-24 20:50:23 +08:00
|
|
|
#include "AArch64GenMCPseudoLowering.inc"
|
2014-03-29 18:18:08 +08:00
|
|
|
|
2014-05-24 20:50:23 +08:00
|
|
|
// Lower one MachineInstr to MC and emit it: run the auto-generated pseudo
// expansions first, then handle AArch64-specific pseudos by hand (tail
// calls, the TLS descriptor call sequence, compressed jump tables, FP
// zeroing moves, stackmaps/patchpoints, XRay patchable sleds), and finally
// fall back to the generic MI -> MCInst lowering.
void AArch64AsmPrinter::EmitInstruction(const MachineInstr *MI) {
  // Do any auto-generated pseudo lowerings.
  if (emitPseudoExpansionLowering(*OutStreamer, MI))
    return;

  // Linker Optimization Hints refer to instructions by label: emit a fresh
  // temporary label in front of any LOH-related instruction and remember the
  // association so the LOH directives can reference it later.
  if (AArch64FI->getLOHRelated().count(MI)) {
    // Generate a label for LOH related instruction
    MCSymbol *LOHLabel = createTempSymbol("loh");
    // Associate the instruction with the label
    LOHInstToLabel[MI] = LOHLabel;
    OutStreamer->EmitLabel(LOHLabel);
  }

  // Do any manual lowerings.
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::MOVIv2d_ns:
    // If the target has <rdar://problem/16473581>, lower this
    // instruction to movi.16b instead.
    if (STI->hasZeroCycleZeroingFPWorkaround() &&
        MI->getOperand(1).getImm() == 0) {
      MCInst TmpInst;
      TmpInst.setOpcode(AArch64::MOVIv16b_ns);
      TmpInst.addOperand(MCOperand::createReg(MI->getOperand(0).getReg()));
      TmpInst.addOperand(MCOperand::createImm(MI->getOperand(1).getImm()));
      EmitToStreamer(*OutStreamer, TmpInst);
      return;
    }
    break;

  case AArch64::DBG_VALUE: {
    // DBG_VALUE emits no code; when verbose assembly is requested and the
    // streamer supports raw text, print it as a human-readable comment.
    if (isVerbose() && OutStreamer->hasRawTextSupport()) {
      SmallString<128> TmpStr;
      raw_svector_ostream OS(TmpStr);
      PrintDebugValueComment(MI, OS);
      OutStreamer->EmitRawText(StringRef(OS.str()));
    }
    return;
  }

  // Tail calls use pseudo instructions so they have the proper code-gen
  // attributes (isCall, isReturn, etc.). We lower them to the real
  // instruction here.
  case AArch64::TCRETURNri:
  case AArch64::TCRETURNriBTI:
  case AArch64::TCRETURNriALL: {
    // Indirect tail call: a plain BR through the register operand.
    MCInst TmpInst;
    TmpInst.setOpcode(AArch64::BR);
    TmpInst.addOperand(MCOperand::createReg(MI->getOperand(0).getReg()));
    EmitToStreamer(*OutStreamer, TmpInst);
    return;
  }
  case AArch64::TCRETURNdi: {
    // Direct tail call: an unconditional branch to the lowered callee symbol.
    MCOperand Dest;
    MCInstLowering.lowerOperand(MI->getOperand(0), Dest);
    MCInst TmpInst;
    TmpInst.setOpcode(AArch64::B);
    TmpInst.addOperand(Dest);
    EmitToStreamer(*OutStreamer, TmpInst);
    return;
  }
  case AArch64::TLSDESC_CALLSEQ: {
    /// lower this to:
    /// adrp x0, :tlsdesc:var
    /// ldr x1, [x0, #:tlsdesc_lo12:var]
    /// add x0, x0, #:tlsdesc_lo12:var
    /// .tlsdesccall var
    /// blr x1
    /// (TPIDR_EL0 offset now in x0)
    // NOTE: linkers pattern-match this exact sequence in order to relax it,
    // so the instructions below must be emitted in precisely this form.
    // The same symbol operand is lowered three times with different target
    // flags so that each instruction gets its own relocation kind.
    const MachineOperand &MO_Sym = MI->getOperand(0);
    MachineOperand MO_TLSDESC_LO12(MO_Sym), MO_TLSDESC(MO_Sym);
    MCOperand Sym, SymTLSDescLo12, SymTLSDesc;
    MO_TLSDESC_LO12.setTargetFlags(AArch64II::MO_TLS | AArch64II::MO_PAGEOFF);
    MO_TLSDESC.setTargetFlags(AArch64II::MO_TLS | AArch64II::MO_PAGE);
    MCInstLowering.lowerOperand(MO_Sym, Sym);
    MCInstLowering.lowerOperand(MO_TLSDESC_LO12, SymTLSDescLo12);
    MCInstLowering.lowerOperand(MO_TLSDESC, SymTLSDesc);

    // adrp x0, :tlsdesc:var
    MCInst Adrp;
    Adrp.setOpcode(AArch64::ADRP);
    Adrp.addOperand(MCOperand::createReg(AArch64::X0));
    Adrp.addOperand(SymTLSDesc);
    EmitToStreamer(*OutStreamer, Adrp);

    // ldr x1, [x0, #:tlsdesc_lo12:var]
    MCInst Ldr;
    Ldr.setOpcode(AArch64::LDRXui);
    Ldr.addOperand(MCOperand::createReg(AArch64::X1));
    Ldr.addOperand(MCOperand::createReg(AArch64::X0));
    Ldr.addOperand(SymTLSDescLo12);
    Ldr.addOperand(MCOperand::createImm(0));
    EmitToStreamer(*OutStreamer, Ldr);

    // add x0, x0, #:tlsdesc_lo12:var  (no shift on the immediate)
    MCInst Add;
    Add.setOpcode(AArch64::ADDXri);
    Add.addOperand(MCOperand::createReg(AArch64::X0));
    Add.addOperand(MCOperand::createReg(AArch64::X0));
    Add.addOperand(SymTLSDescLo12);
    Add.addOperand(MCOperand::createImm(AArch64_AM::getShiftValue(0)));
    EmitToStreamer(*OutStreamer, Add);

    // Emit a relocation-annotation. This expands to no code, but requests
    // the following instruction gets an R_AARCH64_TLSDESC_CALL.
    MCInst TLSDescCall;
    TLSDescCall.setOpcode(AArch64::TLSDESCCALL);
    TLSDescCall.addOperand(Sym);
    EmitToStreamer(*OutStreamer, TLSDescCall);

    // blr x1 -- the descriptor's resolver function.
    MCInst Blr;
    Blr.setOpcode(AArch64::BLR);
    Blr.addOperand(MCOperand::createReg(AArch64::X1));
    EmitToStreamer(*OutStreamer, Blr);

    return;
  }

  case AArch64::JumpTableDest32: {
    // We want:
    //     ldrsw xScratch, [xTable, xEntry, lsl #2]
    //     add xDest, xTable, xScratch
    unsigned DestReg = MI->getOperand(0).getReg(),
             ScratchReg = MI->getOperand(1).getReg(),
             TableReg = MI->getOperand(2).getReg(),
             EntryReg = MI->getOperand(3).getReg();
    // The trailing immediates pick the scaled register-offset form shown
    // above (extend field 0, shift enabled) for the 4-byte table entries.
    EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::LDRSWroX)
                                     .addReg(ScratchReg)
                                     .addReg(TableReg)
                                     .addReg(EntryReg)
                                     .addImm(0)
                                     .addImm(1));
    // Unshifted register-register add: xDest = xTable + xScratch.
    EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::ADDXrs)
                                     .addReg(DestReg)
                                     .addReg(TableReg)
                                     .addReg(ScratchReg)
                                     .addImm(0));
    return;
  }
  case AArch64::JumpTableDest16:
  case AArch64::JumpTableDest8:
    // 8- and 16-bit-entry jump tables need a more involved expansion;
    // handled out of line.
    LowerJumpTableDestSmall(*OutStreamer, *MI);
    return;

  case AArch64::FMOVH0:
  case AArch64::FMOVS0:
  case AArch64::FMOVD0:
    // Materialize an FP zero; the chosen expansion is subtarget-dependent
    // (see EmitFMov0).
    EmitFMov0(*MI);
    return;

  case TargetOpcode::STACKMAP:
    return LowerSTACKMAP(*OutStreamer, SM, *MI);

  case TargetOpcode::PATCHPOINT:
    return LowerPATCHPOINT(*OutStreamer, SM, *MI);

  case TargetOpcode::PATCHABLE_FUNCTION_ENTER:
    LowerPATCHABLE_FUNCTION_ENTER(*MI);
    return;

  case TargetOpcode::PATCHABLE_FUNCTION_EXIT:
    LowerPATCHABLE_FUNCTION_EXIT(*MI);
    return;

  case TargetOpcode::PATCHABLE_TAIL_CALL:
    LowerPATCHABLE_TAIL_CALL(*MI);
    return;
  }

  // Finally, do the automated lowerings for everything else.
  MCInst TmpInst;
  MCInstLowering.Lower(MI, TmpInst);
  EmitToStreamer(*OutStreamer, TmpInst);
}
|
|
|
|
|
|
|
|
// Force static initialization.
|
2014-05-24 20:50:23 +08:00
|
|
|
extern "C" void LLVMInitializeAArch64AsmPrinter() {
|
2016-10-10 07:00:34 +08:00
|
|
|
RegisterAsmPrinter<AArch64AsmPrinter> X(getTheAArch64leTarget());
|
|
|
|
RegisterAsmPrinter<AArch64AsmPrinter> Y(getTheAArch64beTarget());
|
|
|
|
RegisterAsmPrinter<AArch64AsmPrinter> Z(getTheARM64Target());
|
2014-03-29 18:18:08 +08:00
|
|
|
}
|