//==-- AArch64MCInstLower.cpp - Convert AArch64 MachineInstr to an MCInst --==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains code to lower AArch64 MachineInstrs to their corresponding
// MCInst records.
//
//===----------------------------------------------------------------------===//

#include "AArch64MCInstLower.h"
#include "MCTargetDesc/AArch64MCExpr.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/CodeGen/AsmPrinter.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineModuleInfoImpls.h"
#include "llvm/IR/Mangler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h"

using namespace llvm;
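
// Defined elsewhere in the AArch64 backend and exposed via the
// -aarch64-elf-ldtls-generation option; off by default because the GNU
// linkers ld.bfd and ld.gold did not yet support the local-dynamic TLS
// relocations when this lowering was written.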
extern cl::opt<bool> EnableAArch64ELFLocalDynamicTLSGeneration;

AArch64MCInstLower::AArch64MCInstLower(MCContext &ctx, AsmPrinter &printer)
    : Ctx(ctx), Printer(printer) {}
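
// Return the MCSymbol to emit for a global-address operand. On COFF (Windows)
// targets, dllimported globals and globals accessed through a stub are
// referenced indirectly via an "__imp_"- or ".refptr."-prefixed symbol;
// everything else maps straight to the global's own symbol.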
MCSymbol *
AArch64MCInstLower::GetGlobalAddressSymbol(const MachineOperand &MO) const {
  const GlobalValue *GV = MO.getGlobal();
  unsigned TargetFlags = MO.getTargetFlags();
  const Triple &TheTriple = Printer.TM.getTargetTriple();
  if (!TheTriple.isOSBinFormatCOFF())
    return Printer.getSymbol(GV);

  assert(TheTriple.isOSWindows() &&
         "Windows is the only supported COFF target");

  bool IsIndirect =
      (TargetFlags & (AArch64II::MO_DLLIMPORT | AArch64II::MO_COFFSTUB));
  if (!IsIndirect)
    return Printer.getSymbol(GV);

  SmallString<128> Name;
  if (TargetFlags & AArch64II::MO_DLLIMPORT)
    Name = "__imp_";
  else if (TargetFlags & AArch64II::MO_COFFSTUB)
    Name = ".refptr.";
  Printer.TM.getNameWithPrefix(Name, GV,
                               Printer.getObjFileLowering().getMangler());

  MCSymbol *MCSym = Ctx.getOrCreateSymbol(Name);

  if (TargetFlags & AArch64II::MO_COFFSTUB) {
    MachineModuleInfoCOFF &MMICOFF =
        Printer.MMI->getObjFileInfo<MachineModuleInfoCOFF>();
    MachineModuleInfoImpl::StubValueTy &StubSym =
        MMICOFF.getGVStubEntry(MCSym);

    if (!StubSym.getPointer())
      StubSym = MachineModuleInfoImpl::StubValueTy(Printer.getSymbol(GV), true);
  }

  return MCSym;
}
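
// External symbols carry no target-specific decoration; defer to the
// AsmPrinter.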
MCSymbol *
AArch64MCInstLower::GetExternalSymbolSymbol(const MachineOperand &MO) const {
  return Printer.GetExternalSymbolSymbol(MO.getSymbolName());
}
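
// On Darwin the MO_FRAGMENT target flags pick the page / page-offset variant
// of the reference (plain, GOT or TLVP flavoured), and any constant offset on
// the operand is folded into the expression.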
MCOperand AArch64MCInstLower::lowerSymbolOperandDarwin(const MachineOperand &MO,
                                                       MCSymbol *Sym) const {
  // FIXME: We would like an efficient form for this, so we don't have to do a
  // lot of extra uniquing.
  MCSymbolRefExpr::VariantKind RefKind = MCSymbolRefExpr::VK_None;
  if ((MO.getTargetFlags() & AArch64II::MO_GOT) != 0) {
    if ((MO.getTargetFlags() & AArch64II::MO_FRAGMENT) == AArch64II::MO_PAGE)
      RefKind = MCSymbolRefExpr::VK_GOTPAGE;
    else if ((MO.getTargetFlags() & AArch64II::MO_FRAGMENT) ==
             AArch64II::MO_PAGEOFF)
      RefKind = MCSymbolRefExpr::VK_GOTPAGEOFF;
    else
      llvm_unreachable("Unexpected target flags with MO_GOT on GV operand");
  } else if ((MO.getTargetFlags() & AArch64II::MO_TLS) != 0) {
    if ((MO.getTargetFlags() & AArch64II::MO_FRAGMENT) == AArch64II::MO_PAGE)
      RefKind = MCSymbolRefExpr::VK_TLVPPAGE;
    else if ((MO.getTargetFlags() & AArch64II::MO_FRAGMENT) ==
             AArch64II::MO_PAGEOFF)
      RefKind = MCSymbolRefExpr::VK_TLVPPAGEOFF;
    else
      llvm_unreachable("Unexpected target flags with MO_TLS on GV operand");
  } else {
    if ((MO.getTargetFlags() & AArch64II::MO_FRAGMENT) == AArch64II::MO_PAGE)
      RefKind = MCSymbolRefExpr::VK_PAGE;
    else if ((MO.getTargetFlags() & AArch64II::MO_FRAGMENT) ==
             AArch64II::MO_PAGEOFF)
      RefKind = MCSymbolRefExpr::VK_PAGEOFF;
  }

  const MCExpr *Expr = MCSymbolRefExpr::create(Sym, RefKind, Ctx);
  if (!MO.isJTI() && MO.getOffset())
    Expr = MCBinaryExpr::createAdd(
        Expr, MCConstantExpr::create(MO.getOffset(), Ctx), Ctx);
  return MCOperand::createExpr(Expr);
}
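
// On ELF the reference kind is accumulated as AArch64MCExpr flags: a GOT or
// TLS-model component (GOTTPREL, TPREL, DTPREL, TLSDESC), the addressing
// fragment (PAGE, PAGEOFF, G0-G3, HI12) and an optional NC ("no overflow
// check") bit.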
MCOperand AArch64MCInstLower::lowerSymbolOperandELF(const MachineOperand &MO,
                                                    MCSymbol *Sym) const {
  uint32_t RefFlags = 0;

  if (MO.getTargetFlags() & AArch64II::MO_GOT)
    RefFlags |= AArch64MCExpr::VK_GOT;
  else if (MO.getTargetFlags() & AArch64II::MO_TLS) {
    TLSModel::Model Model;
    if (MO.isGlobal()) {
      const GlobalValue *GV = MO.getGlobal();
      Model = Printer.TM.getTLSModel(GV);
      if (!EnableAArch64ELFLocalDynamicTLSGeneration &&
          Model == TLSModel::LocalDynamic)
        Model = TLSModel::GeneralDynamic;

    } else {
      assert(MO.isSymbol() &&
             StringRef(MO.getSymbolName()) == "_TLS_MODULE_BASE_" &&
             "unexpected external TLS symbol");
      // The general dynamic access sequence is used to get the
      // address of _TLS_MODULE_BASE_.
      Model = TLSModel::GeneralDynamic;
    }
    switch (Model) {
    case TLSModel::InitialExec:
      RefFlags |= AArch64MCExpr::VK_GOTTPREL;
      break;
    case TLSModel::LocalExec:
      RefFlags |= AArch64MCExpr::VK_TPREL;
      break;
    case TLSModel::LocalDynamic:
      RefFlags |= AArch64MCExpr::VK_DTPREL;
      break;
    case TLSModel::GeneralDynamic:
      RefFlags |= AArch64MCExpr::VK_TLSDESC;
      break;
    }
  } else {
    // No modifier means this is a generic reference, classified as absolute for
    // the cases where it matters (:abs_g0: etc).
    RefFlags |= AArch64MCExpr::VK_ABS;
  }

  if ((MO.getTargetFlags() & AArch64II::MO_FRAGMENT) == AArch64II::MO_PAGE)
    RefFlags |= AArch64MCExpr::VK_PAGE;
  else if ((MO.getTargetFlags() & AArch64II::MO_FRAGMENT) ==
           AArch64II::MO_PAGEOFF)
    RefFlags |= AArch64MCExpr::VK_PAGEOFF;
  else if ((MO.getTargetFlags() & AArch64II::MO_FRAGMENT) == AArch64II::MO_G3)
    RefFlags |= AArch64MCExpr::VK_G3;
  else if ((MO.getTargetFlags() & AArch64II::MO_FRAGMENT) == AArch64II::MO_G2)
    RefFlags |= AArch64MCExpr::VK_G2;
  else if ((MO.getTargetFlags() & AArch64II::MO_FRAGMENT) == AArch64II::MO_G1)
    RefFlags |= AArch64MCExpr::VK_G1;
  else if ((MO.getTargetFlags() & AArch64II::MO_FRAGMENT) == AArch64II::MO_G0)
    RefFlags |= AArch64MCExpr::VK_G0;
  else if ((MO.getTargetFlags() & AArch64II::MO_FRAGMENT) == AArch64II::MO_HI12)
    RefFlags |= AArch64MCExpr::VK_HI12;

  if (MO.getTargetFlags() & AArch64II::MO_NC)
    RefFlags |= AArch64MCExpr::VK_NC;

  const MCExpr *Expr =
      MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_None, Ctx);
  if (!MO.isJTI() && MO.getOffset())
    Expr = MCBinaryExpr::createAdd(
        Expr, MCConstantExpr::create(MO.getOffset(), Ctx), Ctx);

  AArch64MCExpr::VariantKind RefKind;
  RefKind = static_cast<AArch64MCExpr::VariantKind>(RefFlags);
  Expr = AArch64MCExpr::create(Expr, RefKind, Ctx);

  return MCOperand::createExpr(Expr);
}
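
// On COFF only TLS references need a variant kind: the low and high parts of
// the section-relative (SECREL) offset of the thread-local variable.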
MCOperand AArch64MCInstLower::lowerSymbolOperandCOFF(const MachineOperand &MO,
                                                     MCSymbol *Sym) const {
  AArch64MCExpr::VariantKind RefKind = AArch64MCExpr::VK_NONE;
  if (MO.getTargetFlags() & AArch64II::MO_TLS) {
    if ((MO.getTargetFlags() & AArch64II::MO_FRAGMENT) == AArch64II::MO_PAGEOFF)
      RefKind = AArch64MCExpr::VK_SECREL_LO12;
    else if ((MO.getTargetFlags() & AArch64II::MO_FRAGMENT) ==
             AArch64II::MO_HI12)
      RefKind = AArch64MCExpr::VK_SECREL_HI12;
  }
  const MCExpr *Expr =
      MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_None, Ctx);
  if (!MO.isJTI() && MO.getOffset())
    Expr = MCBinaryExpr::createAdd(
        Expr, MCConstantExpr::create(MO.getOffset(), Ctx), Ctx);
  Expr = AArch64MCExpr::create(Expr, RefKind, Ctx);
  return MCOperand::createExpr(Expr);
}
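
// Dispatch on the object file format of the target triple.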
MCOperand AArch64MCInstLower::LowerSymbolOperand(const MachineOperand &MO,
                                                 MCSymbol *Sym) const {
  if (Printer.TM.getTargetTriple().isOSDarwin())
    return lowerSymbolOperandDarwin(MO, Sym);
  if (Printer.TM.getTargetTriple().isOSBinFormatCOFF())
    return lowerSymbolOperandCOFF(MO, Sym);

  assert(Printer.TM.getTargetTriple().isOSBinFormatELF() && "Invalid target");
  return lowerSymbolOperandELF(MO, Sym);
}
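
// Lower a single MachineOperand into MCOp. Returns false for operands that
// have no MC counterpart (implicit register operands and register masks).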
bool AArch64MCInstLower::lowerOperand(const MachineOperand &MO,
                                      MCOperand &MCOp) const {
  switch (MO.getType()) {
  default:
    llvm_unreachable("unknown operand type");
  case MachineOperand::MO_Register:
    // Ignore all implicit register operands.
    if (MO.isImplicit())
      return false;
    MCOp = MCOperand::createReg(MO.getReg());
    break;
  case MachineOperand::MO_RegisterMask:
    // Regmasks are like implicit defs.
    return false;
  case MachineOperand::MO_Immediate:
    MCOp = MCOperand::createImm(MO.getImm());
    break;
  case MachineOperand::MO_MachineBasicBlock:
    MCOp = MCOperand::createExpr(
        MCSymbolRefExpr::create(MO.getMBB()->getSymbol(), Ctx));
    break;
  case MachineOperand::MO_GlobalAddress:
    MCOp = LowerSymbolOperand(MO, GetGlobalAddressSymbol(MO));
    break;
  case MachineOperand::MO_ExternalSymbol:
    MCOp = LowerSymbolOperand(MO, GetExternalSymbolSymbol(MO));
    break;
  case MachineOperand::MO_MCSymbol:
    MCOp = LowerSymbolOperand(MO, MO.getMCSymbol());
    break;
  case MachineOperand::MO_JumpTableIndex:
    MCOp = LowerSymbolOperand(MO, Printer.GetJTISymbol(MO.getIndex()));
    break;
  case MachineOperand::MO_ConstantPoolIndex:
    MCOp = LowerSymbolOperand(MO, Printer.GetCPISymbol(MO.getIndex()));
    break;
  case MachineOperand::MO_BlockAddress:
    MCOp = LowerSymbolOperand(
        MO, Printer.GetBlockAddressSymbol(MO.getBlockAddress()));
    break;
  }
  return true;
}
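
// Lower MI to OutMI by copying the opcode and lowering each operand in turn;
// operands with no MC form are dropped.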
void AArch64MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
  OutMI.setOpcode(MI->getOpcode());

  for (const MachineOperand &MO : MI->operands()) {
    MCOperand MCOp;
    if (lowerOperand(MO, MCOp))
      OutMI.addOperand(MCOp);
  }
}