//===- Target.h -------------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef LLD_ELF_TARGET_H
#define LLD_ELF_TARGET_H
#include "InputSection.h"
#include "lld/Common/ErrorHandler.h"
#include "llvm/Object/ELF.h"
#include "llvm/Support/MathExtras.h"
#include <array>
namespace lld {
std::string toString(elf::RelType type);
namespace elf {
class Defined;
class InputFile;
class Symbol;
class TargetInfo {
public:
  virtual uint32_t calcEFlags() const { return 0; }
  virtual RelExpr getRelExpr(RelType type, const Symbol &s,
                             const uint8_t *loc) const = 0;
  virtual RelType getDynRel(RelType type) const { return 0; }
  virtual void writeGotPltHeader(uint8_t *buf) const {}
  virtual void writeGotHeader(uint8_t *buf) const {}
  virtual void writeGotPlt(uint8_t *buf, const Symbol &s) const {}
  virtual void writeIgotPlt(uint8_t *buf, const Symbol &s) const {}
  virtual int64_t getImplicitAddend(const uint8_t *buf, RelType type) const;
  virtual int getTlsGdRelaxSkip(RelType type) const { return 1; }

  // If lazy binding is supported, the first entry of the PLT has code
  // to call the dynamic linker to resolve PLT entries the first time
  // they are called. This function writes that code.
  virtual void writePltHeader(uint8_t *buf) const {}
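
  // For illustration (the x86-64 convention, stated here as background
  // rather than anything this interface mandates), the PLT header is
  // conceptually:
  //   push *(.got.plt + 8)    # push the link map pointer
  //   jmp  *(.got.plt + 16)   # enter the dynamic loader's lazy resolver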

  virtual void writePlt(uint8_t *buf, const Symbol &sym,
                        uint64_t pltEntryAddr) const {}
  virtual void writeIplt(uint8_t *buf, const Symbol &sym,
                         uint64_t pltEntryAddr) const {
    // All but PPC32 and PPC64 use the same format for .plt and .iplt entries.
    writePlt(buf, sym, pltEntryAddr);
  }
  virtual void writeIBTPlt(uint8_t *buf, size_t numEntries) const {}
  virtual void addPltHeaderSymbols(InputSection &isec) const {}
  virtual void addPltSymbols(InputSection &isec, uint64_t off) const {}

  // Returns true if a relocation only uses the low bits of a value such that
  // all those bits are in the same page. For example, if the relocation
  // only uses the low 12 bits in a system with 4k pages. If this is true, the
  // bits will always have the same value at runtime and we don't have to emit
  // a dynamic relocation.
  virtual bool usesOnlyLowPageBits(RelType type) const;
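
  // Worked example: with 4k pages, an AArch64 relocation that writes only
  // bits [11:0] of an address yields the same field no matter where the
  // loader places the image, as long as the load bias is page-aligned.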

  // Decide whether a Thunk is needed for the relocation from File
  // targeting S.
  virtual bool needsThunk(RelExpr expr, RelType relocType,
                          const InputFile *file, uint64_t branchAddr,
                          const Symbol &s, int64_t a) const;

  // On systems with range extensions we place collections of Thunks at
  // regular spacings that enable the majority of branches to reach the
  // Thunks. A value of 0 means range extension thunks are not supported.
  virtual uint32_t getThunkSectionSpacing() const { return 0; }
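
  // For example (numbers are illustrative, not from a specific port): if a
  // target's branch instructions have a +/-16 MiB range, returning a spacing
  // somewhat below 0x1000000 keeps every branch within reach of a thunk
  // section, leaving margin for sections growing as thunks are added.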

  // The function with a prologue starting at Loc was compiled with
  // -fsplit-stack and it calls a function compiled without. Adjust the
  // prologue to do the right thing. See https://gcc.gnu.org/wiki/SplitStacks.
  // The symbol's st_other flags are needed on PowerPC64 for determining the
  // offset to the split-stack prologue.
  virtual bool adjustPrologueForCrossSplitStack(uint8_t *loc, uint8_t *end,
                                                uint8_t stOther) const;

  // Return true if we can reach dst from src with RelType type.
  virtual bool inBranchRange(RelType type, uint64_t src, uint64_t dst) const;

  virtual void relocate(uint8_t *loc, const Relocation &rel,
                        uint64_t val) const = 0;
  void relocateNoSym(uint8_t *loc, RelType type, uint64_t val) const {
    relocate(loc, Relocation{R_NONE, type, 0, 0, nullptr}, val);
  }
  virtual void applyJumpInstrMod(uint8_t *loc, JumpModType type,
                                 JumpModType val) const {}

  virtual ~TargetInfo();

  // This deletes a jump insn at the end of the section if it is a fall thru
  // to the next section. Further, if there is a conditional jump and a direct
  // jump consecutively, it tries to flip the conditional jump to convert the
  // direct jump into a fall thru and delete it. Returns true if a jump
  // instruction can be deleted.
  virtual bool deleteFallThruJmpInsn(InputSection &is, InputFile *file,
                                     InputSection *nextIS) const {
    return false;
  }
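
  // Illustration of the relaxation this hook enables (a sketch, not an
  // exact encoding): if a section ends with
  //   je  next        // conditional jump to the section that follows
  //   jmp other
  // the pass can invert the condition and drop the unconditional jump:
  //   jne other       // execution now falls through to `next`
  // and a plain `jmp next` at the end of a section is deleted outright.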

  unsigned defaultCommonPageSize = 4096;
  unsigned defaultMaxPageSize = 4096;

  uint64_t getImageBase() const;

  // True if _GLOBAL_OFFSET_TABLE_ is relative to .got.plt, false if .got.
  bool gotBaseSymInGotPlt = false;
  static constexpr RelType noneRel = 0;
  RelType copyRel;
  RelType gotRel;
  RelType pltRel;
  RelType relativeRel;
  RelType iRelativeRel;
  RelType symbolicRel;
  RelType tlsDescRel;
  RelType tlsGotRel;
  RelType tlsModuleIndexRel;
  RelType tlsOffsetRel;
  unsigned gotEntrySize = config->wordsize;
  unsigned pltEntrySize;
  unsigned pltHeaderSize;
  unsigned ipltEntrySize;

  // At least on x86_64 positions 1 and 2 are used by the first plt entry
  // to support lazy loading.
  unsigned gotPltHeaderEntriesNum = 3;
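
  // For orientation (the standard SysV layout on x86-64, given here as
  // background rather than something this header enforces): slot 0 holds
  // the address of _DYNAMIC, and slots 1 and 2 are filled in by the dynamic
  // loader with its link map and lazy-resolver addresses.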

  // On the PPC64 ELFv2 ABI, the first entry in the .got is the .TOC.
  unsigned gotHeaderEntriesNum = 0;

  bool needsThunks = false;

  // A 4-byte field corresponding to one or more trap instructions, used to
  // pad executable OutputSections.
  std::array<uint8_t, 4> trapInstr;

  // Stores the NOP instructions of different sizes for the target and is used
  // to pad sections that are relaxed.
  llvm::Optional<std::vector<std::vector<uint8_t>>> nopInstrs;

  // True if the target needs to rewrite calls to __morestack to instead call
  // __morestack_non_split when a split-stack enabled caller calls a
  // non-split-stack callee. Otherwise false.
  bool needsMoreStackNonSplit = true;

  virtual RelExpr adjustTlsExpr(RelType type, RelExpr expr) const;
  virtual RelExpr adjustGotPcExpr(RelType type, int64_t addend,
                                  const uint8_t *loc) const;
  virtual void relaxGot(uint8_t *loc, const Relocation &rel,
                        uint64_t val) const;
  virtual void relaxTlsGdToIe(uint8_t *loc, const Relocation &rel,
                              uint64_t val) const;
  virtual void relaxTlsGdToLe(uint8_t *loc, const Relocation &rel,
                              uint64_t val) const;
  virtual void relaxTlsIeToLe(uint8_t *loc, const Relocation &rel,
                              uint64_t val) const;
  virtual void relaxTlsLdToLe(uint8_t *loc, const Relocation &rel,
                              uint64_t val) const;

protected:
  // On FreeBSD x86_64 the first page cannot be mmapped.
  // On Linux this is controlled by vm.mmap_min_addr. At least on some x86_64
  // installs this is set to 65536, so the first 16 pages cannot be used.
  // Given that, the smallest value that can be used in here is 0x10000.
  uint64_t defaultImageBase = 0x10000;
};
TargetInfo *getAArch64TargetInfo();
TargetInfo *getAMDGPUTargetInfo();
TargetInfo *getARMTargetInfo();
TargetInfo *getAVRTargetInfo();
TargetInfo *getHexagonTargetInfo();
TargetInfo *getMSP430TargetInfo();
TargetInfo *getPPC64TargetInfo();
TargetInfo *getPPCTargetInfo();
TargetInfo *getRISCVTargetInfo();
TargetInfo *getSPARCV9TargetInfo();
TargetInfo *getX86TargetInfo();
TargetInfo *getX86_64TargetInfo();
template <class ELFT> TargetInfo *getMipsTargetInfo();
struct ErrorPlace {
  InputSectionBase *isec;
  std::string loc;
  std::string srcLoc;
};
// Returns input section and corresponding source string for the given location.
ErrorPlace getErrorPlace(const uint8_t *loc);
static inline std::string getErrorLocation(const uint8_t *loc) {
  return getErrorPlace(loc).loc;
}
void writePPC32GlinkSection(uint8_t *buf, size_t numEntries);
bool tryRelaxPPC64TocIndirection(const Relocation &rel, uint8_t *bufLoc);
unsigned getPPCDFormOp(unsigned secondaryOp);
// In the PowerPC64 ELFv2 ABI a function can have 2 entry points. The first
// is a global entry point (GEP) which typically is used to initialize the
// TOC pointer in general purpose register 2. The second is a local entry
// point (LEP) which bypasses the TOC pointer initialization code. The
// offset between GEP and LEP is encoded in a function's st_other flags.
// This function returns the offset (in bytes) from the global entry point
// to the local entry point.
unsigned getPPC64GlobalEntryToLocalEntryOffset(uint8_t stOther);
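
// A typical ELFv2 prologue looks roughly like this (illustrative assembly,
// not something produced by this header):
//   func:                          # global entry point (GEP)
//     addis 2, 12, .TOC.-func@ha   # materialize the TOC pointer in r2,
//     addi  2, 2, .TOC.-func@l     #   using the address passed in r12
//     .localentry func, .-func     # LEP = GEP + 8 in this case
//     ...                          # callers with a valid r2 enter here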
// Write a prefixed instruction, which is a 4-byte prefix followed by a 4-byte
// instruction (regardless of endianness). Therefore, the prefix is always in
// lower memory than the instruction.
void writePrefixedInstruction(uint8_t *loc, uint64_t insn);
void addPPC64SaveRestore();
uint64_t getPPC64TocBase();
uint64_t getAArch64Page(uint64_t expr);
class AArch64Relaxer {
  bool safeToRelaxAdrpLdr = true;

public:
  explicit AArch64Relaxer(ArrayRef<Relocation> relocs);

  bool tryRelaxAdrpAdd(const Relocation &adrpRel, const Relocation &addRel,
                       uint64_t secAddr, uint8_t *buf) const;
  bool tryRelaxAdrpLdr(const Relocation &adrpRel, const Relocation &ldrRel,
                       uint64_t secAddr, uint8_t *buf) const;
};
extern const TargetInfo *target;
TargetInfo *getTarget();
template <class ELFT> bool isMipsPIC(const Defined *sym);
void reportRangeError(uint8_t *loc, const Relocation &rel, const Twine &v,
                      int64_t min, uint64_t max);
void reportRangeError(uint8_t *loc, int64_t v, int n, const Symbol &sym,
                      const Twine &msg);
// Make sure that V can be represented as an N bit signed integer.
inline void checkInt(uint8_t *loc, int64_t v, int n, const Relocation &rel) {
  if (v != llvm::SignExtend64(v, n))
    reportRangeError(loc, rel, Twine(v), llvm::minIntN(n), llvm::maxIntN(n));
}
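
// For example, with n = 16 the value 0x8000 is out of range:
// llvm::SignExtend64(0x8000, 16) yields -0x8000, which differs from the
// original value, so an error citing the range [-32768, 32767] is reported.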
// Make sure that V can be represented as an N bit unsigned integer.
inline void checkUInt(uint8_t *loc, uint64_t v, int n, const Relocation &rel) {
  if ((v >> n) != 0)
    reportRangeError(loc, rel, Twine(v), 0, llvm::maxUIntN(n));
}
// Make sure that V can be represented as an N bit signed or unsigned integer.
inline void checkIntUInt(uint8_t *loc, uint64_t v, int n,
                         const Relocation &rel) {
  // For the error message we should cast V to a signed integer so that error
  // messages show a small negative value rather than an extremely large one.
  if (v != (uint64_t)llvm::SignExtend64(v, n) && (v >> n) != 0)
    reportRangeError(loc, rel, Twine((int64_t)v), llvm::minIntN(n),
                     llvm::maxUIntN(n));
}
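
// Make sure that V is aligned to N bytes. N is assumed to be a power of two,
// which makes the bitwise test below equivalent to checking V % N != 0.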
inline void checkAlignment(uint8_t *loc, uint64_t v, int n,
                           const Relocation &rel) {
  if ((v & (n - 1)) != 0)
    error(getErrorLocation(loc) + "improper alignment for relocation " +
          lld::toString(rel.type) + ": 0x" + llvm::utohexstr(v) +
          " is not aligned to " + Twine(n) + " bytes");
}
// Endianness-aware read/write.
inline uint16_t read16(const void *p) {
  return llvm::support::endian::read16(p, config->endianness);
}
inline uint32_t read32(const void *p) {
  return llvm::support::endian::read32(p, config->endianness);
}
inline uint64_t read64(const void *p) {
  return llvm::support::endian::read64(p, config->endianness);
}
inline void write16(void *p, uint16_t v) {
  llvm::support::endian::write16(p, v, config->endianness);
}
inline void write32(void *p, uint32_t v) {
  llvm::support::endian::write32(p, v, config->endianness);
}
inline void write64(void *p, uint64_t v) {
  llvm::support::endian::write64(p, v, config->endianness);
}
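
// A common pattern in relocate() implementations is a read-modify-write of
// an instruction field, e.g. (a sketch; `mask` is a hypothetical bitmask
// selecting the immediate bits):
//   write32(loc, (read32(loc) & ~mask) | (val & mask));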
} // namespace elf
} // namespace lld
#ifdef __clang__
#pragma clang diagnostic ignored "-Wgnu-zero-variadic-macro-arguments"
#endif
#define invokeELFT(f, ...) \
  switch (config->ekind) { \
  case ELF32LEKind: \
    f<ELF32LE>(__VA_ARGS__); \
    break; \
  case ELF32BEKind: \
    f<ELF32BE>(__VA_ARGS__); \
    break; \
  case ELF64LEKind: \
    f<ELF64LE>(__VA_ARGS__); \
    break; \
  case ELF64BEKind: \
    f<ELF64BE>(__VA_ARGS__); \
    break; \
  default: \
    llvm_unreachable("unknown config->ekind"); \
  }
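
// Usage sketch (`doTask` is a hypothetical function template, not part of
// lld): given `template <class ELFT> void doTask(int arg);`, a call site can
// dispatch on the ELF kind selected at runtime with:
//   invokeELFT(doTask, 42);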
#endif