llvm-project/lld/ELF/Relocations.cpp

//===- Relocations.cpp ----------------------------------------------------===//
//
// The LLVM Linker
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains platform-independent functions to process relocations.
// Here is an overview of this file.
//
// Simple relocations are easy to handle for the linker. For example,
// for R_X86_64_PC64 relocs, the linker just has to fix up locations
// with the relative offsets to the target symbols. It would just be
// reading records from relocation sections and applying them to output.
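// (Concretely, per the x86-64 psABI, R_X86_64_PC64 computes S + A - P: the
// symbol value plus the addend minus the address of the location being
// patched, written as a 64-bit value.)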
//
// But not all relocations are that easy to handle. For example, for
// R_386_GOTOFF relocs, the linker has to create new GOT entries for
// symbols if they don't exist, and fix up locations with GOT entry
// offsets from the beginning of GOT section. So there is more than
// fixing addresses in relocation processing.
//
// ELF defines a large number of complex relocations.
//
// The functions in this file analyze relocations and do whatever needs
// to be done. That includes, but is not limited to, the following.
//
// - create GOT/PLT entries
// - create new dynamic relocations (e.g. in .rela.dyn) so that the dynamic
// linker can resolve them at runtime (since ELF supports dynamic linking,
// not all relocations can be resolved at link-time)
// - create COPY relocs and reserve space in .bss
// - replace expensive relocs (in terms of runtime cost) with cheap ones
// - error out on infeasible combinations such as PIC and non-relative relocs
//
// Note that the functions in this file don't actually apply relocations
// because they don't know about the output file nor the output file buffer.
// They instead store Relocation objects in InputSection's Relocations
// vector so that they can be applied later in InputSection::writeTo.
//
//===----------------------------------------------------------------------===//
#include "Relocations.h"
#include "Config.h"
#include "Memory.h"
#include "OutputSections.h"
#include "Strings.h"
#include "SymbolTable.h"
#include "SyntheticSections.h"
#include "Target.h"
#include "Thunks.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
using namespace llvm;
using namespace llvm::ELF;
using namespace llvm::object;
using namespace llvm::support::endian;
namespace lld {
namespace elf {
static bool refersToGotEntry(RelExpr Expr) {
return isRelExprOneOf<R_GOT, R_GOT_OFF, R_MIPS_GOT_LOCAL_PAGE, R_MIPS_GOT_OFF,
R_MIPS_GOT_OFF32, R_MIPS_TLSGD, R_MIPS_TLSLD,
R_GOT_PAGE_PC, R_GOT_PC, R_GOT_FROM_END, R_TLSGD,
R_TLSGD_PC, R_TLSDESC, R_TLSDESC_PAGE>(Expr);
}
static bool isPreemptible(const SymbolBody &Body, uint32_t Type) {
// MIPS GP-relative relocations always resolve to a definition in a regular
// input file, ignoring the one-definition rule. So we, for example, should
// not attempt to create a dynamic relocation even if the target symbol is
// preemptible. There are two MIPS GP-relative relocations, R_MIPS_GPREL16
// and R_MIPS_GPREL32, but only R_MIPS_GPREL16 can be against a preemptible
// symbol.
// To get the MIPS relocation type we apply a 0xff mask. In the case of the
// O32 ABI all relocation types occupy eight bits. In the case of the N64 ABI
// we extract the first relocation from the 3-in-1 packet because only the
// first relocation can be against a real symbol.
if (Config->EMachine == EM_MIPS && (Type & 0xff) == R_MIPS_GPREL16)
return false;
return Body.isPreemptible();
}
// This function is similar to `handleTlsRelocation`. ARM and MIPS do not
// support any relaxations for TLS relocations, so by factoring out the ARM
// and MIPS handling into a separate function we can simplify the code and
// avoid polluting `handleTlsRelocation` with ARM and MIPS `if` statements.
template <class ELFT, class GOT>
static unsigned handleNoRelaxTlsRelocation(GOT *Got, uint32_t Type,
SymbolBody &Body,
InputSectionBase &C, uint64_t Offset,
int64_t Addend, RelExpr Expr) {
auto addModuleReloc = [&](uint64_t Off, bool LD) {
// The Dynamic TLS Module Index Relocation can be statically resolved to 1
// if we know that we are linking an executable. For ARM we resolve the
// relocation when writing the GOT. MIPS has a custom GOT implementation
// that writes the module index directly.
if (!Body.isPreemptible() && !Config->Pic && Config->EMachine == EM_ARM)
Got->Relocations.push_back(
{R_ABS, Target->TlsModuleIndexRel, Off, 0, &Body});
else {
SymbolBody *Dest = LD ? nullptr : &Body;
In<ELFT>::RelaDyn->addReloc(
{Target->TlsModuleIndexRel, Got, Off, false, Dest, 0});
}
};
if (isRelExprOneOf<R_MIPS_TLSLD, R_TLSLD_PC>(Expr)) {
if (Got->addTlsIndex() && (Config->Pic || Config->EMachine == EM_ARM))
addModuleReloc(Got->getTlsIndexOff(), true);
C.Relocations.push_back({Expr, Type, Offset, Addend, &Body});
return 1;
}
if (Target->isTlsGlobalDynamicRel(Type)) {
if (Got->addDynTlsEntry(Body) &&
(Body.isPreemptible() || Config->EMachine == EM_ARM)) {
uint64_t Off = Got->getGlobalDynOffset(Body);
addModuleReloc(Off, false);
if (Body.isPreemptible())
In<ELFT>::RelaDyn->addReloc({Target->TlsOffsetRel, Got,
Off + Config->Wordsize, false, &Body, 0});
}
C.Relocations.push_back({Expr, Type, Offset, Addend, &Body});
return 1;
}
return 0;
}
// Returns the number of relocations processed.
template <class ELFT>
static unsigned
handleTlsRelocation(uint32_t Type, SymbolBody &Body, InputSectionBase &C,
typename ELFT::uint Offset, int64_t Addend, RelExpr Expr) {
if (!(C.Flags & SHF_ALLOC))
return 0;
if (!Body.isTls())
return 0;
if (Config->EMachine == EM_ARM)
return handleNoRelaxTlsRelocation<ELFT>(In<ELFT>::Got, Type, Body, C,
Offset, Addend, Expr);
if (Config->EMachine == EM_MIPS)
return handleNoRelaxTlsRelocation<ELFT>(In<ELFT>::MipsGot, Type, Body, C,
Offset, Addend, Expr);
bool IsPreemptible = isPreemptible(Body, Type);
if (isRelExprOneOf<R_TLSDESC, R_TLSDESC_PAGE, R_TLSDESC_CALL>(Expr) &&
Config->Shared) {
if (In<ELFT>::Got->addDynTlsEntry(Body)) {
uint64_t Off = In<ELFT>::Got->getGlobalDynOffset(Body);
In<ELFT>::RelaDyn->addReloc({Target->TlsDescRel, In<ELFT>::Got, Off,
!IsPreemptible, &Body, 0});
}
if (Expr != R_TLSDESC_CALL)
C.Relocations.push_back({Expr, Type, Offset, Addend, &Body});
return 1;
}
if (isRelExprOneOf<R_TLSLD_PC, R_TLSLD>(Expr)) {
// Local-Dynamic relocs can be relaxed to Local-Exec.
if (!Config->Shared) {
C.Relocations.push_back(
{R_RELAX_TLS_LD_TO_LE, Type, Offset, Addend, &Body});
return 2;
}
if (In<ELFT>::Got->addTlsIndex())
In<ELFT>::RelaDyn->addReloc({Target->TlsModuleIndexRel, In<ELFT>::Got,
In<ELFT>::Got->getTlsIndexOff(), false,
nullptr, 0});
C.Relocations.push_back({Expr, Type, Offset, Addend, &Body});
return 1;
}
// Local-Dynamic relocs can be relaxed to Local-Exec.
if (Target->isTlsLocalDynamicRel(Type) && !Config->Shared) {
C.Relocations.push_back(
{R_RELAX_TLS_LD_TO_LE, Type, Offset, Addend, &Body});
return 1;
}
if (isRelExprOneOf<R_TLSDESC_PAGE, R_TLSDESC, R_TLSDESC_CALL>(Expr) ||
Target->isTlsGlobalDynamicRel(Type)) {
if (Config->Shared) {
if (In<ELFT>::Got->addDynTlsEntry(Body)) {
uint64_t Off = In<ELFT>::Got->getGlobalDynOffset(Body);
In<ELFT>::RelaDyn->addReloc(
{Target->TlsModuleIndexRel, In<ELFT>::Got, Off, false, &Body, 0});
// If the symbol is preemptible we need the dynamic linker to write
// the offset too.
uint64_t OffsetOff = Off + Config->Wordsize;
if (IsPreemptible)
In<ELFT>::RelaDyn->addReloc({Target->TlsOffsetRel, In<ELFT>::Got,
OffsetOff, false, &Body, 0});
else
In<ELFT>::Got->Relocations.push_back(
{R_ABS, Target->TlsOffsetRel, OffsetOff, 0, &Body});
}
C.Relocations.push_back({Expr, Type, Offset, Addend, &Body});
return 1;
}
// Global-Dynamic relocs can be relaxed to Initial-Exec or Local-Exec
// depending on whether the symbol is locally defined or not.
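// As a sketch of what this relaxation means on x86-64 (code sequences are
// roughly those of the psABI TLS spec; other targets differ), a
// General-Dynamic access
//   .byte 0x66
//   leaq   x@tlsgd(%rip), %rdi
//   .word  0x6666
//   rex64
//   call   __tls_get_addr@plt
// is rewritten for Local-Exec as
//   movq   %fs:0, %rax
//   leaq   x@tpoff(%rax), %rax
// so the call to __tls_get_addr disappears entirely.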
if (IsPreemptible) {
C.Relocations.push_back(
{Target->adjustRelaxExpr(Type, nullptr, R_RELAX_TLS_GD_TO_IE), Type,
Offset, Addend, &Body});
if (!Body.isInGot()) {
In<ELFT>::Got->addEntry(Body);
In<ELFT>::RelaDyn->addReloc({Target->TlsGotRel, In<ELFT>::Got,
Body.getGotOffset(), false, &Body, 0});
}
} else {
C.Relocations.push_back(
{Target->adjustRelaxExpr(Type, nullptr, R_RELAX_TLS_GD_TO_LE), Type,
Offset, Addend, &Body});
}
return Target->TlsGdRelaxSkip;
}
// Initial-Exec relocs can be relaxed to Local-Exec if the symbol is locally
// defined.
if (Target->isTlsInitialExecRel(Type) && !Config->Shared && !IsPreemptible) {
C.Relocations.push_back(
{R_RELAX_TLS_IE_TO_LE, Type, Offset, Addend, &Body});
return 1;
}
return 0;
}
static uint32_t getMipsPairType(uint32_t Type, const SymbolBody &Sym) {
switch (Type) {
case R_MIPS_HI16:
return R_MIPS_LO16;
case R_MIPS_GOT16:
return Sym.isLocal() ? R_MIPS_LO16 : R_MIPS_NONE;
case R_MIPS_PCHI16:
return R_MIPS_PCLO16;
case R_MICROMIPS_HI16:
return R_MICROMIPS_LO16;
default:
return R_MIPS_NONE;
}
}
// Returns true if a non-preemptible symbol always has the same value
// regardless of where the DSO is loaded.
static bool isAbsolute(const SymbolBody &Body) {
if (Body.isUndefined())
return !Body.isLocal() && Body.symbol()->isWeak();
if (const auto *DR = dyn_cast<DefinedRegular>(&Body))
return DR->Section == nullptr; // Absolute symbol.
return false;
}
static bool isAbsoluteValue(const SymbolBody &Body) {
return isAbsolute(Body) || Body.isTls();
}
static bool needsPlt(RelExpr Expr) {
return isRelExprOneOf<R_PLT_PC, R_PPC_PLT_OPD, R_PLT, R_PLT_PAGE_PC>(Expr);
}
// True if this expression is of the form Sym - X, where X is a position in the
// file (PC, or GOT for example).
static bool isRelExpr(RelExpr Expr) {
return isRelExprOneOf<R_PC, R_GOTREL, R_GOTREL_FROM_END, R_MIPS_GOTREL,
R_PAGE_PC, R_RELAX_GOT_PC>(Expr);
}
// Returns true if a given relocation can be computed at link-time.
//
// For instance, we know the offset from a relocation to its target at
// link-time if the relocation is PC-relative and refers to a
// non-interposable function in the same executable. This function
// returns true for such relocations.
//
// If this function returns false, that means we need to emit a
// dynamic relocation so that the relocation will be fixed at load-time.
template <class ELFT>
static bool isStaticLinkTimeConstant(RelExpr E, uint32_t Type,
const SymbolBody &Body,
InputSectionBase &S, uint64_t RelOff) {
// These expressions always compute a constant
if (isRelExprOneOf<R_SIZE, R_GOT_FROM_END, R_GOT_OFF, R_MIPS_GOT_LOCAL_PAGE,
R_MIPS_GOT_OFF, R_MIPS_GOT_OFF32, R_MIPS_GOT_GP_PC,
R_MIPS_TLSGD, R_GOT_PAGE_PC, R_GOT_PC, R_PLT_PC,
R_TLSGD_PC, R_TLSGD, R_PPC_PLT_OPD, R_TLSDESC_CALL,
R_TLSDESC_PAGE, R_HINT>(E))
return true;
// These never do, except if the entire file is position dependent or if
// only the low bits are used.
if (E == R_GOT || E == R_PLT || E == R_TLSDESC)
return Target->usesOnlyLowPageBits(Type) || !Config->Pic;
if (isPreemptible(Body, Type))
return false;
if (!Config->Pic)
return true;
// For the target and the relocation, we want to know if they are
// absolute or relative.
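// Roughly, the four cases handled below are:
// - absolute value, non-relative expr: link-time constant
// - non-absolute value, relative expr: link-time constant
// - non-absolute value, non-relative expr: constant only if the relocation
//   uses just the low page bits
// - absolute value, relative expr: normally an error (see below)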
bool AbsVal = isAbsoluteValue(Body);
bool RelE = isRelExpr(E);
if (AbsVal && !RelE)
return true;
if (!AbsVal && RelE)
return true;
if (!AbsVal && !RelE)
return Target->usesOnlyLowPageBits(Type);
// Relative relocation to an absolute value. This is normally unrepresentable,
// but if the relocation refers to a weak undefined symbol, we allow it to
// resolve to the image base. This is a little strange, but it allows us to
// link function calls to such symbols. Normally such a call will be guarded
// with a comparison, which will load a zero from the GOT.
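// (For instance, in source form such a guarded call is typically written as
//   extern void Foo() __attribute__((weak));
//   if (Foo) Foo();
// and the `if (Foo)` test is what reads the zeroed GOT entry.)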
// Another special case is the MIPS _gp_disp symbol, which represents the
// offset between the start of a function and the '_gp' value and is defined
// as absolute just to simplify the code.
assert(AbsVal && RelE);
if (Body.isUndefined() && !Body.isLocal() && Body.symbol()->isWeak())
return true;
error(S.getLocation<ELFT>(RelOff) + ": relocation " + toString(Type) +
" cannot refer to absolute symbol '" + toString(Body) +
"' defined in " + toString(Body.File));
return true;
}
static RelExpr toPlt(RelExpr Expr) {
if (Expr == R_PPC_OPD)
return R_PPC_PLT_OPD;
if (Expr == R_PC)
return R_PLT_PC;
if (Expr == R_PAGE_PC)
return R_PLT_PAGE_PC;
if (Expr == R_ABS)
return R_PLT;
return Expr;
}
static RelExpr fromPlt(RelExpr Expr) {
// We decided not to use a plt. Optimize a reference to the plt to a
// reference to the symbol itself.
if (Expr == R_PLT_PC)
return R_PC;
if (Expr == R_PPC_PLT_OPD)
return R_PPC_OPD;
if (Expr == R_PLT)
return R_ABS;
return Expr;
}
// Returns true if a given shared symbol is in a read-only segment in a DSO.
template <class ELFT> static bool isReadOnly(SharedSymbol *SS) {
typedef typename ELFT::Phdr Elf_Phdr;
uint64_t Value = SS->getValue<ELFT>();
// Determine if the symbol is read-only by scanning the DSO's program headers.
auto *File = cast<SharedFile<ELFT>>(SS->File);
for (const Elf_Phdr &Phdr : check(File->getObj().program_headers()))
if ((Phdr.p_type == ELF::PT_LOAD || Phdr.p_type == ELF::PT_GNU_RELRO) &&
!(Phdr.p_flags & ELF::PF_W) && Value >= Phdr.p_vaddr &&
Value < Phdr.p_vaddr + Phdr.p_memsz)
return true;
return false;
}
// Returns symbols at the same offset as a given symbol, including SS itself.
//
// If two or more symbols are at the same offset, and at least one of
// them is copied by a copy relocation, all of them need to be copied.
// Otherwise, they would refer to different places at runtime.
template <class ELFT>
static std::vector<SharedSymbol *> getSymbolsAt(SharedSymbol *SS) {
typedef typename ELFT::Sym Elf_Sym;
auto *File = cast<SharedFile<ELFT>>(SS->File);
uint64_t Shndx = SS->getShndx<ELFT>();
uint64_t Value = SS->getValue<ELFT>();
std::vector<SharedSymbol *> Ret;
for (const Elf_Sym &S : File->getGlobalSymbols()) {
if (S.st_shndx != Shndx || S.st_value != Value)
continue;
StringRef Name = check(S.getName(File->getStringTable()));
SymbolBody *Sym = Symtab<ELFT>::X->find(Name);
if (auto *Alias = dyn_cast_or_null<SharedSymbol>(Sym))
Ret.push_back(Alias);
}
return Ret;
}
// Reserve space in .bss or .bss.rel.ro for copy relocation.
//
// The copy relocation is pretty much a hack. If you use a copy relocation
// in your program, not only the symbol name but the symbol's size, RW/RO
// bit and alignment become part of the ABI. In addition to that, if the
// symbol has aliases, the aliases become part of the ABI. That's subtle,
// but if you violate that implicit ABI, that can cause very counter-
// intuitive consequences.
//
// So, what is the copy relocation? It's for linking non-position
// independent code to DSOs. In an ideal world, all references to data
// exported by DSOs should go indirectly through GOT. But if object files
// are compiled as non-PIC, all data references are direct. There is no
// way for the linker to transform the code to use GOT, as machine
// instructions are already set in stone in object files. This is where
// the copy relocation takes a role.
//
// A copy relocation instructs the dynamic linker to copy data from a DSO
// to a specified address (which is usually in .bss) at load-time. If the
// static linker (that's us) finds a direct data reference to a DSO
// symbol, it creates a copy relocation, so that the symbol can be
// resolved as if it were in .bss rather than in a DSO.
//
// As you can see in this function, we create a copy relocation for the
// dynamic linker, and the relocation contains not only symbol name but
// various other information about the symbol. So, such attributes become a
// part of the ABI.
//
// Note for application developers: I can give you a piece of advice if
// you are writing a shared library. You probably should export only
// functions from your library. You shouldn't export variables.
//
// As an example of what can happen when you export variables without knowing
// the semantics of copy relocations, assume that you have an exported
// variable of type T. It is an ABI-breaking change to add new members at
// the end of T even though doing that doesn't change the layout of the
// existing members. That's because the space for the new members is not
// reserved in .bss unless you recompile the main program. That means they
// are likely to overlap with other data that happens to be laid out next
// to the variable in .bss. This kind of issue is sometimes very hard to
// debug. What's a solution? Instead of exporting a variable V from a DSO,
// define an accessor getV().
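// To make the hazard concrete (a made-up example, not code from any real
// library): suppose a DSO exports
//   struct T { int A; };
//   extern T V;            // v1 of the library
// and a non-PIC executable gets a copy relocation reserving 4 bytes for V in
// its .bss. If v2 of the library changes T to { int A; int B; }, writes to
// V.B from the DSO land past the 4 bytes the executable reserved and corrupt
// whatever was placed next to V. Exporting `T &getV()` instead avoids this.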
template <class ELFT> static void addCopyRelSymbol(SharedSymbol *SS) {
typedef typename ELFT::uint uintX_t;
// A copy relocation against a zero-sized symbol doesn't make sense.
uintX_t SymSize = SS->template getSize<ELFT>();
if (SymSize == 0)
fatal("cannot create a copy relocation for symbol " + toString(*SS));
// See if this symbol is in a read-only segment. If so, preserve the symbol's
// memory protection by reserving space in the .bss.rel.ro section.
bool IsReadOnly = isReadOnly<ELFT>(SS);
BssSection *Sec = IsReadOnly ? In<ELFT>::BssRelRo : In<ELFT>::Bss;
uintX_t Off = Sec->reserveSpace(SS->getAlignment<ELFT>(), SymSize);
// Look through the DSO's dynamic symbol table for aliases and create a
// dynamic symbol for each one. This causes the copy relocation to correctly
// interpose any aliases.
for (SharedSymbol *Sym : getSymbolsAt<ELFT>(SS)) {
Sym->NeedsCopy = true;
Sym->CopyRelSec = Sec;
Sym->CopyRelSecOff = Off;
Sym->symbol()->IsUsedInRegularObj = true;
}
In<ELFT>::RelaDyn->addReloc({Target->CopyRel, Sec, Off, false, SS, 0});
}
template <class ELFT>
static RelExpr adjustExpr(SymbolBody &Body, RelExpr Expr, uint32_t Type,
const uint8_t *Data, InputSectionBase &S,
typename ELFT::uint RelOff) {
if (Body.isGnuIFunc()) {
Expr = toPlt(Expr);
} else if (!isPreemptible(Body, Type)) {
if (needsPlt(Expr))
Expr = fromPlt(Expr);
if (Expr == R_GOT_PC && !isAbsoluteValue(Body))
Expr = Target->adjustRelaxExpr(Type, Data, Expr);
}
bool IsWrite = !Config->ZText || (S.Flags & SHF_WRITE);
if (IsWrite || isStaticLinkTimeConstant<ELFT>(Expr, Type, Body, S, RelOff))
return Expr;
// This relocation would require the dynamic linker to write a value to
// read-only memory. We can hack around it if we are producing an executable
// and the referred-to symbol can be preempted to refer to the executable.
if (Config->Shared || (Config->Pic && !isRelExpr(Expr))) {
error(S.getLocation<ELFT>(RelOff) + ": can't create dynamic relocation " +
toString(Type) + " against " +
(Body.getName().empty() ? "local symbol in readonly segment"
: "symbol '" + toString(Body) + "'") +
" defined in " + toString(Body.File));
return Expr;
}
if (Body.getVisibility() != STV_DEFAULT) {
error(S.getLocation<ELFT>(RelOff) + ": cannot preempt symbol '" +
toString(Body) + "' defined in " + toString(Body.File));
return Expr;
}
if (Body.isObject()) {
// Produce a copy relocation.
auto *B = cast<SharedSymbol>(&Body);
if (!B->NeedsCopy) {
if (Config->ZNocopyreloc)
error(S.getLocation<ELFT>(RelOff) + ": unresolvable relocation " +
toString(Type) + " against symbol '" + toString(*B) +
"'; recompile with -fPIC or remove '-z nocopyreloc'");
addCopyRelSymbol<ELFT>(B);
}
return Expr;
}
if (Body.isFunc()) {
// This handles a non-PIC program calling a function in a shared library. In
// an ideal world, we could just report an error saying the relocation can
// overflow at runtime. In the real world with glibc, crt1.o has an
// R_X86_64_PC32 pointing to libc.so.
//
// The general idea on how to handle such cases is to create a PLT entry and
// use that as the function value.
//
// For the static linking part, we just return a PLT expr and everything
// else will use the PLT entry as the address.
//
// The remaining problem is making sure pointer equality still works. We
// need the help of the dynamic linker for that. We let it know that we have
// a direct reference to a shared-object symbol by creating an undefined
// symbol with a non-zero st_value. Seeing that, the dynamic linker resolves
// the symbol to the value of the symbol we created. This is true even for
// GOT entries, so pointer equality is maintained. To avoid an infinite loop,
// the only entry that points to the real function is a dedicated GOT entry
// used by the PLT. That is identified by special relocation types
// (R_X86_64_JUMP_SLOT, R_386_JMP_SLOT, etc).
Body.NeedsPltAddr = true;
return toPlt(Expr);
}
error("symbol '" + toString(Body) + "' defined in " + toString(Body.File) +
" is missing type");
return Expr;
}
// Returns the addend of a given relocation. If it is RELA, the addend
// is in the relocation itself. If it is REL, we need to read it from the
// input section.
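// For example, with Elf32_Rel on i386, an R_386_32 reference to Sym+8 is
// encoded by the compiler storing the value 8 at the patch site itself, so
// getImplicitAddend below reads it back from the section contents.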
template <class ELFT, class RelTy>
static int64_t computeAddend(const RelTy &Rel, const uint8_t *Buf) {
int64_t A = getAddend<ELFT>(Rel);
uint32_t Type = Rel.getType(Config->IsMips64EL);
if (!RelTy::IsRela)
A += Target->getImplicitAddend(Buf + Rel.r_offset, Type);
if (Config->EMachine == EM_PPC64 && Config->Pic && Type == R_PPC64_TOC)
A += getPPC64TocBase();
return A;
}
// MIPS has an odd notion of "paired" relocations to calculate addends.
// For example, if a relocation is of type R_MIPS_HI16, there must be an
// R_MIPS_LO16 relocation after it, and the addend is calculated using
// the two relocations.
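// As a sketch (the assembly is illustrative only):
//   lui   $2, %hi(sym)        # carries R_MIPS_HI16
//   addiu $2, $2, %lo(sym)    # carries R_MIPS_LO16
// The combined addend is (HI16 immediate << 16) plus the sign-extended LO16
// immediate, which is what the pairing loop below reconstructs.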
template <class ELFT, class RelTy>
static int64_t computeMipsAddend(const RelTy &Rel, InputSectionBase &Sec,
RelExpr Expr, SymbolBody &Body,
const RelTy *End) {
if (Expr == R_MIPS_GOTREL && Body.isLocal())
return Sec.getFile<ELFT>()->MipsGp0;
// The ABI says that the paired relocation is used only for REL.
// See p. 4-17 at ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf
if (RelTy::IsRela)
return 0;
uint32_t Type = Rel.getType(Config->IsMips64EL);
uint32_t PairTy = getMipsPairType(Type, Body);
if (PairTy == R_MIPS_NONE)
return 0;
const uint8_t *Buf = Sec.Data.data();
uint32_t SymIndex = Rel.getSymbol(Config->IsMips64EL);
// To make things worse, paired relocations might not be contiguous in
// the relocation table, so we need to do linear search. *sigh*
for (const RelTy *RI = &Rel; RI != End; ++RI) {
if (RI->getType(Config->IsMips64EL) != PairTy)
continue;
if (RI->getSymbol(Config->IsMips64EL) != SymIndex)
continue;
endianness E = Config->Endianness;
int32_t Hi = (read32(Buf + Rel.r_offset, E) & 0xffff) << 16;
int32_t Lo = SignExtend32<16>(read32(Buf + RI->r_offset, E));
return Hi + Lo;
}
warn("can't find matching " + toString(PairTy) + " relocation for " +
toString(Type));
return 0;
}
template <class ELFT>
static void reportUndefined(SymbolBody &Sym, InputSectionBase &S,
uint64_t Offset) {
if (Config->UnresolvedSymbols == UnresolvedPolicy::IgnoreAll)
return;
bool CanBeExternal = Sym.symbol()->computeBinding() != STB_LOCAL &&
Sym.getVisibility() == STV_DEFAULT;
if (Config->UnresolvedSymbols == UnresolvedPolicy::Ignore && CanBeExternal)
return;
std::string Msg = S.getLocation<ELFT>(Offset) + ": undefined symbol '" +
toString(Sym) + "'";
if (Config->UnresolvedSymbols == UnresolvedPolicy::WarnAll ||
(Config->UnresolvedSymbols == UnresolvedPolicy::Warn && CanBeExternal)) {
warn(Msg);
} else {
error(Msg);
if (Config->ArchiveWithoutSymbolsSeen) {
message("At least one archive listed no symbols in its index."
" This can happen when creating archives with a version"
" of ar that does not understand the object files in"
" the archive. For example, if you are using LLVM"
" bitcode objects (such as created by -flto), you may"
" need to use llvm-ar or GNU ar with a plugin.");
// Reset to false so that we print the message only once.
Config->ArchiveWithoutSymbolsSeen = false;
}
}
}
template <class RelTy>
static std::pair<uint32_t, uint32_t>
mergeMipsN32RelTypes(uint32_t Type, uint32_t Offset, RelTy *I, RelTy *E) {
// The MIPS N32 ABI treats a series of successive relocations with the same
// offset as a single relocation. A similar approach is used by the N64 ABI,
// but that ABI packs all relocations into a single relocation record. Here we
// emulate this for the N32 ABI. Iterate over relocations with the same offset
// and put their types into a single bit-set.
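// For example (with placeholder types T1 and T2), two records with types T1
// and T2 at the same offset are merged into the single value T1 | (T2 << 8).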
uint32_t Processed = 0;
for (; I != E && Offset == I->r_offset; ++I) {
++Processed;
Type |= I->getType(Config->IsMips64EL) << (8 * Processed);
}
return std::make_pair(Type, Processed);
}
// .eh_frame sections are mergeable input sections, so their input
// offsets are not linearly mapped to the output section. For each input
// offset, we need to find a section piece containing the offset and
// add the piece's base address to the input offset to compute the
// output offset. That isn't cheap.
//
// This class is to speed up the offset computation. When we process
// relocations, we access offsets in the monotonically increasing
// order. So we can optimize for that access pattern.
//
// For sections other than .eh_frame, this class doesn't do anything.
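// For example, if a piece has InputOff == 0x10, size == 0x20 and
// OutputOff == 0x100, then the input offset 0x18 is translated to
// 0x100 + (0x18 - 0x10) == 0x108 (these numbers are made up).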
namespace {
class OffsetGetter {
public:
explicit OffsetGetter(InputSectionBase &Sec) {
if (auto *Eh = dyn_cast<EhInputSection>(&Sec)) {
P = Eh->Pieces;
Size = Eh->Pieces.size();
}
}
// Translates offsets in input sections to offsets in output sections.
// The given offset must increase monotonically. We assume that P is
// sorted by InputOff.
uint64_t get(uint64_t Off) {
if (P.empty())
return Off;
while (I != Size && P[I].InputOff + P[I].size() <= Off)
++I;
if (I == Size)
return Off;
if (Off < P[I].InputOff) {
error("relocation not in any piece");
return -1;
}
// Offset -1 means that the piece is dead (i.e. garbage collected).
if (P[I].OutputOff == -1)
return -1;
return P[I].OutputOff + Off - P[I].InputOff;
}
private:
ArrayRef<EhSectionPiece> P;
size_t I = 0;
size_t Size;
};
} // namespace
// The reason we have to do this early scan is as follows:
// * To mmap the output file, we need to know the size
// * For that, we need to know how many dynamic relocs we will have.
// It might be possible to avoid this by outputting the file with write:
// * Write the allocated output sections, computing addresses.
// * Apply relocations, recording which ones require a dynamic reloc.
// * Write the dynamic relocations.
// * Write the rest of the file.
// This would have some drawbacks. For example, we would only know if .rela.dyn
// is needed after applying relocations. If it is, it will go after rw and rx
// sections. Given that it is ro, we will need an extra PT_LOAD. This
// complicates things for the dynamic linker and means we would have to reserve
// space for the extra PT_LOAD even if we end up not using it.
template <class ELFT, class RelTy>
static void scanRelocs(InputSectionBase &Sec, ArrayRef<RelTy> Rels) {
OffsetGetter GetOffset(Sec);
for (auto I = Rels.begin(), End = Rels.end(); I != End; ++I) {
const RelTy &Rel = *I;
SymbolBody &Body = Sec.getFile<ELFT>()->getRelocTargetSym(Rel);
uint32_t Type = Rel.getType(Config->IsMips64EL);
if (Config->MipsN32Abi) {
uint32_t Processed;
std::tie(Type, Processed) =
mergeMipsN32RelTypes(Type, Rel.r_offset, I + 1, End);
I += Processed;
}
// Compute the offset of this section in the output section.
uint64_t Offset = GetOffset.get(Rel.r_offset);
if (Offset == uint64_t(-1))
continue;
// Report undefined symbols. The fact that we report undefined
// symbols here means that we report undefined symbols only when
// they have relocations pointing to them. We don't care about
// undefined symbols that are in dead-stripped sections.
if (!Body.isLocal() && Body.isUndefined() && !Body.symbol()->isWeak())
reportUndefined<ELFT>(Body, Sec, Rel.r_offset);
RelExpr Expr = Target->getRelExpr(Type, Body);
// Ignore "hint" relocations because they are only markers for relaxation.
if (isRelExprOneOf<R_HINT, R_NONE>(Expr))
continue;
bool Preemptible = isPreemptible(Body, Type);
Expr = adjustExpr<ELFT>(Body, Expr, Type, Sec.Data.data() + Rel.r_offset,
Sec, Rel.r_offset);
if (ErrorCount)
continue;
// This relocation does not require a GOT entry, but it is relative to the GOT
// and needs the GOT to be created. Here we request that.
if (isRelExprOneOf<R_GOTONLY_PC, R_GOTONLY_PC_FROM_END, R_GOTREL,
R_GOTREL_FROM_END, R_PPC_TOC>(Expr))
In<ELFT>::Got->HasGotOffRel = true;
int64_t Addend = computeAddend<ELFT>(Rel, Sec.Data.data());
if (Config->EMachine == EM_MIPS)
Addend += computeMipsAddend<ELFT>(Rel, Sec, Expr, Body, End);
if (unsigned Processed =
handleTlsRelocation<ELFT>(Type, Body, Sec, Offset, Addend, Expr)) {
I += (Processed - 1);
continue;
}
if (Expr == R_TLSDESC_CALL)
continue;
if (!needsPlt(Expr) && !refersToGotEntry(Expr) &&
isPreemptible(Body, Type)) {
// We don't know anything about the final symbol. Just ask the dynamic
// linker to handle the relocation for us.
if (!Target->isPicRel(Type))
error(Sec.getLocation<ELFT>(Offset) + ": relocation " + toString(Type) +
" cannot be used against shared object; recompile with -fPIC.");
In<ELFT>::RelaDyn->addReloc(
{Target->getDynRel(Type), &Sec, Offset, false, &Body, Addend});
// The MIPS ABI turns the use of the GOT and dynamic relocations inside out.
// While the regular ABI uses dynamic relocations to fill up GOT entries,
// the MIPS ABI requires the dynamic linker to fill up GOT entries using a
// specially sorted dynamic symbol table. This affects even dynamic
// relocations against symbols which do not explicitly require GOT entry
// creation, i.e. do not have any GOT relocations. So if a preemptible
// symbol has a dynamic relocation, we have to create a GOT entry for it
// anyway.
// If a non-preemptible symbol has a dynamic relocation against it, the
// dynamic linker takes its st_value, adds the offset and writes down the
// result of the dynamic relocation. In the case of a preemptible symbol,
// the dynamic linker performs symbol resolution, writes the symbol value
// to the GOT entry and reads the GOT entry when it needs to perform
// a dynamic relocation.
// ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf p.4-19
if (Config->EMachine == EM_MIPS)
In<ELFT>::MipsGot->addEntry(Body, Addend, Expr);
continue;
}
// If the relocation points to something in the file, we can process it.
bool Constant =
isStaticLinkTimeConstant<ELFT>(Expr, Type, Body, Sec, Rel.r_offset);
// If the output being produced is position independent, the final value
// is still not known. In that case we still need some help from the
// dynamic linker. We can however do better than just copying the incoming
// relocation. We can process some of it and just ask the dynamic
// linker to add the load address.
if (!Constant)
In<ELFT>::RelaDyn->addReloc(
{Target->RelativeRel, &Sec, Offset, true, &Body, Addend});
// If the produced value is a constant, we just remember to write it
// when outputting this section. We also have to do it if the format
// uses Elf_Rel, since in that case the written value is the addend.
if (Constant || !RelTy::IsRela)
Sec.Relocations.push_back({Expr, Type, Offset, Addend, &Body});
// At this point we are done with the relocated position. Some relocations
// also require us to create a got or plt entry.
// If a relocation needs PLT, we create a PLT and a GOT slot for the symbol.
if (needsPlt(Expr)) {
if (Body.isInPlt())
continue;
if (Body.isGnuIFunc() && !Preemptible) {
InX::Iplt->addEntry<ELFT>(Body);
In<ELFT>::IgotPlt->addEntry(Body);
In<ELFT>::RelaIplt->addReloc({Target->IRelativeRel, In<ELFT>::IgotPlt,
Body.getGotPltOffset(), !Preemptible,
&Body, 0});
} else {
InX::Plt->addEntry<ELFT>(Body);
In<ELFT>::GotPlt->addEntry(Body);
In<ELFT>::RelaPlt->addReloc({Target->PltRel, In<ELFT>::GotPlt,
Body.getGotPltOffset(), !Preemptible,
&Body, 0});
}
continue;
}
if (refersToGotEntry(Expr)) {
if (Config->EMachine == EM_MIPS) {
// The MIPS ABI has special rules to process GOT entries and doesn't
// require relocation entries for them. A special case is TLS
// relocations. In that case the dynamic loader applies dynamic
// relocations to initialize TLS GOT entries.
// See "Global Offset Table" in Chapter 5 in the following document
// for detailed description:
// ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf
In<ELFT>::MipsGot->addEntry(Body, Addend, Expr);
if (Body.isTls() && Body.isPreemptible())
In<ELFT>::RelaDyn->addReloc({Target->TlsGotRel, In<ELFT>::MipsGot,
Body.getGotOffset(), false, &Body, 0});
continue;
}
if (Body.isInGot())
continue;
In<ELFT>::Got->addEntry(Body);
uint64_t Off = Body.getGotOffset();
uint32_t DynType;
RelExpr GotRE = R_ABS;
if (Body.isTls()) {
DynType = Target->TlsGotRel;
GotRE = R_TLS;
} else if (!Preemptible && Config->Pic && !isAbsolute(Body))
DynType = Target->RelativeRel;
else
DynType = Target->GotRel;
// FIXME: this logic is almost duplicated above.
bool Constant = !Preemptible && !(Config->Pic && !isAbsolute(Body));
if (!Constant)
In<ELFT>::RelaDyn->addReloc(
{DynType, In<ELFT>::Got, Off, !Preemptible, &Body, 0});
if (Constant || (!RelTy::IsRela && !Preemptible))
In<ELFT>::Got->Relocations.push_back({GotRE, DynType, Off, 0, &Body});
continue;
}
}
}
template <class ELFT> void scanRelocations(InputSectionBase &S) {
if (S.AreRelocsRela)
scanRelocs<ELFT>(S, S.relas<ELFT>());
else
scanRelocs<ELFT>(S, S.rels<ELFT>());
}
// Insert the Thunks for OutputSection OS into their designated place
// in the Sections vector, and recalculate the InputSection output section
// offsets.
// This may invalidate any output section offsets stored outside of
// InputSection.
static void mergeThunks(OutputSection *OS,
std::vector<ThunkSection *> &Thunks) {
// Order Thunks in ascending OutSecOff
auto ThunkCmp = [](const ThunkSection *A, const ThunkSection *B) {
return A->OutSecOff < B->OutSecOff;
};
std::stable_sort(Thunks.begin(), Thunks.end(), ThunkCmp);
// Merge sorted vectors of Thunks and InputSections by OutSecOff
std::vector<InputSection *> Tmp;
Tmp.reserve(OS->Sections.size() + Thunks.size());
auto MergeCmp = [](const InputSection *A, const InputSection *B) {
// std::merge requires a strict weak ordering.
if (A->OutSecOff < B->OutSecOff)
return true;
if (A->OutSecOff == B->OutSecOff)
// Check if Thunk is immediately before any specific Target InputSection
// for example Mips LA25 Thunks.
if (auto *TA = dyn_cast<ThunkSection>(A))
if (TA && TA->getTargetInputSection() == B)
return true;
return false;
};
std::merge(OS->Sections.begin(), OS->Sections.end(), Thunks.begin(),
Thunks.end(), std::back_inserter(Tmp), MergeCmp);
OS->Sections = std::move(Tmp);
OS->assignOffsets();
}
// Process all relocations from the InputSections that have been assigned
// to OutputSections and redirect through Thunks if needed.
//
// createThunks must be called after scanRelocs has created the Relocations for
// each InputSection. It must be called before the static symbol table is
// finalized. If any Thunks are added to an OutputSection the output section
// offsets of the InputSections will change.
//
// FIXME: All Thunks are assumed to be in range of the relocation. Range
// extension Thunks are not yet supported.
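// As a rough illustration of what a thunk is, the MIPS LA25 stub mentioned in
// mergeThunks above looks approximately like
//   lui   $25, %hi(func)
//   j     func
//   addiu $25, $25, %lo(func)
// i.e. a short sequence placed in range of (or in front of) the caller that
// sets up $25 and transfers control to the real target.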
template <class ELFT>
bool createThunks(ArrayRef<OutputSection *> OutputSections) {
// Track Symbols that already have a Thunk
DenseMap<SymbolBody *, Thunk *> ThunkedSymbols;
// Track InputSections that have a ThunkSection placed in front
DenseMap<InputSection *, ThunkSection *> ThunkedSections;
// Track the ThunksSections that need to be inserted into an OutputSection
std::map<OutputSection *, std::vector<ThunkSection *>> ThunkSections;
// Find or create a Thunk for Body for relocation Type
auto GetThunk = [&](SymbolBody &Body, uint32_t Type) {
auto res = ThunkedSymbols.insert({&Body, nullptr});
if (res.second)
res.first->second = addThunk<ELFT>(Type, Body);
return std::make_pair(res.first->second, res.second);
};
// Find or create a ThunkSection to be placed immediately before IS
auto GetISThunkSec = [&](InputSection *IS, OutputSection *OS) {
ThunkSection *TS = ThunkedSections.lookup(IS);
if (TS)
return TS;
auto *TOS = cast<OutputSection>(IS->OutSec);
TS = make<ThunkSection>(TOS, IS->OutSecOff);
ThunkSections[TOS].push_back(TS);
ThunkedSections[IS] = TS;
return TS;
};
// Find or create a ThunkSection to be placed as last executable section in
// OS.
auto GetOSThunkSec = [&](ThunkSection *&TS, OutputSection *OS) {
if (TS == nullptr) {
uint32_t Off = 0;
for (auto *IS : OS->Sections) {
Off = IS->OutSecOff + IS->getSize();
if ((IS->Flags & SHF_EXECINSTR) == 0)
break;
}
TS = make<ThunkSection>(OS, Off);
ThunkSections[OS].push_back(TS);
}
return TS;
};
// Create all the Thunks and insert them into synthetic ThunkSections. The
// ThunkSections are later inserted back into the OutputSection.
// We separate the creation of ThunkSections from the insertion of the
// ThunkSections back into the OutputSection as ThunkSections are not always
// inserted into the same OutputSection as the caller.
for (OutputSection *Base : OutputSections) {
auto *OS = dyn_cast<OutputSection>(Base);
if (OS == nullptr)
continue;
ThunkSection *OSTS = nullptr;
for (InputSection *IS : OS->Sections) {
for (Relocation &Rel : IS->Relocations) {
SymbolBody &Body = *Rel.Sym;
if (Target->needsThunk(Rel.Expr, Rel.Type, IS->File, Body)) {
Thunk *T;
bool IsNew;
std::tie(T, IsNew) = GetThunk(Body, Rel.Type);
if (IsNew) {
// Find or create a ThunkSection for the new Thunk
ThunkSection *TS;
if (auto *TIS = T->getTargetInputSection())
TS = GetISThunkSec(TIS, OS);
else
TS = GetOSThunkSec(OSTS, OS);
TS->addThunk(T);
}
// Redirect the relocation to the Thunk; we never go via the PLT to a Thunk.
Rel.Sym = T->ThunkSym;
Rel.Expr = fromPlt(Rel.Expr);
}
}
}
}
// Merge all created synthetic ThunkSections back into OutputSection
for (auto &KV : ThunkSections)
mergeThunks(KV.first, KV.second);
return !ThunkSections.empty();
}
template void scanRelocations<ELF32LE>(InputSectionBase &);
template void scanRelocations<ELF32BE>(InputSectionBase &);
template void scanRelocations<ELF64LE>(InputSectionBase &);
template void scanRelocations<ELF64BE>(InputSectionBase &);
template bool createThunks<ELF32LE>(ArrayRef<OutputSection *>);
template bool createThunks<ELF32BE>(ArrayRef<OutputSection *>);
template bool createThunks<ELF64LE>(ArrayRef<OutputSection *>);
template bool createThunks<ELF64BE>(ArrayRef<OutputSection *>);
} // namespace elf
} // namespace lld