//===- Relocations.cpp ----------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains platform-independent functions to process relocations.
// Here is an overview of how this file works.
//
// Simple relocations are easy for the linker to handle. For example,
// for R_X86_64_PC64 relocs, the linker just has to fix up locations
// with the relative offsets to the target symbols. It would just be
// reading records from relocation sections and applying them to output.
//
// But not all relocations are that easy to handle. For example, for
// R_386_GOTOFF relocs, the linker has to create new GOT entries for
// symbols if they don't exist, and fix up locations with GOT entry
// offsets from the beginning of the GOT section. So there is more to
// relocation processing than fixing up addresses.
//
// ELF defines a large number of complex relocations.
//
// The functions in this file analyze relocations and do whatever needs
// to be done. This includes, but is not limited to, the following.
//
//  - create GOT/PLT entries
//  - create new relocations in .dynsym to let the dynamic linker resolve
//    them at runtime (since ELF supports dynamic linking, not all
//    relocations can be resolved at link-time)
//  - create COPY relocs and reserve space in .bss
//  - replace expensive relocs (in terms of runtime cost) with cheap ones
//  - report an error for infeasible combinations such as PIC and
//    non-relative relocs
//
// Note that the functions in this file don't actually apply relocations
// because they don't know about the output file nor the output file buffer.
// They instead store Relocation objects to the InputSection's Relocations
// vector to be applied later in InputSection::writeTo.
//
//===----------------------------------------------------------------------===//
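
// For example (an illustrative sketch rather than lld's exact control flow),
// a PC-relative reference such as R_X86_64_PC64 against a non-preemptible
// symbol is simply recorded as
//
//   Sec.Relocations.push_back({R_PC, R_X86_64_PC64, Offset, Addend, &Sym});
//
// and InputSection::writeTo later computes something equivalent to
// Sym.getVA(Addend) - (SectionAddr + Offset) and patches the eight bytes at
// Offset. SectionAddr is a placeholder name for the section's final address.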

#include "Relocations.h"
#include "Config.h"
#include "LinkerScript.h"
#include "OutputSections.h"
#include "SymbolTable.h"
#include "Symbols.h"
#include "SyntheticSections.h"
#include "Target.h"
#include "Thunks.h"
#include "lld/Common/ErrorHandler.h"
#include "lld/Common/Memory.h"
#include "lld/Common/Strings.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>

using namespace llvm;
using namespace llvm::ELF;
using namespace llvm::object;
using namespace llvm::support::endian;

using namespace lld;
using namespace lld::elf;

static Optional<std::string> getLinkerScriptLocation(const Symbol &Sym) {
  for (BaseCommand *Base : Script->SectionCommands)
    if (auto *Cmd = dyn_cast<SymbolAssignment>(Base))
      if (Cmd->Sym == &Sym)
        return Cmd->Location;
  return None;
}

// Construct a message in the following format.
//
// >>> defined in /home/alice/src/foo.o
// >>> referenced by bar.c:12 (/home/alice/src/bar.c:12)
// >>>               /home/alice/src/bar.o:(.text+0x1)
static std::string getLocation(InputSectionBase &S, const Symbol &Sym,
                               uint64_t Off) {
  std::string Msg = "\n>>> defined in ";
  if (Sym.File)
    Msg += toString(Sym.File);
  else if (Optional<std::string> Loc = getLinkerScriptLocation(Sym))
    Msg += *Loc;

  Msg += "\n>>> referenced by ";
  std::string Src = S.getSrcMsg(Sym, Off);
  if (!Src.empty())
    Msg += Src + "\n>>> ";
  return Msg + S.getObjMsg(Off);
}

namespace {
// Build a bitmask with one bit set for each RelExpr.
//
// Constexpr function arguments can't be used in static asserts, so we
// use template arguments to build the mask.
// But function template partial specializations don't exist (needed
// for the base case of the recursion), so we need a dummy struct.
template <RelExpr... Exprs> struct RelExprMaskBuilder {
  static inline uint64_t build() { return 0; }
};

// Specialization for the recursive case.
template <RelExpr Head, RelExpr... Tail>
struct RelExprMaskBuilder<Head, Tail...> {
  static inline uint64_t build() {
    static_assert(0 <= Head && Head < 64,
                  "RelExpr is too large for 64-bit mask!");
    return (uint64_t(1) << Head) | RelExprMaskBuilder<Tail...>::build();
  }
};
} // namespace

// Return true if `Expr` is one of `Exprs`.
// There are fewer than 64 RelExpr's, so we can represent any set of
// RelExpr's as a constant bit mask and test for membership with a
// couple of cheap bitwise operations.
template <RelExpr... Exprs> bool oneof(RelExpr Expr) {
  assert(0 <= Expr && (int)Expr < 64 &&
         "RelExpr is too large for 64-bit mask!");
  return (uint64_t(1) << Expr) & RelExprMaskBuilder<Exprs...>::build();
}
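
// A minimal illustration of how the mask test above works (the numeric
// values below are made up for the example; the real values come from the
// RelExpr enum):
//
//   // With R_PC = 1 and R_PLT_PC = 3, RelExprMaskBuilder<R_PC, R_PLT_PC>
//   // ::build() yields 0b1010, so
//   oneof<R_PC, R_PLT_PC>(Expr);  // is equivalent to (1 << Expr) & 0b1010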

// This function is similar to the `handleTlsRelocation` below. MIPS does not
// support any relaxation for TLS relocations, so by factoring the MIPS
// handling out into a separate function we can simplify the code and avoid
// cluttering `handleTlsRelocation` with MIPS-specific `if` statements.
// Mips has a custom MipsGotSection that handles the writing of GOT entries
// without dynamic relocations.
static unsigned handleMipsTlsRelocation(RelType Type, Symbol &Sym,
                                        InputSectionBase &C, uint64_t Offset,
                                        int64_t Addend, RelExpr Expr) {
  if (Expr == R_MIPS_TLSLD) {
    In.MipsGot->addTlsIndex(*C.File);
    C.Relocations.push_back({Expr, Type, Offset, Addend, &Sym});
    return 1;
  }
  if (Expr == R_MIPS_TLSGD) {
    In.MipsGot->addDynTlsEntry(*C.File, Sym);
    C.Relocations.push_back({Expr, Type, Offset, Addend, &Sym});
    return 1;
  }
  return 0;
}

// This function is similar to `handleMipsTlsRelocation` above. ARM also does
// not support any relaxation for TLS relocations. ARM is logically similar to
// Mips in how it handles TLS, but Mips uses its own custom GOT which handles
// some of the cases that ARM uses GOT relocations for.
//
// We look for TLS global dynamic and local dynamic relocations; these may
// require the generation of a pair of GOT entries that have associated
// dynamic relocations. When the results of the dynamic relocations can be
// resolved at static link time we do so. This is necessary for static linking
// as there will be no dynamic loader to resolve them at load-time.
//
// The pair of GOT entries created are of the form
// GOT[e0] Module Index (used to find the pointer to the TLS block at run-time)
// GOT[e1] Offset of symbol in TLS block
static unsigned handleARMTlsRelocation(RelType Type, Symbol &Sym,
                                       InputSectionBase &C, uint64_t Offset,
                                       int64_t Addend, RelExpr Expr) {
  // The Dynamic TLS Module Index Relocation for a symbol defined in an
  // executable is always 1. If the target Symbol is not preemptible then
  // we know the offset into the TLS block at static link time.
  bool NeedDynId = Sym.IsPreemptible || Config->Shared;
  bool NeedDynOff = Sym.IsPreemptible;

  auto AddTlsReloc = [&](uint64_t Off, RelType Type, Symbol *Dest, bool Dyn) {
    if (Dyn)
      In.RelaDyn->addReloc(Type, In.Got, Off, Dest);
    else
      In.Got->Relocations.push_back({R_ABS, Type, Off, 0, Dest});
  };

  // Local Dynamic is for access to module local TLS variables, while still
  // being suitable for being dynamically loaded via dlopen.
  // GOT[e0] is the module index, with a special value of 0 for the current
  // module. GOT[e1] is unused. There only needs to be one module index entry.
  if (Expr == R_TLSLD_PC && In.Got->addTlsIndex()) {
    AddTlsReloc(In.Got->getTlsIndexOff(), Target->TlsModuleIndexRel,
                NeedDynId ? nullptr : &Sym, NeedDynId);
    C.Relocations.push_back({Expr, Type, Offset, Addend, &Sym});
    return 1;
  }

  // Global Dynamic is the most general purpose access model. When we know
  // the module index and the offset of the symbol in the TLS block we can
  // fill these in using static GOT relocations.
  if (Expr == R_TLSGD_PC) {
    if (In.Got->addDynTlsEntry(Sym)) {
      uint64_t Off = In.Got->getGlobalDynOffset(Sym);
      AddTlsReloc(Off, Target->TlsModuleIndexRel, &Sym, NeedDynId);
      AddTlsReloc(Off + Config->Wordsize, Target->TlsOffsetRel, &Sym,
                  NeedDynOff);
    }
    C.Relocations.push_back({Expr, Type, Offset, Addend, &Sym});
    return 1;
  }
  return 0;
}
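
// For instance (a sketch of what the function above produces): for a global
// dynamic access to a preemptible symbol `x`, both GOT slots are left for the
// dynamic linker to fill, roughly
//
//   GOT[e0]  R_ARM_TLS_DTPMOD32 against x   (module index)
//   GOT[e1]  R_ARM_TLS_DTPOFF32 against x   (offset of x in its TLS block)
//
// whereas for a non-preemptible symbol in an executable both slots become
// static relocations that this linker resolves itself.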

// Returns the number of relocations processed.
template <class ELFT>
static unsigned
handleTlsRelocation(RelType Type, Symbol &Sym, InputSectionBase &C,
                    typename ELFT::uint Offset, int64_t Addend, RelExpr Expr) {
  if (!Sym.isTls())
    return 0;

  if (Config->EMachine == EM_ARM)
    return handleARMTlsRelocation(Type, Sym, C, Offset, Addend, Expr);
  if (Config->EMachine == EM_MIPS)
    return handleMipsTlsRelocation(Type, Sym, C, Offset, Addend, Expr);

  if (oneof<R_TLSDESC, R_AARCH64_TLSDESC_PAGE, R_TLSDESC_CALL>(Expr) &&
      Config->Shared) {
    if (In.Got->addDynTlsEntry(Sym)) {
      uint64_t Off = In.Got->getGlobalDynOffset(Sym);
      In.RelaDyn->addReloc(
          {Target->TlsDescRel, In.Got, Off, !Sym.IsPreemptible, &Sym, 0});
    }
    if (Expr != R_TLSDESC_CALL)
      C.Relocations.push_back({Expr, Type, Offset, Addend, &Sym});
    return 1;
  }

  if (oneof<R_TLSLD_GOT, R_TLSLD_GOTPLT, R_TLSLD_PC, R_TLSLD_HINT>(
          Expr)) {
    // Local-Dynamic relocs can be relaxed to Local-Exec.
    if (!Config->Shared) {
      C.Relocations.push_back(
          {Target->adjustRelaxExpr(Type, nullptr, R_RELAX_TLS_LD_TO_LE), Type,
           Offset, Addend, &Sym});
      return Target->getTlsGdRelaxSkip(Type);
    }
    if (Expr == R_TLSLD_HINT)
      return 1;
    if (In.Got->addTlsIndex())
      In.RelaDyn->addReloc(Target->TlsModuleIndexRel, In.Got,
                           In.Got->getTlsIndexOff(), nullptr);
    C.Relocations.push_back({Expr, Type, Offset, Addend, &Sym});
    return 1;
  }

  // Local-Dynamic relocs can be relaxed to Local-Exec.
  if (Expr == R_DTPREL && !Config->Shared) {
    C.Relocations.push_back(
        {Target->adjustRelaxExpr(Type, nullptr, R_RELAX_TLS_LD_TO_LE), Type,
         Offset, Addend, &Sym});
    return 1;
  }

  // Local-Dynamic sequence where the offset of the TLS variable relative to
  // the dynamic thread pointer is stored in the GOT.
  if (Expr == R_TLSLD_GOT_OFF) {
    // Local-Dynamic relocs can be relaxed to Local-Exec.
    if (!Config->Shared) {
      C.Relocations.push_back({R_RELAX_TLS_LD_TO_LE, Type, Offset, Addend, &Sym});
      return 1;
    }
    if (!Sym.isInGot()) {
      In.Got->addEntry(Sym);
      uint64_t Off = Sym.getGotOffset();
      In.Got->Relocations.push_back(
          {R_ABS, Target->TlsOffsetRel, Off, 0, &Sym});
    }
    C.Relocations.push_back({Expr, Type, Offset, Addend, &Sym});
    return 1;
  }

  if (oneof<R_TLSDESC, R_AARCH64_TLSDESC_PAGE, R_TLSDESC_CALL, R_TLSGD_GOT,
            R_TLSGD_GOTPLT, R_TLSGD_PC>(Expr)) {
    if (Config->Shared) {
      if (In.Got->addDynTlsEntry(Sym)) {
        uint64_t Off = In.Got->getGlobalDynOffset(Sym);
        In.RelaDyn->addReloc(Target->TlsModuleIndexRel, In.Got, Off, &Sym);

        // If the symbol is preemptible we need the dynamic linker to write
        // the offset too.
        uint64_t OffsetOff = Off + Config->Wordsize;
        if (Sym.IsPreemptible)
          In.RelaDyn->addReloc(Target->TlsOffsetRel, In.Got, OffsetOff, &Sym);
        else
          In.Got->Relocations.push_back(
              {R_ABS, Target->TlsOffsetRel, OffsetOff, 0, &Sym});
      }
      C.Relocations.push_back({Expr, Type, Offset, Addend, &Sym});
      return 1;
    }

    // Global-Dynamic relocs can be relaxed to Initial-Exec or Local-Exec
    // depending on whether the symbol is locally defined or not.
    if (Sym.IsPreemptible) {
      C.Relocations.push_back(
          {Target->adjustRelaxExpr(Type, nullptr, R_RELAX_TLS_GD_TO_IE), Type,
           Offset, Addend, &Sym});
      if (!Sym.isInGot()) {
        In.Got->addEntry(Sym);
        In.RelaDyn->addReloc(Target->TlsGotRel, In.Got, Sym.getGotOffset(),
                             &Sym);
      }
    } else {
      C.Relocations.push_back(
          {Target->adjustRelaxExpr(Type, nullptr, R_RELAX_TLS_GD_TO_LE), Type,
           Offset, Addend, &Sym});
    }
    return Target->getTlsGdRelaxSkip(Type);
  }

  // Initial-Exec relocs can be relaxed to Local-Exec if the symbol is locally
  // defined.
  if (oneof<R_GOT, R_GOTPLT, R_GOT_PC, R_AARCH64_GOT_PAGE_PC, R_GOT_OFF,
            R_TLSIE_HINT>(Expr) &&
      !Config->Shared && !Sym.IsPreemptible) {
    C.Relocations.push_back({R_RELAX_TLS_IE_TO_LE, Type, Offset, Addend, &Sym});
    return 1;
  }

  if (Expr == R_TLSIE_HINT)
    return 1;
  return 0;
}
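
// As a concrete illustration of what the relaxations above mean on x86-64
// (a simplified sketch based on the psABI TLS sequences, not code from this
// file): a general-dynamic access
//
//   leaq  x@tlsgd(%rip), %rdi
//   call  __tls_get_addr@plt
//
// can be rewritten by the linker into a local-exec access when the symbol is
// non-preemptible and we are producing an executable:
//
//   movq  %fs:0, %rax
//   leaq  x@tpoff(%rax), %rax
//
// which is why the scan records R_RELAX_TLS_GD_TO_LE instead of emitting a
// dynamic relocation, and getTlsGdRelaxSkip tells the caller how many
// relocations the rewritten sequence consumed.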

static RelType getMipsPairType(RelType Type, bool IsLocal) {
  switch (Type) {
  case R_MIPS_HI16:
    return R_MIPS_LO16;
  case R_MIPS_GOT16:
    // In the case of a global symbol, the R_MIPS_GOT16 relocation does not
    // have a pair. Each global symbol has a unique entry in the GOT, and the
    // corresponding instruction, with the help of the R_MIPS_GOT16
    // relocation, loads the address of the symbol. In the case of a local
    // symbol, the R_MIPS_GOT16 relocation creates a GOT entry to hold
    // the high 16 bits of the symbol's value. A paired R_MIPS_LO16
    // relocation handles the low 16 bits of the address. That allows
    // us to allocate only one GOT entry for every 64 KBytes of local data.
    return IsLocal ? R_MIPS_LO16 : R_MIPS_NONE;
  case R_MICROMIPS_GOT16:
    return IsLocal ? R_MICROMIPS_LO16 : R_MIPS_NONE;
  case R_MIPS_PCHI16:
    return R_MIPS_PCLO16;
  case R_MICROMIPS_HI16:
    return R_MICROMIPS_LO16;
  default:
    return R_MIPS_NONE;
  }
}

// True if a non-preemptible symbol always has the same value regardless of
// where the DSO is loaded.
static bool isAbsolute(const Symbol &Sym) {
  if (Sym.isUndefWeak())
    return true;
  if (const auto *DR = dyn_cast<Defined>(&Sym))
    return DR->Section == nullptr; // Absolute symbol.
  return false;
}

static bool isAbsoluteValue(const Symbol &Sym) {
  return isAbsolute(Sym) || Sym.isTls();
}

// Returns true if Expr refers to a PLT entry.
static bool needsPlt(RelExpr Expr) {
  return oneof<R_PLT_PC, R_PPC_CALL_PLT, R_PLT>(Expr);
}

// Returns true if Expr refers to a GOT entry. Note that this function
// returns false for TLS variables even though they need the GOT, because
// TLS variables use the GOT differently than regular variables.
static bool needsGot(RelExpr Expr) {
  return oneof<R_GOT, R_GOT_OFF, R_HEXAGON_GOT, R_MIPS_GOT_LOCAL_PAGE,
               R_MIPS_GOT_OFF, R_MIPS_GOT_OFF32, R_AARCH64_GOT_PAGE_PC,
               R_GOT_PC, R_GOTPLT>(Expr);
}

// True if this expression is of the form Sym - X, where X is a position in the
// file (PC, or GOT for example).
static bool isRelExpr(RelExpr Expr) {
  return oneof<R_PC, R_GOTREL, R_GOTPLTREL, R_MIPS_GOTREL, R_PPC_CALL,
               R_PPC_CALL_PLT, R_AARCH64_PAGE_PC, R_RELAX_GOT_PC>(Expr);
}
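
// For example, R_PC above stands for the familiar "S + A - P" computation
// (symbol value plus addend minus the address of the place being relocated),
// so the result depends on where the referencing location ends up, which is
// exactly the "Sym - X" shape this predicate is checking for.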

// Returns true if a given relocation can be computed at link-time.
//
// For instance, we know the offset from a relocation to its target at
// link-time if the relocation is PC-relative and refers to a
// non-interposable function in the same executable. This function
// will return true for such relocations.
//
// If this function returns false, that means we need to emit a
// dynamic relocation so that the relocation will be fixed at load-time.
static bool isStaticLinkTimeConstant(RelExpr E, RelType Type, const Symbol &Sym,
                                     InputSectionBase &S, uint64_t RelOff) {
  // These expressions always compute a constant.
  if (oneof<R_DTPREL, R_GOTPLT, R_GOT_OFF, R_HEXAGON_GOT, R_TLSLD_GOT_OFF,
            R_MIPS_GOT_LOCAL_PAGE, R_MIPS_GOTREL, R_MIPS_GOT_OFF,
            R_MIPS_GOT_OFF32, R_MIPS_GOT_GP_PC, R_MIPS_TLSGD,
            R_AARCH64_GOT_PAGE_PC, R_GOT_PC, R_GOTONLY_PC, R_GOTPLTONLY_PC,
            R_PLT_PC, R_TLSGD_GOT, R_TLSGD_GOTPLT, R_TLSGD_PC, R_PPC_CALL_PLT,
            R_TLSDESC_CALL, R_AARCH64_TLSDESC_PAGE, R_HINT, R_TLSLD_HINT,
            R_TLSIE_HINT>(E))
    return true;

  // These never do, except if the entire file is position dependent or if
  // only the low bits are used.
  if (E == R_GOT || E == R_PLT || E == R_TLSDESC)
    return Target->usesOnlyLowPageBits(Type) || !Config->Pic;

  if (Sym.IsPreemptible)
    return false;
  if (!Config->Pic)
    return true;

  // The size of a non-preemptible symbol is a constant.
  if (E == R_SIZE)
    return true;

  // We set the final symbol values for linker-script-defined symbols later.
  // They can always be computed as a link-time constant.
  if (Sym.ScriptDefined)
    return true;

  // For the target and the relocation, we want to know if they are
  // absolute or relative.
  bool AbsVal = isAbsoluteValue(Sym);
  bool RelE = isRelExpr(E);
  if (AbsVal && !RelE)
    return true;
  if (!AbsVal && RelE)
    return true;
  if (!AbsVal && !RelE)
    return Target->usesOnlyLowPageBits(Type);

  // Relative relocation to an absolute value. This is normally unrepresentable,
  // but if the relocation refers to a weak undefined symbol, we allow it to
  // resolve to the image base. This is a little strange, but it allows us to
  // link function calls to such symbols. Normally such a call will be guarded
  // with a comparison, which will load a zero from the GOT.
  // Another special case is the MIPS _gp_disp symbol, which represents the
  // offset between the start of a function and the '_gp' value; it is defined
  // as absolute just to simplify the code.
  assert(AbsVal && RelE);
  if (Sym.isUndefWeak())
    return true;

  error("relocation " + toString(Type) + " cannot refer to absolute symbol: " +
        toString(Sym) + getLocation(S, Sym, RelOff));
  return true;
}

static RelExpr toPlt(RelExpr Expr) {
  switch (Expr) {
  case R_PPC_CALL:
    return R_PPC_CALL_PLT;
  case R_PC:
    return R_PLT_PC;
  case R_ABS:
    return R_PLT;
  default:
    return Expr;
  }
}

static RelExpr fromPlt(RelExpr Expr) {
  // We decided not to use a plt. Optimize a reference to the plt to a
  // reference to the symbol itself.
  switch (Expr) {
  case R_PLT_PC:
    return R_PC;
  case R_PPC_CALL_PLT:
    return R_PPC_CALL;
  case R_PLT:
    return R_ABS;
  default:
    return Expr;
  }
}

// Returns true if a given shared symbol is in a read-only segment in a DSO.
template <class ELFT> static bool isReadOnly(SharedSymbol &SS) {
  using Elf_Phdr = typename ELFT::Phdr;

  // Determine if the symbol is read-only by scanning the DSO's program headers.
  const SharedFile &File = SS.getFile();
  for (const Elf_Phdr &Phdr :
       check(File.template getObj<ELFT>().program_headers()))
    if ((Phdr.p_type == ELF::PT_LOAD || Phdr.p_type == ELF::PT_GNU_RELRO) &&
        !(Phdr.p_flags & ELF::PF_W) && SS.Value >= Phdr.p_vaddr &&
        SS.Value < Phdr.p_vaddr + Phdr.p_memsz)
      return true;
  return false;
}

// Returns symbols at the same offset as a given symbol, including SS itself.
//
// If two or more symbols are at the same offset, and at least one of
// them is copied by a copy relocation, all of them need to be copied.
// Otherwise, they would refer to different places at runtime.
template <class ELFT>
static SmallSet<SharedSymbol *, 4> getSymbolsAt(SharedSymbol &SS) {
  using Elf_Sym = typename ELFT::Sym;

  SharedFile &File = SS.getFile();

  SmallSet<SharedSymbol *, 4> Ret;
  for (const Elf_Sym &S : File.template getGlobalELFSyms<ELFT>()) {
    if (S.st_shndx == SHN_UNDEF || S.st_shndx == SHN_ABS ||
        S.getType() == STT_TLS || S.st_value != SS.Value)
      continue;
    StringRef Name = check(S.getName(File.getStringTable()));
    Symbol *Sym = Symtab->find(Name);
    if (auto *Alias = dyn_cast_or_null<SharedSymbol>(Sym))
      Ret.insert(Alias);
  }
  return Ret;
}

// When a symbol is copy relocated or we create a canonical plt entry, it is
// effectively a defined symbol. In the case of copy relocation the symbol is
// in .bss and in the case of a canonical plt entry it is in .plt. This function
// replaces the existing symbol with a Defined pointing to the appropriate
// location.
static void replaceWithDefined(Symbol &Sym, SectionBase *Sec, uint64_t Value,
                               uint64_t Size) {
  Symbol Old = Sym;
  replaceSymbol<Defined>(&Sym, Sym.File, Sym.getName(), Sym.Binding,
                         Sym.StOther, Sym.Type, Value, Size, Sec);
  Sym.PltIndex = Old.PltIndex;
  Sym.GotIndex = Old.GotIndex;
  Sym.VerdefIndex = Old.VerdefIndex;
  Sym.PPC64BranchltIndex = Old.PPC64BranchltIndex;
  Sym.IsPreemptible = true;
  Sym.ExportDynamic = true;
  Sym.IsUsedInRegularObj = true;
  Sym.Used = true;
}

// Reserve space in .bss or .bss.rel.ro for copy relocation.
//
// The copy relocation is pretty much a hack. If you use a copy relocation
// in your program, not only the symbol name but the symbol's size, RW/RO
// bit and alignment become part of the ABI. In addition to that, if the
// symbol has aliases, the aliases become part of the ABI. That's subtle,
// but if you violate that implicit ABI, that can cause very counter-
// intuitive consequences.
//
// So, what is the copy relocation? It's for linking non-position
// independent code to DSOs. In an ideal world, all references to data
// exported by DSOs should go indirectly through GOT. But if object files
// are compiled as non-PIC, all data references are direct. There is no
// way for the linker to transform the code to use GOT, as machine
// instructions are already set in stone in object files. This is where
// the copy relocation takes a role.
//
// A copy relocation instructs the dynamic linker to copy data from a DSO
// to a specified address (which is usually in .bss) at load-time. If the
// static linker (that's us) finds a direct data reference to a DSO
// symbol, it creates a copy relocation, so that the symbol can be
// resolved as if it were in .bss rather than in a DSO.
//
// As you can see in this function, we create a copy relocation for the
// dynamic linker, and the relocation contains not only the symbol name but
// various other information about the symbol. So, such attributes become a
// part of the ABI.
//
// Note for application developers: I can give you a piece of advice if
// you are writing a shared library. You probably should export only
// functions from your library. You shouldn't export variables.
//
// As an example of what can happen when you export variables without knowing
// the semantics of copy relocations, assume that you have an exported
// variable of type T. It is an ABI-breaking change to add new members at
// the end of T even though doing that doesn't change the layout of the
// existing members. That's because the space for the new members is not
// reserved in .bss unless you recompile the main program. That means they
// are likely to overlap with other data that happens to be laid out next
// to the variable in .bss. This kind of issue is sometimes very hard to
// debug. What's a solution? Instead of exporting a variable V from a DSO,
// define an accessor getV().
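//
// For instance (an illustrative sketch, not code from this project):
//
//   extern T V;   // fragile: a copy relocation bakes sizeof(T) and the
//                 // alignment of V into every non-PIC executable
//   T *getV();    // robust: only the accessor function is exported, so
//                 // T's size and layout stay private to the DSO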
template <class ELFT> static void addCopyRelSymbol(SharedSymbol &SS) {
  // Copy relocation against a zero-sized symbol doesn't make sense.
  uint64_t SymSize = SS.getSize();
  if (SymSize == 0 || SS.Alignment == 0)
    fatal("cannot create a copy relocation for symbol " + toString(SS));

  // See if this symbol is in a read-only segment. If so, preserve the symbol's
  // memory protection by reserving space in the .bss.rel.ro section.
  bool IsReadOnly = isReadOnly<ELFT>(SS);
  BssSection *Sec = make<BssSection>(IsReadOnly ? ".bss.rel.ro" : ".bss",
                                     SymSize, SS.Alignment);
  if (IsReadOnly)
    In.BssRelRo->getParent()->addSection(Sec);
  else
    In.Bss->getParent()->addSection(Sec);

  // Look through the DSO's dynamic symbol table for aliases and create a
  // dynamic symbol for each one. This causes the copy relocation to correctly
  // interpose any aliases.
  for (SharedSymbol *Sym : getSymbolsAt<ELFT>(SS))
    replaceWithDefined(*Sym, Sec, 0, Sym->Size);

  In.RelaDyn->addReloc(Target->CopyRel, Sec, 0, &SS);
}

// MIPS has an odd notion of "paired" relocations to calculate addends.
// For example, if a relocation is of type R_MIPS_HI16, there must be a
// R_MIPS_LO16 relocation after it, and the addend is calculated using
// the two relocations.
template <class ELFT, class RelTy>
static int64_t computeMipsAddend(const RelTy &Rel, const RelTy *End,
                                 InputSectionBase &Sec, RelExpr Expr,
                                 bool IsLocal) {
  if (Expr == R_MIPS_GOTREL && IsLocal)
    return Sec.getFile<ELFT>()->MipsGp0;

  // The ABI says that the paired relocation is used only for REL.
  // See p. 4-17 at ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf
  if (RelTy::IsRela)
    return 0;

  RelType Type = Rel.getType(Config->IsMips64EL);
  uint32_t PairTy = getMipsPairType(Type, IsLocal);
  if (PairTy == R_MIPS_NONE)
    return 0;

  const uint8_t *Buf = Sec.data().data();
  uint32_t SymIndex = Rel.getSymbol(Config->IsMips64EL);

  // To make things worse, paired relocations might not be contiguous in
  // the relocation table, so we need to do a linear search. *sigh*
  for (const RelTy *RI = &Rel; RI != End; ++RI)
    if (RI->getType(Config->IsMips64EL) == PairTy &&
        RI->getSymbol(Config->IsMips64EL) == SymIndex)
      return Target->getImplicitAddend(Buf + RI->r_offset, PairTy);

  warn("can't find matching " + toString(PairTy) + " relocation for " +
       toString(Type));
  return 0;
}
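
// A sketch of the arithmetic involved above (see the MIPS ABI for the precise
// rules): for a HI16/LO16 pair the effective addend is reassembled from the
// two in-place 16-bit fields, roughly
//
//   AHL = (AHI << 16) + (int16_t)ALO;
//
// where AHI comes from the R_MIPS_HI16 location and ALO from the paired
// R_MIPS_LO16 location found by the search above.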

// Returns the addend of a given relocation. If it is RELA, the addend
// is stored in the relocation itself. If it is REL, we need to read it from
// the input section.
template <class ELFT, class RelTy>
static int64_t computeAddend(const RelTy &Rel, const RelTy *End,
                             InputSectionBase &Sec, RelExpr Expr,
                             bool IsLocal) {
  int64_t Addend;
  RelType Type = Rel.getType(Config->IsMips64EL);

  if (RelTy::IsRela) {
    Addend = getAddend<ELFT>(Rel);
  } else {
    const uint8_t *Buf = Sec.data().data();
    Addend = Target->getImplicitAddend(Buf + Rel.r_offset, Type);
  }

  if (Config->EMachine == EM_PPC64 && Config->Pic && Type == R_PPC64_TOC)
    Addend += getPPC64TocBase();
  if (Config->EMachine == EM_MIPS)
    Addend += computeMipsAddend<ELFT>(Rel, End, Sec, Expr, IsLocal);

  return Addend;
}
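
// For example (illustrative, following the usual ELF conventions rather than
// anything specific to this function): on x86-64, which uses RELA, the -4
// addend of a call is carried in r_addend; on i386, which uses REL, the same
// -4 is read back from the bytes at r_offset via Target->getImplicitAddend.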

// Report an undefined symbol if necessary.
// Returns true if this function printed out an error message.
static bool maybeReportUndefined(Symbol &Sym, InputSectionBase &Sec,
                                 uint64_t Offset) {
  if (Sym.isLocal() || !Sym.isUndefined() || Sym.isWeak())
    return false;

  bool CanBeExternal =
      Sym.computeBinding() != STB_LOCAL && Sym.Visibility == STV_DEFAULT;
  if (Config->UnresolvedSymbols == UnresolvedPolicy::Ignore && CanBeExternal)
    return false;

  std::string Msg = "undefined ";
  if (Sym.Visibility == STV_INTERNAL)
    Msg += "internal ";
  else if (Sym.Visibility == STV_HIDDEN)
    Msg += "hidden ";
  else if (Sym.Visibility == STV_PROTECTED)
    Msg += "protected ";
  Msg += "symbol: " + toString(Sym) + "\n>>> referenced by ";

  std::string Src = Sec.getSrcMsg(Sym, Offset);
  if (!Src.empty())
    Msg += Src + "\n>>> ";
  Msg += Sec.getObjMsg(Offset);

  if (Sym.getName().startswith("_ZTV"))
    Msg += "\nthe vtable symbol may be undefined because the class is missing "
           "its key function (see https://lld.llvm.org/missingkeyfunction)";

  if ((Config->UnresolvedSymbols == UnresolvedPolicy::Warn && CanBeExternal) ||
      Config->NoinhibitExec) {
    warn(Msg);
    return false;
  }

  error(Msg);
  return true;
}

// The MIPS N32 ABI treats a series of successive relocations with the same
// offset as a single relocation. The N64 ABI takes a similar approach, but
// packs all relocations into a single relocation record. Here we emulate
// this for the N32 ABI. Iterate over relocations with the same offset and
// pack their types into a single bit-set.
template <class RelTy>
static RelType getMipsN32RelType(RelTy *&Rel, RelTy *End) {
  RelType Type = 0;
  uint64_t Offset = Rel->r_offset;

  int N = 0;
  while (Rel != End && Rel->r_offset == Offset)
    Type |= (Rel++)->getType(Config->IsMips64EL) << (8 * N++);
  return Type;
}
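
// For example (using placeholder relocation names R1, R2, R3): three N32
// relocation records that share the same r_offset are folded by the loop
// above into one composite value
//
//   Type == R1 | (R2 << 8) | (R3 << 16)
//
// which mirrors how a single N64 record carries r_type, r_type2 and r_type3.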

// .eh_frame sections are mergeable input sections, so their input
// offsets are not linearly mapped to the output section. For each input
// offset, we need to find a section piece containing the offset and
// add the piece's base address to the input offset to compute the
// output offset. That isn't cheap.
//
// This class is to speed up the offset computation. When we process
// relocations, we access offsets in monotonically increasing
// order. So we can optimize for that access pattern.
//
// For sections other than .eh_frame, this class doesn't do anything.
namespace {
class OffsetGetter {
public:
  explicit OffsetGetter(InputSectionBase &Sec) {
    if (auto *Eh = dyn_cast<EhInputSection>(&Sec))
      Pieces = Eh->Pieces;
  }

  // Translates offsets in input sections to offsets in output sections.
  // The given offset must increase monotonically. We assume that Pieces is
  // sorted by InputOff.
  uint64_t get(uint64_t Off) {
    if (Pieces.empty())
      return Off;

    while (I != Pieces.size() && Pieces[I].InputOff + Pieces[I].Size <= Off)
      ++I;
    if (I == Pieces.size())
      fatal(".eh_frame: relocation is not in any piece");

    // Pieces must be contiguous, so there must be no holes in between.
    assert(Pieces[I].InputOff <= Off && "Relocation not in any piece");

    // Offset -1 means that the piece is dead (i.e. garbage collected).
    if (Pieces[I].OutputOff == -1)
      return -1;
    return Pieces[I].OutputOff + Off - Pieces[I].InputOff;
  }

private:
  ArrayRef<EhSectionPiece> Pieces;
  size_t I = 0;
};
} // namespace
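
// A sketch of the intended usage (not a verbatim excerpt from the scanning
// code): one OffsetGetter is created per input section and queried once per
// relocation, in ascending r_offset order, so the internal index I only ever
// moves forward:
//
//   OffsetGetter GetOffset(Sec);
//   for (const RelTy &Rel : Rels) {        // Rels sorted by r_offset.
//     uint64_t Offset = GetOffset.get(Rel.r_offset);
//     ...
//   }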
|
2017-03-26 10:26:33 +08:00
|
|
|
|
2018-07-10 04:08:55 +08:00
|
|
|
static void addRelativeReloc(InputSectionBase *IS, uint64_t OffsetInSec,
|
|
|
|
Symbol *Sym, int64_t Addend, RelExpr Expr,
|
|
|
|
RelType Type) {
|
|
|
|
// Add a relative relocation. If RelrDyn section is enabled, and the
|
|
|
|
// relocation offset is guaranteed to be even, add the relocation to
|
|
|
|
// the RelrDyn section, otherwise add it to the RelaDyn section.
|
|
|
|
// RelrDyn sections don't support odd offsets. Also, RelrDyn sections
|
|
|
|
// don't store the addend values, so we must write it to the relocated
|
|
|
|
// address.
|
2018-09-26 03:26:58 +08:00
|
|
|
if (In.RelrDyn && IS->Alignment >= 2 && OffsetInSec % 2 == 0) {
|
2018-07-10 04:08:55 +08:00
|
|
|
IS->Relocations.push_back({Expr, Type, OffsetInSec, Addend, Sym});
|
2018-09-26 03:26:58 +08:00
|
|
|
In.RelrDyn->Relocs.push_back({IS, OffsetInSec});
|
2018-07-10 04:08:55 +08:00
|
|
|
return;
|
|
|
|
}
|
2018-09-26 03:26:58 +08:00
|
|
|
In.RelaDyn->addReloc(Target->RelativeRel, IS, OffsetInSec, Sym, Addend, Expr,
|
|
|
|
Type);
|
2018-07-10 04:08:55 +08:00
|
|
|
}
|
|
|
|
|
2017-03-27 04:51:14 +08:00
|
|
|
template <class ELFT, class GotPltSection>
static void addPltEntry(PltSection *Plt, GotPltSection *GotPlt,
                        RelocationBaseSection *Rel, RelType Type, Symbol &Sym) {
  Plt->addEntry<ELFT>(Sym);
  GotPlt->addEntry(Sym);
  Rel->addReloc(
      {Type, GotPlt, Sym.getGotPltOffset(), !Sym.IsPreemptible, &Sym, 0});
}

static void addGotEntry(Symbol &Sym) {
  In.Got->addEntry(Sym);

  RelExpr Expr = Sym.isTls() ? R_TLS : R_ABS;
  uint64_t Off = Sym.getGotOffset();

  // If a GOT slot value can be calculated at link-time, which is now,
  // we can just fill that out.
  //
  // (We don't actually write a value to a GOT slot right now, but we
  // add a static relocation to a Relocations vector so that
  // InputSection::relocate will do the work for us. We may be able
  // to just write a value now, but it is a TODO.)
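  //
  // In summary: a non-preemptible symbol in a non-PIC link (or an absolute
  // symbol) gets a link-time constant slot; a non-preemptible, non-absolute
  // symbol in a PIC link gets a relative dynamic relocation; everything else
  // (preemptible or TLS) gets a symbolic GOT/TLS dynamic relocation below.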
  bool IsLinkTimeConstant =
      !Sym.IsPreemptible && (!Config->Pic || isAbsolute(Sym));
  if (IsLinkTimeConstant) {
    In.Got->Relocations.push_back({Expr, Target->GotRel, Off, 0, &Sym});
    return;
  }

  // Otherwise, we emit a dynamic relocation to .rel[a].dyn so that
  // the GOT slot will be fixed at load-time.
  if (!Sym.isTls() && !Sym.IsPreemptible && Config->Pic && !isAbsolute(Sym)) {
    addRelativeReloc(In.Got, Off, &Sym, 0, R_ABS, Target->GotRel);
    return;
  }
  In.RelaDyn->addReloc(Sym.isTls() ? Target->TlsGotRel : Target->GotRel, In.Got,
                       Off, &Sym, 0, Sym.IsPreemptible ? R_ADDEND : R_ABS,
                       Target->GotRel);
}

// Return true if we can define a symbol in the executable that
// contains the value/function of a symbol defined in a shared
// library.
static bool canDefineSymbolInExecutable(Symbol &Sym) {
  // If the symbol has default visibility the symbol defined in the
  // executable will preempt it.
  // Note that we want the visibility of the shared symbol itself, not
  // the visibility of the symbol in the output file we are producing. That is
  // why we use Sym.StOther.
  if ((Sym.StOther & 0x3) == STV_DEFAULT)
    return true;

  // If we are allowed to break address equality of functions, defining
  // a plt entry will allow the program to call the function in the
  // .so, but the .so and the executable will not agree on the address
  // of the function. Similar logic for objects.
  return ((Sym.isFunc() && Config->IgnoreFunctionAddressEquality) ||
          (Sym.isObject() && Config->IgnoreDataAddressEquality));
}

// The reason we have to do this early scan is as follows
// * To mmap the output file, we need to know the size
// * For that, we need to know how many dynamic relocs we will have.
// It might be possible to avoid this by outputting the file with write:
// * Write the allocated output sections, computing addresses.
// * Apply relocations, recording which ones require a dynamic reloc.
// * Write the dynamic relocations.
// * Write the rest of the file.
// This would have some drawbacks. For example, we would only know if .rela.dyn
// is needed after applying relocations. If it is, it will go after rw and rx
// sections. Given that it is ro, we will need an extra PT_LOAD. This
// complicates things for the dynamic linker and means we would have to reserve
// space for the extra PT_LOAD even if we end up not using it.
template <class ELFT, class RelTy>
static void processRelocAux(InputSectionBase &Sec, RelExpr Expr, RelType Type,
                            uint64_t Offset, Symbol &Sym, const RelTy &Rel,
                            int64_t Addend) {
  if (isStaticLinkTimeConstant(Expr, Type, Sym, Sec, Offset)) {
    Sec.Relocations.push_back({Expr, Type, Offset, Addend, &Sym});
    return;
  }
  bool CanWrite = (Sec.Flags & SHF_WRITE) || !Config->ZText;
  if (CanWrite) {
    // R_GOT refers to a position in the got, even if the symbol is preemptible.
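    // For example, on x86-64 with -pie, an R_X86_64_64 in a writable section
    // against a non-preemptible symbol becomes a relative dynamic relocation
    // (R_X86_64_RELATIVE), while the same relocation against a preemptible
    // symbol is emitted as a symbolic dynamic relocation via getDynRel().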
    bool IsPreemptibleValue = Sym.IsPreemptible && Expr != R_GOT;

    if (!IsPreemptibleValue) {
      addRelativeReloc(&Sec, Offset, &Sym, Addend, Expr, Type);
      return;
    } else if (RelType Rel = Target->getDynRel(Type)) {
      In.RelaDyn->addReloc(Rel, &Sec, Offset, &Sym, Addend, R_ADDEND, Type);

      // The MIPS ABI turns the use of GOT and dynamic relocations inside out.
      // While the regular ABI uses dynamic relocations to fill up GOT entries,
      // the MIPS ABI requires the dynamic linker to fill up GOT entries using
      // a specially sorted dynamic symbol table. This affects even dynamic
      // relocations against symbols which do not explicitly require the
      // creation of GOT entries, i.e. do not have any GOT-relocations. So if
      // a preemptible symbol has a dynamic relocation we anyway have
      // to create a GOT entry for it.
      // If a non-preemptible symbol has a dynamic relocation against it,
      // the dynamic linker takes its st_value, adds the offset and writes
      // down the result of the dynamic relocation. In case of a preemptible
      // symbol, the dynamic linker performs symbol resolution, writes the
      // symbol value to the GOT entry and reads the GOT entry when it needs
      // to perform a dynamic relocation.
      // ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf p.4-19
      if (Config->EMachine == EM_MIPS)
        In.MipsGot->addEntry(*Sec.File, Sym, Addend, Expr);
      return;
    }
  }

  // If the relocation is to a weak undef, and we are producing
  // an executable, give up on it and produce a non preemptible 0.
  if (!Config->Shared && Sym.isUndefWeak()) {
    Sec.Relocations.push_back({Expr, Type, Offset, Addend, &Sym});
    return;
  }

  if (!CanWrite && (Config->Pic && !isRelExpr(Expr))) {
    error(
        "can't create dynamic relocation " + toString(Type) + " against " +
        (Sym.getName().empty() ? "local symbol" : "symbol: " + toString(Sym)) +
        " in readonly segment; recompile object files with -fPIC "
        "or pass '-Wl,-z,notext' to allow text relocations in the output" +
        getLocation(Sec, Sym, Offset));
    return;
  }

  // Copy relocations are only possible if we are creating an executable.
  if (Config->Shared) {
    errorOrWarn("relocation " + toString(Type) +
                " cannot be used against symbol " + toString(Sym) +
                "; recompile with -fPIC" + getLocation(Sec, Sym, Offset));
    return;
  }

  // If the symbol is undefined we already reported any relevant errors.
  if (Sym.isUndefined())
    return;

  if (!canDefineSymbolInExecutable(Sym)) {
    error("cannot preempt symbol: " + toString(Sym) +
          getLocation(Sec, Sym, Offset));
    return;
  }

  if (Sym.isObject()) {
    // Produce a copy relocation.
    if (auto *SS = dyn_cast<SharedSymbol>(&Sym)) {
      if (!Config->ZCopyreloc)
        error("unresolvable relocation " + toString(Type) +
              " against symbol '" + toString(*SS) +
              "'; recompile with -fPIC or remove '-z nocopyreloc'" +
              getLocation(Sec, Sym, Offset));
      addCopyRelSymbol<ELFT>(*SS);
    }
    Sec.Relocations.push_back({Expr, Type, Offset, Addend, &Sym});
    return;
  }

  if (Sym.isFunc()) {
    // This handles a non PIC program call to a function in a shared library.
    // In an ideal world, we could just report an error saying the relocation
    // can overflow at runtime. In the real world with glibc, crt1.o has a
    // R_X86_64_PC32 pointing to libc.so.
    //
    // The general idea on how to handle such cases is to create a PLT entry
    // and use that as the function value.
    //
    // For the static linking part, we just return a plt expr and everything
    // else will use the PLT entry as the address.
    //
    // The remaining problem is making sure pointer equality still works. We
    // need the help of the dynamic linker for that. We let it know that we
    // have a direct reference to a so symbol by creating an undefined symbol
    // with a non zero st_value. Seeing that, the dynamic linker resolves the
    // symbol to the value of the symbol we created. This is true even for got
    // entries, so pointer equality is maintained. To avoid an infinite loop,
    // the only entry that points to the real function is a dedicated got
    // entry used by the plt. That is identified by special relocation types
    // (R_X86_64_JUMP_SLOT, R_386_JMP_SLOT, etc).

    // For a position independent executable on i386, the plt entry requires
    // ebx to be set. This causes two problems:
    // * If some code has a direct reference to a function, it was probably
    //   compiled without -fPIE/-fPIC and doesn't maintain ebx.
    // * If a library definition gets preempted to the executable, it will
    //   have the wrong ebx value.
    if (Config->Pie && Config->EMachine == EM_386)
      errorOrWarn("symbol '" + toString(Sym) +
                  "' cannot be preempted; recompile with -fPIE" +
                  getLocation(Sec, Sym, Offset));
    if (!Sym.isInPlt())
      addPltEntry<ELFT>(In.Plt, In.GotPlt, In.RelaPlt, Target->PltRel, Sym);
    if (!Sym.isDefined())
      replaceWithDefined(
          Sym, In.Plt,
          Target->PltHeaderSize + Target->PltEntrySize * Sym.PltIndex, 0);
    Sym.NeedsPltAddr = true;
    Sec.Relocations.push_back({Expr, Type, Offset, Addend, &Sym});
    return;
  }

  errorOrWarn("symbol '" + toString(Sym) + "' has no type" +
              getLocation(Sec, Sym, Offset));
}

struct IRelativeReloc {
  RelType Type;
  InputSectionBase *Sec;
  uint64_t Offset;
  Symbol *Sym;
};

static std::vector<IRelativeReloc> IRelativeRelocs;

template <class ELFT, class RelTy>
static void scanReloc(InputSectionBase &Sec, OffsetGetter &GetOffset, RelTy *&I,
                      RelTy *End) {
  const RelTy &Rel = *I;
  Symbol &Sym = Sec.getFile<ELFT>()->getRelocTargetSym(Rel);
  RelType Type;

  // Deal with MIPS oddity.
  if (Config->MipsN32Abi) {
    Type = getMipsN32RelType(I, End);
  } else {
    Type = Rel.getType(Config->IsMips64EL);
    ++I;
  }

  // Get an offset in an output section this relocation is applied to.
  uint64_t Offset = GetOffset.get(Rel.r_offset);
  if (Offset == uint64_t(-1))
    return;

  // Skip if the target symbol is an erroneous undefined symbol.
  if (maybeReportUndefined(Sym, Sec, Rel.r_offset))
    return;

  const uint8_t *RelocatedAddr = Sec.data().begin() + Rel.r_offset;
  RelExpr Expr = Target->getRelExpr(Type, Sym, RelocatedAddr);

  // Ignore "hint" relocations because they are only markers for relaxation.
  if (oneof<R_HINT, R_NONE>(Expr))
    return;

  // We can separate the small code model relocations into 2 categories:
  // 1) Those that access the compiler generated .toc sections.
  // 2) Those that access the linker allocated got entries.
  // lld allocates got entries to symbols on demand. Since we don't try to sort
  // the got entries in any way, we don't have to track which objects have
  // got-based small code model relocs. The .toc sections get placed after the
  // end of the linker allocated .got section and we do sort those so sections
  // addressed with small code model relocations come first.
  if (Config->EMachine == EM_PPC64 && isPPC64SmallCodeModelTocReloc(Type))
    Sec.File->PPC64SmallCodeModelTocRelocs = true;

  if (Sym.isGnuIFunc() && !Config->ZText && Config->WarnIfuncTextrel) {
    warn("using ifunc symbols when text relocations are allowed may produce "
         "a binary that will segfault, if the object file is linked with "
         "an old version of glibc (glibc 2.28 and earlier). If this applies "
         "to you, consider recompiling the object files without -fPIC and "
         "without -Wl,-z,notext option. Use -no-warn-ifunc-textrel to "
         "turn off this warning." +
         getLocation(Sec, Sym, Offset));
  }

  // Relax relocations.
  //
  // If we know that a PLT entry will be resolved within the same ELF module,
  // we can skip PLT access and directly jump to the destination function. For
  // example, if we are linking a main executable, all dynamic symbols that can
  // be resolved within the executable will actually be resolved that way at
  // runtime, because the main executable is always at the beginning of a
  // search list. We can leverage that fact.
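  // (For instance, on x86-64 a call relocation whose expression is R_PLT_PC
  // against a non-preemptible, defined symbol is turned back into a plain
  // R_PC by fromPlt() below, so the call goes directly to the function
  // instead of through its PLT entry.)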
  if (!Sym.IsPreemptible && !Sym.isGnuIFunc()) {
    if (Expr == R_GOT_PC && !isAbsoluteValue(Sym))
      Expr = Target->adjustRelaxExpr(Type, RelocatedAddr, Expr);
    else
      Expr = fromPlt(Expr);
  }

  // If the relocation does not emit a GOT or GOTPLT entry but its computation
  // uses their addresses, we need GOT or GOTPLT to be created.
  //
  // The 4 types that are relative to GOTPLT are all x86 and x86-64 specific.
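  // (For example, a reference to _GLOBAL_OFFSET_TABLE_, which on these
  // targets is defined relative to the start of .got.plt, falls into the
  // GOTPLT-relative category and forces .got.plt to be retained.)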
  if (oneof<R_GOTPLTONLY_PC, R_GOTPLTREL, R_GOTPLT, R_TLSGD_GOTPLT>(Expr)) {
    In.GotPlt->HasGotPltOffRel = true;
  } else if (oneof<R_GOTONLY_PC, R_GOTREL, R_PPC_TOC>(Expr)) {
    In.Got->HasGotOffRel = true;
  }

  // Read an addend.
  int64_t Addend = computeAddend<ELFT>(Rel, End, Sec, Expr, Sym.isLocal());

  // Process some TLS relocations, including relaxing TLS relocations.
  // Note that this function does not handle all TLS relocations.
  if (unsigned Processed =
          handleTlsRelocation<ELFT>(Type, Sym, Sec, Offset, Addend, Expr)) {
    I += (Processed - 1);
    return;
  }

  // Non-preemptible ifuncs require special handling. First, handle the usual
  // case where the symbol isn't one of these.
  if (!Sym.isGnuIFunc() || Sym.IsPreemptible) {
    // If a relocation needs PLT, we create PLT and GOTPLT slots for the
    // symbol.
    if (needsPlt(Expr) && !Sym.isInPlt())
      addPltEntry<ELFT>(In.Plt, In.GotPlt, In.RelaPlt, Target->PltRel, Sym);

    // Create a GOT slot if a relocation needs GOT.
    if (needsGot(Expr)) {
      if (Config->EMachine == EM_MIPS) {
        // The MIPS ABI has special rules to process GOT entries and doesn't
        // require relocation entries for them. A special case is TLS
        // relocations. In that case the dynamic loader applies dynamic
        // relocations to initialize TLS GOT entries.
        // See "Global Offset Table" in Chapter 5 in the following document
        // for detailed description:
        // ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf
        In.MipsGot->addEntry(*Sec.File, Sym, Addend, Expr);
      } else if (!Sym.isInGot()) {
        addGotEntry(Sym);
      }
    }
  } else {
    // Handle a reference to a non-preemptible ifunc. These are special in a
    // few ways:
    //
    // - Unlike most non-preemptible symbols, non-preemptible ifuncs do not
    //   have a fixed value. But assuming that all references to the ifunc are
    //   GOT-generating or PLT-generating, the handling of an ifunc is
    //   relatively straightforward. We create a PLT entry in Iplt, which is
    //   usually at the end of .plt, which makes an indirect call using a
    //   matching GOT entry in IgotPlt, which is usually at the end of
    //   .got.plt. The GOT entry is relocated using an IRELATIVE relocation in
    //   RelaIplt, which is usually at the end of .rela.plt. Unlike most
    //   relocations in .rela.plt, which may be evaluated lazily without
    //   -z now, dynamic loaders evaluate IRELATIVE relocs eagerly, which
    //   means that for IRELATIVE relocs only, GOT-generating relocations can
    //   point directly to .got.plt without requiring a separate GOT entry.
    //
    // - Despite the fact that an ifunc does not have a fixed value, compilers
    //   that are not passed -fPIC will assume that they do, and will emit
    //   direct (non-GOT-generating, non-PLT-generating) relocations to the
    //   symbol. This means that if a direct relocation to the symbol is
    //   seen, the linker must set a value for the symbol, and this value must
    //   be consistent no matter what type of reference is made to the symbol.
    //   This can be done by creating a PLT entry for the symbol in the way
    //   described above and making it canonical, that is, making all
    //   references point to the PLT entry instead of the resolver. In lld we
    //   also store the address of the PLT entry in the dynamic symbol table,
    //   which means that the symbol will also have the same value in other
    //   modules. Because the value loaded from the GOT needs to be consistent
    //   with the value computed using a direct relocation, a non-preemptible
    //   ifunc may end up with two GOT entries, one in .got.plt that points to
    //   the address returned by the resolver and is used only by the PLT
    //   entry, and another in .got that points to the PLT entry and is used
    //   by GOT-generating relocations.
    //
    // - The fact that these symbols do not have a fixed value makes them an
    //   exception to the general rule that a statically linked executable
    //   does not require any form of dynamic relocation. To handle these
    //   relocations correctly, the IRELATIVE relocations are stored in an
    //   array which a statically linked executable's startup code must
    //   enumerate using the linker-defined symbols __rela?_iplt_{start,end}.
    //
    // - An absolute relocation to a non-preemptible ifunc (such as a global
    //   variable containing a pointer to the ifunc) needs to be relocated in
    //   the exact same way as a GOT entry, so we can avoid needing to make
    //   the PLT entry canonical by translating such relocations into
    //   IRELATIVE relocations in the RelaIplt.
    if (!Sym.isInPlt()) {
      // Create PLT and GOTPLT slots for the symbol.
      Sym.IsInIplt = true;

      // Create a copy of the symbol to use as the target of the IRELATIVE
      // relocation in the IgotPlt. This is in case we make the PLT canonical
      // later, which would overwrite the original symbol.
      //
      // FIXME: Creating a copy of the symbol here is a bit of a hack. All
      // that's really needed to create the IRELATIVE is the section and
      // value, so ideally we should just need to copy those.
      auto *DirectSym = make<Defined>(cast<Defined>(Sym));
      addPltEntry<ELFT>(In.Iplt, In.IgotPlt, In.RelaIplt, Target->IRelativeRel,
                        *DirectSym);
      Sym.PltIndex = DirectSym->PltIndex;
    }
    if (Expr == R_ABS && Addend == 0 && (Sec.Flags & SHF_WRITE)) {
      // We might be able to represent this as an IRELATIVE. But we don't know
      // yet whether some later relocation will make the symbol point to a
      // canonical PLT, which would make this either a dynamic RELATIVE (PIC)
      // or static (non-PIC) relocation. So we keep a record of the
      // information required to process the relocation, and after
      // scanRelocs() has been called on all relocations, the relocation is
      // resolved by addIRelativeRelocs().
      IRelativeRelocs.push_back({Type, &Sec, Offset, &Sym});
      return;
    }
    if (needsGot(Expr)) {
      // Redirect GOT accesses to point to the Igot.
      //
      // This field is also used to keep track of whether we ever needed a GOT
      // entry. If we did and we make the PLT canonical later, we'll need to
      // create a GOT entry pointing to the PLT entry for Sym.
      Sym.GotInIgot = true;
    } else if (!needsPlt(Expr)) {
      // Make the ifunc's PLT entry canonical by changing the value of its
      // symbol to redirect all references to point to it.
      unsigned EntryOffset = Sym.PltIndex * Target->PltEntrySize;
      if (Config->ZRetpolineplt)
        EntryOffset += Target->PltHeaderSize;

      auto &D = cast<Defined>(Sym);
      D.Section = In.Iplt;
      D.Value = EntryOffset;
      D.Size = 0;
      // It's important to set the symbol type here so that dynamic loaders
      // don't try to call the PLT as if it were an ifunc resolver.
      D.Type = STT_FUNC;

      if (Sym.GotInIgot) {
        // We previously encountered a GOT generating reference that we
        // redirected to the Igot. Now that the PLT entry is canonical we must
        // clear the redirection to the Igot and add a GOT entry. As we've
        // changed the symbol type to STT_FUNC future GOT generating
        // references will naturally use this GOT entry.
        //
        // We don't need to worry about creating a MIPS GOT here because
        // ifuncs aren't a thing on MIPS.
        Sym.GotInIgot = false;
        addGotEntry(Sym);
      }
    }
  }

  processRelocAux<ELFT>(Sec, Expr, Type, Offset, Sym, Rel, Addend);
}

template <class ELFT, class RelTy>
static void scanRelocs(InputSectionBase &Sec, ArrayRef<RelTy> Rels) {
  OffsetGetter GetOffset(Sec);

  // Not all relocations end up in Sec.Relocations, but a lot do.
  Sec.Relocations.reserve(Rels.size());

  for (auto I = Rels.begin(), End = Rels.end(); I != End;)
    scanReloc<ELFT>(Sec, GetOffset, I, End);

  // Sort relocations by offset to binary search for R_RISCV_PCREL_HI20
  if (Config->EMachine == EM_RISCV)
    llvm::stable_sort(Sec.Relocations,
                      [](const Relocation &LHS, const Relocation &RHS) {
                        return LHS.Offset < RHS.Offset;
                      });
}

template <class ELFT> void elf::scanRelocations(InputSectionBase &S) {
  if (S.AreRelocsRela)
    scanRelocs<ELFT>(S, S.relas<ELFT>());
  else
    scanRelocs<ELFT>(S, S.rels<ELFT>());
}

// Figure out which representation to use for any absolute relocs to
// non-preemptible ifuncs that we visited during scanRelocs().
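// If the symbol is still an ifunc, the reloc can stay an IRELATIVE in
// RelaIplt; if it was made a canonical PLT entry (its type became STT_FUNC),
// the reloc degrades to a relative dynamic relocation under PIC, or to a
// plain static R_ABS relocation otherwise.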
void elf::addIRelativeRelocs() {
  for (IRelativeReloc &R : IRelativeRelocs) {
    if (R.Sym->Type == STT_GNU_IFUNC)
      In.RelaIplt->addReloc(
          {Target->IRelativeRel, R.Sec, R.Offset, true, R.Sym, 0});
    else if (Config->Pic)
      addRelativeReloc(R.Sec, R.Offset, R.Sym, 0, R_ABS, R.Type);
    else
      R.Sec->Relocations.push_back({R_ABS, R.Type, R.Offset, 0, R.Sym});
  }
  IRelativeRelocs.clear();
}

static bool mergeCmp(const InputSection *A, const InputSection *B) {
  // std::merge requires a strict weak ordering.
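  // (std::merge is stable and prefers elements from the first range when
  // neither compares less, so existing InputSections keep their relative
  // positions unless one of the thunk-specific cases below says otherwise.)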
  if (A->OutSecOff < B->OutSecOff)
    return true;

  if (A->OutSecOff == B->OutSecOff) {
    auto *TA = dyn_cast<ThunkSection>(A);
    auto *TB = dyn_cast<ThunkSection>(B);

    // Check if Thunk is immediately before any specific Target
    // InputSection, for example Mips LA25 Thunks.
    if (TA && TA->getTargetInputSection() == B)
      return true;

    // Place Thunk Sections without specific targets before
    // non-Thunk Sections.
    if (TA && !TB && !TA->getTargetInputSection())
      return true;
  }

  return false;
}

// Call Fn on every executable InputSection accessed via the linker script
// InputSectionDescription::Sections.
static void forEachInputSectionDescription(
    ArrayRef<OutputSection *> OutputSections,
    llvm::function_ref<void(OutputSection *, InputSectionDescription *)> Fn) {
  for (OutputSection *OS : OutputSections) {
    if (!(OS->Flags & SHF_ALLOC) || !(OS->Flags & SHF_EXECINSTR))
      continue;
    for (BaseCommand *BC : OS->SectionCommands)
      if (auto *ISD = dyn_cast<InputSectionDescription>(BC))
        Fn(OS, ISD);
  }
}

// Thunk Implementation
//
// Thunks (sometimes called stubs, veneers or branch islands) are small pieces
// of code that the linker inserts in between a caller and a callee. The
// thunks are added at link time rather than compile time as the decision on
// whether a thunk is needed, such as the caller and callee being out of
// range, can only be made at link time.
//
// It is straightforward to tell given the current state of the program when a
// thunk is needed for a particular call. The more difficult part is that
// the thunk needs to be placed in the program such that the caller can reach
// the thunk and the thunk can reach the callee; furthermore, adding thunks to
// the program alters addresses, which can mean more thunks etc.
//
// In lld we have a synthetic ThunkSection that can hold many Thunks.
// The decision to have a ThunkSection act as a container means that we can
// more easily handle the most common case of a single block of contiguous
// Thunks by inserting just a single ThunkSection.
//
// The implementation of Thunks in lld is split across these areas:
// Relocations.cpp : Framework for creating and placing thunks
// Thunks.cpp : The code generated for each supported thunk
// Target.cpp : Target specific hooks that the framework uses to decide when
//              a thunk is used
// Synthetic.cpp : Implementation of ThunkSection
// Writer.cpp : Iteratively call framework until no more Thunks added
//
// Thunk placement requirements:
// Mips LA25 thunks. These must be placed immediately before the callee
// section. We can assume that the caller is in range of the Thunk. These are
// modelled by Thunks that return the section they must precede with
// getTargetInputSection().
//
// ARM interworking and range extension thunks. These thunks must be placed
// within range of the caller. All implemented ARM thunks can always reach the
// callee as they use an indirect jump via a register that has no range
// restrictions.
//
// Thunk placement algorithm:
// For Mips LA25 ThunkSections; the placement is explicit, it has to be before
// getTargetInputSection().
//
// For thunks that must be placed within range of the caller there are many
// possible choices given that the maximum range from the caller is usually
// much larger than the average InputSection size. Desirable properties
// include:
// - Maximize reuse of thunks by multiple callers
// - Minimize number of ThunkSections to simplify insertion
// - Handle impact of already added Thunks on addresses
// - Simple to understand and implement
//
// In lld for the first pass, we pre-create one or more ThunkSections per
// InputSectionDescription at Target specific intervals. A ThunkSection is
// placed so that the estimated end of the ThunkSection is within range of the
// start of the InputSectionDescription or the previous ThunkSection. For
// example:
// InputSectionDescription
// Section 0
// ...
// Section N
// ThunkSection 0
// Section N + 1
// ...
// Section N + K
// Thunk Section 1
//
// The intention is that we can add a Thunk to a ThunkSection that is well
// spaced enough to service a number of callers without having to do a lot
// of work. An important principle is that it is not an error if a Thunk
// cannot be placed in a pre-created ThunkSection; when this happens we create
// a new ThunkSection placed next to the caller. This allows us to handle the
// vast majority of thunks simply, but also handle rare cases where the branch
// range is smaller than the target specific spacing.
//
// The algorithm is expected to create all the thunks that are needed in a
// single pass, with a small number of programs needing a second pass due to
// the insertion of thunks in the first pass increasing the offset between
// callers and callees that were only just in range.
//
// A consequence of allowing new ThunkSections to be created outside of the
// pre-created ThunkSections is that in rare cases calls to Thunks that were
// in range in pass K are out of range in some pass > K due to the insertion
// of more Thunks in between the caller and callee. When this happens we
// retarget the relocation back to the original target and create another
// Thunk.

// Remove ThunkSections that are empty; this should only be the initial set
// precreated on pass 0.
//
// Insert the Thunks for OutputSection OS into their designated place
// in the Sections vector, and recalculate the InputSection output section
// offsets.
// This may invalidate any output section offsets stored outside of
// InputSection.
void ThunkCreator::mergeThunks(ArrayRef<OutputSection *> OutputSections) {
  forEachInputSectionDescription(
      OutputSections, [&](OutputSection *OS, InputSectionDescription *ISD) {
        if (ISD->ThunkSections.empty())
          return;

        // Remove any zero sized precreated Thunks.
        llvm::erase_if(ISD->ThunkSections,
                       [](const std::pair<ThunkSection *, uint32_t> &TS) {
                         return TS.first->getSize() == 0;
                       });

        // ISD->ThunkSections contains all created ThunkSections, including
        // those inserted in previous passes. Extract the Thunks created this
        // pass and order them in ascending OutSecOff.
        std::vector<ThunkSection *> NewThunks;
        for (const std::pair<ThunkSection *, uint32_t> TS : ISD->ThunkSections)
          if (TS.second == Pass)
            NewThunks.push_back(TS.first);
        llvm::stable_sort(NewThunks,
                          [](const ThunkSection *A, const ThunkSection *B) {
                            return A->OutSecOff < B->OutSecOff;
                          });

        // Merge sorted vectors of Thunks and InputSections by OutSecOff
        std::vector<InputSection *> Tmp;
        Tmp.reserve(ISD->Sections.size() + NewThunks.size());

        std::merge(ISD->Sections.begin(), ISD->Sections.end(),
                   NewThunks.begin(), NewThunks.end(), std::back_inserter(Tmp),
                   mergeCmp);

        ISD->Sections = std::move(Tmp);
      });
}

// Find or create a ThunkSection within the InputSectionDescription (ISD) that
// is in range of Src. An ISD maps to a range of InputSections described by a
// linker script section pattern such as { .text .text.* }.
ThunkSection *ThunkCreator::getISDThunkSec(OutputSection *OS, InputSection *IS,
                                           InputSectionDescription *ISD,
                                           uint32_t Type, uint64_t Src) {
  for (std::pair<ThunkSection *, uint32_t> TP : ISD->ThunkSections) {
    ThunkSection *TS = TP.first;
    uint64_t TSBase = OS->Addr + TS->OutSecOff;
    uint64_t TSLimit = TSBase + TS->getSize();
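    // Check the edge of the ThunkSection that is farther from Src; if that
    // edge is reachable then every Thunk already placed in the section
    // should be reachable as well.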
    if (Target->inBranchRange(Type, Src, (Src > TSLimit) ? TSBase : TSLimit))
      return TS;
  }

  // No suitable ThunkSection exists. This can happen when there is a branch
  // with lower range than the ThunkSection spacing or when there are too
  // many Thunks. Create a new ThunkSection as close to the InputSection as
  // possible. Error if InputSection is so large we cannot place ThunkSection
  // anywhere in Range.
  uint64_t ThunkSecOff = IS->OutSecOff;
  if (!Target->inBranchRange(Type, Src, OS->Addr + ThunkSecOff)) {
    ThunkSecOff = IS->OutSecOff + IS->getSize();
    if (!Target->inBranchRange(Type, Src, OS->Addr + ThunkSecOff))
      fatal("InputSection too large for range extension thunk " +
            IS->getObjMsg(Src - (OS->Addr + IS->OutSecOff)));
  }
  return addThunkSection(OS, ISD, ThunkSecOff);
}

// Add a Thunk that needs to be placed in a ThunkSection that immediately
// precedes its Target.
ThunkSection *ThunkCreator::getISThunkSec(InputSection *IS) {
  ThunkSection *TS = ThunkedSections.lookup(IS);
  if (TS)
    return TS;

  // Find InputSectionRange within Target Output Section (TOS) that the
  // InputSection (IS) that we need to precede is in.
  OutputSection *TOS = IS->getParent();
  for (BaseCommand *BC : TOS->SectionCommands) {
    auto *ISD = dyn_cast<InputSectionDescription>(BC);
    if (!ISD || ISD->Sections.empty())
      continue;

    InputSection *First = ISD->Sections.front();
    InputSection *Last = ISD->Sections.back();

    if (IS->OutSecOff < First->OutSecOff || Last->OutSecOff < IS->OutSecOff)
      continue;

    TS = addThunkSection(TOS, ISD, IS->OutSecOff);
    ThunkedSections[IS] = TS;
    return TS;
  }

  return nullptr;
}

// Create one or more ThunkSections per OS that can be used to place Thunks.
// We attempt to place the ThunkSections using the following desirable
// properties:
// - Within range of the maximum number of callers
// - Minimise the number of ThunkSections
//
// We follow a simple but conservative heuristic to place ThunkSections at
// offsets that are multiples of a Target specific branch range.
// For an InputSectionDescription that is smaller than the range, a single
// ThunkSection at the end of the range will do.
//
// For an InputSectionDescription that is more than twice the size of the
// range, we place the last ThunkSection at range bytes from the end of the
// InputSectionDescription in order to increase the likelihood that the
// distance from a thunk to its target will be sufficiently small to
// allow for the creation of a short thunk.
void ThunkCreator::createInitialThunkSections(
    ArrayRef<OutputSection *> OutputSections) {
  uint32_t ThunkSectionSpacing = Target->getThunkSectionSpacing();

  forEachInputSectionDescription(
      OutputSections, [&](OutputSection *OS, InputSectionDescription *ISD) {
        if (ISD->Sections.empty())
          return;

        uint32_t ISDBegin = ISD->Sections.front()->OutSecOff;
        uint32_t ISDEnd =
            ISD->Sections.back()->OutSecOff + ISD->Sections.back()->getSize();
        uint32_t LastThunkLowerBound = -1;
        if (ISDEnd - ISDBegin > ThunkSectionSpacing * 2)
          LastThunkLowerBound = ISDEnd - ThunkSectionSpacing;

        uint32_t ISLimit;
        uint32_t PrevISLimit = ISDBegin;
        uint32_t ThunkUpperBound = ISDBegin + ThunkSectionSpacing;

        for (const InputSection *IS : ISD->Sections) {
          ISLimit = IS->OutSecOff + IS->getSize();
          if (ISLimit > ThunkUpperBound) {
            addThunkSection(OS, ISD, PrevISLimit);
            ThunkUpperBound = PrevISLimit + ThunkSectionSpacing;
          }
          if (ISLimit > LastThunkLowerBound)
            break;
          PrevISLimit = ISLimit;
        }
        addThunkSection(OS, ISD, ISLimit);
      });
}

ThunkSection *ThunkCreator::addThunkSection(OutputSection *OS,
                                            InputSectionDescription *ISD,
                                            uint64_t Off) {
  auto *TS = make<ThunkSection>(OS, Off);
  ISD->ThunkSections.push_back({TS, Pass});
  return TS;
}

std::pair<Thunk *, bool> ThunkCreator::getThunk(Symbol &Sym, RelType Type,
                                                uint64_t Src) {
  std::vector<Thunk *> *ThunkVec = nullptr;

  // We use a (section, offset) pair to find the thunk position if possible so
  // that we create only one thunk for aliased symbols or ICFed sections.
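  // (For example, if two symbols are aliases of the same function, or two
  // sections were folded by ICF, both resolve to the same (section, offset)
  // key and therefore share a single range-extension thunk.)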
  if (auto *D = dyn_cast<Defined>(&Sym))
    if (!D->isInPlt() && D->Section)
      ThunkVec = &ThunkedSymbolsBySection[{D->Section->Repl, D->Value}];
  if (!ThunkVec)
    ThunkVec = &ThunkedSymbols[&Sym];

  // Check existing Thunks for Sym to see if they can be reused
  for (Thunk *T : *ThunkVec)
    if (T->isCompatibleWith(Type) &&
        Target->inBranchRange(Type, Src, T->getThunkTargetSym()->getVA()))
      return std::make_pair(T, false);

  // No existing compatible Thunk in range, create a new one
  Thunk *T = addThunk(Type, Sym);
  ThunkVec->push_back(T);
  return std::make_pair(T, true);
}

// Return true if the relocation target is an in range Thunk.
// Return false if the relocation is not to a Thunk. If the relocation target
// was originally to a Thunk, but is no longer in range we revert the
// relocation back to its original non-Thunk target.
bool ThunkCreator::normalizeExistingThunk(Relocation &Rel, uint64_t Src) {
  if (Thunk *T = Thunks.lookup(Rel.Sym)) {
    if (Target->inBranchRange(Rel.Type, Src, Rel.Sym->getVA()))
      return true;
    Rel.Sym = &T->Destination;
    if (Rel.Sym->isInPlt())
      Rel.Expr = toPlt(Rel.Expr);
  }
  return false;
}

// Process all relocations from the InputSections that have been assigned
// to InputSectionDescriptions and redirect through Thunks if needed. The
// function should be called iteratively until it returns false.
//
// PreConditions:
// All InputSections that may need a Thunk are reachable from
// OutputSectionCommands.
//
// All OutputSections have an address and all InputSections have an offset
// within the OutputSection.
//
// The offsets between caller (relocation place) and callee
// (relocation target) will not be modified outside of createThunks().
//
// PostConditions:
// If return value is true then ThunkSections have been inserted into
// OutputSections. All relocations that needed a Thunk based on the
// information available to createThunks() on entry have been redirected to a
// Thunk. Note that adding Thunks changes offsets between caller and callee so
// more Thunks may be required.
//
// If return value is false then no more Thunks are needed, and createThunks
// has made no changes. If the target requires range extension thunks,
// currently ARM, then any future change in offset between caller and callee
// risks a relocation out of range error.
bool ThunkCreator::createThunks(ArrayRef<OutputSection *> OutputSections) {
  bool AddressesChanged = false;

  if (Pass == 0 && Target->getThunkSectionSpacing())
    createInitialThunkSections(OutputSections);

  // With Thunk Size much smaller than branch range we expect to
  // converge quickly; if we get to 10 something has gone wrong.
  if (Pass == 10)
    fatal("thunk creation not converged");

  // Create all the Thunks and insert them into synthetic ThunkSections. The
  // ThunkSections are later inserted back into InputSectionDescriptions.
  // We separate the creation of ThunkSections from the insertion of the
  // ThunkSections as ThunkSections are not always inserted into the same
  // InputSectionDescription as the caller.
  forEachInputSectionDescription(
      OutputSections, [&](OutputSection *OS, InputSectionDescription *ISD) {
        for (InputSection *IS : ISD->Sections)
          for (Relocation &Rel : IS->Relocations) {
            uint64_t Src = IS->getVA(Rel.Offset);

            // If we are a relocation to an existing Thunk, check if it is
            // still in range. If not then Rel will be altered to point to its
            // original target so another Thunk can be generated.
            if (Pass > 0 && normalizeExistingThunk(Rel, Src))
              continue;

            if (!Target->needsThunk(Rel.Expr, Rel.Type, IS->File, Src,
                                    *Rel.Sym))
              continue;

            Thunk *T;
            bool IsNew;
            std::tie(T, IsNew) = getThunk(*Rel.Sym, Rel.Type, Src);

            if (IsNew) {
              // Find or create a ThunkSection for the new Thunk
              ThunkSection *TS;
              if (auto *TIS = T->getTargetInputSection())
                TS = getISThunkSec(TIS);
              else
                TS = getISDThunkSec(OS, IS, ISD, Rel.Type, Src);
              TS->addThunk(T);
              Thunks[T->getThunkTargetSym()] = T;
            }

            // Redirect relocation to Thunk, we never go via the PLT to a Thunk
            Rel.Sym = T->getThunkTargetSym();
            Rel.Expr = fromPlt(Rel.Expr);
          }

        for (auto &P : ISD->ThunkSections)
          AddressesChanged |= P.first->assignOffsets();
      });

  for (auto &P : ThunkedSections)
    AddressesChanged |= P.second->assignOffsets();

  // Merge all created synthetic ThunkSections back into OutputSection
  mergeThunks(OutputSections);
  ++Pass;
  return AddressesChanged;
}

template void elf::scanRelocations<ELF32LE>(InputSectionBase &);
template void elf::scanRelocations<ELF32BE>(InputSectionBase &);
template void elf::scanRelocations<ELF64LE>(InputSectionBase &);
template void elf::scanRelocations<ELF64BE>(InputSectionBase &);