2015-07-25 05:03:07 +08:00
|
|
|
//===- Writer.cpp ---------------------------------------------------------===//
|
|
|
|
//
|
2019-01-19 16:50:56 +08:00
|
|
|
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
|
|
|
// See https://llvm.org/LICENSE.txt for license information.
|
|
|
|
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
2015-07-25 05:03:07 +08:00
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2015-09-05 06:48:30 +08:00
|
|
|
#include "Writer.h"
|
2017-12-05 23:59:05 +08:00
|
|
|
#include "AArch64ErrataFix.h"
|
2019-09-16 17:38:38 +08:00
|
|
|
#include "ARMErrataFix.h"
|
2018-04-18 07:30:05 +08:00
|
|
|
#include "CallGraphSort.h"
|
2015-08-06 07:51:50 +08:00
|
|
|
#include "Config.h"
|
2016-02-12 05:17:59 +08:00
|
|
|
#include "LinkerScript.h"
|
2017-01-14 05:05:46 +08:00
|
|
|
#include "MapFile.h"
|
2015-09-22 05:38:08 +08:00
|
|
|
#include "OutputSections.h"
|
2016-05-25 04:24:43 +08:00
|
|
|
#include "Relocations.h"
|
2015-08-06 07:24:46 +08:00
|
|
|
#include "SymbolTable.h"
|
2017-12-10 00:56:18 +08:00
|
|
|
#include "Symbols.h"
|
2016-11-02 04:28:21 +08:00
|
|
|
#include "SyntheticSections.h"
|
2015-09-23 02:19:46 +08:00
|
|
|
#include "Target.h"
|
2021-04-01 03:48:18 +08:00
|
|
|
#include "lld/Common/Arrays.h"
|
2022-01-21 03:53:18 +08:00
|
|
|
#include "lld/Common/CommonLinkerContext.h"
|
2019-03-12 00:30:55 +08:00
|
|
|
#include "lld/Common/Filesystem.h"
|
2018-03-01 01:38:19 +08:00
|
|
|
#include "lld/Common/Strings.h"
|
2015-11-12 17:52:08 +08:00
|
|
|
#include "llvm/ADT/StringMap.h"
|
2015-10-13 04:51:48 +08:00
|
|
|
#include "llvm/ADT/StringSwitch.h"
|
[Support] Move LLD's parallel algorithm wrappers to support
Essentially takes the lld/Common/Threads.h wrappers and moves them to
the llvm/Support/Parallel.h algorithm header.
The changes are:
- Remove policy parameter, since all clients use `par`.
- Rename the methods to `parallelSort` etc to match LLVM style, since
they are no longer C++17 pstl compatible.
- Move algorithms from llvm::parallel:: to llvm::, since they have
"parallel" in the name and are no longer overloads of the regular
algorithms.
- Add range overloads
- Use the sequential algorithm directly when 1 thread is requested
(skips task grouping)
- Fix the index type of parallelForEachN to size_t. Nobody in LLVM was
using any other parameter, and it made overload resolution hard for
for_each_n(par, 0, foo.size(), ...) because 0 is int, not size_t.
Remove Threads.h and update LLD for that.
This is a prerequisite for parallel public symbol processing in the PDB
library, which is in LLVM.
Reviewed By: MaskRay, aganea
Differential Revision: https://reviews.llvm.org/D79390
2020-05-05 11:03:19 +08:00
|
|
|
#include "llvm/Support/Parallel.h"
|
2019-04-17 06:45:14 +08:00
|
|
|
#include "llvm/Support/RandomNumberGenerator.h"
|
|
|
|
#include "llvm/Support/SHA1.h"
|
2020-01-29 00:05:13 +08:00
|
|
|
#include "llvm/Support/TimeProfiler.h"
|
2019-04-17 06:45:14 +08:00
|
|
|
#include "llvm/Support/xxhash.h"
|
2016-09-29 09:45:22 +08:00
|
|
|
#include <climits>
|
2015-07-25 05:03:07 +08:00
|
|
|
|
2020-04-07 21:48:18 +08:00
|
|
|
#define DEBUG_TYPE "lld"
|
|
|
|
|
2015-07-25 05:03:07 +08:00
|
|
|
using namespace llvm;
|
|
|
|
using namespace llvm::ELF;
|
|
|
|
using namespace llvm::object;
|
2016-10-10 17:39:26 +08:00
|
|
|
using namespace llvm::support;
|
|
|
|
using namespace llvm::support::endian;
|
2020-05-15 13:18:58 +08:00
|
|
|
using namespace lld;
|
|
|
|
using namespace lld::elf;
|
2015-07-25 05:03:07 +08:00
|
|
|
|
2015-08-06 07:24:46 +08:00
|
|
|
namespace {
// The writer writes a SymbolTable result to a file.
// This class drives the final link step: it owns the output buffer and the
// bookkeeping (file size, section header offset) needed to lay out and emit
// the ELF file. Method definitions live later in this file.
template <class ELFT> class Writer {
public:
  LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)

  // The output buffer is owned by the global error handler context; the
  // writer only holds a reference to it.
  Writer() : buffer(errorHandler().outputBuffer) {}

  // Entry point: performs the whole write, calling the private phases below.
  void run();

private:
  // Symbol-table preparation.
  void copyLocalSymbols();
  void addSectionSymbols();
  // Section ordering and layout.
  void sortSections();
  void resolveShfLinkOrder();
  void finalizeAddressDependentContent();
  void optimizeBasicBlockJumps();
  void sortInputSections();
  void finalizeSections();
  void checkExecuteOnly();
  void setReservedSymbolSections();

  // Program header construction (per output partition).
  SmallVector<PhdrEntry *, 0> createPhdrs(Partition &part);
  void addPhdrForSection(Partition &part, unsigned shType, unsigned pType,
                         unsigned pFlags);
  // File offset assignment; the "Binary" variants handle --oformat binary.
  void assignFileOffsets();
  void assignFileOffsetsBinary();
  void setPhdrs(Partition &part);
  void checkSections();
  void fixSectionAlignments();
  // Output emission.
  void openFile();
  void writeTrapInstr();
  void writeHeader();
  void writeSections();
  void writeSectionsBinary();
  void writeBuildId();

  // Memory buffer the output file is assembled in before being committed
  // to disk.
  std::unique_ptr<FileOutputBuffer> &buffer;

  // Linker-synthesized symbol definitions.
  void addRelIpltSymbols();
  void addStartEndSymbols();
  void addStartStopSymbols(OutputSection *sec);

  // Total size of the output file in bytes.
  uint64_t fileSize;
  // Offset of the section header table within the output file.
  uint64_t sectionHeaderOff;
};
} // anonymous namespace
|
|
|
|
|
2017-09-19 17:20:54 +08:00
|
|
|
static bool needsInterpSection() {
|
2020-01-17 04:00:24 +08:00
|
|
|
return !config->relocatable && !config->shared &&
|
|
|
|
!config->dynamicLinker.empty() && script->needsInterpSection();
|
2016-07-21 19:01:23 +08:00
|
|
|
}
|
|
|
|
|
2020-05-15 13:18:58 +08:00
|
|
|
template <class ELFT> void elf::writeResult() {
|
2020-01-29 00:05:13 +08:00
|
|
|
Writer<ELFT>().run();
|
|
|
|
}
|
2015-10-08 03:18:16 +08:00
|
|
|
|
2021-12-27 05:53:47 +08:00
|
|
|
// Drop PT_LOAD program headers that would cover zero bytes of the address
// space, and detach any output sections that pointed at them.
static void removeEmptyPTLoad(SmallVector<PhdrEntry *, 0> &phdrs) {
  // Stable-partition so every entry worth keeping precedes the empty
  // PT_LOADs; relative order of the kept entries is preserved.
  auto firstRemoved = std::stable_partition(
      phdrs.begin(), phdrs.end(), [](const PhdrEntry *entry) {
        if (entry->p_type != PT_LOAD)
          return true;
        if (!entry->firstSec)
          return false;
        uint64_t bytes =
            entry->lastSec->addr + entry->lastSec->size - entry->firstSec->addr;
        return bytes != 0;
      });

  // Clear OutputSection::ptLoad for sections contained in removed segments.
  DenseSet<PhdrEntry *> dropped(firstRemoved, phdrs.end());
  for (OutputSection *osec : outputSections)
    if (dropped.count(osec->ptLoad))
      osec->ptLoad = nullptr;

  phdrs.erase(firstRemoved, phdrs.end());
}
|
|
|
|
|
2020-05-15 13:18:58 +08:00
|
|
|
void elf::copySectionsIntoPartitions() {
|
2022-01-18 02:25:07 +08:00
|
|
|
SmallVector<InputSectionBase *, 0> newSections;
|
2019-06-08 01:57:58 +08:00
|
|
|
for (unsigned part = 2; part != partitions.size() + 1; ++part) {
|
|
|
|
for (InputSectionBase *s : inputSections) {
|
|
|
|
if (!(s->flags & SHF_ALLOC) || !s->isLive())
|
|
|
|
continue;
|
|
|
|
InputSectionBase *copy;
|
|
|
|
if (s->type == SHT_NOTE)
|
|
|
|
copy = make<InputSection>(cast<InputSection>(*s));
|
|
|
|
else if (auto *es = dyn_cast<EhInputSection>(s))
|
|
|
|
copy = make<EhInputSection>(*es);
|
|
|
|
else
|
|
|
|
continue;
|
|
|
|
copy->partition = part;
|
|
|
|
newSections.push_back(copy);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
inputSections.insert(inputSections.end(), newSections.begin(),
|
|
|
|
newSections.end());
|
|
|
|
}
|
|
|
|
|
2020-05-15 13:18:58 +08:00
|
|
|
void elf::combineEhSections() {
|
2020-11-03 22:41:09 +08:00
|
|
|
llvm::TimeTraceScope timeScope("Combine EH sections");
|
2017-03-11 04:00:42 +08:00
|
|
|
for (InputSectionBase *&s : inputSections) {
|
2019-06-11 10:54:30 +08:00
|
|
|
// Ignore dead sections and the partition end marker (.part.end),
|
|
|
|
// whose partition number is out of bounds.
|
|
|
|
if (!s->isLive() || s->partition == 255)
|
2017-03-11 04:00:42 +08:00
|
|
|
continue;
|
|
|
|
|
2019-06-08 01:57:58 +08:00
|
|
|
Partition &part = s->getPartition();
|
2019-04-02 02:01:18 +08:00
|
|
|
if (auto *es = dyn_cast<EhInputSection>(s)) {
|
2019-08-26 18:32:12 +08:00
|
|
|
part.ehFrame->addSection(es);
|
2019-04-02 02:01:18 +08:00
|
|
|
s = nullptr;
|
2019-06-08 01:57:58 +08:00
|
|
|
} else if (s->kind() == SectionBase::Regular && part.armExidx &&
|
|
|
|
part.armExidx->addSection(cast<InputSection>(s))) {
|
2019-04-02 02:01:18 +08:00
|
|
|
s = nullptr;
|
|
|
|
}
|
2017-03-11 04:00:42 +08:00
|
|
|
}
|
|
|
|
|
2021-10-17 14:31:20 +08:00
|
|
|
llvm::erase_value(inputSections, nullptr);
|
2017-03-11 04:00:42 +08:00
|
|
|
}
|
|
|
|
|
2017-12-09 03:13:27 +08:00
|
|
|
// If `name` is referenced but not yet defined, define it as a regular symbol
// at `sec` + `val` and return the definition; otherwise return nullptr.
// The symbol defaults to hidden visibility.
static Defined *addOptionalRegular(StringRef name, SectionBase *sec,
                                   uint64_t val, uint8_t stOther = STV_HIDDEN) {
  Symbol *sym = symtab->find(name);
  if (!sym)
    return nullptr;
  if (sym->isDefined())
    return nullptr;

  sym->resolve(Defined{/*file=*/nullptr, name, STB_GLOBAL, stOther, STT_NOTYPE,
                       val, /*size=*/0, sec});
  return cast<Defined>(sym);
}
|
|
|
|
|
2018-10-20 19:12:50 +08:00
|
|
|
// Unconditionally define `name` as a hidden absolute symbol with value 0.
static Defined *addAbsolute(StringRef name) {
  Defined newSym{nullptr, name, STB_GLOBAL, STV_HIDDEN,
                 STT_NOTYPE, 0, 0, nullptr};
  Symbol *sym = symtab->addSymbol(newSym);
  return cast<Defined>(sym);
}
|
2018-10-20 19:12:50 +08:00
|
|
|
|
2017-12-09 03:13:27 +08:00
|
|
|
// The linker is expected to define some symbols depending on
// the linking result. This function defines such symbols.
void elf::addReservedSymbols() {
  if (config->emachine == EM_MIPS) {
    // Define _gp for MIPS. st_value of the _gp symbol will be updated by the
    // Writer so that it points to an absolute address which by default is
    // relative to GOT. The default offset is 0x7ff0.
    // See "Global Data Symbols" in Chapter 6 in the following document:
    // ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf
    ElfSym::mipsGp = addAbsolute("_gp");

    // On the MIPS O32 ABI, _gp_disp is a magic symbol that designates the
    // offset between the start of a function and the 'gp' pointer into GOT.
    // Only define it if some input actually references it.
    if (symtab->find("_gp_disp"))
      ElfSym::mipsGpDisp = addAbsolute("_gp_disp");

    // The __gnu_local_gp is a magic symbol equal to the current value of the
    // 'gp' pointer. This symbol is used in the code generated by the .cpload
    // pseudo-op when the -mno-shared option is used.
    // https://sourceware.org/ml/binutils/2004-12/msg00094.html
    if (symtab->find("__gnu_local_gp"))
      ElfSym::mipsLocalGp = addAbsolute("__gnu_local_gp");
  } else if (config->emachine == EM_PPC) {
    // glibc *crt1.o has an undefined reference to _SDA_BASE_. Since we don't
    // support the Small Data Area, define it arbitrarily as 0.
    addOptionalRegular("_SDA_BASE_", nullptr, 0, STV_HIDDEN);
  } else if (config->emachine == EM_PPC64) {
    addPPC64SaveRestore();
  }

  // The Power Architecture 64-bit v2 ABI defines a TableOfContents (TOC) which
  // combines the typical ELF GOT with the small data sections. It commonly
  // includes .got .toc .sdata .sbss. The .TOC. symbol replaces both
  // _GLOBAL_OFFSET_TABLE_ and _SDA_BASE_ from the 32-bit ABI. It is used to
  // represent the TOC base which is offset by 0x8000 bytes from the start of
  // the .got section.
  // We do not allow _GLOBAL_OFFSET_TABLE_ to be defined by input objects as the
  // correctness of some relocations depends on its value.
  StringRef gotSymName =
      (config->emachine == EM_PPC64) ? ".TOC." : "_GLOBAL_OFFSET_TABLE_";

  if (Symbol *s = symtab->find(gotSymName)) {
    // An input object must not define the GOT symbol itself; that is a hard
    // error, not something we can silently override.
    if (s->isDefined()) {
      error(toString(s->file) + " cannot redefine linker defined symbol '" +
            gotSymName + "'");
      return;
    }

    // On PPC64 the TOC base is biased 0x8000 bytes past the start of .got
    // (see the ABI note above); everywhere else the GOT symbol is at offset 0.
    uint64_t gotOff = 0;
    if (config->emachine == EM_PPC64)
      gotOff = 0x8000;

    s->resolve(Defined{/*file=*/nullptr, gotSymName, STB_GLOBAL, STV_HIDDEN,
                       STT_NOTYPE, gotOff, /*size=*/0, Out::elfHeader});
    ElfSym::globalOffsetTable = cast<Defined>(s);
  }

  // __ehdr_start is the location of ELF file headers. Note that we define
  // this symbol unconditionally even when using a linker script, which
  // differs from the behavior implemented by GNU linker which only defines
  // this symbol if ELF headers are in the memory mapped segment.
  addOptionalRegular("__ehdr_start", Out::elfHeader, 0, STV_HIDDEN);

  // __executable_start is not documented, but the expectation of at
  // least the Android libc is that it points to the ELF header.
  addOptionalRegular("__executable_start", Out::elfHeader, 0, STV_HIDDEN);

  // The __dso_handle symbol is passed to cxa_finalize as a marker to identify
  // each DSO. The address of the symbol doesn't matter as long as they are
  // different in different DSOs, so we chose the start address of the DSO.
  addOptionalRegular("__dso_handle", Out::elfHeader, 0, STV_HIDDEN);

  // If the linker script does the layout, we do not need to create any
  // standard symbols.
  if (script->hasSectionsCommand)
    return;

  // Helper for the classic begin/end layout symbols below. Their values are
  // placeholders (0 or -1) here; the Writer assigns real addresses later.
  auto add = [](StringRef s, int64_t pos) {
    return addOptionalRegular(s, Out::elfHeader, pos, STV_DEFAULT);
  };

  ElfSym::bss = add("__bss_start", 0);
  ElfSym::end1 = add("end", -1);
  ElfSym::end2 = add("_end", -1);
  ElfSym::etext1 = add("etext", -1);
  ElfSym::etext2 = add("_etext", -1);
  ElfSym::edata1 = add("edata", -1);
  ElfSym::edata2 = add("_edata", -1);
}
|
|
|
|
|
2019-06-08 01:57:58 +08:00
|
|
|
// Look up an output section by name within the given partition (the main
// partition, 1, by default). Returns nullptr if no such section exists.
static OutputSection *findSection(StringRef name, unsigned partition = 1) {
  for (SectionCommand *cmd : script->sectionCommands) {
    auto *osec = dyn_cast<OutputSection>(cmd);
    if (!osec)
      continue;
    if (osec->name == name && osec->partition == partition)
      return osec;
  }
  return nullptr;
}
|
|
|
|
|
2020-05-15 13:18:58 +08:00
|
|
|
template <class ELFT> void elf::createSyntheticSections() {
|
2016-11-02 07:17:45 +08:00
|
|
|
// Initialize all pointers with NULL. This is needed because
|
|
|
|
// you can call lld::elf::main more than once as a library.
|
2021-11-29 06:47:57 +08:00
|
|
|
Out::tlsPhdr = nullptr;
|
|
|
|
Out::preinitArray = nullptr;
|
|
|
|
Out::initArray = nullptr;
|
|
|
|
Out::finiArray = nullptr;
|
2016-11-02 07:17:45 +08:00
|
|
|
|
2019-10-02 00:10:13 +08:00
|
|
|
// Add the .interp section first because it is not a SyntheticSection.
|
|
|
|
// The removeUnusedSyntheticSections() function relies on the
|
|
|
|
// SyntheticSections coming last.
|
|
|
|
if (needsInterpSection()) {
|
|
|
|
for (size_t i = 1; i <= partitions.size(); ++i) {
|
|
|
|
InputSection *sec = createInterpSection();
|
|
|
|
sec->partition = i;
|
|
|
|
inputSections.push_back(sec);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-12-23 12:51:11 +08:00
|
|
|
auto add = [](SyntheticSection &sec) { inputSections.push_back(&sec); };
|
2017-02-05 13:18:58 +08:00
|
|
|
|
2021-12-28 10:15:23 +08:00
|
|
|
in.shStrTab = std::make_unique<StringTableSection>(".shstrtab", false);
|
2016-11-02 07:17:45 +08:00
|
|
|
|
2017-02-27 10:31:26 +08:00
|
|
|
Out::programHeaders = make<OutputSection>("", 0, SHF_ALLOC);
|
2017-10-07 08:58:34 +08:00
|
|
|
Out::programHeaders->alignment = config->wordsize;
|
2016-11-02 07:17:45 +08:00
|
|
|
|
|
|
|
if (config->strip != StripPolicy::All) {
|
2021-12-28 10:15:23 +08:00
|
|
|
in.strTab = std::make_unique<StringTableSection>(".strtab", false);
|
|
|
|
in.symTab = std::make_unique<SymbolTableSection<ELFT>>(*in.strTab);
|
|
|
|
in.symTabShndx = std::make_unique<SymtabShndxSection>();
|
2016-11-02 07:17:45 +08:00
|
|
|
}
|
|
|
|
|
2021-12-28 10:15:23 +08:00
|
|
|
in.bss = std::make_unique<BssSection>(".bss", 0, 1);
|
2021-12-23 12:51:11 +08:00
|
|
|
add(*in.bss);
|
2017-11-24 16:48:29 +08:00
|
|
|
|
|
|
|
// If there is a SECTIONS command and a .data.rel.ro section name use name
|
|
|
|
// .data.rel.ro.bss so that we match in the .data.rel.ro output section.
|
|
|
|
// This makes sure our relro is contiguous.
|
2019-06-08 01:57:58 +08:00
|
|
|
bool hasDataRelRo =
|
|
|
|
script->hasSectionsCommand && findSection(".data.rel.ro", 0);
|
2021-12-28 10:15:23 +08:00
|
|
|
in.bssRelRo = std::make_unique<BssSection>(
|
|
|
|
hasDataRelRo ? ".data.rel.ro.bss" : ".bss.rel.ro", 0, 1);
|
2021-12-23 12:51:11 +08:00
|
|
|
add(*in.bssRelRo);
|
2017-03-17 18:14:53 +08:00
|
|
|
|
2016-11-22 12:28:39 +08:00
|
|
|
// Add MIPS-specific sections.
|
2016-11-10 05:36:56 +08:00
|
|
|
if (config->emachine == EM_MIPS) {
|
2017-09-16 02:05:02 +08:00
|
|
|
if (!config->shared && config->hasDynSymTab) {
|
2021-12-28 10:15:23 +08:00
|
|
|
in.mipsRldMap = std::make_unique<MipsRldMapSection>();
|
2021-12-23 12:51:11 +08:00
|
|
|
add(*in.mipsRldMap);
|
2016-11-23 01:49:14 +08:00
|
|
|
}
|
2016-11-22 12:28:39 +08:00
|
|
|
if (auto *sec = MipsAbiFlagsSection<ELFT>::create())
|
2021-12-23 12:51:11 +08:00
|
|
|
add(*sec);
|
2016-11-22 12:28:39 +08:00
|
|
|
if (auto *sec = MipsOptionsSection<ELFT>::create())
|
2021-12-23 12:51:11 +08:00
|
|
|
add(*sec);
|
2016-11-22 12:28:39 +08:00
|
|
|
if (auto *sec = MipsReginfoSection<ELFT>::create())
|
2021-12-23 12:51:11 +08:00
|
|
|
add(*sec);
|
2016-11-10 05:36:56 +08:00
|
|
|
}
|
[Coding style change] Rename variables so that they start with a lowercase letter
This patch is mechanically generated by clang-llvm-rename tool that I wrote
using Clang Refactoring Engine just for creating this patch. You can see the
source code of the tool at https://reviews.llvm.org/D64123. There's no manual
post-processing; you can generate the same patch by re-running the tool against
lld's code base.
Here is the main discussion thread to change the LLVM coding style:
https://lists.llvm.org/pipermail/llvm-dev/2019-February/130083.html
In the discussion thread, I proposed we use lld as a testbed for variable
naming scheme change, and this patch does that.
I chose to rename variables so that they are in camelCase, just because that
is a minimal change to make variables to start with a lowercase letter.
Note to downstream patch maintainers: if you are maintaining a downstream lld
repo, just rebasing ahead of this commit would cause massive merge conflicts
because this patch essentially changes every line in the lld subdirectory. But
there's a remedy.
clang-llvm-rename tool is a batch tool, so you can rename variables in your
downstream repo with the tool. Given that, here is how to rebase your repo to
a commit after the mass renaming:
1. rebase to the commit just before the mass variable renaming,
2. apply the tool to your downstream repo to mass-rename variables locally, and
3. rebase again to the head.
Most changes made by the tool should be identical for a downstream repo and
for the head, so at the step 3, almost all changes should be merged and
disappear. I'd expect that there would be some lines that you need to merge by
hand, but that shouldn't be too many.
Differential Revision: https://reviews.llvm.org/D64121
llvm-svn: 365595
2019-07-10 13:00:37 +08:00
|
|
|
|
2019-08-03 10:26:52 +08:00
|
|
|
StringRef relaDynName = config->isRela ? ".rela.dyn" : ".rel.dyn";
|
|
|
|
|
2019-06-08 01:57:58 +08:00
|
|
|
for (Partition &part : partitions) {
|
2021-12-23 12:51:11 +08:00
|
|
|
auto add = [&](SyntheticSection &sec) {
|
|
|
|
sec.partition = part.getNumber();
|
|
|
|
inputSections.push_back(&sec);
|
2019-06-08 01:57:58 +08:00
|
|
|
};
|
2016-11-25 16:05:41 +08:00
|
|
|
|
2019-06-08 01:57:58 +08:00
|
|
|
if (!part.name.empty()) {
|
2021-12-28 10:15:23 +08:00
|
|
|
part.elfHeader = std::make_unique<PartitionElfHeaderSection<ELFT>>();
|
2019-06-08 01:57:58 +08:00
|
|
|
part.elfHeader->name = part.name;
|
2021-12-23 12:51:11 +08:00
|
|
|
add(*part.elfHeader);
|
2016-11-25 16:05:41 +08:00
|
|
|
|
2021-12-28 10:15:23 +08:00
|
|
|
part.programHeaders =
|
|
|
|
std::make_unique<PartitionProgramHeadersSection<ELFT>>();
|
2021-12-23 12:51:11 +08:00
|
|
|
add(*part.programHeaders);
|
2016-11-25 16:05:41 +08:00
|
|
|
}
|
|
|
|
|
2019-06-08 01:57:58 +08:00
|
|
|
if (config->buildId != BuildIdKind::None) {
|
2021-12-28 10:15:23 +08:00
|
|
|
part.buildId = std::make_unique<BuildIdSection>();
|
2021-12-23 12:51:11 +08:00
|
|
|
add(*part.buildId);
|
2019-06-08 01:57:58 +08:00
|
|
|
}
|
2016-11-25 16:05:41 +08:00
|
|
|
|
2021-12-28 10:15:23 +08:00
|
|
|
part.dynStrTab = std::make_unique<StringTableSection>(".dynstr", true);
|
|
|
|
part.dynSymTab =
|
|
|
|
std::make_unique<SymbolTableSection<ELFT>>(*part.dynStrTab);
|
|
|
|
part.dynamic = std::make_unique<DynamicSection<ELFT>>();
|
2019-08-03 10:26:52 +08:00
|
|
|
if (config->androidPackDynRelocs)
|
2021-12-23 15:55:11 +08:00
|
|
|
part.relaDyn =
|
2021-12-28 10:15:23 +08:00
|
|
|
std::make_unique<AndroidPackedRelocationSection<ELFT>>(relaDynName);
|
|
|
|
else
|
|
|
|
part.relaDyn = std::make_unique<RelocationSection<ELFT>>(
|
|
|
|
relaDynName, config->zCombreloc);
|
2016-11-25 16:05:41 +08:00
|
|
|
|
2019-06-08 01:57:58 +08:00
|
|
|
if (config->hasDynSymTab) {
|
2021-12-23 12:51:11 +08:00
|
|
|
add(*part.dynSymTab);
|
2019-06-08 01:57:58 +08:00
|
|
|
|
2021-12-28 10:15:23 +08:00
|
|
|
part.verSym = std::make_unique<VersionTableSection>();
|
2021-12-23 12:51:11 +08:00
|
|
|
add(*part.verSym);
|
2019-06-08 01:57:58 +08:00
|
|
|
|
2019-08-05 22:31:39 +08:00
|
|
|
if (!namedVersionDefs().empty()) {
|
2021-12-28 10:15:23 +08:00
|
|
|
part.verDef = std::make_unique<VersionDefinitionSection>();
|
2021-12-23 12:51:11 +08:00
|
|
|
add(*part.verDef);
|
2019-06-08 01:57:58 +08:00
|
|
|
}
|
|
|
|
|
2021-12-28 10:15:23 +08:00
|
|
|
part.verNeed = std::make_unique<VersionNeedSection<ELFT>>();
|
2021-12-23 12:51:11 +08:00
|
|
|
add(*part.verNeed);
|
2019-06-08 01:57:58 +08:00
|
|
|
|
|
|
|
if (config->gnuHash) {
|
2022-01-13 05:04:32 +08:00
|
|
|
part.gnuHashTab = std::make_unique<GnuHashTableSection>();
|
2021-12-23 12:51:11 +08:00
|
|
|
add(*part.gnuHashTab);
|
2019-06-08 01:57:58 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
if (config->sysvHash) {
|
2022-01-13 05:04:32 +08:00
|
|
|
part.hashTab = std::make_unique<HashTableSection>();
|
2021-12-23 12:51:11 +08:00
|
|
|
add(*part.hashTab);
|
2019-06-08 01:57:58 +08:00
|
|
|
}
|
|
|
|
|
2021-12-23 12:51:11 +08:00
|
|
|
add(*part.dynamic);
|
|
|
|
add(*part.dynStrTab);
|
|
|
|
add(*part.relaDyn);
|
2019-06-08 01:57:58 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
if (config->relrPackDynRelocs) {
|
2021-12-28 10:15:23 +08:00
|
|
|
part.relrDyn = std::make_unique<RelrSection<ELFT>>();
|
2021-12-23 12:51:11 +08:00
|
|
|
add(*part.relrDyn);
|
2019-06-08 01:57:58 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
if (!config->relocatable) {
|
|
|
|
if (config->ehFrameHdr) {
|
2021-12-28 10:15:23 +08:00
|
|
|
part.ehFrameHdr = std::make_unique<EhFrameHeader>();
|
2021-12-23 12:51:11 +08:00
|
|
|
add(*part.ehFrameHdr);
|
2019-06-08 01:57:58 +08:00
|
|
|
}
|
2021-12-28 10:15:23 +08:00
|
|
|
part.ehFrame = std::make_unique<EhFrameSection>();
|
2021-12-23 12:51:11 +08:00
|
|
|
add(*part.ehFrame);
|
2016-11-25 16:05:41 +08:00
|
|
|
}
|
|
|
|
|
2019-06-08 01:57:58 +08:00
|
|
|
if (config->emachine == EM_ARM && !config->relocatable) {
|
|
|
|
// The ARMExidxsyntheticsection replaces all the individual .ARM.exidx
|
|
|
|
// InputSections.
|
2021-12-28 10:15:23 +08:00
|
|
|
part.armExidx = std::make_unique<ARMExidxSyntheticSection>();
|
2021-12-23 12:51:11 +08:00
|
|
|
add(*part.armExidx);
|
2019-06-08 01:57:58 +08:00
|
|
|
}
|
2016-11-25 16:05:41 +08:00
|
|
|
}
|
|
|
|
|
2019-06-08 01:57:58 +08:00
|
|
|
if (partitions.size() != 1) {
|
|
|
|
// Create the partition end marker. This needs to be in partition number 255
|
|
|
|
// so that it is sorted after all other partitions. It also has other
|
2019-06-11 10:54:30 +08:00
|
|
|
// special handling (see createPhdrs() and combineEhSections()).
|
2021-12-28 10:15:23 +08:00
|
|
|
in.partEnd =
|
|
|
|
std::make_unique<BssSection>(".part.end", config->maxPageSize, 1);
|
2019-06-08 01:57:58 +08:00
|
|
|
in.partEnd->partition = 255;
|
2021-12-23 12:51:11 +08:00
|
|
|
add(*in.partEnd);
|
2019-06-08 01:57:58 +08:00
|
|
|
|
2021-12-28 10:15:23 +08:00
|
|
|
in.partIndex = std::make_unique<PartitionIndexSection>();
|
|
|
|
addOptionalRegular("__part_index_begin", in.partIndex.get(), 0);
|
|
|
|
addOptionalRegular("__part_index_end", in.partIndex.get(),
|
2019-06-08 01:57:58 +08:00
|
|
|
in.partIndex->getSize());
|
2021-12-23 12:51:11 +08:00
|
|
|
add(*in.partIndex);
|
2018-07-10 04:08:55 +08:00
|
|
|
}
|
|
|
|
|
2016-11-22 12:28:39 +08:00
|
|
|
// Add .got. MIPS' .got is so different from the other archs,
|
|
|
|
// it has its own class.
|
2016-11-25 16:05:41 +08:00
|
|
|
if (config->emachine == EM_MIPS) {
|
2021-12-28 10:15:23 +08:00
|
|
|
in.mipsGot = std::make_unique<MipsGotSection>();
|
2021-12-23 12:51:11 +08:00
|
|
|
add(*in.mipsGot);
|
2016-11-25 16:05:41 +08:00
|
|
|
} else {
|
2021-12-28 10:15:23 +08:00
|
|
|
in.got = std::make_unique<GotSection>();
|
2021-12-23 12:51:11 +08:00
|
|
|
add(*in.got);
|
2016-11-25 16:05:41 +08:00
|
|
|
}
|
2016-11-17 05:01:02 +08:00
|
|
|
|
[PPC32] Improve the 32-bit PowerPC port
Many -static/-no-pie/-shared/-pie applications linked against glibc or musl
should work with this patch. This also helps FreeBSD PowerPC64 to migrate
their lib32 (PR40888).
* Fix default image base and max page size.
* Support new-style Secure PLT (see below). Old-style BSS PLT is not
implemented, so it is not suitable for FreeBSD rtld now because it doesn't
support Secure PLT yet.
* Support more initial relocation types:
R_PPC_ADDR32, R_PPC_REL16*, R_PPC_LOCAL24PC, R_PPC_PLTREL24, and R_PPC_GOT16.
The addend of R_PPC_PLTREL24 is special: it decides the call stub PLT type
but it should be ignored for the computation of target symbol VA.
* Support GNU ifunc
* Support .glink used for lazy PLT resolution in glibc
* Add a new thunk type: PPC32PltCallStub that is similar to PPC64PltCallStub.
It is used by R_PPC_REL24 and R_PPC_PLTREL24.
A PLT stub used in -fPIE/-fPIC usually loads an address relative to
.got2+0x8000 (-fpie/-fpic code uses _GLOBAL_OFFSET_TABLE_ relative
addresses).
Two .got2 sections in two object files have different addresses, thus a PLT stub
can't be shared by two object files. To handle this incompatibility,
change the parameters of Thunk::isCompatibleWith to
`const InputSection &, const Relocation &`.
PowerPC psABI specified an old-style .plt (BSS PLT) that is both
writable and executable. Linkers don't make separate RW- and RWE segments,
which causes all initially writable memory (think .data) executable.
This is a big security concern so a new PLT scheme (secure PLT) was developed to
address the security issue.
TLS will be implemented in D62940.
glibc older than ~2012 requires .rela.dyn to include .rela.plt, it can
not handle the DT_RELA+DT_RELASZ == DT_JMPREL case correctly. A hack
(not included in this patch) in LinkerScript.cpp addOrphanSections() to
work around the issue:
if (Config->EMachine == EM_PPC) {
// Older glibc assumes .rela.dyn includes .rela.plt
Add(In.RelaDyn);
if (In.RelaPlt->isLive() && !In.RelaPlt->Parent)
In.RelaDyn->getParent()->addSection(In.RelaPlt);
}
Reviewed By: ruiu
Differential Revision: https://reviews.llvm.org/D62464
llvm-svn: 362721
2019-06-07 01:03:00 +08:00
|
|
|
if (config->emachine == EM_PPC) {
|
2021-12-28 10:15:23 +08:00
|
|
|
in.ppc32Got2 = std::make_unique<PPC32Got2Section>();
|
2021-12-23 12:51:11 +08:00
|
|
|
add(*in.ppc32Got2);
|
[PPC32] Improve the 32-bit PowerPC port
Many -static/-no-pie/-shared/-pie applications linked against glibc or musl
should work with this patch. This also helps FreeBSD PowerPC64 to migrate
their lib32 (PR40888).
* Fix default image base and max page size.
* Support new-style Secure PLT (see below). Old-style BSS PLT is not
implemented, so it is not suitable for FreeBSD rtld now because it doesn't
support Secure PLT yet.
* Support more initial relocation types:
R_PPC_ADDR32, R_PPC_REL16*, R_PPC_LOCAL24PC, R_PPC_PLTREL24, and R_PPC_GOT16.
The addend of R_PPC_PLTREL24 is special: it decides the call stub PLT type
but it should be ignored for the computation of target symbol VA.
* Support GNU ifunc
* Support .glink used for lazy PLT resolution in glibc
* Add a new thunk type: PPC32PltCallStub that is similar to PPC64PltCallStub.
It is used by R_PPC_REL24 and R_PPC_PLTREL24.
A PLT stub used in -fPIE/-fPIC usually loads an address relative to
.got2+0x8000 (-fpie/-fpic code uses _GLOBAL_OFFSET_TABLE_ relative
addresses).
Two .got2 sections in two object files have different addresses, thus a PLT stub
can't be shared by two object files. To handle this incompatibility,
change the parameters of Thunk::isCompatibleWith to
`const InputSection &, const Relocation &`.
PowerPC psABI specified an old-style .plt (BSS PLT) that is both
writable and executable. Linkers don't make separate RW- and RWE segments,
which causes all initially writable memory (think .data) executable.
This is a big security concern so a new PLT scheme (secure PLT) was developed to
address the security issue.
TLS will be implemented in D62940.
glibc older than ~2012 requires .rela.dyn to include .rela.plt, it can
not handle the DT_RELA+DT_RELASZ == DT_JMPREL case correctly. A hack
(not included in this patch) in LinkerScript.cpp addOrphanSections() to
work around the issue:
if (Config->EMachine == EM_PPC) {
// Older glibc assumes .rela.dyn includes .rela.plt
Add(In.RelaDyn);
if (In.RelaPlt->isLive() && !In.RelaPlt->Parent)
In.RelaDyn->getParent()->addSection(In.RelaPlt);
}
Reviewed By: ruiu
Differential Revision: https://reviews.llvm.org/D62464
llvm-svn: 362721
2019-06-07 01:03:00 +08:00
|
|
|
}
|
|
|
|
|
2018-11-15 01:56:43 +08:00
|
|
|
if (config->emachine == EM_PPC64) {
|
2021-12-28 10:15:23 +08:00
|
|
|
in.ppc64LongBranchTarget = std::make_unique<PPC64LongBranchTargetSection>();
|
2021-12-23 12:51:11 +08:00
|
|
|
add(*in.ppc64LongBranchTarget);
|
2018-11-15 01:56:43 +08:00
|
|
|
}
|
|
|
|
|
2021-12-28 10:15:23 +08:00
|
|
|
in.gotPlt = std::make_unique<GotPltSection>();
|
2021-12-23 12:51:11 +08:00
|
|
|
add(*in.gotPlt);
|
2021-12-28 10:15:23 +08:00
|
|
|
in.igotPlt = std::make_unique<IgotPltSection>();
|
2021-12-23 12:51:11 +08:00
|
|
|
add(*in.igotPlt);
|
2016-11-30 00:05:27 +08:00
|
|
|
|
[ELF] Change GOT*_FROM_END (relative to end(.got)) to GOTPLT* (start(.got.plt))
Summary:
This should address remaining issues discussed in PR36555.
Currently R_GOT*_FROM_END are exclusively used by x86 and x86_64 to
express relocations types relative to the GOT base. We have
_GLOBAL_OFFSET_TABLE_ (GOT base) = start(.got.plt) but end(.got) !=
start(.got.plt)
This can have problems when _GLOBAL_OFFSET_TABLE_ is used as a symbol, e.g.
glibc dl_machine_dynamic assumes _GLOBAL_OFFSET_TABLE_ is start(.got.plt),
which is not true.
extern const ElfW(Addr) _GLOBAL_OFFSET_TABLE_[] attribute_hidden;
return _GLOBAL_OFFSET_TABLE_[0]; // R_X86_64_GOTPC32
In this patch, we
* Change all GOT*_FROM_END to GOTPLT* to fix the problem.
* Add HasGotPltOffRel to denote whether .got.plt should be kept even if
the section is empty.
* Simplify GotSection::empty and GotPltSection::empty by setting
HasGotOffRel and HasGotPltOffRel according to GlobalOffsetTable early.
The change of R_386_GOTPC makes X86::writePltHeader simpler as we don't
have to compute the offset start(.got.plt) - Ebx (it is constant 0).
We still diverge from ld.bfd (at least in most cases) and gold in that
.got.plt and .got are not adjacent, but the advantage doing that is
unclear.
Reviewers: ruiu, sivachandra, espindola
Subscribers: emaste, mehdi_amini, arichardson, dexonsmith, jdoerfert, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D59594
llvm-svn: 356968
2019-03-26 07:46:19 +08:00
|
|
|
// _GLOBAL_OFFSET_TABLE_ is defined relative to either .got.plt or .got. Treat
|
|
|
|
// it as a relocation and ensure the referenced section is created.
|
|
|
|
if (ElfSym::globalOffsetTable && config->emachine != EM_MIPS) {
|
|
|
|
if (target->gotBaseSymInGotPlt)
|
|
|
|
in.gotPlt->hasGotPltOffRel = true;
|
|
|
|
else
|
|
|
|
in.got->hasGotOffRel = true;
|
|
|
|
}
|
|
|
|
|
2019-02-20 19:34:18 +08:00
|
|
|
if (config->gdbIndex)
|
2021-12-23 12:51:11 +08:00
|
|
|
add(*GdbIndexSection::create<ELFT>());
|
2016-11-30 00:05:27 +08:00
|
|
|
|
|
|
|
// We always need to add rel[a].plt to output if it has entries.
|
|
|
|
// Even for static linking it can contain R_[*]_IRELATIVE relocations.
|
2021-12-28 10:15:23 +08:00
|
|
|
in.relaPlt = std::make_unique<RelocationSection<ELFT>>(
|
2019-07-11 14:08:54 +08:00
|
|
|
config->isRela ? ".rela.plt" : ".rel.plt", /*sort=*/false);
|
2021-12-23 12:51:11 +08:00
|
|
|
add(*in.relaPlt);
|
2016-11-30 00:05:27 +08:00
|
|
|
|
2019-08-03 10:26:52 +08:00
|
|
|
// The relaIplt immediately follows .rel[a].dyn to ensure that the IRelative
|
|
|
|
// relocations are processed last by the dynamic loader. We cannot place the
|
|
|
|
// iplt section in .rel.dyn when Android relocation packing is enabled because
|
|
|
|
// that would cause a section type mismatch. However, because the Android
|
|
|
|
// dynamic loader reads .rel.plt after .rel.dyn, we can get the desired
|
|
|
|
// behaviour by placing the iplt section in .rel.plt.
|
2021-12-28 10:15:23 +08:00
|
|
|
in.relaIplt = std::make_unique<RelocationSection<ELFT>>(
|
2019-08-03 10:26:52 +08:00
|
|
|
config->androidPackDynRelocs ? in.relaPlt->name : relaDynName,
|
2019-07-11 14:08:54 +08:00
|
|
|
/*sort=*/false);
|
2021-12-23 12:51:11 +08:00
|
|
|
add(*in.relaIplt);
|
2016-12-08 20:58:55 +08:00
|
|
|
|
2019-12-11 10:05:36 +08:00
|
|
|
if ((config->emachine == EM_386 || config->emachine == EM_X86_64) &&
|
|
|
|
(config->andFeatures & GNU_PROPERTY_X86_FEATURE_1_IBT)) {
|
2021-12-28 10:15:23 +08:00
|
|
|
in.ibtPlt = std::make_unique<IBTPltSection>();
|
2021-12-23 12:51:11 +08:00
|
|
|
add(*in.ibtPlt);
|
2019-12-11 10:05:36 +08:00
|
|
|
}
|
|
|
|
|
2021-12-28 10:15:23 +08:00
|
|
|
if (config->emachine == EM_PPC)
|
|
|
|
in.plt = std::make_unique<PPC32GlinkSection>();
|
|
|
|
else
|
|
|
|
in.plt = std::make_unique<PltSection>();
|
2021-12-23 12:51:11 +08:00
|
|
|
add(*in.plt);
|
2021-12-28 10:15:23 +08:00
|
|
|
in.iplt = std::make_unique<IpltSection>();
|
2021-12-23 12:51:11 +08:00
|
|
|
add(*in.iplt);
|
2016-11-30 00:05:27 +08:00
|
|
|
|
2019-06-05 11:04:46 +08:00
|
|
|
if (config->andFeatures)
|
2021-12-23 12:51:11 +08:00
|
|
|
add(*make<GnuPropertySection>());
|
2019-06-05 11:04:46 +08:00
|
|
|
|
2018-08-29 15:27:09 +08:00
|
|
|
// .note.GNU-stack is always added when we are creating a re-linkable
|
|
|
|
// object file. Other linkers are using the presence of this marker
|
|
|
|
// section to control the executable-ness of the stack area, but that
|
|
|
|
// is irrelevant these days. Stack area should always be non-executable
|
|
|
|
// by default. So we emit this section unconditionally.
|
|
|
|
if (config->relocatable)
|
2021-12-23 12:51:11 +08:00
|
|
|
add(*make<GnuStackSection>());
|
2018-08-29 15:27:09 +08:00
|
|
|
|
2018-09-26 03:26:58 +08:00
|
|
|
if (in.symTab)
|
2021-12-23 12:51:11 +08:00
|
|
|
add(*in.symTab);
|
2018-09-26 03:26:58 +08:00
|
|
|
if (in.symTabShndx)
|
2021-12-23 12:51:11 +08:00
|
|
|
add(*in.symTabShndx);
|
|
|
|
add(*in.shStrTab);
|
2018-09-26 03:26:58 +08:00
|
|
|
if (in.strTab)
|
2021-12-23 12:51:11 +08:00
|
|
|
add(*in.strTab);
|
2016-11-02 07:17:45 +08:00
|
|
|
}
|
|
|
|
|
2017-12-09 05:50:29 +08:00
|
|
|
// The main function of the writer. Drives the whole output-generation
// pipeline: symbol table population, section finalization, address/offset
// assignment, and finally writing bytes to disk.
template <class ELFT> void Writer<ELFT>::run() {
  // Copy local symbols from input object files into the output .symtab.
  copyLocalSymbols();

  // With --emit-relocs or -r we also need STT_SECTION symbols so that
  // relocations can be expressed relative to output sections.
  if (config->copyRelocs)
    addSectionSymbols();

  // Now that we have a complete set of output sections, this function
  // completes section contents. For example, we need to add strings
  // to the string table, and add entries to .got and .plt.
  // finalizeSections does that.
  finalizeSections();
  checkExecuteOnly();

  // If --compressed-debug-sections is specified, compress .debug_* sections.
  // Do it right now because it changes the size of output sections.
  for (OutputSection *sec : outputSections)
    sec->maybeCompress<ELFT>();

  // With a SECTIONS command, header allocation depends on script-assigned
  // addresses; without one, headers were already placed by createPhdrs().
  if (script->hasSectionsCommand)
    script->allocateHeaders(mainPart->phdrs);

  // Remove empty PT_LOAD to avoid causing the dynamic linker to try to mmap a
  // 0 sized region. This has to be done late since only after assignAddresses
  // we know the size of the sections.
  for (Partition &part : partitions)
    removeEmptyPTLoad(part.phdrs);

  if (!config->oFormatBinary)
    assignFileOffsets();
  else
    assignFileOffsetsBinary();

  // Now that file offsets are final, fill in p_offset/p_filesz etc.
  for (Partition &part : partitions)
    setPhdrs(part);

  // Handle --print-map(-M)/--Map, --why-extract=, --cref and
  // --print-archive-stats=. Dump them before checkSections() because the files
  // may be useful in case checkSections() or openFile() fails, for example, due
  // to an erroneous file size.
  writeMapAndCref();
  writeWhyExtract();
  writeArchiveStats();

  if (config->checkSections)
    checkSections();

  // It does not make sense to try to open the file if we have an error already.
  if (errorCount())
    return;

  {
    llvm::TimeTraceScope timeScope("Write output file");
    // Write the result down to a file.
    openFile();
    if (errorCount())
      return;

    if (!config->oFormatBinary) {
      // Optionally fill segment gaps with trap instructions before writing
      // real content, so stray jumps into padding fault loudly.
      if (config->zSeparate != SeparateSegmentKind::None)
        writeTrapInstr();
      writeHeader();
      writeSections();
    } else {
      writeSectionsBinary();
    }

    // Backfill .note.gnu.build-id section content. This is done at last
    // because the content is usually a hash value of the entire output file.
    writeBuildId();
    if (errorCount())
      return;

    if (auto e = buffer->commit())
      error("failed to write to the output file: " + toString(std::move(e)));
  }
}
|
|
|
|
|
2020-04-25 19:58:00 +08:00
|
|
|
template <class ELFT, class RelTy>
|
|
|
|
static void markUsedLocalSymbolsImpl(ObjFile<ELFT> *file,
|
|
|
|
llvm::ArrayRef<RelTy> rels) {
|
|
|
|
for (const RelTy &rel : rels) {
|
|
|
|
Symbol &sym = file->getRelocTargetSym(rel);
|
|
|
|
if (sym.isLocal())
|
|
|
|
sym.used = true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// The function ensures that the "used" field of local symbols reflects the fact
|
|
|
|
// that the symbol is used in a relocation from a live section.
|
|
|
|
template <class ELFT> static void markUsedLocalSymbols() {
|
|
|
|
// With --gc-sections, the field is already filled.
|
|
|
|
// See MarkLive<ELFT>::resolveReloc().
|
|
|
|
if (config->gcSections)
|
|
|
|
return;
|
|
|
|
// Without --gc-sections, the field is initialized with "true".
|
|
|
|
// Drop the flag first and then rise for symbols referenced in relocations.
|
2021-12-15 16:37:10 +08:00
|
|
|
for (ELFFileBase *file : objectFiles) {
|
2020-04-25 19:58:00 +08:00
|
|
|
ObjFile<ELFT> *f = cast<ObjFile<ELFT>>(file);
|
|
|
|
for (Symbol *b : f->getLocalSymbols())
|
|
|
|
b->used = false;
|
|
|
|
for (InputSectionBase *s : f->getSections()) {
|
|
|
|
InputSection *isec = dyn_cast_or_null<InputSection>(s);
|
|
|
|
if (!isec)
|
|
|
|
continue;
|
|
|
|
if (isec->type == SHT_REL)
|
|
|
|
markUsedLocalSymbolsImpl(f, isec->getDataAs<typename ELFT::Rel>());
|
|
|
|
else if (isec->type == SHT_RELA)
|
|
|
|
markUsedLocalSymbolsImpl(f, isec->getDataAs<typename ELFT::Rela>());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-04-05 09:30:09 +08:00
|
|
|
static bool shouldKeepInSymtab(const Defined &sym) {
|
|
|
|
if (sym.isSection())
|
2016-01-28 02:04:26 +08:00
|
|
|
return false;
|
|
|
|
|
2020-04-25 19:58:00 +08:00
|
|
|
// If --emit-reloc or -r is given, preserve symbols referenced by relocations
|
|
|
|
// from live sections.
|
|
|
|
if (config->copyRelocs && sym.used)
|
2016-01-28 02:04:26 +08:00
|
|
|
return true;
|
2019-04-08 14:45:07 +08:00
|
|
|
|
2020-04-24 11:32:49 +08:00
|
|
|
// Exclude local symbols pointing to .ARM.exidx sections.
|
|
|
|
// They are probably mapping symbols "$d", which are optional for these
|
|
|
|
// sections. After merging the .ARM.exidx sections, some of these symbols
|
|
|
|
// may become dangling. The easiest way to avoid the issue is not to add
|
|
|
|
// them to the symbol table from the beginning.
|
|
|
|
if (config->emachine == EM_ARM && sym.section &&
|
|
|
|
sym.section->type == SHT_ARM_EXIDX)
|
|
|
|
return false;
|
|
|
|
|
2020-04-18 01:29:25 +08:00
|
|
|
if (config->discard == DiscardPolicy::None)
|
2019-04-08 14:45:07 +08:00
|
|
|
return true;
|
2020-04-18 01:29:25 +08:00
|
|
|
if (config->discard == DiscardPolicy::All)
|
|
|
|
return false;
|
2016-01-28 02:04:26 +08:00
|
|
|
|
|
|
|
// In ELF assembly .L symbols are normally discarded by the assembler.
|
|
|
|
// If the assembler fails to do so, the linker discards them if
|
|
|
|
// * --discard-locals is used.
|
|
|
|
// * The symbol is in a SHF_MERGE section, which is normally the reason for
|
|
|
|
// the assembler keeping the .L symbol.
|
2021-11-03 15:56:09 +08:00
|
|
|
if (sym.getName().startswith(".L") &&
|
2021-10-26 02:55:31 +08:00
|
|
|
(config->discard == DiscardPolicy::Locals ||
|
|
|
|
(sym.section && (sym.section->flags & SHF_MERGE))))
|
2016-01-28 02:04:26 +08:00
|
|
|
return false;
|
2021-10-26 02:55:31 +08:00
|
|
|
return true;
|
2016-01-28 02:04:26 +08:00
|
|
|
}
|
|
|
|
|
2017-11-04 05:21:47 +08:00
|
|
|
static bool includeInSymtab(const Symbol &b) {
|
2017-11-06 12:35:31 +08:00
|
|
|
if (auto *d = dyn_cast<Defined>(&b)) {
|
2016-05-06 00:40:28 +08:00
|
|
|
// Always include absolute symbols.
|
2017-03-09 06:36:28 +08:00
|
|
|
SectionBase *sec = d->section;
|
|
|
|
if (!sec)
|
2016-05-06 00:40:28 +08:00
|
|
|
return true;
|
2018-10-25 05:59:58 +08:00
|
|
|
|
2017-12-14 06:59:23 +08:00
|
|
|
// Exclude symbols pointing to garbage-collected sections.
|
2019-05-29 11:55:20 +08:00
|
|
|
if (isa<InputSectionBase>(sec) && !sec->isLive())
|
2017-12-14 06:59:23 +08:00
|
|
|
return false;
|
2018-10-25 05:59:58 +08:00
|
|
|
|
2017-03-09 06:36:28 +08:00
|
|
|
if (auto *s = dyn_cast<MergeInputSection>(sec))
|
2016-05-22 08:41:38 +08:00
|
|
|
if (!s->getSectionPiece(d->value)->live)
|
2016-05-06 00:40:28 +08:00
|
|
|
return false;
|
2017-11-29 04:17:58 +08:00
|
|
|
return true;
|
2016-05-06 00:40:28 +08:00
|
|
|
}
|
2017-11-29 04:17:58 +08:00
|
|
|
return b.used;
|
2016-05-06 00:40:28 +08:00
|
|
|
}
|
2016-05-06 00:38:46 +08:00
|
|
|
|
2015-10-09 07:49:30 +08:00
|
|
|
// Local symbols are not in the linker's symbol table. This function scans
|
|
|
|
// each object file's symbol table to copy local symbols to the output.
|
|
|
|
template <class ELFT> void Writer<ELFT>::copyLocalSymbols() {
|
2018-09-26 03:26:58 +08:00
|
|
|
if (!in.symTab)
|
2016-01-21 11:07:38 +08:00
|
|
|
return;
|
2020-11-03 22:41:09 +08:00
|
|
|
llvm::TimeTraceScope timeScope("Add local symbols");
|
2020-04-25 19:58:00 +08:00
|
|
|
if (config->copyRelocs && config->discard != DiscardPolicy::None)
|
|
|
|
markUsedLocalSymbols<ELFT>();
|
2021-12-15 16:37:10 +08:00
|
|
|
for (ELFFileBase *file : objectFiles) {
|
|
|
|
for (Symbol *b : file->getLocalSymbols()) {
|
2020-06-20 00:05:28 +08:00
|
|
|
assert(b->isLocal() && "should have been caught in initializeSymbols()");
|
2017-11-06 12:35:31 +08:00
|
|
|
auto *dr = dyn_cast<Defined>(b);
|
2016-11-24 02:07:33 +08:00
|
|
|
|
2016-04-04 22:04:16 +08:00
|
|
|
// No reason to keep local undefined symbol in symtab.
|
|
|
|
if (!dr)
|
|
|
|
continue;
|
2021-12-23 16:59:29 +08:00
|
|
|
if (includeInSymtab(*b) && shouldKeepInSymtab(*dr))
|
|
|
|
in.symTab->addSymbol(b);
|
2015-10-09 07:49:30 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-10-25 05:59:58 +08:00
|
|
|
// Create a section symbol for each output section so that we can represent
|
|
|
|
// relocations that point to the section. If we know that no relocation is
|
|
|
|
// referring to a section (that happens if the section is a synthetic one), we
|
|
|
|
// don't create a section symbol for that section.
|
2017-02-11 09:40:49 +08:00
|
|
|
template <class ELFT> void Writer<ELFT>::addSectionSymbols() {
|
2021-11-26 12:24:23 +08:00
|
|
|
for (SectionCommand *cmd : script->sectionCommands) {
|
|
|
|
auto *sec = dyn_cast<OutputSection>(cmd);
|
2017-07-28 03:22:43 +08:00
|
|
|
if (!sec)
|
2017-03-01 03:43:54 +08:00
|
|
|
continue;
|
2021-11-26 12:24:23 +08:00
|
|
|
auto i = llvm::find_if(sec->commands, [](SectionCommand *cmd) {
|
|
|
|
if (auto *isd = dyn_cast<InputSectionDescription>(cmd))
|
2017-07-05 03:08:40 +08:00
|
|
|
return !isd->sections.empty();
|
|
|
|
return false;
|
|
|
|
});
|
2021-11-26 08:47:07 +08:00
|
|
|
if (i == sec->commands.end())
|
2017-07-05 03:08:40 +08:00
|
|
|
continue;
|
2019-09-24 19:48:31 +08:00
|
|
|
InputSectionBase *isec = cast<InputSectionDescription>(*i)->sections[0];
|
2017-11-16 20:33:36 +08:00
|
|
|
|
|
|
|
// Relocations are not using REL[A] section symbols.
|
|
|
|
if (isec->type == SHT_REL || isec->type == SHT_RELA)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
// Unlike other synthetic sections, mergeable output sections contain data
|
|
|
|
// copied from input sections, and there may be a relocation pointing to its
|
2021-10-26 03:52:06 +08:00
|
|
|
// contents if -r or --emit-reloc is given.
|
2017-11-16 20:33:36 +08:00
|
|
|
if (isa<SyntheticSection>(isec) && !(isec->flags & SHF_MERGE))
|
2017-02-11 09:40:49 +08:00
|
|
|
continue;
|
|
|
|
|
2020-11-03 00:37:15 +08:00
|
|
|
// Set the symbol to be relative to the output section so that its st_value
|
|
|
|
// equals the output section address. Note, there may be a gap between the
|
|
|
|
// start of the output section and isec.
|
2021-12-16 05:15:02 +08:00
|
|
|
in.symTab->addSymbol(
|
|
|
|
makeDefined(isec->file, "", STB_LOCAL, /*stOther=*/0, STT_SECTION,
|
|
|
|
/*value=*/0, /*size=*/0, isec->getOutputSection()));
|
2017-02-11 09:40:49 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-02-16 12:51:46 +08:00
|
|
|
// Today's loaders have a feature to make segments read-only after
|
|
|
|
// processing dynamic relocations to enhance security. PT_GNU_RELRO
|
|
|
|
// is defined for that.
|
|
|
|
//
|
|
|
|
// This function returns true if a section needs to be put into a
|
|
|
|
// PT_GNU_RELRO segment.
|
2017-07-25 07:55:33 +08:00
|
|
|
static bool isRelroSection(const OutputSection *sec) {
|
2016-02-11 06:43:13 +08:00
|
|
|
if (!config->zRelro)
|
|
|
|
return false;
|
2017-02-16 12:51:46 +08:00
|
|
|
|
2016-11-10 07:23:45 +08:00
|
|
|
uint64_t flags = sec->flags;
|
2017-04-13 13:40:07 +08:00
|
|
|
|
|
|
|
// Non-allocatable or non-writable sections don't need RELRO because
|
|
|
|
// they are not writable or not even mapped to memory in the first place.
|
|
|
|
// RELRO is for sections that are essentially read-only but need to
|
|
|
|
// be writable only at process startup to allow dynamic linker to
|
|
|
|
// apply relocations.
|
2015-11-24 18:15:50 +08:00
|
|
|
if (!(flags & SHF_ALLOC) || !(flags & SHF_WRITE))
|
|
|
|
return false;
|
2017-04-13 13:40:07 +08:00
|
|
|
|
|
|
|
// Once initialized, TLS data segments are used as data templates
|
|
|
|
// for a thread-local storage. For each new thread, runtime
|
|
|
|
// allocates memory for a TLS and copy templates there. No thread
|
|
|
|
// are supposed to use templates directly. Thus, it can be in RELRO.
|
2015-12-11 03:13:08 +08:00
|
|
|
if (flags & SHF_TLS)
|
|
|
|
return true;
|
2017-02-16 12:51:46 +08:00
|
|
|
|
2017-04-13 13:40:07 +08:00
|
|
|
// .init_array, .preinit_array and .fini_array contain pointers to
|
|
|
|
// functions that are executed on process startup or exit. These
|
|
|
|
// pointers are set by the static linker, and they are not expected
|
|
|
|
// to change at runtime. But if you are an attacker, you could do
|
|
|
|
// interesting things by manipulating pointers in .fini_array, for
|
|
|
|
// example. So they are put into RELRO.
|
2016-11-09 09:42:41 +08:00
|
|
|
uint32_t type = sec->type;
|
2015-12-11 03:13:08 +08:00
|
|
|
if (type == SHT_INIT_ARRAY || type == SHT_FINI_ARRAY ||
|
|
|
|
type == SHT_PREINIT_ARRAY)
|
2015-11-24 18:15:50 +08:00
|
|
|
return true;
|
2017-02-16 12:51:46 +08:00
|
|
|
|
2017-04-13 13:40:07 +08:00
|
|
|
// .got contains pointers to external symbols. They are resolved by
|
|
|
|
// the dynamic linker when a module is loaded into memory, and after
|
|
|
|
// that they are not expected to change. So, it can be in RELRO.
|
2018-09-26 03:26:58 +08:00
|
|
|
if (in.got && sec == in.got->getParent())
|
2017-04-13 13:40:07 +08:00
|
|
|
return true;
|
|
|
|
|
2018-10-25 05:59:48 +08:00
|
|
|
// .toc is a GOT-ish section for PowerPC64. Their contents are accessed
|
|
|
|
// through r2 register, which is reserved for that purpose. Since r2 is used
|
|
|
|
// for accessing .got as well, .got and .toc need to be close enough in the
|
|
|
|
// virtual address space. Usually, .toc comes just after .got. Since we place
|
|
|
|
// .got into RELRO, .toc needs to be placed into RELRO too.
|
2018-05-24 23:59:41 +08:00
|
|
|
if (sec->name.equals(".toc"))
|
|
|
|
return true;
|
|
|
|
|
2017-04-13 13:40:07 +08:00
|
|
|
// .got.plt contains pointers to external function symbols. They are
|
|
|
|
// by default resolved lazily, so we usually cannot put it into RELRO.
|
|
|
|
// However, if "-z now" is given, the lazy symbol resolution is
|
|
|
|
// disabled, which enables us to put it into RELRO.
|
2018-09-26 03:26:58 +08:00
|
|
|
if (sec == in.gotPlt->getParent())
|
2015-11-24 18:15:50 +08:00
|
|
|
return config->zNow;
|
2017-04-13 13:40:07 +08:00
|
|
|
|
|
|
|
// .dynamic section contains data for the dynamic linker, and
|
|
|
|
// there's no need to write to it at runtime, so it's better to put
|
|
|
|
// it into RELRO.
|
2019-06-08 01:57:58 +08:00
|
|
|
if (sec->name == ".dynamic")
|
2016-11-17 05:01:02 +08:00
|
|
|
return true;
|
2017-04-13 13:40:07 +08:00
|
|
|
|
|
|
|
// Sections with some special names are put into RELRO. This is a
|
|
|
|
// bit unfortunate because section names shouldn't be significant in
|
|
|
|
// ELF in spirit. But in reality many linker features depend on
|
|
|
|
// magic section names.
|
2017-02-24 22:28:00 +08:00
|
|
|
StringRef s = sec->name;
|
2017-12-05 19:15:58 +08:00
|
|
|
return s == ".data.rel.ro" || s == ".bss.rel.ro" || s == ".ctors" ||
|
|
|
|
s == ".dtors" || s == ".jcr" || s == ".eh_frame" ||
|
2020-03-27 18:15:02 +08:00
|
|
|
s == ".fini_array" || s == ".init_array" ||
|
|
|
|
s == ".openbsd.randomdata" || s == ".preinit_array";
|
2015-11-24 18:15:50 +08:00
|
|
|
}
|
|
|
|
|
2017-05-12 22:52:22 +08:00
|
|
|
// We compute a rank for each section. The rank indicates where the
// section should be placed in the file. Instead of using simple
// numbers (0,1,2...), we use a series of flags. One for each decision
// point when placing the section.
// Using flags has two key properties:
// * It is easy to check if a given branch was taken.
// * It is easy to see how similar two ranks are (see getRankProximity).
//
// Higher bits correspond to earlier decisions in getSectionRank, so a
// section's final rank is a big-endian encoding of the branch history.
enum RankFlags {
  // Set when the section is NOT given an explicit address via -Tsection;
  // explicitly placed sections sort first so VA assignment can start there.
  RF_NOT_ADDR_SET = 1 << 27,
  RF_NOT_ALLOC = 1 << 26,
  RF_PARTITION = 1 << 18, // Partition number (8 bits)
  RF_NOT_PART_EHDR = 1 << 17,
  RF_NOT_PART_PHDR = 1 << 16,
  RF_NOT_INTERP = 1 << 15,
  RF_NOT_NOTE = 1 << 14,
  // Permission ordering: R, RX, RWX, RW (see getSectionRank for rationale).
  RF_WRITE = 1 << 13,
  RF_EXEC_WRITE = 1 << 12,
  RF_EXEC = 1 << 11,
  RF_RODATA = 1 << 10,
  RF_NOT_RELRO = 1 << 9,
  RF_NOT_TLS = 1 << 8,
  RF_BSS = 1 << 7,
  // Target-specific tie-breakers (lowest priority).
  RF_PPC_NOT_TOCBSS = 1 << 6,
  RF_PPC_TOCL = 1 << 5,
  RF_PPC_TOC = 1 << 4,
  RF_PPC_GOT = 1 << 3,
  RF_PPC_BRANCH_LT = 1 << 2,
  RF_MIPS_GPREL = 1 << 1,
  RF_MIPS_NOT_GOT = 1 << 0
};
|
|
|
|
|
|
|
|
// Computes the placement rank for an output section. See the RankFlags
// comment above: each decision point ORs in a flag, so sections that take
// the same branches end up with similar ranks and are placed together.
static unsigned getSectionRank(const OutputSection *sec) {
  // The partition number occupies the highest variable bits so that all
  // sections of a partition stay contiguous.
  unsigned rank = sec->partition * RF_PARTITION;

  // We want to put section specified by -T option first, so we
  // can start assigning VA starting from them later.
  if (config->sectionStartMap.count(sec->name))
    return rank;
  rank |= RF_NOT_ADDR_SET;

  // Allocatable sections go first to reduce the total PT_LOAD size and
  // so debug info doesn't change addresses in actual code.
  if (!(sec->flags & SHF_ALLOC))
    return rank | RF_NOT_ALLOC;

  // Partition ELF/program headers (for loadable partitions) come before
  // anything else in their partition.
  if (sec->type == SHT_LLVM_PART_EHDR)
    return rank;
  rank |= RF_NOT_PART_EHDR;

  if (sec->type == SHT_LLVM_PART_PHDR)
    return rank;
  rank |= RF_NOT_PART_PHDR;

  // Put .interp first because some loaders want to see that section
  // on the first page of the executable file when loaded into memory.
  if (sec->name == ".interp")
    return rank;
  rank |= RF_NOT_INTERP;

  // Put .note sections (which make up one PT_NOTE) at the beginning so that
  // they are likely to be included in a core file even if core file size is
  // limited. In particular, we want a .note.gnu.build-id and a .note.tag to be
  // included in a core to match core files with executables.
  if (sec->type == SHT_NOTE)
    return rank;
  rank |= RF_NOT_NOTE;

  // Sort sections based on their access permission in the following
  // order: R, RX, RWX, RW. This order is based on the following
  // considerations:
  // * Read-only sections come first such that they go in the
  //   PT_LOAD covering the program headers at the start of the file.
  // * Read-only, executable sections come next.
  // * Writable, executable sections follow such that .plt on
  //   architectures where it needs to be writable will be placed
  //   between .text and .data.
  // * Writable sections come last, such that .bss lands at the very
  //   end of the last PT_LOAD.
  bool isExec = sec->flags & SHF_EXECINSTR;
  bool isWrite = sec->flags & SHF_WRITE;

  if (isExec) {
    if (isWrite)
      rank |= RF_EXEC_WRITE;
    else
      rank |= RF_EXEC;
  } else if (isWrite) {
    rank |= RF_WRITE;
  } else if (sec->type == SHT_PROGBITS) {
    // Make non-executable and non-writable PROGBITS sections (e.g .rodata
    // .eh_frame) closer to .text. They likely contain PC or GOT relative
    // relocations and there could be relocation overflow if other huge sections
    // (.dynstr .dynsym) were placed in between.
    rank |= RF_RODATA;
  }

  // Place RelRo sections first. After considering SHT_NOBITS below, the
  // ordering is PT_LOAD(PT_GNU_RELRO(.data.rel.ro .bss.rel.ro) | .data .bss),
  // where | marks where page alignment happens. An alternative ordering is
  // PT_LOAD(.data | PT_GNU_RELRO( .data.rel.ro .bss.rel.ro) | .bss), but it may
  // waste more bytes due to 2 alignment places.
  if (!isRelroSection(sec))
    rank |= RF_NOT_RELRO;

  // If we got here we know that both A and B are in the same PT_LOAD.

  // The TLS initialization block needs to be a single contiguous block in a R/W
  // PT_LOAD, so stick TLS sections directly before the other RelRo R/W
  // sections. Since p_filesz can be less than p_memsz, place NOBITS sections
  // after PROGBITS.
  if (!(sec->flags & SHF_TLS))
    rank |= RF_NOT_TLS;

  // Within TLS sections, or within other RelRo sections, or within non-RelRo
  // sections, place non-NOBITS sections first.
  if (sec->type == SHT_NOBITS)
    rank |= RF_BSS;

  // Some architectures have additional ordering restrictions for sections
  // within the same PT_LOAD.
  if (config->emachine == EM_PPC64) {
    // PPC64 has a number of special SHT_PROGBITS+SHF_ALLOC+SHF_WRITE sections
    // that we would like to make sure appear is a specific order to maximize
    // their coverage by a single signed 16-bit offset from the TOC base
    // pointer. Conversely, the special .tocbss section should be first among
    // all SHT_NOBITS sections. This will put it next to the loaded special
    // PPC64 sections (and, thus, within reach of the TOC base pointer).
    StringRef name = sec->name;
    if (name != ".tocbss")
      rank |= RF_PPC_NOT_TOCBSS;

    if (name == ".toc1")
      rank |= RF_PPC_TOCL;

    if (name == ".toc")
      rank |= RF_PPC_TOC;

    if (name == ".got")
      rank |= RF_PPC_GOT;

    if (name == ".branch_lt")
      rank |= RF_PPC_BRANCH_LT;
  }

  if (config->emachine == EM_MIPS) {
    // All sections with SHF_MIPS_GPREL flag should be grouped together
    // because data in these sections is addressable with a gp relative address.
    if (sec->flags & SHF_MIPS_GPREL)
      rank |= RF_MIPS_GPREL;

    // On MIPS, .got is placed last among GP-relative-addressable sections.
    if (sec->name != ".got")
      rank |= RF_MIPS_NOT_GOT;
  }

  return rank;
}
|
|
|
|
|
2021-11-26 12:24:23 +08:00
|
|
|
static bool compareSections(const SectionCommand *aCmd,
|
|
|
|
const SectionCommand *bCmd) {
|
2017-07-28 03:22:43 +08:00
|
|
|
const OutputSection *a = cast<OutputSection>(aCmd);
|
|
|
|
const OutputSection *b = cast<OutputSection>(bCmd);
|
2018-10-25 05:59:58 +08:00
|
|
|
|
2017-05-12 22:52:22 +08:00
|
|
|
if (a->sortRank != b->sortRank)
|
|
|
|
return a->sortRank < b->sortRank;
|
2018-10-25 05:59:58 +08:00
|
|
|
|
2017-05-12 22:52:22 +08:00
|
|
|
if (!(a->sortRank & RF_NOT_ADDR_SET))
|
|
|
|
return config->sectionStartMap.lookup(a->name) <
|
|
|
|
config->sectionStartMap.lookup(b->name);
|
2016-09-22 06:36:19 +08:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2017-02-24 23:07:30 +08:00
|
|
|
// Appends an output section to this program header, extending the covered
// [firstSec, lastSec] range and raising the segment alignment as needed.
void PhdrEntry::add(OutputSection *sec) {
  // The first section added starts the segment; every later one extends it.
  if (firstSec == nullptr)
    firstSec = sec;
  lastSec = sec;

  // A segment must be aligned at least as strictly as any member section.
  if (sec->alignment > p_align)
    p_align = sec->alignment;

  // Remember which PT_LOAD maps this section; address assignment uses it.
  if (p_type == PT_LOAD)
    sec->ptLoad = this;
}
|
|
|
|
|
2015-12-26 17:47:57 +08:00
|
|
|
// The beginning and the ending of .rel[a].plt section are marked
|
|
|
|
// with __rel[a]_iplt_{start,end} symbols if it is a statically linked
|
|
|
|
// executable. The runtime needs these symbols in order to resolve
|
|
|
|
// all IRELATIVE relocs on startup. For dynamic executables, we don't
|
|
|
|
// need these symbols, since IRELATIVE relocs are resolved through GOT
|
|
|
|
// and PLT. For details, see http://www.airs.com/blog/archives/403.
|
2016-04-14 21:23:02 +08:00
|
|
|
template <class ELFT> void Writer<ELFT>::addRelIpltSymbols() {
|
2021-07-16 02:31:11 +08:00
|
|
|
if (config->relocatable || config->isPic)
|
2015-12-21 18:12:06 +08:00
|
|
|
return;
|
2015-12-26 17:47:57 +08:00
|
|
|
|
2019-01-16 02:30:23 +08:00
|
|
|
// By default, __rela_iplt_{start,end} belong to a dummy section 0
|
|
|
|
// because .rela.plt might be empty and thus removed from output.
|
2019-07-16 13:50:45 +08:00
|
|
|
// We'll override Out::elfHeader with In.relaIplt later when we are
|
2019-01-16 02:30:23 +08:00
|
|
|
// sure that .rela.plt exists in output.
|
|
|
|
ElfSym::relaIpltStart = addOptionalRegular(
|
|
|
|
config->isRela ? "__rela_iplt_start" : "__rel_iplt_start",
|
2021-09-26 06:47:27 +08:00
|
|
|
Out::elfHeader, 0, STV_HIDDEN);
|
2019-01-16 02:30:23 +08:00
|
|
|
|
|
|
|
ElfSym::relaIpltEnd = addOptionalRegular(
|
|
|
|
config->isRela ? "__rela_iplt_end" : "__rel_iplt_end",
|
2021-09-26 06:47:27 +08:00
|
|
|
Out::elfHeader, 0, STV_HIDDEN);
|
2015-12-21 18:12:06 +08:00
|
|
|
}
|
|
|
|
|
2017-09-01 10:23:31 +08:00
|
|
|
// This function generates assignments for predefined symbols (e.g. _end or
// _etext) and inserts them into the commands sequence to be processed at the
// appropriate time. This ensures that the value is going to be correct by the
// time any references to these symbols are processed and is equivalent to
// defining these symbols explicitly in the linker script.
template <class ELFT> void Writer<ELFT>::setReservedSymbolSections() {
  if (ElfSym::globalOffsetTable) {
    // The _GLOBAL_OFFSET_TABLE_ symbol is defined by target convention usually
    // to the start of the .got or .got.plt section.
    InputSection *sec = in.gotPlt.get();
    if (!target->gotBaseSymInGotPlt)
      // Targets that base the symbol on .got instead; MIPS has its own GOT.
      sec = in.mipsGot.get() ? cast<InputSection>(in.mipsGot.get())
                             : cast<InputSection>(in.got.get());
    ElfSym::globalOffsetTable->section = sec;
  }

  // .rela_iplt_{start,end} mark the start and the end of in.relaIplt.
  // They were parked on the ELF header in addRelIpltSymbols; re-point them
  // now that we know .rel[a].plt survives in the output.
  if (ElfSym::relaIpltStart && in.relaIplt->isNeeded()) {
    ElfSym::relaIpltStart->section = in.relaIplt.get();
    ElfSym::relaIpltEnd->section = in.relaIplt.get();
    ElfSym::relaIpltEnd->value = in.relaIplt->getSize();
  }

  // Scan all PT_LOAD segments to find the last one overall (for _edata/_end)
  // and the last read-only one (for _etext).
  PhdrEntry *last = nullptr;
  PhdrEntry *lastRO = nullptr;

  for (Partition &part : partitions) {
    for (PhdrEntry *p : part.phdrs) {
      if (p->p_type != PT_LOAD)
        continue;
      last = p;
      if (!(p->p_flags & PF_W))
        lastRO = p;
    }
  }

  if (lastRO) {
    // _etext is the first location after the last read-only loadable segment.
    if (ElfSym::etext1)
      ElfSym::etext1->section = lastRO->lastSec;
    if (ElfSym::etext2)
      ElfSym::etext2->section = lastRO->lastSec;
  }

  if (last) {
    // _edata points to the end of the last mapped initialized section.
    // Walk outputSections up to the last PT_LOAD's final section and keep
    // the most recent non-NOBITS (i.e. file-backed) section seen.
    OutputSection *edata = nullptr;
    for (OutputSection *os : outputSections) {
      if (os->type != SHT_NOBITS)
        edata = os;
      if (os == last->lastSec)
        break;
    }

    if (ElfSym::edata1)
      ElfSym::edata1->section = edata;
    if (ElfSym::edata2)
      ElfSym::edata2->section = edata;

    // _end is the first location after the uninitialized data region.
    if (ElfSym::end1)
      ElfSym::end1->section = last->lastSec;
    if (ElfSym::end2)
      ElfSym::end2->section = last->lastSec;
  }

  if (ElfSym::bss)
    ElfSym::bss->section = findSection(".bss");

  // Setup MIPS _gp_disp/__gnu_local_gp symbols which should
  // be equal to the _gp symbol's value.
  if (ElfSym::mipsGp) {
    // Find GP-relative section with the lowest address
    // and use this address to calculate default _gp value.
    // (0x7ff0 biases _gp into the middle of the 16-bit-reachable window.)
    for (OutputSection *os : outputSections) {
      if (os->flags & SHF_MIPS_GPREL) {
        ElfSym::mipsGp->section = os;
        ElfSym::mipsGp->value = 0x7ff0;
        break;
      }
    }
  }
}
|
|
|
|
|
2017-05-12 22:52:22 +08:00
|
|
|
// We want to find how similar two ranks are.
|
|
|
|
// The more branches in getSectionRank that match, the more similar they are.
|
|
|
|
// Since each branch corresponds to a bit flag, we can just use
|
|
|
|
// countLeadingZeros.
|
2017-07-28 03:22:43 +08:00
|
|
|
static int getRankProximityAux(OutputSection *a, OutputSection *b) {
|
2017-05-12 22:52:22 +08:00
|
|
|
return countLeadingZeros(a->sortRank ^ b->sortRank);
|
2016-11-08 18:44:48 +08:00
|
|
|
}
|
|
|
|
|
2021-11-26 12:24:23 +08:00
|
|
|
// Proximity of an orphan section `a` to an arbitrary linker-script command
// `b`. Commands that are not output sections, or output sections that
// received no input sections, are maximally dissimilar (-1).
static int getRankProximity(OutputSection *a, SectionCommand *b) {
  if (auto *sec = dyn_cast<OutputSection>(b))
    if (sec->hasInputSections)
      return getRankProximityAux(a, sec);
  return -1;
}
|
|
|
|
|
|
|
|
// When placing orphan sections, we want to place them after symbol assignments
|
|
|
|
// so that an orphan after
|
|
|
|
// begin_foo = .;
|
|
|
|
// foo : { *(foo) }
|
|
|
|
// end_foo = .;
|
|
|
|
// doesn't break the intended meaning of the begin/end symbols.
|
|
|
|
// We don't want to go over sections since findOrphanPos is the
|
|
|
|
// one in charge of deciding the order of the sections.
|
|
|
|
// We don't want to go over changes to '.', since doing so in
|
|
|
|
// rx_sec : { *(rx_sec) }
|
|
|
|
// . = ALIGN(0x1000);
|
|
|
|
// /* The RW PT_LOAD starts here*/
|
|
|
|
// rw_sec : { *(rw_sec) }
|
|
|
|
// would mean that the RW PT_LOAD would become unaligned.
|
2021-11-26 12:24:23 +08:00
|
|
|
static bool shouldSkip(SectionCommand *cmd) {
|
2017-06-16 05:51:01 +08:00
|
|
|
if (auto *assign = dyn_cast<SymbolAssignment>(cmd))
|
|
|
|
return assign->name != ".";
|
2018-07-04 23:05:21 +08:00
|
|
|
return false;
|
2017-06-16 05:51:01 +08:00
|
|
|
}
|
|
|
|
|
2017-05-12 22:52:22 +08:00
|
|
|
// We want to place orphan sections so that they share as much
// characteristics with their neighbors as possible. For example, if
// both are rw, or both are tls.
//
// The orphan's own command sits at *e; the function returns the iterator
// in [b, e] before which the orphan should be inserted.
static SmallVectorImpl<SectionCommand *>::iterator
findOrphanPos(SmallVectorImpl<SectionCommand *>::iterator b,
              SmallVectorImpl<SectionCommand *>::iterator e) {
  OutputSection *sec = cast<OutputSection>(*e);

  // Find the first element that has as close a rank as possible.
  auto i = std::max_element(b, e, [=](SectionCommand *a, SectionCommand *b) {
    return getRankProximity(sec, a) < getRankProximity(sec, b);
  });
  if (i == e)
    return e;
  auto foundSec = dyn_cast<OutputSection>(*i);
  if (!foundSec)
    return e;

  // Consider all existing sections with the same proximity.
  int proximity = getRankProximity(sec, *i);
  unsigned sortRank = sec->sortRank;
  if (script->hasPhdrsCommands() || !script->memoryRegions.empty())
    // Prevent the orphan section to be placed before the found section. If
    // custom program headers are defined, that helps to avoid adding it to a
    // previous segment and changing flags of that segment, for example, making
    // a read-only segment writable. If memory regions are defined, an orphan
    // section should continue the same region as the found section to better
    // resemble the behavior of GNU ld.
    sortRank = std::max(sortRank, foundSec->sortRank);
  // Advance past every section that is equally close in rank but should
  // still sort before the orphan.
  for (; i != e; ++i) {
    auto *curSec = dyn_cast<OutputSection>(*i);
    if (!curSec || !curSec->hasInputSections)
      continue;
    if (getRankProximity(sec, curSec) != proximity ||
        sortRank < curSec->sortRank)
      break;
  }

  auto isOutputSecWithInputSections = [](SectionCommand *cmd) {
    auto *os = dyn_cast<OutputSection>(cmd);
    return os && os->hasInputSections;
  };
  // Back up over non-section commands (assignments etc.): scan backwards
  // from i for the nearest real section; j.base() lands just after it.
  auto j =
      std::find_if(std::make_reverse_iterator(i), std::make_reverse_iterator(b),
                   isOutputSecWithInputSections);
  i = j.base();

  // As a special case, if the orphan section is the last section, put
  // it at the very end, past any other commands.
  // This matches bfd's behavior and is convenient when the linker script fully
  // specifies the start of the file, but doesn't care about the end (the non
  // alloc sections for example).
  auto nextSec = std::find_if(i, e, isOutputSecWithInputSections);
  if (nextSec == e)
    return e;

  // Skip trailing symbol assignments (but not '.' assignments) so begin/end
  // symbol pairs keep bracketing the section they were written around.
  while (i != e && shouldSkip(*i))
    ++i;
  return i;
}
|
|
|
|
|
2020-02-20 05:19:58 +08:00
|
|
|
// Adds random priorities to sections not already in the map.
// Implements --shuffle-sections: for each <glob>=<seed> pair, the sections
// matching the glob are permuted in place (reversed for seed == -1), and
// every section absent from `order` then receives a stable priority >= 0.
static void maybeShuffle(DenseMap<const InputSectionBase *, int> &order) {
  if (config->shuffleSections.empty())
    return;

  // Work on a copy of the global section list; `sections` holds the
  // progressively permuted order.
  SmallVector<InputSectionBase *, 0> matched, sections = inputSections;
  matched.reserve(sections.size());
  for (const auto &patAndSeed : config->shuffleSections) {
    matched.clear();
    for (InputSectionBase *sec : sections)
      if (patAndSeed.first.match(sec->name))
        matched.push_back(sec);
    const uint32_t seed = patAndSeed.second;
    if (seed == UINT32_MAX) {
      // If --shuffle-sections <section-glob>=-1, reverse the section order. The
      // section order is stable even if the number of sections changes. This is
      // useful to catch issues like static initialization order fiasco
      // reliably.
      std::reverse(matched.begin(), matched.end());
    } else {
      // seed == 0 requests a fresh random seed each run.
      std::mt19937 g(seed ? seed : std::random_device()());
      llvm::shuffle(matched.begin(), matched.end(), g);
    }
    // Write the permuted sections back into the slots they were taken from,
    // leaving non-matching sections where they were.
    size_t i = 0;
    for (InputSectionBase *&sec : sections)
      if (patAndSeed.first.match(sec->name))
        sec = matched[i++];
  }

  // Existing priorities are < 0, so use priorities >= 0 for the missing
  // sections.
  int prio = 0;
  for (InputSectionBase *sec : sections) {
    if (order.try_emplace(sec, prio).second)
      ++prio;
  }
}
|
|
|
|
|
2018-01-31 00:24:04 +08:00
|
|
|
// Builds section order for handling --symbol-ordering-file.
// Returns a map from input sections to priorities; sections mentioned
// (via their defining symbols) in the ordering file get negative
// priorities, lower = earlier in the output.
static DenseMap<const InputSectionBase *, int> buildSectionOrder() {
  DenseMap<const InputSectionBase *, int> sectionOrder;
  // Use the rarely used option --call-graph-ordering-file to sort sections.
  if (!config->callGraphProfile.empty())
    return computeCallGraphProfileOrder();

  if (config->symbolOrderingFile.empty())
    return sectionOrder;

  struct SymbolOrderEntry {
    int priority;       // Negative; file order decides the value.
    bool present;       // Set once a matching symbol is actually seen.
  };

  // Build a map from symbols to their priorities. Symbols that didn't
  // appear in the symbol ordering file have the lowest priority 0.
  // All explicitly mentioned symbols have negative (higher) priorities.
  DenseMap<CachedHashStringRef, SymbolOrderEntry> symbolOrder;
  int priority = -config->symbolOrderingFile.size();
  for (StringRef s : config->symbolOrderingFile)
    symbolOrder.insert({CachedHashStringRef(s), {priority++, false}});

  // Build a map from sections to their priorities.
  auto addSym = [&](Symbol &sym) {
    auto it = symbolOrder.find(CachedHashStringRef(sym.getName()));
    if (it == symbolOrder.end())
      return;
    SymbolOrderEntry &ent = it->second;
    ent.present = true;

    maybeWarnUnorderableSymbol(&sym);

    if (auto *d = dyn_cast<Defined>(&sym)) {
      if (auto *sec = dyn_cast_or_null<InputSectionBase>(d->section)) {
        // If several ordered symbols live in one section, the section takes
        // the highest (most negative) priority among them.
        int &priority = sectionOrder[cast<InputSectionBase>(sec)];
        priority = std::min(priority, ent.priority);
      }
    }
  };

  // We want both global and local symbols. We get the global ones from the
  // symbol table and iterate the object files for the local ones.
  for (Symbol *sym : symtab->symbols())
    addSym(*sym);

  for (ELFFileBase *file : objectFiles)
    for (Symbol *sym : file->getLocalSymbols())
      addSym(*sym);

  if (config->warnSymbolOrdering)
    for (auto orderEntry : symbolOrder)
      if (!orderEntry.second.present)
        warn("symbol ordering file: no such symbol: " + orderEntry.first.val());

  return sectionOrder;
}
|
|
|
|
|
2018-03-31 05:36:54 +08:00
|
|
|
// Sorts the sections in ISD according to the provided section order.
|
|
|
|
static void
|
|
|
|
sortISDBySectionOrder(InputSectionDescription *isd,
|
|
|
|
const DenseMap<const InputSectionBase *, int> &order) {
|
2021-12-27 05:53:47 +08:00
|
|
|
SmallVector<InputSection *, 0> unorderedSections;
|
|
|
|
SmallVector<std::pair<InputSection *, int>, 0> orderedSections;
|
2018-03-31 05:36:54 +08:00
|
|
|
uint64_t unorderedSize = 0;
|
[Coding style change] Rename variables so that they start with a lowercase letter
This patch is mechanically generated by clang-llvm-rename tool that I wrote
using Clang Refactoring Engine just for creating this patch. You can see the
source code of the tool at https://reviews.llvm.org/D64123. There's no manual
post-processing; you can generate the same patch by re-running the tool against
lld's code base.
Here is the main discussion thread to change the LLVM coding style:
https://lists.llvm.org/pipermail/llvm-dev/2019-February/130083.html
In the discussion thread, I proposed we use lld as a testbed for variable
naming scheme change, and this patch does that.
I chose to rename variables so that they are in camelCase, just because that
is a minimal change to make variables to start with a lowercase letter.
Note to downstream patch maintainers: if you are maintaining a downstream lld
repo, just rebasing ahead of this commit would cause massive merge conflicts
because this patch essentially changes every line in the lld subdirectory. But
there's a remedy.
clang-llvm-rename tool is a batch tool, so you can rename variables in your
downstream repo with the tool. Given that, here is how to rebase your repo to
a commit after the mass renaming:
1. rebase to the commit just before the mass variable renaming,
2. apply the tool to your downstream repo to mass-rename variables locally, and
3. rebase again to the head.
Most changes made by the tool should be identical for a downstream repo and
for the head, so at the step 3, almost all changes should be merged and
disappear. I'd expect that there would be some lines that you need to merge by
hand, but that shouldn't be too many.
Differential Revision: https://reviews.llvm.org/D64121
llvm-svn: 365595
2019-07-10 13:00:37 +08:00
|
|
|
|
2018-03-31 05:36:54 +08:00
|
|
|
for (InputSection *isec : isd->sections) {
|
2018-04-04 03:45:10 +08:00
|
|
|
auto i = order.find(isec);
|
|
|
|
if (i == order.end()) {
|
2018-03-31 05:36:54 +08:00
|
|
|
unorderedSections.push_back(isec);
|
|
|
|
unorderedSize += isec->getSize();
|
|
|
|
continue;
|
|
|
|
}
|
2018-04-04 03:45:10 +08:00
|
|
|
orderedSections.push_back({isec, i->second});
|
2018-03-31 05:36:54 +08:00
|
|
|
}
|
2019-08-24 16:40:20 +08:00
|
|
|
llvm::sort(orderedSections, llvm::less_second());
|
2018-03-31 05:36:54 +08:00
|
|
|
|
|
|
|
// Find an insertion point for the ordered section list in the unordered
|
|
|
|
// section list. On targets with limited-range branches, this is the mid-point
|
|
|
|
// of the unordered section list. This decreases the likelihood that a range
|
|
|
|
// extension thunk will be needed to enter or exit the ordered region. If the
|
|
|
|
// ordered section list is a list of hot functions, we can generally expect
|
|
|
|
// the ordered functions to be called more often than the unordered functions,
|
|
|
|
// making it more likely that any particular call will be within range, and
|
|
|
|
// therefore reducing the number of thunks required.
|
|
|
|
//
|
|
|
|
// For example, imagine that you have 8MB of hot code and 32MB of cold code.
|
|
|
|
// If the layout is:
|
|
|
|
//
|
|
|
|
// 8MB hot
|
|
|
|
// 32MB cold
|
|
|
|
//
|
|
|
|
// only the first 8-16MB of the cold code (depending on which hot function it
|
|
|
|
// is actually calling) can call the hot code without a range extension thunk.
|
|
|
|
// However, if we use this layout:
|
|
|
|
//
|
|
|
|
// 16MB cold
|
|
|
|
// 8MB hot
|
|
|
|
// 16MB cold
|
|
|
|
//
|
|
|
|
// both the last 8-16MB of the first block of cold code and the first 8-16MB
|
|
|
|
// of the second block of cold code can call the hot code without a thunk. So
|
|
|
|
// we effectively double the amount of code that could potentially call into
|
|
|
|
// the hot code without a thunk.
|
2018-04-04 04:08:45 +08:00
|
|
|
size_t insPt = 0;
|
2018-08-20 17:37:50 +08:00
|
|
|
if (target->getThunkSectionSpacing() && !orderedSections.empty()) {
|
2018-03-31 05:36:54 +08:00
|
|
|
uint64_t unorderedPos = 0;
|
2018-04-04 04:08:45 +08:00
|
|
|
for (; insPt != unorderedSections.size(); ++insPt) {
|
|
|
|
unorderedPos += unorderedSections[insPt]->getSize();
|
2018-03-31 05:36:54 +08:00
|
|
|
if (unorderedPos > unorderedSize / 2)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-04-04 04:08:45 +08:00
|
|
|
isd->sections.clear();
|
|
|
|
for (InputSection *isec : makeArrayRef(unorderedSections).slice(0, insPt))
|
|
|
|
isd->sections.push_back(isec);
|
2018-04-04 03:45:10 +08:00
|
|
|
for (std::pair<InputSection *, int> p : orderedSections)
|
2018-04-04 04:08:45 +08:00
|
|
|
isd->sections.push_back(p.first);
|
|
|
|
for (InputSection *isec : makeArrayRef(unorderedSections).slice(insPt))
|
|
|
|
isd->sections.push_back(isec);
|
2018-03-31 05:36:54 +08:00
|
|
|
}
|
|
|
|
|
2018-02-10 00:09:22 +08:00
|
|
|
static void sortSection(OutputSection *sec,
|
2018-02-14 09:42:26 +08:00
|
|
|
const DenseMap<const InputSectionBase *, int> &order) {
|
2018-02-10 00:09:22 +08:00
|
|
|
StringRef name = sec->name;
|
2018-01-31 00:20:08 +08:00
|
|
|
|
2020-02-20 15:14:49 +08:00
|
|
|
// Never sort these.
|
|
|
|
if (name == ".init" || name == ".fini")
|
|
|
|
return;
|
|
|
|
|
2020-08-10 22:00:53 +08:00
|
|
|
// IRelative relocations that usually live in the .rel[a].dyn section should
|
2021-02-19 03:24:56 +08:00
|
|
|
// be processed last by the dynamic loader. To achieve that we add synthetic
|
|
|
|
// sections in the required order from the beginning so that the in.relaIplt
|
2020-08-10 22:00:53 +08:00
|
|
|
// section is placed last in an output section. Here we just do not apply
|
|
|
|
// sorting for an output section which holds the in.relaIplt section.
|
|
|
|
if (in.relaIplt->getParent() == sec)
|
|
|
|
return;
|
|
|
|
|
2020-02-20 15:14:49 +08:00
|
|
|
// Sort input sections by priority using the list provided by
|
|
|
|
// --symbol-ordering-file or --shuffle-sections=. This is a least significant
|
|
|
|
// digit radix sort. The sections may be sorted stably again by a more
|
|
|
|
// significant key.
|
|
|
|
if (!order.empty())
|
2021-11-26 12:24:23 +08:00
|
|
|
for (SectionCommand *b : sec->commands)
|
2020-02-20 15:14:49 +08:00
|
|
|
if (auto *isd = dyn_cast<InputSectionDescription>(b))
|
|
|
|
sortISDBySectionOrder(isd, order);
|
|
|
|
|
2021-10-26 07:57:46 +08:00
|
|
|
if (script->hasSectionsCommand)
|
2018-02-10 00:09:22 +08:00
|
|
|
return;
|
2017-10-30 18:12:49 +08:00
|
|
|
|
2021-10-26 07:57:46 +08:00
|
|
|
if (name == ".init_array" || name == ".fini_array") {
|
|
|
|
sec->sortInitFini();
|
|
|
|
} else if (name == ".ctors" || name == ".dtors") {
|
|
|
|
sec->sortCtorsDtors();
|
|
|
|
} else if (config->emachine == EM_PPC64 && name == ".toc") {
|
|
|
|
// .toc is allocated just after .got and is accessed using GOT-relative
|
|
|
|
// relocations. Object files compiled with small code model have an
|
|
|
|
// addressable range of [.got, .got + 0xFFFC] for GOT-relative relocations.
|
|
|
|
// To reduce the risk of relocation overflow, .toc contents are sorted so
|
|
|
|
// that sections having smaller relocation offsets are at beginning of .toc
|
2021-11-26 08:47:07 +08:00
|
|
|
assert(sec->commands.size() == 1);
|
|
|
|
auto *isd = cast<InputSectionDescription>(sec->commands[0]);
|
2019-04-23 10:42:06 +08:00
|
|
|
llvm::stable_sort(isd->sections,
|
|
|
|
[](const InputSection *a, const InputSection *b) -> bool {
|
|
|
|
return a->file->ppc64SmallCodeModelTocRelocs &&
|
|
|
|
!b->file->ppc64SmallCodeModelTocRelocs;
|
|
|
|
});
|
2019-01-25 02:17:40 +08:00
|
|
|
}
|
2018-02-10 00:09:22 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// If no layout was provided by linker script, we want to apply default
|
|
|
|
// sorting for special input sections. This also handles --symbol-ordering-file.
|
|
|
|
template <class ELFT> void Writer<ELFT>::sortInputSections() {
|
|
|
|
// Build the order once since it is expensive.
|
2018-02-14 09:42:26 +08:00
|
|
|
DenseMap<const InputSectionBase *, int> order = buildSectionOrder();
|
2020-02-20 05:19:58 +08:00
|
|
|
maybeShuffle(order);
|
2021-11-26 12:24:23 +08:00
|
|
|
for (SectionCommand *cmd : script->sectionCommands)
|
|
|
|
if (auto *sec = dyn_cast<OutputSection>(cmd))
|
2018-02-10 00:09:22 +08:00
|
|
|
sortSection(sec, order);
|
2017-10-30 18:12:49 +08:00
|
|
|
}
|
|
|
|
|
2016-09-22 06:36:19 +08:00
|
|
|
// Establishes the final order of output sections: input-section sorting,
// sort-rank assignment, and (with a SECTIONS command) orphan placement.
template <class ELFT> void Writer<ELFT>::sortSections() {
  llvm::TimeTraceScope timeScope("Sort sections");
  script->adjustSectionsBeforeSorting();

  // Don't sort if using -r. It is not necessary and we want to preserve the
  // relative order for SHF_LINK_ORDER sections.
  if (config->relocatable)
    return;

  sortInputSections();

  // Compute each output section's rank up front; compareSections and
  // findOrphanPos consult it below.
  for (SectionCommand *cmd : script->sectionCommands) {
    auto *os = dyn_cast<OutputSection>(cmd);
    if (!os)
      continue;
    os->sortRank = getSectionRank(os);
  }

  if (!script->hasSectionsCommand) {
    // We know that all the OutputSections are contiguous in this case.
    auto isSection = [](SectionCommand *cmd) {
      return isa<OutputSection>(cmd);
    };
    std::stable_sort(
        llvm::find_if(script->sectionCommands, isSection),
        llvm::find_if(llvm::reverse(script->sectionCommands), isSection).base(),
        compareSections);

    // Process INSERT commands. From this point onwards the order of
    // script->sectionCommands is fixed.
    script->processInsertCommands();
    return;
  }

  script->processInsertCommands();

  // Orphan sections are sections present in the input files which are
  // not explicitly placed into the output file by the linker script.
  //
  // The sections in the linker script are already in the correct
  // order. We have to figure out where to insert the orphan
  // sections.
  //
  // The order of the sections in the script is arbitrary and may not agree with
  // compareSections. This means that we cannot easily define a strict weak
  // ordering. To see why, consider a comparison of a section in the script and
  // one not in the script. We have two simple options:
  // * Make them equivalent (a is not less than b, and b is not less than a).
  //   The problem is then that equivalence has to be transitive and we can
  //   have sections a, b and c with only b in a script and a less than c
  //   which breaks this property.
  // * Use compareSectionsNonScript. Given that the script order doesn't have
  //   to match, we can end up with sections a, b, c, d where b and c are in the
  //   script and c is compareSectionsNonScript less than b. In which case d
  //   can be equivalent to c, a to b and d < a. As a concrete example:
  //   .a (rx) # not in script
  //   .b (rx) # in script
  //   .c (ro) # in script
  //   .d (ro) # not in script
  //
  // The way we define an order then is:
  // * Sort only the orphan sections. They are in the end right now.
  // * Move each orphan section to its preferred position. We try
  //   to put each section in the last position where it can share
  //   a PT_LOAD.
  //
  // There is some ambiguity as to where exactly a new entry should be
  // inserted, because Commands contains not only output section
  // commands but also other types of commands such as symbol assignment
  // expressions. There's no correct answer here due to the lack of the
  // formal specification of the linker script. We use heuristics to
  // determine whether a new output command should be added before or
  // after another commands. For the details, look at shouldSkip
  // function.

  auto i = script->sectionCommands.begin();
  auto e = script->sectionCommands.end();
  // Orphans have not been assigned a section index yet, which is how we
  // recognize them here.
  auto nonScriptI = std::find_if(i, e, [](SectionCommand *cmd) {
    if (auto *sec = dyn_cast<OutputSection>(cmd))
      return sec->sectionIndex == UINT32_MAX;
    return false;
  });

  // Sort the orphan sections.
  std::stable_sort(nonScriptI, e, compareSections);

  // As a horrible special case, skip the first . assignment if it is before any
  // section. We do this because it is common to set a load address by starting
  // the script with ". = 0xabcd" and the expectation is that every section is
  // after that.
  auto firstSectionOrDotAssignment =
      std::find_if(i, e, [](SectionCommand *cmd) { return !shouldSkip(cmd); });
  if (firstSectionOrDotAssignment != e &&
      isa<SymbolAssignment>(**firstSectionOrDotAssignment))
    ++firstSectionOrDotAssignment;
  i = firstSectionOrDotAssignment;

  while (nonScriptI != e) {
    auto pos = findOrphanPos(i, nonScriptI);
    OutputSection *orphan = cast<OutputSection>(*nonScriptI);

    // As an optimization, find all sections with the same sort rank
    // and insert them with one rotate.
    unsigned rank = orphan->sortRank;
    auto end = std::find_if(nonScriptI + 1, e, [=](SectionCommand *cmd) {
      return cast<OutputSection>(cmd)->sortRank != rank;
    });
    std::rotate(pos, nonScriptI, end);
    nonScriptI = end;
  }

  script->adjustSectionsAfterSorting();
}
|
|
|
|
|
2017-12-12 21:30:44 +08:00
|
|
|
// Orders SHF_LINK_ORDER sections by the position of the section they are
// linked to (output section address, then offset within it).
static bool compareByFilePosition(InputSection *a, InputSection *b) {
  InputSection *depA = nullptr;
  InputSection *depB = nullptr;
  if (a->flags & SHF_LINK_ORDER)
    depA = a->getLinkOrderDep();
  if (b->flags & SHF_LINK_ORDER)
    depB = b->getLinkOrderDep();
  // SHF_LINK_ORDER sections with non-zero sh_link are ordered before
  // non-SHF_LINK_ORDER sections and SHF_LINK_ORDER sections with zero sh_link.
  if (!depA || !depB)
    return depA && !depB;

  OutputSection *parentA = depA->getParent();
  OutputSection *parentB = depB->getParent();
  if (parentA == parentB)
    return depA->outSecOff < depB->outSecOff;
  return parentA->addr < parentB->addr;
}
|
|
|
|
|
|
|
|
// Sorts SHF_LINK_ORDER input sections within each output section so that
// they appear in the same order as the sections they are linked to.
template <class ELFT> void Writer<ELFT>::resolveShfLinkOrder() {
  llvm::TimeTraceScope timeScope("Resolve SHF_LINK_ORDER");
  for (OutputSection *sec : outputSections) {
    if (!(sec->flags & SHF_LINK_ORDER))
      continue;

    // The ARM.exidx section uses SHF_LINK_ORDER, but we have consolidated
    // this processing inside the ARMExidxSyntheticSection::finalizeContents().
    if (!config->relocatable && config->emachine == EM_ARM &&
        sec->type == SHT_ARM_EXIDX)
      continue;

    // Link order may be distributed across several InputSectionDescriptions.
    // Sorting is performed separately.
    //
    // scriptSections keeps a pointer to each section's slot in
    // isd->sections so the sorted result can be written back in place,
    // preserving which slots belong to this description.
    std::vector<InputSection **> scriptSections;
    std::vector<InputSection *> sections;
    for (SectionCommand *cmd : sec->commands) {
      auto *isd = dyn_cast<InputSectionDescription>(cmd);
      if (!isd)
        continue;
      bool hasLinkOrder = false;
      scriptSections.clear();
      sections.clear();
      for (InputSection *&isec : isd->sections) {
        if (isec->flags & SHF_LINK_ORDER) {
          InputSection *link = isec->getLinkOrderDep();
          // Diagnose a link-order dependency whose target was discarded
          // (e.g. by --gc-sections); sorting against it would be meaningless.
          if (link && !link->getParent())
            error(toString(isec) + ": sh_link points to discarded section " +
                  toString(link));
          hasLinkOrder = true;
        }
        scriptSections.push_back(&isec);
        sections.push_back(isec);
      }
      // Only reorder when valid link-order members exist and no error was
      // reported above.
      if (hasLinkOrder && errorCount() == 0) {
        llvm::stable_sort(sections, compareByFilePosition);
        for (int i = 0, n = sections.size(); i != n; ++i)
          *scriptSections[i] = sections[i];
      }
    }
  }
}
|
|
|
|
|
2020-04-24 18:23:23 +08:00
|
|
|
// Finalizes the contents of a synthetic section, skipping sections that were
// never created, are not needed, or were not assigned to an output section.
static void finalizeSynthetic(SyntheticSection *sec) {
  if (!sec || !sec->isNeeded() || !sec->getParent())
    return;
  llvm::TimeTraceScope timeScope("Finalize synthetic sections", sec->name);
  sec->finalizeContents();
}
|
|
|
|
|
2019-04-18 16:15:54 +08:00
|
|
|
// We need to generate and finalize the content that depends on the address of
// InputSections. As the generation of the content may also alter InputSection
// addresses we must converge to a fixed point. We do that here. See the comment
// in Writer<ELFT>::finalizeSections().
template <class ELFT> void Writer<ELFT>::finalizeAddressDependentContent() {
  llvm::TimeTraceScope timeScope("Finalize address dependent content");
  ThunkCreator tc;
  AArch64Err843419Patcher a64p;
  ARMErr657417Patcher a32p;
  script->assignAddresses();
  // .ARM.exidx and SHF_LINK_ORDER do not require precise addresses, but they
  // do require the relative addresses of OutputSections because linker scripts
  // can assign Virtual Addresses to OutputSections that are not monotonically
  // increasing.
  for (Partition &part : partitions)
    finalizeSynthetic(part.armExidx.get());
  resolveShfLinkOrder();

  // Converts call x@GDPLT to call __tls_get_addr
  if (config->emachine == EM_HEXAGON)
    hexagonTLSSymbolUpdate(outputSections);

  // Fixed-point loop: each pass may create thunks, apply errata fixes, or
  // resize dynamic relocation sections, any of which can move addresses and
  // require another pass.
  int assignPasses = 0;
  for (;;) {
    bool changed = target->needsThunks && tc.createThunks(outputSections);

    // With Thunk Size much smaller than branch range we expect to
    // converge quickly; if we get to 15 something has gone wrong.
    if (changed && tc.pass >= 15) {
      error("thunk creation not converged");
      break;
    }

    // Reassign addresses before running the errata patchers so they see the
    // layout produced by the thunks added above.
    if (config->fixCortexA53Errata843419) {
      if (changed)
        script->assignAddresses();
      changed |= a64p.createFixes();
    }
    if (config->fixCortexA8) {
      if (changed)
        script->assignAddresses();
      changed |= a32p.createFixes();
    }

    if (in.mipsGot)
      in.mipsGot->updateAllocSize();

    for (Partition &part : partitions) {
      changed |= part.relaDyn->updateAllocSize();
      if (part.relrDyn)
        changed |= part.relrDyn->updateAllocSize();
    }

    const Defined *changedSym = script->assignAddresses();
    if (!changed) {
      // Some symbols may be dependent on section addresses. When we break the
      // loop, the symbol values are finalized because a previous
      // assignAddresses() finalized section addresses.
      if (!changedSym)
        break;
      if (++assignPasses == 5) {
        errorOrWarn("assignment to symbol " + toString(*changedSym) +
                    " does not converge");
        break;
      }
    }
  }

  // For -r, section addresses are meaningless in the output; zero them out.
  if (config->relocatable)
    for (OutputSection *sec : outputSections)
      sec->addr = 0;

  // If addrExpr is set, the address may not be a multiple of the alignment.
  // Warn because this is error-prone.
  for (SectionCommand *cmd : script->sectionCommands)
    if (auto *os = dyn_cast<OutputSection>(cmd))
      if (os->addr % os->alignment != 0)
        warn("address (0x" + Twine::utohexstr(os->addr) + ") of section " +
             os->name + " is not a multiple of alignment (" +
             Twine(os->alignment) + ")");
}
|
|
|
|
|
2021-02-19 03:24:56 +08:00
|
|
|
// If Input Sections have been shrunk (basic block sections) then
|
2020-04-07 21:48:18 +08:00
|
|
|
// update symbol values and sizes associated with these sections. With basic
|
|
|
|
// block sections, input sections can shrink when the jump instructions at
|
|
|
|
// the end of the section are relaxed.
|
|
|
|
static void fixSymbolsAfterShrinking() {
|
|
|
|
for (InputFile *File : objectFiles) {
|
|
|
|
parallelForEach(File->getSymbols(), [&](Symbol *Sym) {
|
|
|
|
auto *def = dyn_cast<Defined>(Sym);
|
|
|
|
if (!def)
|
|
|
|
return;
|
|
|
|
|
|
|
|
const SectionBase *sec = def->section;
|
|
|
|
if (!sec)
|
|
|
|
return;
|
|
|
|
|
2021-12-25 04:09:48 +08:00
|
|
|
const InputSectionBase *inputSec = dyn_cast<InputSectionBase>(sec);
|
2020-04-07 21:48:18 +08:00
|
|
|
if (!inputSec || !inputSec->bytesDropped)
|
|
|
|
return;
|
|
|
|
|
|
|
|
const size_t OldSize = inputSec->data().size();
|
|
|
|
const size_t NewSize = OldSize - inputSec->bytesDropped;
|
|
|
|
|
|
|
|
if (def->value > NewSize && def->value <= OldSize) {
|
|
|
|
LLVM_DEBUG(llvm::dbgs()
|
|
|
|
<< "Moving symbol " << Sym->getName() << " from "
|
|
|
|
<< def->value << " to "
|
|
|
|
<< def->value - inputSec->bytesDropped << " bytes\n");
|
|
|
|
def->value -= inputSec->bytesDropped;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (def->value + def->size > NewSize && def->value <= OldSize &&
|
|
|
|
def->value + def->size <= OldSize) {
|
|
|
|
LLVM_DEBUG(llvm::dbgs()
|
|
|
|
<< "Shrinking symbol " << Sym->getName() << " from "
|
|
|
|
<< def->size << " to " << def->size - inputSec->bytesDropped
|
|
|
|
<< " bytes\n");
|
|
|
|
def->size -= inputSec->bytesDropped;
|
|
|
|
}
|
|
|
|
});
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// If basic block sections exist, there are opportunities to delete fall thru
|
|
|
|
// jumps and shrink jump instructions after basic block reordering. This
|
|
|
|
// relaxation pass does that. It is only enabled when --optimize-bb-jumps
|
|
|
|
// option is used.
|
|
|
|
template <class ELFT> void Writer<ELFT>::optimizeBasicBlockJumps() {
|
|
|
|
assert(config->optimizeBBJumps);
|
|
|
|
|
|
|
|
script->assignAddresses();
|
|
|
|
// For every output section that has executable input sections, this
|
|
|
|
// does the following:
|
|
|
|
// 1. Deletes all direct jump instructions in input sections that
|
|
|
|
// jump to the following section as it is not required.
|
|
|
|
// 2. If there are two consecutive jump instructions, it checks
|
|
|
|
// if they can be flipped and one can be deleted.
|
2021-12-27 05:53:47 +08:00
|
|
|
for (OutputSection *osec : outputSections) {
|
|
|
|
if (!(osec->flags & SHF_EXECINSTR))
|
2020-04-07 21:48:18 +08:00
|
|
|
continue;
|
2021-12-27 05:53:47 +08:00
|
|
|
SmallVector<InputSection *, 0> sections = getInputSections(*osec);
|
2020-04-07 21:48:18 +08:00
|
|
|
std::vector<unsigned> result(sections.size());
|
|
|
|
// Delete all fall through jump instructions. Also, check if two
|
|
|
|
// consecutive jump instructions can be flipped so that a fall
|
|
|
|
// through jmp instruction can be deleted.
|
2021-12-27 15:26:13 +08:00
|
|
|
for (size_t i = 0, e = sections.size(); i != e; ++i) {
|
2020-04-07 21:48:18 +08:00
|
|
|
InputSection *next = i + 1 < sections.size() ? sections[i + 1] : nullptr;
|
2021-12-27 15:26:13 +08:00
|
|
|
InputSection &sec = *sections[i];
|
|
|
|
result[i] = target->deleteFallThruJmpInsn(sec, sec.file, next) ? 1 : 0;
|
|
|
|
}
|
2020-04-07 21:48:18 +08:00
|
|
|
size_t numDeleted = std::count(result.begin(), result.end(), 1);
|
|
|
|
if (numDeleted > 0) {
|
|
|
|
script->assignAddresses();
|
|
|
|
LLVM_DEBUG(llvm::dbgs()
|
|
|
|
<< "Removing " << numDeleted << " fall through jumps\n");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
fixSymbolsAfterShrinking();
|
|
|
|
|
2021-12-27 05:53:47 +08:00
|
|
|
for (OutputSection *osec : outputSections)
|
|
|
|
for (InputSection *is : getInputSections(*osec))
|
2020-04-07 21:48:18 +08:00
|
|
|
is->trim();
|
|
|
|
}
|
|
|
|
|
2017-10-03 02:54:59 +08:00
|
|
|
// In order to allow users to manipulate linker-synthesized sections,
// we had to add synthetic sections to the input section list early,
// even before we make decisions whether they are needed. This allows
// users to write scripts like this: ".mygot : { .got }".
//
// Doing it has an unintended side effects. If it turns out that we
// don't need a .got (for example) at all because there's no
// relocation that needs a .got, we don't want to emit .got.
//
// To deal with the above problem, this function is called after
// scanRelocations is called to remove synthetic sections that turn
// out to be empty.
static void removeUnusedSyntheticSections() {
  // All input synthetic sections that can be empty are placed after
  // all regular ones. Reverse iterate to find the first synthetic section
  // after a non-synthetic one which will be our starting point.
  // (.base() converts the reverse iterator to the forward iterator one past
  // that non-synthetic section, i.e. the first trailing synthetic section.)
  auto start = std::find_if(inputSections.rbegin(), inputSections.rend(),
                            [](InputSectionBase *s) {
                              return !isa<SyntheticSection>(s);
                            })
                   .base();

  // Remove unused synthetic sections from inputSections;
  // collect them in `unused` so the other data structures below can be
  // purged of the same sections.
  DenseSet<InputSectionBase *> unused;
  auto end =
      std::remove_if(start, inputSections.end(), [&](InputSectionBase *s) {
        auto *sec = cast<SyntheticSection>(s);
        if (sec->getParent() && sec->isNeeded())
          return false;
        unused.insert(sec);
        return true;
      });
  inputSections.erase(end, inputSections.end());

  // Remove unused synthetic sections from the corresponding input section
  // description and orphanSections.
  for (auto *sec : unused)
    if (OutputSection *osec = cast<SyntheticSection>(sec)->getParent())
      for (SectionCommand *cmd : osec->commands)
        if (auto *isd = dyn_cast<InputSectionDescription>(cmd))
          llvm::erase_if(isd->sections, [&](InputSection *isec) {
            return unused.count(isec);
          });
  llvm::erase_if(script->orphanSections, [&](const InputSectionBase *sec) {
    return unused.count(sec);
  });
}
|
|
|
|
|
2016-07-20 22:43:20 +08:00
|
|
|
// Create output section objects and add them to OutputSections.
//
// This is the driver that pins down the final image: it defines
// linker-synthesized symbols, scans relocations, populates the static and
// dynamic symbol tables, creates program headers, and finalizes every
// synthetic section. The ordering of the steps below is significant; see
// the inline comments for the individual dependencies.
template <class ELFT> void Writer<ELFT>::finalizeSections() {
  // Cache the init/fini array output sections; they are referenced both by
  // the __{preinit,init,fini}_array_{start,end} symbols added below and by
  // later layout code.
  Out::preinitArray = findSection(".preinit_array");
  Out::initArray = findSection(".init_array");
  Out::finiArray = findSection(".fini_array");

  // The linker needs to define SECNAME_start, SECNAME_end and SECNAME_stop
  // symbols for sections, so that the runtime can get the start and end
  // addresses of each section by section name. Add such symbols.
  if (!config->relocatable) {
    addStartEndSymbols();
    for (SectionCommand *cmd : script->sectionCommands)
      if (auto *sec = dyn_cast<OutputSection>(cmd))
        addStartStopSymbols(sec);
  }

  // Add _DYNAMIC symbol. Unlike GNU gold, our _DYNAMIC symbol has no type.
  // It should be okay as no one seems to care about the type.
  // Even the author of gold doesn't remember why gold behaves that way.
  // https://sourceware.org/ml/binutils/2002-03/msg00360.html
  if (mainPart->dynamic->parent)
    symtab->addSymbol(
        Defined{/*file=*/nullptr, "_DYNAMIC", STB_WEAK, STV_HIDDEN, STT_NOTYPE,
                /*value=*/0, /*size=*/0, mainPart->dynamic.get()});

  // Define __rel[a]_iplt_{start,end} symbols if needed.
  addRelIpltSymbols();

  // RISC-V's gp can address +/- 2 KiB, set it to .sdata + 0x800. This symbol
  // should only be defined in an executable. If .sdata does not exist, its
  // value/section does not matter but it has to be relative, so set its
  // st_shndx arbitrarily to 1 (Out::elfHeader).
  if (config->emachine == EM_RISCV && !config->shared) {
    OutputSection *sec = findSection(".sdata");
    ElfSym::riscvGlobalPointer =
        addOptionalRegular("__global_pointer$", sec ? sec : Out::elfHeader,
                           0x800, STV_DEFAULT);
  }

  if (config->emachine == EM_386 || config->emachine == EM_X86_64) {
    // On targets that support TLSDESC, _TLS_MODULE_BASE_ is defined in such a
    // way that:
    //
    // 1) Without relaxation: it produces a dynamic TLSDESC relocation that
    // computes 0.
    // 2) With LD->LE relaxation: _TLS_MODULE_BASE_@tpoff = 0 (lowest address in
    // the TLS block).
    //
    // 2) is special cased in @tpoff computation. To satisfy 1), we define it as
    // an absolute symbol of zero. This is different from GNU linkers which
    // define _TLS_MODULE_BASE_ relative to the first TLS section.
    Symbol *s = symtab->find("_TLS_MODULE_BASE_");
    if (s && s->isUndefined()) {
      s->resolve(Defined{/*file=*/nullptr, s->getName(), STB_GLOBAL, STV_HIDDEN,
                         STT_TLS, /*value=*/0, 0,
                         /*section=*/nullptr});
      ElfSym::tlsModuleBase = cast<Defined>(s);
    }
  }

  {
    llvm::TimeTraceScope timeScope("Finalize .eh_frame");
    // This is responsible for splitting up the .eh_frame section into
    // pieces. The relocation scan uses those pieces, so this has to happen
    // earlier.
    for (Partition &part : partitions)
      finalizeSynthetic(part.ehFrame.get());
  }

  // Preemptibility must be computed before the relocation scan below, which
  // uses it to decide whether a dynamic relocation is needed.
  if (config->hasDynSymTab)
    for (Symbol *sym : symtab->symbols())
      sym->isPreemptible = computeIsPreemptible(*sym);

  // Change values of linker-script-defined symbols from placeholders (assigned
  // by declareSymbols) to actual definitions.
  script->processSymbolAssignments();

  {
    llvm::TimeTraceScope timeScope("Scan relocations");
    // Scan relocations. This must be done after every symbol is declared so
    // that we can correctly decide if a dynamic relocation is needed. This is
    // called after processSymbolAssignments() because it needs to know whether
    // a linker-script-defined symbol is absolute.
    ppc64noTocRelax.clear();
    if (!config->relocatable) {
      // Scan all relocations. Each relocation goes through a series of tests to
      // determine if it needs special treatment, such as creating GOT, PLT,
      // copy relocations, etc. Note that relocations for non-alloc sections are
      // directly processed by InputSection::relocateNonAlloc.
      for (InputSectionBase *sec : inputSections)
        if (sec->isLive() && isa<InputSection>(sec) && (sec->flags & SHF_ALLOC))
          scanRelocations<ELFT>(*sec);
      // .eh_frame and .ARM.exidx input sections are kept per-partition, so
      // they are not covered by the inputSections loop above.
      for (Partition &part : partitions) {
        for (EhInputSection *sec : part.ehFrame->sections)
          scanRelocations<ELFT>(*sec);
        if (part.armExidx && part.armExidx->isLive())
          for (InputSection *sec : part.armExidx->exidxSections)
            scanRelocations<ELFT>(*sec);
      }

      reportUndefinedSymbols<ELFT>();
      postScanRelocations();
    }
  }

  // Now that the relocation scan is done, the PLT contents are known; emit
  // the corresponding symbols.
  if (in.plt && in.plt->isNeeded())
    in.plt->addSymbols();
  if (in.iplt && in.iplt->isNeeded())
    in.iplt->addSymbols();

  if (config->unresolvedSymbolsInShlib != UnresolvedPolicy::Ignore) {
    auto diagnose =
        config->unresolvedSymbolsInShlib == UnresolvedPolicy::ReportError
            ? errorOrWarn
            : warn;
    // Error on undefined symbols in a shared object, if all of its DT_NEEDED
    // entries are seen. These cases would otherwise lead to runtime errors
    // reported by the dynamic linker.
    //
    // ld.bfd traces all DT_NEEDED to emulate the logic of the dynamic linker to
    // catch more cases. That is too much for us. Our approach resembles the one
    // used in ld.gold, achieves a good balance to be useful but not too smart.
    for (SharedFile *file : sharedFiles) {
      bool allNeededIsKnown =
          llvm::all_of(file->dtNeeded, [&](StringRef needed) {
            return symtab->soNames.count(CachedHashStringRef(needed));
          });
      if (!allNeededIsKnown)
        continue;
      for (Symbol *sym : file->requiredSymbols)
        if (sym->isUndefined() && !sym->isWeak())
          diagnose(toString(file) + ": undefined reference to " +
                   toString(*sym) + " [--no-allow-shlib-undefined]");
    }
  }

  {
    llvm::TimeTraceScope timeScope("Add symbols to symtabs");
    // Now that we have defined all possible global symbols including linker-
    // synthesized ones, visit all symbols to give the finishing touches.
    for (Symbol *sym : symtab->symbols()) {
      if (!sym->isUsedInRegularObj || !includeInSymtab(*sym))
        continue;
      if (!config->relocatable)
        sym->binding = sym->computeBinding();
      if (in.symTab)
        in.symTab->addSymbol(sym);

      if (sym->includeInDynsym()) {
        partitions[sym->partition - 1].dynSymTab->addSymbol(sym);
        if (auto *file = dyn_cast_or_null<SharedFile>(sym->file))
          if (file->isNeeded && !sym->isUndefined())
            addVerneed(sym);
      }
    }

    // We also need to scan the dynamic relocation tables of the other
    // partitions and add any referenced symbols to the partition's dynsym.
    for (Partition &part : MutableArrayRef<Partition>(partitions).slice(1)) {
      DenseSet<Symbol *> syms;
      for (const SymbolTableEntry &e : part.dynSymTab->getSymbols())
        syms.insert(e.sym);
      for (DynamicReloc &reloc : part.relaDyn->relocs)
        if (reloc.sym && reloc.needsDynSymIndex() &&
            syms.insert(reloc.sym).second)
          part.dynSymTab->addSymbol(reloc.sym);
    }
  }

  if (in.mipsGot)
    in.mipsGot->build();

  removeUnusedSyntheticSections();
  script->diagnoseOrphanHandling();

  sortSections();

  // Create a list of OutputSections, assign sectionIndex, and populate
  // in.shStrTab.
  for (SectionCommand *cmd : script->sectionCommands)
    if (auto *osec = dyn_cast<OutputSection>(cmd)) {
      outputSections.push_back(osec);
      osec->sectionIndex = outputSections.size();
      osec->shName = in.shStrTab->addString(osec->name);
    }

  // Prefer command line supplied address over other constraints.
  for (OutputSection *sec : outputSections) {
    auto i = config->sectionStartMap.find(sec->name);
    if (i != config->sectionStartMap.end())
      sec->addrExpr = [=] { return i->second; };
  }

  // With the outputSections available check for GDPLT relocations
  // and add __tls_get_addr symbol if needed.
  if (config->emachine == EM_HEXAGON && hexagonNeedsTLSSymbol(outputSections)) {
    Symbol *sym = symtab->addSymbol(Undefined{
        nullptr, "__tls_get_addr", STB_GLOBAL, STV_DEFAULT, STT_NOTYPE});
    sym->isPreemptible = true;
    partitions[0].dynSymTab->addSymbol(sym);
  }

  // This is a bit of a hack. A value of 0 means undef, so we set it
  // to 1 to make __ehdr_start defined. The section number is not
  // particularly relevant.
  Out::elfHeader->sectionIndex = 1;
  Out::elfHeader->size = sizeof(typename ELFT::Ehdr);

  // Binary and relocatable output does not have PHDRS.
  // The headers have to be created before finalize as that can influence the
  // image base and the dynamic section on mips includes the image base.
  if (!config->relocatable && !config->oFormatBinary) {
    for (Partition &part : partitions) {
      // A PHDRS linker-script command overrides our default segment layout.
      part.phdrs = script->hasPhdrsCommands() ? script->createPhdrs()
                                              : createPhdrs(part);
      if (config->emachine == EM_ARM) {
        // PT_ARM_EXIDX is the ARM EHABI equivalent of PT_GNU_EH_FRAME
        addPhdrForSection(part, SHT_ARM_EXIDX, PT_ARM_EXIDX, PF_R);
      }
      if (config->emachine == EM_MIPS) {
        // Add separate segments for MIPS-specific sections.
        addPhdrForSection(part, SHT_MIPS_REGINFO, PT_MIPS_REGINFO, PF_R);
        addPhdrForSection(part, SHT_MIPS_OPTIONS, PT_MIPS_OPTIONS, PF_R);
        addPhdrForSection(part, SHT_MIPS_ABIFLAGS, PT_MIPS_ABIFLAGS, PF_R);
      }
    }
    Out::programHeaders->size = sizeof(Elf_Phdr) * mainPart->phdrs.size();

    // Find the TLS segment. This happens before the section layout loop so
    // that Android relocation packing can look up TLS symbol addresses. We
    // only need to care about the main partition here because all TLS symbols
    // were moved to the main partition (see MarkLive.cpp).
    for (PhdrEntry *p : mainPart->phdrs)
      if (p->p_type == PT_TLS)
        Out::tlsPhdr = p;
  }

  // Some symbols are defined in terms of program headers. Now that we
  // have the headers, we can find out which sections they point to.
  setReservedSymbolSections();

  {
    llvm::TimeTraceScope timeScope("Finalize synthetic sections");

    finalizeSynthetic(in.bss.get());
    finalizeSynthetic(in.bssRelRo.get());
    finalizeSynthetic(in.symTabShndx.get());
    finalizeSynthetic(in.shStrTab.get());
    finalizeSynthetic(in.strTab.get());
    finalizeSynthetic(in.got.get());
    finalizeSynthetic(in.mipsGot.get());
    finalizeSynthetic(in.igotPlt.get());
    finalizeSynthetic(in.gotPlt.get());
    finalizeSynthetic(in.relaIplt.get());
    finalizeSynthetic(in.relaPlt.get());
    finalizeSynthetic(in.plt.get());
    finalizeSynthetic(in.iplt.get());
    finalizeSynthetic(in.ppc32Got2.get());
    finalizeSynthetic(in.partIndex.get());

    // Dynamic section must be the last one in this list and dynamic
    // symbol table section (dynSymTab) must be the first one.
    for (Partition &part : partitions) {
      finalizeSynthetic(part.dynSymTab.get());
      finalizeSynthetic(part.gnuHashTab.get());
      finalizeSynthetic(part.hashTab.get());
      finalizeSynthetic(part.verDef.get());
      finalizeSynthetic(part.relaDyn.get());
      finalizeSynthetic(part.relrDyn.get());
      finalizeSynthetic(part.ehFrameHdr.get());
      finalizeSynthetic(part.verSym.get());
      finalizeSynthetic(part.verNeed.get());
      finalizeSynthetic(part.dynamic.get());
    }
  }

  if (!script->hasSectionsCommand && !config->relocatable)
    fixSectionAlignments();

  // This is used to:
  // 1) Create "thunks":
  //    Jump instructions in many ISAs have small displacements, and therefore
  //    they cannot jump to arbitrary addresses in memory. For example, RISC-V
  //    JAL instruction can target only +-1 MiB from PC. It is a linker's
  //    responsibility to create and insert small pieces of code between
  //    sections to extend the ranges if jump targets are out of range. Such
  //    code pieces are called "thunks".
  //
  //    We add thunks at this stage. We couldn't do this before this point
  //    because this is the earliest point where we know sizes of sections and
  //    their layouts (that are needed to determine if jump targets are in
  //    range).
  //
  // 2) Update the sections. We need to generate content that depends on the
  //    address of InputSections. For example, MIPS GOT section content or
  //    android packed relocations sections content.
  //
  // 3) Assign the final values for the linker script symbols. Linker scripts
  //    sometimes using forward symbol declarations. We want to set the correct
  //    values. They also might change after adding the thunks.
  finalizeAddressDependentContent();

  // All information needed for OutputSection part of Map file is available.
  if (errorCount())
    return;

  {
    llvm::TimeTraceScope timeScope("Finalize synthetic sections");
    // finalizeAddressDependentContent may have added local symbols to the
    // static symbol table.
    finalizeSynthetic(in.symTab.get());
    finalizeSynthetic(in.ppc64LongBranchTarget.get());
  }

  // Relaxation to delete inter-basic block jumps created by basic block
  // sections. Run after in.symTab is finalized as optimizeBasicBlockJumps
  // can relax jump instructions based on symbol offset.
  if (config->optimizeBBJumps)
    optimizeBasicBlockJumps();

  // Fill other section headers. The dynamic table is finalized
  // at the end because some tags like RELSZ depend on result
  // of finalizing other sections.
  for (OutputSection *sec : outputSections)
    sec->finalize();
}
|
|
|
|
|
2018-10-24 22:24:01 +08:00
|
|
|
// Ensure data sections are not mixed with executable sections when
|
2021-10-26 03:52:06 +08:00
|
|
|
// --execute-only is used. --execute-only make pages executable but not
|
|
|
|
// readable.
|
2018-10-24 22:24:01 +08:00
|
|
|
template <class ELFT> void Writer<ELFT>::checkExecuteOnly() {
|
|
|
|
if (!config->executeOnly)
|
|
|
|
return;
|
|
|
|
|
2021-12-27 05:53:47 +08:00
|
|
|
for (OutputSection *osec : outputSections)
|
|
|
|
if (osec->flags & SHF_EXECINSTR)
|
|
|
|
for (InputSection *isec : getInputSections(*osec))
|
2018-10-24 22:24:01 +08:00
|
|
|
if (!(isec->flags & SHF_EXECINSTR))
|
2021-12-27 05:53:47 +08:00
|
|
|
error("cannot place " + toString(isec) + " into " +
|
|
|
|
toString(osec->name) +
|
2022-01-06 16:43:46 +08:00
|
|
|
": --execute-only does not support intermingling data and code");
|
2018-10-24 22:24:01 +08:00
|
|
|
}
|
|
|
|
|
2015-12-26 17:48:00 +08:00
|
|
|
// The linker is expected to define SECNAME_start and SECNAME_end
|
|
|
|
// symbols for a few sections. This function defines them.
|
|
|
|
template <class ELFT> void Writer<ELFT>::addStartEndSymbols() {
|
2018-05-18 11:01:06 +08:00
|
|
|
// If a section does not exist, there's ambiguity as to how we
|
|
|
|
// define _start and _end symbols for an init/fini section. Since
|
|
|
|
// the loader assume that the symbols are always defined, we need to
|
|
|
|
// always define them. But what value? The loader iterates over all
|
|
|
|
// pointers between _start and _end to run global ctors/dtors, so if
|
|
|
|
// the section is empty, their symbol values don't actually matter
|
|
|
|
// as long as _start and _end point to the same location.
|
|
|
|
//
|
|
|
|
// That said, we don't want to set the symbols to 0 (which is
|
|
|
|
// probably the simplest value) because that could cause some
|
|
|
|
// program to fail to link due to relocation overflow, if their
|
|
|
|
// program text is above 2 GiB. We use the address of the .text
|
|
|
|
// section instead to prevent that failure.
|
2018-10-24 01:52:44 +08:00
|
|
|
//
|
2019-10-29 09:41:38 +08:00
|
|
|
// In rare situations, the .text section may not exist. If that's the
|
2018-10-24 01:52:44 +08:00
|
|
|
// case, use the image base address as a last resort.
|
2018-05-18 11:01:06 +08:00
|
|
|
OutputSection *Default = findSection(".text");
|
|
|
|
if (!Default)
|
|
|
|
Default = Out::elfHeader;
|
2018-10-24 01:52:44 +08:00
|
|
|
|
2018-05-18 11:01:06 +08:00
|
|
|
auto define = [=](StringRef start, StringRef end, OutputSection *os) {
|
2021-11-18 01:15:20 +08:00
|
|
|
if (os && !script->isDiscarded(os)) {
|
2017-12-24 01:21:39 +08:00
|
|
|
addOptionalRegular(start, os, 0);
|
|
|
|
addOptionalRegular(end, os, -1);
|
2017-03-14 00:40:20 +08:00
|
|
|
} else {
|
2018-05-18 11:01:06 +08:00
|
|
|
addOptionalRegular(start, Default, 0);
|
|
|
|
addOptionalRegular(end, Default, 0);
|
2017-03-14 00:40:20 +08:00
|
|
|
}
|
2015-12-26 17:48:00 +08:00
|
|
|
};
|
|
|
|
|
2017-02-27 10:31:26 +08:00
|
|
|
define("__preinit_array_start", "__preinit_array_end", Out::preinitArray);
|
|
|
|
define("__init_array_start", "__init_array_end", Out::initArray);
|
|
|
|
define("__fini_array_start", "__fini_array_end", Out::finiArray);
|
2016-10-27 18:28:53 +08:00
|
|
|
|
2017-07-28 03:22:43 +08:00
|
|
|
if (OutputSection *sec = findSection(".ARM.exidx"))
|
2016-10-27 18:28:53 +08:00
|
|
|
define("__exidx_start", "__exidx_end", sec);
|
2015-12-26 17:48:00 +08:00
|
|
|
}
|
|
|
|
|
2015-10-16 01:11:03 +08:00
|
|
|
// If a section name is valid as a C identifier (which is rare because of
|
|
|
|
// the leading '.'), linkers are expected to define __start_<secname> and
|
|
|
|
// __stop_<secname> symbols. They are at beginning and end of the section,
|
|
|
|
// respectively. This is not requested by the ELF standard, but GNU ld and
|
|
|
|
// gold provide the feature, and used by many programs.
|
|
|
|
template <class ELFT>
|
2017-02-24 23:07:30 +08:00
|
|
|
void Writer<ELFT>::addStartStopSymbols(OutputSection *sec) {
|
2017-02-24 22:28:00 +08:00
|
|
|
StringRef s = sec->name;
|
2015-10-16 01:11:03 +08:00
|
|
|
if (!isValidCIdentifier(s))
|
|
|
|
return;
|
2022-01-21 03:53:18 +08:00
|
|
|
addOptionalRegular(saver().save("__start_" + s), sec, 0,
|
2020-06-18 05:10:02 +08:00
|
|
|
config->zStartStopVisibility);
|
2022-01-21 03:53:18 +08:00
|
|
|
addOptionalRegular(saver().save("__stop_" + s), sec, -1,
|
2020-06-18 05:10:02 +08:00
|
|
|
config->zStartStopVisibility);
|
2015-10-16 01:11:03 +08:00
|
|
|
}
|
|
|
|
|
2017-03-16 19:20:02 +08:00
|
|
|
// Returns true if `sec` must be covered by a PT_LOAD segment.
static bool needsPtLoad(OutputSection *sec) {
  // Non-allocatable sections never occupy memory at runtime.
  if (!(sec->flags & SHF_ALLOC))
    return false;

  // Don't allocate VA space for TLS NOBITS sections. The PT_TLS PHDR is
  // responsible for allocating space for them, not the PT_LOAD that
  // contains the TLS initialization image.
  bool isTlsNoBits = (sec->flags & SHF_TLS) && sec->type == SHT_NOBITS;
  return !isTlsNoBits;
}
|
|
|
|
|
2016-09-20 23:22:27 +08:00
|
|
|
// Linker scripts are responsible for aligning addresses. Unfortunately, most
|
|
|
|
// linker scripts are designed for creating two PT_LOADs only, one RX and one
|
|
|
|
// RW. This means that there is no alignment in the RO to RX transition and we
|
|
|
|
// cannot create a PT_LOAD there.
|
2017-04-06 05:37:09 +08:00
|
|
|
static uint64_t computeFlags(uint64_t flags) {
|
2017-02-25 09:52:03 +08:00
|
|
|
if (config->omagic)
|
2016-11-29 17:43:51 +08:00
|
|
|
return PF_R | PF_W | PF_X;
|
[AArch64] Support execute-only LOAD segments.
Summary:
This adds an LLD flag to mark executable LOAD segments execute-only for AArch64 targets.
In AArch64 the expectation is that code is execute-only compatible, so this just adds a linker option to enforce this.
Patch by: ivanlozano (Ivan Lozano)
Reviewers: srhines, echristo, peter.smith, eugenis, javed.absar, espindola, ruiu
Reviewed By: ruiu
Subscribers: dokyungs, emaste, arichardson, kristof.beyls, llvm-commits
Differential Revision: https://reviews.llvm.org/D49456
llvm-svn: 338271
2018-07-31 01:02:46 +08:00
|
|
|
if (config->executeOnly && (flags & PF_X))
|
|
|
|
return flags & ~PF_R;
|
2017-04-06 05:37:09 +08:00
|
|
|
if (config->singleRoRx && !(flags & PF_W))
|
|
|
|
return flags | PF_X;
|
|
|
|
return flags;
|
2016-09-20 23:22:27 +08:00
|
|
|
}
|
|
|
|
|
2016-02-11 06:43:13 +08:00
|
|
|
// Decide which program headers to create and which sections to include in each
|
|
|
|
// one.
|
2019-06-08 01:57:58 +08:00
|
|
|
template <class ELFT>
|
2021-12-27 05:53:47 +08:00
|
|
|
SmallVector<PhdrEntry *, 0> Writer<ELFT>::createPhdrs(Partition &part) {
|
|
|
|
SmallVector<PhdrEntry *, 0> ret;
|
2016-12-20 01:01:01 +08:00
|
|
|
auto addHdr = [&](unsigned type, unsigned flags) -> PhdrEntry * {
|
2017-07-27 15:46:50 +08:00
|
|
|
ret.push_back(make<PhdrEntry>(type, flags));
|
|
|
|
return ret.back();
|
2016-02-11 06:43:13 +08:00
|
|
|
};
|
2015-09-10 04:48:09 +08:00
|
|
|
|
2019-06-08 01:57:58 +08:00
|
|
|
unsigned partNo = part.getNumber();
|
|
|
|
bool isMain = partNo == 1;
|
|
|
|
|
2015-10-24 05:45:59 +08:00
|
|
|
// Add the first PT_LOAD segment for regular output sections.
|
2017-04-06 05:37:09 +08:00
|
|
|
uint64_t flags = computeFlags(PF_R);
|
2019-06-08 01:57:58 +08:00
|
|
|
PhdrEntry *load = nullptr;
|
2017-05-05 03:34:17 +08:00
|
|
|
|
2019-09-09 21:08:51 +08:00
|
|
|
// nmagic or omagic output does not have PT_PHDR, PT_INTERP, or the readonly
|
|
|
|
// PT_LOAD.
|
|
|
|
if (!config->nmagic && !config->omagic) {
|
|
|
|
// The first phdr entry is PT_PHDR which describes the program header
|
|
|
|
// itself.
|
|
|
|
if (isMain)
|
|
|
|
addHdr(PT_PHDR, PF_R)->add(Out::programHeaders);
|
|
|
|
else
|
|
|
|
addHdr(PT_PHDR, PF_R)->add(part.programHeaders->getParent());
|
|
|
|
|
|
|
|
// PT_INTERP must be the second entry if exists.
|
|
|
|
if (OutputSection *cmd = findSection(".interp", partNo))
|
|
|
|
addHdr(PT_INTERP, cmd->getPhdrFlags())->add(cmd);
|
|
|
|
|
|
|
|
// Add the headers. We will remove them if they don't fit.
|
|
|
|
// In the other partitions the headers are ordinary sections, so they don't
|
|
|
|
// need to be added here.
|
|
|
|
if (isMain) {
|
|
|
|
load = addHdr(PT_LOAD, flags);
|
|
|
|
load->add(Out::elfHeader);
|
|
|
|
load->add(Out::programHeaders);
|
|
|
|
}
|
2019-06-08 01:57:58 +08:00
|
|
|
}
|
2017-05-05 03:34:17 +08:00
|
|
|
|
[ELF] Split RW PT_LOAD on the PT_GNU_RELRO boundary
Summary:
Based on Peter Collingbourne's suggestion in D56828.
Before D56828: PT_LOAD(.data PT_GNU_RELRO(.data.rel.ro .bss.rel.ro) .bss)
Old: PT_LOAD(PT_GNU_RELRO(.data.rel.ro .bss.rel.ro) .data .bss)
New: PT_LOAD(PT_GNU_RELRO(.data.rel.ro .bss.rel.ro)) PT_LOAD(.data. .bss)
The new layout reflects the runtime memory mappings.
By having two PT_LOAD segments, we can utilize the NOBITS part of the
first PT_LOAD and save bytes for .bss.rel.ro.
.bss.rel.ro is currently small and only used by copy relocations of
symbols in read-only segments, but it can be used for other purposes in
the future, e.g. if a relro section's statically relocated data is all
zeros, we can move it to .bss.rel.ro.
Reviewers: espindola, ruiu, pcc
Reviewed By: ruiu
Subscribers: nemanjai, jvesely, nhaehnle, javed.absar, kbarton, emaste, arichardson, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D58892
llvm-svn: 356226
2019-03-15 09:29:57 +08:00
|
|
|
// PT_GNU_RELRO includes all sections that should be marked as
|
2019-10-29 09:41:38 +08:00
|
|
|
// read-only by dynamic linker after processing relocations.
|
[ELF] Split RW PT_LOAD on the PT_GNU_RELRO boundary
Summary:
Based on Peter Collingbourne's suggestion in D56828.
Before D56828: PT_LOAD(.data PT_GNU_RELRO(.data.rel.ro .bss.rel.ro) .bss)
Old: PT_LOAD(PT_GNU_RELRO(.data.rel.ro .bss.rel.ro) .data .bss)
New: PT_LOAD(PT_GNU_RELRO(.data.rel.ro .bss.rel.ro)) PT_LOAD(.data. .bss)
The new layout reflects the runtime memory mappings.
By having two PT_LOAD segments, we can utilize the NOBITS part of the
first PT_LOAD and save bytes for .bss.rel.ro.
.bss.rel.ro is currently small and only used by copy relocations of
symbols in read-only segments, but it can be used for other purposes in
the future, e.g. if a relro section's statically relocated data is all
zeros, we can move it to .bss.rel.ro.
Reviewers: espindola, ruiu, pcc
Reviewed By: ruiu
Subscribers: nemanjai, jvesely, nhaehnle, javed.absar, kbarton, emaste, arichardson, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D58892
llvm-svn: 356226
2019-03-15 09:29:57 +08:00
|
|
|
// Current dynamic loaders only support one PT_GNU_RELRO PHDR, give
|
|
|
|
// an error message if more than one PT_GNU_RELRO PHDR is required.
|
|
|
|
PhdrEntry *relRo = make<PhdrEntry>(PT_GNU_RELRO, PF_R);
|
|
|
|
bool inRelroPhdr = false;
|
|
|
|
OutputSection *relroEnd = nullptr;
|
|
|
|
for (OutputSection *sec : outputSections) {
|
2019-06-08 01:57:58 +08:00
|
|
|
if (sec->partition != partNo || !needsPtLoad(sec))
|
[ELF] Split RW PT_LOAD on the PT_GNU_RELRO boundary
Summary:
Based on Peter Collingbourne's suggestion in D56828.
Before D56828: PT_LOAD(.data PT_GNU_RELRO(.data.rel.ro .bss.rel.ro) .bss)
Old: PT_LOAD(PT_GNU_RELRO(.data.rel.ro .bss.rel.ro) .data .bss)
New: PT_LOAD(PT_GNU_RELRO(.data.rel.ro .bss.rel.ro)) PT_LOAD(.data. .bss)
The new layout reflects the runtime memory mappings.
By having two PT_LOAD segments, we can utilize the NOBITS part of the
first PT_LOAD and save bytes for .bss.rel.ro.
.bss.rel.ro is currently small and only used by copy relocations of
symbols in read-only segments, but it can be used for other purposes in
the future, e.g. if a relro section's statically relocated data is all
zeros, we can move it to .bss.rel.ro.
Reviewers: espindola, ruiu, pcc
Reviewed By: ruiu
Subscribers: nemanjai, jvesely, nhaehnle, javed.absar, kbarton, emaste, arichardson, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D58892
llvm-svn: 356226
2019-03-15 09:29:57 +08:00
|
|
|
continue;
|
|
|
|
if (isRelroSection(sec)) {
|
|
|
|
inRelroPhdr = true;
|
|
|
|
if (!relroEnd)
|
|
|
|
relRo->add(sec);
|
|
|
|
else
|
|
|
|
error("section: " + sec->name + " is not contiguous with other relro" +
|
|
|
|
" sections");
|
|
|
|
} else if (inRelroPhdr) {
|
|
|
|
inRelroPhdr = false;
|
|
|
|
relroEnd = sec;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-07-28 03:22:43 +08:00
|
|
|
for (OutputSection *sec : outputSections) {
|
2017-03-16 19:20:02 +08:00
|
|
|
if (!needsPtLoad(sec))
|
2016-02-11 07:29:38 +08:00
|
|
|
continue;
|
|
|
|
|
2019-06-08 01:57:58 +08:00
|
|
|
// Normally, sections in partitions other than the current partition are
|
|
|
|
// ignored. But partition number 255 is a special case: it contains the
|
|
|
|
// partition end marker (.part.end). It needs to be added to the main
|
|
|
|
// partition so that a segment is created for it in the main partition,
|
|
|
|
// which will cause the dynamic loader to reserve space for the other
|
|
|
|
// partitions.
|
|
|
|
if (sec->partition != partNo) {
|
|
|
|
if (isMain && sec->partition == 255)
|
|
|
|
addHdr(PT_LOAD, computeFlags(sec->getPhdrFlags()))->add(sec);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2016-08-17 15:44:19 +08:00
|
|
|
// Segments are contiguous memory regions that has the same attributes
|
|
|
|
// (e.g. executable or writable). There is one phdr for each segment.
|
|
|
|
// Therefore, we need to create a new phdr when the next section has
|
2018-08-02 16:07:07 +08:00
|
|
|
// different flags or is loaded at a discontiguous address or memory
|
|
|
|
// region using AT or AT> linker script command, respectively. At the same
|
|
|
|
// time, we don't want to create a separate load segment for the headers,
|
|
|
|
// even if the first output section has an AT or AT> attribute.
|
2017-04-06 05:37:09 +08:00
|
|
|
uint64_t newFlags = computeFlags(sec->getPhdrFlags());
|
2020-02-09 14:04:06 +08:00
|
|
|
bool sameLMARegion =
|
|
|
|
load && !sec->lmaExpr && sec->lmaRegion == load->firstSec->lmaRegion;
|
|
|
|
if (!(load && newFlags == flags && sec != relroEnd &&
|
|
|
|
sec->memRegion == load->firstSec->memRegion &&
|
|
|
|
(sameLMARegion || load->lastSec == Out::programHeaders))) {
|
2016-03-10 05:37:22 +08:00
|
|
|
load = addHdr(PT_LOAD, newFlags);
|
2016-02-11 06:43:13 +08:00
|
|
|
flags = newFlags;
|
2015-08-13 23:31:17 +08:00
|
|
|
}
|
2015-08-12 08:00:24 +08:00
|
|
|
|
2016-07-21 03:36:41 +08:00
|
|
|
load->add(sec);
|
2015-11-05 10:00:35 +08:00
|
|
|
}
|
2015-11-03 08:34:39 +08:00
|
|
|
|
2017-02-02 06:42:17 +08:00
|
|
|
// Add a TLS segment if any.
|
2017-07-27 15:46:50 +08:00
|
|
|
PhdrEntry *tlsHdr = make<PhdrEntry>(PT_TLS, PF_R);
|
2017-07-28 03:22:43 +08:00
|
|
|
for (OutputSection *sec : outputSections)
|
2019-06-08 01:57:58 +08:00
|
|
|
if (sec->partition == partNo && sec->flags & SHF_TLS)
|
2017-07-27 15:46:50 +08:00
|
|
|
tlsHdr->add(sec);
|
2017-09-07 19:01:10 +08:00
|
|
|
if (tlsHdr->firstSec)
|
2017-07-27 15:46:50 +08:00
|
|
|
ret.push_back(tlsHdr);
|
2016-02-11 06:43:13 +08:00
|
|
|
|
2015-10-24 05:45:59 +08:00
|
|
|
// Add an entry for .dynamic.
|
2019-06-08 01:57:58 +08:00
|
|
|
if (OutputSection *sec = part.dynamic->getParent())
|
2018-12-10 17:24:49 +08:00
|
|
|
addHdr(PT_DYNAMIC, sec->getPhdrFlags())->add(sec);
|
2015-08-12 09:45:28 +08:00
|
|
|
|
2017-09-07 19:01:10 +08:00
|
|
|
if (relRo->firstSec)
|
2017-07-27 15:46:50 +08:00
|
|
|
ret.push_back(relRo);
|
2015-11-24 18:15:50 +08:00
|
|
|
|
2016-02-11 06:43:13 +08:00
|
|
|
// PT_GNU_EH_FRAME is a special section pointing on .eh_frame_hdr.
|
2019-06-08 01:57:58 +08:00
|
|
|
if (part.ehFrame->isNeeded() && part.ehFrameHdr &&
|
|
|
|
part.ehFrame->getParent() && part.ehFrameHdr->getParent())
|
|
|
|
addHdr(PT_GNU_EH_FRAME, part.ehFrameHdr->getParent()->getPhdrFlags())
|
|
|
|
->add(part.ehFrameHdr->getParent());
|
2016-01-15 21:34:52 +08:00
|
|
|
|
2017-03-24 08:15:57 +08:00
|
|
|
// PT_OPENBSD_RANDOMIZE is an OpenBSD-specific feature. That makes
|
|
|
|
// the dynamic linker fill the segment with random data.
|
2019-06-08 01:57:58 +08:00
|
|
|
if (OutputSection *cmd = findSection(".openbsd.randomdata", partNo))
|
2017-07-28 03:22:43 +08:00
|
|
|
addHdr(PT_OPENBSD_RANDOMIZE, cmd->getPhdrFlags())->add(cmd);
|
2016-10-14 21:02:22 +08:00
|
|
|
|
2019-10-22 01:27:51 +08:00
|
|
|
if (config->zGnustack != GnuStackKind::None) {
|
|
|
|
// PT_GNU_STACK is a special section to tell the loader to make the
|
|
|
|
// pages for the stack non-executable. If you really want an executable
|
|
|
|
// stack, you can pass -z execstack, but that's not recommended for
|
|
|
|
// security reasons.
|
|
|
|
unsigned perm = PF_R | PF_W;
|
|
|
|
if (config->zGnustack == GnuStackKind::Exec)
|
|
|
|
perm |= PF_X;
|
|
|
|
addHdr(PT_GNU_STACK, perm)->p_memsz = config->zStackSize;
|
|
|
|
}
|
2016-03-01 21:23:29 +08:00
|
|
|
|
2016-10-14 18:34:36 +08:00
|
|
|
// PT_OPENBSD_WXNEEDED is a OpenBSD-specific header to mark the executable
|
|
|
|
// is expected to perform W^X violations, such as calling mprotect(2) or
|
|
|
|
// mmap(2) with PROT_WRITE | PROT_EXEC, which is prohibited by default on
|
|
|
|
// OpenBSD.
|
|
|
|
if (config->zWxneeded)
|
|
|
|
addHdr(PT_OPENBSD_WXNEEDED, PF_X);
|
|
|
|
|
2019-12-03 02:34:56 +08:00
|
|
|
if (OutputSection *cmd = findSection(".note.gnu.property", partNo))
|
|
|
|
addHdr(PT_GNU_PROPERTY, PF_R)->add(cmd);
|
|
|
|
|
[ELF] Place SHT_NOTE sections with the same alignment into one PT_NOTE
Summary:
While the generic ABI requires notes to be 8-byte aligned in ELF64, many
vendor-specific notes (from Linux, NetBSD, Solaris, etc) use 4-byte
alignment.
In a PT_NOTE segment, if 4-byte aligned notes are followed by an 8-byte
aligned note, the possible 4-byte padding may make consumers fail to
parse the 8-byte aligned note. See PR41000 for a recent report about
.note.gnu.property (NT_GNU_PROPERTY_TYPE_0).
(Note, for NT_GNU_PROPERTY_TYPE_0, the consumers should probably migrate
to PT_GNU_PROPERTY, but the alignment issue affects other notes as well.)
To fix the issue, don't mix notes with different alignments in one
PT_NOTE. If compilers emit 4-byte aligned notes before 8-byte aligned
notes, we'll create at most 2 segments.
sh_size%sh_addralign=0 is actually implied by the rule for linking
unrecognized sections (in generic ABI), so we don't have to check that.
Notes that match in name, type and attribute flags are concatenated into
a single output section. The compilers have to ensure
sh_size%sh_addralign=0 to make concatenated notes parsable.
An alternative approach is to create a PT_NOTE for each SHT_NOTE, but
we'll have to incur the sizeof(Elf64_Phdr)=56 overhead every time a new
note section is introduced.
Reviewers: ruiu, jakehehrlich, phosek, jhenderson, pcc, espindola
Subscribers: emaste, arichardson, krytarowski, fedor.sergeev, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D61296
llvm-svn: 359853
2019-05-03 08:35:49 +08:00
|
|
|
// Create one PT_NOTE per a group of contiguous SHT_NOTE sections with the
|
|
|
|
// same alignment.
|
2017-02-02 04:58:41 +08:00
|
|
|
PhdrEntry *note = nullptr;
|
2017-07-28 03:22:43 +08:00
|
|
|
for (OutputSection *sec : outputSections) {
|
2019-06-08 01:57:58 +08:00
|
|
|
if (sec->partition != partNo)
|
|
|
|
continue;
|
2018-05-10 19:12:18 +08:00
|
|
|
if (sec->type == SHT_NOTE && (sec->flags & SHF_ALLOC)) {
|
[ELF] Place SHT_NOTE sections with the same alignment into one PT_NOTE
Summary:
While the generic ABI requires notes to be 8-byte aligned in ELF64, many
vendor-specific notes (from Linux, NetBSD, Solaris, etc) use 4-byte
alignment.
In a PT_NOTE segment, if 4-byte aligned notes are followed by an 8-byte
aligned note, the possible 4-byte padding may make consumers fail to
parse the 8-byte aligned note. See PR41000 for a recent report about
.note.gnu.property (NT_GNU_PROPERTY_TYPE_0).
(Note, for NT_GNU_PROPERTY_TYPE_0, the consumers should probably migrate
to PT_GNU_PROPERTY, but the alignment issue affects other notes as well.)
To fix the issue, don't mix notes with different alignments in one
PT_NOTE. If compilers emit 4-byte aligned notes before 8-byte aligned
notes, we'll create at most 2 segments.
sh_size%sh_addralign=0 is actually implied by the rule for linking
unrecognized sections (in generic ABI), so we don't have to check that.
Notes that match in name, type and attribute flags are concatenated into
a single output section. The compilers have to ensure
sh_size%sh_addralign=0 to make concatenated notes parsable.
An alternative approach is to create a PT_NOTE for each SHT_NOTE, but
we'll have to incur the sizeof(Elf64_Phdr)=56 overhead every time a new
note section is introduced.
Reviewers: ruiu, jakehehrlich, phosek, jhenderson, pcc, espindola
Subscribers: emaste, arichardson, krytarowski, fedor.sergeev, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D61296
llvm-svn: 359853
2019-05-03 08:35:49 +08:00
|
|
|
if (!note || sec->lmaExpr || note->lastSec->alignment != sec->alignment)
|
2017-02-02 04:58:41 +08:00
|
|
|
note = addHdr(PT_NOTE, PF_R);
|
|
|
|
note->add(sec);
|
|
|
|
} else {
|
|
|
|
note = nullptr;
|
|
|
|
}
|
|
|
|
}
|
2016-07-21 03:36:39 +08:00
|
|
|
return ret;
|
2016-02-11 06:43:13 +08:00
|
|
|
}
|
|
|
|
|
2016-11-28 08:40:21 +08:00
|
|
|
template <class ELFT>
|
2019-06-08 01:57:58 +08:00
|
|
|
void Writer<ELFT>::addPhdrForSection(Partition &part, unsigned shType,
|
|
|
|
unsigned pType, unsigned pFlags) {
|
|
|
|
unsigned partNo = part.getNumber();
|
|
|
|
auto i = llvm::find_if(outputSections, [=](OutputSection *cmd) {
|
|
|
|
return cmd->partition == partNo && cmd->type == shType;
|
|
|
|
});
|
2017-07-28 03:22:43 +08:00
|
|
|
if (i == outputSections.end())
|
2016-11-28 08:40:21 +08:00
|
|
|
return;
|
|
|
|
|
2019-02-20 22:47:02 +08:00
|
|
|
PhdrEntry *entry = make<PhdrEntry>(pType, pFlags);
|
|
|
|
entry->add(*i);
|
2019-06-08 01:57:58 +08:00
|
|
|
part.phdrs.push_back(entry);
|
2016-11-28 08:40:21 +08:00
|
|
|
}
|
|
|
|
|
[ELF][PPC] Allow PT_LOAD to have overlapping p_offset ranges
This change affects the non-linker script case (precisely, when the
`SECTIONS` command is not used). It deletes 3 alignments at PT_LOAD
boundaries for the default case: the size of a powerpc64 binary can be
decreased by at most 192kb. The technique can be ported to other
targets.
Let me demonstrate the idea with a maxPageSize=65536 example:
When assigning the address to the first output section of a new PT_LOAD,
if the end p_vaddr of the previous PT_LOAD is 0x10020, we advance to
the next multiple of maxPageSize: 0x20000. The new PT_LOAD will thus
have p_vaddr=0x20000. Because p_offset and p_vaddr are congruent modulo
maxPageSize, p_offset will be 0x20000, leaving a p_offset gap [0x10020,
0x20000) in the output.
Alternatively, if we advance to 0x20020, the new PT_LOAD will have
p_vaddr=0x20020. We can pick either 0x10020 or 0x20020 for p_offset!
Obviously 0x10020 is the choice because it leaves no gap. At runtime,
p_vaddr will be rounded down by pagesize (65536 if
pagesize=maxPageSize). This PT_LOAD will load additional initial
contents from p_offset ranges [0x10000,0x10020), which will also be
loaded by the previous PT_LOAD. This is fine if -z noseparate-code is in
effect or if we are not transiting between executable and non-executable
segments.
ld.bfd -z noseparate-code leverages this technique to keep output small.
This patch implements the technique in lld, which is mostly effective on
targets with large defaultMaxPageSize (AArch64/MIPS/PPC: 65536). The 3
removed alignments can save almost 3*65536 bytes.
Two places that rely on p_vaddr%pagesize = 0 have to be updated.
1) We used to round p_memsz(PT_GNU_RELRO) up to commonPageSize (defaults
to 4096 on all targets). Now p_vaddr%commonPageSize may be non-zero.
The updated formula takes account of that factor.
2) Our TP offsets formulae are only correct if p_vaddr%p_align = 0.
Fix them. See the updated comments in InputSection.cpp for details.
On targets that we enable the technique (only PPC64 now),
we can potentially make `p_vaddr(PT_TLS)%p_align(PT_TLS) != 0`
if `sh_addralign(.tdata) < sh_addralign(.tbss)`
This exposes many problems in ld.so implementations, especially the
offsets of dynamic TLS blocks. Known issues:
FreeBSD 13.0-CURRENT rtld-elf (i386/amd64/powerpc/arm64)
glibc (HEAD) i386 and x86_64 https://sourceware.org/bugzilla/show_bug.cgi?id=24606
musl<=1.1.22 on TLS Variant I architectures (aarch64/powerpc64/...)
So, force p_vaddr%p_align = 0 by rounding dot up to p_align(PT_TLS).
The technique will be enabled (with updated tests) for other targets in
subsequent patches.
Reviewed By: ruiu
Differential Revision: https://reviews.llvm.org/D64906
llvm-svn: 369343
2019-08-20 16:34:25 +08:00
|
|
|
// Place the first section of each PT_LOAD to a different page (of maxPageSize).
|
|
|
|
// This is achieved by assigning an alignment expression to addrExpr of each
|
|
|
|
// such section.
|
2016-03-31 03:41:51 +08:00
|
|
|
template <class ELFT> void Writer<ELFT>::fixSectionAlignments() {
|
[ELF][PPC] Allow PT_LOAD to have overlapping p_offset ranges
This change affects the non-linker script case (precisely, when the
`SECTIONS` command is not used). It deletes 3 alignments at PT_LOAD
boundaries for the default case: the size of a powerpc64 binary can be
decreased by at most 192kb. The technique can be ported to other
targets.
Let me demonstrate the idea with a maxPageSize=65536 example:
When assigning the address to the first output section of a new PT_LOAD,
if the end p_vaddr of the previous PT_LOAD is 0x10020, we advance to
the next multiple of maxPageSize: 0x20000. The new PT_LOAD will thus
have p_vaddr=0x20000. Because p_offset and p_vaddr are congruent modulo
maxPageSize, p_offset will be 0x20000, leaving a p_offset gap [0x10020,
0x20000) in the output.
Alternatively, if we advance to 0x20020, the new PT_LOAD will have
p_vaddr=0x20020. We can pick either 0x10020 or 0x20020 for p_offset!
Obviously 0x10020 is the choice because it leaves no gap. At runtime,
p_vaddr will be rounded down by pagesize (65536 if
pagesize=maxPageSize). This PT_LOAD will load additional initial
contents from p_offset ranges [0x10000,0x10020), which will also be
loaded by the previous PT_LOAD. This is fine if -z noseparate-code is in
effect or if we are not transiting between executable and non-executable
segments.
ld.bfd -z noseparate-code leverages this technique to keep output small.
This patch implements the technique in lld, which is mostly effective on
targets with large defaultMaxPageSize (AArch64/MIPS/PPC: 65536). The 3
removed alignments can save almost 3*65536 bytes.
Two places that rely on p_vaddr%pagesize = 0 have to be updated.
1) We used to round p_memsz(PT_GNU_RELRO) up to commonPageSize (defaults
to 4096 on all targets). Now p_vaddr%commonPageSize may be non-zero.
The updated formula takes account of that factor.
2) Our TP offsets formulae are only correct if p_vaddr%p_align = 0.
Fix them. See the updated comments in InputSection.cpp for details.
On targets that we enable the technique (only PPC64 now),
we can potentially make `p_vaddr(PT_TLS)%p_align(PT_TLS) != 0`
if `sh_addralign(.tdata) < sh_addralign(.tbss)`
This exposes many problems in ld.so implementations, especially the
offsets of dynamic TLS blocks. Known issues:
FreeBSD 13.0-CURRENT rtld-elf (i386/amd64/powerpc/arm64)
glibc (HEAD) i386 and x86_64 https://sourceware.org/bugzilla/show_bug.cgi?id=24606
musl<=1.1.22 on TLS Variant I architectures (aarch64/powerpc64/...)
So, force p_vaddr%p_align = 0 by rounding dot up to p_align(PT_TLS).
The technique will be enabled (with updated tests) for other targets in
subsequent patches.
Reviewed By: ruiu
Differential Revision: https://reviews.llvm.org/D64906
llvm-svn: 369343
2019-08-20 16:34:25 +08:00
|
|
|
const PhdrEntry *prev;
|
|
|
|
auto pageAlign = [&](const PhdrEntry *p) {
|
|
|
|
OutputSection *cmd = p->firstSec;
|
2020-02-18 02:21:31 +08:00
|
|
|
if (!cmd)
|
|
|
|
return;
|
|
|
|
cmd->alignExpr = [align = cmd->alignment]() { return align; };
|
|
|
|
if (!cmd->addrExpr) {
|
[ELF][PPC] Allow PT_LOAD to have overlapping p_offset ranges
This change affects the non-linker script case (precisely, when the
`SECTIONS` command is not used). It deletes 3 alignments at PT_LOAD
boundaries for the default case: the size of a powerpc64 binary can be
decreased by at most 192kb. The technique can be ported to other
targets.
Let me demonstrate the idea with a maxPageSize=65536 example:
When assigning the address to the first output section of a new PT_LOAD,
if the end p_vaddr of the previous PT_LOAD is 0x10020, we advance to
the next multiple of maxPageSize: 0x20000. The new PT_LOAD will thus
have p_vaddr=0x20000. Because p_offset and p_vaddr are congruent modulo
maxPageSize, p_offset will be 0x20000, leaving a p_offset gap [0x10020,
0x20000) in the output.
Alternatively, if we advance to 0x20020, the new PT_LOAD will have
p_vaddr=0x20020. We can pick either 0x10020 or 0x20020 for p_offset!
Obviously 0x10020 is the choice because it leaves no gap. At runtime,
p_vaddr will be rounded down by pagesize (65536 if
pagesize=maxPageSize). This PT_LOAD will load additional initial
contents from p_offset ranges [0x10000,0x10020), which will also be
loaded by the previous PT_LOAD. This is fine if -z noseparate-code is in
effect or if we are not transiting between executable and non-executable
segments.
ld.bfd -z noseparate-code leverages this technique to keep output small.
This patch implements the technique in lld, which is mostly effective on
targets with large defaultMaxPageSize (AArch64/MIPS/PPC: 65536). The 3
removed alignments can save almost 3*65536 bytes.
Two places that rely on p_vaddr%pagesize = 0 have to be updated.
1) We used to round p_memsz(PT_GNU_RELRO) up to commonPageSize (defaults
to 4096 on all targets). Now p_vaddr%commonPageSize may be non-zero.
The updated formula takes account of that factor.
2) Our TP offsets formulae are only correct if p_vaddr%p_align = 0.
Fix them. See the updated comments in InputSection.cpp for details.
On targets that we enable the technique (only PPC64 now),
we can potentially make `p_vaddr(PT_TLS)%p_align(PT_TLS) != 0`
if `sh_addralign(.tdata) < sh_addralign(.tbss)`
This exposes many problems in ld.so implementations, especially the
offsets of dynamic TLS blocks. Known issues:
FreeBSD 13.0-CURRENT rtld-elf (i386/amd64/powerpc/arm64)
glibc (HEAD) i386 and x86_64 https://sourceware.org/bugzilla/show_bug.cgi?id=24606
musl<=1.1.22 on TLS Variant I architectures (aarch64/powerpc64/...)
So, force p_vaddr%p_align = 0 by rounding dot up to p_align(PT_TLS).
The technique will be enabled (with updated tests) for other targets in
subsequent patches.
Reviewed By: ruiu
Differential Revision: https://reviews.llvm.org/D64906
llvm-svn: 369343
2019-08-20 16:34:25 +08:00
|
|
|
// Prefer advancing to align(dot, maxPageSize) + dot%maxPageSize to avoid
|
|
|
|
// padding in the file contents.
|
|
|
|
//
|
|
|
|
// When -z separate-code is used we must not have any overlap in pages
|
|
|
|
// between an executable segment and a non-executable segment. We align to
|
|
|
|
// the next maximum page size boundary on transitions between executable
|
|
|
|
// and non-executable segments.
|
|
|
|
//
|
2019-09-02 16:49:50 +08:00
|
|
|
// SHT_LLVM_PART_EHDR marks the start of a partition. The partition
|
|
|
|
// sections will be extracted to a separate file. Align to the next
|
|
|
|
// maximum page size boundary so that we can find the ELF header at the
|
|
|
|
// start. We cannot benefit from overlapping p_offset ranges with the
|
|
|
|
// previous segment anyway.
|
2019-09-25 11:39:31 +08:00
|
|
|
if (config->zSeparate == SeparateSegmentKind::Loadable ||
|
|
|
|
(config->zSeparate == SeparateSegmentKind::Code && prev &&
|
2019-09-02 16:49:50 +08:00
|
|
|
(prev->p_flags & PF_X) != (p->p_flags & PF_X)) ||
|
|
|
|
cmd->type == SHT_LLVM_PART_EHDR)
|
[ELF][PPC] Allow PT_LOAD to have overlapping p_offset ranges
This change affects the non-linker script case (precisely, when the
`SECTIONS` command is not used). It deletes 3 alignments at PT_LOAD
boundaries for the default case: the size of a powerpc64 binary can be
decreased by at most 192kb. The technique can be ported to other
targets.
Let me demonstrate the idea with a maxPageSize=65536 example:
When assigning the address to the first output section of a new PT_LOAD,
if the end p_vaddr of the previous PT_LOAD is 0x10020, we advance to
the next multiple of maxPageSize: 0x20000. The new PT_LOAD will thus
have p_vaddr=0x20000. Because p_offset and p_vaddr are congruent modulo
maxPageSize, p_offset will be 0x20000, leaving a p_offset gap [0x10020,
0x20000) in the output.
Alternatively, if we advance to 0x20020, the new PT_LOAD will have
p_vaddr=0x20020. We can pick either 0x10020 or 0x20020 for p_offset!
Obviously 0x10020 is the choice because it leaves no gap. At runtime,
p_vaddr will be rounded down by pagesize (65536 if
pagesize=maxPageSize). This PT_LOAD will load additional initial
contents from p_offset ranges [0x10000,0x10020), which will also be
loaded by the previous PT_LOAD. This is fine if -z noseparate-code is in
effect or if we are not transiting between executable and non-executable
segments.
ld.bfd -z noseparate-code leverages this technique to keep output small.
This patch implements the technique in lld, which is mostly effective on
targets with large defaultMaxPageSize (AArch64/MIPS/PPC: 65536). The 3
removed alignments can save almost 3*65536 bytes.
Two places that rely on p_vaddr%pagesize = 0 have to be updated.
1) We used to round p_memsz(PT_GNU_RELRO) up to commonPageSize (defaults
to 4096 on all targets). Now p_vaddr%commonPageSize may be non-zero.
The updated formula takes account of that factor.
2) Our TP offsets formulae are only correct if p_vaddr%p_align = 0.
Fix them. See the updated comments in InputSection.cpp for details.
On targets that we enable the technique (only PPC64 now),
we can potentially make `p_vaddr(PT_TLS)%p_align(PT_TLS) != 0`
if `sh_addralign(.tdata) < sh_addralign(.tbss)`
This exposes many problems in ld.so implementations, especially the
offsets of dynamic TLS blocks. Known issues:
FreeBSD 13.0-CURRENT rtld-elf (i386/amd64/powerpc/arm64)
glibc (HEAD) i386 and x86_64 https://sourceware.org/bugzilla/show_bug.cgi?id=24606
musl<=1.1.22 on TLS Variant I architectures (aarch64/powerpc64/...)
So, force p_vaddr%p_align = 0 by rounding dot up to p_align(PT_TLS).
The technique will be enabled (with updated tests) for other targets in
subsequent patches.
Reviewed By: ruiu
Differential Revision: https://reviews.llvm.org/D64906
llvm-svn: 369343
2019-08-20 16:34:25 +08:00
|
|
|
cmd->addrExpr = [] {
|
|
|
|
return alignTo(script->getDot(), config->maxPageSize);
|
|
|
|
};
|
|
|
|
// PT_TLS is at the start of the first RW PT_LOAD. If `p` includes PT_TLS,
|
|
|
|
// it must be the RW. Align to p_align(PT_TLS) to make sure
|
|
|
|
// p_vaddr(PT_LOAD)%p_align(PT_LOAD) = 0. Otherwise, if
|
|
|
|
// sh_addralign(.tdata) < sh_addralign(.tbss), we will set p_align(PT_TLS)
|
|
|
|
// to sh_addralign(.tbss), while p_vaddr(PT_TLS)=p_vaddr(PT_LOAD) may not
|
|
|
|
// be congruent to 0 modulo p_align(PT_TLS).
|
|
|
|
//
|
|
|
|
// Technically this is not required, but as of 2019, some dynamic loaders
|
|
|
|
// don't handle p_vaddr%p_align != 0 correctly, e.g. glibc (i386 and
|
|
|
|
// x86-64) doesn't make runtime address congruent to p_vaddr modulo
|
|
|
|
// p_align for dynamic TLS blocks (PR/24606), FreeBSD rtld has the same
|
|
|
|
// bug, musl (TLS Variant 1 architectures) before 1.1.23 handled TLS
|
|
|
|
// blocks correctly. We need to keep the workaround for a while.
|
|
|
|
else if (Out::tlsPhdr && Out::tlsPhdr->firstSec == p->firstSec)
|
|
|
|
cmd->addrExpr = [] {
|
|
|
|
return alignTo(script->getDot(), config->maxPageSize) +
|
|
|
|
alignTo(script->getDot() % config->maxPageSize,
|
|
|
|
Out::tlsPhdr->p_align);
|
|
|
|
};
|
|
|
|
else
|
|
|
|
cmd->addrExpr = [] {
|
|
|
|
return alignTo(script->getDot(), config->maxPageSize) +
|
|
|
|
script->getDot() % config->maxPageSize;
|
|
|
|
};
|
|
|
|
}
|
2017-06-02 09:37:58 +08:00
|
|
|
};
|
|
|
|
|
2019-06-08 01:57:58 +08:00
|
|
|
for (Partition &part : partitions) {
|
[ELF][PPC] Allow PT_LOAD to have overlapping p_offset ranges
This change affects the non-linker script case (precisely, when the
`SECTIONS` command is not used). It deletes 3 alignments at PT_LOAD
boundaries for the default case: the size of a powerpc64 binary can be
decreased by at most 192kb. The technique can be ported to other
targets.
Let me demonstrate the idea with a maxPageSize=65536 example:
When assigning the address to the first output section of a new PT_LOAD,
if the end p_vaddr of the previous PT_LOAD is 0x10020, we advance to
the next multiple of maxPageSize: 0x20000. The new PT_LOAD will thus
have p_vaddr=0x20000. Because p_offset and p_vaddr are congruent modulo
maxPageSize, p_offset will be 0x20000, leaving a p_offset gap [0x10020,
0x20000) in the output.
Alternatively, if we advance to 0x20020, the new PT_LOAD will have
p_vaddr=0x20020. We can pick either 0x10020 or 0x20020 for p_offset!
Obviously 0x10020 is the choice because it leaves no gap. At runtime,
p_vaddr will be rounded down by pagesize (65536 if
pagesize=maxPageSize). This PT_LOAD will load additional initial
contents from p_offset ranges [0x10000,0x10020), which will also be
loaded by the previous PT_LOAD. This is fine if -z noseparate-code is in
effect or if we are not transiting between executable and non-executable
segments.
ld.bfd -z noseparate-code leverages this technique to keep output small.
This patch implements the technique in lld, which is mostly effective on
targets with large defaultMaxPageSize (AArch64/MIPS/PPC: 65536). The 3
removed alignments can save almost 3*65536 bytes.
Two places that rely on p_vaddr%pagesize = 0 have to be updated.
1) We used to round p_memsz(PT_GNU_RELRO) up to commonPageSize (defaults
to 4096 on all targets). Now p_vaddr%commonPageSize may be non-zero.
The updated formula takes account of that factor.
2) Our TP offsets formulae are only correct if p_vaddr%p_align = 0.
Fix them. See the updated comments in InputSection.cpp for details.
On targets that we enable the technique (only PPC64 now),
we can potentially make `p_vaddr(PT_TLS)%p_align(PT_TLS) != 0`
if `sh_addralign(.tdata) < sh_addralign(.tbss)`
This exposes many problems in ld.so implementations, especially the
offsets of dynamic TLS blocks. Known issues:
FreeBSD 13.0-CURRENT rtld-elf (i386/amd64/powerpc/arm64)
glibc (HEAD) i386 and x86_64 https://sourceware.org/bugzilla/show_bug.cgi?id=24606
musl<=1.1.22 on TLS Variant I architectures (aarch64/powerpc64/...)
So, force p_vaddr%p_align = 0 by rounding dot up to p_align(PT_TLS).
The technique will be enabled (with updated tests) for other targets in
subsequent patches.
Reviewed By: ruiu
Differential Revision: https://reviews.llvm.org/D64906
llvm-svn: 369343
2019-08-20 16:34:25 +08:00
|
|
|
prev = nullptr;
|
2019-06-08 01:57:58 +08:00
|
|
|
for (const PhdrEntry *p : part.phdrs)
|
[ELF][PPC] Allow PT_LOAD to have overlapping p_offset ranges
This change affects the non-linker script case (precisely, when the
`SECTIONS` command is not used). It deletes 3 alignments at PT_LOAD
boundaries for the default case: the size of a powerpc64 binary can be
decreased by at most 192kb. The technique can be ported to other
targets.
Let me demonstrate the idea with a maxPageSize=65536 example:
When assigning the address to the first output section of a new PT_LOAD,
if the end p_vaddr of the previous PT_LOAD is 0x10020, we advance to
the next multiple of maxPageSize: 0x20000. The new PT_LOAD will thus
have p_vaddr=0x20000. Because p_offset and p_vaddr are congruent modulo
maxPageSize, p_offset will be 0x20000, leaving a p_offset gap [0x10020,
0x20000) in the output.
Alternatively, if we advance to 0x20020, the new PT_LOAD will have
p_vaddr=0x20020. We can pick either 0x10020 or 0x20020 for p_offset!
Obviously 0x10020 is the choice because it leaves no gap. At runtime,
p_vaddr will be rounded down by pagesize (65536 if
pagesize=maxPageSize). This PT_LOAD will load additional initial
contents from p_offset ranges [0x10000,0x10020), which will also be
loaded by the previous PT_LOAD. This is fine if -z noseparate-code is in
effect or if we are not transiting between executable and non-executable
segments.
ld.bfd -z noseparate-code leverages this technique to keep output small.
This patch implements the technique in lld, which is mostly effective on
targets with large defaultMaxPageSize (AArch64/MIPS/PPC: 65536). The 3
removed alignments can save almost 3*65536 bytes.
Two places that rely on p_vaddr%pagesize = 0 have to be updated.
1) We used to round p_memsz(PT_GNU_RELRO) up to commonPageSize (defaults
to 4096 on all targets). Now p_vaddr%commonPageSize may be non-zero.
The updated formula takes account of that factor.
2) Our TP offsets formulae are only correct if p_vaddr%p_align = 0.
Fix them. See the updated comments in InputSection.cpp for details.
On targets that we enable the technique (only PPC64 now),
we can potentially make `p_vaddr(PT_TLS)%p_align(PT_TLS) != 0`
if `sh_addralign(.tdata) < sh_addralign(.tbss)`
This exposes many problems in ld.so implementations, especially the
offsets of dynamic TLS blocks. Known issues:
FreeBSD 13.0-CURRENT rtld-elf (i386/amd64/powerpc/arm64)
glibc (HEAD) i386 and x86_64 https://sourceware.org/bugzilla/show_bug.cgi?id=24606
musl<=1.1.22 on TLS Variant I architectures (aarch64/powerpc64/...)
So, force p_vaddr%p_align = 0 by rounding dot up to p_align(PT_TLS).
The technique will be enabled (with updated tests) for other targets in
subsequent patches.
Reviewed By: ruiu
Differential Revision: https://reviews.llvm.org/D64906
llvm-svn: 369343
2019-08-20 16:34:25 +08:00
|
|
|
if (p->p_type == PT_LOAD && p->firstSec) {
|
|
|
|
pageAlign(p);
|
|
|
|
prev = p;
|
|
|
|
}
|
2016-03-31 03:41:51 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-10-24 23:47:46 +08:00
|
|
|
// Compute an in-file position for a given section. The file offset must be the
|
|
|
|
// same with its virtual address modulo the page size, so that the loader can
|
|
|
|
// load executables without any address adjustment.
|
|
|
|
static uint64_t computeFileOffset(OutputSection *os, uint64_t off) {
|
|
|
|
// The first section in a PT_LOAD has to have congruent offset and address
|
2019-09-06 00:32:31 +08:00
|
|
|
// modulo the maximum page size.
|
|
|
|
if (os->ptLoad && os->ptLoad->firstSec == os)
|
|
|
|
return alignTo(off, os->ptLoad->p_align, os->addr);
|
2018-01-03 00:46:30 +08:00
|
|
|
|
2019-08-24 08:41:15 +08:00
|
|
|
// File offsets are not significant for .bss sections other than the first one
|
2021-07-29 22:13:58 +08:00
|
|
|
// in a PT_LOAD/PT_TLS. By convention, we keep section offsets monotonically
|
2019-08-24 08:41:15 +08:00
|
|
|
// increasing rather than setting to zero.
|
2021-07-29 22:13:58 +08:00
|
|
|
if (os->type == SHT_NOBITS &&
|
|
|
|
(!Out::tlsPhdr || Out::tlsPhdr->firstSec != os))
|
2019-08-24 08:41:15 +08:00
|
|
|
return off;
|
|
|
|
|
|
|
|
// If the section is not in a PT_LOAD, we just have to align it.
|
|
|
|
if (!os->ptLoad)
|
|
|
|
return alignTo(off, os->alignment);
|
|
|
|
|
2016-12-08 04:20:39 +08:00
|
|
|
// If two sections share the same PT_LOAD the file offset is calculated
|
|
|
|
// using this formula: Off2 = Off1 + (VA2 - VA1).
|
2019-08-24 08:41:15 +08:00
|
|
|
OutputSection *first = os->ptLoad->firstSec;
|
2018-10-24 23:47:46 +08:00
|
|
|
return first->offset + os->addr - first->addr;
|
2016-04-27 17:16:28 +08:00
|
|
|
}
|
|
|
|
|
2016-08-25 17:05:47 +08:00
|
|
|
// Assign file offsets for --oformat binary output. Sections are placed at
// their LMA, rebased so that the lowest LMA lands at file offset 0.
template <class ELFT> void Writer<ELFT>::assignFileOffsetsBinary() {
  // Only non-empty, allocatable, non-NOBITS sections occupy file space in a
  // binary output.
  auto occupiesFile = [](const OutputSection &sec) {
    return sec.type != SHT_NOBITS && (sec.flags & SHF_ALLOC) && sec.size > 0;
  };

  // First pass: record each section's LMA as its tentative offset and find
  // the smallest LMA among them.
  uint64_t minAddr = UINT64_MAX;
  for (OutputSection *sec : outputSections) {
    if (!occupiesFile(*sec))
      continue;
    sec->offset = sec->getLMA();
    minAddr = std::min(minAddr, sec->offset);
  }

  // Second pass: shift every offset down by minAddr and compute the total
  // file size as the furthest section end.
  fileSize = 0;
  for (OutputSection *sec : outputSections) {
    if (!occupiesFile(*sec))
      continue;
    sec->offset -= minAddr;
    fileSize = std::max(fileSize, sec->offset + sec->size);
  }
}
|
|
|
|
|
2018-03-13 16:47:17 +08:00
|
|
|
static std::string rangeToString(uint64_t addr, uint64_t len) {
|
|
|
|
return "[0x" + utohexstr(addr) + ", 0x" + utohexstr(addr + len - 1) + "]";
|
|
|
|
}
|
|
|
|
|
2016-04-02 01:07:17 +08:00
|
|
|
// Assign file offsets to output sections.
//
// Walks the sections in layout order, maintaining a running offset `off`.
// SHF_ALLOC sections are placed first (via computeFileOffset, which keeps
// offset/address congruence for PT_LOAD members), then non-SHF_ALLOC
// sections, and finally the section header table. Ends with an overflow
// sanity check for linker-script-controlled layouts.
template <class ELFT> void Writer<ELFT>::assignFileOffsets() {
  // Program headers immediately follow the ELF header in the file.
  Out::programHeaders->offset = Out::elfHeader->size;
  uint64_t off = Out::elfHeader->size + Out::programHeaders->size;

  // Find the last executable PT_LOAD across all partitions; used below to
  // pad after the executable region when -z separate-code is in effect.
  PhdrEntry *lastRX = nullptr;
  for (Partition &part : partitions)
    for (PhdrEntry *p : part.phdrs)
      if (p->p_type == PT_LOAD && (p->p_flags & PF_X))
        lastRX = p;

  // Layout SHF_ALLOC sections before non-SHF_ALLOC sections. A non-SHF_ALLOC
  // will not occupy file offsets contained by a PT_LOAD.
  for (OutputSection *sec : outputSections) {
    if (!(sec->flags & SHF_ALLOC))
      continue;
    off = computeFileOffset(sec, off);
    sec->offset = off;
    // SHT_NOBITS sections (.bss) take no file space, so do not advance.
    if (sec->type != SHT_NOBITS)
      off += sec->size;

    // If this is a last section of the last executable segment and that
    // segment is the last loadable segment, align the offset of the
    // following section to avoid loading non-segments parts of the file.
    if (config->zSeparate != SeparateSegmentKind::None && lastRX &&
        lastRX->lastSec == sec)
      off = alignTo(off, config->maxPageSize);
  }
  // Non-allocated sections (e.g. .comment, debug info) follow, each aligned
  // only to its own requirement.
  for (OutputSection *osec : outputSections)
    if (!(osec->flags & SHF_ALLOC)) {
      osec->offset = alignTo(off, osec->alignment);
      off = osec->offset + osec->size;
    }

  // Place the section header table last; +1 accounts for the null SHT entry
  // at index 0.
  sectionHeaderOff = alignTo(off, config->wordsize);
  fileSize = sectionHeaderOff + (outputSections.size() + 1) * sizeof(Elf_Shdr);

  // Our logic assumes that sections have rising VA within the same segment.
  // With use of linker scripts it is possible to violate this rule and get file
  // offset overlaps or overflows. That should never happen with a valid script
  // which does not move the location counter backwards and usually scripts do
  // not do that. Unfortunately, there are apps in the wild, for example, Linux
  // kernel, which control segment distribution explicitly and move the counter
  // backwards, so we have to allow doing that to support linking them. We
  // perform non-critical checks for overlaps in checkSectionOverlap(), but here
  // we want to prevent file size overflows because it would crash the linker.
  for (OutputSection *sec : outputSections) {
    if (sec->type == SHT_NOBITS)
      continue;
    if ((sec->offset > fileSize) || (sec->offset + sec->size > fileSize))
      error("unable to place section " + sec->name + " at file offset " +
            rangeToString(sec->offset, sec->size) +
            "; check your linker script for overflows");
  }
}
|
|
|
|
|
2016-04-02 01:07:17 +08:00
|
|
|
// Finalize the program headers. We call this function after we assign
// file offsets and VAs to all sections.
//
// Derives each segment's p_offset/p_vaddr/p_paddr/p_filesz/p_memsz from its
// first and last member sections, then applies the PT_GNU_RELRO rounding
// quirk required by common ld.so implementations.
template <class ELFT> void Writer<ELFT>::setPhdrs(Partition &part) {
  for (PhdrEntry *p : part.phdrs) {
    OutputSection *first = p->firstSec;
    OutputSection *last = p->lastSec;

    // A segment with no sections (first == nullptr) keeps its preset fields.
    if (first) {
      // File size spans from the first section's offset to the end of the
      // last section — unless the last section is SHT_NOBITS, which takes no
      // file space.
      p->p_filesz = last->offset - first->offset;
      if (last->type != SHT_NOBITS)
        p->p_filesz += last->size;

      // Memory size always includes the last section's full size (.bss
      // occupies memory even though it occupies no file space).
      p->p_memsz = last->addr + last->size - first->addr;
      p->p_offset = first->offset;
      p->p_vaddr = first->addr;

      // File offsets in partitions other than the main partition are relative
      // to the offset of the ELF headers. Perform that adjustment now.
      if (part.elfHeader)
        p->p_offset -= part.elfHeader->getParent()->offset;

      // Unless a linker script pinned the LMA (AT(...)), derive the physical
      // address from the first section.
      if (!p->hasLMA)
        p->p_paddr = first->getLMA();
    }

    if (p->p_type == PT_GNU_RELRO) {
      p->p_align = 1;
      // musl/glibc ld.so rounds the size down, so we need to round up
      // to protect the last page. This is a no-op on FreeBSD which always
      // rounds up.
      // Note p_vaddr%commonPageSize may be non-zero, hence the rounding is
      // done on the segment end (p_offset + p_memsz) rather than on p_memsz
      // alone.
      p->p_memsz = alignTo(p->p_offset + p->p_memsz, config->commonPageSize) -
                   p->p_offset;
    }
  }
}
|
|
|
|
|
2018-03-30 02:24:01 +08:00
|
|
|
// A helper struct for checkSectionOverlap.
namespace {
struct SectionOffset {
  // The section being checked.
  OutputSection *sec;
  // The section's position in the address space under test: a file offset, a
  // virtual address, or a load address depending on the caller.
  uint64_t offset;
};
} // namespace
|
|
|
|
|
2018-01-31 17:22:44 +08:00
|
|
|
// Check whether sections overlap for a specific address range (file offsets,
|
2019-10-29 09:41:38 +08:00
|
|
|
// load and virtual addresses).
|
2018-06-27 16:08:12 +08:00
|
|
|
static void checkOverlap(StringRef name, std::vector<SectionOffset> §ions,
|
|
|
|
bool isVirtualAddr) {
|
2018-09-27 04:54:42 +08:00
|
|
|
llvm::sort(sections, [=](const SectionOffset &a, const SectionOffset &b) {
|
|
|
|
return a.offset < b.offset;
|
|
|
|
});
|
2018-03-30 02:24:01 +08:00
|
|
|
|
2018-03-30 03:51:53 +08:00
|
|
|
// Finding overlap is easy given a vector is sorted by start position.
|
|
|
|
// If an element starts before the end of the previous element, they overlap.
|
|
|
|
for (size_t i = 1, end = sections.size(); i < end; ++i) {
|
|
|
|
SectionOffset a = sections[i - 1];
|
|
|
|
SectionOffset b = sections[i];
|
2018-06-27 16:08:12 +08:00
|
|
|
if (b.offset >= a.offset + a.sec->size)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
// If both sections are in OVERLAY we allow the overlapping of virtual
|
|
|
|
// addresses, because it is what OVERLAY was designed for.
|
|
|
|
if (isVirtualAddr && a.sec->inOverlay && b.sec->inOverlay)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
errorOrWarn("section " + a.sec->name + " " + name +
|
|
|
|
" range overlaps with " + b.sec->name + "\n>>> " + a.sec->name +
|
|
|
|
" range is " + rangeToString(a.offset, a.sec->size) + "\n>>> " +
|
|
|
|
b.sec->name + " range is " +
|
|
|
|
rangeToString(b.offset, b.sec->size));
|
2018-01-31 17:22:44 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-04-03 20:39:28 +08:00
|
|
|
// Check for overlapping sections and address overflows.
|
2018-01-31 17:22:44 +08:00
|
|
|
//
|
|
|
|
// In this function we check that none of the output sections have overlapping
|
|
|
|
// file offsets. For SHF_ALLOC sections we also check that the load address
|
|
|
|
// ranges and the virtual address ranges don't overlap
|
2018-04-04 17:24:31 +08:00
|
|
|
template <class ELFT> void Writer<ELFT>::checkSections() {
|
2018-04-03 20:39:28 +08:00
|
|
|
// First, check that section's VAs fit in available address space for target.
|
|
|
|
for (OutputSection *os : outputSections)
|
|
|
|
if ((os->addr + os->size < os->addr) ||
|
|
|
|
(!ELFT::Is64Bits && os->addr + os->size > UINT32_MAX))
|
|
|
|
errorOrWarn("section " + os->name + " at 0x" + utohexstr(os->addr) +
|
|
|
|
" of size 0x" + utohexstr(os->size) +
|
|
|
|
" exceeds available address space");
|
|
|
|
|
|
|
|
// Check for overlapping file offsets. In this case we need to skip any
|
|
|
|
// section marked as SHT_NOBITS. These sections don't actually occupy space in
|
|
|
|
// the file so Sec->Offset + Sec->Size can overlap with others. If --oformat
|
|
|
|
// binary is specified only add SHF_ALLOC sections are added to the output
|
|
|
|
// file so we skip any non-allocated sections in that case.
|
2018-03-30 02:24:01 +08:00
|
|
|
std::vector<SectionOffset> fileOffs;
|
|
|
|
for (OutputSection *sec : outputSections)
|
2018-08-04 18:56:26 +08:00
|
|
|
if (sec->size > 0 && sec->type != SHT_NOBITS &&
|
2018-03-30 02:24:01 +08:00
|
|
|
(!config->oFormatBinary || (sec->flags & SHF_ALLOC)))
|
|
|
|
fileOffs.push_back({sec, sec->offset});
|
2018-06-27 16:08:12 +08:00
|
|
|
checkOverlap("file", fileOffs, false);
|
2018-01-31 17:22:44 +08:00
|
|
|
|
|
|
|
// When linking with -r there is no need to check for overlapping virtual/load
|
|
|
|
// addresses since those addresses will only be assigned when the final
|
|
|
|
// executable/shared object is created.
|
|
|
|
if (config->relocatable)
|
|
|
|
return;
|
|
|
|
|
|
|
|
// Checking for overlapping virtual and load addresses only needs to take
|
2018-02-23 10:05:48 +08:00
|
|
|
// into account SHF_ALLOC sections since others will not be loaded.
|
2018-01-31 17:22:44 +08:00
|
|
|
// Furthermore, we also need to skip SHF_TLS sections since these will be
|
|
|
|
// mapped to other addresses at runtime and can therefore have overlapping
|
|
|
|
// ranges in the file.
|
2018-03-30 02:24:01 +08:00
|
|
|
std::vector<SectionOffset> vmas;
|
|
|
|
for (OutputSection *sec : outputSections)
|
2018-08-04 18:56:26 +08:00
|
|
|
if (sec->size > 0 && (sec->flags & SHF_ALLOC) && !(sec->flags & SHF_TLS))
|
2018-03-30 02:24:01 +08:00
|
|
|
vmas.push_back({sec, sec->addr});
|
2018-06-27 16:08:12 +08:00
|
|
|
checkOverlap("virtual address", vmas, true);
|
2018-01-31 17:22:44 +08:00
|
|
|
|
|
|
|
// Finally, check that the load addresses don't overlap. This will usually be
|
|
|
|
// the same as the virtual addresses but can be different when using a linker
|
|
|
|
// script with AT().
|
2018-03-30 02:24:01 +08:00
|
|
|
std::vector<SectionOffset> lmas;
|
|
|
|
for (OutputSection *sec : outputSections)
|
2018-08-04 18:56:26 +08:00
|
|
|
if (sec->size > 0 && (sec->flags & SHF_ALLOC) && !(sec->flags & SHF_TLS))
|
2018-03-30 02:24:01 +08:00
|
|
|
lmas.push_back({sec, sec->getLMA()});
|
2018-06-27 16:08:12 +08:00
|
|
|
checkOverlap("load address", lmas, false);
|
2018-01-31 17:22:44 +08:00
|
|
|
}
|
|
|
|
|
2016-11-24 06:41:00 +08:00
|
|
|
// The entry point address is chosen in the following ways.
|
|
|
|
//
|
|
|
|
// 1. the '-e' entry command-line option;
|
|
|
|
// 2. the ENTRY(symbol) command in a linker control script;
|
2017-11-30 09:04:26 +08:00
|
|
|
// 3. the value of the symbol _start, if present;
|
2017-10-25 03:53:51 +08:00
|
|
|
// 4. the number represented by the entry symbol, if it is a number;
|
2021-09-21 00:35:12 +08:00
|
|
|
// 5. the address 0.
|
2018-09-21 06:58:00 +08:00
|
|
|
static uint64_t getEntryAddr() {
|
2017-10-25 03:53:51 +08:00
|
|
|
// Case 1, 2 or 3
|
2017-11-04 05:21:47 +08:00
|
|
|
if (Symbol *b = symtab->find(config->entry))
|
2017-03-17 19:56:54 +08:00
|
|
|
return b->getVA();
|
2017-10-25 03:53:51 +08:00
|
|
|
|
|
|
|
// Case 4
|
2016-12-07 11:23:06 +08:00
|
|
|
uint64_t addr;
|
2017-05-16 16:19:25 +08:00
|
|
|
if (to_integer(config->entry, addr))
|
2016-12-07 11:23:06 +08:00
|
|
|
return addr;
|
2016-11-24 06:41:00 +08:00
|
|
|
|
2017-10-25 03:53:51 +08:00
|
|
|
// Case 5
|
2016-12-07 12:06:21 +08:00
|
|
|
if (config->warnMissingEntry)
|
2016-12-07 10:26:16 +08:00
|
|
|
warn("cannot find entry symbol " + config->entry +
|
|
|
|
"; not setting start address");
|
2016-10-20 08:07:36 +08:00
|
|
|
return 0;
|
2015-12-24 16:37:34 +08:00
|
|
|
}
|
|
|
|
|
2016-02-26 03:28:37 +08:00
|
|
|
static uint16_t getELFType() {
|
2017-03-18 07:29:01 +08:00
|
|
|
if (config->isPic)
|
2016-02-26 03:28:37 +08:00
|
|
|
return ET_DYN;
|
|
|
|
if (config->relocatable)
|
|
|
|
return ET_REL;
|
|
|
|
return ET_EXEC;
|
|
|
|
}
|
|
|
|
|
2015-07-25 05:03:07 +08:00
|
|
|
// Write the ELF header, the program headers, and the section header table
// into the output buffer, filling in the fields that could only be computed
// after layout (entry address, section header offset, section counts).
template <class ELFT> void Writer<ELFT>::writeHeader() {
  writeEhdr<ELFT>(Out::bufferStart, *mainPart);
  writePhdrs<ELFT>(Out::bufferStart + sizeof(Elf_Ehdr), *mainPart);

  // Patch the remaining ELF header fields in place.
  auto *eHdr = reinterpret_cast<Elf_Ehdr *>(Out::bufferStart);
  eHdr->e_type = getELFType();
  eHdr->e_entry = getEntryAddr();
  eHdr->e_shoff = sectionHeaderOff;

  // Write the section header table.
  //
  // The ELF header can only store numbers up to SHN_LORESERVE in the e_shnum
  // and e_shstrndx fields. When the value of one of these fields exceeds
  // SHN_LORESERVE ELF requires us to put sentinel values in the ELF header and
  // use fields in the section header at index 0 to store
  // the value. The sentinel values and fields are:
  // e_shnum = 0, SHdrs[0].sh_size = number of sections.
  // e_shstrndx = SHN_XINDEX, SHdrs[0].sh_link = .shstrtab section index.
  auto *sHdrs = reinterpret_cast<Elf_Shdr *>(Out::bufferStart + eHdr->e_shoff);
  // +1 for the mandatory null section header at index 0.
  size_t num = outputSections.size() + 1;
  if (num >= SHN_LORESERVE)
    sHdrs->sh_size = num;
  else
    eHdr->e_shnum = num;

  uint32_t strTabIndex = in.shStrTab->getParent()->sectionIndex;
  if (strTabIndex >= SHN_LORESERVE) {
    sHdrs->sh_link = strTabIndex;
    eHdr->e_shstrndx = SHN_XINDEX;
  } else {
    eHdr->e_shstrndx = strTabIndex;
  }

  // Emit one header per output section, starting after the null header
  // (pre-increment skips index 0).
  for (OutputSection *sec : outputSections)
    sec->writeHeaderTo<ELFT>(++sHdrs);
}
|
|
|
|
|
2016-12-06 01:40:37 +08:00
|
|
|
// Open a result file.
|
2016-04-02 01:24:19 +08:00
|
|
|
template <class ELFT> void Writer<ELFT>::openFile() {
|
2018-12-04 01:42:57 +08:00
|
|
|
uint64_t maxSize = config->is64 ? INT64_MAX : UINT32_MAX;
|
2019-03-02 02:53:41 +08:00
|
|
|
if (fileSize != size_t(fileSize) || maxSize < fileSize) {
|
2021-01-13 04:55:18 +08:00
|
|
|
std::string msg;
|
|
|
|
raw_string_ostream s(msg);
|
|
|
|
s << "output file too large: " << Twine(fileSize) << " bytes\n"
|
|
|
|
<< "section sizes:\n";
|
|
|
|
for (OutputSection *os : outputSections)
|
|
|
|
s << os->name << ' ' << os->size << "\n";
|
|
|
|
error(s.str());
|
2017-04-06 05:37:09 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2016-12-06 01:40:37 +08:00
|
|
|
unlinkAsync(config->outputFile);
|
2017-12-12 07:30:54 +08:00
|
|
|
unsigned flags = 0;
|
|
|
|
if (!config->relocatable)
|
[LLD][ELF] Support --[no-]mmap-output-file with F_no_mmap
Summary:
Add a flag `F_no_mmap` to `FileOutputBuffer` to support
`--[no-]mmap-output-file` in ELF LLD. LLD currently explicitly ignores
this flag for compatibility with GNU ld and gold.
We need this flag to speed up link time for large binaries in certain
scenarios. When we link some of our larger binaries we find that LLD
takes 50+ GB of memory, which causes memory pressure. The memory
pressure causes the VM to flush dirty pages of the output file to disk.
This is normally okay, since we should be flushing cold pages. However,
when using BtrFS with compression we need to write 128KB at a time when
we flush a page. If any page in that 128KB block is written again, then
it must be flushed a second time, and so on. Since LLD doesn't write
sequentially this causes write amplification. The same 128KB block will
end up being flushed multiple times, causing the linker to many times
more IO than necessary. We've observed 3-5x faster builds with
-no-mmap-output-file when we hit this scenario.
The bad scenario only applies to compressed filesystems, which group
together multiple pages into a single compressed block. I've tested
BtrFS, but the problem will be present for any compressed filesystem
on Linux, since it is caused by the VM.
Silently ignoring --no-mmap-output-file caused a silent regression when
we switched from gold to lld. We pass --no-mmap-output-file to fix this
edge case, but since lld silently ignored the flag we didn't realize it
wasn't being respected.
Benchmark building a 9 GB binary that exposes this edge case. I linked 3
times with --mmap-output-file and 3 times with --no-mmap-output-file and
took the average. The machine has 24 cores @ 2.4 GHz, 112 GB of RAM,
BtrFS mounted with -compress-force=zstd, and an 80% full disk.
| Mode | Time |
|---------|-------|
| mmap | 894 s |
| no mmap | 126 s |
When compression is disabled, BtrFS performs just as well with and
without mmap on this benchmark.
I was unable to reproduce the regression with any binaries in
lld-speed-test.
Reviewed By: ruiu, MaskRay
Differential Revision: https://reviews.llvm.org/D69294
2019-10-30 06:46:22 +08:00
|
|
|
flags |= FileOutputBuffer::F_executable;
|
|
|
|
if (!config->mmapOutputFile)
|
|
|
|
flags |= FileOutputBuffer::F_no_mmap;
|
2017-11-08 09:05:52 +08:00
|
|
|
Expected<std::unique_ptr<FileOutputBuffer>> bufferOrErr =
|
2017-12-12 07:30:54 +08:00
|
|
|
FileOutputBuffer::create(config->outputFile, fileSize, flags);
|
2016-12-06 01:40:37 +08:00
|
|
|
|
2019-03-01 07:11:35 +08:00
|
|
|
if (!bufferOrErr) {
|
2017-11-08 09:05:52 +08:00
|
|
|
error("failed to open " + config->outputFile + ": " +
|
|
|
|
llvm::toString(bufferOrErr.takeError()));
|
2019-03-01 07:11:35 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
buffer = std::move(*bufferOrErr);
|
|
|
|
Out::bufferStart = buffer->getBufferStart();
|
2015-07-25 05:03:07 +08:00
|
|
|
}
|
|
|
|
|
2016-08-25 17:05:47 +08:00
|
|
|
// Write section contents for --oformat binary: only allocated sections reach
// the output file.
template <class ELFT> void Writer<ELFT>::writeSectionsBinary() {
  for (OutputSection *sec : outputSections) {
    if (!(sec->flags & SHF_ALLOC))
      continue;
    sec->writeTo<ELFT>(Out::bufferStart + sec->offset);
  }
}
|
|
|
|
|
2017-08-15 05:18:12 +08:00
|
|
|
static void fillTrap(uint8_t *i, uint8_t *end) {
|
2017-08-21 16:31:14 +08:00
|
|
|
for (; i + 4 <= end; i += 4)
|
2017-08-03 00:35:00 +08:00
|
|
|
memcpy(i, &target->trapInstr, 4);
|
|
|
|
}
|
|
|
|
|
2017-08-15 05:18:12 +08:00
|
|
|
// Fill the last page of executable segments with trap instructions
// instead of leaving them as zero. Even though it is not required by any
// standard, it is in general a good thing to do for security reasons.
//
// We'll leave other pages in segments as-is because the rest will be
// overwritten by output sections.
template <class ELFT> void Writer<ELFT>::writeTrapInstr() {
  for (Partition &part : partitions) {
    // Fill the last page.
    // The filled range runs from the segment's content end (rounded down to a
    // 4-byte trap-instruction boundary) up to the next maxPageSize boundary.
    for (PhdrEntry *p : part.phdrs)
      if (p->p_type == PT_LOAD && (p->p_flags & PF_X))
        fillTrap(Out::bufferStart +
                     alignDown(p->firstSec->offset + p->p_filesz, 4),
                 Out::bufferStart + alignTo(p->firstSec->offset + p->p_filesz,
                                            config->maxPageSize));

    // Round up the file size of the last segment to the page boundary iff it is
    // an executable segment to ensure that other tools don't accidentally
    // trim the instruction padding (e.g. when stripping the file).
    PhdrEntry *last = nullptr;
    for (PhdrEntry *p : part.phdrs)
      if (p->p_type == PT_LOAD)
        last = p;

    if (last && (last->p_flags & PF_X))
      last->p_memsz = last->p_filesz =
          alignTo(last->p_filesz, config->maxPageSize);
  }
}
|
|
|
|
|
2015-07-25 05:03:07 +08:00
|
|
|
// Write section contents to a mmap'ed file.
|
|
|
|
template <class ELFT> void Writer<ELFT>::writeSections() {
|
2021-12-21 02:51:24 +08:00
|
|
|
llvm::TimeTraceScope timeScope("Write sections");
|
|
|
|
|
2021-10-26 03:52:06 +08:00
|
|
|
// In -r or --emit-relocs mode, write the relocation sections first as in
|
2017-02-11 09:40:49 +08:00
|
|
|
// ELf_Rel targets we might find out that we need to modify the relocated
|
|
|
|
// section while doing it.
|
2017-10-07 04:12:43 +08:00
|
|
|
for (OutputSection *sec : outputSections)
|
2017-02-11 09:40:49 +08:00
|
|
|
if (sec->type == SHT_REL || sec->type == SHT_RELA)
|
2019-03-01 07:11:35 +08:00
|
|
|
sec->writeTo<ELFT>(Out::bufferStart + sec->offset);
|
2017-02-11 09:40:49 +08:00
|
|
|
|
2017-10-07 04:12:43 +08:00
|
|
|
for (OutputSection *sec : outputSections)
|
2019-03-01 07:11:35 +08:00
|
|
|
if (sec->type != SHT_REL && sec->type != SHT_RELA)
|
|
|
|
sec->writeTo<ELFT>(Out::bufferStart + sec->offset);
|
2021-07-09 17:05:18 +08:00
|
|
|
|
|
|
|
// Finally, check that all dynamic relocation addends were written correctly.
|
|
|
|
if (config->checkDynamicRelocs && config->writeAddends) {
|
|
|
|
for (OutputSection *sec : outputSections)
|
|
|
|
if (sec->type == SHT_REL || sec->type == SHT_RELA)
|
|
|
|
sec->checkDynRelAddends(Out::bufferStart);
|
|
|
|
}
|
2015-07-25 05:03:07 +08:00
|
|
|
}
|
2015-10-10 05:07:25 +08:00
|
|
|
|
2019-04-17 06:45:14 +08:00
|
|
|
// Computes a hash value of Data using a given hash function.
|
|
|
|
// In order to utilize multiple cores, we first split data into 1MB
|
|
|
|
// chunks, compute a hash for each chunk, and then compute a hash value
|
|
|
|
// of the hash values.
|
|
|
|
static void
|
|
|
|
computeHash(llvm::MutableArrayRef<uint8_t> hashBuf,
|
|
|
|
llvm::ArrayRef<uint8_t> data,
|
|
|
|
std::function<void(uint8_t *dest, ArrayRef<uint8_t> arr)> hashFn) {
|
|
|
|
std::vector<ArrayRef<uint8_t>> chunks = split(data, 1024 * 1024);
|
|
|
|
std::vector<uint8_t> hashes(chunks.size() * hashBuf.size());
|
|
|
|
|
|
|
|
// Compute hash values.
|
|
|
|
parallelForEachN(0, chunks.size(), [&](size_t i) {
|
|
|
|
hashFn(hashes.data() + i * hashBuf.size(), chunks[i]);
|
|
|
|
});
|
|
|
|
|
|
|
|
// Write to the final output buffer.
|
|
|
|
hashFn(hashBuf.data(), hashes);
|
|
|
|
}
|
|
|
|
|
2019-05-09 16:08:09 +08:00
|
|
|
template <class ELFT> void Writer<ELFT>::writeBuildId() {
  if (!mainPart->buildId || !mainPart->buildId->getParent())
    return;

  // For --build-id=0x<hexstring>, copy the user-supplied bytes verbatim.
  if (config->buildId == BuildIdKind::Hexstring) {
    for (Partition &part : partitions)
      part.buildId->writeBuildId(config->buildIdVector);
    return;
  }

  // Compute a hash of all sections of the output file.
  size_t hashSize = mainPart->buildId->hashSize;
  std::vector<uint8_t> buildId(hashSize);
  llvm::ArrayRef<uint8_t> buf{Out::bufferStart, size_t(fileSize)};

  if (config->buildId == BuildIdKind::Uuid) {
    // A UUID build-id is random bytes, not derived from the file contents.
    if (auto ec = llvm::getRandomBytes(buildId.data(), hashSize))
      error("entropy source failure: " + ec.message());
  } else {
    // Select the per-chunk hash function, then hash the whole file with it.
    std::function<void(uint8_t *, ArrayRef<uint8_t>)> hashFn;
    switch (config->buildId) {
    case BuildIdKind::Fast:
      hashFn = [](uint8_t *dest, ArrayRef<uint8_t> arr) {
        write64le(dest, xxHash64(arr));
      };
      break;
    case BuildIdKind::Md5:
      hashFn = [&](uint8_t *dest, ArrayRef<uint8_t> arr) {
        memcpy(dest, MD5::hash(arr).data(), hashSize);
      };
      break;
    case BuildIdKind::Sha1:
      hashFn = [&](uint8_t *dest, ArrayRef<uint8_t> arr) {
        memcpy(dest, SHA1::hash(arr).data(), hashSize);
      };
      break;
    default:
      llvm_unreachable("unknown BuildIdKind");
    }
    computeHash(buildId, buf, std::move(hashFn));
  }

  for (Partition &part : partitions)
    part.buildId->writeBuildId(buildId);
}
|
|
|
|
|
2020-05-15 13:18:58 +08:00
|
|
|
template void elf::createSyntheticSections<ELF32LE>();
|
|
|
|
template void elf::createSyntheticSections<ELF32BE>();
|
|
|
|
template void elf::createSyntheticSections<ELF64LE>();
|
|
|
|
template void elf::createSyntheticSections<ELF64BE>();
|
2019-10-07 16:31:18 +08:00
|
|
|
|
2020-05-15 13:18:58 +08:00
|
|
|
template void elf::writeResult<ELF32LE>();
|
|
|
|
template void elf::writeResult<ELF32BE>();
|
|
|
|
template void elf::writeResult<ELF64LE>();
|
|
|
|
template void elf::writeResult<ELF64BE>();
|