//===- OutputSections.cpp -------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "OutputSections.h"
#include "Config.h"
#include "InputFiles.h"
#include "LinkerScript.h"
#include "Symbols.h"
#include "SyntheticSections.h"
#include "Target.h"
#include "lld/Common/Arrays.h"
#include "lld/Common/Memory.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/Config/llvm-config.h" // LLVM_ENABLE_ZLIB
#include "llvm/Support/Parallel.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/TimeProfiler.h"
#if LLVM_ENABLE_ZLIB
#include <zlib.h>
#endif

using namespace llvm;
using namespace llvm::dwarf;
using namespace llvm::object;
using namespace llvm::support::endian;
using namespace llvm::ELF;
using namespace lld;
using namespace lld::elf;

uint8_t *Out::bufferStart;
PhdrEntry *Out::tlsPhdr;
OutputSection *Out::elfHeader;
OutputSection *Out::programHeaders;
OutputSection *Out::preinitArray;
OutputSection *Out::initArray;
OutputSection *Out::finiArray;

SmallVector<OutputSection *, 0> elf::outputSections;
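
// Compute the program header flags (PF_*) corresponding to this section's
// flags (SHF_*). On EM_ARM, a section carrying SHF_ARM_PURECODE contains no
// data that needs to be read, so PF_R is not added for it.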
uint32_t OutputSection::getPhdrFlags() const {
  uint32_t ret = 0;
  if (config->emachine != EM_ARM || !(flags & SHF_ARM_PURECODE))
    ret |= PF_R;
  if (flags & SHF_WRITE)
    ret |= PF_W;
  if (flags & SHF_EXECINSTR)
    ret |= PF_X;
  return ret;
}

template <class ELFT>
void OutputSection::writeHeaderTo(typename ELFT::Shdr *shdr) {
  shdr->sh_entsize = entsize;
  shdr->sh_addralign = alignment;
  shdr->sh_type = type;
  shdr->sh_offset = offset;
  shdr->sh_flags = flags;
  shdr->sh_info = info;
  shdr->sh_link = link;
  shdr->sh_addr = addr;
  shdr->sh_size = size;
  shdr->sh_name = shName;
}

OutputSection::OutputSection(StringRef name, uint32_t type, uint64_t flags)
    : SectionBase(Output, name, flags, /*Entsize*/ 0, /*Alignment*/ 1, type,
                  /*Info*/ 0, /*Link*/ 0) {}

// We allow sections of the types listed below to be merged into a
// single progbits section. This is typically done by linker
// scripts. Merging nobits and progbits will force disk space
// to be allocated for nobits sections. The other types don't require
// any special treatment on top of progbits, so there doesn't
// seem to be any harm in merging them.
//
// NOTE: clang since rL252300 emits SHT_X86_64_UNWIND .eh_frame sections. Allow
// them to be merged into SHT_PROGBITS .eh_frame (GNU as .cfi_*).
static bool canMergeToProgbits(unsigned type) {
  return type == SHT_NOBITS || type == SHT_PROGBITS || type == SHT_INIT_ARRAY ||
         type == SHT_PREINIT_ARRAY || type == SHT_FINI_ARRAY ||
         type == SHT_NOTE ||
         (type == SHT_X86_64_UNWIND && config->emachine == EM_X86_64);
}

// Record that isec will be placed in the OutputSection. isec does not become
// permanent until finalizeInputSections() is called. The function should not be
// used after finalizeInputSections() is called. If you need to add an
// InputSection post finalizeInputSections(), then you must do the following:
//
// 1. Find or create an InputSectionDescription to hold the InputSection.
// 2. Add the InputSection to InputSectionDescription::sections.
// 3. Call commitSection(isec).
void OutputSection::recordSection(InputSectionBase *isec) {
  partition = isec->partition;
  isec->parent = this;
  if (commands.empty() || !isa<InputSectionDescription>(commands.back()))
    commands.push_back(make<InputSectionDescription>(""));
  auto *isd = cast<InputSectionDescription>(commands.back());
  isd->sectionBases.push_back(isec);
}

// Update fields (type, flags, alignment, etc) according to the InputSection
// isec. Also check whether the InputSection flags and type are consistent with
// those of the other InputSections.
void OutputSection::commitSection(InputSection *isec) {
  if (LLVM_UNLIKELY(type != isec->type)) {
    if (hasInputSections || typeIsSet) {
      if (typeIsSet || !canMergeToProgbits(type) ||
          !canMergeToProgbits(isec->type)) {
        // Changing the type of a (NOLOAD) section is fishy, but some projects
        // (e.g. https://github.com/ClangBuiltLinux/linux/issues/1597)
        // traditionally rely on the behavior. Issue a warning to not break
        // them. Other types get an error.
        auto diagnose = type == SHT_NOBITS ? warn : errorOrWarn;
        diagnose("section type mismatch for " + isec->name + "\n>>> " +
                 toString(isec) + ": " +
                 getELFSectionTypeName(config->emachine, isec->type) +
                 "\n>>> output section " + name + ": " +
                 getELFSectionTypeName(config->emachine, type));
      }
      type = SHT_PROGBITS;
    } else {
      type = isec->type;
    }
  }
  if (!hasInputSections) {
    // If isec is the first section to be added to this output section,
    // initialize entsize and flags from isec.
    hasInputSections = true;
    entsize = isec->entsize;
    flags = isec->flags;
  } else {
    // Otherwise, check if the new flags are compatible with the existing ones.
    if ((flags ^ isec->flags) & SHF_TLS)
      error("incompatible section flags for " + name + "\n>>> " +
            toString(isec) + ": 0x" + utohexstr(isec->flags) +
            "\n>>> output section " + name + ": 0x" + utohexstr(flags));
  }

  isec->parent = this;
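  // Merge the section flags. SHF_ARM_PURECODE is kept only if every input
  // section carries it, while all other flags are OR'd together. SHF_ALLOC is
  // additionally dropped if this output section was marked non-allocatable.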
  uint64_t andMask =
      config->emachine == EM_ARM ? (uint64_t)SHF_ARM_PURECODE : 0;
  uint64_t orMask = ~andMask;
  uint64_t andFlags = (flags & isec->flags) & andMask;
  uint64_t orFlags = (flags | isec->flags) & orMask;
  flags = andFlags | orFlags;
  if (nonAlloc)
    flags &= ~(uint64_t)SHF_ALLOC;

  alignment = std::max(alignment, isec->alignment);

  // If this section contains a table of fixed-size entries, sh_entsize
  // holds the element size. If it contains elements of different sizes we
  // set sh_entsize to 0.
  if (entsize != isec->entsize)
    entsize = 0;
}

static MergeSyntheticSection *createMergeSynthetic(StringRef name,
                                                   uint32_t type,
                                                   uint64_t flags,
                                                   uint32_t alignment) {
  if ((flags & SHF_STRINGS) && config->optimize >= 2)
    return make<MergeTailSection>(name, type, flags, alignment);
  return make<MergeNoTailSection>(name, type, flags, alignment);
}

// This function scans over the InputSectionBase list sectionBases to create
// InputSectionDescription::sections.
//
// It removes MergeInputSections from the input section array and adds
// new synthetic sections at the location of the first input section
// that it replaces. It then finalizes each synthetic section in order
// to compute an output offset for each piece of each input section.
void OutputSection::finalizeInputSections() {
  std::vector<MergeSyntheticSection *> mergeSections;
  for (SectionCommand *cmd : commands) {
    auto *isd = dyn_cast<InputSectionDescription>(cmd);
    if (!isd)
      continue;
    isd->sections.reserve(isd->sectionBases.size());
    for (InputSectionBase *s : isd->sectionBases) {
      MergeInputSection *ms = dyn_cast<MergeInputSection>(s);
      if (!ms) {
        isd->sections.push_back(cast<InputSection>(s));
        continue;
      }

      // We do not want to handle sections that are not alive, so just remove
      // them instead of trying to merge.
      if (!ms->isLive())
        continue;

      auto i = llvm::find_if(mergeSections, [=](MergeSyntheticSection *sec) {
        // While we could create a single synthetic section for two different
        // values of Entsize, it is better to take Entsize into consideration.
        //
        // With a single synthetic section no two pieces with different Entsize
        // could be equal, so we may as well have two sections.
        //
        // Using Entsize here also allows us to propagate it to the synthetic
        // section.
        //
        // SHF_STRINGS sections with different alignments should not be merged.
        return sec->flags == ms->flags && sec->entsize == ms->entsize &&
               (sec->alignment == ms->alignment || !(sec->flags & SHF_STRINGS));
      });
      if (i == mergeSections.end()) {
        MergeSyntheticSection *syn =
            createMergeSynthetic(name, ms->type, ms->flags, ms->alignment);
        mergeSections.push_back(syn);
        i = std::prev(mergeSections.end());
        syn->entsize = ms->entsize;
        isd->sections.push_back(syn);
      }
      (*i)->addSection(ms);
    }

    // sectionBases should not be used from this point onwards. Clear it to
    // catch misuses.
    isd->sectionBases.clear();

    // Some input sections may be removed from the list after ICF.
    for (InputSection *s : isd->sections)
      commitSection(s);
  }
  for (auto *ms : mergeSections)
    ms->finalizeContents();
}

static void sortByOrder(MutableArrayRef<InputSection *> in,
                        llvm::function_ref<int(InputSectionBase *s)> order) {
  std::vector<std::pair<int, InputSection *>> v;
  for (InputSection *s : in)
    v.push_back({order(s), s});
  llvm::stable_sort(v, less_first());

  for (size_t i = 0; i < v.size(); ++i)
    in[i] = v[i].second;
}

uint64_t elf::getHeaderSize() {
  if (config->oFormatBinary)
    return 0;
  return Out::elfHeader->size + Out::programHeaders->size;
}

void OutputSection::sort(llvm::function_ref<int(InputSectionBase *s)> order) {
  assert(isLive());
  for (SectionCommand *b : commands)
    if (auto *isd = dyn_cast<InputSectionDescription>(b))
      sortByOrder(isd->sections, order);
}
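
// Fill [buf, buf + size) with no-op instructions: repeat the largest encoding
// in target->nopInstrs, then finish with the single entry whose length exactly
// matches the number of remaining bytes.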
static void nopInstrFill(uint8_t *buf, size_t size) {
  if (size == 0)
    return;
  unsigned i = 0;
  std::vector<std::vector<uint8_t>> nopFiller = *target->nopInstrs;
  unsigned num = size / nopFiller.back().size();
  for (unsigned c = 0; c < num; ++c) {
    memcpy(buf + i, nopFiller.back().data(), nopFiller.back().size());
    i += nopFiller.back().size();
  }
  unsigned remaining = size - i;
  if (!remaining)
    return;
  assert(nopFiller[remaining - 1].size() == remaining);
  memcpy(buf + i, nopFiller[remaining - 1].data(), remaining);
}

// Fill [buf, buf + size) with filler.
// This is used for the linker script "=fillexp" command.
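// For example, with filler = {0xAA, 0xBB, 0xCC, 0xDD} and size = 6, the output
// bytes are AA BB CC DD AA BB: the pattern repeats and is truncated at the end
// of the range.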
static void fill(uint8_t *buf, size_t size,
                 const std::array<uint8_t, 4> &filler) {
  size_t i = 0;
  for (; i + 4 < size; i += 4)
    memcpy(buf + i, filler.data(), 4);
  memcpy(buf + i, filler.data(), size - i);
}

#if LLVM_ENABLE_ZLIB
static SmallVector<uint8_t, 0> deflateShard(ArrayRef<uint8_t> in, int level,
                                            int flush) {
  // 15 and 8 are default. windowBits=-15 is negative to generate raw deflate
  // data with no zlib header or trailer.
  z_stream s = {};
  deflateInit2(&s, level, Z_DEFLATED, -15, 8, Z_DEFAULT_STRATEGY);
  s.next_in = const_cast<uint8_t *>(in.data());
  s.avail_in = in.size();

  // Allocate a buffer of half of the input size, and grow it by 1.5x if
  // insufficient.
  SmallVector<uint8_t, 0> out;
  size_t pos = 0;
  out.resize_for_overwrite(std::max<size_t>(in.size() / 2, 64));
  do {
    if (pos == out.size())
      out.resize_for_overwrite(out.size() * 3 / 2);
    s.next_out = out.data() + pos;
    s.avail_out = out.size() - pos;
    (void)deflate(&s, flush);
    pos = s.next_out - out.data();
  } while (s.avail_out == 0);
  assert(s.avail_in == 0);

  out.truncate(pos);
  deflateEnd(&s);
  return out;
}
#endif

// Compress section contents if this section contains debug info.
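// Debug info is compressed in parallel: the uncompressed bytes are split into
// 1MiB shards and each shard is deflated independently. All shards but the
// last use Z_SYNC_FLUSH so that each shard's output ends on a byte boundary
// and the raw DEFLATE streams can simply be concatenated; the last shard uses
// Z_FINISH to emit the final block.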
template <class ELFT> void OutputSection::maybeCompress() {
#if LLVM_ENABLE_ZLIB
  using Elf_Chdr = typename ELFT::Chdr;

  // Compress only DWARF debug sections.
  if (!config->compressDebugSections || (flags & SHF_ALLOC) ||
      !name.startswith(".debug_") || size == 0)
    return;

  llvm::TimeTraceScope timeScope("Compress debug sections");

  // Write uncompressed data to a temporary zero-initialized buffer.
  auto buf = std::make_unique<uint8_t[]>(size);
  writeTo<ELFT>(buf.get());
  // We chose 1 (Z_BEST_SPEED) as the default compression level because it is
  // the fastest. If -O2 is given, we use level 6 to compress debug info more by
  // ~15%. We found that levels 7 to 9 don't make much difference (~1% more
  // compression) while taking a significant amount of time (~2x), so level 6
  // seems enough.
  const int level = config->optimize >= 2 ? 6 : Z_BEST_SPEED;

  // Split input into 1-MiB shards.
  constexpr size_t shardSize = 1 << 20;
  auto shardsIn = split(makeArrayRef<uint8_t>(buf.get(), size), shardSize);
  const size_t numShards = shardsIn.size();

  // Compress shards and compute Adler-32 checksums. Use Z_SYNC_FLUSH for all
  // shards but the last to flush the output to a byte boundary to be
  // concatenated with the next shard.
  auto shardsOut = std::make_unique<SmallVector<uint8_t, 0>[]>(numShards);
  auto shardsAdler = std::make_unique<uint32_t[]>(numShards);
  parallelForEachN(0, numShards, [&](size_t i) {
    shardsOut[i] = deflateShard(shardsIn[i], level,
                                i != numShards - 1 ? Z_SYNC_FLUSH : Z_FINISH);
    shardsAdler[i] = adler32(1, shardsIn[i].data(), shardsIn[i].size());
  });

  // Update the section size and combine the Adler-32 checksums.
  uint32_t checksum = 1; // Initial Adler-32 value
  compressed.uncompressedSize = size;
  size = sizeof(Elf_Chdr) + 2; // Elf_Chdr and zlib header
  for (size_t i = 0; i != numShards; ++i) {
    size += shardsOut[i].size();
    checksum = adler32_combine(checksum, shardsAdler[i], shardsIn[i].size());
  }
  size += 4; // checksum

  compressed.shards = std::move(shardsOut);
  compressed.numShards = numShards;
  compressed.checksum = checksum;
  flags |= SHF_COMPRESSED;
#endif
}

static void writeInt(uint8_t *buf, uint64_t data, uint64_t size) {
  if (size == 1)
    *buf = data;
  else if (size == 2)
    write16(buf, data);
  else if (size == 4)
    write32(buf, data);
  else if (size == 8)
    write64(buf, data);
  else
    llvm_unreachable("unsupported Size argument");
}

template <class ELFT> void OutputSection::writeTo(uint8_t *buf) {
  llvm::TimeTraceScope timeScope("Write sections", name);
  if (type == SHT_NOBITS)
    return;

  // If --compress-debug-sections is specified and this is a debug section,
  // we've already compressed the section contents. If that's the case,
  // just write them out.
  if (compressed.shards) {
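    // The compressed image is laid out as: the Elf_Chdr header, a 2-byte zlib
    // stream header, the concatenated raw DEFLATE shards produced by
    // maybeCompress(), and a trailing big-endian Adler-32 checksum of the
    // uncompressed data.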
    auto *chdr = reinterpret_cast<typename ELFT::Chdr *>(buf);
    chdr->ch_type = ELFCOMPRESS_ZLIB;
    chdr->ch_size = compressed.uncompressedSize;
    chdr->ch_addralign = alignment;
    buf += sizeof(*chdr);

    // Compute shard offsets.
    auto offsets = std::make_unique<size_t[]>(compressed.numShards);
    offsets[0] = 2; // zlib header
    for (size_t i = 1; i != compressed.numShards; ++i)
      offsets[i] = offsets[i - 1] + compressed.shards[i - 1].size();

    buf[0] = 0x78; // CMF
    buf[1] = 0x01; // FLG: best speed
    parallelForEachN(0, compressed.numShards, [&](size_t i) {
      memcpy(buf + offsets[i], compressed.shards[i].data(),
             compressed.shards[i].size());
    });

    write32be(buf + (size - sizeof(*chdr) - 4), compressed.checksum);
    return;
  }

  // Write leading padding.
  SmallVector<InputSection *, 0> sections = getInputSections(*this);
  std::array<uint8_t, 4> filler = getFiller();
  bool nonZeroFiller = read32(filler.data()) != 0;
  if (nonZeroFiller)
    fill(buf, sections.empty() ? size : sections[0]->outSecOff, filler);

  parallelForEachN(0, sections.size(), [&](size_t i) {
    InputSection *isec = sections[i];
    if (auto *s = dyn_cast<SyntheticSection>(isec))
      s->writeTo(buf + isec->outSecOff);
    else
      isec->writeTo<ELFT>(buf + isec->outSecOff);

    // Fill gaps between sections.
    if (nonZeroFiller) {
      uint8_t *start = buf + isec->outSecOff + isec->getSize();
      uint8_t *end;
      if (i + 1 == sections.size())
        end = buf + size;
      else
        end = buf + sections[i + 1]->outSecOff;
      if (isec->nopFiller) {
        assert(target->nopInstrs);
        nopInstrFill(start, end - start);
      } else
        fill(start, end - start, filler);
    }
  });

  // Linker scripts may have BYTE()-family commands with which you
  // can write arbitrary bytes to the output. Process them if any.
  for (SectionCommand *cmd : commands)
    if (auto *data = dyn_cast<ByteCommand>(cmd))
      writeInt(buf + data->offset, data->expression().getValue(), data->size);
}

static void finalizeShtGroup(OutputSection *os, InputSection *section) {
  // The sh_link field for SHT_GROUP sections should contain the section index
  // of the symbol table.
  os->link = in.symTab->getParent()->sectionIndex;

  if (!section)
    return;

  // sh_info should then contain the index of the entry in the symbol table
  // section that provides the signature of the section group.
  ArrayRef<Symbol *> symbols = section->file->getSymbols();
  os->info = in.symTab->getSymbolIndex(symbols[section->info]);

  // Some group members may be combined or discarded, so we need to compute the
  // new size. The content will be rewritten in InputSection::copyShtGroup.
  DenseSet<uint32_t> seen;
  ArrayRef<InputSectionBase *> sections = section->file->getSections();
  for (const uint32_t &idx : section->getDataAs<uint32_t>().slice(1))
    if (OutputSection *osec = sections[read32(&idx)]->getOutputSection())
      seen.insert(osec->sectionIndex);
  os->size = (1 + seen.size()) * sizeof(uint32_t);
}

void OutputSection::finalize() {
  InputSection *first = getFirstInputSection(this);

  if (flags & SHF_LINK_ORDER) {
    // We must preserve the link order dependency of sections with the
    // SHF_LINK_ORDER flag. The dependency is indicated by the sh_link field. We
    // need to translate the InputSection sh_link to the OutputSection sh_link;
    // all InputSections in the OutputSection have the same dependency.
    if (auto *ex = dyn_cast<ARMExidxSyntheticSection>(first))
      link = ex->getLinkOrderDep()->getParent()->sectionIndex;
    else if (first->flags & SHF_LINK_ORDER)
      if (auto *d = first->getLinkOrderDep())
        link = d->getParent()->sectionIndex;
  }

  if (type == SHT_GROUP) {
    finalizeShtGroup(this, first);
    return;
  }

  if (!config->copyRelocs || (type != SHT_RELA && type != SHT_REL))
    return;

  // Skip if 'first' is synthetic, i.e. not a section created by --emit-relocs.
  // Normally 'type' was changed by 'first', so 'first' should be non-null.
  // However, if the output section is .rela.dyn, 'type' can be set by the empty
  // synthetic .rela.plt and 'first' can be null.
  if (!first || isa<SyntheticSection>(first))
    return;

  link = in.symTab->getParent()->sectionIndex;
  // sh_info for SHT_REL[A] sections should contain the section header index of
  // the section to which the relocation applies.
  InputSectionBase *s = first->getRelocatedSection();
  info = s->getOutputSection()->sectionIndex;
  flags |= SHF_INFO_LINK;
}

// Returns true if s is one of the many forms in which the compiler driver may
// pass crtbegin files.
//
// Gcc uses any of crtbegin[<empty>|S|T].o.
// Clang uses Gcc's plus clang_rt.crtbegin[-<arch>|<empty>].o.
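//
// For example (illustrative), isCrt("crtbegin.o", "crtbegin"),
// isCrt("crtbeginS.o", "crtbegin") and
// isCrt("clang_rt.crtbegin-x86_64.o", "crtbegin") all return true, while
// isCrt("mycrtbegin.o", "crtbegin") does not.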
static bool isCrt(StringRef s, StringRef beginEnd) {
  s = sys::path::filename(s);
  if (!s.consume_back(".o"))
    return false;
  if (s.consume_front("clang_rt."))
    return s.consume_front(beginEnd);
  return s.consume_front(beginEnd) && s.size() <= 1;
}

// .ctors and .dtors are sorted by this order:
//
// 1. .ctors/.dtors in crtbegin (which contains a sentinel value -1).
// 2. The section is named ".ctors" or ".dtors" (priority: 65536).
// 3. The section has an optional priority value in the form of ".ctors.N" or
//    ".dtors.N" where N is a number in the form of %05u (priority: 65535-N).
// 4. .ctors/.dtors in crtend (which contains a sentinel value 0).
//
// For 2 and 3, the sections are sorted by priority from high to low, e.g.
// .ctors (65536), .ctors.00100 (65435), .ctors.00200 (65335). In GNU ld's
// internal linker scripts, the sorting is by string comparison which can
// achieve the same goal given the optional priority values are of the same
// length.
//
// In an ideal world, we wouldn't need this function because .init_array and
// .ctors are duplicate features (and .init_array is newer). However, there
// are too many real-world use cases of .ctors, so we had no choice but to
// support them with these rather ad-hoc semantics.
static bool compCtors(const InputSection *a, const InputSection *b) {
  bool beginA = isCrt(a->file->getName(), "crtbegin");
  bool beginB = isCrt(b->file->getName(), "crtbegin");
  if (beginA != beginB)
    return beginA;
  bool endA = isCrt(a->file->getName(), "crtend");
  bool endB = isCrt(b->file->getName(), "crtend");
  if (endA != endB)
    return endB;
  return getPriority(a->name) > getPriority(b->name);
}

// Sorts input sections by the special rules for .ctors and .dtors.
// Unfortunately, the rules are different from the ones for .{init,fini}_array.
// Read the comment above.
void OutputSection::sortCtorsDtors() {
  assert(commands.size() == 1);
  auto *isd = cast<InputSectionDescription>(commands[0]);
  llvm::stable_sort(isd->sections, compCtors);
}

// If an input string is in the form of "foo.N" where N is a number, return N
// (65535-N if .ctors.N or .dtors.N). Otherwise, return 65536, which is one
// greater than the lowest priority.
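// For example (illustrative), getPriority(".init_array.00100") returns 100,
// getPriority(".ctors.00100") returns 65435, and getPriority(".text") returns
// 65536.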
int elf::getPriority(StringRef s) {
  size_t pos = s.rfind('.');
  if (pos == StringRef::npos)
    return 65536;
  int v = 65536;
  if (to_integer(s.substr(pos + 1), v, 10) &&
      (pos == 6 && (s.startswith(".ctors") || s.startswith(".dtors"))))
    v = 65535 - v;
  return v;
}

InputSection *elf::getFirstInputSection(const OutputSection *os) {
  for (SectionCommand *cmd : os->commands)
    if (auto *isd = dyn_cast<InputSectionDescription>(cmd))
      if (!isd->sections.empty())
        return isd->sections[0];
  return nullptr;
}

SmallVector<InputSection *, 0> elf::getInputSections(const OutputSection &os) {
  SmallVector<InputSection *, 0> ret;
  for (SectionCommand *cmd : os.commands)
    if (auto *isd = dyn_cast<InputSectionDescription>(cmd))
      ret.insert(ret.end(), isd->sections.begin(), isd->sections.end());
  return ret;
}

// Sorts input sections by section name suffixes, so that .foo.N comes
// before .foo.M if N < M. Used to sort .{init,fini}_array.N sections.
// We want to keep the original order if the priorities are the same
// because the compiler keeps the original initialization order in a
// translation unit and we need to respect that.
// For more detail, read the section of GCC's manual about init_priority.
void OutputSection::sortInitFini() {
  // Sort sections by priority.
  sort([](InputSectionBase *s) { return getPriority(s->name); });
}
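
// Return the 4-byte filler for gaps in this output section: the value given by
// a linker script =fillexp if present, the target trap instruction for
// executable sections, and zero bytes otherwise.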
std::array<uint8_t, 4> OutputSection::getFiller() {
  if (filler)
    return *filler;
  if (flags & SHF_EXECINSTR)
    return target->trapInstr;
  return {0, 0, 0, 0};
}

void OutputSection::checkDynRelAddends(const uint8_t *bufStart) {
  assert(config->writeAddends && config->checkDynamicRelocs);
  assert(type == SHT_REL || type == SHT_RELA);
  SmallVector<InputSection *, 0> sections = getInputSections(*this);
  parallelForEachN(0, sections.size(), [&](size_t i) {
    // When linking with -r or --emit-relocs we might also call this function
    // for input .rel[a].<sec> sections which we simply pass through to the
    // output. We skip over those and only look at the synthetic relocation
    // sections created during linking.
    const auto *sec = dyn_cast<RelocationBaseSection>(sections[i]);
    if (!sec)
      return;
    for (const DynamicReloc &rel : sec->relocs) {
      int64_t addend = rel.addend;
      const OutputSection *relOsec = rel.inputSec->getOutputSection();
      assert(relOsec != nullptr && "missing output section for relocation");
      const uint8_t *relocTarget =
          bufStart + relOsec->offset + rel.inputSec->getOffset(rel.offsetInSec);
      // For SHT_NOBITS the written addend is always zero.
      int64_t writtenAddend =
          relOsec->type == SHT_NOBITS
              ? 0
              : target->getImplicitAddend(relocTarget, rel.type);
      if (addend != writtenAddend)
        internalLinkerError(
            getErrorLocation(relocTarget),
            "wrote incorrect addend value 0x" + utohexstr(writtenAddend) +
                " instead of 0x" + utohexstr(addend) +
                " for dynamic relocation " + toString(rel.type) +
                " at offset 0x" + utohexstr(rel.getOffset()) +
                (rel.sym ? " against symbol " + toString(*rel.sym) : ""));
    }
  });
}

template void OutputSection::writeHeaderTo<ELF32LE>(ELF32LE::Shdr *Shdr);
template void OutputSection::writeHeaderTo<ELF32BE>(ELF32BE::Shdr *Shdr);
template void OutputSection::writeHeaderTo<ELF64LE>(ELF64LE::Shdr *Shdr);
template void OutputSection::writeHeaderTo<ELF64BE>(ELF64BE::Shdr *Shdr);

template void OutputSection::writeTo<ELF32LE>(uint8_t *Buf);
template void OutputSection::writeTo<ELF32BE>(uint8_t *Buf);
template void OutputSection::writeTo<ELF64LE>(uint8_t *Buf);
template void OutputSection::writeTo<ELF64BE>(uint8_t *Buf);

template void OutputSection::maybeCompress<ELF32LE>();
template void OutputSection::maybeCompress<ELF32BE>();
template void OutputSection::maybeCompress<ELF64LE>();
template void OutputSection::maybeCompress<ELF64BE>();