2020-08-21 04:05:13 +08:00
|
|
|
//===- UnwindInfoSection.cpp ----------------------------------------------===//
|
|
|
|
//
|
|
|
|
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
|
|
|
// See https://llvm.org/LICENSE.txt for license information.
|
|
|
|
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
#include "UnwindInfoSection.h"
|
2021-05-26 02:57:16 +08:00
|
|
|
#include "ConcatOutputSection.h"
|
2020-08-21 04:05:13 +08:00
|
|
|
#include "Config.h"
|
|
|
|
#include "InputSection.h"
|
|
|
|
#include "OutputSection.h"
|
|
|
|
#include "OutputSegment.h"
|
2021-02-09 02:47:33 +08:00
|
|
|
#include "SymbolTable.h"
|
2020-08-21 04:05:13 +08:00
|
|
|
#include "Symbols.h"
|
|
|
|
#include "SyntheticSections.h"
|
|
|
|
#include "Target.h"
|
|
|
|
|
|
|
|
#include "lld/Common/ErrorHandler.h"
|
2021-02-09 02:47:33 +08:00
|
|
|
#include "lld/Common/Memory.h"
|
2021-04-26 13:23:32 +08:00
|
|
|
#include "llvm/ADT/STLExtras.h"
|
2021-05-26 02:58:06 +08:00
|
|
|
#include "llvm/ADT/SmallVector.h"
|
2020-08-21 04:05:13 +08:00
|
|
|
#include "llvm/BinaryFormat/MachO.h"
|
|
|
|
|
|
|
|
using namespace llvm;
|
|
|
|
using namespace llvm::MachO;
|
|
|
|
using namespace lld;
|
|
|
|
using namespace lld::macho;
|
|
|
|
|
2020-12-07 14:33:38 +08:00
|
|
|
#define COMMON_ENCODINGS_MAX 127
|
|
|
|
#define COMPACT_ENCODINGS_MAX 256
|
|
|
|
|
|
|
|
#define SECOND_LEVEL_PAGE_BYTES 4096
|
|
|
|
#define SECOND_LEVEL_PAGE_WORDS (SECOND_LEVEL_PAGE_BYTES / sizeof(uint32_t))
|
|
|
|
#define REGULAR_SECOND_LEVEL_ENTRIES_MAX \
|
|
|
|
((SECOND_LEVEL_PAGE_BYTES - \
|
|
|
|
sizeof(unwind_info_regular_second_level_page_header)) / \
|
|
|
|
sizeof(unwind_info_regular_second_level_entry))
|
|
|
|
#define COMPRESSED_SECOND_LEVEL_ENTRIES_MAX \
|
|
|
|
((SECOND_LEVEL_PAGE_BYTES - \
|
|
|
|
sizeof(unwind_info_compressed_second_level_page_header)) / \
|
|
|
|
sizeof(uint32_t))
|
|
|
|
|
|
|
|
#define COMPRESSED_ENTRY_FUNC_OFFSET_BITS 24
|
|
|
|
#define COMPRESSED_ENTRY_FUNC_OFFSET_MASK \
|
|
|
|
UNWIND_INFO_COMPRESSED_ENTRY_FUNC_OFFSET(~0)
|
|
|
|
|
2020-08-21 04:05:13 +08:00
|
|
|
// Compact Unwind format is a Mach-O evolution of DWARF Unwind that
|
|
|
|
// optimizes space and exception-time lookup. Most DWARF unwind
|
|
|
|
// entries can be replaced with Compact Unwind entries, but the ones
|
|
|
|
// that cannot are retained in DWARF form.
|
|
|
|
//
|
|
|
|
// This comment will address macro-level organization of the pre-link
|
|
|
|
// and post-link compact unwind tables. For micro-level organization
|
|
|
|
// pertaining to the bitfield layout of the 32-bit compact unwind
|
|
|
|
// entries, see libunwind/include/mach-o/compact_unwind_encoding.h
|
|
|
|
//
|
|
|
|
// Important clarifying factoids:
|
|
|
|
//
|
|
|
|
// * __LD,__compact_unwind is the compact unwind format for compiler
|
|
|
|
// output and linker input. It is never a final output. It could be
|
|
|
|
// an intermediate output with the `-r` option which retains relocs.
|
|
|
|
//
|
|
|
|
// * __TEXT,__unwind_info is the compact unwind format for final
|
|
|
|
// linker output. It is never an input.
|
|
|
|
//
|
|
|
|
// * __TEXT,__eh_frame is the DWARF format for both linker input and output.
|
|
|
|
//
|
|
|
|
// * __TEXT,__unwind_info entries are divided into 4 KiB pages (2nd
|
|
|
|
// level) by ascending address, and the pages are referenced by an
|
|
|
|
// index (1st level) in the section header.
|
|
|
|
//
|
|
|
|
// * Following the headers in __TEXT,__unwind_info, the bulk of the
|
|
|
|
// section contains a vector of compact unwind entries
|
|
|
|
// `{functionOffset, encoding}` sorted by ascending `functionOffset`.
|
|
|
|
// Adjacent entries with the same encoding can be folded to great
|
|
|
|
// advantage, achieving a 3-order-of-magnitude reduction in the
|
|
|
|
// number of entries.
|
|
|
|
//
|
|
|
|
// * The __TEXT,__unwind_info format can accommodate up to 127 unique
|
|
|
|
// encodings for the space-efficient compressed format. In practice,
|
|
|
|
// fewer than a dozen unique encodings are used by C++ programs of
|
|
|
|
// all sizes. Therefore, we don't even bother implementing the regular
|
|
|
|
// non-compressed format. Time will tell if anyone in the field ever
|
|
|
|
// overflows the 127-encodings limit.
|
2021-02-09 02:47:33 +08:00
|
|
|
//
|
|
|
|
// Refer to the definition of unwind_info_section_header in
|
|
|
|
// compact_unwind_encoding.h for an overview of the format we are encoding
|
|
|
|
// here.
|
2020-08-21 04:05:13 +08:00
|
|
|
|
[lld/mac] Implement -dead_strip
Also adds support for live_support sections, no_dead_strip sections,
.no_dead_strip symbols.
Chromium Framework 345MB unstripped -> 250MB stripped
(vs 290MB unstripped -> 236M stripped with ld64).
Doing dead stripping is a bit faster than not, because so much less
data needs to be processed:
% ministat lld_*
x lld_nostrip.txt
+ lld_strip.txt
N Min Max Median Avg Stddev
x 10 3.929414 4.07692 4.0269079 4.0089678 0.044214794
+ 10 3.8129408 3.9025559 3.8670411 3.8642573 0.024779651
Difference at 95.0% confidence
-0.144711 +/- 0.0336749
-3.60967% +/- 0.839989%
(Student's t, pooled s = 0.0358398)
This interacts with many parts of the linker. I tried to add test coverage
for all added `isLive()` checks, so that some test will fail if any of them
is removed. I checked that the test expectations for the most part match
ld64's behavior (except for live-support-iterations.s, see the comment
in the test). Interacts with:
- debug info
- export tries
- import opcodes
- flags like -exported_symbol(s_list)
- -U / dynamic_lookup
- mod_init_funcs, mod_term_funcs
- weak symbol handling
- unwind info
- stubs
- map files
- -sectcreate
- undefined, dylib, common, defined (both absolute and normal) symbols
It's possible it interacts with more features I didn't think of,
of course.
I also did some manual testing:
- check-llvm check-clang check-lld work with lld with this patch
as host linker and -dead_strip enabled
- Chromium still starts
- Chromium's base_unittests still pass, including unwind tests
Implementation-wise, this is InputSection-based, so it'll work for
object files with .subsections_via_symbols (which includes all
object files generated by clang). I first based this on the COFF
implementation, but later realized that things are more similar to ELF.
I think it'd be good to refactor MarkLive.cpp to look more like the ELF
part at some point, but I'd like to get a working state checked in first.
Mechanical parts:
- Rename canOmitFromOutput to wasCoalesced (no behavior change)
since it really is for weak coalesced symbols
- Add noDeadStrip to Defined, corresponding to N_NO_DEAD_STRIP
(`.no_dead_strip` in asm)
Fixes PR49276.
Differential Revision: https://reviews.llvm.org/D103324
2021-05-08 05:10:05 +08:00
|
|
|
// TODO(gkm): prune __eh_frame entries superseded by __unwind_info, PR50410
|
2020-08-21 04:05:13 +08:00
|
|
|
// TODO(gkm): how do we align the 2nd-level pages?
|
|
|
|
|
2021-04-16 09:14:33 +08:00
|
|
|
// Maps a compact-unwind encoding to its index in an encoding table
// (used both for the common-encodings table and per-page local tables).
using EncodingMap = llvm::DenseMap<compact_unwind_encoding_t, size_t>;

// Planning data for one second-level page of the __unwind_info output.
// Each page covers a contiguous, ascending-address run of compact unwind
// entries (see the format overview comment above).
struct SecondLevelPage {
  // Page format tag -- presumably UNWIND_SECOND_LEVEL_REGULAR or
  // UNWIND_SECOND_LEVEL_COMPRESSED; TODO confirm against
  // compact_unwind_encoding.h.
  uint32_t kind;
  // Index of this page's first entry within the global entry vector.
  size_t entryIndex;
  // Number of entries assigned to this page.
  size_t entryCount;
  // Size of this page's serialized form, in bytes.
  size_t byteCount;
  // Encodings used by this page that did not fit in the common table.
  std::vector<compact_unwind_encoding_t> localEncodings;
  // Reverse lookup: encoding -> index within localEncodings.
  EncodingMap localEncodingIndexes;
};
|
|
|
|
|
|
|
|
// Concrete __unwind_info builder, templated on the target pointer type so
// that CompactUnwindEntry<Ptr> matches the 32- or 64-bit input layout.
template <class Ptr> class UnwindInfoSectionImpl : public UnwindInfoSection {
public:
  // Rewrites __LD,__compact_unwind relocations (personality pointers go
  // through the GOT); see the comment above the definition.
  void prepareRelocations(ConcatInputSection *) override;
  // Lays out the post-link __unwind_info contents (entries, common
  // encodings, second-level pages).
  void finalize() override;
  // Serializes the finalized data into the output buffer.
  void writeTo(uint8_t *buf) const override;

private:
  // Encodings shared across pages, paired with their use counts/indices
  // -- TODO confirm the meaning of the size_t against finalize().
  std::vector<std::pair<compact_unwind_encoding_t, size_t>> commonEncodings;
  // Reverse lookup: encoding -> index within commonEncodings.
  EncodingMap commonEncodingIndexes;
  // Indices of personality functions within the GOT.
  std::vector<uint32_t> personalities;
  // Deduplicates personality references: one canonical Symbol per
  // (section, addend) address. Populated by prepareRelocations().
  SmallDenseMap<std::pair<InputSection *, uint64_t /* addend */>, Symbol *>
      personalityTable;
  // LSDA (Language Specific Data Area) index entries for the output.
  std::vector<unwind_info_section_header_lsda_index_entry> lsdaEntries;
  // Map of function offset (from the image base) to an index within the LSDA
  // array.
  llvm::DenseMap<uint32_t, uint32_t> functionToLsdaIndex;
  // All compact unwind entries, relocated into post-link form.
  std::vector<CompactUnwindEntry<Ptr>> cuVector;
  // Pointers into cuVector; kept separate so entries can be reordered or
  // folded without copying the underlying records.
  std::vector<CompactUnwindEntry<Ptr> *> cuPtrVector;
  // Layout plan for the second-level pages.
  std::vector<SecondLevelPage> secondLevelPages;
  // File offset at which the second-level pages begin.
  uint64_t level2PagesOffset = 0;
};
|
2021-02-24 10:42:02 +08:00
|
|
|
|
2021-02-09 02:47:33 +08:00
|
|
|
// Compact unwind relocations have different semantics, so we handle them in a
|
|
|
|
// separate code path from regular relocations. First, we do not wish to add
|
|
|
|
// rebase opcodes for __LD,__compact_unwind, because that section doesn't
|
|
|
|
// actually end up in the final binary. Second, personality pointers always
|
|
|
|
// reside in the GOT and must be treated specially.
|
2021-04-16 09:14:33 +08:00
|
|
|
template <class Ptr>
void UnwindInfoSectionImpl<Ptr>::prepareRelocations(ConcatInputSection *isec) {
  // Only the pre-link compact unwind section is handled here.
  assert(isec->segname == segment_names::ld &&
         isec->name == section_names::compactUnwind);
  assert(!isec->shouldOmitFromOutput() &&
         "__compact_unwind section should not be omitted");

  // FIXME: Make this skip relocations for CompactUnwindEntries that
  // point to dead-stripped functions. That might save some amount of
  // work. But since there are usually just few personality functions
  // that are referenced from many places, at least some of them likely
  // live, it wouldn't reduce number of got entries.
  for (Reloc &r : isec->relocs) {
    assert(target->hasAttr(r.type, RelocAttrBits::UNSIGNED));
    // Only relocations targeting the `personality` field of a
    // CompactUnwindEntry need special (GOT) treatment; skip the rest.
    if (r.offset % sizeof(CompactUnwindEntry<Ptr>) !=
        offsetof(CompactUnwindEntry<Ptr>, personality))
      continue;

    // Case 1: the personality is referenced via a symbol.
    if (auto *s = r.referent.dyn_cast<Symbol *>()) {
      if (auto *undefined = dyn_cast<Undefined>(s)) {
        treatUndefinedSymbol(*undefined);
        // treatUndefinedSymbol() can replace s with a DylibSymbol; re-check.
        if (isa<Undefined>(s))
          continue;
      }
      if (auto *defined = dyn_cast<Defined>(s)) {
        // Check if we have created a synthetic symbol at the same address.
        // If so, canonicalize this reloc onto that symbol so each
        // personality address gets exactly one GOT entry.
        Symbol *&personality =
            personalityTable[{defined->isec, defined->value}];
        if (personality == nullptr) {
          personality = defined;
          in.got->addEntry(defined);
        } else if (personality != defined) {
          r.referent = personality;
        }
        continue;
      }
      // Neither Undefined nor Defined: must be a dylib symbol; it still
      // needs a GOT slot.
      assert(isa<DylibSymbol>(s));
      in.got->addEntry(s);
      continue;
    }

    // Case 2: the personality is referenced via a section + addend.
    if (auto *referentIsec = r.referent.dyn_cast<InputSection *>()) {
      // Personality functions can be referenced via section relocations
      // if they live in the same object file. Create placeholder synthetic
      // symbols for them in the GOT.
      Symbol *&s = personalityTable[{referentIsec, r.addend}];
      if (s == nullptr) {
        // This runs after dead stripping, so the noDeadStrip argument does not
        // matter.
        s = make<Defined>("<internal>", /*file=*/nullptr, referentIsec,
                          r.addend, /*size=*/0, /*isWeakDef=*/false,
                          /*isExternal=*/false, /*isPrivateExtern=*/false,
                          /*isThumb=*/false, /*isReferencedDynamically=*/false,
                          /*noDeadStrip=*/false);
        in.got->addEntry(s);
      }
      // Retarget the reloc at the synthetic symbol; the addend has been
      // folded into the symbol's value, so zero it out.
      r.referent = s;
      r.addend = 0;
    }
  }
}
|
|
|
|
|
|
|
|
// Unwind info lives in __DATA, and finalization of __TEXT will occur before
|
|
|
|
// finalization of __DATA. Moreover, the finalization of unwind info depends on
|
|
|
|
// the exact addresses that it references. So it is safe for compact unwind to
|
|
|
|
// reference addresses in __TEXT, but not addresses in any other segment.
|
2021-06-12 07:49:52 +08:00
|
|
|
// Verify that a compact-unwind-referenced section lives in __TEXT (see the
// rationale in the comment above), then return it as a ConcatInputSection.
static ConcatInputSection *checkTextSegment(InputSection *sec) {
  const bool inTextSegment = sec->segname == segment_names::text;
  if (!inTextSegment)
    error("compact unwind references address in " + toString(sec) +
          " which is not in segment __TEXT");
  // Sections within __TEXT are always ConcatInputSections, so the cast holds.
  return cast<ConcatInputSection>(sec);
}
|
|
|
|
|
|
|
|
// We need to apply the relocations to the pre-link compact unwind section
|
|
|
|
// before converting it to post-link form. There should only be absolute
|
|
|
|
// relocations here: since we are not emitting the pre-link CU section, there
|
|
|
|
// is no source address to make a relative location meaningful.
|
2021-04-16 09:14:33 +08:00
|
|
|
template <class Ptr>
|
|
|
|
static void
|
2021-05-26 02:57:16 +08:00
|
|
|
relocateCompactUnwind(ConcatOutputSection *compactUnwindSection,
|
2021-04-16 09:14:33 +08:00
|
|
|
std::vector<CompactUnwindEntry<Ptr>> &cuVector) {
|
[lld-macho] Implement cstring deduplication
Our implementation draws heavily from LLD-ELF's, which in turn delegates
its string deduplication to llvm-mc's StringTableBuilder. The messiness of
this diff is largely due to the fact that we've previously assumed that
all InputSections get concatenated together to form the output. This is
no longer true with CStringInputSections, which split their contents into
StringPieces. StringPieces are much more lightweight than InputSections,
which is important as we create a lot of them. They may also overlap in
the output, which makes it possible for strings to be tail-merged. In
fact, the initial version of this diff implemented tail merging, but
I've dropped it for reasons I'll explain later.
**Alignment Issues**
Mergeable cstring literals are found under the `__TEXT,__cstring`
section. In contrast to ELF, which puts strings that need different
alignments into different sections, clang's Mach-O backend puts them all
in one section. Strings that need to be aligned have the `.p2align`
directive emitted before them, which simply translates into zero padding
in the object file.
I *think* ld64 extracts the desired per-string alignment from this data
by preserving each string's offset from the last section-aligned
address. I'm not entirely certain since it doesn't seem consistent about
doing this; but perhaps this can be chalked up to cases where ld64 has
to deduplicate strings with different offset/alignment combos -- it
seems to pick one of their alignments to preserve. This doesn't seem
correct in general; we can in fact can induce ld64 to produce a crashing
binary just by linking in an additional object file that only contains
cstrings and no code. See PR50563 for details.
Moreover, this scheme seems rather inefficient: since unaligned and
aligned strings are all put in the same section, which has a single
alignment value, it doesn't seem possible to tell whether a given string
doesn't have any alignment requirements. Preserving offset+alignments
for strings that don't need it is wasteful.
In practice, the crashes seen so far seem to stem from x86_64 SIMD
operations on cstrings. X86_64 requires SIMD accesses to be
16-byte-aligned. So for now, I'm thinking of just aligning all strings
to 16 bytes on x86_64. This is indeed wasteful, but implementation-wise
it's simpler than preserving per-string alignment+offsets. It also
avoids the aforementioned crash after deduplication of
differently-aligned strings. Finally, the overhead is not huge: using
16-byte alignment (vs no alignment) is only a 0.5% size overhead when
linking chromium_framework.
With these alignment requirements, it doesn't make sense to attempt tail
merging -- most strings will not be eligible since their overlaps aren't
likely to start at a 16-byte boundary. Tail-merging (with alignment) for
chromium_framework only improves size by 0.3%.
It's worth noting that LLD-ELF only does tail merging at `-O2`. By
default (at `-O1`), it just deduplicates w/o tail merging. @thakis has
also mentioned that they saw it regress compressed size in some cases
and therefore turned it off. `ld64` does not seem to do tail merging at
all.
**Performance Numbers**
CString deduplication reduces chromium_framework from 250MB to 242MB, or
about a 3.2% reduction.
Numbers for linking chromium_framework on my 3.2 GHz 16-Core Intel Xeon W:
N Min Max Median Avg Stddev
x 20 3.91 4.03 3.935 3.95 0.034641016
+ 20 3.99 4.14 4.015 4.0365 0.0492336
Difference at 95.0% confidence
0.0865 +/- 0.027245
2.18987% +/- 0.689746%
(Student's t, pooled s = 0.0425673)
As expected, cstring merging incurs some non-trivial overhead.
When passing `--no-literal-merge`, it seems that performance is the
same, i.e. the refactoring in this diff didn't cost us.
N Min Max Median Avg Stddev
x 20 3.91 4.03 3.935 3.95 0.034641016
+ 20 3.89 4.02 3.935 3.9435 0.043197831
No difference proven at 95.0% confidence
Reviewed By: #lld-macho, gkm
Differential Revision: https://reviews.llvm.org/D102964
2021-06-08 11:47:12 +08:00
|
|
|
for (const ConcatInputSection *isec : compactUnwindSection->inputs) {
|
[lld/mac] Write every weak symbol only once in the output
Before this, if an inline function was defined in several input files,
lld would write each copy of the inline function the output. With this
patch, it only writes one copy.
Reduces the size of Chromium Framework from 378MB to 345MB (compared
to 290MB linked with ld64, which also does dead-stripping, which we
don't do yet), and makes linking it faster:
N Min Max Median Avg Stddev
x 10 3.9957051 4.3496981 4.1411121 4.156837 0.10092097
+ 10 3.908154 4.169318 3.9712729 3.9846753 0.075773012
Difference at 95.0% confidence
-0.172162 +/- 0.083847
-4.14165% +/- 2.01709%
(Student's t, pooled s = 0.0892373)
Implementation-wise, when merging two weak symbols, this sets a
"canOmitFromOutput" on the InputSection belonging to the weak symbol not put in
the symbol table. We then don't write InputSections that have this set, as long
as they are not referenced from other symbols. (This happens e.g. for object
files that don't set .subsections_via_symbols or that use .alt_entry.)
Some restrictions:
- not yet done for bitcode inputs
- no "comdat" handling (`kindNoneGroupSubordinate*` in ld64) --
Frame Descriptor Entries (FDEs), Language Specific Data Areas (LSDAs)
(that is, catch block unwind information) and Personality Routines
associated with weak functions still not stripped. This is wasteful,
but harmless.
- However, this does strip weaks from __unwind_info (which is needed for
correctness and not just for size)
- This nopes out on InputSections that are referenced from more than
one symbol (eg from .alt_entry) for now
Things that work based on symbols Just Work:
- map files (change in MapFile.cpp is no-op and not needed; I just
found it a bit more explicit)
- exports
Things that work with inputSections need to explicitly check if
an inputSection is written (e.g. unwind info).
This patch is useful in itself, but it's also likely also a useful foundation
for dead_strip.
I used to have a "canoncialRepresentative" pointer on InputSection instead of
just the bool, which would be handy for ICF too. But I ended up not needing it
for this patch, so I removed that again for now.
Differential Revision: https://reviews.llvm.org/D102076
2021-05-07 02:47:57 +08:00
|
|
|
assert(isec->parent == compactUnwindSection);
|
|
|
|
|
2021-02-09 02:47:33 +08:00
|
|
|
uint8_t *buf =
|
|
|
|
reinterpret_cast<uint8_t *>(cuVector.data()) + isec->outSecFileOff;
|
|
|
|
memcpy(buf, isec->data.data(), isec->data.size());
|
|
|
|
|
2021-03-10 13:41:34 +08:00
|
|
|
for (const Reloc &r : isec->relocs) {
|
2021-02-09 02:47:33 +08:00
|
|
|
uint64_t referentVA = 0;
|
2021-03-30 08:19:29 +08:00
|
|
|
if (auto *referentSym = r.referent.dyn_cast<Symbol *>()) {
|
2021-02-09 02:47:33 +08:00
|
|
|
if (!isa<Undefined>(referentSym)) {
|
|
|
|
assert(referentSym->isInGot());
|
|
|
|
if (auto *defined = dyn_cast<Defined>(referentSym))
|
|
|
|
checkTextSegment(defined->isec);
|
|
|
|
// At this point in the link, we may not yet know the final address of
|
|
|
|
// the GOT, so we just encode the index. We make it a 1-based index so
|
|
|
|
// that we can distinguish the null pointer case.
|
|
|
|
referentVA = referentSym->gotIndex + 1;
|
|
|
|
}
|
|
|
|
} else if (auto *referentIsec = r.referent.dyn_cast<InputSection *>()) {
|
2021-06-12 07:49:52 +08:00
|
|
|
ConcatInputSection *concatIsec = checkTextSegment(referentIsec);
|
|
|
|
if (concatIsec->shouldOmitFromOutput())
|
[lld/mac] Write every weak symbol only once in the output
Before this, if an inline function was defined in several input files,
lld would write each copy of the inline function to the output. With this
patch, it only writes one copy.
Reduces the size of Chromium Framework from 378MB to 345MB (compared
to 290MB linked with ld64, which also does dead-stripping, which we
don't do yet), and makes linking it faster:
N Min Max Median Avg Stddev
x 10 3.9957051 4.3496981 4.1411121 4.156837 0.10092097
+ 10 3.908154 4.169318 3.9712729 3.9846753 0.075773012
Difference at 95.0% confidence
-0.172162 +/- 0.083847
-4.14165% +/- 2.01709%
(Student's t, pooled s = 0.0892373)
Implementation-wise, when merging two weak symbols, this sets a
"canOmitFromOutput" on the InputSection belonging to the weak symbol not put in
the symbol table. We then don't write InputSections that have this set, as long
as they are not referenced from other symbols. (This happens e.g. for object
files that don't set .subsections_via_symbols or that use .alt_entry.)
Some restrictions:
- not yet done for bitcode inputs
- no "comdat" handling (`kindNoneGroupSubordinate*` in ld64) --
Frame Descriptor Entries (FDEs), Language Specific Data Areas (LSDAs)
(that is, catch block unwind information) and Personality Routines
associated with weak functions still not stripped. This is wasteful,
but harmless.
- However, this does strip weaks from __unwind_info (which is needed for
correctness and not just for size)
- This nopes out on InputSections that are referenced from more than
one symbol (eg from .alt_entry) for now
Things that work based on symbols Just Work:
- map files (change in MapFile.cpp is no-op and not needed; I just
found it a bit more explicit)
- exports
Things that work with inputSections need to explicitly check if
an inputSection is written (e.g. unwind info).
This patch is useful in itself, but it's also likely a useful foundation
for dead_strip.
I used to have a "canonicalRepresentative" pointer on InputSection instead of
just the bool, which would be handy for ICF too. But I ended up not needing it
for this patch, so I removed that again for now.
Differential Revision: https://reviews.llvm.org/D102076
2021-05-07 02:47:57 +08:00
|
|
|
referentVA = UINT64_MAX; // Tombstone value
|
|
|
|
else
|
[lld-macho] Implement cstring deduplication
Our implementation draws heavily from LLD-ELF's, which in turn delegates
its string deduplication to llvm-mc's StringTableBuilder. The messiness of
this diff is largely due to the fact that we've previously assumed that
all InputSections get concatenated together to form the output. This is
no longer true with CStringInputSections, which split their contents into
StringPieces. StringPieces are much more lightweight than InputSections,
which is important as we create a lot of them. They may also overlap in
the output, which makes it possible for strings to be tail-merged. In
fact, the initial version of this diff implemented tail merging, but
I've dropped it for reasons I'll explain later.
**Alignment Issues**
Mergeable cstring literals are found under the `__TEXT,__cstring`
section. In contrast to ELF, which puts strings that need different
alignments into different sections, clang's Mach-O backend puts them all
in one section. Strings that need to be aligned have the `.p2align`
directive emitted before them, which simply translates into zero padding
in the object file.
I *think* ld64 extracts the desired per-string alignment from this data
by preserving each string's offset from the last section-aligned
address. I'm not entirely certain since it doesn't seem consistent about
doing this; but perhaps this can be chalked up to cases where ld64 has
to deduplicate strings with different offset/alignment combos -- it
seems to pick one of their alignments to preserve. This doesn't seem
correct in general; we can in fact induce ld64 to produce a crashing
binary just by linking in an additional object file that only contains
cstrings and no code. See PR50563 for details.
Moreover, this scheme seems rather inefficient: since unaligned and
aligned strings are all put in the same section, which has a single
alignment value, it doesn't seem possible to tell whether a given string
doesn't have any alignment requirements. Preserving offset+alignments
for strings that don't need it is wasteful.
In practice, the crashes seen so far seem to stem from x86_64 SIMD
operations on cstrings. X86_64 requires SIMD accesses to be
16-byte-aligned. So for now, I'm thinking of just aligning all strings
to 16 bytes on x86_64. This is indeed wasteful, but implementation-wise
it's simpler than preserving per-string alignment+offsets. It also
avoids the aforementioned crash after deduplication of
differently-aligned strings. Finally, the overhead is not huge: using
16-byte alignment (vs no alignment) is only a 0.5% size overhead when
linking chromium_framework.
With these alignment requirements, it doesn't make sense to attempt tail
merging -- most strings will not be eligible since their overlaps aren't
likely to start at a 16-byte boundary. Tail-merging (with alignment) for
chromium_framework only improves size by 0.3%.
It's worth noting that LLD-ELF only does tail merging at `-O2`. By
default (at `-O1`), it just deduplicates w/o tail merging. @thakis has
also mentioned that they saw it regress compressed size in some cases
and therefore turned it off. `ld64` does not seem to do tail merging at
all.
**Performance Numbers**
CString deduplication reduces chromium_framework from 250MB to 242MB, or
about a 3.2% reduction.
Numbers for linking chromium_framework on my 3.2 GHz 16-Core Intel Xeon W:
N Min Max Median Avg Stddev
x 20 3.91 4.03 3.935 3.95 0.034641016
+ 20 3.99 4.14 4.015 4.0365 0.0492336
Difference at 95.0% confidence
0.0865 +/- 0.027245
2.18987% +/- 0.689746%
(Student's t, pooled s = 0.0425673)
As expected, cstring merging incurs some non-trivial overhead.
When passing `--no-literal-merge`, it seems that performance is the
same, i.e. the refactoring in this diff didn't cost us.
N Min Max Median Avg Stddev
x 20 3.91 4.03 3.935 3.95 0.034641016
+ 20 3.89 4.02 3.935 3.9435 0.043197831
No difference proven at 95.0% confidence
Reviewed By: #lld-macho, gkm
Differential Revision: https://reviews.llvm.org/D102964
2021-06-08 11:47:12 +08:00
|
|
|
referentVA = referentIsec->getVA(r.addend);
|
2021-02-09 02:47:33 +08:00
|
|
|
}
|
2021-04-16 09:14:33 +08:00
|
|
|
|
|
|
|
writeAddress(buf + r.offset, referentVA, r.length);
|
2021-02-09 02:47:33 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// There should only be a handful of unique personality pointers, so we can
// encode them as 2-bit indices into a small array.
//
// Walks every (already relocated, sorted, folded) compact-unwind entry and
// rewrites its `personality` field into the UNWIND_PERSONALITY_MASK bit-field
// of `encoding`, building the deduplicated `personalities` table as it goes.
// Note: `cu->personality` here holds a 1-based GOT index (0 means "no
// personality"), as written by relocateCompactUnwind().
template <class Ptr>
void encodePersonalities(
    const std::vector<CompactUnwindEntry<Ptr> *> &cuPtrVector,
    std::vector<uint32_t> &personalities) {
  for (CompactUnwindEntry<Ptr> *cu : cuPtrVector) {
    if (cu->personality == 0)
      continue;
    // Linear search is fast enough for a small array.
    auto it = find(personalities, cu->personality);
    uint32_t personalityIndex; // 1-based index
    if (it != personalities.end()) {
      personalityIndex = std::distance(personalities.begin(), it) + 1;
    } else {
      personalities.push_back(cu->personality);
      personalityIndex = personalities.size();
    }
    // Shift the 1-based table index into position within the encoding's
    // personality bit-field.
    cu->encoding |=
        personalityIndex << countTrailingZeros(
            static_cast<compact_unwind_encoding_t>(UNWIND_PERSONALITY_MASK));
  }
  // The personality bit-field only has room for indices 1..3; more unique
  // personality routines than that cannot be represented.
  if (personalities.size() > 3)
    error("too many personalities (" + std::to_string(personalities.size()) +
          ") for compact unwind to encode");
}
|
|
|
|
|
2020-08-21 04:05:13 +08:00
|
|
|
// Scan the __LD,__compact_unwind entries and compute the space needs of
// __TEXT,__unwind_info and __TEXT,__eh_frame
template <class Ptr> void UnwindInfoSectionImpl<Ptr>::finalize() {
  if (compactUnwindSection == nullptr)
    return;

  // At this point, the address space for __TEXT,__text has been
  // assigned, so we can relocate the __LD,__compact_unwind entries
  // into a temporary buffer. Relocation is necessary in order to sort
  // the CU entries by function address. Sorting is necessary so that
  // we can fold adjacent CU entries with identical
  // encoding+personality+lsda. Folding is necessary because it reduces
  // the number of CU entries by as much as 3 orders of magnitude!
  compactUnwindSection->finalize();
  assert(compactUnwindSection->getSize() % sizeof(CompactUnwindEntry<Ptr>) ==
         0);
  size_t cuCount =
      compactUnwindSection->getSize() / sizeof(CompactUnwindEntry<Ptr>);
  cuVector.resize(cuCount);
  relocateCompactUnwind(compactUnwindSection, cuVector);

  // Rather than sort & fold the 32-byte entries directly, we create a
  // vector of pointers to entries and sort & fold that instead.
  cuPtrVector.reserve(cuCount);
  for (CompactUnwindEntry<Ptr> &cuEntry : cuVector)
    cuPtrVector.emplace_back(&cuEntry);
  llvm::sort(cuPtrVector, [](const CompactUnwindEntry<Ptr> *a,
                             const CompactUnwindEntry<Ptr> *b) {
    return a->functionAddress < b->functionAddress;
  });

  // Dead-stripped functions get a functionAddress of UINT64_MAX in
  // relocateCompactUnwind(). Filter them out here.
  // FIXME: This doesn't yet collect associated data like LSDAs kept
  // alive only by a now-removed CompactUnwindEntry or other comdat-like
  // data (`kindNoneGroupSubordinate*` in ld64).
  // Since the vector is sorted by functionAddress, all tombstones are at the
  // tail; lower_bound finds the first one and we erase from there.
  CompactUnwindEntry<Ptr> tombstone;
  tombstone.functionAddress = static_cast<Ptr>(UINT64_MAX);
  cuPtrVector.erase(
      std::lower_bound(cuPtrVector.begin(), cuPtrVector.end(), &tombstone,
                       [](const CompactUnwindEntry<Ptr> *a,
                          const CompactUnwindEntry<Ptr> *b) {
                         return a->functionAddress < b->functionAddress;
                       }),
      cuPtrVector.end());

  // Fold adjacent entries with matching encoding+personality+lsda
  // We use three iterators on the same cuPtrVector to fold in-situ:
  // (1) `foldBegin` is the first of a potential sequence of matching entries
  // (2) `foldEnd` is the first non-matching entry after `foldBegin`.
  // The semi-open interval [ foldBegin .. foldEnd ) contains a range
  // entries that can be folded into a single entry and written to ...
  // (3) `foldWrite`
  auto foldWrite = cuPtrVector.begin();
  for (auto foldBegin = cuPtrVector.begin(); foldBegin < cuPtrVector.end();) {
    auto foldEnd = foldBegin;
    while (++foldEnd < cuPtrVector.end() &&
           (*foldBegin)->encoding == (*foldEnd)->encoding &&
           (*foldBegin)->personality == (*foldEnd)->personality &&
           (*foldBegin)->lsda == (*foldEnd)->lsda)
      ;
    *foldWrite++ = *foldBegin;
    foldBegin = foldEnd;
  }
  cuPtrVector.erase(foldWrite, cuPtrVector.end());

  // Fold the personality pointers into the encodings' 2-bit index field and
  // populate the `personalities` table.
  encodePersonalities(cuPtrVector, personalities);

  // Count frequencies of the folded encodings
  EncodingMap encodingFrequencies; // encoding -> number of occurrences
  for (const CompactUnwindEntry<Ptr> *cuPtrEntry : cuPtrVector)
    encodingFrequencies[cuPtrEntry->encoding]++;

  // Make a vector of encodings, sorted by descending frequency
  for (const auto &frequency : encodingFrequencies)
    commonEncodings.emplace_back(frequency);
  llvm::sort(commonEncodings,
             [](const std::pair<compact_unwind_encoding_t, size_t> &a,
                const std::pair<compact_unwind_encoding_t, size_t> &b) {
               if (a.second == b.second)
                 // When frequencies match, secondarily sort on encoding
                 // to maintain parity with validate-unwind-info.py
                 return a.first > b.first;
               return a.second > b.second;
             });

  // Truncate the vector to 127 elements.
  // Common encoding indexes are limited to 0..126, while encoding
  // indexes 127..255 are local to each second-level page
  if (commonEncodings.size() > COMMON_ENCODINGS_MAX)
    commonEncodings.resize(COMMON_ENCODINGS_MAX);

  // Create a map from encoding to common-encoding-table index
  for (size_t i = 0; i < commonEncodings.size(); i++)
    commonEncodingIndexes[commonEncodings[i].first] = i;

  // Split folded encodings into pages, where each page is limited by ...
  // (a) 4 KiB capacity
  // (b) 24-bit difference between first & final function address
  // (c) 8-bit compact-encoding-table index,
  //     for which 0..126 references the global common-encodings table,
  //     and 127..255 references a local per-second-level-page table.
  // First we try the compact format and determine how many entries fit.
  // If more entries fit in the regular format, we use that.
  for (size_t i = 0; i < cuPtrVector.size();) {
    secondLevelPages.emplace_back();
    SecondLevelPage &page = secondLevelPages.back();
    page.entryIndex = i;
    // Limit (b): compressed entries hold only a 24-bit offset from the
    // page's first function address.
    uintptr_t functionAddressMax =
        cuPtrVector[i]->functionAddress + COMPRESSED_ENTRY_FUNC_OFFSET_MASK;
    size_t n = commonEncodings.size();
    size_t wordsRemaining =
        SECOND_LEVEL_PAGE_WORDS -
        sizeof(unwind_info_compressed_second_level_page_header) /
            sizeof(uint32_t);
    while (wordsRemaining >= 1 && i < cuPtrVector.size()) {
      const CompactUnwindEntry<Ptr> *cuPtr = cuPtrVector[i];
      if (cuPtr->functionAddress >= functionAddressMax) {
        break;
      } else if (commonEncodingIndexes.count(cuPtr->encoding) ||
                 page.localEncodingIndexes.count(cuPtr->encoding)) {
        // Encoding already has a table slot: the entry costs one word.
        i++;
        wordsRemaining--;
      } else if (wordsRemaining >= 2 && n < COMPACT_ENCODINGS_MAX) {
        // New local encoding: one word for the entry plus one for the
        // page-local encoding table.
        page.localEncodings.emplace_back(cuPtr->encoding);
        page.localEncodingIndexes[cuPtr->encoding] = n++;
        i++;
        wordsRemaining -= 2;
      } else {
        break;
      }
    }
    page.entryCount = i - page.entryIndex;

    // If this is not the final page, see if it's possible to fit more
    // entries by using the regular format. This can happen when there
    // are many unique encodings, and we saturated the local
    // encoding table early.
    if (i < cuPtrVector.size() &&
        page.entryCount < REGULAR_SECOND_LEVEL_ENTRIES_MAX) {
      page.kind = UNWIND_SECOND_LEVEL_REGULAR;
      page.entryCount = std::min(REGULAR_SECOND_LEVEL_ENTRIES_MAX,
                                 cuPtrVector.size() - page.entryIndex);
      i = page.entryIndex + page.entryCount;
    } else {
      page.kind = UNWIND_SECOND_LEVEL_COMPRESSED;
    }
  }

  // Record, for every function, the index its LSDA will have in the LSDA
  // index table, and collect the (function, lsda) offset pairs themselves.
  for (const CompactUnwindEntry<Ptr> *cu : cuPtrVector) {
    uint32_t functionOffset = cu->functionAddress - in.header->addr;
    functionToLsdaIndex[functionOffset] = lsdaEntries.size();
    if (cu->lsda != 0)
      lsdaEntries.push_back(
          {functionOffset, static_cast<uint32_t>(cu->lsda - in.header->addr)});
  }

  // compute size of __TEXT,__unwind_info section
  level2PagesOffset =
      sizeof(unwind_info_section_header) +
      commonEncodings.size() * sizeof(uint32_t) +
      personalities.size() * sizeof(uint32_t) +
      // The extra second-level-page entry is for the sentinel
      (secondLevelPages.size() + 1) *
          sizeof(unwind_info_section_header_index_entry) +
      lsdaEntries.size() * sizeof(unwind_info_section_header_lsda_index_entry);
  unwindInfoSize =
      level2PagesOffset + secondLevelPages.size() * SECOND_LEVEL_PAGE_BYTES;
}
|
|
|
|
|
2020-12-02 09:27:33 +08:00
|
|
|
// All inputs are relocated and output addresses are known, so write!

// Serializes the __TEXT,__unwind_info section into `buf`, in layout order:
// section header, common-encodings table, personalities table, level-1 index
// (plus sentinel), LSDA index entries, then the second-level pages. All
// layout decisions were made in finalize(); this only writes.
template <class Ptr>
void UnwindInfoSectionImpl<Ptr>::writeTo(uint8_t *buf) const {
  // section header
  auto *uip = reinterpret_cast<unwind_info_section_header *>(buf);
  uip->version = 1;
  uip->commonEncodingsArraySectionOffset = sizeof(unwind_info_section_header);
  uip->commonEncodingsArrayCount = commonEncodings.size();
  uip->personalityArraySectionOffset =
      uip->commonEncodingsArraySectionOffset +
      (uip->commonEncodingsArrayCount * sizeof(uint32_t));
  uip->personalityArrayCount = personalities.size();
  uip->indexSectionOffset = uip->personalityArraySectionOffset +
                            (uip->personalityArrayCount * sizeof(uint32_t));
  // +1 for the level-1 sentinel entry written below.
  uip->indexCount = secondLevelPages.size() + 1;

  // Common encodings
  auto *i32p = reinterpret_cast<uint32_t *>(&uip[1]);
  for (const auto &encoding : commonEncodings)
    *i32p++ = encoding.first;

  // Personalities
  // `personality` holds a 1-based GOT index (see relocateCompactUnwind);
  // convert it into an image-relative offset of the GOT slot.
  for (const uint32_t &personality : personalities)
    *i32p++ =
        in.got->addr + (personality - 1) * target->wordSize - in.header->addr;

  // Level-1 index
  uint32_t lsdaOffset =
      uip->indexSectionOffset +
      uip->indexCount * sizeof(unwind_info_section_header_index_entry);
  uint64_t l2PagesOffset = level2PagesOffset;
  auto *iep = reinterpret_cast<unwind_info_section_header_index_entry *>(i32p);
  for (const SecondLevelPage &page : secondLevelPages) {
    iep->functionOffset =
        cuPtrVector[page.entryIndex]->functionAddress - in.header->addr;
    iep->secondLevelPagesSectionOffset = l2PagesOffset;
    iep->lsdaIndexArraySectionOffset =
        lsdaOffset + functionToLsdaIndex.lookup(iep->functionOffset) *
                         sizeof(unwind_info_section_header_lsda_index_entry);
    iep++;
    l2PagesOffset += SECOND_LEVEL_PAGE_BYTES;
  }
  // Level-1 sentinel
  const CompactUnwindEntry<Ptr> &cuEnd = cuVector.back();
  iep->functionOffset = cuEnd.functionAddress + cuEnd.functionLength;
  iep->secondLevelPagesSectionOffset = 0;
  iep->lsdaIndexArraySectionOffset =
      lsdaOffset +
      lsdaEntries.size() * sizeof(unwind_info_section_header_lsda_index_entry);
  iep++;

  // LSDAs
  size_t lsdaBytes =
      lsdaEntries.size() * sizeof(unwind_info_section_header_lsda_index_entry);
  if (lsdaBytes > 0)
    memcpy(iep, lsdaEntries.data(), lsdaBytes);

  // Level-2 pages
  auto *pp = reinterpret_cast<uint32_t *>(reinterpret_cast<uint8_t *>(iep) +
                                          lsdaBytes);
  for (const SecondLevelPage &page : secondLevelPages) {
    if (page.kind == UNWIND_SECOND_LEVEL_COMPRESSED) {
      // Compressed entries encode the function address as a 24-bit offset
      // from the page's first function address.
      uintptr_t functionAddressBase =
          cuPtrVector[page.entryIndex]->functionAddress;
      auto *p2p =
          reinterpret_cast<unwind_info_compressed_second_level_page_header *>(
              pp);
      p2p->kind = page.kind;
      p2p->entryPageOffset =
          sizeof(unwind_info_compressed_second_level_page_header);
      p2p->entryCount = page.entryCount;
      p2p->encodingsPageOffset =
          p2p->entryPageOffset + p2p->entryCount * sizeof(uint32_t);
      p2p->encodingsCount = page.localEncodings.size();
      auto *ep = reinterpret_cast<uint32_t *>(&p2p[1]);
      for (size_t i = 0; i < page.entryCount; i++) {
        const CompactUnwindEntry<Ptr> *cuep = cuPtrVector[page.entryIndex + i];
        // Prefer the common (global) encoding table; fall back to the
        // page-local table. finalize() guaranteed one of them has it.
        auto it = commonEncodingIndexes.find(cuep->encoding);
        if (it == commonEncodingIndexes.end())
          it = page.localEncodingIndexes.find(cuep->encoding);
        *ep++ = (it->second << COMPRESSED_ENTRY_FUNC_OFFSET_BITS) |
                (cuep->functionAddress - functionAddressBase);
      }
      if (page.localEncodings.size() != 0)
        memcpy(ep, page.localEncodings.data(),
               page.localEncodings.size() * sizeof(uint32_t));
    } else {
      // Regular pages store (functionAddress, encoding) word pairs verbatim.
      auto *p2p =
          reinterpret_cast<unwind_info_regular_second_level_page_header *>(pp);
      p2p->kind = page.kind;
      p2p->entryPageOffset =
          sizeof(unwind_info_regular_second_level_page_header);
      p2p->entryCount = page.entryCount;
      auto *ep = reinterpret_cast<uint32_t *>(&p2p[1]);
      for (size_t i = 0; i < page.entryCount; i++) {
        const CompactUnwindEntry<Ptr> *cuep = cuPtrVector[page.entryIndex + i];
        *ep++ = cuep->functionAddress;
        *ep++ = cuep->encoding;
      }
    }
    // Pages are fixed-size; advance to the next 4 KiB slot.
    pp += SECOND_LEVEL_PAGE_WORDS;
  }
}
|
2021-04-16 09:14:33 +08:00
|
|
|
|
|
|
|
// Factory: instantiate the UnwindInfoSection specialization matching the
// target's pointer width (8-byte words => 64-bit, otherwise 32-bit).
UnwindInfoSection *macho::makeUnwindInfoSection() {
  const bool is64Bit = target->wordSize == 8;
  if (is64Bit)
    return make<UnwindInfoSectionImpl<uint64_t>>();
  return make<UnwindInfoSectionImpl<uint32_t>>();
}
|