//===- ICF.cpp ------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// ICF is short for Identical Code Folding. This is a size optimization to
// identify and merge two or more read-only sections (typically functions)
// that happen to have the same contents. It usually reduces output size
// by a few percent.
//
// In ICF, two sections are considered identical if they have the same
// section flags, section data, and relocations. Relocations are tricky,
// because two relocations are considered the same if they have the same
// relocation types, values, and if they point to the same sections *in
// terms of ICF*.
//
// Here is an example. If foo and bar defined below are compiled to the
// same machine instructions, ICF can and should merge the two, although
// their relocations point to each other.
//
//   void foo() { bar(); }
//   void bar() { foo(); }
//
// If you merge the two, their relocations point to the same section and
// thus you know they are mergeable, but how do you know they are
// mergeable in the first place? This is not an easy problem to solve.
//
// What we are doing in LLD is to partition sections into equivalence
// classes. Sections in the same equivalence class when the algorithm
// terminates are considered identical. Here are the details (a rough
// pseudocode sketch of the whole loop follows the steps):
//
// 1. First, we partition sections using their hash values as keys. Hash
//    values contain section types, section contents and numbers of
//    relocations. During this step, relocation targets are not taken into
//    account. We just put sections that apparently differ into different
//    equivalence classes.
//
// 2. Next, for each equivalence class, we visit sections to compare
//    relocation targets. Relocation targets are considered equivalent if
//    their targets are in the same equivalence class. Sections with
//    different relocation targets are put into different equivalence
//    classes.
//
// 3. If we split an equivalence class in step 2, two relocations that
//    previously targeted the same equivalence class may now target
//    different equivalence classes. Therefore, we repeat step 2 until
//    convergence is obtained.
//
// 4. For each equivalence class C, pick an arbitrary section in C, and
//    merge all the other sections in C with it.
//
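// Putting the steps together, and ignoring the parallelization and the
// class-ID double-buffering described later in this file, the driver loop
// is roughly:
//
//   for each section S: class(S) = hash(S)               // step 1
//   do {
//     repeat = false
//     for each equivalence class [begin, end):
//       segregate(begin, end)  // may split the class and set repeat
//   } while (repeat)                                      // steps 2 and 3
//   for each equivalence class: merge its members         // step 4
//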
// For small programs, this algorithm needs 3-5 iterations. For large
// programs such as Chromium, it takes more than 20 iterations.
//
// This algorithm was mentioned as an "optimistic algorithm" in [1],
// though gold implements a different algorithm than this.
//
// We parallelize each step so that multiple threads can work on different
// equivalence classes concurrently. That gave us a large performance
// boost when applying ICF on large programs. For example, MSVC link.exe
// or GNU gold takes 10-20 seconds to apply ICF on Chromium, whose output
// size is about 1.5 GB, but LLD can finish it in less than 2 seconds on a
// 2.8 GHz 40 core machine. Even without threading, LLD's ICF is still
// faster than MSVC or gold.
//
// [1] Safe ICF: Pointer Safe and Unwinding aware Identical Code Folding
//     in the Gold Linker
//     http://static.googleusercontent.com/media/research.google.com/en//pubs/archive/36912.pdf
//
//===----------------------------------------------------------------------===//
#include "ICF.h"
#include "Config.h"
#include "InputFiles.h"
#include "LinkerScript.h"
#include "OutputSections.h"
#include "SymbolTable.h"
#include "Symbols.h"
#include "SyntheticSections.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/Object/ELF.h"
#include "llvm/Support/Parallel.h"
#include "llvm/Support/TimeProfiler.h"
#include "llvm/Support/xxhash.h"
#include <algorithm>
#include <atomic>

using namespace llvm;
using namespace llvm::ELF;
using namespace llvm::object;
using namespace lld;
using namespace lld::elf;

namespace {
template <class ELFT> class ICF {
public:
  void run();

private:
  void segregate(size_t begin, size_t end, uint32_t eqClassBase, bool constant);

  template <class RelTy>
  bool constantEq(const InputSection *a, ArrayRef<RelTy> relsA,
                  const InputSection *b, ArrayRef<RelTy> relsB);

  template <class RelTy>
  bool variableEq(const InputSection *a, ArrayRef<RelTy> relsA,
                  const InputSection *b, ArrayRef<RelTy> relsB);

  bool equalsConstant(const InputSection *a, const InputSection *b);
  bool equalsVariable(const InputSection *a, const InputSection *b);

  size_t findBoundary(size_t begin, size_t end);

  void forEachClassRange(size_t begin, size_t end,
                         llvm::function_ref<void(size_t, size_t)> fn);

  void forEachClass(llvm::function_ref<void(size_t, size_t)> fn);
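  // The set of candidate sections for ICF. After the initial sort in run(),
  // sections in the same equivalence class are consecutive in this vector.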
  SmallVector<InputSection *, 0> sections;

  // We repeat the main loop while `repeat` is true.
  std::atomic<bool> repeat;

  // The main loop counter.
  int cnt = 0;

  // We have two locations for equivalence classes. On the first iteration
  // of the main loop, eqClass[0] has a valid value, and eqClass[1] contains
  // garbage. We read equivalence classes from slot 0 and write to slot 1.
  // So, eqClass[0] represents the current class, and eqClass[1] represents
  // the next class. On each iteration, we switch their roles and use them
  // alternately.
  //
  // Why are we doing this? Recall that other threads may be working on
  // other equivalence classes in parallel. They may read sections that we
  // are updating. We cannot update equivalence classes in place because
  // it breaks the invariant that all possibly-identical sections must be
  // in the same equivalence class at any moment. In other words, the for
  // loop to update equivalence classes is not atomic, and that is
  // observable from other threads. By writing new classes to the other
  // slot, we can keep the invariant.
  //
  // Below, `current` has the index of the current class, and `next` has
  // the index of the next class. If threading is enabled, they are either
  // (0, 1) or (1, 0).
  //
  // Note on single-threaded runs: in that case they are always (0, 0)
  // because we can safely read the next class without worrying about race
  // conditions. Using the same location makes this algorithm converge
  // faster because it uses results of the same iteration earlier.
  int current = 0;
  int next = 0;
};
}

// Returns true if section s is subject to ICF.
static bool isEligible(InputSection *s) {
  if (!s->isLive() || s->keepUnique || !(s->flags & SHF_ALLOC))
    return false;

  // Don't merge writable sections. .data.rel.ro sections are marked as writable
  // but are semantically read-only.
  if ((s->flags & SHF_WRITE) && s->name != ".data.rel.ro" &&
      !s->name.startswith(".data.rel.ro."))
    return false;

  // SHF_LINK_ORDER sections are ICF'd as a unit with their dependent sections,
  // so we don't consider them for ICF individually.
  if (s->flags & SHF_LINK_ORDER)
    return false;

  // Don't merge synthetic sections, as their Data member is not valid (it is
  // empty). The Data member needs to be valid for ICF, as ICF uses it to
  // determine the equality of section contents.
  if (isa<SyntheticSection>(s))
    return false;

  // .init and .fini contain instructions that must be executed to initialize
  // and finalize the process. They cannot and should not be merged.
  if (s->name == ".init" || s->name == ".fini")
    return false;

  // A user program may enumerate sections named with a C identifier using
  // __start_* and __stop_* symbols. We cannot ICF any such sections because
  // that could change program semantics.
  if (isValidCIdentifier(s->name))
    return false;

  return true;
}
// Split an equivalence class into smaller classes.
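// If `constant` is true, sections are compared with equalsConstant (contents
// and relocation fields other than their targets); otherwise they are
// compared with equalsVariable (relocation targets only).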
template <class ELFT>
void ICF<ELFT>::segregate(size_t begin, size_t end, uint32_t eqClassBase,
                          bool constant) {
  // This loop rearranges sections in [Begin, End) so that all sections
  // that are equal in terms of equals{Constant,Variable} are contiguous
  // in [Begin, End).
  //
  // The algorithm is quadratic in the worst case, but that is not an
  // issue in practice because the number of distinct sections in
  // each range is usually very small.

  while (begin < end) {
    // Divide [Begin, End) into two. Let Mid be the start index of the
    // second group.
    auto bound =
        std::stable_partition(sections.begin() + begin + 1,
                              sections.begin() + end, [&](InputSection *s) {
                                if (constant)
                                  return equalsConstant(sections[begin], s);
                                return equalsVariable(sections[begin], s);
                              });
    size_t mid = bound - sections.begin();

    // Now we split [Begin, End) into [Begin, Mid) and [Mid, End) by
    // updating the sections in [Begin, Mid). We use Mid as the basis for
    // the equivalence class ID because every group ends with a unique index.
    // Add this to eqClassBase to avoid equality with unique IDs.
    for (size_t i = begin; i < mid; ++i)
      sections[i]->eqClass[next] = eqClassBase + mid;

    // If we created a group, we need to iterate the main loop again.
    if (mid != end)
      repeat = true;

    begin = mid;
  }
}
// Compare two lists of relocations.
template <class ELFT>
template <class RelTy>
bool ICF<ELFT>::constantEq(const InputSection *secA, ArrayRef<RelTy> ra,
                           const InputSection *secB, ArrayRef<RelTy> rb) {
  if (ra.size() != rb.size())
    return false;
  for (size_t i = 0; i < ra.size(); ++i) {
    if (ra[i].r_offset != rb[i].r_offset ||
        ra[i].getType(config->isMips64EL) != rb[i].getType(config->isMips64EL))
      return false;

    uint64_t addA = getAddend<ELFT>(ra[i]);
    uint64_t addB = getAddend<ELFT>(rb[i]);

    Symbol &sa = secA->template getFile<ELFT>()->getRelocTargetSym(ra[i]);
    Symbol &sb = secB->template getFile<ELFT>()->getRelocTargetSym(rb[i]);
    if (&sa == &sb) {
      if (addA == addB)
        continue;
      return false;
    }

    auto *da = dyn_cast<Defined>(&sa);
    auto *db = dyn_cast<Defined>(&sb);

    // Placeholder symbols generated by linker scripts look the same now but
    // may have different values later.
    if (!da || !db || da->scriptDefined || db->scriptDefined)
      return false;

    // When comparing a pair of relocations, if they refer to different symbols,
    // and either symbol is preemptible, the containing sections should be
    // considered different. This is because even if the sections are identical
    // in this DSO, they may not be after preemption.
    if (da->isPreemptible || db->isPreemptible)
      return false;

    // Relocations referring to absolute symbols are constant-equal if their
    // values are equal.
    if (!da->section && !db->section && da->value + addA == db->value + addB)
      continue;
    if (!da->section || !db->section)
      return false;

    if (da->section->kind() != db->section->kind())
      return false;

    // Relocations referring to InputSections are constant-equal if their
    // section offsets are equal.
    if (isa<InputSection>(da->section)) {
      if (da->value + addA == db->value + addB)
        continue;
      return false;
    }

    // Relocations referring to MergeInputSections are constant-equal if their
    // offsets in the output section are equal.
    auto *x = dyn_cast<MergeInputSection>(da->section);
    if (!x)
      return false;
    auto *y = cast<MergeInputSection>(db->section);
    if (x->getParent() != y->getParent())
      return false;

    uint64_t offsetA =
        sa.isSection() ? x->getOffset(addA) : x->getOffset(da->value) + addA;
    uint64_t offsetB =
        sb.isSection() ? y->getOffset(addB) : y->getOffset(db->value) + addB;
    if (offsetA != offsetB)
      return false;
  }

  return true;
}

// Compare "non-moving" part of two InputSections, namely everything
// except relocation targets.
template <class ELFT>
bool ICF<ELFT>::equalsConstant(const InputSection *a, const InputSection *b) {
  if (a->flags != b->flags || a->getSize() != b->getSize() ||
      a->rawData != b->rawData)
    return false;

  // If two sections have different output sections, we cannot merge them.
  assert(a->getParent() && b->getParent());
  if (a->getParent() != b->getParent())
    return false;

  const RelsOrRelas<ELFT> ra = a->template relsOrRelas<ELFT>();
  const RelsOrRelas<ELFT> rb = b->template relsOrRelas<ELFT>();
  return ra.areRelocsRel() ? constantEq(a, ra.rels, b, rb.rels)
                           : constantEq(a, ra.relas, b, rb.relas);
}
// Compare two lists of relocations. Returns true if all pairs of
// relocations point to the same section in terms of ICF.
template <class ELFT>
template <class RelTy>
bool ICF<ELFT>::variableEq(const InputSection *secA, ArrayRef<RelTy> ra,
                           const InputSection *secB, ArrayRef<RelTy> rb) {
  assert(ra.size() == rb.size());

  for (size_t i = 0; i < ra.size(); ++i) {
    // The two sections must be identical.
    Symbol &sa = secA->template getFile<ELFT>()->getRelocTargetSym(ra[i]);
    Symbol &sb = secB->template getFile<ELFT>()->getRelocTargetSym(rb[i]);
    if (&sa == &sb)
      continue;

    auto *da = cast<Defined>(&sa);
    auto *db = cast<Defined>(&sb);

    // We already dealt with absolute and non-InputSection symbols in
    // constantEq, and for InputSections we have already checked everything
    // except the equivalence class.
    if (!da->section)
      continue;
    auto *x = dyn_cast<InputSection>(da->section);
    if (!x)
      continue;
    auto *y = cast<InputSection>(db->section);

    // Sections that are in the special equivalence class 0 can never be the
    // same in terms of the equivalence class.
    if (x->eqClass[current] == 0)
      return false;
    if (x->eqClass[current] != y->eqClass[current])
      return false;
  }

  return true;
}

// Compare "moving" part of two InputSections, namely relocation targets.
template <class ELFT>
bool ICF<ELFT>::equalsVariable(const InputSection *a, const InputSection *b) {
  const RelsOrRelas<ELFT> ra = a->template relsOrRelas<ELFT>();
  const RelsOrRelas<ELFT> rb = b->template relsOrRelas<ELFT>();
  return ra.areRelocsRel() ? variableEq(a, ra.rels, b, rb.rels)
                           : variableEq(a, ra.relas, b, rb.relas);
}
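// Returns the index of the first section in [begin, end) whose current
// equivalence class differs from that of sections[begin], or end if the
// whole range belongs to a single class.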
template <class ELFT> size_t ICF<ELFT>::findBoundary(size_t begin, size_t end) {
  uint32_t eqClass = sections[begin]->eqClass[current];
  for (size_t i = begin + 1; i < end; ++i)
    if (eqClass != sections[i]->eqClass[current])
      return i;
  return end;
}

// Sections in the same equivalence class are contiguous in the Sections
// vector. Therefore, the vector can be viewed as contiguous groups of
// sections, grouped by the class.
//
// This function calls Fn on every group within [Begin, End).
template <class ELFT>
void ICF<ELFT>::forEachClassRange(size_t begin, size_t end,
                                  llvm::function_ref<void(size_t, size_t)> fn) {
  while (begin < end) {
    size_t mid = findBoundary(begin, end);
    fn(begin, mid);
    begin = mid;
  }
}

// Call Fn on each equivalence class.
template <class ELFT>
void ICF<ELFT>::forEachClass(llvm::function_ref<void(size_t, size_t)> fn) {
  // If threading is disabled or the number of sections is too small to use
  // threading, call Fn sequentially.
  if (parallel::strategy.ThreadsRequested == 1 || sections.size() < 1024) {
    forEachClassRange(0, sections.size(), fn);
    ++cnt;
    return;
  }

  current = cnt % 2;
  next = (cnt + 1) % 2;

  // Shard into non-overlapping intervals, and call Fn in parallel.
  // The sharding must be completed before any calls to Fn are made
  // so that Fn can modify the Chunks in its shard without causing data
  // races.
  const size_t numShards = 256;
  size_t step = sections.size() / numShards;
  size_t boundaries[numShards + 1];
  boundaries[0] = 0;
  boundaries[numShards] = sections.size();

  parallelForEachN(1, numShards, [&](size_t i) {
    boundaries[i] = findBoundary((i - 1) * step, sections.size());
  });

  parallelForEachN(1, numShards + 1, [&](size_t i) {
    if (boundaries[i - 1] < boundaries[i])
      forEachClassRange(boundaries[i - 1], boundaries[i], fn);
  });
  ++cnt;
}

// Combine the hashes of the sections referenced by the given section into its
// hash.
template <class ELFT, class RelTy>
static void combineRelocHashes(unsigned cnt, InputSection *isec,
                               ArrayRef<RelTy> rels) {
  uint32_t hash = isec->eqClass[cnt % 2];
  for (RelTy rel : rels) {
    Symbol &s = isec->template getFile<ELFT>()->getRelocTargetSym(rel);
    if (auto *d = dyn_cast<Defined>(&s))
      if (auto *relSec = dyn_cast_or_null<InputSection>(d->section))
        hash += relSec->eqClass[cnt % 2];
  }
  // Set MSB to 1 to avoid collisions with unique IDs.
  isec->eqClass[(cnt + 1) % 2] = hash | (1U << 31);
}
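// Prints the given message if the --print-icf-sections option is enabled.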
static void print(const Twine &s) {
  if (config->printIcfSections)
    message(s);
}
// The main function of ICF.
template <class ELFT> void ICF<ELFT>::run() {
  // Compute isPreemptible early. We may add more symbols later, so this loop
  // cannot be merged with the later computeIsPreemptible() pass which is used
  // by scanRelocations().
  if (config->hasDynSymTab)
    for (Symbol *sym : symtab->symbols())
      sym->isPreemptible = computeIsPreemptible(*sym);

  // Two text sections may have identical content and relocations but different
  // LSDA, e.g. the two functions may have catch blocks of different types. If a
  // text section is referenced by a .eh_frame FDE with LSDA, it is not
  // eligible. This is implemented by iterating over CIE/FDE and setting
  // eqClass[0] to the referenced text section from a live FDE.
  //
  // If two .gcc_except_table sections have identical semantics (usually
  // identical content with PC-relative encoding), we will lose the folding
  // opportunity.
  uint32_t uniqueId = 0;
  for (Partition &part : partitions)
    part.ehFrame->iterateFDEWithLSDA<ELFT>(
        [&](InputSection &s) { s.eqClass[0] = s.eqClass[1] = ++uniqueId; });

  // Collect sections to merge.
  for (InputSectionBase *sec : inputSections) {
    auto *s = cast<InputSection>(sec);
    if (s->eqClass[0] == 0) {
      if (isEligible(s))
        sections.push_back(s);
      else
        // Ineligible sections are assigned unique IDs, i.e. each section
        // belongs to an equivalence class of its own.
        s->eqClass[0] = s->eqClass[1] = ++uniqueId;
    }
  }

  // Initially, we use hash values to partition sections.
  parallelForEach(sections, [&](InputSection *s) {
    // Set MSB to 1 to avoid collisions with unique IDs.
    s->eqClass[0] = xxHash64(s->rawData) | (1U << 31);
  });

  // Perform 2 rounds of relocation hash propagation. 2 is an empirical value:
  // it reduces the average size of equivalence classes, so segregate(), which
  // has a large time complexity, has less work to do.
  for (unsigned cnt = 0; cnt != 2; ++cnt) {
    parallelForEach(sections, [&](InputSection *s) {
      const RelsOrRelas<ELFT> rels = s->template relsOrRelas<ELFT>();
      if (rels.areRelocsRel())
        combineRelocHashes<ELFT>(cnt, s, rels.rels);
      else
        combineRelocHashes<ELFT>(cnt, s, rels.relas);
    });
  }

  // From now on, sections in the Sections vector are ordered so that sections
  // in the same equivalence class are consecutive in the vector.
  llvm::stable_sort(sections, [](const InputSection *a, const InputSection *b) {
    return a->eqClass[0] < b->eqClass[0];
  });

  // Compare static contents and assign unique equivalence class IDs for each
  // static content. Use a base offset for these IDs to ensure no overlap with
  // the unique IDs already assigned.
  uint32_t eqClassBase = ++uniqueId;
  forEachClass([&](size_t begin, size_t end) {
    segregate(begin, end, eqClassBase, true);
  });

  // Split groups by comparing relocations until convergence is obtained.
  do {
    repeat = false;
    forEachClass([&](size_t begin, size_t end) {
      segregate(begin, end, eqClassBase, false);
    });
  } while (repeat);

  log("ICF needed " + Twine(cnt) + " iterations");

  // Merge sections by the equivalence class.
  forEachClassRange(0, sections.size(), [&](size_t begin, size_t end) {
    if (end - begin == 1)
      return;
    print("selected section " + toString(sections[begin]));
    for (size_t i = begin + 1; i < end; ++i) {
      print("  removing identical section " + toString(sections[i]));
      sections[begin]->replace(sections[i]);

      // At this point we know the merged sections are fully identical, and
      // hence we want to remove duplicate implicit dependencies such as
      // link-order and relocation sections.
      for (InputSection *isec : sections[i]->dependentSections)
        isec->markDead();
    }
  });

  // Change each Defined symbol's section field to the canonical one.
  auto fold = [](Symbol *sym) {
    if (auto *d = dyn_cast<Defined>(sym))
      if (auto *sec = dyn_cast_or_null<InputSection>(d->section))
        if (sec->repl != d->section) {
          d->section = sec->repl;
          d->folded = true;
        }
  };
  for (Symbol *sym : symtab->symbols())
    fold(sym);
  parallelForEach(objectFiles, [&](ELFFileBase *file) {
    for (Symbol *sym : file->getLocalSymbols())
      fold(sym);
  });

  // InputSectionDescription::sections is populated by processSectionCommands().
  // ICF may fold some input sections assigned to output sections. Remove them.
  for (SectionCommand *cmd : script->sectionCommands)
    if (auto *osd = dyn_cast<OutputDesc>(cmd))
      for (SectionCommand *subCmd : osd->osec.commands)
        if (auto *isd = dyn_cast<InputSectionDescription>(subCmd))
          llvm::erase_if(isd->sections,
                         [](InputSection *isec) { return !isec->isLive(); });
}
// ICF entry point function.
template <class ELFT> void elf::doIcf() {
  llvm::TimeTraceScope timeScope("ICF");
  ICF<ELFT>().run();
}

template void elf::doIcf<ELF32LE>();
template void elf::doIcf<ELF32BE>();
template void elf::doIcf<ELF64LE>();
template void elf::doIcf<ELF64BE>();