2015-08-14 22:12:54 +08:00
|
|
|
//===- Chunks.h -------------------------------------------------*- C++ -*-===//
|
2015-05-29 03:09:30 +08:00
|
|
|
//
|
|
|
|
// The LLVM Linker
|
|
|
|
//
|
|
|
|
// This file is distributed under the University of Illinois Open Source
|
|
|
|
// License. See LICENSE.TXT for details.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
#ifndef LLD_COFF_CHUNKS_H
|
|
|
|
#define LLD_COFF_CHUNKS_H
|
|
|
|
|
2015-09-17 05:40:47 +08:00
|
|
|
#include "Config.h"
|
[opt] Replace the recursive walk for GC with a worklist algorithm.
This flattens the entire liveness walk from a recursive mark approach to
a worklist approach. It also sinks the worklist management completely
out of the SectionChunk and into the Writer by exposing the ability to
iterato over children of a chunk and over the symbol bodies of relocated
symbols. I'm not 100% happy with the API names, so suggestions welcome
there.
This allows us to use a single worklist for the entire recursive walk
and would also be a natural place to take advantage of parallelism at
some future point.
With this, we completely inline away the GC walk into the
Writer::markLive function and it makes it very easy to profile what is
slow. Currently, time is being wasted checking whether a Chunk isa
SectionChunk (it essentially always is), finding (or skipping)
a replacement for a symbol, and chasing pointers between symbols and
their chunks. There are a bunch of things we can do to fix this, and its
easier to do them after this change IMO.
This change alone saves 1-2% of the time for my self-link of lld.exe
(which I'm running and benchmarking on Linux ironically).
Perhaps more notably, we'll no longer blow out the stack for large
links. =]
Just as an FYI, at this point, I/O is starting to really dominate the
profile. Well over 10% of the time appears to be inside the kernel doing
page table silliness. I think a decent chunk of this can be nuked as
well, but it's a little odd as cross-linking in this way isn't really
the primary goal here.
Differential Revision: http://reviews.llvm.org/D10790
llvm-svn: 240995
2015-06-30 05:12:49 +08:00
|
|
|
#include "InputFiles.h"
|
2017-10-03 05:00:41 +08:00
|
|
|
#include "lld/Common/LLVM.h"
|
2015-05-29 03:09:30 +08:00
|
|
|
#include "llvm/ADT/ArrayRef.h"
|
[opt] Replace the recursive walk for GC with a worklist algorithm.
This flattens the entire liveness walk from a recursive mark approach to
a worklist approach. It also sinks the worklist management completely
out of the SectionChunk and into the Writer by exposing the ability to
iterato over children of a chunk and over the symbol bodies of relocated
symbols. I'm not 100% happy with the API names, so suggestions welcome
there.
This allows us to use a single worklist for the entire recursive walk
and would also be a natural place to take advantage of parallelism at
some future point.
With this, we completely inline away the GC walk into the
Writer::markLive function and it makes it very easy to profile what is
slow. Currently, time is being wasted checking whether a Chunk isa
SectionChunk (it essentially always is), finding (or skipping)
a replacement for a symbol, and chasing pointers between symbols and
their chunks. There are a bunch of things we can do to fix this, and its
easier to do them after this change IMO.
This change alone saves 1-2% of the time for my self-link of lld.exe
(which I'm running and benchmarking on Linux ironically).
Perhaps more notably, we'll no longer blow out the stack for large
links. =]
Just as an FYI, at this point, I/O is starting to really dominate the
profile. Well over 10% of the time appears to be inside the kernel doing
page table silliness. I think a decent chunk of this can be nuked as
well, but it's a little odd as cross-linking in this way isn't really
the primary goal here.
Differential Revision: http://reviews.llvm.org/D10790
llvm-svn: 240995
2015-06-30 05:12:49 +08:00
|
|
|
#include "llvm/ADT/iterator.h"
|
2015-06-25 08:33:38 +08:00
|
|
|
#include "llvm/ADT/iterator_range.h"
|
2015-05-29 03:09:30 +08:00
|
|
|
#include "llvm/Object/COFF.h"
|
2016-06-04 00:57:13 +08:00
|
|
|
#include <utility>
|
2015-05-29 03:09:30 +08:00
|
|
|
#include <vector>
|
|
|
|
|
|
|
|
namespace lld {
|
|
|
|
namespace coff {
|
|
|
|
|
|
|
|
using llvm::COFF::ImportDirectoryTableEntry;
|
|
|
|
using llvm::object::COFFSymbolRef;
|
|
|
|
using llvm::object::SectionRef;
|
|
|
|
using llvm::object::coff_relocation;
|
|
|
|
using llvm::object::coff_section;
|
|
|
|
|
2015-07-25 09:44:32 +08:00
|
|
|
class Baserel;
|
2015-05-29 03:09:30 +08:00
|
|
|
class Defined;
|
|
|
|
class DefinedImportData;
|
2015-07-25 09:44:32 +08:00
|
|
|
class DefinedRegular;
|
2017-07-27 07:05:24 +08:00
|
|
|
class ObjFile;
|
2015-05-29 03:09:30 +08:00
|
|
|
class OutputSection;
|
2017-11-04 05:21:47 +08:00
|
|
|
class Symbol;
|
2015-05-29 03:09:30 +08:00
|
|
|
|
2015-08-06 03:51:28 +08:00
|
|
|
// Mask for section types (code, data, bss, discardable, etc.)
// and permissions (writable, readable or executable).
const uint32_t PermMask = 0xFF0000F0;
|
|
|
|
|
2015-05-29 03:09:30 +08:00
|
|
|
// A Chunk represents a chunk of data that will occupy space in the
|
|
|
|
// output (if the resolver chose that). It may or may not be backed by
|
|
|
|
// a section of an input file. It could be linker-created data, or
|
|
|
|
// doesn't even have actual data (if common or bss).
|
|
|
|
class Chunk {
|
|
|
|
public:
|
2015-06-26 03:10:58 +08:00
|
|
|
enum Kind { SectionKind, OtherKind };
|
|
|
|
Kind kind() const { return ChunkKind; }
|
2015-05-29 03:09:30 +08:00
|
|
|
virtual ~Chunk() = default;
|
|
|
|
|
|
|
|
// Returns the size of this chunk (even if this is a common or BSS.)
|
|
|
|
virtual size_t getSize() const = 0;
|
|
|
|
|
2015-06-06 12:07:39 +08:00
|
|
|
// Write this chunk to a mmap'ed file, assuming Buf is pointing to
|
|
|
|
// beginning of the file. Because this function may use RVA values
|
|
|
|
// of other chunks for relocations, you need to set them properly
|
|
|
|
// before calling this function.
|
2015-09-20 07:28:57 +08:00
|
|
|
virtual void writeTo(uint8_t *Buf) const {}
|
2015-05-29 03:45:43 +08:00
|
|
|
|
2015-05-29 03:09:30 +08:00
|
|
|
// The writer sets and uses the addresses.
|
2015-09-26 00:50:12 +08:00
|
|
|
uint64_t getRVA() const { return RVA; }
|
2015-05-29 03:09:30 +08:00
|
|
|
void setRVA(uint64_t V) { RVA = V; }
|
|
|
|
|
2015-05-29 03:45:43 +08:00
|
|
|
// Returns true if this has non-zero data. BSS chunks return
|
|
|
|
// false. If false is returned, the space occupied by this chunk
|
|
|
|
// will be filled with zeros.
|
2015-05-29 03:09:30 +08:00
|
|
|
virtual bool hasData() const { return true; }
|
|
|
|
|
|
|
|
// Returns readable/writable/executable bits.
|
|
|
|
virtual uint32_t getPermissions() const { return 0; }
|
|
|
|
|
|
|
|
// Returns the section name if this is a section chunk.
|
|
|
|
// It is illegal to call this function on non-section chunks.
|
|
|
|
virtual StringRef getSectionName() const {
|
|
|
|
llvm_unreachable("unimplemented getSectionName");
|
|
|
|
}
|
|
|
|
|
|
|
|
// An output section has pointers to chunks in the section, and each
|
|
|
|
// chunk has a back pointer to an output section.
|
|
|
|
void setOutputSection(OutputSection *O) { Out = O; }
|
2017-08-03 07:19:54 +08:00
|
|
|
OutputSection *getOutputSection() const { return Out; }
|
2015-05-29 03:09:30 +08:00
|
|
|
|
2015-06-15 09:23:58 +08:00
|
|
|
// Windows-specific.
|
|
|
|
// Collect all locations that contain absolute addresses for base relocations.
|
2015-07-25 09:44:32 +08:00
|
|
|
virtual void getBaserels(std::vector<Baserel> *Res) {}
|
2015-06-15 09:23:58 +08:00
|
|
|
|
2015-06-24 08:00:52 +08:00
|
|
|
// Returns a human-readable name of this chunk. Chunks are unnamed chunks of
|
|
|
|
// bytes, so this is used only for logging or debugging.
|
|
|
|
virtual StringRef getDebugName() { return ""; }
|
|
|
|
|
2017-09-14 05:54:55 +08:00
|
|
|
// The alignment of this chunk. The writer uses the value.
|
|
|
|
uint32_t Alignment = 1;
|
|
|
|
|
2015-05-29 03:09:30 +08:00
|
|
|
protected:
|
2015-06-26 03:10:58 +08:00
|
|
|
Chunk(Kind K = OtherKind) : ChunkKind(K) {}
|
|
|
|
const Kind ChunkKind;
|
|
|
|
|
2015-05-29 03:09:30 +08:00
|
|
|
// The RVA of this chunk in the output. The writer sets a value.
|
|
|
|
uint64_t RVA = 0;
|
|
|
|
|
2017-09-14 05:54:55 +08:00
|
|
|
// The output section for this chunk.
|
|
|
|
OutputSection *Out = nullptr;
|
|
|
|
|
2017-06-20 01:21:45 +08:00
|
|
|
public:
|
2015-08-14 11:30:59 +08:00
|
|
|
// The offset from beginning of the output section. The writer sets a value.
|
|
|
|
uint64_t OutputSectionOff = 0;
|
2015-05-29 03:09:30 +08:00
|
|
|
};
|
|
|
|
|
|
|
|
// A chunk corresponding a section of an input file.
|
2017-07-14 04:29:59 +08:00
|
|
|
class SectionChunk final : public Chunk {
|
2015-09-16 22:19:10 +08:00
|
|
|
// Identical COMDAT Folding feature accesses section internal data.
|
|
|
|
friend class ICF;
|
|
|
|
|
2015-05-29 03:09:30 +08:00
|
|
|
public:
|
[opt] Replace the recursive walk for GC with a worklist algorithm.
This flattens the entire liveness walk from a recursive mark approach to
a worklist approach. It also sinks the worklist management completely
out of the SectionChunk and into the Writer by exposing the ability to
iterato over children of a chunk and over the symbol bodies of relocated
symbols. I'm not 100% happy with the API names, so suggestions welcome
there.
This allows us to use a single worklist for the entire recursive walk
and would also be a natural place to take advantage of parallelism at
some future point.
With this, we completely inline away the GC walk into the
Writer::markLive function and it makes it very easy to profile what is
slow. Currently, time is being wasted checking whether a Chunk isa
SectionChunk (it essentially always is), finding (or skipping)
a replacement for a symbol, and chasing pointers between symbols and
their chunks. There are a bunch of things we can do to fix this, and its
easier to do them after this change IMO.
This change alone saves 1-2% of the time for my self-link of lld.exe
(which I'm running and benchmarking on Linux ironically).
Perhaps more notably, we'll no longer blow out the stack for large
links. =]
Just as an FYI, at this point, I/O is starting to really dominate the
profile. Well over 10% of the time appears to be inside the kernel doing
page table silliness. I think a decent chunk of this can be nuked as
well, but it's a little odd as cross-linking in this way isn't really
the primary goal here.
Differential Revision: http://reviews.llvm.org/D10790
llvm-svn: 240995
2015-06-30 05:12:49 +08:00
|
|
|
class symbol_iterator : public llvm::iterator_adaptor_base<
|
|
|
|
symbol_iterator, const coff_relocation *,
|
2017-11-04 05:21:47 +08:00
|
|
|
std::random_access_iterator_tag, Symbol *> {
|
[opt] Replace the recursive walk for GC with a worklist algorithm.
This flattens the entire liveness walk from a recursive mark approach to
a worklist approach. It also sinks the worklist management completely
out of the SectionChunk and into the Writer by exposing the ability to
iterato over children of a chunk and over the symbol bodies of relocated
symbols. I'm not 100% happy with the API names, so suggestions welcome
there.
This allows us to use a single worklist for the entire recursive walk
and would also be a natural place to take advantage of parallelism at
some future point.
With this, we completely inline away the GC walk into the
Writer::markLive function and it makes it very easy to profile what is
slow. Currently, time is being wasted checking whether a Chunk isa
SectionChunk (it essentially always is), finding (or skipping)
a replacement for a symbol, and chasing pointers between symbols and
their chunks. There are a bunch of things we can do to fix this, and its
easier to do them after this change IMO.
This change alone saves 1-2% of the time for my self-link of lld.exe
(which I'm running and benchmarking on Linux ironically).
Perhaps more notably, we'll no longer blow out the stack for large
links. =]
Just as an FYI, at this point, I/O is starting to really dominate the
profile. Well over 10% of the time appears to be inside the kernel doing
page table silliness. I think a decent chunk of this can be nuked as
well, but it's a little odd as cross-linking in this way isn't really
the primary goal here.
Differential Revision: http://reviews.llvm.org/D10790
llvm-svn: 240995
2015-06-30 05:12:49 +08:00
|
|
|
friend SectionChunk;
|
|
|
|
|
2017-07-27 07:05:24 +08:00
|
|
|
ObjFile *File;
|
[opt] Replace the recursive walk for GC with a worklist algorithm.
This flattens the entire liveness walk from a recursive mark approach to
a worklist approach. It also sinks the worklist management completely
out of the SectionChunk and into the Writer by exposing the ability to
iterato over children of a chunk and over the symbol bodies of relocated
symbols. I'm not 100% happy with the API names, so suggestions welcome
there.
This allows us to use a single worklist for the entire recursive walk
and would also be a natural place to take advantage of parallelism at
some future point.
With this, we completely inline away the GC walk into the
Writer::markLive function and it makes it very easy to profile what is
slow. Currently, time is being wasted checking whether a Chunk isa
SectionChunk (it essentially always is), finding (or skipping)
a replacement for a symbol, and chasing pointers between symbols and
their chunks. There are a bunch of things we can do to fix this, and its
easier to do them after this change IMO.
This change alone saves 1-2% of the time for my self-link of lld.exe
(which I'm running and benchmarking on Linux ironically).
Perhaps more notably, we'll no longer blow out the stack for large
links. =]
Just as an FYI, at this point, I/O is starting to really dominate the
profile. Well over 10% of the time appears to be inside the kernel doing
page table silliness. I think a decent chunk of this can be nuked as
well, but it's a little odd as cross-linking in this way isn't really
the primary goal here.
Differential Revision: http://reviews.llvm.org/D10790
llvm-svn: 240995
2015-06-30 05:12:49 +08:00
|
|
|
|
2017-07-27 07:05:24 +08:00
|
|
|
symbol_iterator(ObjFile *File, const coff_relocation *I)
|
[opt] Replace the recursive walk for GC with a worklist algorithm.
This flattens the entire liveness walk from a recursive mark approach to
a worklist approach. It also sinks the worklist management completely
out of the SectionChunk and into the Writer by exposing the ability to
iterato over children of a chunk and over the symbol bodies of relocated
symbols. I'm not 100% happy with the API names, so suggestions welcome
there.
This allows us to use a single worklist for the entire recursive walk
and would also be a natural place to take advantage of parallelism at
some future point.
With this, we completely inline away the GC walk into the
Writer::markLive function and it makes it very easy to profile what is
slow. Currently, time is being wasted checking whether a Chunk isa
SectionChunk (it essentially always is), finding (or skipping)
a replacement for a symbol, and chasing pointers between symbols and
their chunks. There are a bunch of things we can do to fix this, and its
easier to do them after this change IMO.
This change alone saves 1-2% of the time for my self-link of lld.exe
(which I'm running and benchmarking on Linux ironically).
Perhaps more notably, we'll no longer blow out the stack for large
links. =]
Just as an FYI, at this point, I/O is starting to really dominate the
profile. Well over 10% of the time appears to be inside the kernel doing
page table silliness. I think a decent chunk of this can be nuked as
well, but it's a little odd as cross-linking in this way isn't really
the primary goal here.
Differential Revision: http://reviews.llvm.org/D10790
llvm-svn: 240995
2015-06-30 05:12:49 +08:00
|
|
|
: symbol_iterator::iterator_adaptor_base(I), File(File) {}
|
|
|
|
|
|
|
|
public:
|
|
|
|
symbol_iterator() = default;
|
|
|
|
|
2017-11-04 05:21:47 +08:00
|
|
|
Symbol *operator*() const { return File->getSymbol(I->SymbolTableIndex); }
|
[opt] Replace the recursive walk for GC with a worklist algorithm.
This flattens the entire liveness walk from a recursive mark approach to
a worklist approach. It also sinks the worklist management completely
out of the SectionChunk and into the Writer by exposing the ability to
iterato over children of a chunk and over the symbol bodies of relocated
symbols. I'm not 100% happy with the API names, so suggestions welcome
there.
This allows us to use a single worklist for the entire recursive walk
and would also be a natural place to take advantage of parallelism at
some future point.
With this, we completely inline away the GC walk into the
Writer::markLive function and it makes it very easy to profile what is
slow. Currently, time is being wasted checking whether a Chunk isa
SectionChunk (it essentially always is), finding (or skipping)
a replacement for a symbol, and chasing pointers between symbols and
their chunks. There are a bunch of things we can do to fix this, and its
easier to do them after this change IMO.
This change alone saves 1-2% of the time for my self-link of lld.exe
(which I'm running and benchmarking on Linux ironically).
Perhaps more notably, we'll no longer blow out the stack for large
links. =]
Just as an FYI, at this point, I/O is starting to really dominate the
profile. Well over 10% of the time appears to be inside the kernel doing
page table silliness. I think a decent chunk of this can be nuked as
well, but it's a little odd as cross-linking in this way isn't really
the primary goal here.
Differential Revision: http://reviews.llvm.org/D10790
llvm-svn: 240995
2015-06-30 05:12:49 +08:00
|
|
|
};
|
|
|
|
|
2017-07-27 07:05:24 +08:00
|
|
|
SectionChunk(ObjFile *File, const coff_section *Header);
|
2015-06-26 03:10:58 +08:00
|
|
|
static bool classof(const Chunk *C) { return C->kind() == SectionKind; }
|
2015-05-29 03:09:30 +08:00
|
|
|
size_t getSize() const override { return Header->SizeOfRawData; }
|
2016-03-15 17:48:27 +08:00
|
|
|
ArrayRef<uint8_t> getContents() const;
|
2015-09-20 07:28:57 +08:00
|
|
|
void writeTo(uint8_t *Buf) const override;
|
2015-05-29 03:09:30 +08:00
|
|
|
bool hasData() const override;
|
|
|
|
uint32_t getPermissions() const override;
|
|
|
|
StringRef getSectionName() const override { return SectionName; }
|
2015-07-25 09:44:32 +08:00
|
|
|
void getBaserels(std::vector<Baserel> *Res) override;
|
2015-06-26 03:10:58 +08:00
|
|
|
bool isCOMDAT() const;
|
[COFF] Allow debug info to relocate against discarded symbols
Summary:
In order to do this without switching on the symbol kind multiple times,
I created Defined::getChunkAndOffset and use that instead of
SymbolBody::getRVA in the inner relocation loop.
Now we get the symbol's chunk before switching over relocation types, so
we can test if it has been discarded outside the inner relocation type
switch. This also simplifies application of section relative
relocations. Previously we would switch on symbol kind to compute the
RVA, then the relocation type, and then the symbol kind again to get the
output section so we could subtract that from the symbol RVA. Now we
*always* have an OutputSection, so applying SECREL and SECTION
relocations isn't as much of a special case.
I'm still not quite happy with the cleanliness of this code. I'm not
sure what offsets and bases we should be using during the relocation
processing loop: VA, RVA, or OutputSectionOffset.
Reviewers: ruiu, pcc
Reviewed By: ruiu
Subscribers: majnemer, inglorion, llvm-commits, aprantl
Differential Revision: https://reviews.llvm.org/D34650
llvm-svn: 306566
2017-06-29 01:06:35 +08:00
|
|
|
void applyRelX64(uint8_t *Off, uint16_t Type, OutputSection *OS, uint64_t S,
|
|
|
|
uint64_t P) const;
|
|
|
|
void applyRelX86(uint8_t *Off, uint16_t Type, OutputSection *OS, uint64_t S,
|
|
|
|
uint64_t P) const;
|
|
|
|
void applyRelARM(uint8_t *Off, uint16_t Type, OutputSection *OS, uint64_t S,
|
|
|
|
uint64_t P) const;
|
2017-07-11 15:22:44 +08:00
|
|
|
void applyRelARM64(uint8_t *Off, uint16_t Type, OutputSection *OS, uint64_t S,
|
|
|
|
uint64_t P) const;
|
2015-06-26 03:10:58 +08:00
|
|
|
|
|
|
|
// Called if the garbage collector decides to not include this chunk
|
|
|
|
// in a final output. It's supposed to print out a log message to stdout.
|
|
|
|
void printDiscardedMessage() const;
|
2015-05-29 03:09:30 +08:00
|
|
|
|
|
|
|
// Adds COMDAT associative sections to this COMDAT section. A chunk
|
|
|
|
// and its children are treated as a group by the garbage collector.
|
|
|
|
void addAssociative(SectionChunk *Child);
|
|
|
|
|
2015-06-24 08:00:52 +08:00
|
|
|
StringRef getDebugName() override;
|
|
|
|
|
2017-11-28 09:30:07 +08:00
|
|
|
// Returns true if the chunk was not dropped by GC.
|
|
|
|
bool isLive() { return Live; }
|
2017-06-17 04:47:19 +08:00
|
|
|
|
2015-06-26 03:10:58 +08:00
|
|
|
// Used by the garbage collector.
|
[opt] Replace the recursive walk for GC with a worklist algorithm.
This flattens the entire liveness walk from a recursive mark approach to
a worklist approach. It also sinks the worklist management completely
out of the SectionChunk and into the Writer by exposing the ability to
iterato over children of a chunk and over the symbol bodies of relocated
symbols. I'm not 100% happy with the API names, so suggestions welcome
there.
This allows us to use a single worklist for the entire recursive walk
and would also be a natural place to take advantage of parallelism at
some future point.
With this, we completely inline away the GC walk into the
Writer::markLive function and it makes it very easy to profile what is
slow. Currently, time is being wasted checking whether a Chunk isa
SectionChunk (it essentially always is), finding (or skipping)
a replacement for a symbol, and chasing pointers between symbols and
their chunks. There are a bunch of things we can do to fix this, and its
easier to do them after this change IMO.
This change alone saves 1-2% of the time for my self-link of lld.exe
(which I'm running and benchmarking on Linux ironically).
Perhaps more notably, we'll no longer blow out the stack for large
links. =]
Just as an FYI, at this point, I/O is starting to really dominate the
profile. Well over 10% of the time appears to be inside the kernel doing
page table silliness. I think a decent chunk of this can be nuked as
well, but it's a little odd as cross-linking in this way isn't really
the primary goal here.
Differential Revision: http://reviews.llvm.org/D10790
llvm-svn: 240995
2015-06-30 05:12:49 +08:00
|
|
|
void markLive() {
|
2017-06-17 04:47:19 +08:00
|
|
|
assert(Config->DoGC && "should only mark things live from GC");
|
2015-09-17 05:40:47 +08:00
|
|
|
assert(!isLive() && "Cannot mark an already live section!");
|
[opt] Replace the recursive walk for GC with a worklist algorithm.
This flattens the entire liveness walk from a recursive mark approach to
a worklist approach. It also sinks the worklist management completely
out of the SectionChunk and into the Writer by exposing the ability to
iterato over children of a chunk and over the symbol bodies of relocated
symbols. I'm not 100% happy with the API names, so suggestions welcome
there.
This allows us to use a single worklist for the entire recursive walk
and would also be a natural place to take advantage of parallelism at
some future point.
With this, we completely inline away the GC walk into the
Writer::markLive function and it makes it very easy to profile what is
slow. Currently, time is being wasted checking whether a Chunk isa
SectionChunk (it essentially always is), finding (or skipping)
a replacement for a symbol, and chasing pointers between symbols and
their chunks. There are a bunch of things we can do to fix this, and its
easier to do them after this change IMO.
This change alone saves 1-2% of the time for my self-link of lld.exe
(which I'm running and benchmarking on Linux ironically).
Perhaps more notably, we'll no longer blow out the stack for large
links. =]
Just as an FYI, at this point, I/O is starting to really dominate the
profile. Well over 10% of the time appears to be inside the kernel doing
page table silliness. I think a decent chunk of this can be nuked as
well, but it's a little odd as cross-linking in this way isn't really
the primary goal here.
Differential Revision: http://reviews.llvm.org/D10790
llvm-svn: 240995
2015-06-30 05:12:49 +08:00
|
|
|
Live = true;
|
|
|
|
}
|
|
|
|
|
2017-06-21 01:14:09 +08:00
|
|
|
// True if this is a codeview debug info chunk. These will not be laid out in
|
|
|
|
// the image. Instead they will end up in the PDB, if one is requested.
|
|
|
|
bool isCodeView() const {
|
|
|
|
return SectionName == ".debug" || SectionName.startswith(".debug$");
|
|
|
|
}
|
|
|
|
|
2017-10-10 14:05:29 +08:00
|
|
|
// True if this is a DWARF debug info or exception handling chunk.
|
|
|
|
bool isDWARF() const {
|
|
|
|
return SectionName.startswith(".debug_") || SectionName == ".eh_frame";
|
|
|
|
}
|
2017-07-18 23:11:05 +08:00
|
|
|
|
[opt] Replace the recursive walk for GC with a worklist algorithm.
This flattens the entire liveness walk from a recursive mark approach to
a worklist approach. It also sinks the worklist management completely
out of the SectionChunk and into the Writer by exposing the ability to
iterato over children of a chunk and over the symbol bodies of relocated
symbols. I'm not 100% happy with the API names, so suggestions welcome
there.
This allows us to use a single worklist for the entire recursive walk
and would also be a natural place to take advantage of parallelism at
some future point.
With this, we completely inline away the GC walk into the
Writer::markLive function and it makes it very easy to profile what is
slow. Currently, time is being wasted checking whether a Chunk isa
SectionChunk (it essentially always is), finding (or skipping)
a replacement for a symbol, and chasing pointers between symbols and
their chunks. There are a bunch of things we can do to fix this, and its
easier to do them after this change IMO.
This change alone saves 1-2% of the time for my self-link of lld.exe
(which I'm running and benchmarking on Linux ironically).
Perhaps more notably, we'll no longer blow out the stack for large
links. =]
Just as an FYI, at this point, I/O is starting to really dominate the
profile. Well over 10% of the time appears to be inside the kernel doing
page table silliness. I think a decent chunk of this can be nuked as
well, but it's a little odd as cross-linking in this way isn't really
the primary goal here.
Differential Revision: http://reviews.llvm.org/D10790
llvm-svn: 240995
2015-06-30 05:12:49 +08:00
|
|
|
// Allow iteration over the bodies of this chunk's relocated symbols.
|
|
|
|
llvm::iterator_range<symbol_iterator> symbols() const {
|
|
|
|
return llvm::make_range(symbol_iterator(File, Relocs.begin()),
|
|
|
|
symbol_iterator(File, Relocs.end()));
|
|
|
|
}
|
|
|
|
|
|
|
|
// Allow iteration over the associated child chunks for this section.
|
|
|
|
ArrayRef<SectionChunk *> children() const { return AssocChildren; }
|
2015-06-24 12:36:52 +08:00
|
|
|
|
|
|
|
// A pointer pointing to a replacement for this chunk.
|
|
|
|
// Initially it points to "this" object. If this chunk is merged
|
|
|
|
// with other chunk by ICF, it points to another chunk,
|
|
|
|
// and this chunk is considrered as dead.
|
2015-09-26 00:20:24 +08:00
|
|
|
SectionChunk *Repl;
|
2015-06-24 12:36:52 +08:00
|
|
|
|
2015-09-05 04:45:50 +08:00
|
|
|
// The CRC of the contents as described in the COFF spec 4.5.5.
|
|
|
|
// Auxiliary Format 5: Section Definitions. Used for ICF.
|
|
|
|
uint32_t Checksum = 0;
|
|
|
|
|
2016-11-12 08:00:51 +08:00
|
|
|
const coff_section *Header;
|
|
|
|
|
2017-01-14 11:14:46 +08:00
|
|
|
// The file that this chunk was created from.
|
2017-07-27 07:05:24 +08:00
|
|
|
ObjFile *File;
|
2015-06-26 06:00:42 +08:00
|
|
|
|
2017-11-28 09:30:07 +08:00
|
|
|
// The COMDAT leader symbol if this is a COMDAT chunk.
|
|
|
|
DefinedRegular *Sym = nullptr;
|
|
|
|
|
2017-01-14 11:14:46 +08:00
|
|
|
private:
|
2015-05-29 03:09:30 +08:00
|
|
|
StringRef SectionName;
|
2015-06-28 09:30:54 +08:00
|
|
|
std::vector<SectionChunk *> AssocChildren;
|
2015-06-25 08:33:38 +08:00
|
|
|
llvm::iterator_range<const coff_relocation *> Relocs;
|
2015-06-26 01:43:37 +08:00
|
|
|
size_t NumRelocs;
|
2015-06-24 08:00:52 +08:00
|
|
|
|
2015-06-26 03:10:58 +08:00
|
|
|
// Used by the garbage collector.
|
2015-09-17 05:40:47 +08:00
|
|
|
bool Live;
|
2015-06-26 03:10:58 +08:00
|
|
|
|
2015-09-16 22:19:10 +08:00
|
|
|
// Used for ICF (Identical COMDAT Folding)
|
2015-09-22 03:36:51 +08:00
|
|
|
void replace(SectionChunk *Other);
|
2017-05-06 07:52:24 +08:00
|
|
|
uint32_t Class[2] = {0, 0};
|
2015-05-29 03:09:30 +08:00
|
|
|
};
|
|
|
|
|
|
|
|
// A chunk for common symbols. Common chunks don't have actual data.
|
|
|
|
class CommonChunk : public Chunk {
|
|
|
|
public:
|
2015-06-08 11:17:07 +08:00
|
|
|
CommonChunk(const COFFSymbolRef Sym);
|
2015-05-29 03:09:30 +08:00
|
|
|
size_t getSize() const override { return Sym.getValue(); }
|
|
|
|
bool hasData() const override { return false; }
|
|
|
|
uint32_t getPermissions() const override;
|
|
|
|
StringRef getSectionName() const override { return ".bss"; }
|
|
|
|
|
|
|
|
private:
|
|
|
|
const COFFSymbolRef Sym;
|
|
|
|
};
|
|
|
|
|
|
|
|
// A chunk for linker-created strings.
|
|
|
|
class StringChunk : public Chunk {
|
|
|
|
public:
|
2015-05-29 03:45:43 +08:00
|
|
|
explicit StringChunk(StringRef S) : Str(S) {}
|
|
|
|
size_t getSize() const override { return Str.size() + 1; }
|
2015-09-20 07:28:57 +08:00
|
|
|
void writeTo(uint8_t *Buf) const override;
|
2015-05-29 03:09:30 +08:00
|
|
|
|
|
|
|
private:
|
2015-05-29 03:45:43 +08:00
|
|
|
StringRef Str;
|
2015-05-29 03:09:30 +08:00
|
|
|
};
|
|
|
|
|
2015-07-25 09:16:06 +08:00
|
|
|
// Import thunk template shared by x86 and x64: an indirect jump through
// the IAT slot. The operand is filled in by the thunk chunk's writeTo.
static const uint8_t ImportThunkX86[] = {
    0xff, 0x25, 0x00, 0x00, 0x00, 0x00, // JMP *0x0
};
|
|
|
|
|
2015-07-25 11:39:29 +08:00
|
|
|
// ARM (Thumb-2) import thunk template: materialize the IAT slot address
// into ip and jump through it. The MOVW/MOVT immediates are patched in.
static const uint8_t ImportThunkARM[] = {
    0x40, 0xf2, 0x00, 0x0c, // mov.w ip, #0
    0xc0, 0xf2, 0x00, 0x0c, // mov.t ip, #0
    0xdc, 0xf8, 0x00, 0xf0, // ldr.w pc, [ip]
};
|
|
|
|
|
2017-07-11 15:22:44 +08:00
|
|
|
// ARM64 import thunk template: adrp/ldr the IAT slot into x16 and branch
// through it. The adrp/ldr immediates are patched in by writeTo.
static const uint8_t ImportThunkARM64[] = {
    0x10, 0x00, 0x00, 0x90, // adrp x16, #0
    0x10, 0x02, 0x40, 0xf9, // ldr  x16, [x16]
    0x00, 0x02, 0x1f, 0xd6, // br   x16
};
|
|
|
|
|
2015-06-07 09:15:04 +08:00
|
|
|
// Windows-specific.
|
2015-05-29 03:09:30 +08:00
|
|
|
// A chunk for DLL import jump table entry. In a final output, it's
|
|
|
|
// contents will be a JMP instruction to some __imp_ symbol.
|
2015-07-25 09:16:06 +08:00
|
|
|
class ImportThunkChunkX64 : public Chunk {
|
2015-05-29 03:09:30 +08:00
|
|
|
public:
|
2015-07-25 09:16:06 +08:00
|
|
|
explicit ImportThunkChunkX64(Defined *S);
|
|
|
|
size_t getSize() const override { return sizeof(ImportThunkX86); }
|
2015-09-20 07:28:57 +08:00
|
|
|
void writeTo(uint8_t *Buf) const override;
|
2015-07-25 09:16:06 +08:00
|
|
|
|
|
|
|
private:
|
|
|
|
Defined *ImpSymbol;
|
|
|
|
};
|
|
|
|
|
|
|
|
class ImportThunkChunkX86 : public Chunk {
|
|
|
|
public:
|
|
|
|
explicit ImportThunkChunkX86(Defined *S) : ImpSymbol(S) {}
|
|
|
|
size_t getSize() const override { return sizeof(ImportThunkX86); }
|
2015-07-25 09:44:32 +08:00
|
|
|
void getBaserels(std::vector<Baserel> *Res) override;
|
2015-09-20 07:28:57 +08:00
|
|
|
void writeTo(uint8_t *Buf) const override;
|
2015-05-29 03:09:30 +08:00
|
|
|
|
|
|
|
private:
|
|
|
|
Defined *ImpSymbol;
|
|
|
|
};
|
|
|
|
|
2015-07-25 11:39:29 +08:00
|
|
|
class ImportThunkChunkARM : public Chunk {
|
|
|
|
public:
|
|
|
|
explicit ImportThunkChunkARM(Defined *S) : ImpSymbol(S) {}
|
|
|
|
size_t getSize() const override { return sizeof(ImportThunkARM); }
|
|
|
|
void getBaserels(std::vector<Baserel> *Res) override;
|
2015-09-20 07:28:57 +08:00
|
|
|
void writeTo(uint8_t *Buf) const override;
|
2015-07-25 11:39:29 +08:00
|
|
|
|
|
|
|
private:
|
|
|
|
Defined *ImpSymbol;
|
|
|
|
};
|
|
|
|
|
2017-07-11 15:22:44 +08:00
|
|
|
class ImportThunkChunkARM64 : public Chunk {
|
|
|
|
public:
|
|
|
|
explicit ImportThunkChunkARM64(Defined *S) : ImpSymbol(S) {}
|
|
|
|
size_t getSize() const override { return sizeof(ImportThunkARM64); }
|
|
|
|
void writeTo(uint8_t *Buf) const override;
|
|
|
|
|
|
|
|
private:
|
|
|
|
Defined *ImpSymbol;
|
|
|
|
};
|
|
|
|
|
2015-06-25 11:31:47 +08:00
|
|
|
// Windows-specific.
|
|
|
|
// See comments for DefinedLocalImport class.
|
|
|
|
class LocalImportChunk : public Chunk {
|
|
|
|
public:
|
|
|
|
explicit LocalImportChunk(Defined *S) : Sym(S) {}
|
2015-07-10 05:15:58 +08:00
|
|
|
size_t getSize() const override;
|
2015-07-25 09:44:32 +08:00
|
|
|
void getBaserels(std::vector<Baserel> *Res) override;
|
2015-09-20 07:28:57 +08:00
|
|
|
void writeTo(uint8_t *Buf) const override;
|
2015-06-25 11:31:47 +08:00
|
|
|
|
|
|
|
private:
|
|
|
|
Defined *Sym;
|
|
|
|
};
|
|
|
|
|
2015-07-25 07:51:14 +08:00
|
|
|
// Windows-specific.
|
|
|
|
// A chunk for SEH table which contains RVAs of safe exception handler
|
|
|
|
// functions. x86-only.
|
|
|
|
class SEHTableChunk : public Chunk {
|
|
|
|
public:
|
2016-06-04 00:57:13 +08:00
|
|
|
explicit SEHTableChunk(std::set<Defined *> S) : Syms(std::move(S)) {}
|
2015-07-25 07:51:14 +08:00
|
|
|
size_t getSize() const override { return Syms.size() * 4; }
|
2015-09-20 07:28:57 +08:00
|
|
|
void writeTo(uint8_t *Buf) const override;
|
2015-07-25 07:51:14 +08:00
|
|
|
|
|
|
|
private:
|
|
|
|
std::set<Defined *> Syms;
|
|
|
|
};
|
|
|
|
|
2015-06-15 09:23:58 +08:00
|
|
|
// Windows-specific.
|
|
|
|
// This class represents a block in .reloc section.
|
|
|
|
// See the PE/COFF spec 5.6 for details.
|
|
|
|
class BaserelChunk : public Chunk {
|
|
|
|
public:
|
2015-07-25 09:44:32 +08:00
|
|
|
BaserelChunk(uint32_t Page, Baserel *Begin, Baserel *End);
|
2015-06-15 09:23:58 +08:00
|
|
|
size_t getSize() const override { return Data.size(); }
|
2015-09-20 07:28:57 +08:00
|
|
|
void writeTo(uint8_t *Buf) const override;
|
2015-06-15 09:23:58 +08:00
|
|
|
|
|
|
|
private:
|
|
|
|
std::vector<uint8_t> Data;
|
|
|
|
};
|
|
|
|
|
2015-07-25 09:44:32 +08:00
|
|
|
// A single base relocation entry: the RVA of a location to fix up at
// load time, plus its IMAGE_REL_BASED_* type.
class Baserel {
public:
  Baserel(uint32_t V, uint8_t Ty) : RVA(V), Type(Ty) {}
  explicit Baserel(uint32_t V) : Baserel(V, getDefaultType()) {}
  // Returns the machine-dependent default relocation type.
  uint8_t getDefaultType();

  uint32_t RVA;
  uint8_t Type;
};
|
|
|
|
|
2017-07-26 04:00:37 +08:00
|
|
|
// In-place patching helpers for ARM Thumb-2 encodings: applyMOV32T
// writes V into a MOVW/MOVT pair at Off; applyBranch24T writes a
// 24-bit branch displacement. Shared with the relocation code.
void applyMOV32T(uint8_t *Off, uint32_t V);
void applyBranch24T(uint8_t *Off, int32_t V);
|
|
|
|
|
2015-05-29 03:09:30 +08:00
|
|
|
} // namespace coff
|
|
|
|
} // namespace lld
|
|
|
|
|
|
|
|
#endif
|