2015-05-29 03:09:30 +08:00
|
|
|
//===- Writer.cpp ---------------------------------------------------------===//
|
|
|
|
//
|
|
|
|
// The LLVM Linker
|
|
|
|
//
|
|
|
|
// This file is distributed under the University of Illinois Open Source
|
|
|
|
// License. See LICENSE.TXT for details.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2016-10-12 03:45:07 +08:00
|
|
|
#include "Writer.h"
|
2015-05-29 03:09:30 +08:00
|
|
|
#include "Config.h"
|
2015-08-06 07:43:53 +08:00
|
|
|
#include "DLL.h"
|
|
|
|
#include "InputFiles.h"
|
2017-01-14 11:14:46 +08:00
|
|
|
#include "MapFile.h"
|
2016-10-12 03:45:07 +08:00
|
|
|
#include "PDB.h"
|
2015-08-06 07:43:53 +08:00
|
|
|
#include "SymbolTable.h"
|
|
|
|
#include "Symbols.h"
|
[lld] unified COFF and ELF error handling on new Common/ErrorHandler
Summary:
The COFF linker and the ELF linker have long had similar but separate
Error.h and Error.cpp files to implement error handling. This change
introduces new error handling code in Common/ErrorHandler.h, changes the
COFF and ELF linkers to use it, and removes the old, separate
implementations.
Reviewers: ruiu
Reviewed By: ruiu
Subscribers: smeenai, jyknight, emaste, sdardis, nemanjai, nhaehnle, mgorny, javed.absar, kbarton, fedor.sergeev, llvm-commits
Differential Revision: https://reviews.llvm.org/D39259
llvm-svn: 316624
2017-10-26 06:28:38 +08:00
|
|
|
#include "lld/Common/ErrorHandler.h"
|
2017-11-29 04:39:17 +08:00
|
|
|
#include "lld/Common/Memory.h"
|
2018-01-18 03:16:26 +08:00
|
|
|
#include "lld/Common/Timer.h"
|
2015-07-28 08:17:25 +08:00
|
|
|
#include "llvm/ADT/DenseMap.h"
|
2015-05-29 03:09:30 +08:00
|
|
|
#include "llvm/ADT/STLExtras.h"
|
2015-07-28 08:17:25 +08:00
|
|
|
#include "llvm/ADT/StringSwitch.h"
|
[LLD COFF/PDB] Incrementally update the build id.
Previously, our algorithm to compute a build id involved hashing the
executable and storing that as the GUID in the CV Debug Record chunk,
and setting the age to 1.
This breaks down in one very obvious case: a user adds some newlines to
a file, rebuilds, but changes nothing else. This causes new line
information and new file checksums to get written to the PDB, meaning
that the debug info is different, but the generated code would be the
same, so we would write the same build over again with an age of 1.
Anyone using a symbol cache would have a problem now, because the
debugger would open the executable, look at the age and guid, find a
matching PDB in the symbol cache and then load it. It would never copy
the new PDB to the symbol cache.
This patch implements the canonical Windows algorithm for updating
a build id, which is to check the existing executable first, and
re-use an existing GUID while bumping the age if it already
exists.
Differential Revision: https://reviews.llvm.org/D36758
llvm-svn: 310961
2017-08-16 05:31:41 +08:00
|
|
|
#include "llvm/Support/BinaryStreamReader.h"
|
2015-05-29 03:09:30 +08:00
|
|
|
#include "llvm/Support/Debug.h"
|
|
|
|
#include "llvm/Support/Endian.h"
|
|
|
|
#include "llvm/Support/FileOutputBuffer.h"
|
2017-05-11 08:03:52 +08:00
|
|
|
#include "llvm/Support/Parallel.h"
|
2018-02-06 09:58:26 +08:00
|
|
|
#include "llvm/Support/Path.h"
|
2016-08-30 05:20:46 +08:00
|
|
|
#include "llvm/Support/RandomNumberGenerator.h"
|
2015-05-29 03:09:30 +08:00
|
|
|
#include <algorithm>
|
2015-05-31 03:09:50 +08:00
|
|
|
#include <cstdio>
|
2015-05-29 03:09:30 +08:00
|
|
|
#include <map>
|
2015-08-06 07:43:53 +08:00
|
|
|
#include <memory>
|
2015-05-29 03:09:30 +08:00
|
|
|
#include <utility>
|
|
|
|
|
|
|
|
using namespace llvm;
|
|
|
|
using namespace llvm::COFF;
|
2015-05-31 03:09:50 +08:00
|
|
|
using namespace llvm::object;
|
|
|
|
using namespace llvm::support;
|
|
|
|
using namespace llvm::support::endian;
|
2015-08-06 07:43:53 +08:00
|
|
|
using namespace lld;
|
|
|
|
using namespace lld::coff;
|
2015-05-29 03:09:30 +08:00
|
|
|
|
2015-08-12 07:09:00 +08:00
|
|
|
// File alignment unit: a section's on-disk SizeOfRawData is padded up to a
// multiple of this (see OutputSection::addChunk).
static const int SectorSize = 512;
// Size in bytes of the DOS stub at the start of the PE image.
// NOTE(review): no use visible in this chunk of the file — presumably
// consumed by writeHeader(); confirm there.
static const int DOSStubSize = 64;
// Presumably the number of data-directory entries in the PE optional header
// (the standard count is 16). NOTE(review): identifier is misspelled
// ("NumberfOf" -> "NumberOf"); kept as-is because other parts of this file
// may reference it by this name.
static const int NumberfOfDataDirectory = 16;
|
|
|
|
|
2015-08-06 07:43:53 +08:00
|
|
|
namespace {
|
2016-08-30 05:20:46 +08:00
|
|
|
|
|
|
|
class DebugDirectoryChunk : public Chunk {
|
|
|
|
public:
|
2017-05-19 01:03:49 +08:00
|
|
|
DebugDirectoryChunk(const std::vector<Chunk *> &R) : Records(R) {}
|
2016-08-30 05:20:46 +08:00
|
|
|
|
|
|
|
size_t getSize() const override {
|
|
|
|
return Records.size() * sizeof(debug_directory);
|
|
|
|
}
|
|
|
|
|
|
|
|
void writeTo(uint8_t *B) const override {
|
|
|
|
auto *D = reinterpret_cast<debug_directory *>(B + OutputSectionOff);
|
|
|
|
|
2017-05-19 01:03:49 +08:00
|
|
|
for (const Chunk *Record : Records) {
|
2016-08-30 05:20:46 +08:00
|
|
|
D->Characteristics = 0;
|
|
|
|
D->TimeDateStamp = 0;
|
|
|
|
D->MajorVersion = 0;
|
|
|
|
D->MinorVersion = 0;
|
|
|
|
D->Type = COFF::IMAGE_DEBUG_TYPE_CODEVIEW;
|
|
|
|
D->SizeOfData = Record->getSize();
|
|
|
|
D->AddressOfRawData = Record->getRVA();
|
2017-08-03 07:19:54 +08:00
|
|
|
OutputSection *OS = Record->getOutputSection();
|
|
|
|
uint64_t Offs = OS->getFileOff() + (Record->getRVA() - OS->getRVA());
|
|
|
|
D->PointerToRawData = Offs;
|
2016-08-30 05:20:46 +08:00
|
|
|
|
|
|
|
++D;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
private:
|
2017-05-19 01:03:49 +08:00
|
|
|
const std::vector<Chunk *> &Records;
|
2016-08-30 05:20:46 +08:00
|
|
|
};
|
|
|
|
|
|
|
|
class CVDebugRecordChunk : public Chunk {
|
2017-08-05 04:02:55 +08:00
|
|
|
public:
|
|
|
|
CVDebugRecordChunk() {
|
|
|
|
PDBAbsPath = Config->PDBPath;
|
|
|
|
if (!PDBAbsPath.empty())
|
|
|
|
llvm::sys::fs::make_absolute(PDBAbsPath);
|
|
|
|
}
|
|
|
|
|
2016-08-30 05:20:46 +08:00
|
|
|
size_t getSize() const override {
|
2017-08-05 04:02:55 +08:00
|
|
|
return sizeof(codeview::DebugInfo) + PDBAbsPath.size() + 1;
|
2016-08-30 05:20:46 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
void writeTo(uint8_t *B) const override {
|
2016-09-10 03:26:03 +08:00
|
|
|
// Save off the DebugInfo entry to backfill the file signature (build id)
|
|
|
|
// in Writer::writeBuildId
|
[LLD COFF/PDB] Incrementally update the build id.
Previously, our algorithm to compute a build id involved hashing the
executable and storing that as the GUID in the CV Debug Record chunk,
and setting the age to 1.
This breaks down in one very obvious case: a user adds some newlines to
a file, rebuilds, but changes nothing else. This causes new line
information and new file checksums to get written to the PDB, meaning
that the debug info is different, but the generated code would be the
same, so we would write the same build over again with an age of 1.
Anyone using a symbol cache would have a problem now, because the
debugger would open the executable, look at the age and guid, find a
matching PDB in the symbol cache and then load it. It would never copy
the new PDB to the symbol cache.
This patch implements the canonical Windows algorithm for updating
a build id, which is to check the existing executable first, and
re-use an existing GUID while bumping the age if it already
exists.
Differential Revision: https://reviews.llvm.org/D36758
llvm-svn: 310961
2017-08-16 05:31:41 +08:00
|
|
|
BuildId = reinterpret_cast<codeview::DebugInfo *>(B + OutputSectionOff);
|
2016-08-30 05:20:46 +08:00
|
|
|
|
|
|
|
// variable sized field (PDB Path)
|
[LLD COFF/PDB] Incrementally update the build id.
Previously, our algorithm to compute a build id involved hashing the
executable and storing that as the GUID in the CV Debug Record chunk,
and setting the age to 1.
This breaks down in one very obvious case: a user adds some newlines to
a file, rebuilds, but changes nothing else. This causes new line
information and new file checksums to get written to the PDB, meaning
that the debug info is different, but the generated code would be the
same, so we would write the same build over again with an age of 1.
Anyone using a symbol cache would have a problem now, because the
debugger would open the executable, look at the age and guid, find a
matching PDB in the symbol cache and then load it. It would never copy
the new PDB to the symbol cache.
This patch implements the canonical Windows algorithm for updating
a build id, which is to check the existing executable first, and
re-use an existing GUID while bumping the age if it already
exists.
Differential Revision: https://reviews.llvm.org/D36758
llvm-svn: 310961
2017-08-16 05:31:41 +08:00
|
|
|
char *P = reinterpret_cast<char *>(B + OutputSectionOff + sizeof(*BuildId));
|
2017-08-05 04:02:55 +08:00
|
|
|
if (!PDBAbsPath.empty())
|
|
|
|
memcpy(P, PDBAbsPath.data(), PDBAbsPath.size());
|
|
|
|
P[PDBAbsPath.size()] = '\0';
|
2016-08-30 05:20:46 +08:00
|
|
|
}
|
2016-09-10 03:26:03 +08:00
|
|
|
|
2017-08-05 04:02:55 +08:00
|
|
|
SmallString<128> PDBAbsPath;
|
[LLD COFF/PDB] Incrementally update the build id.
Previously, our algorithm to compute a build id involved hashing the
executable and storing that as the GUID in the CV Debug Record chunk,
and setting the age to 1.
This breaks down in one very obvious case: a user adds some newlines to
a file, rebuilds, but changes nothing else. This causes new line
information and new file checksums to get written to the PDB, meaning
that the debug info is different, but the generated code would be the
same, so we would write the same build over again with an age of 1.
Anyone using a symbol cache would have a problem now, because the
debugger would open the executable, look at the age and guid, find a
matching PDB in the symbol cache and then load it. It would never copy
the new PDB to the symbol cache.
This patch implements the canonical Windows algorithm for updating
a build id, which is to check the existing executable first, and
re-use an existing GUID while bumping the age if it already
exists.
Differential Revision: https://reviews.llvm.org/D36758
llvm-svn: 310961
2017-08-16 05:31:41 +08:00
|
|
|
mutable codeview::DebugInfo *BuildId = nullptr;
|
2016-08-30 05:20:46 +08:00
|
|
|
};
|
|
|
|
|
2015-08-06 07:43:53 +08:00
|
|
|
// The writer writes a SymbolTable result to a file.
|
|
|
|
class Writer {
public:
  // The output buffer is owned by the global error handler so that it can be
  // discarded cleanly on fatal errors.
  Writer() : Buffer(errorHandler().OutputBuffer) {}
  // Entry point: lays out the image and writes it to disk.
  void run();

private:
  // Image layout phases, invoked from run() in roughly this order.
  void createSections();
  void createMiscChunks();
  void createImportTables();
  void createExportTable();
  void assignAddresses();
  void removeEmptySections();
  void createSymbolAndStringTable();
  void openFile(StringRef OutputPath);
  // Writes the PE headers; PEHeaderTy selects pe32 vs pe32plus (32/64-bit).
  template <typename PEHeaderTy> void writeHeader();
  void createSEHTable(OutputSection *RData);
  void createGuardCFTables(OutputSection *RData);
  void createGLJmpTable(OutputSection *RData);
  void markSymbolsForRVATable(ObjFile *File,
                              ArrayRef<SectionChunk *> SymIdxChunks,
                              SymbolRVASet &TableSymbols);
  void maybeAddRVATable(OutputSection *RData, SymbolRVASet TableSymbols,
                        StringRef TableSym, StringRef CountSym);
  void setSectionPermissions();
  void writeSections();
  // Backfills the build id recorded by CVDebugRecordChunk after the sections
  // have been written.
  void writeBuildId();
  void sortExceptionTable();

  // Helpers for emitting the COFF symbol table and string table.
  llvm::Optional<coff_symbol16> createSymbol(Defined *D);
  size_t addEntryToStringTable(StringRef Str);

  OutputSection *findSection(StringRef Name);
  OutputSection *createSection(StringRef Name);
  void addBaserels(OutputSection *Dest);
  void addBaserelBlocks(OutputSection *Dest, std::vector<Baserel> &V);

  uint32_t getSizeOfInitializedData();
  // Groups import symbols by DLL name.
  std::map<StringRef, std::vector<DefinedImportData *>> binImports();

  // Reference to the error handler's output buffer (see the constructor).
  std::unique_ptr<FileOutputBuffer> &Buffer;
  std::vector<OutputSection *> OutputSections;
  std::vector<char> Strtab;
  std::vector<llvm::object::coff_symbol16> OutputSymtab;
  IdataContents Idata;
  DelayLoadContents DelayIdata;
  EdataContents Edata;
  RVATableChunk *GuardFidsTable = nullptr;
  RVATableChunk *SEHTable = nullptr;

  // Debug directory and its records; BuildId points at the CodeView record
  // that writeBuildId() updates in place.
  Chunk *DebugDirectory = nullptr;
  std::vector<Chunk *> DebugRecords;
  CVDebugRecordChunk *BuildId = nullptr;
  // Build id read back from a previous output file, if any (see
  // loadExistingBuildId), used to bump the age instead of minting a new GUID.
  Optional<codeview::DebugInfo> PreviousBuildId;
  ArrayRef<uint8_t> SectionTable;

  // Layout results. FileSize is checked against UINT32_MAX in run(); PE
  // images cannot exceed 4 GiB.
  uint64_t FileSize;
  uint32_t PointerToSymbolTable = 0;
  uint64_t SizeOfImage;
  uint64_t SizeOfHeaders;
};
|
|
|
|
} // anonymous namespace
|
|
|
|
|
2015-05-29 03:09:30 +08:00
|
|
|
namespace lld {
|
|
|
|
namespace coff {
|
|
|
|
|
2018-01-18 03:16:26 +08:00
|
|
|
// Profiling timers: layout/linking work vs. committing the output file to
// disk. CodeLayoutTimer brackets Writer::run()'s layout phase (see the
// ScopedTimer there); DiskCommitTimer presumably brackets the final buffer
// commit — confirm at its use site.
static Timer CodeLayoutTimer("Code Layout", Timer::root());
static Timer DiskCommitTimer("Commit Output File", Timer::root());
|
|
|
|
|
2017-08-29 05:51:07 +08:00
|
|
|
void writeResult() { Writer().run(); }
|
2015-06-07 07:32:08 +08:00
|
|
|
|
2015-05-29 03:09:30 +08:00
|
|
|
void OutputSection::setRVA(uint64_t RVA) {
|
|
|
|
Header.VirtualAddress = RVA;
|
|
|
|
for (Chunk *C : Chunks)
|
|
|
|
C->setRVA(C->getRVA() + RVA);
|
|
|
|
}
|
|
|
|
|
|
|
|
void OutputSection::setFileOffset(uint64_t Off) {
|
|
|
|
// If a section has no actual data (i.e. BSS section), we want to
|
|
|
|
// set 0 to its PointerToRawData. Otherwise the output is rejected
|
|
|
|
// by the loader.
|
|
|
|
if (Header.SizeOfRawData == 0)
|
|
|
|
return;
|
[LLD][COFF] Report error when file will exceed Windows maximum image size (4GB)
Patch by Colden Cullen.
Currently, when a large PE (>4 GiB) is to be produced, a crash occurs
because:
1. Calling setOffset with a number greater than UINT32_MAX causes the
PointerToRawData to overflow
2. When adding the symbol table to the end of the file, the last section's
offset was used to calculate file size. Because this had overflowed,
this number was too low, and the file created would not be large enough.
This lead to the actual crash I saw, which was a buffer overrun.
This change:
1. Adds comment to setOffset, clarifying that overflow can occur, but it's
somewhat safe because the error will be handled elsewhere
2. Adds file size check after all output data has been created This matches
the MS link.exe error, which looks prints as: "LINK : fatal error
LNK1248: image size (10000EFC9) exceeds maximum allowable size
(FFFFFFFF)"
3. Changes calculate of the symbol table offset to just use the existing
FileSize. This should match the previous calculations, but doesn't rely
on the use of a u32 that can overflow.
4. Removes trivial usage of a magic number that bugged me while I was
debugging the issue
I'm not sure how to add a test for this outside of adding 4GB of object
files to the repo. If there's an easier way, let me know and I'll be
happy to add a test.
Differential Revision: https://reviews.llvm.org/D42010
llvm-svn: 322605
2018-01-17 09:08:02 +08:00
|
|
|
|
|
|
|
// It is possible that this assignment could cause an overflow of the u32,
|
|
|
|
// but that should be caught by the FileSize check in OutputSection::run().
|
2015-05-29 03:09:30 +08:00
|
|
|
Header.PointerToRawData = Off;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Appends a chunk to this section, assigning it the next available
// section-relative offset that satisfies its alignment.
void OutputSection::addChunk(Chunk *C) {
  Chunks.push_back(C);
  C->setOutputSection(this);

  // Place the chunk at the aligned end of the section.
  uint64_t Pos = alignTo(Header.VirtualSize, C->Alignment);
  C->setRVA(Pos);
  C->OutputSectionOff = Pos;
  Pos += C->getSize();

  // Section sizes are 32-bit fields in the COFF header.
  if (Pos > UINT32_MAX)
    error("section larger than 4 GiB: " + Name);
  Header.VirtualSize = Pos;

  // Only chunks with real contents contribute to the on-disk size, which is
  // padded out to a sector boundary.
  if (C->hasData())
    Header.SizeOfRawData = alignTo(Pos, SectorSize);
}
|
|
|
|
|
|
|
|
void OutputSection::addPermissions(uint32_t C) {
|
2015-06-15 11:03:23 +08:00
|
|
|
Header.Characteristics |= C & PermMask;
|
2015-05-29 03:09:30 +08:00
|
|
|
}
|
|
|
|
|
2016-06-20 11:39:39 +08:00
|
|
|
void OutputSection::setPermissions(uint32_t C) {
|
|
|
|
Header.Characteristics = C & PermMask;
|
|
|
|
}
|
|
|
|
|
2015-05-31 03:09:50 +08:00
|
|
|
// Write the section header to a given buffer.
|
2015-06-07 07:19:38 +08:00
|
|
|
void OutputSection::writeHeaderTo(uint8_t *Buf) {
|
2015-05-31 03:09:50 +08:00
|
|
|
auto *Hdr = reinterpret_cast<coff_section *>(Buf);
|
|
|
|
*Hdr = Header;
|
|
|
|
if (StringTableOff) {
|
|
|
|
// If name is too long, write offset into the string table as a name.
|
|
|
|
sprintf(Hdr->Name, "/%d", StringTableOff);
|
|
|
|
} else {
|
2017-11-16 20:06:42 +08:00
|
|
|
assert(!Config->Debug || Name.size() <= COFF::NameSize ||
|
|
|
|
(Hdr->Characteristics & IMAGE_SCN_MEM_DISCARDABLE) == 0);
|
2015-07-09 00:37:50 +08:00
|
|
|
strncpy(Hdr->Name, Name.data(),
|
|
|
|
std::min(Name.size(), (size_t)COFF::NameSize));
|
2015-05-31 03:09:50 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-08-06 07:43:53 +08:00
|
|
|
} // namespace coff
|
|
|
|
} // namespace lld
|
|
|
|
|
[LLD COFF/PDB] Incrementally update the build id.
Previously, our algorithm to compute a build id involved hashing the
executable and storing that as the GUID in the CV Debug Record chunk,
and setting the age to 1.
This breaks down in one very obvious case: a user adds some newlines to
a file, rebuilds, but changes nothing else. This causes new line
information and new file checksums to get written to the PDB, meaning
that the debug info is different, but the generated code would be the
same, so we would write the same build over again with an age of 1.
Anyone using a symbol cache would have a problem now, because the
debugger would open the executable, look at the age and guid, find a
matching PDB in the symbol cache and then load it. It would never copy
the new PDB to the symbol cache.
This patch implements the canonical Windows algorithm for updating
a build id, which is to check the existing executable first, and
re-use an existing GUID while bumping the age if it already
exists.
Differential Revision: https://reviews.llvm.org/D36758
llvm-svn: 310961
2017-08-16 05:31:41 +08:00
|
|
|
// PDBs are matched against executables using a build id which consists of three
|
|
|
|
// components:
|
|
|
|
// 1. A 16-bit GUID
|
|
|
|
// 2. An age
|
|
|
|
// 3. A time stamp.
|
|
|
|
//
|
|
|
|
// Debuggers and symbol servers match executables against debug info by checking
|
|
|
|
// each of these components of the EXE/DLL against the corresponding value in
|
|
|
|
// the PDB and failing a match if any of the components differ. In the case of
|
|
|
|
// symbol servers, symbols are cached in a folder that is a function of the
|
|
|
|
// GUID. As a result, in order to avoid symbol cache pollution where every
|
|
|
|
// incremental build copies a new PDB to the symbol cache, we must try to re-use
|
|
|
|
// the existing GUID if one exists, but bump the age. This way the match will
|
|
|
|
// fail, so the symbol cache knows to use the new PDB, but the GUID matches, so
|
|
|
|
// it overwrites the existing item in the symbol cache rather than making a new
|
|
|
|
// one.
|
|
|
|
static Optional<codeview::DebugInfo> loadExistingBuildId(StringRef Path) {
  // Incrementally updating a previous build id only matters when we are
  // writing codeview debug info.
  if (!Config->Debug)
    return None;

  auto BinOrErr = llvm::object::createBinary(Path);
  if (!BinOrErr) {
    // No previous output (or an unreadable one) just means there is nothing
    // to reuse; swallow the error.
    consumeError(BinOrErr.takeError());
    return None;
  }

  auto OwningBin = std::move(*BinOrErr);
  if (!OwningBin.getBinary()->isCOFF())
    return None;

  std::error_code EC;
  COFFObjectFile Existing(OwningBin.getBinary()->getMemoryBufferRef(), EC);
  if (EC)
    return None;

  // Don't reuse a build id unless the existing binary's machine matches the
  // one we're producing now.
  if (Existing.is64() != Config->is64() ||
      Existing.getMachine() != Config->Machine)
    return None;

  for (const auto &Dir : Existing.debug_directories()) {
    if (Dir.Type != IMAGE_DEBUG_TYPE_CODEVIEW)
      continue;

    const codeview::DebugInfo *DI = nullptr;
    StringRef PDBName;
    if (auto Err = Existing.getDebugPDBInfo(DI, PDBName)) {
      (void)Err;
      return None;
    }

    // We only support writing PDBs in v70 format, so ignore any build id we
    // don't recognize.
    if (DI->Signature.CVSignature != OMF::Signature::PDB70)
      return None;

    return *DI;
  }

  return None;
}
|
|
|
|
|
2015-08-06 07:43:53 +08:00
|
|
|
// The main function of the writer.
|
2015-08-06 22:58:50 +08:00
|
|
|
void Writer::run() {
|
2018-01-18 03:16:26 +08:00
|
|
|
ScopedTimer T1(CodeLayoutTimer);
|
|
|
|
|
2015-08-06 07:43:53 +08:00
|
|
|
createSections();
|
|
|
|
createMiscChunks();
|
|
|
|
createImportTables();
|
|
|
|
createExportTable();
|
|
|
|
if (Config->Relocatable)
|
|
|
|
createSection(".reloc");
|
|
|
|
assignAddresses();
|
|
|
|
removeEmptySections();
|
2016-06-20 11:39:39 +08:00
|
|
|
setSectionPermissions();
|
2017-11-21 09:14:14 +08:00
|
|
|
createSymbolAndStringTable();
|
[LLD COFF/PDB] Incrementally update the build id.
Previously, our algorithm to compute a build id involved hashing the
executable and storing that as the GUID in the CV Debug Record chunk,
and setting the age to 1.
This breaks down in one very obvious case: a user adds some newlines to
a file, rebuilds, but changes nothing else. This causes new line
information and new file checksums to get written to the PDB, meaning
that the debug info is different, but the generated code would be the
same, so we would write the same build over again with an age of 1.
Anyone using a symbol cache would have a problem now, because the
debugger would open the executable, look at the age and guid, find a
matching PDB in the symbol cache and then load it. It would never copy
the new PDB to the symbol cache.
This patch implements the canonical Windows algorithm for updating
a build id, which is to check the existing executable first, and
re-use an existing GUID while bumping the age if it already
exists.
Differential Revision: https://reviews.llvm.org/D36758
llvm-svn: 310961
2017-08-16 05:31:41 +08:00
|
|
|
|
[LLD][COFF] Report error when file will exceed Windows maximum image size (4GB)
Patch by Colden Cullen.
Currently, when a large PE (>4 GiB) is to be produced, a crash occurs
because:
1. Calling setOffset with a number greater than UINT32_MAX causes the
PointerToRawData to overflow
2. When adding the symbol table to the end of the file, the last section's
offset was used to calculate file size. Because this had overflowed,
this number was too low, and the file created would not be large enough.
This lead to the actual crash I saw, which was a buffer overrun.
This change:
1. Adds comment to setOffset, clarifying that overflow can occur, but it's
somewhat safe because the error will be handled elsewhere
2. Adds a file size check after all output data has been created. This matches
the MS link.exe error, which prints as: "LINK : fatal error
LNK1248: image size (10000EFC9) exceeds maximum allowable size
(FFFFFFFF)"
3. Changes the calculation of the symbol table offset to just use the existing
FileSize. This should match the previous calculations, but doesn't rely
on the use of a u32 that can overflow.
4. Removes trivial usage of a magic number that bugged me while I was
debugging the issue
I'm not sure how to add a test for this outside of adding 4GB of object
files to the repo. If there's an easier way, let me know and I'll be
happy to add a test.
Differential Revision: https://reviews.llvm.org/D42010
llvm-svn: 322605
2018-01-17 09:08:02 +08:00
|
|
|
if (FileSize > UINT32_MAX)
|
|
|
|
fatal("image size (" + Twine(FileSize) + ") " +
|
|
|
|
"exceeds maximum allowable size (" + Twine(UINT32_MAX) + ")");
|
|
|
|
|
[LLD COFF/PDB] Incrementally update the build id.
Previously, our algorithm to compute a build id involved hashing the
executable and storing that as the GUID in the CV Debug Record chunk,
and setting the age to 1.
This breaks down in one very obvious case: a user adds some newlines to
a file, rebuilds, but changes nothing else. This causes new line
information and new file checksums to get written to the PDB, meaning
that the debug info is different, but the generated code would be the
same, so we would write the same build over again with an age of 1.
Anyone using a symbol cache would have a problem now, because the
debugger would open the executable, look at the age and guid, find a
matching PDB in the symbol cache and then load it. It would never copy
the new PDB to the symbol cache.
This patch implements the canonical Windows algorithm for updating
a build id, which is to check the existing executable first, and
re-use an existing GUID while bumping the age if it already
exists.
Differential Revision: https://reviews.llvm.org/D36758
llvm-svn: 310961
2017-08-16 05:31:41 +08:00
|
|
|
// We must do this before opening the output file, as it depends on being able
|
|
|
|
// to read the contents of the existing output file.
|
|
|
|
PreviousBuildId = loadExistingBuildId(Config->OutputFile);
|
2015-08-06 22:58:50 +08:00
|
|
|
openFile(Config->OutputFile);
|
2015-08-06 07:43:53 +08:00
|
|
|
if (Config->is64()) {
|
|
|
|
writeHeader<pe32plus_header>();
|
|
|
|
} else {
|
|
|
|
writeHeader<pe32_header>();
|
|
|
|
}
|
|
|
|
writeSections();
|
|
|
|
sortExceptionTable();
|
2016-09-10 03:26:03 +08:00
|
|
|
writeBuildId();
|
2016-10-12 03:45:07 +08:00
|
|
|
|
2018-01-18 03:16:26 +08:00
|
|
|
T1.stop();
|
[LLD COFF/PDB] Incrementally update the build id.
Previously, our algorithm to compute a build id involved hashing the
executable and storing that as the GUID in the CV Debug Record chunk,
and setting the age to 1.
This breaks down in one very obvious case: a user adds some newlines to
a file, rebuilds, but changes nothing else. This causes new line
information and new file checksums to get written to the PDB, meaning
that the debug info is different, but the generated code would be the
same, so we would write the same build over again with an age of 1.
Anyone using a symbol cache would have a problem now, because the
debugger would open the executable, look at the age and guid, find a
matching PDB in the symbol cache and then load it. It would never copy
the new PDB to the symbol cache.
This patch implements the canonical Windows algorithm for updating
a build id, which is to check the existing executable first, and
re-use an existing GUID while bumping the age if it already
exists.
Differential Revision: https://reviews.llvm.org/D36758
llvm-svn: 310961
2017-08-16 05:31:41 +08:00
|
|
|
|
2018-01-18 03:16:26 +08:00
|
|
|
if (!Config->PDBPath.empty() && Config->Debug) {
|
[LLD COFF/PDB] Incrementally update the build id.
Previously, our algorithm to compute a build id involved hashing the
executable and storing that as the GUID in the CV Debug Record chunk,
and setting the age to 1.
This breaks down in one very obvious case: a user adds some newlines to
a file, rebuilds, but changes nothing else. This causes new line
information and new file checksums to get written to the PDB, meaning
that the debug info is different, but the generated code would be the
same, so we would write the same build over again with an age of 1.
Anyone using a symbol cache would have a problem now, because the
debugger would open the executable, look at the age and guid, find a
matching PDB in the symbol cache and then load it. It would never copy
the new PDB to the symbol cache.
This patch implements the canonical Windows algorithm for updating
a build id, which is to check the existing executable first, and
re-use an existing GUID while bumping the age if it already
exists.
Differential Revision: https://reviews.llvm.org/D36758
llvm-svn: 310961
2017-08-16 05:31:41 +08:00
|
|
|
assert(BuildId);
|
|
|
|
createPDB(Symtab, OutputSections, SectionTable, *BuildId->BuildId);
|
2017-02-07 12:28:02 +08:00
|
|
|
}
|
2016-10-12 03:45:07 +08:00
|
|
|
|
2017-01-14 11:14:46 +08:00
|
|
|
writeMapFile(OutputSections);
|
|
|
|
|
2018-01-18 03:16:26 +08:00
|
|
|
ScopedTimer T2(DiskCommitTimer);
|
2017-11-08 09:50:34 +08:00
|
|
|
if (auto E = Buffer->commit())
|
|
|
|
fatal("failed to write the output file: " + toString(std::move(E)));
|
2015-08-06 07:43:53 +08:00
|
|
|
}
|
|
|
|
|
2015-07-05 07:37:32 +08:00
|
|
|
// Map an input section name to the name of the output section it should be
// placed in, honoring the '$' grouping convention and /merge directives.
static StringRef getOutputSection(StringRef Name) {
  // Everything from the first '$' onward is discarded (".text$foo" -> ".text").
  StringRef Base = Name.split('$').first;

  // Treat a later period as a separator for MinGW, for sections like
  // ".ctors.01234".
  Base = Base.substr(0, Base.find('.', 1));

  // Apply a user-specified /merge:<from>=<to> mapping, if any.
  auto Found = Config->Merge.find(Base);
  return Found == Config->Merge.end() ? Base : Found->second;
}
|
|
|
|
|
2018-01-27 08:34:46 +08:00
|
|
|
// For /order.
|
|
|
|
static void sortBySectionOrder(std::vector<Chunk *> &Chunks) {
|
|
|
|
auto GetPriority = [](const Chunk *C) {
|
|
|
|
if (auto *Sec = dyn_cast<SectionChunk>(C))
|
|
|
|
if (Sec->Sym)
|
|
|
|
return Config->Order.lookup(Sec->Sym->getName());
|
|
|
|
return 0;
|
|
|
|
};
|
|
|
|
|
|
|
|
std::stable_sort(Chunks.begin(), Chunks.end(),
|
|
|
|
[=](const Chunk *A, const Chunk *B) {
|
|
|
|
return GetPriority(A) < GetPriority(B);
|
|
|
|
});
|
|
|
|
}
|
|
|
|
|
2015-06-07 07:32:08 +08:00
|
|
|
// Create output section objects and add them to OutputSections.
|
2015-05-29 03:09:30 +08:00
|
|
|
void Writer::createSections() {
|
2015-06-07 07:32:08 +08:00
|
|
|
// First, bin chunks by name.
|
2015-05-29 03:09:30 +08:00
|
|
|
std::map<StringRef, std::vector<Chunk *>> Map;
|
|
|
|
for (Chunk *C : Symtab->getChunks()) {
|
2015-09-17 05:40:47 +08:00
|
|
|
auto *SC = dyn_cast<SectionChunk>(C);
|
|
|
|
if (SC && !SC->isLive()) {
|
|
|
|
if (Config->Verbose)
|
|
|
|
SC->printDiscardedMessage();
|
|
|
|
continue;
|
2015-05-29 03:09:30 +08:00
|
|
|
}
|
2015-06-08 16:26:28 +08:00
|
|
|
Map[C->getSectionName()].push_back(C);
|
2015-05-29 03:09:30 +08:00
|
|
|
}
|
|
|
|
|
2018-01-27 08:34:46 +08:00
|
|
|
// Process an /order option.
|
|
|
|
if (!Config->Order.empty())
|
|
|
|
for (auto &Pair : Map)
|
|
|
|
sortBySectionOrder(Pair.second);
|
|
|
|
|
2015-06-07 07:32:08 +08:00
|
|
|
// Then create an OutputSection for each section.
|
2015-06-08 16:26:28 +08:00
|
|
|
// '$' and all following characters in input section names are
|
|
|
|
// discarded when determining output section. So, .text$foo
|
|
|
|
// contributes to .text, for example. See PE/COFF spec 3.2.
|
2015-07-28 08:17:25 +08:00
|
|
|
SmallDenseMap<StringRef, OutputSection *> Sections;
|
2015-07-05 07:37:32 +08:00
|
|
|
for (auto Pair : Map) {
|
|
|
|
StringRef Name = getOutputSection(Pair.first);
|
|
|
|
OutputSection *&Sec = Sections[Name];
|
|
|
|
if (!Sec) {
|
2016-12-09 10:13:12 +08:00
|
|
|
Sec = make<OutputSection>(Name);
|
2015-06-08 16:26:28 +08:00
|
|
|
OutputSections.push_back(Sec);
|
|
|
|
}
|
2015-07-05 07:37:32 +08:00
|
|
|
std::vector<Chunk *> &Chunks = Pair.second;
|
2015-05-29 03:09:30 +08:00
|
|
|
for (Chunk *C : Chunks) {
|
|
|
|
Sec->addChunk(C);
|
|
|
|
Sec->addPermissions(C->getPermissions());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-06-25 11:31:47 +08:00
|
|
|
void Writer::createMiscChunks() {
|
2016-08-22 07:05:43 +08:00
|
|
|
OutputSection *RData = createSection(".rdata");
|
|
|
|
|
2015-07-25 07:51:14 +08:00
|
|
|
// Create thunks for locally-dllimported symbols.
|
|
|
|
if (!Symtab->LocalImportChunks.empty()) {
|
|
|
|
for (Chunk *C : Symtab->LocalImportChunks)
|
2016-08-22 07:05:43 +08:00
|
|
|
RData->addChunk(C);
|
2015-07-25 07:51:14 +08:00
|
|
|
}
|
|
|
|
|
2016-08-30 05:20:46 +08:00
|
|
|
// Create Debug Information Chunks
|
|
|
|
if (Config->Debug) {
|
2017-05-19 01:03:49 +08:00
|
|
|
DebugDirectory = make<DebugDirectoryChunk>(DebugRecords);
|
2016-08-30 05:20:46 +08:00
|
|
|
|
[LLD COFF/PDB] Incrementally update the build id.
Previously, our algorithm to compute a build id involved hashing the
executable and storing that as the GUID in the CV Debug Record chunk,
and setting the age to 1.
This breaks down in one very obvious case: a user adds some newlines to
a file, rebuilds, but changes nothing else. This causes new line
information and new file checksums to get written to the PDB, meaning
that the debug info is different, but the generated code would be the
same, so we would write the same build over again with an age of 1.
Anyone using a symbol cache would have a problem now, because the
debugger would open the executable, look at the age and guid, find a
matching PDB in the symbol cache and then load it. It would never copy
the new PDB to the symbol cache.
This patch implements the canonical Windows algorithm for updating
a build id, which is to check the existing executable first, and
re-use an existing GUID while bumping the age if it already
exists.
Differential Revision: https://reviews.llvm.org/D36758
llvm-svn: 310961
2017-08-16 05:31:41 +08:00
|
|
|
// Make a CVDebugRecordChunk even when /DEBUG:CV is not specified. We
|
|
|
|
// output a PDB no matter what, and this chunk provides the only means of
|
|
|
|
// allowing a debugger to match a PDB and an executable. So we need it even
|
|
|
|
// if we're ultimately not going to write CodeView data to the PDB.
|
|
|
|
auto *CVChunk = make<CVDebugRecordChunk>();
|
|
|
|
BuildId = CVChunk;
|
|
|
|
DebugRecords.push_back(CVChunk);
|
2016-08-30 05:20:46 +08:00
|
|
|
|
2017-05-19 01:03:49 +08:00
|
|
|
RData->addChunk(DebugDirectory);
|
|
|
|
for (Chunk *C : DebugRecords)
|
|
|
|
RData->addChunk(C);
|
2016-08-30 05:20:46 +08:00
|
|
|
}
|
|
|
|
|
2018-02-06 09:58:26 +08:00
|
|
|
// Create SEH table. x86-only.
|
|
|
|
if (Config->Machine == I386)
|
|
|
|
createSEHTable(RData);
|
|
|
|
|
2018-02-14 04:32:53 +08:00
|
|
|
// Create /guard:cf tables if requested.
|
|
|
|
if (Config->GuardCF != GuardCFLevel::Off)
|
|
|
|
createGuardCFTables(RData);
|
2015-06-25 11:31:47 +08:00
|
|
|
}
|
|
|
|
|
2015-06-07 06:56:55 +08:00
|
|
|
// Create .idata section for the DLL-imported symbol table.
|
|
|
|
// The format of this section is inherently Windows-specific.
|
|
|
|
// IdataContents class abstracted away the details for us,
|
|
|
|
// so we just let it create chunks and add them to the section.
|
2015-06-07 06:46:15 +08:00
|
|
|
void Writer::createImportTables() {
|
2017-07-27 08:45:26 +08:00
|
|
|
if (ImportFile::Instances.empty())
|
2015-06-07 06:46:15 +08:00
|
|
|
return;
|
2015-08-17 16:30:31 +08:00
|
|
|
|
|
|
|
// Initialize DLLOrder so that import entries are ordered in
|
|
|
|
// the same order as in the command line. (That affects DLL
|
|
|
|
// initialization order, and this ordering is MSVC-compatible.)
|
2017-07-27 08:45:26 +08:00
|
|
|
for (ImportFile *File : ImportFile::Instances) {
|
2017-05-25 06:30:06 +08:00
|
|
|
if (!File->Live)
|
|
|
|
continue;
|
|
|
|
|
2015-09-02 15:27:31 +08:00
|
|
|
std::string DLL = StringRef(File->DLLName).lower();
|
|
|
|
if (Config->DLLOrder.count(DLL) == 0)
|
|
|
|
Config->DLLOrder[DLL] = Config->DLLOrder.size();
|
|
|
|
}
|
2015-08-17 16:30:31 +08:00
|
|
|
|
2015-05-29 03:09:30 +08:00
|
|
|
OutputSection *Text = createSection(".text");
|
2017-07-27 08:45:26 +08:00
|
|
|
for (ImportFile *File : ImportFile::Instances) {
|
2017-05-25 06:30:06 +08:00
|
|
|
if (!File->Live)
|
|
|
|
continue;
|
|
|
|
|
2015-08-17 15:27:45 +08:00
|
|
|
if (DefinedImportThunk *Thunk = File->ThunkSym)
|
2017-05-22 14:01:37 +08:00
|
|
|
Text->addChunk(Thunk->getChunk());
|
2017-05-25 06:30:06 +08:00
|
|
|
|
2015-09-03 22:49:47 +08:00
|
|
|
if (Config->DelayLoads.count(StringRef(File->DLLName).lower())) {
|
2017-05-26 02:03:34 +08:00
|
|
|
if (!File->ThunkSym)
|
|
|
|
fatal("cannot delay-load " + toString(File) +
|
|
|
|
" due to import of data: " + toString(*File->ImpSym));
|
2017-05-22 14:01:37 +08:00
|
|
|
DelayIdata.add(File->ImpSym);
|
2015-08-17 15:27:45 +08:00
|
|
|
} else {
|
2017-05-22 14:01:37 +08:00
|
|
|
Idata.add(File->ImpSym);
|
2015-05-29 03:09:30 +08:00
|
|
|
}
|
|
|
|
}
|
2017-05-25 06:30:06 +08:00
|
|
|
|
2015-06-22 06:31:52 +08:00
|
|
|
if (!Idata.empty()) {
|
|
|
|
OutputSection *Sec = createSection(".idata");
|
|
|
|
for (Chunk *C : Idata.getChunks())
|
|
|
|
Sec->addChunk(C);
|
|
|
|
}
|
2017-05-25 06:30:06 +08:00
|
|
|
|
2015-06-22 06:31:52 +08:00
|
|
|
if (!DelayIdata.empty()) {
|
2016-12-10 05:55:24 +08:00
|
|
|
Defined *Helper = cast<Defined>(Config->DelayLoadHelper);
|
2015-07-02 11:59:04 +08:00
|
|
|
DelayIdata.create(Helper);
|
2015-06-22 06:31:52 +08:00
|
|
|
OutputSection *Sec = createSection(".didat");
|
2015-06-27 05:40:15 +08:00
|
|
|
for (Chunk *C : DelayIdata.getChunks())
|
|
|
|
Sec->addChunk(C);
|
|
|
|
Sec = createSection(".data");
|
|
|
|
for (Chunk *C : DelayIdata.getDataChunks())
|
2015-06-22 06:31:52 +08:00
|
|
|
Sec->addChunk(C);
|
|
|
|
Sec = createSection(".text");
|
2017-05-19 01:03:49 +08:00
|
|
|
for (Chunk *C : DelayIdata.getCodeChunks())
|
|
|
|
Sec->addChunk(C);
|
2015-06-22 06:31:52 +08:00
|
|
|
}
|
2015-05-29 03:09:30 +08:00
|
|
|
}
|
|
|
|
|
2015-06-17 08:16:33 +08:00
|
|
|
void Writer::createExportTable() {
|
|
|
|
if (Config->Exports.empty())
|
|
|
|
return;
|
|
|
|
OutputSection *Sec = createSection(".edata");
|
2017-05-19 01:03:49 +08:00
|
|
|
for (Chunk *C : Edata.Chunks)
|
|
|
|
Sec->addChunk(C);
|
2015-06-17 08:16:33 +08:00
|
|
|
}
|
|
|
|
|
2015-05-29 03:09:30 +08:00
|
|
|
// The Windows loader doesn't seem to like empty sections,
|
|
|
|
// so we remove them if any.
|
|
|
|
void Writer::removeEmptySections() {
|
2015-06-04 00:44:00 +08:00
|
|
|
auto IsEmpty = [](OutputSection *S) { return S->getVirtualSize() == 0; };
|
2015-05-29 03:09:30 +08:00
|
|
|
OutputSections.erase(
|
|
|
|
std::remove_if(OutputSections.begin(), OutputSections.end(), IsEmpty),
|
|
|
|
OutputSections.end());
|
2015-07-09 00:37:50 +08:00
|
|
|
uint32_t Idx = 1;
|
|
|
|
for (OutputSection *Sec : OutputSections)
|
|
|
|
Sec->SectionIndex = Idx++;
|
|
|
|
}
|
|
|
|
|
2017-11-21 09:14:14 +08:00
|
|
|
// Appends a long name to the COFF string table and returns its offset.
// Offsets are relative to the start of the string table, whose first
// 4 bytes hold the table's own length.
size_t Writer::addEntryToStringTable(StringRef Str) {
  assert(Str.size() > COFF::NameSize);
  size_t Offset = Strtab.size() + 4; // +4 for the size field
  Strtab.insert(Strtab.end(), Str.begin(), Str.end());
  Strtab.push_back('\0'); // entries are NUL-terminated
  return Offset;
}
|
|
|
|
|
|
|
|
// Converts a linker symbol into a COFF symbol-table record, or returns None
// for symbols that should not (or cannot) appear in the output symbol table.
Optional<coff_symbol16> Writer::createSymbol(Defined *Def) {
  // Relative symbols are unrepresentable in a COFF symbol table.
  if (isa<DefinedSynthetic>(Def))
    return None;

  // Don't write dead symbols or symbols in codeview sections to the symbol
  // table.
  if (!Def->isLive())
    return None;
  if (auto *D = dyn_cast<DefinedRegular>(Def))
    if (D->getChunk()->isCodeView())
      return None;

  coff_symbol16 Sym;
  StringRef Name = Def->getName();
  if (Name.size() > COFF::NameSize) {
    // Long names are spilled to the string table; the short-name field then
    // holds zeroes plus the string-table offset.
    Sym.Name.Offset.Zeroes = 0;
    Sym.Name.Offset.Offset = addEntryToStringTable(Name);
  } else {
    memset(Sym.Name.ShortName, 0, COFF::NameSize);
    memcpy(Sym.Name.ShortName, Name.data(), Name.size());
  }

  // Symbols that came from object files keep their original type and
  // storage class; others get generic external-symbol attributes.
  if (auto *D = dyn_cast<DefinedCOFF>(Def)) {
    COFFSymbolRef Ref = D->getCOFFSymbol();
    Sym.Type = Ref.getType();
    Sym.StorageClass = Ref.getStorageClass();
  } else {
    Sym.Type = IMAGE_SYM_TYPE_NULL;
    Sym.StorageClass = IMAGE_SYM_CLASS_EXTERNAL;
  }
  Sym.NumberOfAuxSymbols = 0;

  switch (Def->kind()) {
  case Symbol::DefinedAbsoluteKind:
    Sym.Value = Def->getRVA();
    Sym.SectionNumber = IMAGE_SYM_ABSOLUTE;
    break;
  default: {
    // Find the output section containing this RVA: the last section whose
    // start RVA does not exceed it (OutputSections is sorted by RVA here).
    uint64_t RVA = Def->getRVA();
    OutputSection *Sec = nullptr;
    for (OutputSection *S : OutputSections) {
      if (S->getRVA() > RVA)
        break;
      Sec = S;
    }
    // Fix: previously Sec could be null here (symbol RVA below the first
    // section), causing a null dereference. Skip such symbols instead.
    if (!Sec)
      return None;
    Sym.Value = RVA - Sec->getRVA();
    Sym.SectionNumber = Sec->SectionIndex;
    break;
  }
  }
  return Sym;
}
|
|
|
|
|
|
|
|
void Writer::createSymbolAndStringTable() {
|
2015-07-09 00:37:50 +08:00
|
|
|
// Name field in the section table is 8 byte long. Longer names need
|
|
|
|
// to be written to the string table. First, construct string table.
|
|
|
|
for (OutputSection *Sec : OutputSections) {
|
|
|
|
StringRef Name = Sec->getName();
|
|
|
|
if (Name.size() <= COFF::NameSize)
|
|
|
|
continue;
|
2017-11-16 20:06:42 +08:00
|
|
|
// If a section isn't discardable (i.e. will be mapped at runtime),
|
|
|
|
// prefer a truncated section name over a long section name in
|
2018-01-28 02:17:08 +08:00
|
|
|
// the string table that is unavailable at runtime. Note that link.exe
|
|
|
|
// always truncates, even for discardable sections.
|
2017-11-16 20:06:42 +08:00
|
|
|
if ((Sec->getPermissions() & IMAGE_SCN_MEM_DISCARDABLE) == 0)
|
|
|
|
continue;
|
2017-11-21 09:14:14 +08:00
|
|
|
Sec->setStringTableOff(addEntryToStringTable(Name));
|
|
|
|
}
|
|
|
|
|
|
|
|
if (Config->DebugDwarf) {
|
|
|
|
for (ObjFile *File : ObjFile::Instances) {
|
|
|
|
for (Symbol *B : File->getSymbols()) {
|
|
|
|
auto *D = dyn_cast_or_null<Defined>(B);
|
|
|
|
if (!D || D->WrittenToSymtab)
|
|
|
|
continue;
|
|
|
|
D->WrittenToSymtab = true;
|
|
|
|
|
|
|
|
if (Optional<coff_symbol16> Sym = createSymbol(D))
|
|
|
|
OutputSymtab.push_back(*Sym);
|
|
|
|
}
|
|
|
|
}
|
2015-07-09 00:37:50 +08:00
|
|
|
}
|
|
|
|
|
2017-11-21 09:14:14 +08:00
|
|
|
if (OutputSymtab.empty() && Strtab.empty())
|
2017-11-18 03:51:20 +08:00
|
|
|
return;
|
2015-07-09 00:37:50 +08:00
|
|
|
|
2017-11-21 09:14:14 +08:00
|
|
|
// We position the symbol table to be adjacent to the end of the last section.
|
[LLD][COFF] Report error when file will exceed Windows maximum image size (4GB)
Patch by Colden Cullen.
Currently, when a large PE (>4 GiB) is to be produced, a crash occurs
because:
1. Calling setOffset with a number greater than UINT32_MAX causes the
PointerToRawData to overflow
2. When adding the symbol table to the end of the file, the last section's
offset was used to calculate file size. Because this had overflowed,
this number was too low, and the file created would not be large enough.
This lead to the actual crash I saw, which was a buffer overrun.
This change:
1. Adds comment to setOffset, clarifying that overflow can occur, but it's
somewhat safe because the error will be handled elsewhere
2. Adds file size check after all output data has been created This matches
the MS link.exe error, which looks prints as: "LINK : fatal error
LNK1248: image size (10000EFC9) exceeds maximum allowable size
(FFFFFFFF)"
3. Changes calculate of the symbol table offset to just use the existing
FileSize. This should match the previous calculations, but doesn't rely
on the use of a u32 that can overflow.
4. Removes trivial usage of a magic number that bugged me while I was
debugging the issue
I'm not sure how to add a test for this outside of adding 4GB of object
files to the repo. If there's an easier way, let me know and I'll be
happy to add a test.
Differential Revision: https://reviews.llvm.org/D42010
llvm-svn: 322605
2018-01-17 09:08:02 +08:00
|
|
|
uint64_t FileOff = FileSize;
|
2017-11-21 09:14:14 +08:00
|
|
|
PointerToSymbolTable = FileOff;
|
|
|
|
FileOff += OutputSymtab.size() * sizeof(coff_symbol16);
|
|
|
|
FileOff += 4 + Strtab.size();
|
|
|
|
FileSize = alignTo(FileOff, SectorSize);
|
2015-05-29 03:09:30 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// Visits all sections to assign incremental, non-overlapping RVAs and
|
|
|
|
// file offsets.
|
|
|
|
void Writer::assignAddresses() {
|
2015-07-08 09:45:29 +08:00
|
|
|
SizeOfHeaders = DOSStubSize + sizeof(PEMagic) + sizeof(coff_file_header) +
|
|
|
|
sizeof(data_directory) * NumberfOfDataDirectory +
|
|
|
|
sizeof(coff_section) * OutputSections.size();
|
2015-07-10 00:40:39 +08:00
|
|
|
SizeOfHeaders +=
|
|
|
|
Config->is64() ? sizeof(pe32plus_header) : sizeof(pe32_header);
|
2016-01-15 04:53:50 +08:00
|
|
|
SizeOfHeaders = alignTo(SizeOfHeaders, SectorSize);
|
[LLD][COFF] Report error when file will exceed Windows maximum image size (4GB)
Patch by Colden Cullen.
Currently, when a large PE (>4 GiB) is to be produced, a crash occurs
because:
1. Calling setOffset with a number greater than UINT32_MAX causes the
PointerToRawData to overflow
2. When adding the symbol table to the end of the file, the last section's
offset was used to calculate file size. Because this had overflowed,
this number was too low, and the file created would not be large enough.
This lead to the actual crash I saw, which was a buffer overrun.
This change:
1. Adds comment to setOffset, clarifying that overflow can occur, but it's
somewhat safe because the error will be handled elsewhere
2. Adds file size check after all output data has been created This matches
the MS link.exe error, which looks prints as: "LINK : fatal error
LNK1248: image size (10000EFC9) exceeds maximum allowable size
(FFFFFFFF)"
3. Changes calculate of the symbol table offset to just use the existing
FileSize. This should match the previous calculations, but doesn't rely
on the use of a u32 that can overflow.
4. Removes trivial usage of a magic number that bugged me while I was
debugging the issue
I'm not sure how to add a test for this outside of adding 4GB of object
files to the repo. If there's an easier way, let me know and I'll be
happy to add a test.
Differential Revision: https://reviews.llvm.org/D42010
llvm-svn: 322605
2018-01-17 09:08:02 +08:00
|
|
|
uint64_t RVA = PageSize; // The first page is kept unmapped.
|
2015-08-12 07:09:00 +08:00
|
|
|
FileSize = SizeOfHeaders;
|
2015-07-31 04:26:55 +08:00
|
|
|
// Move DISCARDABLE (or non-memory-mapped) sections to the end of file because
|
|
|
|
// the loader cannot handle holes.
|
|
|
|
std::stable_partition(
|
|
|
|
OutputSections.begin(), OutputSections.end(), [](OutputSection *S) {
|
|
|
|
return (S->getPermissions() & IMAGE_SCN_MEM_DISCARDABLE) == 0;
|
|
|
|
});
|
2015-06-04 00:44:00 +08:00
|
|
|
for (OutputSection *Sec : OutputSections) {
|
2015-06-15 09:23:58 +08:00
|
|
|
if (Sec->getName() == ".reloc")
|
|
|
|
addBaserels(Sec);
|
2015-05-29 03:09:30 +08:00
|
|
|
Sec->setRVA(RVA);
|
2015-08-12 07:09:00 +08:00
|
|
|
Sec->setFileOffset(FileSize);
|
2016-01-15 04:53:50 +08:00
|
|
|
RVA += alignTo(Sec->getVirtualSize(), PageSize);
|
|
|
|
FileSize += alignTo(Sec->getRawSize(), SectorSize);
|
2015-05-29 03:09:30 +08:00
|
|
|
}
|
2017-10-26 07:00:40 +08:00
|
|
|
SizeOfImage = alignTo(RVA, PageSize);
|
2015-05-29 03:09:30 +08:00
|
|
|
}
|
|
|
|
|
2015-07-08 09:45:29 +08:00
|
|
|
template <typename PEHeaderTy> void Writer::writeHeader() {
|
2015-05-29 03:09:30 +08:00
|
|
|
// Write DOS stub
|
|
|
|
uint8_t *Buf = Buffer->getBufferStart();
|
|
|
|
auto *DOS = reinterpret_cast<dos_header *>(Buf);
|
|
|
|
Buf += DOSStubSize;
|
|
|
|
DOS->Magic[0] = 'M';
|
|
|
|
DOS->Magic[1] = 'Z';
|
|
|
|
DOS->AddressOfRelocationTable = sizeof(dos_header);
|
|
|
|
DOS->AddressOfNewExeHeader = DOSStubSize;
|
|
|
|
|
|
|
|
// Write PE magic
|
|
|
|
memcpy(Buf, PEMagic, sizeof(PEMagic));
|
|
|
|
Buf += sizeof(PEMagic);
|
|
|
|
|
|
|
|
// Write COFF header
|
|
|
|
auto *COFF = reinterpret_cast<coff_file_header *>(Buf);
|
|
|
|
Buf += sizeof(*COFF);
|
2015-07-26 05:54:50 +08:00
|
|
|
COFF->Machine = Config->Machine;
|
2015-05-29 03:09:30 +08:00
|
|
|
COFF->NumberOfSections = OutputSections.size();
|
2015-06-15 11:03:23 +08:00
|
|
|
COFF->Characteristics = IMAGE_FILE_EXECUTABLE_IMAGE;
|
2015-07-28 11:12:00 +08:00
|
|
|
if (Config->LargeAddressAware)
|
2015-07-08 09:45:29 +08:00
|
|
|
COFF->Characteristics |= IMAGE_FILE_LARGE_ADDRESS_AWARE;
|
2015-07-28 11:12:00 +08:00
|
|
|
if (!Config->is64())
|
2015-07-09 09:25:49 +08:00
|
|
|
COFF->Characteristics |= IMAGE_FILE_32BIT_MACHINE;
|
2015-06-17 08:16:33 +08:00
|
|
|
if (Config->DLL)
|
|
|
|
COFF->Characteristics |= IMAGE_FILE_DLL;
|
2015-06-15 09:23:58 +08:00
|
|
|
if (!Config->Relocatable)
|
2015-06-15 11:03:23 +08:00
|
|
|
COFF->Characteristics |= IMAGE_FILE_RELOCS_STRIPPED;
|
2015-05-29 03:09:30 +08:00
|
|
|
COFF->SizeOfOptionalHeader =
|
2015-07-08 09:45:29 +08:00
|
|
|
sizeof(PEHeaderTy) + sizeof(data_directory) * NumberfOfDataDirectory;
|
2015-05-29 03:09:30 +08:00
|
|
|
|
|
|
|
// Write PE header
|
2015-07-08 09:45:29 +08:00
|
|
|
auto *PE = reinterpret_cast<PEHeaderTy *>(Buf);
|
2015-05-29 03:09:30 +08:00
|
|
|
Buf += sizeof(*PE);
|
2015-07-10 00:40:39 +08:00
|
|
|
PE->Magic = Config->is64() ? PE32Header::PE32_PLUS : PE32Header::PE32;
|
2017-06-22 00:42:08 +08:00
|
|
|
|
|
|
|
// If {Major,Minor}LinkerVersion is left at 0.0, then for some
|
|
|
|
// reason signing the resulting PE file with Authenticode produces a
|
|
|
|
// signature that fails to validate on Windows 7 (but is OK on 10).
|
|
|
|
// Set it to 14.0, which is what VS2015 outputs, and which avoids
|
|
|
|
// that problem.
|
|
|
|
PE->MajorLinkerVersion = 14;
|
|
|
|
PE->MinorLinkerVersion = 0;
|
|
|
|
|
2015-05-29 03:09:30 +08:00
|
|
|
PE->ImageBase = Config->ImageBase;
|
2015-08-12 07:09:00 +08:00
|
|
|
PE->SectionAlignment = PageSize;
|
|
|
|
PE->FileAlignment = SectorSize;
|
2015-05-30 00:28:29 +08:00
|
|
|
PE->MajorImageVersion = Config->MajorImageVersion;
|
|
|
|
PE->MinorImageVersion = Config->MinorImageVersion;
|
2015-05-30 00:34:31 +08:00
|
|
|
PE->MajorOperatingSystemVersion = Config->MajorOSVersion;
|
|
|
|
PE->MinorOperatingSystemVersion = Config->MinorOSVersion;
|
|
|
|
PE->MajorSubsystemVersion = Config->MajorOSVersion;
|
|
|
|
PE->MinorSubsystemVersion = Config->MinorOSVersion;
|
|
|
|
PE->Subsystem = Config->Subsystem;
|
2015-05-29 03:09:30 +08:00
|
|
|
PE->SizeOfImage = SizeOfImage;
|
|
|
|
PE->SizeOfHeaders = SizeOfHeaders;
|
2015-06-29 03:56:30 +08:00
|
|
|
if (!Config->NoEntry) {
|
2016-12-10 05:55:24 +08:00
|
|
|
Defined *Entry = cast<Defined>(Config->Entry);
|
2015-06-29 03:56:30 +08:00
|
|
|
PE->AddressOfEntryPoint = Entry->getRVA();
|
2015-07-25 10:25:14 +08:00
|
|
|
// Pointer to thumb code must have the LSB set, so adjust it.
|
2015-07-26 05:54:50 +08:00
|
|
|
if (Config->Machine == ARMNT)
|
2015-07-25 10:25:14 +08:00
|
|
|
PE->AddressOfEntryPoint |= 1;
|
2015-06-29 03:56:30 +08:00
|
|
|
}
|
2015-05-30 00:21:11 +08:00
|
|
|
PE->SizeOfStackReserve = Config->StackReserve;
|
|
|
|
PE->SizeOfStackCommit = Config->StackCommit;
|
2015-05-30 00:23:40 +08:00
|
|
|
PE->SizeOfHeapReserve = Config->HeapReserve;
|
|
|
|
PE->SizeOfHeapCommit = Config->HeapCommit;
|
2017-04-07 07:07:53 +08:00
|
|
|
if (Config->AppContainer)
|
|
|
|
PE->DLLCharacteristics |= IMAGE_DLL_CHARACTERISTICS_APPCONTAINER;
|
2015-06-17 07:13:00 +08:00
|
|
|
if (Config->DynamicBase)
|
|
|
|
PE->DLLCharacteristics |= IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE;
|
|
|
|
if (Config->HighEntropyVA)
|
|
|
|
PE->DLLCharacteristics |= IMAGE_DLL_CHARACTERISTICS_HIGH_ENTROPY_VA;
|
2017-09-16 06:49:13 +08:00
|
|
|
if (!Config->AllowBind)
|
|
|
|
PE->DLLCharacteristics |= IMAGE_DLL_CHARACTERISTICS_NO_BIND;
|
2015-06-17 07:13:00 +08:00
|
|
|
if (Config->NxCompat)
|
|
|
|
PE->DLLCharacteristics |= IMAGE_DLL_CHARACTERISTICS_NX_COMPAT;
|
|
|
|
if (!Config->AllowIsolation)
|
|
|
|
PE->DLLCharacteristics |= IMAGE_DLL_CHARACTERISTICS_NO_ISOLATION;
|
2018-02-14 04:32:53 +08:00
|
|
|
if (Config->GuardCF != GuardCFLevel::Off)
|
2018-02-06 09:58:26 +08:00
|
|
|
PE->DLLCharacteristics |= IMAGE_DLL_CHARACTERISTICS_GUARD_CF;
|
2017-12-16 04:53:03 +08:00
|
|
|
if (Config->Machine == I386 && !SEHTable &&
|
|
|
|
!Symtab->findUnderscore("_load_config_used"))
|
|
|
|
PE->DLLCharacteristics |= IMAGE_DLL_CHARACTERISTICS_NO_SEH;
|
2015-06-17 07:13:00 +08:00
|
|
|
if (Config->TerminalServerAware)
|
|
|
|
PE->DLLCharacteristics |= IMAGE_DLL_CHARACTERISTICS_TERMINAL_SERVER_AWARE;
|
2015-05-29 03:09:30 +08:00
|
|
|
PE->NumberOfRvaAndSize = NumberfOfDataDirectory;
|
|
|
|
if (OutputSection *Text = findSection(".text")) {
|
|
|
|
PE->BaseOfCode = Text->getRVA();
|
|
|
|
PE->SizeOfCode = Text->getRawSize();
|
|
|
|
}
|
|
|
|
PE->SizeOfInitializedData = getSizeOfInitializedData();
|
|
|
|
|
|
|
|
// Write data directory
|
2015-06-21 12:10:54 +08:00
|
|
|
auto *Dir = reinterpret_cast<data_directory *>(Buf);
|
|
|
|
Buf += sizeof(*Dir) * NumberfOfDataDirectory;
|
2015-06-17 08:16:33 +08:00
|
|
|
if (OutputSection *Sec = findSection(".edata")) {
|
2015-06-21 12:10:54 +08:00
|
|
|
Dir[EXPORT_TABLE].RelativeVirtualAddress = Sec->getRVA();
|
|
|
|
Dir[EXPORT_TABLE].Size = Sec->getVirtualSize();
|
2015-06-17 08:16:33 +08:00
|
|
|
}
|
2015-06-22 06:31:52 +08:00
|
|
|
if (!Idata.empty()) {
|
|
|
|
Dir[IMPORT_TABLE].RelativeVirtualAddress = Idata.getDirRVA();
|
|
|
|
Dir[IMPORT_TABLE].Size = Idata.getDirSize();
|
|
|
|
Dir[IAT].RelativeVirtualAddress = Idata.getIATRVA();
|
|
|
|
Dir[IAT].Size = Idata.getIATSize();
|
|
|
|
}
|
2015-06-15 05:50:50 +08:00
|
|
|
if (OutputSection *Sec = findSection(".rsrc")) {
|
2015-06-21 12:10:54 +08:00
|
|
|
Dir[RESOURCE_TABLE].RelativeVirtualAddress = Sec->getRVA();
|
|
|
|
Dir[RESOURCE_TABLE].Size = Sec->getVirtualSize();
|
2015-06-15 05:50:50 +08:00
|
|
|
}
|
2015-06-21 12:00:54 +08:00
|
|
|
if (OutputSection *Sec = findSection(".pdata")) {
|
2015-06-21 12:10:54 +08:00
|
|
|
Dir[EXCEPTION_TABLE].RelativeVirtualAddress = Sec->getRVA();
|
|
|
|
Dir[EXCEPTION_TABLE].Size = Sec->getVirtualSize();
|
2015-06-21 12:00:54 +08:00
|
|
|
}
|
2016-08-10 12:37:56 +08:00
|
|
|
if (OutputSection *Sec = findSection(".reloc")) {
|
|
|
|
Dir[BASE_RELOCATION_TABLE].RelativeVirtualAddress = Sec->getRVA();
|
|
|
|
Dir[BASE_RELOCATION_TABLE].Size = Sec->getVirtualSize();
|
|
|
|
}
|
2017-11-04 05:21:47 +08:00
|
|
|
if (Symbol *Sym = Symtab->findUnderscore("_tls_used")) {
|
2017-11-01 00:10:24 +08:00
|
|
|
if (Defined *B = dyn_cast<Defined>(Sym)) {
|
2015-07-06 09:48:01 +08:00
|
|
|
Dir[TLS_TABLE].RelativeVirtualAddress = B->getRVA();
|
2016-03-15 14:41:02 +08:00
|
|
|
Dir[TLS_TABLE].Size = Config->is64()
|
|
|
|
? sizeof(object::coff_tls_directory64)
|
|
|
|
: sizeof(object::coff_tls_directory32);
|
2015-07-06 09:48:01 +08:00
|
|
|
}
|
|
|
|
}
|
2016-08-30 05:20:46 +08:00
|
|
|
if (Config->Debug) {
|
|
|
|
Dir[DEBUG_DIRECTORY].RelativeVirtualAddress = DebugDirectory->getRVA();
|
|
|
|
Dir[DEBUG_DIRECTORY].Size = DebugDirectory->getSize();
|
|
|
|
}
|
2017-11-04 05:21:47 +08:00
|
|
|
if (Symbol *Sym = Symtab->findUnderscore("_load_config_used")) {
|
2017-11-01 00:10:24 +08:00
|
|
|
if (auto *B = dyn_cast<DefinedRegular>(Sym)) {
|
2016-03-15 17:48:27 +08:00
|
|
|
SectionChunk *SC = B->getChunk();
|
|
|
|
assert(B->getRVA() >= SC->getRVA());
|
|
|
|
uint64_t OffsetInChunk = B->getRVA() - SC->getRVA();
|
|
|
|
if (!SC->hasData() || OffsetInChunk + 4 > SC->getSize())
|
2016-07-15 07:37:14 +08:00
|
|
|
fatal("_load_config_used is malformed");
|
2016-03-15 17:48:27 +08:00
|
|
|
|
|
|
|
ArrayRef<uint8_t> SecContents = SC->getContents();
|
|
|
|
uint32_t LoadConfigSize =
|
|
|
|
*reinterpret_cast<const ulittle32_t *>(&SecContents[OffsetInChunk]);
|
|
|
|
if (OffsetInChunk + LoadConfigSize > SC->getSize())
|
2016-07-15 07:37:14 +08:00
|
|
|
fatal("_load_config_used is too large");
|
2015-07-17 02:30:35 +08:00
|
|
|
Dir[LOAD_CONFIG_TABLE].RelativeVirtualAddress = B->getRVA();
|
2016-03-15 17:48:27 +08:00
|
|
|
Dir[LOAD_CONFIG_TABLE].Size = LoadConfigSize;
|
2015-07-17 02:30:35 +08:00
|
|
|
}
|
|
|
|
}
|
2016-08-10 12:37:56 +08:00
|
|
|
if (!DelayIdata.empty()) {
|
|
|
|
Dir[DELAY_IMPORT_DESCRIPTOR].RelativeVirtualAddress =
|
|
|
|
DelayIdata.getDirRVA();
|
|
|
|
Dir[DELAY_IMPORT_DESCRIPTOR].Size = DelayIdata.getDirSize();
|
|
|
|
}
|
2015-05-29 03:09:30 +08:00
|
|
|
|
|
|
|
// Write section table
|
2015-06-04 00:44:00 +08:00
|
|
|
for (OutputSection *Sec : OutputSections) {
|
2015-06-07 07:19:38 +08:00
|
|
|
Sec->writeHeaderTo(Buf);
|
2015-05-31 03:09:50 +08:00
|
|
|
Buf += sizeof(coff_section);
|
|
|
|
}
|
2016-10-12 03:45:07 +08:00
|
|
|
SectionTable = ArrayRef<uint8_t>(
|
|
|
|
Buf - OutputSections.size() * sizeof(coff_section), Buf);
|
2015-05-31 03:09:50 +08:00
|
|
|
|
2017-11-21 09:14:14 +08:00
|
|
|
if (OutputSymtab.empty() && Strtab.empty())
|
2015-05-31 03:09:50 +08:00
|
|
|
return;
|
2015-07-09 00:37:50 +08:00
|
|
|
|
2017-11-21 09:14:14 +08:00
|
|
|
COFF->PointerToSymbolTable = PointerToSymbolTable;
|
|
|
|
uint32_t NumberOfSymbols = OutputSymtab.size();
|
|
|
|
COFF->NumberOfSymbols = NumberOfSymbols;
|
|
|
|
auto *SymbolTable = reinterpret_cast<coff_symbol16 *>(
|
|
|
|
Buffer->getBufferStart() + COFF->PointerToSymbolTable);
|
|
|
|
for (size_t I = 0; I != NumberOfSymbols; ++I)
|
|
|
|
SymbolTable[I] = OutputSymtab[I];
|
|
|
|
// Create the string table, it follows immediately after the symbol table.
|
|
|
|
// The first 4 bytes is length including itself.
|
|
|
|
Buf = reinterpret_cast<uint8_t *>(&SymbolTable[NumberOfSymbols]);
|
|
|
|
write32le(Buf, Strtab.size() + 4);
|
|
|
|
if (!Strtab.empty())
|
|
|
|
memcpy(Buf + 4, Strtab.data(), Strtab.size());
|
2015-05-29 03:09:30 +08:00
|
|
|
}
|
|
|
|
|
2015-08-06 22:58:50 +08:00
|
|
|
// Open the output file as a memory-mapped buffer of FileSize bytes, marked
// executable. CHECK aborts the link with the given message on failure.
void Writer::openFile(StringRef Path) {
  Buffer = CHECK(
      FileOutputBuffer::create(Path, FileSize, FileOutputBuffer::F_executable),
      "failed to open " + Path);
}
|
|
|
|
|
2017-11-08 07:24:10 +08:00
|
|
|
// Build the SafeSEH handler table from the .sxdata chunks of all input object
// files. If any object lacks safe-SEH info the whole table must be omitted
// (the image cannot be marked safe-SEH), so we bail out of the loop early and
// never emit the table symbols.
void Writer::createSEHTable(OutputSection *RData) {
  SymbolRVASet Handlers;
  for (ObjFile *File : ObjFile::Instances) {
    // FIXME: We should error here instead of earlier unless /safeseh:no was
    // passed.
    if (!File->hasSafeSEH())
      return;

    markSymbolsForRVATable(File, File->getSXDataChunks(), Handlers);
  }

  // Emits the table (and defines the two symbols) only if Handlers is
  // non-empty.
  maybeAddRVATable(RData, std::move(Handlers), "__safe_se_handler_table",
                   "__safe_se_handler_count");
}
|
|
|
|
|
|
|
|
// Add a symbol to an RVA set. Two symbols may have the same RVA, but an RVA set
|
|
|
|
// cannot contain duplicates. Therefore, the set is uniqued by Chunk and the
|
|
|
|
// symbol's offset into that Chunk.
|
|
|
|
static void addSymbolToRVASet(SymbolRVASet &RVASet, Defined *S) {
|
|
|
|
Chunk *C = S->getChunk();
|
|
|
|
if (auto *SC = dyn_cast<SectionChunk>(C))
|
|
|
|
C = SC->Repl; // Look through ICF replacement.
|
|
|
|
uint32_t Off = S->getRVA() - (C ? C->getRVA() : 0);
|
|
|
|
RVASet.insert({C, Off});
|
|
|
|
}
|
|
|
|
|
|
|
|
// Visit all relocations from all section contributions of this object file and
|
|
|
|
// mark the relocation target as address-taken.
|
|
|
|
static void markSymbolsWithRelocations(ObjFile *File,
|
|
|
|
SymbolRVASet &UsedSymbols) {
|
|
|
|
for (Chunk *C : File->getChunks()) {
|
|
|
|
// We only care about live section chunks. Common chunks and other chunks
|
|
|
|
// don't generally contain relocations.
|
|
|
|
SectionChunk *SC = dyn_cast<SectionChunk>(C);
|
|
|
|
if (!SC || !SC->isLive())
|
|
|
|
continue;
|
|
|
|
|
|
|
|
// Look for relocations in this section against symbols in executable output
|
|
|
|
// sections.
|
|
|
|
for (Symbol *Ref : SC->symbols()) {
|
|
|
|
// FIXME: Do further testing to see if the relocation type matters,
|
|
|
|
// especially for 32-bit where taking the address of something usually
|
|
|
|
// uses an absolute relocation instead of a relative one.
|
|
|
|
if (auto *D = dyn_cast_or_null<Defined>(Ref)) {
|
|
|
|
Chunk *RefChunk = D->getChunk();
|
|
|
|
OutputSection *OS = RefChunk ? RefChunk->getOutputSection() : nullptr;
|
|
|
|
if (OS && OS->getPermissions() & IMAGE_SCN_MEM_EXECUTE)
|
|
|
|
addSymbolToRVASet(UsedSymbols, D);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2017-11-08 07:24:10 +08:00
|
|
|
|
2018-02-06 09:58:26 +08:00
|
|
|
// Create the guard function id table. This is a table of RVAs of all
|
|
|
|
// address-taken functions. It is sorted and uniqued, just like the safe SEH
|
|
|
|
// table.
|
2018-02-14 04:32:53 +08:00
|
|
|
void Writer::createGuardCFTables(OutputSection *RData) {
|
2018-02-06 09:58:26 +08:00
|
|
|
SymbolRVASet AddressTakenSyms;
|
2018-02-14 04:32:53 +08:00
|
|
|
SymbolRVASet LongJmpTargets;
|
2017-11-08 07:24:10 +08:00
|
|
|
for (ObjFile *File : ObjFile::Instances) {
|
2018-02-14 04:32:53 +08:00
|
|
|
// If the object was compiled with /guard:cf, the address taken symbols
|
|
|
|
// are in .gfids$y sections, and the longjmp targets are in .gljmp$y
|
|
|
|
// sections. If the object was not compiled with /guard:cf, we assume there
|
|
|
|
// were no setjmp targets, and that all code symbols with relocations are
|
|
|
|
// possibly address-taken.
|
|
|
|
if (File->hasGuardCF()) {
|
2018-02-06 09:58:26 +08:00
|
|
|
markSymbolsForRVATable(File, File->getGuardFidChunks(), AddressTakenSyms);
|
2018-02-14 04:32:53 +08:00
|
|
|
markSymbolsForRVATable(File, File->getGuardLJmpChunks(), LongJmpTargets);
|
|
|
|
} else {
|
2018-02-06 09:58:26 +08:00
|
|
|
markSymbolsWithRelocations(File, AddressTakenSyms);
|
2018-02-14 04:32:53 +08:00
|
|
|
}
|
2018-02-06 09:58:26 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// Mark the image entry as address-taken.
|
|
|
|
if (Config->Entry)
|
|
|
|
addSymbolToRVASet(AddressTakenSyms, cast<Defined>(Config->Entry));
|
|
|
|
|
|
|
|
maybeAddRVATable(RData, std::move(AddressTakenSyms), "__guard_fids_table",
|
|
|
|
"__guard_fids_count");
|
|
|
|
|
2018-02-14 04:32:53 +08:00
|
|
|
// Add the longjmp target table unless the user told us not to.
|
|
|
|
if (Config->GuardCF == GuardCFLevel::Full)
|
|
|
|
maybeAddRVATable(RData, std::move(LongJmpTargets), "__guard_longjmp_table",
|
|
|
|
"__guard_longjmp_count");
|
|
|
|
|
2018-02-06 09:58:26 +08:00
|
|
|
// Set __guard_flags, which will be used in the load config to indicate that
|
|
|
|
// /guard:cf was enabled.
|
|
|
|
uint32_t GuardFlags = uint32_t(coff_guard_flags::CFInstrumented) |
|
|
|
|
uint32_t(coff_guard_flags::HasFidTable);
|
2018-02-14 04:32:53 +08:00
|
|
|
if (Config->GuardCF == GuardCFLevel::Full)
|
|
|
|
GuardFlags |= uint32_t(coff_guard_flags::HasLongJmpTable);
|
2018-02-06 09:58:26 +08:00
|
|
|
Symbol *FlagSym = Symtab->findUnderscore("__guard_flags");
|
|
|
|
cast<DefinedAbsolute>(FlagSym)->setVA(GuardFlags);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Take a list of input sections containing symbol table indices and add those
|
|
|
|
// symbols to an RVA table. The challenge is that symbol RVAs are not known and
|
|
|
|
// depend on the table size, so we can't directly build a set of integers.
|
|
|
|
void Writer::markSymbolsForRVATable(ObjFile *File,
|
|
|
|
ArrayRef<SectionChunk *> SymIdxChunks,
|
|
|
|
SymbolRVASet &TableSymbols) {
|
|
|
|
for (SectionChunk *C : SymIdxChunks) {
|
|
|
|
// Skip sections discarded by linker GC. This comes up when a .gfids section
|
|
|
|
// is associated with something like a vtable and the vtable is discarded.
|
|
|
|
// In this case, the associated gfids section is discarded, and we don't
|
|
|
|
// mark the virtual member functions as address-taken by the vtable.
|
|
|
|
if (!C->isLive())
|
|
|
|
continue;
|
|
|
|
|
|
|
|
// Validate that the contents look like symbol table indices.
|
|
|
|
ArrayRef<uint8_t> Data = C->getContents();
|
|
|
|
if (Data.size() % 4 != 0) {
|
|
|
|
warn("ignoring " + C->getSectionName() +
|
|
|
|
" symbol table index section in object " + toString(File));
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Read each symbol table index and check if that symbol was included in the
|
|
|
|
// final link. If so, add it to the table symbol set.
|
|
|
|
ArrayRef<ulittle32_t> SymIndices(
|
|
|
|
reinterpret_cast<const ulittle32_t *>(Data.data()), Data.size() / 4);
|
|
|
|
ArrayRef<Symbol *> ObjSymbols = File->getSymbols();
|
|
|
|
for (uint32_t SymIndex : SymIndices) {
|
|
|
|
if (SymIndex >= ObjSymbols.size()) {
|
|
|
|
warn("ignoring invalid symbol table index in section " +
|
|
|
|
C->getSectionName() + " in object " + toString(File));
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
if (Symbol *S = ObjSymbols[SymIndex]) {
|
|
|
|
if (S->isLive())
|
|
|
|
addSymbolToRVASet(TableSymbols, cast<Defined>(S));
|
|
|
|
}
|
|
|
|
}
|
2017-11-08 07:24:10 +08:00
|
|
|
}
|
2018-02-06 09:58:26 +08:00
|
|
|
}
|
2017-11-08 07:24:10 +08:00
|
|
|
|
2018-02-06 09:58:26 +08:00
|
|
|
// Replace the absolute table symbol with a synthetic symbol pointing to
|
|
|
|
// TableChunk so that we can emit base relocations for it and resolve section
|
|
|
|
// relative relocations.
|
|
|
|
void Writer::maybeAddRVATable(OutputSection *RData,
|
|
|
|
SymbolRVASet TableSymbols,
|
|
|
|
StringRef TableSym, StringRef CountSym) {
|
|
|
|
if (TableSymbols.empty())
|
2017-11-08 07:24:10 +08:00
|
|
|
return;
|
|
|
|
|
2018-02-06 09:58:26 +08:00
|
|
|
RVATableChunk *TableChunk = make<RVATableChunk>(std::move(TableSymbols));
|
|
|
|
RData->addChunk(TableChunk);
|
2017-11-08 07:24:10 +08:00
|
|
|
|
2018-02-06 09:58:26 +08:00
|
|
|
Symbol *T = Symtab->findUnderscore(TableSym);
|
|
|
|
Symbol *C = Symtab->findUnderscore(CountSym);
|
|
|
|
replaceSymbol<DefinedSynthetic>(T, T->getName(), TableChunk);
|
|
|
|
cast<DefinedAbsolute>(C)->setVA(TableChunk->getSize() / 4);
|
2015-07-25 07:51:14 +08:00
|
|
|
}
|
|
|
|
|
2016-06-20 11:39:39 +08:00
|
|
|
// Handles /section options to allow users to overwrite
|
|
|
|
// section attributes.
|
|
|
|
void Writer::setSectionPermissions() {
|
|
|
|
for (auto &P : Config->Section) {
|
|
|
|
StringRef Name = P.first;
|
|
|
|
uint32_t Perm = P.second;
|
|
|
|
if (auto *Sec = findSection(Name))
|
|
|
|
Sec->setPermissions(Perm);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-05-29 03:09:30 +08:00
|
|
|
// Write section contents to a mmap'ed file.
void Writer::writeSections() {
  // Record the number of sections to apply section index relocations
  // against absolute symbols. See applySecIdx in Chunks.cpp.
  DefinedAbsolute::NumOutputSections = OutputSections.size();

  uint8_t *Buf = Buffer->getBufferStart();
  for (OutputSection *Sec : OutputSections) {
    uint8_t *SecBuf = Buf + Sec->getFileOff();
    // Fill gaps between functions in .text with INT3 instructions
    // instead of leaving as NUL bytes (which can be interpreted as
    // ADD instructions).
    if (Sec->getPermissions() & IMAGE_SCN_CNT_CODE)
      memset(SecBuf, 0xCC, Sec->getRawSize());
    // Chunks are written in parallel; each chunk writes only to its own
    // region of SecBuf.
    for_each(parallel::par, Sec->getChunks().begin(), Sec->getChunks().end(),
             [&](Chunk *C) { C->writeTo(SecBuf); });
  }
}
|
|
|
|
|
[LLD COFF/PDB] Incrementally update the build id.
Previously, our algorithm to compute a build id involved hashing the
executable and storing that as the GUID in the CV Debug Record chunk,
and setting the age to 1.
This breaks down in one very obvious case: a user adds some newlines to
a file, rebuilds, but changes nothing else. This causes new line
information and new file checksums to get written to the PDB, meaning
that the debug info is different, but the generated code would be the
same, so we would write the same build over again with an age of 1.
Anyone using a symbol cache would have a problem now, because the
debugger would open the executable, look at the age and guid, find a
matching PDB in the symbol cache and then load it. It would never copy
the new PDB to the symbol cache.
This patch implements the canonical Windows algorithm for updating
a build id, which is to check the existing executable first, and
re-use an existing GUID while bumping the age if it already
exists.
Differential Revision: https://reviews.llvm.org/D36758
llvm-svn: 310961
2017-08-16 05:31:41 +08:00
|
|
|
// Fill in the CodeView build id (GUID + age) in the debug directory record.
void Writer::writeBuildId() {
  // If we're not writing a build id (e.g. because /debug is not specified),
  // then just return.
  if (!Config->Debug)
    return;

  assert(BuildId && "BuildId is not set!");

  // If the executable we are overwriting already had a build id, reuse its
  // GUID and just bump the age. This keeps debugger symbol caches valid
  // across incremental relinks (see the incremental-build-id change notes).
  if (PreviousBuildId.hasValue()) {
    *BuildId->BuildId = *PreviousBuildId;
    BuildId->BuildId->PDB70.Age = BuildId->BuildId->PDB70.Age + 1;
    return;
  }

  // Otherwise emit a fresh PDB70 record: age 1 and a random 16-byte GUID.
  BuildId->BuildId->Signature.CVSignature = OMF::Signature::PDB70;
  BuildId->BuildId->PDB70.Age = 1;
  llvm::getRandomBytes(BuildId->BuildId->PDB70.Signature, 16);
}
|
|
|
|
|
2015-06-21 12:00:54 +08:00
|
|
|
// Sort .pdata section contents according to PE/COFF spec 5.5.
|
|
|
|
void Writer::sortExceptionTable() {
|
2015-08-06 11:45:27 +08:00
|
|
|
OutputSection *Sec = findSection(".pdata");
|
|
|
|
if (!Sec)
|
|
|
|
return;
|
|
|
|
// We assume .pdata contains function table entries only.
|
|
|
|
uint8_t *Begin = Buffer->getBufferStart() + Sec->getFileOff();
|
|
|
|
uint8_t *End = Begin + Sec->getVirtualSize();
|
|
|
|
if (Config->Machine == AMD64) {
|
2015-06-21 12:00:54 +08:00
|
|
|
struct Entry { ulittle32_t Begin, End, Unwind; };
|
2017-05-10 09:16:22 +08:00
|
|
|
sort(parallel::par, (Entry *)Begin, (Entry *)End,
|
|
|
|
[](const Entry &A, const Entry &B) { return A.Begin < B.Begin; });
|
2015-08-06 11:45:27 +08:00
|
|
|
return;
|
|
|
|
}
|
2017-12-14 16:56:29 +08:00
|
|
|
if (Config->Machine == ARMNT || Config->Machine == ARM64) {
|
2015-08-06 11:45:27 +08:00
|
|
|
struct Entry { ulittle32_t Begin, Unwind; };
|
2017-05-10 09:16:22 +08:00
|
|
|
sort(parallel::par, (Entry *)Begin, (Entry *)End,
|
|
|
|
[](const Entry &A, const Entry &B) { return A.Begin < B.Begin; });
|
2015-08-06 11:45:27 +08:00
|
|
|
return;
|
2015-06-21 12:00:54 +08:00
|
|
|
}
|
2015-08-06 11:45:27 +08:00
|
|
|
errs() << "warning: don't know how to handle .pdata.\n";
|
2015-06-21 12:00:54 +08:00
|
|
|
}
|
|
|
|
|
2015-05-29 03:09:30 +08:00
|
|
|
// Look up an output section by name; returns nullptr if none exists.
OutputSection *Writer::findSection(StringRef Name) {
  auto It = llvm::find_if(OutputSections, [&](OutputSection *Sec) {
    return Sec->getName() == Name;
  });
  return It == OutputSections.end() ? nullptr : *It;
}
|
|
|
|
|
|
|
|
// Sum the raw sizes of all output sections containing initialized data, for
// the optional header's SizeOfInitializedData field.
uint32_t Writer::getSizeOfInitializedData() {
  uint32_t Sum = 0;
  for (OutputSection *Sec : OutputSections) {
    if (!(Sec->getPermissions() & IMAGE_SCN_CNT_INITIALIZED_DATA))
      continue;
    Sum += Sec->getRawSize();
  }
  return Sum;
}
|
|
|
|
|
|
|
|
// Returns an existing section or create a new one if not found.
|
|
|
|
OutputSection *Writer::createSection(StringRef Name) {
|
|
|
|
if (auto *Sec = findSection(Name))
|
|
|
|
return Sec;
|
2015-06-07 07:07:01 +08:00
|
|
|
const auto DATA = IMAGE_SCN_CNT_INITIALIZED_DATA;
|
|
|
|
const auto BSS = IMAGE_SCN_CNT_UNINITIALIZED_DATA;
|
|
|
|
const auto CODE = IMAGE_SCN_CNT_CODE;
|
2015-06-15 09:23:58 +08:00
|
|
|
const auto DISCARDABLE = IMAGE_SCN_MEM_DISCARDABLE;
|
2015-05-29 03:09:30 +08:00
|
|
|
const auto R = IMAGE_SCN_MEM_READ;
|
|
|
|
const auto W = IMAGE_SCN_MEM_WRITE;
|
2015-06-15 06:21:29 +08:00
|
|
|
const auto X = IMAGE_SCN_MEM_EXECUTE;
|
2015-06-07 07:07:01 +08:00
|
|
|
uint32_t Perms = StringSwitch<uint32_t>(Name)
|
|
|
|
.Case(".bss", BSS | R | W)
|
|
|
|
.Case(".data", DATA | R | W)
|
2016-10-01 06:01:25 +08:00
|
|
|
.Cases(".didat", ".edata", ".idata", ".rdata", DATA | R)
|
2015-06-15 09:23:58 +08:00
|
|
|
.Case(".reloc", DATA | DISCARDABLE | R)
|
2015-06-15 06:21:29 +08:00
|
|
|
.Case(".text", CODE | R | X)
|
2015-06-07 07:07:01 +08:00
|
|
|
.Default(0);
|
|
|
|
if (!Perms)
|
2015-05-29 03:09:30 +08:00
|
|
|
llvm_unreachable("unknown section name");
|
2016-12-09 10:13:12 +08:00
|
|
|
auto Sec = make<OutputSection>(Name);
|
2015-06-07 07:07:01 +08:00
|
|
|
Sec->addPermissions(Perms);
|
2015-06-04 00:44:00 +08:00
|
|
|
OutputSections.push_back(Sec);
|
2015-05-29 03:09:30 +08:00
|
|
|
return Sec;
|
|
|
|
}
|
|
|
|
|
2015-06-15 09:23:58 +08:00
|
|
|
// Dest is .reloc section. Add contents to that section.
|
|
|
|
void Writer::addBaserels(OutputSection *Dest) {
|
2015-07-25 09:44:32 +08:00
|
|
|
std::vector<Baserel> V;
|
2015-06-15 09:23:58 +08:00
|
|
|
for (OutputSection *Sec : OutputSections) {
|
|
|
|
if (Sec == Dest)
|
|
|
|
continue;
|
|
|
|
// Collect all locations for base relocations.
|
|
|
|
for (Chunk *C : Sec->getChunks())
|
2015-07-25 06:58:44 +08:00
|
|
|
C->getBaserels(&V);
|
2015-06-15 09:23:58 +08:00
|
|
|
// Add the addresses to .reloc section.
|
|
|
|
if (!V.empty())
|
|
|
|
addBaserelBlocks(Dest, V);
|
|
|
|
V.clear();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Add addresses to .reloc section. Note that addresses are grouped by page:
// each run of entries sharing the same page becomes one BaserelChunk.
// V must be non-empty (addBaserels guarantees this before calling).
void Writer::addBaserelBlocks(OutputSection *Dest, std::vector<Baserel> &V) {
  // Mask that rounds an RVA down to the start of its page.
  const uint32_t Mask = ~uint32_t(PageSize - 1);
  uint32_t Page = V[0].RVA & Mask;
  size_t I = 0, J = 1;
  // Scan forward; whenever the page changes, flush the run [I, J) as one
  // block for the previous page and start a new run.
  for (size_t E = V.size(); J < E; ++J) {
    uint32_t P = V[J].RVA & Mask;
    if (P == Page)
      continue;
    Dest->addChunk(make<BaserelChunk>(Page, &V[I], &V[0] + J));
    I = J;
    Page = P;
  }
  if (I == J)
    return;
  // Flush the final (possibly only) run.
  Dest->addChunk(make<BaserelChunk>(Page, &V[I], &V[0] + J));
}
|