//===- Writer.cpp ---------------------------------------------------------===//
//
// The LLVM Linker
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "Writer.h"
#include "Config.h"
#include "DLL.h"
#include "InputFiles.h"
#include "MapFile.h"
#include "PDB.h"
#include "SymbolTable.h"
#include "Symbols.h"
#include "lld/Common/ErrorHandler.h"
#include "lld/Common/Memory.h"
#include "lld/Common/Timer.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/BinaryStreamReader.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/FileOutputBuffer.h"
#include "llvm/Support/Parallel.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/RandomNumberGenerator.h"
#include "llvm/Support/xxhash.h"
#include <algorithm>
#include <cstdio>
#include <map>
#include <memory>
#include <utility>

using namespace llvm;
using namespace llvm::COFF;
using namespace llvm::object;
using namespace llvm::support;
using namespace llvm::support::endian;
using namespace lld;
using namespace lld::coff;

/* To re-generate DOSProgram:
$ cat > /tmp/DOSProgram.asm
org 0
        ; Copy cs to ds.
        push cs
        pop ds
        ; Point ds:dx at the $-terminated string.
        mov dx, str
        ; Int 21/AH=09h: Write string to standard output.
        mov ah, 0x9
        int 0x21
        ; Int 21/AH=4Ch: Exit with return code (in AL).
        mov ax, 0x4C01
        int 0x21
str:
        db 'This program cannot be run in DOS mode.$'
        align 8, db 0
$ nasm -fbin /tmp/DOSProgram.asm -o /tmp/DOSProgram.bin
$ xxd -i /tmp/DOSProgram.bin
*/
static unsigned char DOSProgram[] = {
    0x0e, 0x1f, 0xba, 0x0e, 0x00, 0xb4, 0x09, 0xcd, 0x21, 0xb8, 0x01, 0x4c,
    0xcd, 0x21, 0x54, 0x68, 0x69, 0x73, 0x20, 0x70, 0x72, 0x6f, 0x67, 0x72,
    0x61, 0x6d, 0x20, 0x63, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x20, 0x62, 0x65,
    0x20, 0x72, 0x75, 0x6e, 0x20, 0x69, 0x6e, 0x20, 0x44, 0x4f, 0x53, 0x20,
    0x6d, 0x6f, 0x64, 0x65, 0x2e, 0x24, 0x00, 0x00
};
static_assert(sizeof(DOSProgram) % 8 == 0,
              "DOSProgram size must be multiple of 8");

static const int SectorSize = 512;
static const int DOSStubSize = sizeof(dos_header) + sizeof(DOSProgram);
static_assert(DOSStubSize % 8 == 0, "DOSStub size must be multiple of 8");

static const int NumberfOfDataDirectory = 16;

namespace {
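
// The chunk that becomes the image's debug directory: one debug_directory
// entry is written per record chunk, and the location of each entry's
// TimeDateStamp field is remembered so setTimeDateStamp() can patch the
// stamps in after the directory has been written.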
class DebugDirectoryChunk : public Chunk {
public:
  DebugDirectoryChunk(const std::vector<Chunk *> &R) : Records(R) {}

  size_t getSize() const override {
    return Records.size() * sizeof(debug_directory);
  }

  void writeTo(uint8_t *B) const override {
    auto *D = reinterpret_cast<debug_directory *>(B + OutputSectionOff);

    for (const Chunk *Record : Records) {
      D->Characteristics = 0;
      D->TimeDateStamp = 0;
      D->MajorVersion = 0;
      D->MinorVersion = 0;
      D->Type = COFF::IMAGE_DEBUG_TYPE_CODEVIEW;
      D->SizeOfData = Record->getSize();
      D->AddressOfRawData = Record->getRVA();
      OutputSection *OS = Record->getOutputSection();
      uint64_t Offs = OS->getFileOff() + (Record->getRVA() - OS->getRVA());
      D->PointerToRawData = Offs;

      TimeDateStamps.push_back(&D->TimeDateStamp);
      ++D;
    }
  }

  void setTimeDateStamp(uint32_t TimeDateStamp) {
    for (support::ulittle32_t *TDS : TimeDateStamps)
      *TDS = TimeDateStamp;
  }

private:
  mutable std::vector<support::ulittle32_t *> TimeDateStamps;
  const std::vector<Chunk *> &Records;
};
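
// The chunk a CodeView debug directory entry points at: a codeview::DebugInfo
// record followed by the null-terminated PDB path. writeTo() saves the
// location of the record so the build id (GUID/age) can be backfilled later
// in Writer::writeBuildId.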
class CVDebugRecordChunk : public Chunk {
public:
  size_t getSize() const override {
    return sizeof(codeview::DebugInfo) + Config->PDBAltPath.size() + 1;
  }

  void writeTo(uint8_t *B) const override {
    // Save off the DebugInfo entry to backfill the file signature (build id)
    // in Writer::writeBuildId
    BuildId = reinterpret_cast<codeview::DebugInfo *>(B + OutputSectionOff);

    // variable sized field (PDB Path)
    char *P = reinterpret_cast<char *>(B + OutputSectionOff + sizeof(*BuildId));
    if (!Config->PDBAltPath.empty())
      memcpy(P, Config->PDBAltPath.data(), Config->PDBAltPath.size());
    P[Config->PDBAltPath.size()] = '\0';
  }

  mutable codeview::DebugInfo *BuildId = nullptr;
};

// The writer writes a SymbolTable result to a file.
class Writer {
public:
  Writer() : Buffer(errorHandler().OutputBuffer) {}
  void run();

private:
  void createSections();
  void createMiscChunks();
  void createImportTables();
  void createExportTable();
  void mergeSections();
  void assignAddresses();
  void removeEmptySections();
  void createSymbolAndStringTable();
  void openFile(StringRef OutputPath);
  template <typename PEHeaderTy> void writeHeader();
  void createSEHTable();
  void createGuardCFTables();
  void markSymbolsForRVATable(ObjFile *File,
                              ArrayRef<SectionChunk *> SymIdxChunks,
                              SymbolRVASet &TableSymbols);
  void maybeAddRVATable(SymbolRVASet TableSymbols, StringRef TableSym,
                        StringRef CountSym);
  void setSectionPermissions();
  void writeSections();
  void writeBuildId();
  void sortExceptionTable();

  llvm::Optional<coff_symbol16> createSymbol(Defined *D);
  size_t addEntryToStringTable(StringRef Str);

  OutputSection *findSection(StringRef Name);
  void addBaserels();
  void addBaserelBlocks(std::vector<Baserel> &V);

  uint32_t getSizeOfInitializedData();
  std::map<StringRef, std::vector<DefinedImportData *>> binImports();

  std::unique_ptr<FileOutputBuffer> &Buffer;
  std::vector<OutputSection *> OutputSections;
  std::vector<char> Strtab;
  std::vector<llvm::object::coff_symbol16> OutputSymtab;
  IdataContents Idata;
  DelayLoadContents DelayIdata;
  EdataContents Edata;
  bool SetNoSEHCharacteristic = false;

  DebugDirectoryChunk *DebugDirectory = nullptr;
  std::vector<Chunk *> DebugRecords;
  CVDebugRecordChunk *BuildId = nullptr;
  Optional<codeview::DebugInfo> PreviousBuildId;
  ArrayRef<uint8_t> SectionTable;

  uint64_t FileSize;
  uint32_t PointerToSymbolTable = 0;
  uint64_t SizeOfImage;
  uint64_t SizeOfHeaders;

  OutputSection *TextSec;
  OutputSection *RdataSec;
  OutputSection *DataSec;
  OutputSection *PdataSec;
  OutputSection *IdataSec;
  OutputSection *EdataSec;
  OutputSection *DidatSec;
  OutputSection *RsrcSec;
  OutputSection *RelocSec;

  // The first and last .pdata sections in the output file.
  //
  // We need to keep track of the location of .pdata in whichever section it
  // gets merged into so that we can sort its contents and emit a correct data
  // directory entry for the exception table. This is also the case for some
  // other sections (such as .edata) but because the contents of those sections
  // are entirely linker-generated we can keep track of their locations using
  // the chunks that the linker creates. All .pdata chunks come from input
  // files, so we need to keep track of them separately.
  Chunk *FirstPdata = nullptr;
  Chunk *LastPdata;
};

} // anonymous namespace

namespace lld {
namespace coff {

static Timer CodeLayoutTimer("Code Layout", Timer::root());
static Timer DiskCommitTimer("Commit Output File", Timer::root());

void writeResult() { Writer().run(); }

void OutputSection::addChunk(Chunk *C) {
  Chunks.push_back(C);
  C->setOutputSection(this);
}
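
// Set the section's memory characteristics: clear the bits covered by
// PermMask, then apply the requested characteristics C.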
void OutputSection::setPermissions(uint32_t C) {
  Header.Characteristics &= ~PermMask;
  Header.Characteristics |= C;
}
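
// Move all chunks from Other into this section, retargeting their output
// section pointers, and leave Other empty.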
void OutputSection::merge(OutputSection *Other) {
  for (Chunk *C : Other->Chunks)
    C->setOutputSection(this);
  Chunks.insert(Chunks.end(), Other->Chunks.begin(), Other->Chunks.end());
  Other->Chunks.clear();
}

// Write the section header to a given buffer.
void OutputSection::writeHeaderTo(uint8_t *Buf) {
  auto *Hdr = reinterpret_cast<coff_section *>(Buf);
  *Hdr = Header;
  if (StringTableOff) {
    // If name is too long, write offset into the string table as a name.
    sprintf(Hdr->Name, "/%d", StringTableOff);
  } else {
    assert(!Config->Debug || Name.size() <= COFF::NameSize ||
           (Hdr->Characteristics & IMAGE_SCN_MEM_DISCARDABLE) == 0);
    strncpy(Hdr->Name, Name.data(),
            std::min(Name.size(), (size_t)COFF::NameSize));
  }
}

} // namespace coff
} // namespace lld

// PDBs are matched against executables using a build id which consists of three
// components:
// 1. A 16-byte GUID
// 2. An age
// 3. A time stamp.
//
// Debuggers and symbol servers match executables against debug info by checking
// each of these components of the EXE/DLL against the corresponding value in
// the PDB and failing a match if any of the components differ. In the case of
// symbol servers, symbols are cached in a folder that is a function of the
// GUID. As a result, in order to avoid symbol cache pollution where every
// incremental build copies a new PDB to the symbol cache, we must try to re-use
// the existing GUID if one exists, but bump the age. This way the match will
// fail, so the symbol cache knows to use the new PDB, but the GUID matches, so
// it overwrites the existing item in the symbol cache rather than making a new
// one.
static Optional<codeview::DebugInfo> loadExistingBuildId(StringRef Path) {
  // We don't need to incrementally update a previous build id if we're not
  // writing codeview debug info.
  if (!Config->Debug)
    return None;

  auto ExpectedBinary = llvm::object::createBinary(Path);
  if (!ExpectedBinary) {
    consumeError(ExpectedBinary.takeError());
    return None;
  }

  auto Binary = std::move(*ExpectedBinary);
  if (!Binary.getBinary()->isCOFF())
    return None;

  std::error_code EC;
  COFFObjectFile File(Binary.getBinary()->getMemoryBufferRef(), EC);
  if (EC)
    return None;

  // If the machine of the binary we're outputting doesn't match the machine
  // of the existing binary, don't try to re-use the build id.
  if (File.is64() != Config->is64() || File.getMachine() != Config->Machine)
    return None;

  for (const auto &DebugDir : File.debug_directories()) {
    if (DebugDir.Type != IMAGE_DEBUG_TYPE_CODEVIEW)
      continue;

    const codeview::DebugInfo *ExistingDI = nullptr;
    StringRef PDBFileName;
    if (auto EC = File.getDebugPDBInfo(ExistingDI, PDBFileName)) {
      (void)EC;
      return None;
    }
    // We only support writing PDBs in v70 format. So if this is not a build
    // id that we recognize / support, ignore it.
    if (ExistingDI->Signature.CVSignature != OMF::Signature::PDB70)
      return None;
    return *ExistingDI;
  }
  return None;
}

// The main function of the writer.
void Writer::run() {
  ScopedTimer T1(CodeLayoutTimer);

  createSections();
  createMiscChunks();
  createImportTables();
  createExportTable();
  mergeSections();
  assignAddresses();
  removeEmptySections();
  setSectionPermissions();
  createSymbolAndStringTable();

  if (FileSize > UINT32_MAX)
    fatal("image size (" + Twine(FileSize) + ") " +
          "exceeds maximum allowable size (" + Twine(UINT32_MAX) + ")");

  // We must do this before opening the output file, as it depends on being able
  // to read the contents of the existing output file.
  PreviousBuildId = loadExistingBuildId(Config->OutputFile);
  openFile(Config->OutputFile);
  if (Config->is64()) {
    writeHeader<pe32plus_header>();
  } else {
    writeHeader<pe32_header>();
  }
  writeSections();
  sortExceptionTable();
  writeBuildId();

  T1.stop();

  if (!Config->PDBPath.empty() && Config->Debug) {
    assert(BuildId);
    createPDB(Symtab, OutputSections, SectionTable, *BuildId->BuildId);
  }

  writeMapFile(OutputSections);

  ScopedTimer T2(DiskCommitTimer);
  if (auto E = Buffer->commit())
    fatal("failed to write the output file: " + toString(std::move(E)));
}
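
// Compute the output section name for an input section: the name is cut at
// the first '$' (so ".text$foo" contributes to ".text") and, for MinGW-style
// names like ".ctors.01234", at a later '.' as well.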
static StringRef getOutputSectionName(StringRef Name) {
  StringRef S = Name.split('$').first;

  // Treat a later period as a separator for MinGW, for sections like
  // ".ctors.01234".
  return S.substr(0, S.find('.', 1));
}

// For /order.
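// Chunks whose associated symbol (Sec->Sym) is named in the /order file are
// sorted by the priority recorded in Config->Order; all other chunks keep
// priority 0 and, thanks to the stable sort, their original relative order.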
static void sortBySectionOrder(std::vector<Chunk *> &Chunks) {
  auto GetPriority = [](const Chunk *C) {
    if (auto *Sec = dyn_cast<SectionChunk>(C))
      if (Sec->Sym)
        return Config->Order.lookup(Sec->Sym->getName());
    return 0;
  };

  std::stable_sort(Chunks.begin(), Chunks.end(),
                   [=](const Chunk *A, const Chunk *B) {
                     return GetPriority(A) < GetPriority(B);
                   });
}

// Create output section objects and add them to OutputSections.
void Writer::createSections() {
  // First, create the builtin sections.
  const uint32_t DATA = IMAGE_SCN_CNT_INITIALIZED_DATA;
  const uint32_t BSS = IMAGE_SCN_CNT_UNINITIALIZED_DATA;
  const uint32_t CODE = IMAGE_SCN_CNT_CODE;
  const uint32_t DISCARDABLE = IMAGE_SCN_MEM_DISCARDABLE;
  const uint32_t R = IMAGE_SCN_MEM_READ;
  const uint32_t W = IMAGE_SCN_MEM_WRITE;
  const uint32_t X = IMAGE_SCN_MEM_EXECUTE;

  SmallDenseMap<std::pair<StringRef, uint32_t>, OutputSection *> Sections;
  auto CreateSection = [&](StringRef Name, uint32_t OutChars) {
    OutputSection *&Sec = Sections[{Name, OutChars}];
    if (!Sec) {
      Sec = make<OutputSection>(Name, OutChars);
      OutputSections.push_back(Sec);
    }
    return Sec;
  };

  // Try to match the section order used by link.exe.
  TextSec = CreateSection(".text", CODE | R | X);
  CreateSection(".bss", BSS | R | W);
  RdataSec = CreateSection(".rdata", DATA | R);
  DataSec = CreateSection(".data", DATA | R | W);
  PdataSec = CreateSection(".pdata", DATA | R);
  IdataSec = CreateSection(".idata", DATA | R);
  EdataSec = CreateSection(".edata", DATA | R);
  DidatSec = CreateSection(".didat", DATA | R);
  RsrcSec = CreateSection(".rsrc", DATA | R);
  RelocSec = CreateSection(".reloc", DATA | DISCARDABLE | R);

  // Then bin chunks by name and output characteristics.
  std::map<std::pair<StringRef, uint32_t>, std::vector<Chunk *>> Map;
  for (Chunk *C : Symtab->getChunks()) {
    auto *SC = dyn_cast<SectionChunk>(C);
    if (SC && !SC->isLive()) {
      if (Config->Verbose)
        SC->printDiscardedMessage();
      continue;
    }
    Map[{C->getSectionName(), C->getOutputCharacteristics()}].push_back(C);
  }

  // Process an /order option.
  if (!Config->Order.empty())
    for (auto &Pair : Map)
      sortBySectionOrder(Pair.second);

  // Then create an OutputSection for each section.
  // '$' and all following characters in input section names are
  // discarded when determining output section. So, .text$foo
  // contributes to .text, for example. See PE/COFF spec 3.2.
  for (auto Pair : Map) {
    StringRef Name = getOutputSectionName(Pair.first.first);
    uint32_t OutChars = Pair.first.second;

    // In link.exe, there is a special case for the I386 target where .CRT
    // sections are treated as if they have output characteristics DATA | R if
    // their characteristics are DATA | R | W. This implements the same special
    // case for all architectures.
    if (Name == ".CRT")
      OutChars = DATA | R;

    OutputSection *Sec = CreateSection(Name, OutChars);
    std::vector<Chunk *> &Chunks = Pair.second;
    for (Chunk *C : Chunks)
      Sec->addChunk(C);
  }

  // Finally, move some output sections to the end.
  auto SectionOrder = [&](OutputSection *S) {
    // .reloc should come last of all since it refers to RVAs of data in the
    // previous sections.
    if (S == RelocSec)
      return 3;
    // Move DISCARDABLE (or non-memory-mapped) sections to the end of file because
    // the loader cannot handle holes.
    if (S->Header.Characteristics & IMAGE_SCN_MEM_DISCARDABLE)
      return 2;
    // .rsrc should come at the end of the non-discardable sections because its
    // size may change by the Win32 UpdateResources() function, causing
    // subsequent sections to move (see https://crbug.com/827082).
    if (S == RsrcSec)
      return 1;
    return 0;
  };
  std::stable_sort(OutputSections.begin(), OutputSections.end(),
                   [&](OutputSection *S, OutputSection *T) {
                     return SectionOrder(S) < SectionOrder(T);
                   });
}
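
// Create chunks whose contents the linker generates itself rather than
// copying from an input section: merged chunks, thunks for locally
// dll-imported symbols, the debug directory and CodeView record, and, when
// enabled, the SEH and /guard:cf tables.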
void Writer::createMiscChunks() {
  for (auto &P : MergeChunk::Instances)
    RdataSec->addChunk(P.second);

  // Create thunks for locally-dllimported symbols.
  if (!Symtab->LocalImportChunks.empty()) {
    for (Chunk *C : Symtab->LocalImportChunks)
      RdataSec->addChunk(C);
  }

  // Create Debug Information Chunks
  if (Config->Debug) {
    DebugDirectory = make<DebugDirectoryChunk>(DebugRecords);

    // Make a CVDebugRecordChunk even when /DEBUG:CV is not specified. We
    // output a PDB no matter what, and this chunk provides the only means of
    // allowing a debugger to match a PDB and an executable. So we need it even
    // if we're ultimately not going to write CodeView data to the PDB.
    auto *CVChunk = make<CVDebugRecordChunk>();
    BuildId = CVChunk;
    DebugRecords.push_back(CVChunk);

    RdataSec->addChunk(DebugDirectory);
    for (Chunk *C : DebugRecords)
      RdataSec->addChunk(C);
  }

  // Create SEH table. x86-only.
  if (Config->Machine == I386)
    createSEHTable();

  // Create /guard:cf tables if requested.
  if (Config->GuardCF != GuardCFLevel::Off)
    createGuardCFTables();
}

// Create .idata section for the DLL-imported symbol table.
// The format of this section is inherently Windows-specific.
// The IdataContents class abstracts away the details for us,
// so we just let it create chunks and add them to the section.
void Writer::createImportTables() {
  if (ImportFile::Instances.empty())
    return;

  // Initialize DLLOrder so that import entries are ordered in
  // the same order as in the command line. (That affects DLL
  // initialization order, and this ordering is MSVC-compatible.)
  for (ImportFile *File : ImportFile::Instances) {
    if (!File->Live)
      continue;

    std::string DLL = StringRef(File->DLLName).lower();
    if (Config->DLLOrder.count(DLL) == 0)
      Config->DLLOrder[DLL] = Config->DLLOrder.size();
  }

  for (ImportFile *File : ImportFile::Instances) {
    if (!File->Live)
      continue;

    if (DefinedImportThunk *Thunk = File->ThunkSym)
      TextSec->addChunk(Thunk->getChunk());

    if (Config->DelayLoads.count(StringRef(File->DLLName).lower())) {
      if (!File->ThunkSym)
        fatal("cannot delay-load " + toString(File) +
              " due to import of data: " + toString(*File->ImpSym));
      DelayIdata.add(File->ImpSym);
    } else {
      Idata.add(File->ImpSym);
    }
  }

  if (!Idata.empty())
    for (Chunk *C : Idata.getChunks())
      IdataSec->addChunk(C);

  if (!DelayIdata.empty()) {
    Defined *Helper = cast<Defined>(Config->DelayLoadHelper);
    DelayIdata.create(Helper);
    for (Chunk *C : DelayIdata.getChunks())
      DidatSec->addChunk(C);
    for (Chunk *C : DelayIdata.getDataChunks())
      DataSec->addChunk(C);
    for (Chunk *C : DelayIdata.getCodeChunks())
      TextSec->addChunk(C);
  }
}
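
// Add the export table (.edata) chunks to the output if anything is exported.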
void Writer::createExportTable() {
  if (Config->Exports.empty())
    return;
  for (Chunk *C : Edata.Chunks)
    EdataSec->addChunk(C);
}

// The Windows loader doesn't seem to like empty sections,
// so we remove them if any.
void Writer::removeEmptySections() {
  auto IsEmpty = [](OutputSection *S) { return S->getVirtualSize() == 0; };
  OutputSections.erase(
      std::remove_if(OutputSections.begin(), OutputSections.end(), IsEmpty),
      OutputSections.end());
  uint32_t Idx = 1;
  for (OutputSection *Sec : OutputSections)
    Sec->SectionIndex = Idx++;
}

size_t Writer::addEntryToStringTable(StringRef Str) {
  assert(Str.size() > COFF::NameSize);
  size_t OffsetOfEntry = Strtab.size() + 4; // +4 for the size field
  Strtab.insert(Strtab.end(), Str.begin(), Str.end());
  Strtab.push_back('\0');
  return OffsetOfEntry;
}
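
// Create a COFF symbol table record for a defined symbol, or return None for
// symbols that are not representable or not wanted in the output symbol table
// (synthetic symbols, dead symbols, and symbols defined in CodeView sections).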
Optional<coff_symbol16> Writer::createSymbol(Defined *Def) {
  // Relative symbols are unrepresentable in a COFF symbol table.
  if (isa<DefinedSynthetic>(Def))
    return None;

  // Don't write dead symbols or symbols in codeview sections to the symbol
  // table.
  if (!Def->isLive())
    return None;
  if (auto *D = dyn_cast<DefinedRegular>(Def))
    if (D->getChunk()->isCodeView())
      return None;

  coff_symbol16 Sym;
  StringRef Name = Def->getName();
  if (Name.size() > COFF::NameSize) {
    Sym.Name.Offset.Zeroes = 0;
    Sym.Name.Offset.Offset = addEntryToStringTable(Name);
  } else {
    memset(Sym.Name.ShortName, 0, COFF::NameSize);
    memcpy(Sym.Name.ShortName, Name.data(), Name.size());
  }

  if (auto *D = dyn_cast<DefinedCOFF>(Def)) {
    COFFSymbolRef Ref = D->getCOFFSymbol();
    Sym.Type = Ref.getType();
    Sym.StorageClass = Ref.getStorageClass();
  } else {
    Sym.Type = IMAGE_SYM_TYPE_NULL;
    Sym.StorageClass = IMAGE_SYM_CLASS_EXTERNAL;
  }
  Sym.NumberOfAuxSymbols = 0;

  switch (Def->kind()) {
  case Symbol::DefinedAbsoluteKind:
    Sym.Value = Def->getRVA();
    Sym.SectionNumber = IMAGE_SYM_ABSOLUTE;
    break;
  default: {
    uint64_t RVA = Def->getRVA();
    OutputSection *Sec = nullptr;
    for (OutputSection *S : OutputSections) {
      if (S->getRVA() > RVA)
        break;
      Sec = S;
    }
    Sym.Value = RVA - Sec->getRVA();
    Sym.SectionNumber = Sec->SectionIndex;
    break;
  }
  }
  return Sym;
}

void Writer::createSymbolAndStringTable() {
  // PE/COFF images are limited to 8 byte section names. Longer names can be
  // supported by writing a non-standard string table, but this string table is
  // not mapped at runtime and the long names will therefore be inaccessible.
  // link.exe always truncates section names to 8 bytes, whereas binutils always
  // preserves long section names via the string table. LLD adopts a hybrid
  // solution where discardable sections have long names preserved and
  // non-discardable sections have their names truncated, to ensure that any
  // section which is mapped at runtime also has its name mapped at runtime.
  for (OutputSection *Sec : OutputSections) {
    if (Sec->Name.size() <= COFF::NameSize)
      continue;
    if ((Sec->Header.Characteristics & IMAGE_SCN_MEM_DISCARDABLE) == 0)
      continue;
    Sec->setStringTableOff(addEntryToStringTable(Sec->Name));
  }

  if (Config->DebugDwarf) {
    for (ObjFile *File : ObjFile::Instances) {
      for (Symbol *B : File->getSymbols()) {
        auto *D = dyn_cast_or_null<Defined>(B);
        if (!D || D->WrittenToSymtab)
          continue;
        D->WrittenToSymtab = true;

        if (Optional<coff_symbol16> Sym = createSymbol(D))
          OutputSymtab.push_back(*Sym);
      }
    }
  }

  if (OutputSymtab.empty() && Strtab.empty())
    return;

  // We position the symbol table to be adjacent to the end of the last section.
  uint64_t FileOff = FileSize;
  PointerToSymbolTable = FileOff;
  FileOff += OutputSymtab.size() * sizeof(coff_symbol16);
  FileOff += 4 + Strtab.size();
  FileSize = alignTo(FileOff, SectorSize);
}
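
// Remember the first and last .pdata chunks (so the exception table can be
// sorted and described in the data directory later), then process the /merge
// options: the chunks of each source section are moved into the target
// section, following chains of /merge entries and diagnosing cycles.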
void Writer::mergeSections() {
  if (!PdataSec->getChunks().empty()) {
    FirstPdata = PdataSec->getChunks().front();
    LastPdata = PdataSec->getChunks().back();
  }

  for (auto &P : Config->Merge) {
    StringRef ToName = P.second;
    if (P.first == ToName)
      continue;
    StringSet<> Names;
    while (1) {
      if (!Names.insert(ToName).second)
        fatal("/merge: cycle found for section '" + P.first + "'");
      auto I = Config->Merge.find(ToName);
      if (I == Config->Merge.end())
        break;
      ToName = I->second;
    }
    OutputSection *From = findSection(P.first);
    OutputSection *To = findSection(ToName);
    if (!From)
      continue;
    if (!To) {
      From->Name = ToName;
      continue;
    }
    To->merge(From);
  }
}

// Visits all sections to assign incremental, non-overlapping RVAs and
// file offsets.
void Writer::assignAddresses() {
  SizeOfHeaders = DOSStubSize + sizeof(PEMagic) + sizeof(coff_file_header) +
                  sizeof(data_directory) * NumberfOfDataDirectory +
                  sizeof(coff_section) * OutputSections.size();
  SizeOfHeaders +=
      Config->is64() ? sizeof(pe32plus_header) : sizeof(pe32_header);
  SizeOfHeaders = alignTo(SizeOfHeaders, SectorSize);
  uint64_t RVA = PageSize; // The first page is kept unmapped.
  FileSize = SizeOfHeaders;

  for (OutputSection *Sec : OutputSections) {
    if (Sec == RelocSec)
      addBaserels();
    uint64_t RawSize = 0, VirtualSize = 0;
    Sec->Header.VirtualAddress = RVA;
    for (Chunk *C : Sec->getChunks()) {
      VirtualSize = alignTo(VirtualSize, C->Alignment);
      C->setRVA(RVA + VirtualSize);
      C->OutputSectionOff = VirtualSize;
      C->finalizeContents();
      VirtualSize += C->getSize();
      if (C->hasData())
        RawSize = alignTo(VirtualSize, SectorSize);
    }
    if (VirtualSize > UINT32_MAX)
      error("section larger than 4 GiB: " + Sec->Name);
    Sec->Header.VirtualSize = VirtualSize;
    Sec->Header.SizeOfRawData = RawSize;
    if (RawSize != 0)
      Sec->Header.PointerToRawData = FileSize;
    RVA += alignTo(VirtualSize, PageSize);
    FileSize += alignTo(RawSize, SectorSize);
  }
  SizeOfImage = alignTo(RVA, PageSize);
}
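
// Write the per-file headers at the start of the output buffer: the MS-DOS
// stub, the PE magic, the COFF file header, and the PE (optional) header.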
template <typename PEHeaderTy> void Writer::writeHeader() {
  // Write DOS header. For backwards compatibility, the first part of a PE/COFF
  // executable consists of an MS-DOS MZ executable. If the executable is run
  // under DOS, that program gets run (usually to just print an error message).
  // When run under Windows, the loader looks at AddressOfNewExeHeader and uses
  // the PE header instead.
  uint8_t *Buf = Buffer->getBufferStart();
  auto *DOS = reinterpret_cast<dos_header *>(Buf);
  Buf += sizeof(dos_header);
  DOS->Magic[0] = 'M';
  DOS->Magic[1] = 'Z';
  DOS->UsedBytesInTheLastPage = DOSStubSize % 512;
  DOS->FileSizeInPages = divideCeil(DOSStubSize, 512);
  DOS->HeaderSizeInParagraphs = sizeof(dos_header) / 16;

  DOS->AddressOfRelocationTable = sizeof(dos_header);
  DOS->AddressOfNewExeHeader = DOSStubSize;

  // Write DOS program.
  memcpy(Buf, DOSProgram, sizeof(DOSProgram));
  Buf += sizeof(DOSProgram);

  // Write PE magic
  memcpy(Buf, PEMagic, sizeof(PEMagic));
  Buf += sizeof(PEMagic);

  // Write COFF header
  auto *COFF = reinterpret_cast<coff_file_header *>(Buf);
  Buf += sizeof(*COFF);
  COFF->Machine = Config->Machine;
  COFF->NumberOfSections = OutputSections.size();
  COFF->Characteristics = IMAGE_FILE_EXECUTABLE_IMAGE;
  if (Config->LargeAddressAware)
    COFF->Characteristics |= IMAGE_FILE_LARGE_ADDRESS_AWARE;
  if (!Config->is64())
    COFF->Characteristics |= IMAGE_FILE_32BIT_MACHINE;
  if (Config->DLL)
    COFF->Characteristics |= IMAGE_FILE_DLL;
  if (!Config->Relocatable)
    COFF->Characteristics |= IMAGE_FILE_RELOCS_STRIPPED;
  COFF->SizeOfOptionalHeader =
      sizeof(PEHeaderTy) + sizeof(data_directory) * NumberfOfDataDirectory;

  // Write PE header
  auto *PE = reinterpret_cast<PEHeaderTy *>(Buf);
  Buf += sizeof(*PE);
  PE->Magic = Config->is64() ? PE32Header::PE32_PLUS : PE32Header::PE32;

  // If {Major,Minor}LinkerVersion is left at 0.0, then for some
  // reason signing the resulting PE file with Authenticode produces a
  // signature that fails to validate on Windows 7 (but is OK on 10).
  // Set it to 14.0, which is what VS2015 outputs, and which avoids
  // that problem.
  PE->MajorLinkerVersion = 14;
  PE->MinorLinkerVersion = 0;

  PE->ImageBase = Config->ImageBase;
  PE->SectionAlignment = PageSize;
  PE->FileAlignment = SectorSize;
  PE->MajorImageVersion = Config->MajorImageVersion;
  PE->MinorImageVersion = Config->MinorImageVersion;
  PE->MajorOperatingSystemVersion = Config->MajorOSVersion;
  PE->MinorOperatingSystemVersion = Config->MinorOSVersion;
  PE->MajorSubsystemVersion = Config->MajorOSVersion;
  PE->MinorSubsystemVersion = Config->MinorOSVersion;
  PE->Subsystem = Config->Subsystem;
  PE->SizeOfImage = SizeOfImage;
  PE->SizeOfHeaders = SizeOfHeaders;
  if (!Config->NoEntry) {
    Defined *Entry = cast<Defined>(Config->Entry);
    PE->AddressOfEntryPoint = Entry->getRVA();
    // Pointer to thumb code must have the LSB set, so adjust it.
    if (Config->Machine == ARMNT)
      PE->AddressOfEntryPoint |= 1;
  }
  PE->SizeOfStackReserve = Config->StackReserve;
  PE->SizeOfStackCommit = Config->StackCommit;
  PE->SizeOfHeapReserve = Config->HeapReserve;
  PE->SizeOfHeapCommit = Config->HeapCommit;
  if (Config->AppContainer)
    PE->DLLCharacteristics |= IMAGE_DLL_CHARACTERISTICS_APPCONTAINER;
  if (Config->DynamicBase)
    PE->DLLCharacteristics |= IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE;
  if (Config->HighEntropyVA)
    PE->DLLCharacteristics |= IMAGE_DLL_CHARACTERISTICS_HIGH_ENTROPY_VA;
  if (!Config->AllowBind)
    PE->DLLCharacteristics |= IMAGE_DLL_CHARACTERISTICS_NO_BIND;
  if (Config->NxCompat)
    PE->DLLCharacteristics |= IMAGE_DLL_CHARACTERISTICS_NX_COMPAT;
  if (!Config->AllowIsolation)
    PE->DLLCharacteristics |= IMAGE_DLL_CHARACTERISTICS_NO_ISOLATION;
  if (Config->GuardCF != GuardCFLevel::Off)
    PE->DLLCharacteristics |= IMAGE_DLL_CHARACTERISTICS_GUARD_CF;
  if (SetNoSEHCharacteristic)
    PE->DLLCharacteristics |= IMAGE_DLL_CHARACTERISTICS_NO_SEH;
  if (Config->TerminalServerAware)
    PE->DLLCharacteristics |= IMAGE_DLL_CHARACTERISTICS_TERMINAL_SERVER_AWARE;
  PE->NumberOfRvaAndSize = NumberfOfDataDirectory;
  if (TextSec->getVirtualSize()) {
    PE->BaseOfCode = TextSec->getRVA();
    PE->SizeOfCode = TextSec->getRawSize();
  }
  PE->SizeOfInitializedData = getSizeOfInitializedData();

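  // The data directory is a fixed-size array that immediately follows the
  // optional header; each slot populated below tells the loader where to find
  // one well-known table (exports, imports, resources, exceptions, and so on).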
  // Write data directory
  auto *Dir = reinterpret_cast<data_directory *>(Buf);
  Buf += sizeof(*Dir) * NumberfOfDataDirectory;
  if (!Config->Exports.empty()) {
    Dir[EXPORT_TABLE].RelativeVirtualAddress = Edata.getRVA();
    Dir[EXPORT_TABLE].Size = Edata.getSize();
  }
  if (!Idata.empty()) {
    Dir[IMPORT_TABLE].RelativeVirtualAddress = Idata.getDirRVA();
    Dir[IMPORT_TABLE].Size = Idata.getDirSize();
    Dir[IAT].RelativeVirtualAddress = Idata.getIATRVA();
    Dir[IAT].Size = Idata.getIATSize();
  }
  if (RsrcSec->getVirtualSize()) {
    Dir[RESOURCE_TABLE].RelativeVirtualAddress = RsrcSec->getRVA();
    Dir[RESOURCE_TABLE].Size = RsrcSec->getVirtualSize();
  }
  if (FirstPdata) {
    Dir[EXCEPTION_TABLE].RelativeVirtualAddress = FirstPdata->getRVA();
    Dir[EXCEPTION_TABLE].Size =
        LastPdata->getRVA() + LastPdata->getSize() - FirstPdata->getRVA();
  }
  if (RelocSec->getVirtualSize()) {
    Dir[BASE_RELOCATION_TABLE].RelativeVirtualAddress = RelocSec->getRVA();
    Dir[BASE_RELOCATION_TABLE].Size = RelocSec->getVirtualSize();
  }
  if (Symbol *Sym = Symtab->findUnderscore("_tls_used")) {
    if (Defined *B = dyn_cast<Defined>(Sym)) {
      Dir[TLS_TABLE].RelativeVirtualAddress = B->getRVA();
      Dir[TLS_TABLE].Size = Config->is64()
                                ? sizeof(object::coff_tls_directory64)
                                : sizeof(object::coff_tls_directory32);
    }
  }
  if (Config->Debug) {
    Dir[DEBUG_DIRECTORY].RelativeVirtualAddress = DebugDirectory->getRVA();
    Dir[DEBUG_DIRECTORY].Size = DebugDirectory->getSize();
  }
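  // _load_config_used is normally defined by the CRT and holds the image's
  // IMAGE_LOAD_CONFIG_DIRECTORY; its first 32-bit field is the structure
  // size, which is read below to size the LOAD_CONFIG_TABLE entry.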
if (Symbol *Sym = Symtab->findUnderscore("_load_config_used")) {
|
2017-11-01 00:10:24 +08:00
|
|
|
if (auto *B = dyn_cast<DefinedRegular>(Sym)) {
|
2016-03-15 17:48:27 +08:00
|
|
|
SectionChunk *SC = B->getChunk();
|
|
|
|
assert(B->getRVA() >= SC->getRVA());
|
|
|
|
uint64_t OffsetInChunk = B->getRVA() - SC->getRVA();
|
|
|
|
if (!SC->hasData() || OffsetInChunk + 4 > SC->getSize())
|
2016-07-15 07:37:14 +08:00
|
|
|
fatal("_load_config_used is malformed");
|
2016-03-15 17:48:27 +08:00
|
|
|
|
|
|
|
ArrayRef<uint8_t> SecContents = SC->getContents();
|
|
|
|
uint32_t LoadConfigSize =
|
|
|
|
*reinterpret_cast<const ulittle32_t *>(&SecContents[OffsetInChunk]);
|
|
|
|
if (OffsetInChunk + LoadConfigSize > SC->getSize())
|
2016-07-15 07:37:14 +08:00
|
|
|
fatal("_load_config_used is too large");
|
2015-07-17 02:30:35 +08:00
|
|
|
Dir[LOAD_CONFIG_TABLE].RelativeVirtualAddress = B->getRVA();
|
2016-03-15 17:48:27 +08:00
|
|
|
Dir[LOAD_CONFIG_TABLE].Size = LoadConfigSize;
|
2015-07-17 02:30:35 +08:00
|
|
|
}
|
|
|
|
}
|
2016-08-10 12:37:56 +08:00
|
|
|
if (!DelayIdata.empty()) {
|
|
|
|
Dir[DELAY_IMPORT_DESCRIPTOR].RelativeVirtualAddress =
|
|
|
|
DelayIdata.getDirRVA();
|
|
|
|
Dir[DELAY_IMPORT_DESCRIPTOR].Size = DelayIdata.getDirSize();
|
|
|
|
}
|
2015-05-29 03:09:30 +08:00
|
|
|
|
|
|
|
  // Write section table
  for (OutputSection *Sec : OutputSections) {
    Sec->writeHeaderTo(Buf);
    Buf += sizeof(coff_section);
  }
  SectionTable = ArrayRef<uint8_t>(
      Buf - OutputSections.size() * sizeof(coff_section), Buf);

  if (OutputSymtab.empty() && Strtab.empty())
    return;

  COFF->PointerToSymbolTable = PointerToSymbolTable;
  uint32_t NumberOfSymbols = OutputSymtab.size();
  COFF->NumberOfSymbols = NumberOfSymbols;
  auto *SymbolTable = reinterpret_cast<coff_symbol16 *>(
      Buffer->getBufferStart() + COFF->PointerToSymbolTable);
  for (size_t I = 0; I != NumberOfSymbols; ++I)
    SymbolTable[I] = OutputSymtab[I];
  // Create the string table; it follows immediately after the symbol table.
  // The first 4 bytes are the length of the table, including the length
  // field itself.
  Buf = reinterpret_cast<uint8_t *>(&SymbolTable[NumberOfSymbols]);
  write32le(Buf, Strtab.size() + 4);
  if (!Strtab.empty())
    memcpy(Buf + 4, Strtab.data(), Strtab.size());
}

void Writer::openFile(StringRef Path) {
  Buffer = CHECK(
      FileOutputBuffer::create(Path, FileSize, FileOutputBuffer::F_executable),
      "failed to open " + Path);
}

void Writer::createSEHTable() {
  // Set the no SEH characteristic on x86 binaries unless we find exception
  // handlers.
  SetNoSEHCharacteristic = true;

  SymbolRVASet Handlers;
  for (ObjFile *File : ObjFile::Instances) {
    // FIXME: We should error here instead of earlier unless /safeseh:no was
    // passed.
    if (!File->hasSafeSEH())
      return;

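    // .sxdata holds the symbol table indices of this object's registered safe
    // exception handlers (emitted for .safeseh directives), so reuse the same
    // index-to-RVA machinery as the /guard:cf tables.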
    markSymbolsForRVATable(File, File->getSXDataChunks(), Handlers);
  }

  // Remove the "no SEH" characteristic if all object files were built with
  // safeseh, we found some exception handlers, and there is a load config in
  // the object.
  SetNoSEHCharacteristic =
      Handlers.empty() || !Symtab->findUnderscore("_load_config_used");

  maybeAddRVATable(std::move(Handlers), "__safe_se_handler_table",
                   "__safe_se_handler_count");
}

// Add a symbol to an RVA set. Two symbols may have the same RVA, but an RVA set
// cannot contain duplicates. Therefore, the set is uniqued by Chunk and the
// symbol's offset into that Chunk.
static void addSymbolToRVASet(SymbolRVASet &RVASet, Defined *S) {
  Chunk *C = S->getChunk();
  if (auto *SC = dyn_cast<SectionChunk>(C))
    C = SC->Repl; // Look through ICF replacement.
  uint32_t Off = S->getRVA() - (C ? C->getRVA() : 0);
  RVASet.insert({C, Off});
}

// Visit all relocations from all section contributions of this object file and
// mark the relocation target as address-taken.
static void markSymbolsWithRelocations(ObjFile *File,
                                       SymbolRVASet &UsedSymbols) {
  for (Chunk *C : File->getChunks()) {
    // We only care about live section chunks. Common chunks and other chunks
    // don't generally contain relocations.
    SectionChunk *SC = dyn_cast<SectionChunk>(C);
    if (!SC || !SC->isLive())
      continue;

    // Look for relocations in this section against symbols in executable
    // output sections.
    for (Symbol *Ref : SC->symbols()) {
      // FIXME: Do further testing to see if the relocation type matters,
      // especially for 32-bit where taking the address of something usually
      // uses an absolute relocation instead of a relative one.
      if (auto *D = dyn_cast_or_null<Defined>(Ref)) {
        Chunk *RefChunk = D->getChunk();
        OutputSection *OS = RefChunk ? RefChunk->getOutputSection() : nullptr;
        if (OS && OS->Header.Characteristics & IMAGE_SCN_MEM_EXECUTE)
          addSymbolToRVASet(UsedSymbols, D);
      }
    }
  }
}

// Create the guard function id table. This is a table of RVAs of all
// address-taken functions. It is sorted and uniqued, just like the safe SEH
// table.
void Writer::createGuardCFTables() {
  SymbolRVASet AddressTakenSyms;
  SymbolRVASet LongJmpTargets;
  for (ObjFile *File : ObjFile::Instances) {
    // If the object was compiled with /guard:cf, the address taken symbols
    // are in .gfids$y sections, and the longjmp targets are in .gljmp$y
    // sections. If the object was not compiled with /guard:cf, we assume there
    // were no setjmp targets, and that all code symbols with relocations are
    // possibly address-taken.
    if (File->hasGuardCF()) {
      markSymbolsForRVATable(File, File->getGuardFidChunks(), AddressTakenSyms);
      markSymbolsForRVATable(File, File->getGuardLJmpChunks(), LongJmpTargets);
    } else {
      markSymbolsWithRelocations(File, AddressTakenSyms);
    }
  }

  // Mark the image entry as address-taken.
  if (Config->Entry)
    addSymbolToRVASet(AddressTakenSyms, cast<Defined>(Config->Entry));

  maybeAddRVATable(std::move(AddressTakenSyms), "__guard_fids_table",
                   "__guard_fids_count");

  // Add the longjmp target table unless the user told us not to.
  if (Config->GuardCF == GuardCFLevel::Full)
    maybeAddRVATable(std::move(LongJmpTargets), "__guard_longjmp_table",
                     "__guard_longjmp_count");

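  // The coff_guard_flags values used here correspond to the IMAGE_GUARD_*
  // bits that end up in the load config's GuardFlags field; the CRT's
  // _load_config_used initializer typically references __guard_flags for
  // that field.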
  // Set __guard_flags, which will be used in the load config to indicate that
  // /guard:cf was enabled.
  uint32_t GuardFlags = uint32_t(coff_guard_flags::CFInstrumented) |
                        uint32_t(coff_guard_flags::HasFidTable);
  if (Config->GuardCF == GuardCFLevel::Full)
    GuardFlags |= uint32_t(coff_guard_flags::HasLongJmpTable);
  Symbol *FlagSym = Symtab->findUnderscore("__guard_flags");
  cast<DefinedAbsolute>(FlagSym)->setVA(GuardFlags);
}

// Take a list of input sections containing symbol table indices and add those
// symbols to an RVA table. The challenge is that symbol RVAs are not known and
// depend on the table size, so we can't directly build a set of integers.
void Writer::markSymbolsForRVATable(ObjFile *File,
                                    ArrayRef<SectionChunk *> SymIdxChunks,
                                    SymbolRVASet &TableSymbols) {
  for (SectionChunk *C : SymIdxChunks) {
    // Skip sections discarded by linker GC. This comes up when a .gfids section
    // is associated with something like a vtable and the vtable is discarded.
    // In this case, the associated gfids section is discarded, and we don't
    // mark the virtual member functions as address-taken by the vtable.
    if (!C->isLive())
      continue;

    // Validate that the contents look like symbol table indices.
    ArrayRef<uint8_t> Data = C->getContents();
    if (Data.size() % 4 != 0) {
      warn("ignoring " + C->getSectionName() +
           " symbol table index section in object " + toString(File));
      continue;
    }

    // Read each symbol table index and check if that symbol was included in
    // the final link. If so, add it to the table symbol set.
    ArrayRef<ulittle32_t> SymIndices(
        reinterpret_cast<const ulittle32_t *>(Data.data()), Data.size() / 4);
    ArrayRef<Symbol *> ObjSymbols = File->getSymbols();
    for (uint32_t SymIndex : SymIndices) {
      if (SymIndex >= ObjSymbols.size()) {
        warn("ignoring invalid symbol table index in section " +
             C->getSectionName() + " in object " + toString(File));
        continue;
      }
      if (Symbol *S = ObjSymbols[SymIndex]) {
        if (S->isLive())
          addSymbolToRVASet(TableSymbols, cast<Defined>(S));
      }
    }
  }
}

// Replace the absolute table symbol with a synthetic symbol pointing to
// TableChunk so that we can emit base relocations for it and resolve section
// relative relocations.
void Writer::maybeAddRVATable(SymbolRVASet TableSymbols, StringRef TableSym,
                              StringRef CountSym) {
  if (TableSymbols.empty())
    return;

  RVATableChunk *TableChunk = make<RVATableChunk>(std::move(TableSymbols));
  RdataSec->addChunk(TableChunk);

  Symbol *T = Symtab->findUnderscore(TableSym);
  Symbol *C = Symtab->findUnderscore(CountSym);
  replaceSymbol<DefinedSynthetic>(T, T->getName(), TableChunk);
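  // Each RVA table entry is a 32-bit value, so the count symbol is the chunk
  // size divided by 4.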
  cast<DefinedAbsolute>(C)->setVA(TableChunk->getSize() / 4);
}

// Handles /section options to allow users to override
// section attributes.
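// For example, /section:.mysec,RW (syntax mirroring link.exe's
// /SECTION:name,attributes flag) would mark .mysec readable and writable.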
void Writer::setSectionPermissions() {
  for (auto &P : Config->Section) {
    StringRef Name = P.first;
    uint32_t Perm = P.second;
    for (OutputSection *Sec : OutputSections)
      if (Sec->Name == Name)
        Sec->setPermissions(Perm);
  }
}

// Write section contents to a mmap'ed file.
void Writer::writeSections() {
  // Record the number of sections to apply section index relocations
  // against absolute symbols. See applySecIdx in Chunks.cpp.
  DefinedAbsolute::NumOutputSections = OutputSections.size();

  uint8_t *Buf = Buffer->getBufferStart();
  for (OutputSection *Sec : OutputSections) {
    uint8_t *SecBuf = Buf + Sec->getFileOff();
    // Fill gaps between functions in .text with INT3 instructions
    // instead of leaving as NUL bytes (which can be interpreted as
    // ADD instructions).
    if (Sec->Header.Characteristics & IMAGE_SCN_CNT_CODE)
      memset(SecBuf, 0xCC, Sec->getRawSize());
    for_each(parallel::par, Sec->getChunks().begin(), Sec->getChunks().end(),
             [&](Chunk *C) { C->writeTo(SecBuf); });
  }
}

void Writer::writeBuildId() {
  // There are two important parts to the build ID.
  // 1) If building with debug info, the COFF debug directory contains a
  //    timestamp as well as a Guid and Age of the PDB.
  // 2) In all cases, the PE COFF file header also contains a timestamp.
  // For reproducibility, instead of a timestamp we want to use a hash of the
  // binary, however when building with debug info the hash needs to take into
  // account the debug info, since it's possible to add blank lines to a file
  // which causes the debug info to change but not the generated code.
  //
  // To handle this, we first set the Guid and Age in the debug directory (but
  // only if we're doing a debug build). Then, we hash the binary (thus causing
  // the hash to change if only the debug info changes, since the Age will be
  // different). Finally, we write that hash into the debug directory (if
  // present) as well as the COFF file header (always).
  if (Config->Debug) {
    assert(BuildId && "BuildId is not set!");
    if (PreviousBuildId.hasValue()) {
      *BuildId->BuildId = *PreviousBuildId;
      BuildId->BuildId->PDB70.Age = BuildId->BuildId->PDB70.Age + 1;
    } else {
      BuildId->BuildId->Signature.CVSignature = OMF::Signature::PDB70;
      BuildId->BuildId->PDB70.Age = 1;
      llvm::getRandomBytes(BuildId->BuildId->PDB70.Signature, 16);
    }
  }

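  // Reusing the GUID from an existing output (PreviousBuildId) and bumping the
  // Age mirrors the usual Windows incremental-linking behavior: debuggers and
  // symbol caches then treat the PDB as a newer revision of the same build
  // rather than as an unrelated binary.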
  // At this point the only fields in the COFF file which remain unset are the
  // "timestamp" in the COFF file header, and the ones in the coff debug
  // directory. Now we can hash the file and write that hash to the various
  // timestamp fields in the file.
  StringRef OutputFileData(
      reinterpret_cast<const char *>(Buffer->getBufferStart()),
      Buffer->getBufferSize());

  uint32_t Hash = static_cast<uint32_t>(xxHash64(OutputFileData));

  if (DebugDirectory)
    DebugDirectory->setTimeDateStamp(Hash);

  uint8_t *Buf = Buffer->getBufferStart();
  Buf += DOSStubSize + sizeof(PEMagic);
  object::coff_file_header *CoffHeader =
      reinterpret_cast<coff_file_header *>(Buf);
  CoffHeader->TimeDateStamp = Hash;
}

// Sort .pdata section contents according to PE/COFF spec 5.5.
void Writer::sortExceptionTable() {
  if (!FirstPdata)
    return;
  // We assume .pdata contains function table entries only.
  auto BufAddr = [&](Chunk *C) {
    return Buffer->getBufferStart() + C->getOutputSection()->getFileOff() +
           C->getRVA() - C->getOutputSection()->getRVA();
  };
  uint8_t *Begin = BufAddr(FirstPdata);
  uint8_t *End = BufAddr(LastPdata) + LastPdata->getSize();
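  // .pdata entries differ by target: x64 RUNTIME_FUNCTION records hold three
  // 32-bit fields (begin, end, unwind info), while ARM and ARM64 pack two
  // (begin, unwind), so each branch below sorts with the matching layout.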
  if (Config->Machine == AMD64) {
    struct Entry { ulittle32_t Begin, End, Unwind; };
    sort(parallel::par, (Entry *)Begin, (Entry *)End,
         [](const Entry &A, const Entry &B) { return A.Begin < B.Begin; });
    return;
  }
  if (Config->Machine == ARMNT || Config->Machine == ARM64) {
    struct Entry { ulittle32_t Begin, Unwind; };
    sort(parallel::par, (Entry *)Begin, (Entry *)End,
         [](const Entry &A, const Entry &B) { return A.Begin < B.Begin; });
    return;
  }
  errs() << "warning: don't know how to handle .pdata.\n";
}

OutputSection *Writer::findSection(StringRef Name) {
  for (OutputSection *Sec : OutputSections)
    if (Sec->Name == Name)
      return Sec;
  return nullptr;
}

uint32_t Writer::getSizeOfInitializedData() {
  uint32_t Res = 0;
  for (OutputSection *S : OutputSections)
    if (S->Header.Characteristics & IMAGE_SCN_CNT_INITIALIZED_DATA)
      Res += S->getRawSize();
  return Res;
}

// Add base relocations to .reloc section.
void Writer::addBaserels() {
  if (!Config->Relocatable)
    return;
  std::vector<Baserel> V;
  for (OutputSection *Sec : OutputSections) {
    if (Sec == RelocSec)
      continue;
    // Collect all locations for base relocations.
    for (Chunk *C : Sec->getChunks())
      C->getBaserels(&V);
    // Add the addresses to .reloc section.
    if (!V.empty())
      addBaserelBlocks(V);
    V.clear();
  }
}

// Add addresses to .reloc section. Note that addresses are grouped by page.
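// Each BaserelChunk added below becomes one IMAGE_BASE_RELOCATION block in
// .reloc: a header naming the page RVA followed by 16-bit type/offset entries
// for every fixup that falls within that page. V is expected to be sorted by
// RVA, so a block is flushed whenever the page changes.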
void Writer::addBaserelBlocks(std::vector<Baserel> &V) {
  const uint32_t Mask = ~uint32_t(PageSize - 1);
  uint32_t Page = V[0].RVA & Mask;
  size_t I = 0, J = 1;
  for (size_t E = V.size(); J < E; ++J) {
    uint32_t P = V[J].RVA & Mask;
    if (P == Page)
      continue;
    RelocSec->addChunk(make<BaserelChunk>(Page, &V[I], &V[0] + J));
    I = J;
    Page = P;
  }
  if (I == J)
    return;
  RelocSec->addChunk(make<BaserelChunk>(Page, &V[I], &V[0] + J));
}