//===- PDB.cpp ------------------------------------------------------------===//
//
// The LLVM Linker
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "PDB.h"
#include "Chunks.h"
#include "Config.h"
#include "Driver.h"
#include "SymbolTable.h"
#include "Symbols.h"
#include "Writer.h"
#include "lld/Common/ErrorHandler.h"
#include "lld/Common/Timer.h"
#include "llvm/DebugInfo/CodeView/CVDebugRecord.h"
#include "llvm/DebugInfo/CodeView/DebugSubsectionRecord.h"
#include "llvm/DebugInfo/CodeView/GlobalTypeTableBuilder.h"
#include "llvm/DebugInfo/CodeView/LazyRandomTypeCollection.h"
#include "llvm/DebugInfo/CodeView/MergingTypeTableBuilder.h"
#include "llvm/DebugInfo/CodeView/RecordName.h"
#include "llvm/DebugInfo/CodeView/SymbolDeserializer.h"
#include "llvm/DebugInfo/CodeView/SymbolSerializer.h"
#include "llvm/DebugInfo/CodeView/TypeDeserializer.h"
#include "llvm/DebugInfo/CodeView/TypeDumpVisitor.h"
#include "llvm/DebugInfo/CodeView/TypeIndexDiscovery.h"
#include "llvm/DebugInfo/CodeView/TypeStreamMerger.h"
#include "llvm/DebugInfo/MSF/MSFBuilder.h"
#include "llvm/DebugInfo/MSF/MSFCommon.h"
#include "llvm/DebugInfo/PDB/GenericError.h"
#include "llvm/DebugInfo/PDB/Native/DbiModuleDescriptorBuilder.h"
#include "llvm/DebugInfo/PDB/Native/DbiStream.h"
#include "llvm/DebugInfo/PDB/Native/DbiStreamBuilder.h"
#include "llvm/DebugInfo/PDB/Native/GSIStreamBuilder.h"
#include "llvm/DebugInfo/PDB/Native/InfoStream.h"
#include "llvm/DebugInfo/PDB/Native/InfoStreamBuilder.h"
#include "llvm/DebugInfo/PDB/Native/NativeSession.h"
#include "llvm/DebugInfo/PDB/Native/PDBFile.h"
#include "llvm/DebugInfo/PDB/Native/PDBFileBuilder.h"
#include "llvm/DebugInfo/PDB/Native/PDBStringTableBuilder.h"
#include "llvm/DebugInfo/PDB/Native/TpiHashing.h"
#include "llvm/DebugInfo/PDB/Native/TpiStream.h"
#include "llvm/DebugInfo/PDB/Native/TpiStreamBuilder.h"
#include "llvm/DebugInfo/PDB/PDB.h"
#include "llvm/Object/COFF.h"
#include "llvm/Support/BinaryByteStream.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/JamCRC.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/ScopedPrinter.h"
#include <memory>

using namespace lld;
using namespace lld::coff;
using namespace llvm;
using namespace llvm::codeview;

using llvm::object::coff_section;

static ExitOnError ExitOnErr;

static Timer TotalPdbLinkTimer("PDB Emission (Cumulative)", Timer::root());

static Timer AddObjectsTimer("Add Objects", TotalPdbLinkTimer);
static Timer TypeMergingTimer("Type Merging", AddObjectsTimer);
static Timer SymbolMergingTimer("Symbol Merging", AddObjectsTimer);
static Timer GlobalsLayoutTimer("Globals Stream Layout", TotalPdbLinkTimer);
static Timer TpiStreamLayoutTimer("TPI Stream Layout", TotalPdbLinkTimer);
static Timer DiskCommitTimer("Commit to Disk", TotalPdbLinkTimer);

namespace {
/// Map from type index and item index in a type server PDB to the
/// corresponding index in the destination PDB.
struct CVIndexMap {
  SmallVector<TypeIndex, 0> TPIMap;
  SmallVector<TypeIndex, 0> IPIMap;
  bool IsTypeServerMap = false;
};

class PDBLinker {
public:
  PDBLinker(SymbolTable *Symtab)
      : Alloc(), Symtab(Symtab), Builder(Alloc), TypeTable(Alloc),
        IDTable(Alloc), GlobalTypeTable(Alloc), GlobalIDTable(Alloc) {}

  /// Emit the basic PDB structure: initial streams, headers, etc.
  void initialize(const llvm::codeview::DebugInfo &BuildId);

  /// Link CodeView from each object file in the symbol table into the PDB.
  void addObjectsToPDB();

  /// Link CodeView from a single object file into the PDB.
  void addObjFile(ObjFile *File);

  /// Produce a mapping from the type and item indices used in the object
  /// file to those in the destination PDB.
  ///
  /// If the object file uses a type server PDB (compiled with /Zi), merge TPI
  /// and IPI from the type server PDB and return a map for it. Each unique
  /// type server PDB is merged at most once, so this may return an existing
  /// index mapping.
  ///
  /// If the object does not use a type server PDB (compiled with /Z7), we
  /// merge all the type and item records from the .debug$T stream and fill in
  /// the caller-provided ObjectIndexMap.
  Expected<const CVIndexMap &> mergeDebugT(ObjFile *File,
                                           CVIndexMap &ObjectIndexMap);

  Expected<const CVIndexMap &> maybeMergeTypeServerPDB(ObjFile *File,
                                                       TypeServer2Record &TS);

  /// Add the section map and section contributions to the PDB.
  void addSections(ArrayRef<OutputSection *> OutputSections,
                   ArrayRef<uint8_t> SectionTable);

  void addSectionContrib(pdb::DbiModuleDescriptorBuilder &LinkerModule,
                         OutputSection *OS, Chunk *C);

  /// Write the PDB to disk.
  void commit();

private:
  BumpPtrAllocator Alloc;

  SymbolTable *Symtab;

  pdb::PDBFileBuilder Builder;

  /// Type records that will go into the PDB TPI stream.
  MergingTypeTableBuilder TypeTable;

  /// Item records that will go into the PDB IPI stream.
  MergingTypeTableBuilder IDTable;

  /// Type records that will go into the PDB TPI stream (for /DEBUG:GHASH).
  GlobalTypeTableBuilder GlobalTypeTable;

  /// Item records that will go into the PDB IPI stream (for /DEBUG:GHASH).
  GlobalTypeTableBuilder GlobalIDTable;

  /// PDBs use a single global string table for filenames in the file checksum
  /// table.
  DebugStringTableSubsection PDBStrTab;

  llvm::SmallString<128> NativePath;

  /// A list of other PDBs which are loaded during the linking process and
  /// which we need to keep around since the linking operation may reference
  /// pointers inside of these PDBs.
  llvm::SmallVector<std::unique_ptr<pdb::NativeSession>, 2> LoadedPDBs;

  std::vector<pdb::SecMapEntry> SectionMap;

  /// Type index mappings of type server PDBs that we've loaded so far.
  std::map<GUID, CVIndexMap> TypeServerIndexMappings;

  /// List of TypeServer PDBs which cannot be loaded.
  /// Cached to prevent repeated load attempts.
  std::set<GUID> MissingTypeServerPDBs;
};
} // namespace
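
// Returns the first section chunk in Sections whose name matches Name, or
// nullptr if there is no such chunk.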
static SectionChunk *findByName(ArrayRef<SectionChunk *> Sections,
                                StringRef Name) {
  for (SectionChunk *C : Sections)
    if (C->getSectionName() == Name)
      return C;
  return nullptr;
}
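
// Checks that a CodeView debug section begins with the expected 4-byte magic
// and returns its contents with the magic stripped. Aborts the link if the
// section is too short or the magic does not match.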
static ArrayRef<uint8_t> consumeDebugMagic(ArrayRef<uint8_t> Data,
                                           StringRef SecName) {
  // First 4 bytes are section magic.
  if (Data.size() < 4)
    fatal(SecName + " too short");
  if (support::endian::read32le(Data.data()) != COFF::DEBUG_SECTION_MAGIC)
    fatal(SecName + " has an invalid magic");
  return Data.slice(4);
}
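
// Returns the contents (past the magic) of the named debug section in File,
// or an empty ArrayRef if the section is not present.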
static ArrayRef<uint8_t> getDebugSection(ObjFile *File, StringRef SecName) {
  if (SectionChunk *Sec = findByName(File->getDebugChunks(), SecName))
    return consumeDebugMagic(Sec->getContents(), SecName);
  return {};
}

// A COFF .debug$H section is currently a clang extension. This function checks
// if a .debug$H section is in a format that we expect / understand, so that we
// can ignore any sections which are coincidentally also named .debug$H but do
// not contain a format we recognize.
static bool canUseDebugH(ArrayRef<uint8_t> DebugH) {
  if (DebugH.size() < sizeof(object::debug_h_header))
    return false;
  auto *Header =
      reinterpret_cast<const object::debug_h_header *>(DebugH.data());
  DebugH = DebugH.drop_front(sizeof(object::debug_h_header));
  return Header->Magic == COFF::DEBUG_HASHES_SECTION_MAGIC &&
         Header->Version == 0 &&
         Header->HashAlgorithm == uint16_t(GlobalTypeHashAlg::SHA1) &&
         (DebugH.size() % 20 == 0);
}
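
// Returns the contents of the object's .debug$H section if it is present and
// in a format we can use (see canUseDebugH above).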
static Optional<ArrayRef<uint8_t>> getDebugH(ObjFile *File) {
  SectionChunk *Sec = findByName(File->getDebugChunks(), ".debug$H");
  if (!Sec)
    return llvm::None;
  ArrayRef<uint8_t> Contents = Sec->getContents();
  if (!canUseDebugH(Contents))
    return None;
  return Contents;
}
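
// Treats the payload of a validated .debug$H section (everything after the
// header) as an array of precomputed global type hashes.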
static ArrayRef<GloballyHashedType>
getHashesFromDebugH(ArrayRef<uint8_t> DebugH) {
  assert(canUseDebugH(DebugH));

  DebugH = DebugH.drop_front(sizeof(object::debug_h_header));
  uint32_t Count = DebugH.size() / sizeof(GloballyHashedType);
  return {reinterpret_cast<const GloballyHashedType *>(DebugH.data()), Count};
}
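
// Copies every record of an in-memory type table into the given TPI or IPI
// stream builder, hashing each record as required by the on-disk PDB format.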
static void addTypeInfo(pdb::TpiStreamBuilder &TpiBuilder,
                        TypeCollection &TypeTable) {
  // Start the TPI or IPI stream header.
  TpiBuilder.setVersionHeader(pdb::PdbTpiV80);

  // Flatten the in memory type table and hash each type.
  TypeTable.ForEachRecord([&](TypeIndex TI, const CVType &Type) {
    auto Hash = pdb::hashTypeRecord(Type);
    if (auto E = Hash.takeError())
      fatal("type hashing error");
    TpiBuilder.addTypeRecord(Type.RecordData, *Hash);
  });
}
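
// If the first record of the object's .debug$T section is an LF_TYPESERVER2
// record, deserializes and returns it; otherwise returns None.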
static Optional<TypeServer2Record>
maybeReadTypeServerRecord(CVTypeArray &Types) {
  auto I = Types.begin();
  if (I == Types.end())
    return None;
  const CVType &Type = *I;
  if (Type.kind() != LF_TYPESERVER2)
    return None;
  TypeServer2Record TS;
  if (auto EC = TypeDeserializer::deserializeAs(const_cast<CVType &>(Type), TS))
    fatal("error reading type server record: " + toString(std::move(EC)));
  return std::move(TS);
}

Expected<const CVIndexMap &>
PDBLinker::mergeDebugT(ObjFile *File, CVIndexMap &ObjectIndexMap) {
  ScopedTimer T(TypeMergingTimer);

  ArrayRef<uint8_t> Data = getDebugSection(File, ".debug$T");
  if (Data.empty())
    return ObjectIndexMap;

  BinaryByteStream Stream(Data, support::little);
  CVTypeArray Types;
  BinaryStreamReader Reader(Stream);
  if (auto EC = Reader.readArray(Types, Reader.getLength()))
    fatal("Reader::readArray failed: " + toString(std::move(EC)));

  // Look through type servers. If we've already seen this type server, don't
  // merge any type information.
  if (Optional<TypeServer2Record> TS = maybeReadTypeServerRecord(Types))
    return maybeMergeTypeServerPDB(File, *TS);

  // This is a /Z7 object. Fill in the temporary, caller-provided
  // ObjectIndexMap.
  if (Config->DebugGHashes) {
    ArrayRef<GloballyHashedType> Hashes;
    std::vector<GloballyHashedType> OwnedHashes;
    if (Optional<ArrayRef<uint8_t>> DebugH = getDebugH(File))
      Hashes = getHashesFromDebugH(*DebugH);
    else {
      OwnedHashes = GloballyHashedType::hashTypes(Types);
      Hashes = OwnedHashes;
    }

    if (auto Err = mergeTypeAndIdRecords(GlobalIDTable, GlobalTypeTable,
                                         ObjectIndexMap.TPIMap, Types, Hashes))
      fatal("codeview::mergeTypeAndIdRecords failed: " +
            toString(std::move(Err)));
  } else {
    if (auto Err = mergeTypeAndIdRecords(IDTable, TypeTable,
                                         ObjectIndexMap.TPIMap, Types))
      fatal("codeview::mergeTypeAndIdRecords failed: " +
            toString(std::move(Err)));
  }
  return ObjectIndexMap;
}
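
// Attempts to open the type server PDB at TSPath and verifies that its GUID
// matches the one recorded in the object file's LF_TYPESERVER2 record.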
static Expected<std::unique_ptr<pdb::NativeSession>>
tryToLoadPDB(const GUID &GuidFromObj, StringRef TSPath) {
  ErrorOr<std::unique_ptr<MemoryBuffer>> MBOrErr = MemoryBuffer::getFile(
      TSPath, /*FileSize=*/-1, /*RequiresNullTerminator=*/false);
  if (!MBOrErr)
    return errorCodeToError(MBOrErr.getError());

  std::unique_ptr<pdb::IPDBSession> ThisSession;
  if (auto EC = pdb::NativeSession::createFromPdb(
          MemoryBuffer::getMemBuffer(Driver->takeBuffer(std::move(*MBOrErr)),
                                     /*RequiresNullTerminator=*/false),
          ThisSession))
    return std::move(EC);

  std::unique_ptr<pdb::NativeSession> NS(
      static_cast<pdb::NativeSession *>(ThisSession.release()));
  pdb::PDBFile &File = NS->getPDBFile();
  auto ExpectedInfo = File.getPDBInfoStream();
  // All PDB Files should have an Info stream.
  if (!ExpectedInfo)
    return ExpectedInfo.takeError();

  // Just because a file with a matching name was found and it was an actual
  // PDB file doesn't mean it matches. For it to match, the InfoStream's GUID
  // must match the GUID specified in the TypeServer2 record.
  if (ExpectedInfo->getGuid() != GuidFromObj)
    return make_error<pdb::GenericError>(
        pdb::generic_error_code::type_server_not_found, TSPath);

  return std::move(NS);
}
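
// Merges the TPI and IPI streams of the type server PDB referenced by an
// LF_TYPESERVER2 record into the destination PDB. The resulting index map is
// cached per GUID so that each type server is merged at most once, and PDBs
// that fail to load are remembered so that we do not retry them.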
Expected<const CVIndexMap &>
PDBLinker::maybeMergeTypeServerPDB(ObjFile *File, TypeServer2Record &TS) {
  const GUID &TSId = TS.getGuid();
  StringRef TSPath = TS.getName();

  // First, check if the PDB has previously failed to load.
  if (MissingTypeServerPDBs.count(TSId))
    return make_error<pdb::GenericError>(
        pdb::generic_error_code::type_server_not_found, TSPath);

  // Second, check if we already loaded a PDB with this GUID. Return the type
  // index mapping if we have it.
  auto Insertion = TypeServerIndexMappings.insert({TSId, CVIndexMap()});
  CVIndexMap &IndexMap = Insertion.first->second;
  if (!Insertion.second)
    return IndexMap;

  // Mark this map as a type server map.
  IndexMap.IsTypeServerMap = true;

  // Check for a PDB at:
  // 1. The given file path
  // 2. Next to the object file or archive file
  auto ExpectedSession = tryToLoadPDB(TSId, TSPath);
  if (!ExpectedSession) {
    consumeError(ExpectedSession.takeError());
    StringRef LocalPath =
        !File->ParentName.empty() ? File->ParentName : File->getName();
    SmallString<128> Path = sys::path::parent_path(LocalPath);
    sys::path::append(
        Path, sys::path::filename(TSPath, sys::path::Style::windows));
    ExpectedSession = tryToLoadPDB(TSId, Path);
  }
  if (auto E = ExpectedSession.takeError()) {
    TypeServerIndexMappings.erase(TSId);
    MissingTypeServerPDBs.emplace(TSId);
    return std::move(E);
  }

  pdb::NativeSession *Session = ExpectedSession->get();

  // Keep a strong reference to this PDB, so that it's safe to hold pointers
  // into the file.
  LoadedPDBs.push_back(std::move(*ExpectedSession));

  auto ExpectedTpi = Session->getPDBFile().getPDBTpiStream();
  if (auto E = ExpectedTpi.takeError())
    fatal("Type server does not have TPI stream: " + toString(std::move(E)));
  auto ExpectedIpi = Session->getPDBFile().getPDBIpiStream();
  if (auto E = ExpectedIpi.takeError())
    fatal("Type server does not have IPI stream: " + toString(std::move(E)));

  if (Config->DebugGHashes) {
    // PDBs do not actually store global hashes, so when merging a type server
    // PDB we have to synthesize global hashes. To do this, we first synthesize
    // global hashes for the TPI stream, since it is independent, then we
    // synthesize hashes for the IPI stream, using the hashes for the TPI
    // stream as inputs.
    auto TpiHashes = GloballyHashedType::hashTypes(ExpectedTpi->typeArray());
    auto IpiHashes =
        GloballyHashedType::hashIds(ExpectedIpi->typeArray(), TpiHashes);

    // Merge TPI first, because the IPI stream will reference type indices.
    if (auto Err = mergeTypeRecords(GlobalTypeTable, IndexMap.TPIMap,
                                    ExpectedTpi->typeArray(), TpiHashes))
      fatal("codeview::mergeTypeRecords failed: " + toString(std::move(Err)));

    // Merge IPI.
    if (auto Err =
            mergeIdRecords(GlobalIDTable, IndexMap.TPIMap, IndexMap.IPIMap,
                           ExpectedIpi->typeArray(), IpiHashes))
      fatal("codeview::mergeIdRecords failed: " + toString(std::move(Err)));
  } else {
    // Merge TPI first, because the IPI stream will reference type indices.
    if (auto Err = mergeTypeRecords(TypeTable, IndexMap.TPIMap,
                                    ExpectedTpi->typeArray()))
      fatal("codeview::mergeTypeRecords failed: " + toString(std::move(Err)));

    // Merge IPI.
    if (auto Err = mergeIdRecords(IDTable, IndexMap.TPIMap, IndexMap.IPIMap,
                                  ExpectedIpi->typeArray()))
      fatal("codeview::mergeIdRecords failed: " + toString(std::move(Err)));
  }

  return IndexMap;
}
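
// Rewrites a single type index from the object file's index space into the
// destination PDB's index space using TypeIndexMap. Returns false if the
// index is out of range for the map.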
static bool remapTypeIndex(TypeIndex &TI, ArrayRef<TypeIndex> TypeIndexMap) {
  if (TI.isSimple())
    return true;
  if (TI.toArrayIndex() >= TypeIndexMap.size())
    return false;
  TI = TypeIndexMap[TI.toArrayIndex()];
  return true;
}
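
// Rewrites every type and item index referenced by one symbol record from the
// object file's index space to the PDB's. Indices that cannot be remapped are
// logged and replaced with the "not translated" simple type.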
static void remapTypesInSymbolRecord(ObjFile *File, SymbolKind SymKind,
                                     MutableArrayRef<uint8_t> Contents,
                                     const CVIndexMap &IndexMap,
                                     ArrayRef<TiReference> TypeRefs) {
  for (const TiReference &Ref : TypeRefs) {
    unsigned ByteSize = Ref.Count * sizeof(TypeIndex);
    if (Contents.size() < Ref.Offset + ByteSize)
      fatal("symbol record too short");

    // This can be an item index or a type index. Choose the appropriate map.
    ArrayRef<TypeIndex> TypeOrItemMap = IndexMap.TPIMap;
    bool IsItemIndex = Ref.Kind == TiRefKind::IndexRef;
    if (IsItemIndex && IndexMap.IsTypeServerMap)
      TypeOrItemMap = IndexMap.IPIMap;

    MutableArrayRef<TypeIndex> TIs(
        reinterpret_cast<TypeIndex *>(Contents.data() + Ref.Offset), Ref.Count);
    for (TypeIndex &TI : TIs) {
      if (!remapTypeIndex(TI, TypeOrItemMap)) {
        log("ignoring symbol record of kind 0x" + utohexstr(SymKind) + " in " +
            File->getName() + " with bad " + (IsItemIndex ? "item" : "type") +
            " index 0x" + utohexstr(TI.getIndex()));
        TI = TypeIndex(SimpleTypeKind::NotTranslated);
        continue;
      }
    }
  }
}
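
// Remembers the location of a 4-byte string table offset inside a copied
// symbol record so that it can be rewritten once the final PDB string table
// has been laid out.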
static void
recordStringTableReferenceAtOffset(MutableArrayRef<uint8_t> Contents,
                                   uint32_t Offset,
                                   std::vector<ulittle32_t *> &StrTableRefs) {
  Contents =
      Contents.drop_front(Offset).take_front(sizeof(support::ulittle32_t));
  ulittle32_t *Index = reinterpret_cast<ulittle32_t *>(Contents.data());
  StrTableRefs.push_back(Index);
}

static void
recordStringTableReferences(SymbolKind Kind, MutableArrayRef<uint8_t> Contents,
                            std::vector<ulittle32_t *> &StrTableRefs) {
  // For now we only handle S_FILESTATIC, but we may need the same logic for
  // S_DEFRANGE and S_DEFRANGE_SUBFIELD. However, I cannot seem to generate any
  // PDBs that contain these types of records, so because of the uncertainty
  // they are omitted here until we can prove that it's necessary.
  switch (Kind) {
  case SymbolKind::S_FILESTATIC:
    // FileStaticSym::ModFileOffset
    recordStringTableReferenceAtOffset(Contents, 4, StrTableRefs);
    break;
  case SymbolKind::S_DEFRANGE:
  case SymbolKind::S_DEFRANGE_SUBFIELD:
    log("Not fixing up string table reference in S_DEFRANGE / "
        "S_DEFRANGE_SUBFIELD record");
    break;
  default:
    break;
  }
}

static SymbolKind symbolKind(ArrayRef<uint8_t> RecordData) {
  const RecordPrefix *Prefix =
      reinterpret_cast<const RecordPrefix *>(RecordData.data());
  return static_cast<SymbolKind>(uint16_t(Prefix->RecordKind));
}

/// MSVC translates S_PROC_ID_END to S_END, and S_[LG]PROC32_ID to S_[LG]PROC32
static void translateIdSymbols(MutableArrayRef<uint8_t> &RecordData,
                               TypeCollection &IDTable) {
  RecordPrefix *Prefix = reinterpret_cast<RecordPrefix *>(RecordData.data());

  SymbolKind Kind = symbolKind(RecordData);

  if (Kind == SymbolKind::S_PROC_ID_END) {
    Prefix->RecordKind = SymbolKind::S_END;
    return;
  }

  // In an object file, GPROC32_ID has an embedded reference which refers to
  // the single object file type index namespace. This has already been
  // translated to the PDB file's ID stream index space, but we need to convert
  // this to a symbol that refers to the type stream index space. So we remap
  // again from ID index space to type index space.
  if (Kind == SymbolKind::S_GPROC32_ID || Kind == SymbolKind::S_LPROC32_ID) {
    SmallVector<TiReference, 1> Refs;
    auto Content = RecordData.drop_front(sizeof(RecordPrefix));
    CVSymbol Sym(Kind, RecordData);
    discoverTypeIndicesInSymbol(Sym, Refs);
    assert(Refs.size() == 1);
    assert(Refs.front().Count == 1);

    TypeIndex *TI =
        reinterpret_cast<TypeIndex *>(Content.data() + Refs[0].Offset);
    // `TI` is the index of a FuncIdRecord or MemberFuncIdRecord which lives in
    // the IPI stream, whose `FunctionType` member refers to the TPI stream.
    // Note that LF_FUNC_ID and LF_MEMFUNC_ID have the same record layout, and
    // in both cases we just need the second type index.
    if (!TI->isSimple() && !TI->isNoneType()) {
      CVType FuncIdData = IDTable.getType(*TI);
      SmallVector<TypeIndex, 2> Indices;
      discoverTypeIndices(FuncIdData, Indices);
      assert(Indices.size() == 2);
      *TI = Indices[1];
    }

    Kind = (Kind == SymbolKind::S_GPROC32_ID) ? SymbolKind::S_GPROC32
                                              : SymbolKind::S_LPROC32;
    Prefix->RecordKind = uint16_t(Kind);
  }
}

/// Copy the symbol record. In a PDB, symbol records must be 4 byte aligned.
/// The object file may not be aligned.
static MutableArrayRef<uint8_t> copySymbolForPdb(const CVSymbol &Sym,
                                                 BumpPtrAllocator &Alloc) {
  size_t Size = alignTo(Sym.length(), alignOf(CodeViewContainer::Pdb));
  assert(Size >= 4 && "record too short");
  assert(Size <= MaxRecordLength && "record too long");
  void *Mem = Alloc.Allocate(Size, 4);

  // Copy the symbol record and zero out any padding bytes.
  MutableArrayRef<uint8_t> NewData(reinterpret_cast<uint8_t *>(Mem), Size);
  memcpy(NewData.data(), Sym.data().data(), Sym.length());
  memset(NewData.data() + Sym.length(), 0, Size - Sym.length());

  // Update the record prefix length. It should point to the beginning of the
  // next record.
  auto *Prefix = reinterpret_cast<RecordPrefix *>(Mem);
  Prefix->RecordLen = Size - 2;
  return NewData;
}

/// Return true if this symbol opens a scope. This implies that the symbol has
/// "parent" and "end" fields, which contain the offset of the S_END or
/// S_INLINESITE_END record.
static bool symbolOpensScope(SymbolKind Kind) {
  switch (Kind) {
  case SymbolKind::S_GPROC32:
  case SymbolKind::S_LPROC32:
  case SymbolKind::S_LPROC32_ID:
  case SymbolKind::S_GPROC32_ID:
  case SymbolKind::S_BLOCK32:
  case SymbolKind::S_SEPCODE:
  case SymbolKind::S_THUNK32:
  case SymbolKind::S_INLINESITE:
  case SymbolKind::S_INLINESITE2:
    return true;
  default:
    break;
  }
  return false;
}

static bool symbolEndsScope(SymbolKind Kind) {
  switch (Kind) {
  case SymbolKind::S_END:
  case SymbolKind::S_PROC_ID_END:
  case SymbolKind::S_INLINESITE_END:
    return true;
  default:
    break;
  }
  return false;
}
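
// A scope-opening symbol record begins with a pair of parent/end offsets.
// These structures and the scope stack below are used to fill those offsets
// in as records are copied into the module's symbol stream.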
struct ScopeRecord {
  ulittle32_t PtrParent;
  ulittle32_t PtrEnd;
};

struct SymbolScope {
  ScopeRecord *OpeningRecord;
  uint32_t ScopeOffset;
};

static void scopeStackOpen(SmallVectorImpl<SymbolScope> &Stack,
                           uint32_t CurOffset, CVSymbol &Sym) {
  assert(symbolOpensScope(Sym.kind()));
  SymbolScope S;
  S.ScopeOffset = CurOffset;
  S.OpeningRecord = const_cast<ScopeRecord *>(
      reinterpret_cast<const ScopeRecord *>(Sym.content().data()));
  S.OpeningRecord->PtrParent = Stack.empty() ? 0 : Stack.back().ScopeOffset;
  Stack.push_back(S);
}

static void scopeStackClose(SmallVectorImpl<SymbolScope> &Stack,
                            uint32_t CurOffset, ObjFile *File) {
  if (Stack.empty()) {
    warn("symbol scopes are not balanced in " + File->getName());
    return;
  }
  SymbolScope S = Stack.pop_back_val();
  S.OpeningRecord->PtrEnd = CurOffset;
}
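
// The next two functions decide which streams a copied symbol record lands
// in: most records go into the per-module symbol stream, and a subset also
// (or instead) goes into the globals stream.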
static bool symbolGoesInModuleStream(const CVSymbol &Sym) {
  switch (Sym.kind()) {
  case SymbolKind::S_GDATA32:
  case SymbolKind::S_CONSTANT:
  case SymbolKind::S_UDT:
  // We really should not be seeing S_PROCREF and S_LPROCREF in the first place
  // since they are synthesized by the linker in response to S_GPROC32 and
  // S_LPROC32, but if we do see them, don't put them in the module stream I
  // guess.
  case SymbolKind::S_PROCREF:
  case SymbolKind::S_LPROCREF:
    return false;
  // S_GDATA32 does not go in the module stream, but S_LDATA32 does.
  case SymbolKind::S_LDATA32:
  default:
    return true;
  }
}

static bool symbolGoesInGlobalsStream(const CVSymbol &Sym) {
  switch (Sym.kind()) {
  case SymbolKind::S_CONSTANT:
  case SymbolKind::S_GDATA32:
  // S_LDATA32 goes in both the module stream and the globals stream.
  case SymbolKind::S_LDATA32:
  case SymbolKind::S_GPROC32:
  case SymbolKind::S_LPROC32:
  // We really should not be seeing S_PROCREF and S_LPROCREF in the first place
  // since they are synthesized by the linker in response to S_GPROC32 and
  // S_LPROC32, but if we do see them, copy them straight through.
  case SymbolKind::S_PROCREF:
  case SymbolKind::S_LPROCREF:
    return true;
  // FIXME: For now, we drop all S_UDT symbols (i.e. they don't go in the
  // globals stream or the modules stream). These have special handling which
  // needs more investigation before we can get right, but by putting them all
  // into the globals stream WinDbg fails to display local variables of class
  // types saying that it cannot find the type Foo *. So as a stopgap just to
  // keep things working, we drop them.
  case SymbolKind::S_UDT:
  default:
    return false;
  }
}
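
// Adds a record to the globals stream. S_GPROC32 and S_LPROC32 records are
// not copied verbatim; instead a procedure reference record pointing back at
// the copy in the module's symbol stream is synthesized.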
static void addGlobalSymbol(pdb::GSIStreamBuilder &Builder, ObjFile &File,
                            const CVSymbol &Sym) {
  switch (Sym.kind()) {
  case SymbolKind::S_CONSTANT:
  case SymbolKind::S_UDT:
  case SymbolKind::S_GDATA32:
  case SymbolKind::S_LDATA32:
  case SymbolKind::S_PROCREF:
  case SymbolKind::S_LPROCREF:
    Builder.addGlobalSymbol(Sym);
    break;
  case SymbolKind::S_GPROC32:
  case SymbolKind::S_LPROC32: {
    SymbolRecordKind K = SymbolRecordKind::ProcRefSym;
    if (Sym.kind() == SymbolKind::S_LPROC32)
      K = SymbolRecordKind::LocalProcRef;
    ProcRefSym PS(K);
    PS.Module = static_cast<uint16_t>(File.ModuleDBI->getModuleIndex());
    // For some reason, MSVC seems to add one to this value.
    ++PS.Module;
    PS.Name = getSymbolName(Sym);
    PS.SumName = 0;
    PS.SymOffset = File.ModuleDBI->getNextSymbolOffset();
    Builder.addGlobalSymbol(PS);
    break;
  }
  default:
    llvm_unreachable("Invalid symbol kind!");
  }
}
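
// Copies the records of one .debug$S symbol subsection into the PDB:
// remapping type indices, converting S_*_ID records, fixing up scope links
// and string table references, and distributing each record to the module
// stream and/or the globals stream.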
static void mergeSymbolRecords(BumpPtrAllocator &Alloc, ObjFile *File,
                               pdb::GSIStreamBuilder &GsiBuilder,
                               const CVIndexMap &IndexMap,
                               TypeCollection &IDTable,
                               std::vector<ulittle32_t *> &StringTableRefs,
                               BinaryStreamRef SymData) {
  // FIXME: Improve error recovery by warning and skipping records when
  // possible.
  ArrayRef<uint8_t> SymsBuffer;
  cantFail(SymData.readBytes(0, SymData.getLength(), SymsBuffer));
  SmallVector<SymbolScope, 4> Scopes;

  auto EC = forEachCodeViewRecord<CVSymbol>(
      SymsBuffer, [&](const CVSymbol &Sym) -> llvm::Error {
        // Discover type index references in the record. Skip it if we don't
        // know where they are.
        SmallVector<TiReference, 32> TypeRefs;
        if (!discoverTypeIndicesInSymbol(Sym, TypeRefs)) {
          log("ignoring unknown symbol record with kind 0x" +
              utohexstr(Sym.kind()));
          return Error::success();
        }

        // Copy the symbol record so we can mutate it.
        MutableArrayRef<uint8_t> NewData = copySymbolForPdb(Sym, Alloc);

        // Re-map all the type index references.
        MutableArrayRef<uint8_t> Contents =
            NewData.drop_front(sizeof(RecordPrefix));
        remapTypesInSymbolRecord(File, Sym.kind(), Contents, IndexMap,
                                 TypeRefs);

        // An object file may have S_xxx_ID symbols, but these get converted to
        // "real" symbols in a PDB.
        translateIdSymbols(NewData, IDTable);

        // If this record refers to an offset in the object file's string table,
        // add that item to the global PDB string table and re-write the index.
        recordStringTableReferences(Sym.kind(), Contents, StringTableRefs);

        SymbolKind NewKind = symbolKind(NewData);

        // Fill in "Parent" and "End" fields by maintaining a stack of scopes.
        CVSymbol NewSym(NewKind, NewData);
        if (symbolOpensScope(NewKind))
          scopeStackOpen(Scopes, File->ModuleDBI->getNextSymbolOffset(),
                         NewSym);
        else if (symbolEndsScope(NewKind))
          scopeStackClose(Scopes, File->ModuleDBI->getNextSymbolOffset(), File);

        // Add the symbol to the globals stream if necessary. Do this before
        // adding the symbol to the module since we may need to get the next
        // symbol offset, and writing to the module's symbol stream will update
        // that offset.
        if (symbolGoesInGlobalsStream(NewSym))
          addGlobalSymbol(GsiBuilder, *File, NewSym);

        // Add the symbol to the module.
        if (symbolGoesInModuleStream(NewSym))
          File->ModuleDBI->addSymbol(NewSym);
        return Error::success();
      });
  cantFail(std::move(EC));
}

// Allocate memory for a .debug$S section and relocate it.
static ArrayRef<uint8_t> relocateDebugChunk(BumpPtrAllocator &Alloc,
                                            SectionChunk *DebugChunk) {
  uint8_t *Buffer = Alloc.Allocate<uint8_t>(DebugChunk->getSize());
  assert(DebugChunk->OutputSectionOff == 0 &&
         "debug sections should not be in output sections");
  DebugChunk->writeTo(Buffer);
  return consumeDebugMagic(makeArrayRef(Buffer, DebugChunk->getSize()),
                           ".debug$S");
}

void PDBLinker::addObjFile(ObjFile *File) {
  // Add a module descriptor for every object file. We need to put an absolute
  // path to the object into the PDB. If this is a plain object, we make its
  // path absolute. If it's an object in an archive, we make the archive path
  // absolute.
  bool InArchive = !File->ParentName.empty();
  SmallString<128> Path = InArchive ? File->ParentName : File->getName();
  sys::fs::make_absolute(Path);
  sys::path::native(Path, sys::path::Style::windows);
  StringRef Name = InArchive ? File->getName() : StringRef(Path);

  File->ModuleDBI = &ExitOnErr(Builder.getDbiBuilder().addModuleInfo(Name));
  File->ModuleDBI->setObjFileName(Path);

  // Before we can process symbol substreams from .debug$S, we need to process
  // type information, file checksums, and the string table. Add type info to
  // the PDB first, so that we can get the map from object file type and item
  // indices to PDB type and item indices.
  CVIndexMap ObjectIndexMap;
  auto IndexMapResult = mergeDebugT(File, ObjectIndexMap);

  // If the .debug$T sections fail to merge, assume there is no debug info.
  if (!IndexMapResult) {
    warn("Type server PDB for " + Name + " is invalid, ignoring debug info. " +
         toString(IndexMapResult.takeError()));
    return;
  }

  const CVIndexMap &IndexMap = *IndexMapResult;

  ScopedTimer T(SymbolMergingTimer);

  // Now do all live .debug$S sections.
  DebugStringTableSubsectionRef CVStrTab;
  DebugChecksumsSubsectionRef Checksums;
  std::vector<ulittle32_t *> StringTableReferences;
  for (SectionChunk *DebugChunk : File->getDebugChunks()) {
    if (!DebugChunk->isLive() || DebugChunk->getSectionName() != ".debug$S")
      continue;

    ArrayRef<uint8_t> RelocatedDebugContents =
        relocateDebugChunk(Alloc, DebugChunk);
    if (RelocatedDebugContents.empty())
      continue;

    DebugSubsectionArray Subsections;
    BinaryStreamReader Reader(RelocatedDebugContents, support::little);
    ExitOnErr(Reader.readArray(Subsections, RelocatedDebugContents.size()));

    for (const DebugSubsectionRecord &SS : Subsections) {
      switch (SS.kind()) {
      case DebugSubsectionKind::StringTable: {
        auto Data = SS.getRecordData();
        ArrayRef<uint8_t> Buffer;
        cantFail(Data.readLongestContiguousChunk(0, Buffer));
        assert(!CVStrTab.valid() &&
               "Encountered multiple string table subsections!");
        ExitOnErr(CVStrTab.initialize(SS.getRecordData()));
        break;
      }
      case DebugSubsectionKind::FileChecksums:
        assert(!Checksums.valid() &&
               "Encountered multiple checksum subsections!");
        ExitOnErr(Checksums.initialize(SS.getRecordData()));
        break;
      case DebugSubsectionKind::Lines:
        // We can add the relocated line table directly to the PDB without
        // modification because the file checksum offsets will stay the same.
        File->ModuleDBI->addDebugSubsection(SS);
        break;
      case DebugSubsectionKind::Symbols:
        if (Config->DebugGHashes) {
          mergeSymbolRecords(Alloc, File, Builder.getGsiBuilder(), IndexMap,
                             GlobalIDTable, StringTableReferences,
                             SS.getRecordData());
        } else {
          mergeSymbolRecords(Alloc, File, Builder.getGsiBuilder(), IndexMap,
                             IDTable, StringTableReferences,
                             SS.getRecordData());
        }
        break;
      default:
        // FIXME: Process the rest of the subsections.
        break;
      }
    }
  }

  // We should have seen all debug subsections across the entire object file
  // now, which means that if a StringTable subsection and Checksums subsection
  // were present, now is the time to handle them.
  if (!CVStrTab.valid()) {
    if (Checksums.valid())
      fatal(".debug$S sections with a checksums subsection must also contain a "
            "string table subsection");

    if (!StringTableReferences.empty())
      warn("No StringTable subsection was encountered, but there are string "
           "table references");
    return;
  }

  // Rewrite each string table reference based on the value that the string
  // assumes in the final PDB.
  for (ulittle32_t *Ref : StringTableReferences) {
    auto ExpectedString = CVStrTab.getString(*Ref);
    if (!ExpectedString) {
      warn("Invalid string table reference");
      consumeError(ExpectedString.takeError());
      continue;
    }

    *Ref = PDBStrTab.insert(*ExpectedString);
  }

  // Make a new file checksum table that refers to offsets in the PDB-wide
  // string table. Generally the string table subsection appears after the
  // checksum table, so we have to do this after looping over all the
  // subsections.
  auto NewChecksums = make_unique<DebugChecksumsSubsection>(PDBStrTab);
  for (FileChecksumEntry &FC : Checksums) {
    StringRef FileName = ExitOnErr(CVStrTab.getString(FC.FileNameOffset));
    ExitOnErr(Builder.getDbiBuilder().addModuleSourceFile(*File->ModuleDBI,
                                                          FileName));
    NewChecksums->addChecksum(FileName, FC.Kind, FC.Checksum);
  }
  File->ModuleDBI->addDebugSubsection(std::move(NewChecksums));
}
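
// Builds an S_PUB32 record for a defined symbol, recording the output section
// index and section-relative offset of the symbol in the final image.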
static PublicSym32 createPublic(Defined *Def) {
  PublicSym32 Pub(SymbolKind::S_PUB32);
  Pub.Name = Def->getName();
  if (auto *D = dyn_cast<DefinedCOFF>(Def)) {
    if (D->getCOFFSymbol().isFunctionDefinition())
      Pub.Flags = PublicSymFlags::Function;
  } else if (isa<DefinedImportThunk>(Def)) {
    Pub.Flags = PublicSymFlags::Function;
  }

  OutputSection *OS = Def->getChunk()->getOutputSection();
  assert(OS && "all publics should be in final image");
  Pub.Offset = Def->getRVA() - OS->getRVA();
  Pub.Segment = OS->SectionIndex;
  return Pub;
}

// Add all object files to the PDB. Merge .debug$T sections into IpiData and
// TpiData.
void PDBLinker::addObjectsToPDB() {
  ScopedTimer T1(AddObjectsTimer);
  for (ObjFile *File : ObjFile::Instances)
    addObjFile(File);

  Builder.getStringTableBuilder().setStrings(PDBStrTab);
  T1.stop();

  // Construct TPI and IPI stream contents.
  ScopedTimer T2(TpiStreamLayoutTimer);
  if (Config->DebugGHashes) {
    addTypeInfo(Builder.getTpiBuilder(), GlobalTypeTable);
    addTypeInfo(Builder.getIpiBuilder(), GlobalIDTable);
  } else {
    addTypeInfo(Builder.getTpiBuilder(), TypeTable);
    addTypeInfo(Builder.getIpiBuilder(), IDTable);
  }
  T2.stop();

  ScopedTimer T3(GlobalsLayoutTimer);
  // Compute the public and global symbols.
  auto &GsiBuilder = Builder.getGsiBuilder();
  std::vector<PublicSym32> Publics;
  Symtab->forEachSymbol([&Publics](Symbol *S) {
    // Only emit defined, live symbols that have a chunk.
    auto *Def = dyn_cast<Defined>(S);
    if (Def && Def->isLive() && Def->getChunk())
      Publics.push_back(createPublic(Def));
  });

  if (!Publics.empty()) {
    // Sort the public symbols and add them to the stream.
    std::sort(Publics.begin(), Publics.end(),
              [](const PublicSym32 &L, const PublicSym32 &R) {
                return L.Name < R.Name;
              });
    for (const PublicSym32 &Pub : Publics)
      GsiBuilder.addPublicSymbol(Pub);
  }
}
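
// Adds the standard records of the synthetic "* Linker *" module: an object
// name record, a Compile3 record describing the linker, and an environment
// block holding the working directory, linker executable, PDB path, and
// command line.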
static void addCommonLinkerModuleSymbols(StringRef Path,
                                         pdb::DbiModuleDescriptorBuilder &Mod,
                                         BumpPtrAllocator &Allocator) {
  ObjNameSym ONS(SymbolRecordKind::ObjNameSym);
  Compile3Sym CS(SymbolRecordKind::Compile3Sym);
  EnvBlockSym EBS(SymbolRecordKind::EnvBlockSym);

  ONS.Name = "* Linker *";
  ONS.Signature = 0;

  CS.Machine = Config->is64() ? CPUType::X64 : CPUType::Intel80386;
  // Interestingly, if we set the string to 0.0.0.0, then when trying to view
  // local variables WinDbg emits an error that private symbols are not
  // present. By setting this to a valid MSVC linker version string, local
  // variables are displayed properly. As such, even though it is not
  // representative of LLVM's version information, we need this for
  // compatibility.
  CS.Flags = CompileSym3Flags::None;
  CS.VersionBackendBuild = 25019;
  CS.VersionBackendMajor = 14;
  CS.VersionBackendMinor = 10;
  CS.VersionBackendQFE = 0;

  // MSVC also sets the frontend to 0.0.0.0 since this is specifically for the
  // linker module (which is by definition a backend), so we don't need to do
  // anything here. Also, it seems we can use "LLVM Linker" for the linker name
  // without any problems. Only the backend version has to be hardcoded to a
  // magic number.
  CS.VersionFrontendBuild = 0;
  CS.VersionFrontendMajor = 0;
  CS.VersionFrontendMinor = 0;
  CS.VersionFrontendQFE = 0;
  CS.Version = "LLVM Linker";
  CS.setLanguage(SourceLanguage::Link);

  ArrayRef<StringRef> Args = makeArrayRef(Config->Argv).drop_front();
  std::string ArgStr = llvm::join(Args, " ");
  EBS.Fields.push_back("cwd");
  SmallString<64> cwd;
  sys::fs::current_path(cwd);
  EBS.Fields.push_back(cwd);
  EBS.Fields.push_back("exe");
  SmallString<64> exe = Config->Argv[0];
  llvm::sys::fs::make_absolute(exe);
  EBS.Fields.push_back(exe);
  EBS.Fields.push_back("pdb");
  EBS.Fields.push_back(Path);
  EBS.Fields.push_back("cmd");
  EBS.Fields.push_back(ArgStr);
  Mod.addSymbol(codeview::SymbolSerializer::writeOneSymbol(
      ONS, Allocator, CodeViewContainer::Pdb));
  Mod.addSymbol(codeview::SymbolSerializer::writeOneSymbol(
      CS, Allocator, CodeViewContainer::Pdb));
  Mod.addSymbol(codeview::SymbolSerializer::writeOneSymbol(
      EBS, Allocator, CodeViewContainer::Pdb));
}
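
// Emits an S_SECTION record describing one output section of the image into
// the linker module.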
static void addLinkerModuleSectionSymbol(pdb::DbiModuleDescriptorBuilder &Mod,
                                         OutputSection &OS,
                                         BumpPtrAllocator &Allocator) {
  SectionSym Sym(SymbolRecordKind::SectionSym);
  Sym.Alignment = 12; // 2^12 = 4KB
  Sym.Characteristics = OS.getCharacteristics();
  Sym.Length = OS.getVirtualSize();
  Sym.Name = OS.getName();
  Sym.Rva = OS.getRVA();
  Sym.SectionNumber = OS.SectionIndex;
  Mod.addSymbol(codeview::SymbolSerializer::writeOneSymbol(
      Sym, Allocator, CodeViewContainer::Pdb));
}
2016-11-12 08:00:51 +08:00
|
|
|
// Creates a PDB file.
|
2017-08-04 05:15:09 +08:00
|
|
|
void coff::createPDB(SymbolTable *Symtab,
                     ArrayRef<OutputSection *> OutputSections,
                     ArrayRef<uint8_t> SectionTable,
                     const llvm::codeview::DebugInfo &BuildId) {
  ScopedTimer T1(TotalPdbLinkTimer);
  PDBLinker PDB(Symtab);
  PDB.initialize(BuildId);
  PDB.addObjectsToPDB();
  PDB.addSections(OutputSections, SectionTable);

  ScopedTimer T2(DiskCommitTimer);
  PDB.commit();
}

void PDBLinker::initialize(const llvm::codeview::DebugInfo &BuildId) {
  ExitOnErr(Builder.initialize(4096)); // 4096 is blocksize

  // Create streams in MSF for predefined streams, namely
  // PDB, TPI, DBI and IPI.
  for (int I = 0; I < (int)pdb::kSpecialStreamCount; ++I)
    ExitOnErr(Builder.getMsfBuilder().addStream(0));

  // Add an Info stream.
  auto &InfoBuilder = Builder.getInfoBuilder();
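  // Copy the GUID and age out of the build id so that the values stored in
  // the PDB stay in sync with the ones recorded in the executable.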
  GUID uuid;
  memcpy(&uuid, &BuildId.PDB70.Signature, sizeof(uuid));
  InfoBuilder.setAge(BuildId.PDB70.Age);
  InfoBuilder.setGuid(uuid);
  InfoBuilder.setVersion(pdb::PdbRaw_ImplVer::PdbImplVC70);

  // Add an empty DBI stream.
  pdb::DbiStreamBuilder &DbiBuilder = Builder.getDbiBuilder();
  DbiBuilder.setAge(BuildId.PDB70.Age);
  DbiBuilder.setVersionHeader(pdb::PdbDbiV70);
  ExitOnErr(DbiBuilder.addDbgStream(pdb::DbgHeaderType::NewFPO, {}));
}

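// Describes one chunk's contribution to an output section in the DBI stream:
// which section it lands in, at what offset and size, and which module
// provided it.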
void PDBLinker::addSectionContrib(pdb::DbiModuleDescriptorBuilder &LinkerModule,
                                  OutputSection *OS, Chunk *C) {
  pdb::SectionContrib SC;
  memset(&SC, 0, sizeof(SC));
  SC.ISect = OS->SectionIndex;
  SC.Off = C->getRVA() - OS->getRVA();
  SC.Size = C->getSize();
  if (auto *SecChunk = dyn_cast<SectionChunk>(C)) {
    SC.Characteristics = SecChunk->Header->Characteristics;
    SC.Imod = SecChunk->File->ModuleDBI->getModuleIndex();
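    // Checksum the chunk contents; JamCRC is assumed here to be the CRC-32
    // variant that matches the DataCrc values MSVC emits.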
    ArrayRef<uint8_t> Contents = SecChunk->getContents();
    JamCRC CRC(0);
    ArrayRef<char> CharContents = makeArrayRef(
        reinterpret_cast<const char *>(Contents.data()), Contents.size());
    CRC.update(CharContents);
    SC.DataCrc = CRC.getCRC();
  } else {
    SC.Characteristics = OS->getCharacteristics();
    // FIXME: When we start creating DBI for import libraries, use those here.
    SC.Imod = LinkerModule.getModuleIndex();
  }
  SC.RelocCrc = 0; // FIXME
  Builder.getDbiBuilder().addSectionContrib(SC);
}

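// Builds the DBI-level view of the image: the synthetic * Linker * module,
// one section contribution per chunk, the section map, and a copy of the
// COFF section header table.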
void PDBLinker::addSections(ArrayRef<OutputSection *> OutputSections,
                            ArrayRef<uint8_t> SectionTable) {
  // It's not entirely clear what this is, but the * Linker * module uses it.
  pdb::DbiStreamBuilder &DbiBuilder = Builder.getDbiBuilder();
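  // Record the PDB path in absolute, Windows-native (backslash) form; the
  // assumption is that this is the form debuggers expect to find here.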
  NativePath = Config->PDBPath;
  sys::fs::make_absolute(NativePath);
  sys::path::native(NativePath, sys::path::Style::windows);
  uint32_t PdbFilePathNI = DbiBuilder.addECName(NativePath);
  auto &LinkerModule = ExitOnErr(DbiBuilder.addModuleInfo("* Linker *"));
  LinkerModule.setPdbFilePathNI(PdbFilePathNI);
  addCommonLinkerModuleSymbols(NativePath, LinkerModule, Alloc);

  // Add section contributions. They must be ordered by ascending RVA.
  for (OutputSection *OS : OutputSections) {
    addLinkerModuleSectionSymbol(LinkerModule, *OS, Alloc);
    for (Chunk *C : OS->getChunks())
      addSectionContrib(LinkerModule, OS, C);
  }

  // Add Section Map stream.
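  // SectionTable is the raw section header table as written to the output
  // file; reinterpret the bytes as coff_section records and let the DBI
  // builder derive the section map from them.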
  ArrayRef<object::coff_section> Sections = {
      (const object::coff_section *)SectionTable.data(),
      SectionTable.size() / sizeof(object::coff_section)};
  SectionMap = pdb::DbiStreamBuilder::createSectionMap(Sections);
  DbiBuilder.setSectionMap(SectionMap);

  // Add COFF section header stream.
  ExitOnErr(
      DbiBuilder.addDbgStream(pdb::DbgHeaderType::SectionHdr, SectionTable));
}

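// Lays out the MSF container and writes all accumulated streams to the PDB
// file on disk.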
void PDBLinker::commit() {
  // Write to a file.
  ExitOnErr(Builder.commit(Config->PDBPath));
}