[mach-o] create __unwind_info section on x86_64

This is a minimally useful pass to construct the __unwind_info section in a
final object from the various __compact_unwind inputs. Currently it doesn't
produce any compressed pages, only works for x86_64 and will fail if any
function ends up without __compact_unwind.

rdar://problem/18208653

llvm-svn: 218703
Tim Northover 2014-09-30 21:29:54 +00:00
parent e1c79749ca
commit cf78d37fd6
17 changed files with 669 additions and 13 deletions
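The heart of the new pass is the UnwindInfoAtom introduced below, which lays the section out by hand. For orientation, here is a minimal sketch of the fixed seven-word header that UnwindInfoAtom::addHeader() writes at the start of __unwind_info; the field names follow Apple's compact-unwind format headers, and the struct itself is illustrative only, not part of this commit:

#include <cstdint>

// Illustrative only: the layout addHeader() emits, one 32-bit field at a time.
struct unwind_info_section_header {
  uint32_t version;                            // currently 1
  uint32_t commonEncodingsArraySectionOffset;  // 7 * sizeof(uint32_t)
  uint32_t commonEncodingsArrayCount;
  uint32_t personalityArraySectionOffset;
  uint32_t personalityArrayCount;
  uint32_t indexSectionOffset;                 // start of the top-level index
  uint32_t indexCount;                         // number of pages + 1 sentinel
};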

View File

@ -137,6 +137,7 @@ public:
typeDTraceDOF, // runtime data for Dtrace [Darwin]
typeTempLTO, // temporary atom for bitcode reader
typeCompactUnwindInfo, // runtime data for unwinder [Darwin]
typeProcessedUnwindInfo,// compressed compact unwind info [Darwin]
typeThunkTLV, // thunk used to access a TLV [Darwin]
typeTLVInitialData, // initial data for a TLV [Darwin]
typeTLVInitialZeroFill, // TLV initial zero fill data [Darwin]

View File

@ -214,6 +214,9 @@ public:
// GOT creation Pass should be run.
bool needsGOTPass() const;
/// Pass to transform __compact_unwind into __unwind_info should be run.
bool needsCompactUnwindPass() const;
/// Magic symbol name stubs will need to help lazy bind.
StringRef binderSymbolName() const;

View File

@ -41,6 +41,7 @@ DefinedAtom::ContentPermissions DefinedAtom::permissions(ContentType type) {
case typeLiteral16:
case typeDTraceDOF:
case typeCompactUnwindInfo:
case typeProcessedUnwindInfo:
case typeRONote:
case typeNoAlloc:
return permR__;

View File

@ -53,6 +53,20 @@ public:
/// Used by GOTPass to update GOT References
virtual void updateReferenceToGOT(const Reference *, bool targetIsNowGOT) {}
/// Does this architecture make use of __unwind_info sections for exception
/// handling? If so, it will need a separate pass to create them.
virtual bool needsCompactUnwind() = 0;
/// Returns the kind of reference to use to synthesize a 32-bit image-offset
/// value, used in the __unwind_info section.
virtual Reference::KindValue imageOffsetKind() = 0;
/// Returns the kind of reference to use to synthesize a 32-bit image-offset
/// indirect value. Used for personality functions in the __unwind_info
/// section.
virtual Reference::KindValue imageOffsetKindIndirect() = 0;
/// Used by normalizedFromAtoms() to know where to generate rebasing and
/// binding info in final executables.
virtual bool isPointer(const Reference &) = 0;
@ -126,6 +140,7 @@ public:
/// Copy raw content then apply all fixup References on an Atom.
virtual void generateAtomContent(const DefinedAtom &atom, bool relocatable,
FindAddressForAtom findAddress,
uint64_t imageBaseAddress,
uint8_t *atomContentBuffer) = 0;
/// Used in -r mode to convert a Reference to a mach-o relocation.
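A sketch of how a pass-synthesized atom is expected to use the two new image-offset hooks; it mirrors UnwindInfoAtom::addImageReference() later in this commit, and the free function here is purely illustrative:

// Record a 32-bit image-offset fixup on a synthesized atom. When the final
// image is written, generateAtomContent() receives the image base address and
// the arch handler resolves the reference to (targetAddress - imageBase).
static void addImageOffsetFixup(lld::SimpleDefinedAtom &atom,
                                lld::mach_o::ArchHandler &handler,
                                uint32_t offsetInAtom, const lld::Atom *target) {
  atom.addReference(lld::Reference::KindNamespace::mach_o, handler.kindArch(),
                    handler.imageOffsetKind(), offsetInAtom, target,
                    /*addend=*/0);
}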

View File

@ -36,6 +36,17 @@ public:
bool isCallSite(const Reference &) override;
bool isPointer(const Reference &) override;
bool isPairedReloc(const normalized::Relocation &) override;
bool needsCompactUnwind() override {
return false;
}
Reference::KindValue imageOffsetKind() override {
return invalid;
}
Reference::KindValue imageOffsetKindIndirect() override {
return invalid;
}
std::error_code getReferenceInfo(const normalized::Relocation &reloc,
const DefinedAtom *inAtom,
uint32_t offsetInAtom,
@ -59,6 +70,7 @@ public:
void generateAtomContent(const DefinedAtom &atom, bool relocatable,
FindAddressForAtom findAddress,
uint64_t imageBaseAddress,
uint8_t *atomContentBuffer) override;
void appendSectionRelocations(const DefinedAtom &atom,
@ -904,9 +916,10 @@ void ArchHandler_arm::applyFixupFinal(const Reference &ref, uint8_t *location,
}
void ArchHandler_arm::generateAtomContent(const DefinedAtom &atom,
bool relocatable,
FindAddressForAtom findAddress,
uint8_t *atomContentBuffer) {
bool relocatable,
FindAddressForAtom findAddress,
uint64_t imageBaseAddress,
uint8_t *atomContentBuffer) {
// Copy raw bytes.
memcpy(atomContentBuffer, atom.rawContent().data(), atom.size());
// Apply fix-ups.

View File

@ -76,6 +76,17 @@ public:
bool isCallSite(const Reference &) override;
bool isPointer(const Reference &) override;
bool isPairedReloc(const normalized::Relocation &) override;
bool needsCompactUnwind() override {
return false;
}
Reference::KindValue imageOffsetKind() override {
return invalid;
}
Reference::KindValue imageOffsetKindIndirect() override {
return invalid;
}
std::error_code getReferenceInfo(const normalized::Relocation &reloc,
const DefinedAtom *inAtom,
uint32_t offsetInAtom,
@ -103,6 +114,7 @@ public:
void generateAtomContent(const DefinedAtom &atom, bool relocatable,
FindAddressForAtom findAddress,
uint64_t imageBaseAddress,
uint8_t *atomContentBuffer) override;
void appendSectionRelocations(const DefinedAtom &atom,
@ -440,6 +452,7 @@ std::error_code ArchHandler_arm64::getPairReferenceInfo(
void ArchHandler_arm64::generateAtomContent(const DefinedAtom &atom,
bool relocatable,
FindAddressForAtom findAddress,
uint64_t imageBaseAddress,
uint8_t *atomContentBuffer) {
// Copy raw bytes.
memcpy(atomContentBuffer, atom.rawContent().data(), atom.size());

View File

@ -36,6 +36,17 @@ public:
bool isCallSite(const Reference &) override;
bool isPointer(const Reference &) override;
bool isPairedReloc(const normalized::Relocation &) override;
bool needsCompactUnwind() override {
return false;
}
Reference::KindValue imageOffsetKind() override {
return invalid;
}
Reference::KindValue imageOffsetKindIndirect() override {
return invalid;
}
std::error_code getReferenceInfo(const normalized::Relocation &reloc,
const DefinedAtom *inAtom,
uint32_t offsetInAtom,
@ -59,6 +70,7 @@ public:
void generateAtomContent(const DefinedAtom &atom, bool relocatable,
FindAddressForAtom findAddress,
uint64_t imageBaseAddress,
uint8_t *atomContentBuffer) override;
void appendSectionRelocations(const DefinedAtom &atom,
@ -364,9 +376,10 @@ ArchHandler_x86::getPairReferenceInfo(const normalized::Relocation &reloc1,
}
void ArchHandler_x86::generateAtomContent(const DefinedAtom &atom,
bool relocatable,
FindAddressForAtom findAddress,
uint8_t *atomContentBuffer) {
bool relocatable,
FindAddressForAtom findAddress,
uint64_t imageBaseAddress,
uint8_t *atomContentBuffer) {
// Copy raw bytes.
memcpy(atomContentBuffer, atom.rawContent().data(), atom.size());
// Apply fix-ups.

View File

@ -46,6 +46,9 @@ public:
case ripRel32Got:
canBypassGOT = false;
return true;
case imageOffsetGot:
canBypassGOT = false;
return true;
default:
return false;
}
@ -55,8 +58,30 @@ public:
void updateReferenceToGOT(const Reference *ref, bool targetNowGOT) override {
assert(ref->kindNamespace() == Reference::KindNamespace::mach_o);
assert(ref->kindArch() == Reference::KindArch::x86_64);
const_cast<Reference *>(ref)
switch (ref->kindValue()) {
case ripRel32Got:
assert(targetNowGOT && "target must be GOT");
case ripRel32GotLoad:
const_cast<Reference *>(ref)
->setKindValue(targetNowGOT ? ripRel32 : ripRel32GotLoadNowLea);
break;
case imageOffsetGot:
const_cast<Reference *>(ref)->setKindValue(imageOffset);
break;
default:
llvm_unreachable("unknown GOT reference kind");
}
}
bool needsCompactUnwind() override {
return true;
}
Reference::KindValue imageOffsetKind() override {
return imageOffset;
}
Reference::KindValue imageOffsetKindIndirect() override {
return imageOffsetGot;
}
const StubInfo &stubInfo() override { return _sStubInfo; }
@ -64,6 +89,7 @@ public:
bool isCallSite(const Reference &) override;
bool isPointer(const Reference &) override;
bool isPairedReloc(const normalized::Relocation &) override;
std::error_code getReferenceInfo(const normalized::Relocation &reloc,
const DefinedAtom *inAtom,
uint32_t offsetInAtom,
@ -91,6 +117,7 @@ public:
void generateAtomContent(const DefinedAtom &atom, bool relocatable,
FindAddressForAtom findAddress,
uint64_t imageBase,
uint8_t *atomContentBuffer) override;
void appendSectionRelocations(const DefinedAtom &atom,
@ -130,6 +157,11 @@ private:
/// to "leaq _foo(%rip), %rax
lazyPointer, /// Location contains a lazy pointer.
lazyImmediateLocation, /// Location contains immediate value used in stub.
imageOffset, /// Location contains offset of atom in final image
imageOffsetGot, /// Location contains offset of GOT entry for atom in
/// final image (typically personality function).
};
Reference::KindValue kindFromReloc(const normalized::Relocation &reloc);
@ -138,7 +170,7 @@ private:
void applyFixupFinal(const Reference &ref, uint8_t *location,
uint64_t fixupAddress, uint64_t targetAddress,
uint64_t inAtomAddress);
uint64_t inAtomAddress, uint64_t imageBaseAddress);
void applyFixupRelocatable(const Reference &ref, uint8_t *location,
uint64_t fixupAddress,
@ -165,6 +197,7 @@ const Registry::KindStrings ArchHandler_x86_64::_sKindStrings[] = {
LLD_KIND_STRING_ENTRY(pointer64), LLD_KIND_STRING_ENTRY(pointer64Anon),
LLD_KIND_STRING_ENTRY(delta32), LLD_KIND_STRING_ENTRY(delta64),
LLD_KIND_STRING_ENTRY(delta32Anon), LLD_KIND_STRING_ENTRY(delta64Anon),
LLD_KIND_STRING_ENTRY(imageOffset), LLD_KIND_STRING_ENTRY(imageOffsetGot),
LLD_KIND_STRING_END
};
@ -382,6 +415,7 @@ ArchHandler_x86_64::getPairReferenceInfo(const normalized::Relocation &reloc1,
void ArchHandler_x86_64::generateAtomContent(const DefinedAtom &atom,
bool relocatable,
FindAddressForAtom findAddress,
uint64_t imageBaseAddress,
uint8_t *atomContentBuffer) {
// Copy raw bytes.
memcpy(atomContentBuffer, atom.rawContent().data(), atom.size());
@ -400,8 +434,8 @@ void ArchHandler_x86_64::generateAtomContent(const DefinedAtom &atom,
atomAddress);
} else {
applyFixupFinal(*ref, &atomContentBuffer[offset],
fixupAddress, targetAddress,
atomAddress);
fixupAddress, targetAddress,
atomAddress, imageBaseAddress);
}
}
}
@ -410,7 +444,8 @@ void ArchHandler_x86_64::applyFixupFinal(const Reference &ref,
uint8_t *location,
uint64_t fixupAddress,
uint64_t targetAddress,
uint64_t inAtomAddress) {
uint64_t inAtomAddress,
uint64_t imageBaseAddress) {
if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
return;
assert(ref.kindArch() == Reference::KindArch::x86_64);
@ -455,6 +490,10 @@ void ArchHandler_x86_64::applyFixupFinal(const Reference &ref,
case lazyImmediateLocation:
// do nothing
return;
case imageOffset:
case imageOffsetGot:
write32(*loc32, _swap, (targetAddress - imageBaseAddress) + ref.addend());
return;
case invalid:
// Fall into llvm_unreachable().
break;
@ -514,6 +553,10 @@ void ArchHandler_x86_64::applyFixupRelocatable(const Reference &ref,
case lazyImmediateLocation:
llvm_unreachable("lazy reference kind implies Stubs pass was run");
return;
case imageOffset:
case imageOffsetGot:
llvm_unreachable("image offset implies __unwind_info");
return;
case invalid:
// Fall into llvm_unreachable().
break;
@ -605,6 +648,10 @@ void ArchHandler_x86_64::appendSectionRelocations(
case lazyImmediateLocation:
llvm_unreachable("lazy reference kind implies Stubs pass was run");
return;
case imageOffset:
case imageOffsetGot:
llvm_unreachable("__unwind_info references should have been resolved");
return;
case invalid:
// Fall into llvm_unreachable().
break;

View File

@ -4,6 +4,7 @@ add_lld_library(lldMachO
ArchHandler_arm64.cpp
ArchHandler_x86.cpp
ArchHandler_x86_64.cpp
CompactUnwindPass.cpp
GOTPass.cpp
MachOLinkingContext.cpp
MachONormalizedFileBinaryReader.cpp

View File

@ -0,0 +1,456 @@
//===- lib/ReaderWriter/MachO/CompactUnwindPass.cpp -----------------------===//
//
// The LLVM Linker
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
///  A pass that converts the input __compact_unwind atoms into a final
///  __unwind_info section for the runtime unwinder.
//===----------------------------------------------------------------------===//
#include "ArchHandler.h"
#include "File.h"
#include "MachOPasses.h"
#include "MachONormalizedFileBinaryUtils.h"
#include "lld/Core/DefinedAtom.h"
#include "lld/Core/File.h"
#include "lld/Core/LLVM.h"
#include "lld/Core/Reference.h"
#include "lld/Core/Simple.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Format.h"
#define DEBUG_TYPE "macho-compact-unwind"
namespace lld {
namespace mach_o {
namespace {
struct CompactUnwindEntry {
const Atom *rangeStart;
const Atom *personalityFunction;
const Atom *lsdaLocation;
uint32_t rangeLength;
uint32_t encoding;
};
struct UnwindInfoPage {
std::vector<CompactUnwindEntry> entries;
};
}
class UnwindInfoAtom : public SimpleDefinedAtom {
public:
UnwindInfoAtom(ArchHandler &archHandler, const File &file, bool swap,
std::vector<uint32_t> commonEncodings,
std::vector<const Atom *> personalities,
std::vector<UnwindInfoPage> pages, uint32_t numLSDAs)
: SimpleDefinedAtom(file), _archHandler(archHandler),
_commonEncodingsOffset(7 * sizeof(uint32_t)),
_personalityArrayOffset(_commonEncodingsOffset +
commonEncodings.size() * sizeof(uint32_t)),
_topLevelIndexOffset(_personalityArrayOffset +
personalities.size() * sizeof(uint32_t)),
_lsdaIndexOffset(_topLevelIndexOffset +
3 * (pages.size() + 1) * sizeof(uint32_t)),
_firstPageOffset(_lsdaIndexOffset + 2 * numLSDAs * sizeof(uint32_t)),
_swap(swap) {
addHeader(commonEncodings.size(), personalities.size(), pages.size());
addCommonEncodings(commonEncodings);
addPersonalityFunctions(personalities);
addTopLevelIndexes(pages);
addLSDAIndexes(pages, numLSDAs);
addSecondLevelPages(pages);
}
ContentType contentType() const override {
return DefinedAtom::typeProcessedUnwindInfo;
}
Alignment alignment() const override { return Alignment(2); }
uint64_t size() const override { return _contents.size(); }
ContentPermissions permissions() const override {
return DefinedAtom::permR__;
}
ArrayRef<uint8_t> rawContent() const override { return _contents; }
void addHeader(uint32_t numCommon, uint32_t numPersonalities,
uint32_t numPages) {
using normalized::write32;
uint32_t headerSize = 7 * sizeof(uint32_t);
_contents.resize(headerSize);
int32_t *headerEntries = (int32_t *)_contents.data();
// version
write32(headerEntries[0], _swap, 1);
// commonEncodingsArraySectionOffset
write32(headerEntries[1], _swap, _commonEncodingsOffset);
// commonEncodingsArrayCount
write32(headerEntries[2], _swap, numCommon);
// personalityArraySectionOffset
write32(headerEntries[3], _swap, _personalityArrayOffset);
// personalityArrayCount
write32(headerEntries[4], _swap, numPersonalities);
// indexSectionOffset
write32(headerEntries[5], _swap, _topLevelIndexOffset);
// indexCount
write32(headerEntries[6], _swap, numPages + 1);
}
/// Add the list of common encodings to the section; this is simply an array
/// of uint32_t compact values. Size has already been specified in the header.
void addCommonEncodings(std::vector<uint32_t> &commonEncodings) {
using normalized::write32;
_contents.resize(_commonEncodingsOffset +
commonEncodings.size() * sizeof(uint32_t));
int32_t *commonEncodingsArea =
(int32_t *)&_contents[_commonEncodingsOffset];
for (uint32_t encoding : commonEncodings)
write32(*commonEncodingsArea++, _swap, encoding);
}
void addPersonalityFunctions(std::vector<const Atom *> personalities) {
_contents.resize(_personalityArrayOffset +
personalities.size() * sizeof(uint32_t));
for (unsigned i = 0; i < personalities.size(); ++i)
addImageReferenceIndirect(_personalityArrayOffset + i * sizeof(uint32_t),
personalities[i]);
}
void addTopLevelIndexes(std::vector<UnwindInfoPage> &pages) {
using normalized::write32;
uint32_t numIndexes = pages.size() + 1;
_contents.resize(_topLevelIndexOffset + numIndexes * 3 * sizeof(uint32_t));
uint32_t pageLoc = _firstPageOffset;
// The most difficult job here is calculating the LSDAs; everything else
// follows fairly naturally, but we can't state where each page's LSDA index
// entries start until we've counted the LSDAs in all the preceding pages.
int32_t *indexData = (int32_t *)&_contents[_topLevelIndexOffset];
uint32_t numLSDAs = 0;
for (unsigned i = 0; i < pages.size(); ++i) {
// functionOffset
addImageReference(_topLevelIndexOffset + 3 * i * sizeof(uint32_t),
pages[i].entries[0].rangeStart);
// secondLevelPagesSectionOffset
write32(indexData[3 * i + 1], _swap, pageLoc);
write32(indexData[3 * i + 2], _swap,
_lsdaIndexOffset + numLSDAs * 2 * sizeof(uint32_t));
for (auto &entry : pages[i].entries)
if (entry.lsdaLocation)
++numLSDAs;
}
// Finally, write out the final sentinel index
CompactUnwindEntry &finalEntry = pages[pages.size() - 1].entries.back();
addImageReference(_topLevelIndexOffset +
3 * pages.size() * sizeof(uint32_t),
finalEntry.rangeStart, finalEntry.rangeLength);
// secondLevelPagesSectionOffset => 0
indexData[3 * pages.size() + 2] =
_lsdaIndexOffset + numLSDAs * 2 * sizeof(uint32_t);
}
void addLSDAIndexes(std::vector<UnwindInfoPage> &pages, uint32_t numLSDAs) {
_contents.resize(_lsdaIndexOffset + numLSDAs * 2 * sizeof(uint32_t));
uint32_t curOffset = _lsdaIndexOffset;
for (auto &page : pages) {
for (auto &entry : page.entries) {
if (!entry.lsdaLocation)
continue;
addImageReference(curOffset, entry.rangeStart);
addImageReference(curOffset + sizeof(uint32_t), entry.lsdaLocation);
curOffset += 2 * sizeof(uint32_t);
}
}
}
void addSecondLevelPages(std::vector<UnwindInfoPage> &pages) {
for (auto &page : pages) {
addRegularSecondLevelPage(page);
}
}
void addRegularSecondLevelPage(const UnwindInfoPage &page) {
uint32_t curPageOffset = _contents.size();
const int16_t headerSize = sizeof(uint32_t) + 2 * sizeof(uint16_t);
uint32_t curPageSize =
headerSize + 2 * page.entries.size() * sizeof(uint32_t);
_contents.resize(curPageOffset + curPageSize);
using normalized::write32;
using normalized::write16;
// 2 => regular page
write32(*(int32_t *)&_contents[curPageOffset], _swap, 2);
// offset of 1st entry
write16(*(int16_t *)&_contents[curPageOffset + 4], _swap, headerSize);
write16(*(int16_t *)&_contents[curPageOffset + 6], _swap,
page.entries.size());
uint32_t pagePos = curPageOffset + headerSize;
for (auto &entry : page.entries) {
addImageReference(pagePos, entry.rangeStart);
write32(reinterpret_cast<int32_t *>(_contents.data() + pagePos)[1], _swap,
entry.encoding);
pagePos += 2 * sizeof(uint32_t);
}
}
void addImageReference(uint32_t offset, const Atom *dest,
Reference::Addend addend = 0) {
addReference(Reference::KindNamespace::mach_o, _archHandler.kindArch(),
_archHandler.imageOffsetKind(), offset, dest, addend);
}
void addImageReferenceIndirect(uint32_t offset, const Atom *dest) {
addReference(Reference::KindNamespace::mach_o, _archHandler.kindArch(),
_archHandler.imageOffsetKindIndirect(), offset, dest, 0);
}
private:
mach_o::ArchHandler &_archHandler;
std::vector<uint8_t> _contents;
uint32_t _commonEncodingsOffset;
uint32_t _personalityArrayOffset;
uint32_t _topLevelIndexOffset;
uint32_t _lsdaIndexOffset;
uint32_t _firstPageOffset;
bool _swap;
};
/// Pass to convert the input __compact_unwind atoms into a single
/// __unwind_info atom.
///
class CompactUnwindPass : public Pass {
public:
CompactUnwindPass(const MachOLinkingContext &context)
: _context(context), _archHandler(_context.archHandler()),
_file("<mach-o Compact Unwind Pass>"),
_swap(!MachOLinkingContext::isHostEndian(_context.arch())) {}
private:
void perform(std::unique_ptr<MutableFile> &mergedFile) override {
DEBUG(llvm::dbgs() << "MachO Compact Unwind pass\n");
// First collect all __compact_unwind entries, keyed by the function each
// refers to.
std::map<const Atom *, CompactUnwindEntry> unwindLocs;
std::vector<const Atom *> personalities;
uint32_t numLSDAs = 0;
collectCompactUnwindEntries(mergedFile, unwindLocs, personalities,
numLSDAs);
// FIXME: if there are more than 4 personality functions then we need to
// defer to DWARF info for the ones we don't put in the list. They should
// also probably be sorted by frequency.
assert(personalities.size() <= 4);
// Now sort the entries by final address and fixup the compact encoding to
// its final form (i.e. set personality function bits & create DWARF
// references where needed).
std::vector<CompactUnwindEntry> unwindInfos =
createUnwindInfoEntries(mergedFile, unwindLocs, personalities);
// Finally, we can start creating pages based on these entries.
DEBUG(llvm::dbgs() << " Splitting entries into pages\n");
// FIXME: we split the entries into pages naively: lots of 4k pages followed
// by a small one. ld64 tried to minimize space and align them to real 4k
// boundaries. That might be worth doing, or perhaps we could perform some
// minor balancing for expected number of lookups.
std::vector<UnwindInfoPage> pages;
unsigned pageStart = 0;
do {
pages.push_back(UnwindInfoPage());
// FIXME: we only create regular pages at the moment. These can hold up to
// 1021 entries according to the documentation.
unsigned entriesInPage =
std::min(1021U, (unsigned)unwindInfos.size() - pageStart);
std::copy(unwindInfos.begin() + pageStart,
unwindInfos.begin() + pageStart + entriesInPage,
std::back_inserter(pages.back().entries));
pageStart += entriesInPage;
DEBUG(llvm::dbgs()
<< " Page from " << pages.back().entries[0].rangeStart->name()
<< " to " << pages.back().entries.back().rangeStart->name() << " + "
<< llvm::format("0x%x", pages.back().entries.back().rangeLength)
<< " has " << entriesInPage << " entries\n");
} while (pageStart < unwindInfos.size());
// FIXME: we should also erase all compact-unwind atoms; their job is done.
UnwindInfoAtom *unwind = new (_file.allocator())
UnwindInfoAtom(_archHandler, _file, _swap, std::vector<uint32_t>(),
personalities, pages, numLSDAs);
mergedFile->addAtom(*unwind);
}
void collectCompactUnwindEntries(
std::unique_ptr<MutableFile> &mergedFile,
std::map<const Atom *, CompactUnwindEntry> &unwindLocs,
std::vector<const Atom *> &personalities, uint32_t &numLSDAs) {
DEBUG(llvm::dbgs() << " Collecting __compact_unwind entries\n");
for (const DefinedAtom *atom : mergedFile->defined()) {
if (atom->contentType() != DefinedAtom::typeCompactUnwindInfo)
continue;
auto unwindEntry = extractCompactUnwindEntry(atom);
unwindLocs.insert(std::make_pair(unwindEntry.rangeStart, unwindEntry));
DEBUG(llvm::dbgs() << " Entry for " << unwindEntry.rangeStart->name()
<< ", encoding="
<< llvm::format("0x%08x", unwindEntry.encoding));
if (unwindEntry.personalityFunction)
DEBUG(llvm::dbgs() << ", personality="
<< unwindEntry.personalityFunction->name()
<< ", lsdaLoc=" << unwindEntry.lsdaLocation->name());
DEBUG(llvm::dbgs() << '\n');
// Count number of LSDAs we see, since we need to know how big the index
// will be while laying out the section.
if (unwindEntry.lsdaLocation)
++numLSDAs;
// Gather the personality functions now, so that they're in deterministic
// order (derived from the DefinedAtom order).
if (unwindEntry.personalityFunction) {
auto pFunc = std::find(personalities.begin(), personalities.end(),
unwindEntry.personalityFunction);
if (pFunc == personalities.end())
personalities.push_back(unwindEntry.personalityFunction);
}
}
}
CompactUnwindEntry extractCompactUnwindEntry(const DefinedAtom *atom) {
CompactUnwindEntry entry = {nullptr, nullptr, nullptr, 0, 0};
for (const Reference *ref : *atom) {
switch (ref->offsetInAtom()) {
case 0:
// FIXME: there could legitimately be functions with multiple encoding
// entries. However, nothing produces them at the moment.
assert(ref->addend() == 0 && "unexpected offset into function");
entry.rangeStart = ref->target();
break;
case 0x10:
assert(ref->addend() == 0 && "unexpected offset into personality fn");
entry.personalityFunction = ref->target();
break;
case 0x18:
assert(ref->addend() == 0 && "unexpected offset into LSDA atom");
entry.lsdaLocation = ref->target();
break;
}
}
using normalized::read32;
entry.rangeLength =
read32(_swap, ((uint32_t *)atom->rawContent().data())[2]);
entry.encoding = read32(_swap, ((uint32_t *)atom->rawContent().data())[3]);
return entry;
}
/// Every atom defined in __TEXT,__text needs an entry in the final
/// __unwind_info section (in order). These come from two sources:
/// + Input __compact_unwind sections where possible (after adding the
/// personality function offset which is only known now).
/// + A synthesised reference to __eh_frame if there's no __compact_unwind
/// or too many personality functions to be accommodated.
std::vector<CompactUnwindEntry> createUnwindInfoEntries(
const std::unique_ptr<MutableFile> &mergedFile,
const std::map<const Atom *, CompactUnwindEntry> &unwindLocs,
const std::vector<const Atom *> &personalities) {
std::vector<CompactUnwindEntry> unwindInfos;
DEBUG(llvm::dbgs() << " Creating __unwind_info entries\n");
// The final order in the __unwind_info section must be derived from the
// order of typeCode atoms, since that's how they'll be put into the object
// file eventually (yuck!).
for (const DefinedAtom *atom : mergedFile->defined()) {
if (atom->contentType() != DefinedAtom::typeCode)
continue;
unwindInfos.push_back(
finalizeUnwindInfoEntryForAtom(atom, unwindLocs, personalities));
DEBUG(llvm::dbgs() << " Entry for " << atom->name()
<< ", final encoding="
<< llvm::format("0x%08x", unwindInfos.back().encoding)
<< '\n');
}
return unwindInfos;
}
CompactUnwindEntry finalizeUnwindInfoEntryForAtom(
const DefinedAtom *function,
const std::map<const Atom *, CompactUnwindEntry> &unwindLocs,
const std::vector<const Atom *> &personalities) {
auto unwindLoc = unwindLocs.find(function);
// FIXME: we should synthesize a DWARF compact unwind entry before claiming
// there's no unwind if a __compact_unwind atom doesn't exist.
if (unwindLoc == unwindLocs.end()) {
CompactUnwindEntry entry;
memset(&entry, 0, sizeof(CompactUnwindEntry));
entry.rangeStart = function;
entry.rangeLength = function->size();
return entry;
}
CompactUnwindEntry entry = unwindLoc->second;
auto personality = std::find(personalities.begin(), personalities.end(),
entry.personalityFunction);
uint32_t personalityIdx = personality == personalities.end()
? 0
: personality - personalities.begin() + 1;
// FIXME: We should also use DWARF when there isn't enough room for the
// personality function in the compact encoding.
assert(personalityIdx < 4 && "too many personality functions");
entry.encoding |= personalityIdx << 28;
if (entry.lsdaLocation)
entry.encoding |= 1U << 30;
return entry;
}
const MachOLinkingContext &_context;
mach_o::ArchHandler &_archHandler;
MachOFile _file;
bool _swap;
};
void addCompactUnwindPass(PassManager &pm, const MachOLinkingContext &ctx) {
assert(ctx.needsCompactUnwindPass());
pm.add(std::unique_ptr<Pass>(new CompactUnwindPass(ctx)));
}
} // end namespace mach_o
} // end namespace lld
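Two on-disk layouts that the pass manipulates but never declares as structs; both sketches are inferred from extractCompactUnwindEntry() and addRegularSecondLevelPage() above and are illustrative only:

#include <cstdint>

// Input: one x86_64 __compact_unwind entry as produced by the compiler.
// extractCompactUnwindEntry() reads the two 32-bit words directly and picks
// the three pointers up from relocations at offsets 0x0, 0x10 and 0x18.
struct CompactUnwindInputEntry {   // 32 bytes on x86_64
  uint64_t functionAddress;        // 0x00, pointer64Anon reloc to the function
  uint32_t functionLength;         // 0x08
  uint32_t compactEncoding;        // 0x0c
  uint64_t personalityFunction;    // 0x10, pointer64 reloc or zero
  uint64_t lsda;                   // 0x18, pointer64Anon reloc or zero
};

// Output: a "regular" (uncompressed) second-level page as written by
// addRegularSecondLevelPage(); kind 2 marks a regular page, and each entry
// is a (function image-offset, final encoding) pair.
struct RegularSecondLevelPageHeader {
  uint32_t kind;                   // 2 == regular page
  uint16_t entryPageOffset;        // offset of the first entry in the page
  uint16_t entryCount;
};
struct RegularSecondLevelPageEntry {
  uint32_t functionOffset;         // resolved through the imageOffset kind
  uint32_t encoding;
};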

View File

@ -281,6 +281,16 @@ bool MachOLinkingContext::needsGOTPass() const {
}
}
bool MachOLinkingContext::needsCompactUnwindPass() const {
switch (_outputMachOType) {
case MH_EXECUTE:
case MH_DYLIB:
case MH_BUNDLE:
return archHandler().needsCompactUnwind();
default:
return false;
}
}
StringRef MachOLinkingContext::binderSymbolName() const {
return archHandler().stubInfo().binderSymbolName;
@ -511,6 +521,8 @@ void MachOLinkingContext::addPasses(PassManager &pm) {
pm.add(std::unique_ptr<Pass>(new LayoutPass(registry())));
if (needsStubsPass())
mach_o::addStubsPass(pm, *this);
if (needsCompactUnwindPass())
mach_o::addCompactUnwindPass(pm, *this);
if (needsGOTPass())
mach_o::addGOTPass(pm, *this);
}

View File

@ -220,6 +220,7 @@ const MachOFinalSectionFromAtomType sectsToAtomType[] = {
ENTRY("__TEXT", "__stub_helper", S_REGULAR, typeStubHelper),
ENTRY("__TEXT", "__gcc_except_tab", S_REGULAR, typeLSDA),
ENTRY("__TEXT", "__eh_frame", S_COALESCED, typeCFI),
ENTRY("__TEXT", "__unwind_info", S_REGULAR, typeProcessedUnwindInfo),
ENTRY("__DATA", "__data", S_REGULAR, typeData),
ENTRY("__DATA", "__const", S_REGULAR, typeConstData),
ENTRY("__DATA", "__cfstring", S_REGULAR, typeCFString),
@ -565,7 +566,8 @@ void Util::copySectionContent(NormalizedFile &file) {
for (AtomInfo &ai : si->atomsAndOffsets) {
uint8_t *atomContent = reinterpret_cast<uint8_t*>
(&sectionContent[ai.offsetInSection]);
_archHandler.generateAtomContent(*ai.atom, r, addrForAtom, atomContent);
_archHandler.generateAtomContent(*ai.atom, r, addrForAtom,
_context.baseAddress(), atomContent);
}
}
}

View File

@ -18,6 +18,7 @@ namespace mach_o {
void addStubsPass(PassManager &pm, const MachOLinkingContext &ctx);
void addGOTPass(PassManager &pm, const MachOLinkingContext &ctx);
void addCompactUnwindPass(PassManager &pm, const MachOLinkingContext &ctx);
} // namespace mach_o
} // namespace lld

View File

@ -439,6 +439,7 @@ template <> struct ScalarEnumerationTraits<lld::DefinedAtom::ContentType> {
io.enumCase(value, "dtraceDOF", DefinedAtom::typeDTraceDOF);
io.enumCase(value, "lto-temp", DefinedAtom::typeTempLTO);
io.enumCase(value, "compact-unwind", DefinedAtom::typeCompactUnwindInfo);
io.enumCase(value, "unwind-info", DefinedAtom::typeProcessedUnwindInfo);
io.enumCase(value, "tlv-thunk", DefinedAtom::typeThunkTLV);
io.enumCase(value, "tlv-data", DefinedAtom::typeTLVInitialData);
io.enumCase(value, "tlv-zero-fill", DefinedAtom::typeTLVInitialZeroFill);

View File

@ -53,6 +53,9 @@ global-symbols:
# CHECK: Size: 0x1
# CHECK: Offset: 0
# CHECK-LABEL: Section {
# CHECK: Name: __unwind_info
# CHECK-LABEL: Section {
# CHECK: Name: __data
# CHECK: Segment: __DATA

View File

@ -43,7 +43,10 @@ global-symbols:
# CHECK: Name: __text
# CHECK: Segment: __TEXT
# CHECK: Size: 0x1
# CHECK: Offset: 4095
# CHECK: Offset: 4027
# CHECK-LABEL: Section {
# CHECK: Name: __unwind_info
# CHECK-LABEL: Section {
# CHECK: Name: __data

View File

@ -0,0 +1,71 @@
# RUN: lld -flavor darwin -arch x86_64 %s -o %t -e _main %p/Inputs/libSystem.yaml
# RUN: llvm-objdump -unwind-info %t | FileCheck %s
# CHECK: Contents of __unwind_info section:
# CHECK: Version: 0x1
# CHECK: Common encodings array section offset: 0x1c
# CHECK: Number of common encodings in array: 0x0
# CHECK: Personality function array section offset: 0x1c
# CHECK: Number of personality functions in array: 0x1
# CHECK: Index array section offset: 0x20
# CHECK: Number of indices in array: 0x2
# CHECK: Common encodings: (count = 0)
# CHECK: Personality functions: (count = 1)
# CHECK: personality[1]: 0x00001000
# CHECK: Top level indices: (count = 2)
# CHECK: [0]: function offset=0x00000f7e, 2nd level page offset=0x00000040, LSDA offset=0x00000038
# CHECK: [1]: function offset=0x00000f80, 2nd level page offset=0x00000000, LSDA offset=0x00000040
# CHECK: LSDA descriptors:
# CHECK: [0]: function offset=0x00000f7e, LSDA offset=0x00000f80
# CHECK: Second level indices:
# CHECK: Second level index[0]: offset in section=0x00000040, base function offset=0x00000f7e
# CHECK: [0]: function offset=0x00000f7e, encoding=0x51000000
# CHECK: [1]: function offset=0x00000f7f, encoding=0x01000000
--- !native
path: '<linker-internal>'
defined-atoms:
- name: GCC_except_table1
type: unwind-lsda
content: [ FF, 9B, A2, 80, 80, 00, 03, 1A, 08, 00, 00, 00,
05, 00, 00, 00, 1A, 00, 00, 00, 01, 0D, 00, 00,
00, 64, 00, 00, 00, 00, 00, 00, 00, 00, 01, 00,
04, 00, 00, 00 ]
- type: compact-unwind
content: [ 40, 00, 00, 00, 00, 00, 00, 00, 01, 00, 00, 00,
00, 00, 00, 41, 00, 00, 00, 00, 00, 00, 00, 00,
E0, 00, 00, 00, 00, 00, 00, 00 ]
references:
- kind: pointer64Anon
offset: 0
target: __Z3barv
- kind: pointer64
offset: 16
target: ___gxx_personality_v0
- kind: pointer64Anon
offset: 24
target: GCC_except_table1
- type: compact-unwind
content: [ C0, 00, 00, 00, 00, 00, 00, 00, 01, 00, 00, 00,
00, 00, 00, 01, 00, 00, 00, 00, 00, 00, 00, 00,
00, 00, 00, 00, 00, 00, 00, 00 ]
references:
- kind: pointer64Anon
offset: 0
target: _main
- name: __Z3barv
scope: global
content: [ C3 ]
- name: _main
scope: global
content: [ C3 ]
references:
- kind: branch32
offset: 9
target: __Z3barv
shared-library-atoms:
- name: ___gxx_personality_v0
load-name: '/usr/lib/libc++abi.dylib'
type: unknown
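A worked reading of the first compact-unwind atom above (the one covering __Z3barv), assuming the little-endian x86_64 layout the pass expects, shows where the CHECK value encoding=0x51000000 comes from:

// bytes [8..11]  = 01 00 00 00  -> functionLength = 1
// bytes [12..15] = 00 00 00 41  -> input encoding = 0x41000000
// reloc at 0x10 -> ___gxx_personality_v0 (personality index 1)
// reloc at 0x18 -> GCC_except_table1     (so the has-LSDA bit is set)
uint32_t finalEncoding = 0x41000000u | (1u << 28) | (1u << 30); // 0x51000000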