Revert "[lld-macho] Add support for arm64_32" and other stacked diffs

This reverts commits:
* 8914902b01
* 35a745d814
* 682d1dfe09
This commit is contained in:
Jez Ng 2021-04-13 12:39:24 -04:00
parent 74f98391a7
commit 8ca366935b
22 changed files with 322 additions and 721 deletions

View File

@ -6,7 +6,6 @@
//
//===----------------------------------------------------------------------===//
#include "Arch/ARM64Common.h"
#include "InputFiles.h"
#include "Symbols.h"
#include "SyntheticSections.h"
@ -26,13 +25,22 @@ using namespace lld::macho;
namespace {
struct ARM64 : ARM64Common {
struct ARM64 : TargetInfo {
ARM64();
int64_t getEmbeddedAddend(MemoryBufferRef, uint64_t offset,
const relocation_info) const override;
void relocateOne(uint8_t *loc, const Reloc &, uint64_t va,
uint64_t pc) const override;
void writeStub(uint8_t *buf, const Symbol &) const override;
void writeStubHelperHeader(uint8_t *buf) const override;
void writeStubHelperEntry(uint8_t *buf, const DylibSymbol &,
uint64_t entryAddr) const override;
void relaxGotLoad(uint8_t *loc, uint8_t type) const override;
const RelocAttrs &getRelocAttrs(uint8_t type) const override;
uint64_t getPageSize() const override { return 16 * 1024; }
};
} // namespace
@ -69,6 +77,140 @@ const RelocAttrs &ARM64::getRelocAttrs(uint8_t type) const {
return relocAttrsArray[type];
}
// Return the addend encoded directly in the relocated bytes, or 0 for reloc
// types whose addend arrives via a separate ARM64_RELOC_ADDEND relocation.
int64_t ARM64::getEmbeddedAddend(MemoryBufferRef mb, uint64_t offset,
                                 const relocation_info rel) const {
  if (rel.r_type != ARM64_RELOC_UNSIGNED &&
      rel.r_type != ARM64_RELOC_SUBTRACTOR) {
    // All other reloc types should use the ADDEND relocation to store their
    // addends.
    // TODO(gkm): extract embedded addend just so we can assert that it is 0
    return 0;
  }
  auto *buf = reinterpret_cast<const uint8_t *>(mb.getBufferStart());
  const uint8_t *loc = buf + offset + rel.r_address;
  switch (rel.r_length) {
  case 2:
    // r_length == 2 denotes a 4-byte field; the int32_t cast sign-extends.
    return static_cast<int32_t>(read32le(loc));
  case 3:
    // r_length == 3 denotes an 8-byte field.
    return read64le(loc);
  default:
    llvm_unreachable("invalid r_length");
  }
}
// Extract `width` bits of `value` starting at bit `right`, and place them at
// bit `left` of the result. Used to splice immediate fields into AArch64
// instruction words.
inline uint64_t bitField(uint64_t value, int right, int width, int left) {
  // Build the mask with a 64-bit literal: the original `1 << width` computed
  // it in (32-bit) int, which is undefined behavior for width >= 31.
  return ((value >> right) & ((1ULL << width) - 1)) << left;
}
// 25                                                  0
// +-----------+---------------------------------------------------+
// |           |                       imm26                       |
// +-----------+---------------------------------------------------+

// Encode the 26-bit immediate of a B/BL instruction. `va` is the byte
// displacement and must fit in a signed 28-bit range.
inline uint64_t encodeBranch26(const Reloc &r, uint64_t base, uint64_t va) {
  checkInt(r, va, 28);
  // Since branch destinations are 4-byte aligned, the 2 least-
  // significant bits are 0. They are right shifted off the end.
  return (base | bitField(va, 2, 26, 0));
}
// Same encoding, diagnosed via a SymbolDiagnostic instead of a Reloc (used
// when synthesizing stub code, where no Reloc object exists).
inline uint64_t encodeBranch26(SymbolDiagnostic d, uint64_t base, uint64_t va) {
  checkInt(d, va, 28);
  return (base | bitField(va, 2, 26, 0));
}
//   30 29        23                                  5
// +-+---+---------+-------------------------------------+---------+
// | |ilo|         |               immhi                 |         |
// +-+---+---------+-------------------------------------+---------+

// Encode the 21-bit page displacement of an ADRP: the low 2 bits of the page
// index go in immlo (bits 29-30), the high 19 bits in immhi (bits 5-23).
// `va` must fit in 35 bits (21-bit page count << 12).
inline uint64_t encodePage21(const Reloc &r, uint64_t base, uint64_t va) {
  checkInt(r, va, 35);
  return (base | bitField(va, 12, 2, 29) | bitField(va, 14, 19, 5));
}

// Overload taking a SymbolDiagnostic, for synthesized (stub) code.
inline uint64_t encodePage21(SymbolDiagnostic d, uint64_t base, uint64_t va) {
  checkInt(d, va, 35);
  return (base | bitField(va, 12, 2, 29) | bitField(va, 14, 19, 5));
}
//              21                   10
// +-------------------+-----------------------+-------------------+
// |                   |         imm12         |                   |
// +-------------------+-----------------------+-------------------+

// Encode the 12-bit unsigned page offset of an LDR/STR/ADD. For loads and
// stores the offset is scaled by the access size, derived from `base`'s
// opcode bits.
inline uint64_t encodePageOff12(uint32_t base, uint64_t va) {
  int scale = 0;
  if ((base & 0x3b00'0000) == 0x3900'0000) { // load/store
    scale = base >> 30; // size field = log2(access size in bytes)
    if (scale == 0 && (base & 0x0480'0000) == 0x0480'0000) // 128-bit variant
      scale = 4;
  }
  // TODO(gkm): extract embedded addend and warn if != 0
  // uint64_t addend = ((base & 0x003FFC00) >> 10);
  return (base | bitField(va, scale, 12 - scale, 10));
}
// Round `address` down to its 4 KiB page boundary by clearing the low 12
// bits.
inline uint64_t pageBits(uint64_t address) {
  return address & ~uint64_t(0xfff);
}
// For instruction relocations (load, store, add), the base
// instruction is pre-populated in the text section. A pre-populated
// instruction has opcode & register-operand bits set, with immediate
// operands zeroed. We read it from text, OR-in the immediate
// operands, then write-back the completed instruction.
void ARM64::relocateOne(uint8_t *loc, const Reloc &r, uint64_t value,
                        uint64_t pc) const {
  // r.length == 2 is a 4-byte field (an instruction word); 3 is 8 bytes.
  uint32_t base = ((r.length == 2) ? read32le(loc) : 0);
  value += r.addend;
  switch (r.type) {
  case ARM64_RELOC_BRANCH26:
    // PC-relative branch displacement.
    value = encodeBranch26(r, base, value - pc);
    break;
  case ARM64_RELOC_SUBTRACTOR:
  case ARM64_RELOC_UNSIGNED:
    if (r.length == 2)
      checkInt(r, value, 32);
    break;
  case ARM64_RELOC_POINTER_TO_GOT:
    if (r.pcrel)
      value -= pc;
    checkInt(r, value, 32);
    break;
  case ARM64_RELOC_PAGE21:
  case ARM64_RELOC_GOT_LOAD_PAGE21:
  case ARM64_RELOC_TLVP_LOAD_PAGE21: {
    assert(r.pcrel);
    // ADRP encodes a page-to-page displacement.
    value = encodePage21(r, base, pageBits(value) - pageBits(pc));
    break;
  }
  case ARM64_RELOC_PAGEOFF12:
  case ARM64_RELOC_GOT_LOAD_PAGEOFF12:
  case ARM64_RELOC_TLVP_LOAD_PAGEOFF12:
    assert(!r.pcrel);
    value = encodePageOff12(base, value);
    break;
  default:
    llvm_unreachable("unexpected relocation type");
  }
  switch (r.length) {
  case 2:
    write32le(loc, value);
    break;
  case 3:
    write64le(loc, value);
    break;
  default:
    llvm_unreachable("invalid r_length");
  }
}
static constexpr uint32_t stubCode[] = {
0x90000010, // 00: adrp x16, __la_symbol_ptr@page
0xf9400210, // 04: ldr x16, [x16, __la_symbol_ptr@pageoff]
@ -76,7 +218,15 @@ static constexpr uint32_t stubCode[] = {
};
void ARM64::writeStub(uint8_t *buf8, const Symbol &sym) const {
::writeStub<LP64, stubCode>(buf8, sym);
auto *buf32 = reinterpret_cast<uint32_t *>(buf8);
uint64_t pcPageBits =
pageBits(in.stubs->addr + sym.stubsIndex * sizeof(stubCode));
uint64_t lazyPointerVA =
in.lazyPointers->addr + sym.stubsIndex * LP64::wordSize;
buf32[0] = encodePage21({&sym, "stub"}, stubCode[0],
pageBits(lazyPointerVA) - pcPageBits);
buf32[1] = encodePageOff12(stubCode[1], lazyPointerVA);
buf32[2] = stubCode[2];
}
static constexpr uint32_t stubHelperHeaderCode[] = {
@ -89,7 +239,22 @@ static constexpr uint32_t stubHelperHeaderCode[] = {
};
void ARM64::writeStubHelperHeader(uint8_t *buf8) const {
::writeStubHelperHeader<LP64, stubHelperHeaderCode>(buf8);
auto *buf32 = reinterpret_cast<uint32_t *>(buf8);
auto pcPageBits = [](int i) {
return pageBits(in.stubHelper->addr + i * sizeof(uint32_t));
};
uint64_t loaderVA = in.imageLoaderCache->getVA();
SymbolDiagnostic d = {nullptr, "stub header helper"};
buf32[0] = encodePage21(d, stubHelperHeaderCode[0],
pageBits(loaderVA) - pcPageBits(0));
buf32[1] = encodePageOff12(stubHelperHeaderCode[1], loaderVA);
buf32[2] = stubHelperHeaderCode[2];
uint64_t binderVA =
in.got->addr + in.stubHelper->stubBinder->gotIndex * LP64::wordSize;
buf32[3] = encodePage21(d, stubHelperHeaderCode[3],
pageBits(binderVA) - pcPageBits(3));
buf32[4] = encodePageOff12(stubHelperHeaderCode[4], binderVA);
buf32[5] = stubHelperHeaderCode[5];
}
static constexpr uint32_t stubHelperEntryCode[] = {
@ -100,10 +265,34 @@ static constexpr uint32_t stubHelperEntryCode[] = {
void ARM64::writeStubHelperEntry(uint8_t *buf8, const DylibSymbol &sym,
uint64_t entryVA) const {
::writeStubHelperEntry<stubHelperEntryCode>(buf8, sym, entryVA);
auto *buf32 = reinterpret_cast<uint32_t *>(buf8);
auto pcVA = [entryVA](int i) { return entryVA + i * sizeof(uint32_t); };
uint64_t stubHelperHeaderVA = in.stubHelper->addr;
buf32[0] = stubHelperEntryCode[0];
buf32[1] = encodeBranch26({&sym, "stub helper"}, stubHelperEntryCode[1],
stubHelperHeaderVA - pcVA(1));
buf32[2] = sym.lazyBindOffset;
}
ARM64::ARM64() : ARM64Common(LP64()) {
// Rewrite a GOT load (`ldr xN, [xM, #off]`) into `add xN, xM, #off` when the
// GOT indirection has been relaxed away.
void ARM64::relaxGotLoad(uint8_t *loc, uint8_t type) const {
  // The instruction format comments below are quoted from
  // Arm® Architecture Reference Manual
  // Armv8, for Armv8-A architecture profile
  // ARM DDI 0487G.a (ID011921)
  uint32_t instruction = read32le(loc);
  // C6.2.132 LDR (immediate)
  // LDR <Xt>, [<Xn|SP>{, #<pimm>}]
  if ((instruction & 0xffc00000) != 0xf9400000)
    error(getRelocAttrs(type).name + " reloc requires LDR instruction");
  assert(((instruction >> 10) & 0xfff) == 0 &&
         "non-zero embedded LDR immediate");
  // C6.2.4 ADD (immediate)
  // ADD <Xd|SP>, <Xn|SP>, #<imm>{, <shift>}
  // Keep Rd/Rn (and the zeroed imm12); substitute the ADD opcode bits.
  instruction = ((instruction & 0x001fffff) | 0x91000000);
  write32le(loc, instruction);
}
ARM64::ARM64() : TargetInfo(LP64()) {
cpuType = CPU_TYPE_ARM64;
cpuSubtype = CPU_SUBTYPE_ARM64_ALL;

View File

@ -1,111 +0,0 @@
//===- ARM64Common.cpp ----------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "Arch/ARM64Common.h"
#include "lld/Common/ErrorHandler.h"
#include "llvm/Support/Endian.h"
using namespace llvm::MachO;
using namespace llvm::support::endian;
using namespace lld;
using namespace lld::macho;
// Return the addend stored directly in the relocated bytes, or 0 for reloc
// types whose addend is carried by a separate ARM64_RELOC_ADDEND relocation.
int64_t ARM64Common::getEmbeddedAddend(MemoryBufferRef mb, uint64_t offset,
                                       const relocation_info rel) const {
  if (rel.r_type != ARM64_RELOC_UNSIGNED &&
      rel.r_type != ARM64_RELOC_SUBTRACTOR) {
    // All other reloc types should use the ADDEND relocation to store their
    // addends.
    // TODO(gkm): extract embedded addend just so we can assert that it is 0
    return 0;
  }
  const auto *buf = reinterpret_cast<const uint8_t *>(mb.getBufferStart());
  const uint8_t *loc = buf + offset + rel.r_address;
  switch (rel.r_length) {
  case 2:
    // 4-byte field; the int32_t cast sign-extends to int64_t.
    return static_cast<int32_t>(read32le(loc));
  case 3:
    // 8-byte field.
    return read64le(loc);
  default:
    llvm_unreachable("invalid r_length");
  }
}
// For instruction relocations (load, store, add), the base
// instruction is pre-populated in the text section. A pre-populated
// instruction has opcode & register-operand bits set, with immediate
// operands zeroed. We read it from text, OR-in the immediate
// operands, then write-back the completed instruction.
void ARM64Common::relocateOne(uint8_t *loc, const Reloc &r, uint64_t value,
                              uint64_t pc) const {
  // r.length == 2 is a 4-byte field (an instruction word); 3 is 8 bytes.
  uint32_t base = ((r.length == 2) ? read32le(loc) : 0);
  value += r.addend;
  switch (r.type) {
  case ARM64_RELOC_BRANCH26:
    // PC-relative branch displacement.
    value = encodeBranch26(r, base, value - pc);
    break;
  case ARM64_RELOC_SUBTRACTOR:
  case ARM64_RELOC_UNSIGNED:
    if (r.length == 2)
      checkInt(r, value, 32);
    break;
  case ARM64_RELOC_POINTER_TO_GOT:
    if (r.pcrel)
      value -= pc;
    checkInt(r, value, 32);
    break;
  case ARM64_RELOC_PAGE21:
  case ARM64_RELOC_GOT_LOAD_PAGE21:
  case ARM64_RELOC_TLVP_LOAD_PAGE21: {
    assert(r.pcrel);
    // ADRP encodes a page-to-page displacement.
    value = encodePage21(r, base, pageBits(value) - pageBits(pc));
    break;
  }
  case ARM64_RELOC_PAGEOFF12:
  case ARM64_RELOC_GOT_LOAD_PAGEOFF12:
  case ARM64_RELOC_TLVP_LOAD_PAGEOFF12:
    assert(!r.pcrel);
    value = encodePageOff12(base, value);
    break;
  default:
    llvm_unreachable("unexpected relocation type");
  }
  switch (r.length) {
  case 2:
    write32le(loc, value);
    break;
  case 3:
    write64le(loc, value);
    break;
  default:
    llvm_unreachable("invalid r_length");
  }
}
// Rewrite a GOT load (`ldr (x|w)N, [xM, #off]`) into an ADD when the GOT
// indirection has been relaxed away.
void ARM64Common::relaxGotLoad(uint8_t *loc, uint8_t type) const {
  // The instruction format comments below are quoted from
  // Arm® Architecture Reference Manual
  // Armv8, for Armv8-A architecture profile
  // ARM DDI 0487G.a (ID011921)
  uint32_t instruction = read32le(loc);
  // C6.2.132 LDR (immediate)
  // This matches both the 64- and 32-bit variants:
  // LDR <(X|W)t>, [<Xn|SP>{, #<pimm>}]
  if ((instruction & 0xbfc00000) != 0xb9400000)
    error(getRelocAttrs(type).name + " reloc requires LDR instruction");
  assert(((instruction >> 10) & 0xfff) == 0 &&
         "non-zero embedded LDR immediate");
  // C6.2.4 ADD (immediate)
  // ADD <Xd|SP>, <Xn|SP>, #<imm>{, <shift>}
  // Keep Rd/Rn (and the zeroed imm12); substitute the ADD opcode bits.
  instruction = ((instruction & 0x001fffff) | 0x91000000);
  write32le(loc, instruction);
}

View File

@ -1,141 +0,0 @@
//===- ARM64Common.h --------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef LLD_MACHO_ARCH_ARM64COMMON_H
#define LLD_MACHO_ARCH_ARM64COMMON_H
#include "InputFiles.h"
#include "Symbols.h"
#include "SyntheticSections.h"
#include "Target.h"
#include "llvm/BinaryFormat/MachO.h"
namespace lld {
namespace macho {
// Target logic shared between the arm64 and arm64_32 backends: embedded
// addends, relocation application, and GOT-load relaxation. Subclasses
// supply the stub writers and relocation attributes.
struct ARM64Common : TargetInfo {
  template <class LP> ARM64Common(LP lp) : TargetInfo(lp) {}
  int64_t getEmbeddedAddend(MemoryBufferRef, uint64_t offset,
                            const llvm::MachO::relocation_info) const override;
  void relocateOne(uint8_t *loc, const Reloc &, uint64_t va,
                   uint64_t pc) const override;
  void relaxGotLoad(uint8_t *loc, uint8_t type) const override;
  // Both arm64 flavors use 16 KiB pages.
  uint64_t getPageSize() const override { return 16 * 1024; }
};
// Extract `width` bits of `value` starting at bit `right`, and place them at
// bit `left` of the result. Used to splice immediate fields into AArch64
// instruction words.
inline uint64_t bitField(uint64_t value, int right, int width, int left) {
  // Build the mask with a 64-bit literal: the original `1 << width` computed
  // it in (32-bit) int, which is undefined behavior for width >= 31.
  return ((value >> right) & ((1ULL << width) - 1)) << left;
}
// 25                                                  0
// +-----------+---------------------------------------------------+
// |           |                       imm26                       |
// +-----------+---------------------------------------------------+

// Encode the 26-bit immediate of a B/BL instruction. `va` is the byte
// displacement and must fit in a signed 28-bit range.
inline uint64_t encodeBranch26(const Reloc &r, uint64_t base, uint64_t va) {
  checkInt(r, va, 28);
  // Since branch destinations are 4-byte aligned, the 2 least-
  // significant bits are 0. They are right shifted off the end.
  return (base | bitField(va, 2, 26, 0));
}

// Overload taking a SymbolDiagnostic, for synthesized (stub) code where no
// Reloc object exists.
inline uint64_t encodeBranch26(SymbolDiagnostic d, uint64_t base, uint64_t va) {
  checkInt(d, va, 28);
  return (base | bitField(va, 2, 26, 0));
}
//   30 29        23                                  5
// +-+---+---------+-------------------------------------+---------+
// | |ilo|         |               immhi                 |         |
// +-+---+---------+-------------------------------------+---------+

// Encode the 21-bit page displacement of an ADRP: low 2 bits of the page
// index in immlo (bits 29-30), high 19 bits in immhi (bits 5-23). `va` must
// fit in 35 bits (21-bit page count << 12).
inline uint64_t encodePage21(const Reloc &r, uint64_t base, uint64_t va) {
  checkInt(r, va, 35);
  return (base | bitField(va, 12, 2, 29) | bitField(va, 14, 19, 5));
}

// Overload taking a SymbolDiagnostic, for synthesized (stub) code.
inline uint64_t encodePage21(SymbolDiagnostic d, uint64_t base, uint64_t va) {
  checkInt(d, va, 35);
  return (base | bitField(va, 12, 2, 29) | bitField(va, 14, 19, 5));
}
//              21                   10
// +-------------------+-----------------------+-------------------+
// |                   |         imm12         |                   |
// +-------------------+-----------------------+-------------------+

// Encode the 12-bit unsigned page offset of an LDR/STR/ADD. For loads and
// stores the offset is scaled by the access size, derived from `base`'s
// opcode bits.
inline uint64_t encodePageOff12(uint32_t base, uint64_t va) {
  int scale = 0;
  if ((base & 0x3b00'0000) == 0x3900'0000) { // load/store
    scale = base >> 30; // size field = log2(access size in bytes)
    if (scale == 0 && (base & 0x0480'0000) == 0x0480'0000) // 128-bit variant
      scale = 4;
  }
  // TODO(gkm): extract embedded addend and warn if != 0
  // uint64_t addend = ((base & 0x003FFC00) >> 10);
  return (base | bitField(va, scale, 12 - scale, 10));
}
// Round `address` down to its 4 KiB page boundary by clearing the low 12
// bits.
inline uint64_t pageBits(uint64_t address) {
  return address & ~uint64_t(0xfff);
}
// Write a 3-instruction lazy-binding stub (ADRP/LDR of the symbol's
// __la_symbol_ptr slot, then BR). LP selects the pointer width
// (LP64 or ILP32).
template <class LP, const uint32_t stubCode[3]>
inline void writeStub(uint8_t *buf8, const macho::Symbol &sym) {
  auto *buf32 = reinterpret_cast<uint32_t *>(buf8);
  // Page of this stub entry's own address (the ADRP's PC page).
  uint64_t pcPageBits =
      pageBits(in.stubs->addr + sym.stubsIndex * sizeof(stubCode));
  uint64_t lazyPointerVA =
      in.lazyPointers->addr + sym.stubsIndex * LP::wordSize;
  buf32[0] = encodePage21({&sym, "stub"}, stubCode[0],
                          pageBits(lazyPointerVA) - pcPageBits);
  buf32[1] = encodePageOff12(stubCode[1], lazyPointerVA);
  buf32[2] = stubCode[2];
}
// Write the stub-helper header: materialize the address of the image loader
// cache (_dyld_private), then load and branch through dyld_stub_binder's GOT
// slot. LP selects the pointer width.
template <class LP, const uint32_t stubHelperHeaderCode[6]>
inline void writeStubHelperHeader(uint8_t *buf8) {
  auto *buf32 = reinterpret_cast<uint32_t *>(buf8);
  // PC page of the i-th instruction in the header (each ADRP uses its own
  // instruction address as the PC).
  auto pcPageBits = [](int i) {
    return pageBits(in.stubHelper->addr + i * sizeof(uint32_t));
  };
  uint64_t loaderVA = in.imageLoaderCache->getVA();
  SymbolDiagnostic d = {nullptr, "stub header helper"};
  buf32[0] = encodePage21(d, stubHelperHeaderCode[0],
                          pageBits(loaderVA) - pcPageBits(0));
  buf32[1] = encodePageOff12(stubHelperHeaderCode[1], loaderVA);
  buf32[2] = stubHelperHeaderCode[2];
  uint64_t binderVA =
      in.got->addr + in.stubHelper->stubBinder->gotIndex * LP::wordSize;
  buf32[3] = encodePage21(d, stubHelperHeaderCode[3],
                          pageBits(binderVA) - pcPageBits(3));
  buf32[4] = encodePageOff12(stubHelperHeaderCode[4], binderVA);
  buf32[5] = stubHelperHeaderCode[5];
}
// Write one stub-helper entry: the first instruction loads the lazy-bind
// offset stored in the trailing word (buf32[2]); the second branches to the
// stub-helper header.
template <const uint32_t stubHelperEntryCode[3]>
void writeStubHelperEntry(uint8_t *buf8, const DylibSymbol &sym,
                          uint64_t entryVA) {
  auto *buf32 = reinterpret_cast<uint32_t *>(buf8);
  // VA of the i-th instruction of this entry.
  auto pcVA = [entryVA](int i) { return entryVA + i * sizeof(uint32_t); };
  uint64_t stubHelperHeaderVA = in.stubHelper->addr;
  buf32[0] = stubHelperEntryCode[0];
  buf32[1] = encodeBranch26({&sym, "stub helper"}, stubHelperEntryCode[1],
                            stubHelperHeaderVA - pcVA(1));
  buf32[2] = sym.lazyBindOffset;
}
} // namespace macho
} // namespace lld
#endif

View File

@ -1,116 +0,0 @@
//===- ARM64_32.cpp -------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "Arch/ARM64Common.h"
#include "InputFiles.h"
#include "Symbols.h"
#include "SyntheticSections.h"
#include "Target.h"
#include "lld/Common/ErrorHandler.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/MathExtras.h"
using namespace llvm::MachO;
using namespace llvm::support::endian;
using namespace lld;
using namespace lld::macho;
namespace {
// arm64_32: the ILP32 (watchOS) flavor of arm64. Relocation handling comes
// from ARM64Common; only the stub code and relocation attributes differ.
struct ARM64_32 : ARM64Common {
  ARM64_32();
  void writeStub(uint8_t *buf, const Symbol &) const override;
  void writeStubHelperHeader(uint8_t *buf) const override;
  void writeStubHelperEntry(uint8_t *buf, const DylibSymbol &,
                            uint64_t entryAddr) const override;
  const RelocAttrs &getRelocAttrs(uint8_t type) const override;
};
} // namespace
// These are very similar to ARM64's relocation attributes, except that we don't
// have the BYTE8 flag set.
const RelocAttrs &ARM64_32::getRelocAttrs(uint8_t type) const {
  // Indexed by the ARM64_RELOC_* enum values (0 through 10).
  static const std::array<RelocAttrs, 11> relocAttrsArray{{
#define B(x) RelocAttrBits::x
      {"UNSIGNED", B(UNSIGNED) | B(ABSOLUTE) | B(EXTERN) | B(LOCAL) | B(BYTE4)},
      {"SUBTRACTOR", B(SUBTRAHEND) | B(BYTE4)},
      {"BRANCH26", B(PCREL) | B(EXTERN) | B(BRANCH) | B(BYTE4)},
      {"PAGE21", B(PCREL) | B(EXTERN) | B(BYTE4)},
      {"PAGEOFF12", B(ABSOLUTE) | B(EXTERN) | B(BYTE4)},
      {"GOT_LOAD_PAGE21", B(PCREL) | B(EXTERN) | B(GOT) | B(BYTE4)},
      {"GOT_LOAD_PAGEOFF12",
       B(ABSOLUTE) | B(EXTERN) | B(GOT) | B(LOAD) | B(BYTE4)},
      {"POINTER_TO_GOT", B(PCREL) | B(EXTERN) | B(GOT) | B(POINTER) | B(BYTE4)},
      {"TLVP_LOAD_PAGE21", B(PCREL) | B(EXTERN) | B(TLV) | B(BYTE4)},
      {"TLVP_LOAD_PAGEOFF12",
       B(ABSOLUTE) | B(EXTERN) | B(TLV) | B(LOAD) | B(BYTE4)},
      {"ADDEND", B(ADDEND)},
#undef B
  }};
  // Assert in debug builds; fail gracefully on bad input in release builds.
  assert(type < relocAttrsArray.size() && "invalid relocation type");
  if (type >= relocAttrsArray.size())
    return invalidRelocAttrs;
  return relocAttrsArray[type];
}
// The stub code is fairly similar to ARM64's, except that we load pointers into
// 32-bit 'w' registers, instead of the 64-bit 'x' ones.
static constexpr uint32_t stubCode[] = {
0x90000010, // 00: adrp x16, __la_symbol_ptr@page
0xb9400210, // 04: ldr w16, [x16, __la_symbol_ptr@pageoff]
0xd61f0200, // 08: br x16
};
// Delegate to the shared stub writer with ILP32 pointer width.
void ARM64_32::writeStub(uint8_t *buf8, const Symbol &sym) const {
  ::writeStub<ILP32, stubCode>(buf8, sym);
}
static constexpr uint32_t stubHelperHeaderCode[] = {
0x90000011, // 00: adrp x17, _dyld_private@page
0x91000231, // 04: add x17, x17, _dyld_private@pageoff
0xa9bf47f0, // 08: stp x16/x17, [sp, #-16]!
0x90000010, // 0c: adrp x16, dyld_stub_binder@page
0xb9400210, // 10: ldr w16, [x16, dyld_stub_binder@pageoff]
0xd61f0200, // 14: br x16
};
// Delegate to the shared helper-header writer with ILP32 pointer width.
void ARM64_32::writeStubHelperHeader(uint8_t *buf8) const {
  ::writeStubHelperHeader<ILP32, stubHelperHeaderCode>(buf8);
}
static constexpr uint32_t stubHelperEntryCode[] = {
0x18000050, // 00: ldr w16, l0
0x14000000, // 04: b stubHelperHeader
0x00000000, // 08: l0: .long 0
};
// Delegate to the shared helper-entry writer (pointer-width independent).
void ARM64_32::writeStubHelperEntry(uint8_t *buf8, const DylibSymbol &sym,
                                    uint64_t entryVA) const {
  ::writeStubHelperEntry<stubHelperEntryCode>(buf8, sym, entryVA);
}
// Set the arch-specific TargetInfo fields; ILP32 supplies the 32-bit word
// size and header layout to the base class.
ARM64_32::ARM64_32() : ARM64Common(ILP32()) {
  cpuType = CPU_TYPE_ARM64_32;
  cpuSubtype = CPU_SUBTYPE_ARM64_V8;
  stubSize = sizeof(stubCode);
  stubHelperHeaderSize = sizeof(stubHelperHeaderCode);
  stubHelperEntrySize = sizeof(stubHelperEntryCode);
}
// Return the singleton TargetInfo describing the arm64_32 target.
TargetInfo *macho::createARM64_32TargetInfo() {
  static ARM64_32 instance;
  return &instance;
}

View File

@ -7,8 +7,6 @@ include_directories(${LLVM_MAIN_SRC_DIR}/../libunwind/include)
add_lld_library(lldMachO2
Arch/X86_64.cpp
Arch/ARM64.cpp
Arch/ARM64Common.cpp
Arch/ARM64_32.cpp
UnwindInfoSection.cpp
Driver.cpp
DriverUtils.cpp

View File

@ -604,8 +604,6 @@ static TargetInfo *createTargetInfo(InputArgList &args) {
return createX86_64TargetInfo();
case CPU_TYPE_ARM64:
return createARM64TargetInfo();
case CPU_TYPE_ARM64_32:
return createARM64_32TargetInfo();
default:
fatal("missing or unsupported -arch " + archName);
}

View File

@ -37,13 +37,6 @@ struct nlist {
llvm::support::ulittle32_t n_value;
};
struct entry_point_command {
llvm::support::ulittle32_t cmd;
llvm::support::ulittle32_t cmdsize;
llvm::support::ulittle64_t entryoff;
llvm::support::ulittle64_t stacksize;
};
} // namespace structs
} // namespace lld

View File

@ -12,7 +12,6 @@
#include "llvm/ADT/BitmaskEnum.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/Support/Endian.h"
#include <cstddef>
#include <cstdint>
@ -94,19 +93,6 @@ inline void checkUInt(Diagnostic d, uint64_t v, int bits) {
reportRangeError(d, llvm::Twine(v), bits, 0, llvm::maxUIntN(bits));
}
inline void writeAddress(uint8_t *loc, uint64_t addr, uint8_t length) {
switch (length) {
case 2:
llvm::support::endian::write32le(loc, addr);
break;
case 3:
llvm::support::endian::write64le(loc, addr);
break;
default:
llvm_unreachable("invalid r_length");
}
}
extern const RelocAttrs invalidRelocAttrs;
} // namespace macho

View File

@ -32,7 +32,6 @@ class Defined;
class DylibSymbol;
class LoadCommand;
class ObjFile;
class UnwindInfoSection;
class SyntheticSection : public OutputSection {
public:
@ -504,7 +503,6 @@ struct InStruct {
StubsSection *stubs = nullptr;
StubHelperSection *stubHelper = nullptr;
ImageLoaderCacheSection *imageLoaderCache = nullptr;
UnwindInfoSection *unwindInfo = nullptr;
};
extern InStruct in;

View File

@ -79,7 +79,6 @@ public:
TargetInfo *createX86_64TargetInfo();
TargetInfo *createARM64TargetInfo();
TargetInfo *createARM64_32TargetInfo();
struct LP64 {
using mach_header = llvm::MachO::mach_header_64;

View File

@ -91,62 +91,31 @@ using namespace lld::macho;
// TODO(gkm): prune __eh_frame entries superseded by __unwind_info
// TODO(gkm): how do we align the 2nd-level pages?
using EncodingMap = llvm::DenseMap<compact_unwind_encoding_t, size_t>;
// __TEXT,__unwind_info synthetic section.
UnwindInfoSection::UnwindInfoSection()
    : SyntheticSection(segment_names::text, section_names::unwindInfo) {
  align = 4; // mimic ld64
}
// Layout of one __LD,__compact_unwind entry; Ptr is the target pointer type
// (uint64_t or uint32_t).
template <class Ptr> struct CompactUnwindEntry {
  Ptr functionAddress;
  uint32_t functionLength;
  compact_unwind_encoding_t encoding;
  Ptr personality;
  Ptr lsda;
};
// The section is emitted only when compact-unwind input data was seen.
bool UnwindInfoSection::isNeeded() const {
  return (compactUnwindSection != nullptr);
}
// Bookkeeping for one second-level page of the __unwind_info output.
struct SecondLevelPage {
  uint32_t kind;     // page format (regular vs. compressed)
  size_t entryIndex; // index of this page's first entry in cuPtrVector
  size_t entryCount; // number of CU entries covered by this page
  size_t byteCount;
  std::vector<compact_unwind_encoding_t> localEncodings;
  EncodingMap localEncodingIndexes;
};
// Pointer-width-specific implementation of the unwind-info section; Ptr is
// uint64_t or uint32_t to match the target.
template <class Ptr> class UnwindInfoSectionImpl : public UnwindInfoSection {
public:
  void prepareRelocations(InputSection *) override;
  void finalize() override;
  void writeTo(uint8_t *buf) const override;

private:
  std::vector<std::pair<compact_unwind_encoding_t, size_t>> commonEncodings;
  EncodingMap commonEncodingIndexes;
  // Indices of personality functions within the GOT.
  std::vector<uint32_t> personalities;
  SmallDenseMap<std::pair<InputSection *, uint64_t /* addend */>, Symbol *>
      personalityTable;
  std::vector<unwind_info_section_header_lsda_index_entry> lsdaEntries;
  // Map of function offset (from the image base) to an index within the LSDA
  // array.
  llvm::DenseMap<uint32_t, uint32_t> functionToLsdaIndex;
  // All CU entries, and a sorted/folded vector of pointers into them.
  std::vector<CompactUnwindEntry<Ptr>> cuVector;
  std::vector<CompactUnwindEntry<Ptr> *> cuPtrVector;
  std::vector<SecondLevelPage> secondLevelPages;
  uint64_t level2PagesOffset = 0;
};
// Compact unwind relocations have different semantics, so we handle them in a
// separate code path from regular relocations. First, we do not wish to add
// rebase opcodes for __LD,__compact_unwind, because that section doesn't
// actually end up in the final binary. Second, personality pointers always
// reside in the GOT and must be treated specially.
template <class Ptr>
void UnwindInfoSectionImpl<Ptr>::prepareRelocations(InputSection *isec) {
void macho::prepareCompactUnwind(InputSection *isec) {
assert(isec->segname == segment_names::ld &&
isec->name == section_names::compactUnwind);
for (Reloc &r : isec->relocs) {
assert(target->hasAttr(r.type, RelocAttrBits::UNSIGNED));
if (r.offset % sizeof(CompactUnwindEntry<Ptr>) !=
offsetof(CompactUnwindEntry<Ptr>, personality))
if (r.offset % sizeof(CompactUnwindEntry64) !=
offsetof(struct CompactUnwindEntry64, personality))
continue;
if (auto *s = r.referent.dyn_cast<Symbol *>()) {
@ -203,10 +172,8 @@ static void checkTextSegment(InputSection *isec) {
// before converting it to post-link form. There should only be absolute
// relocations here: since we are not emitting the pre-link CU section, there
// is no source address to make a relative location meaningful.
template <class Ptr>
static void
relocateCompactUnwind(MergedOutputSection *compactUnwindSection,
std::vector<CompactUnwindEntry<Ptr>> &cuVector) {
static void relocateCompactUnwind(MergedOutputSection *compactUnwindSection,
std::vector<CompactUnwindEntry64> &cuVector) {
for (const InputSection *isec : compactUnwindSection->inputs) {
uint8_t *buf =
reinterpret_cast<uint8_t *>(cuVector.data()) + isec->outSecFileOff;
@ -228,23 +195,21 @@ relocateCompactUnwind(MergedOutputSection *compactUnwindSection,
checkTextSegment(referentIsec);
referentVA = referentIsec->getVA() + r.addend;
}
writeAddress(buf + r.offset, referentVA, r.length);
support::endian::write64le(buf + r.offset, referentVA);
}
}
}
// There should only be a handful of unique personality pointers, so we can
// encode them as 2-bit indices into a small array.
template <class Ptr>
void encodePersonalities(
const std::vector<CompactUnwindEntry<Ptr> *> &cuPtrVector,
void encodePersonalities(const std::vector<CompactUnwindEntry64 *> &cuPtrVector,
std::vector<uint32_t> &personalities) {
for (CompactUnwindEntry<Ptr> *cu : cuPtrVector) {
for (CompactUnwindEntry64 *cu : cuPtrVector) {
if (cu->personality == 0)
continue;
uint32_t personalityOffset = cu->personality - in.header->addr;
// Linear search is fast enough for a small array.
auto it = find(personalities, cu->personality);
auto it = find(personalities, personalityOffset);
uint32_t personalityIndex; // 1-based index
if (it != personalities.end()) {
personalityIndex = std::distance(personalities.begin(), it) + 1;
@ -263,7 +228,7 @@ void encodePersonalities(
// Scan the __LD,__compact_unwind entries and compute the space needs of
// __TEXT,__unwind_info and __TEXT,__eh_frame
template <class Ptr> void UnwindInfoSectionImpl<Ptr>::finalize() {
void UnwindInfoSection::finalize() {
if (compactUnwindSection == nullptr)
return;
@ -275,21 +240,19 @@ template <class Ptr> void UnwindInfoSectionImpl<Ptr>::finalize() {
// encoding+personality+lsda. Folding is necessary because it reduces
// the number of CU entries by as much as 3 orders of magnitude!
compactUnwindSection->finalize();
assert(compactUnwindSection->getSize() % sizeof(CompactUnwindEntry<Ptr>) ==
0);
assert(compactUnwindSection->getSize() % sizeof(CompactUnwindEntry64) == 0);
size_t cuCount =
compactUnwindSection->getSize() / sizeof(CompactUnwindEntry<Ptr>);
compactUnwindSection->getSize() / sizeof(CompactUnwindEntry64);
cuVector.resize(cuCount);
relocateCompactUnwind(compactUnwindSection, cuVector);
// Rather than sort & fold the 32-byte entries directly, we create a
// vector of pointers to entries and sort & fold that instead.
cuPtrVector.reserve(cuCount);
for (CompactUnwindEntry<Ptr> &cuEntry : cuVector)
for (CompactUnwindEntry64 &cuEntry : cuVector)
cuPtrVector.emplace_back(&cuEntry);
std::sort(
cuPtrVector.begin(), cuPtrVector.end(),
[](const CompactUnwindEntry<Ptr> *a, const CompactUnwindEntry<Ptr> *b) {
std::sort(cuPtrVector.begin(), cuPtrVector.end(),
[](const CompactUnwindEntry64 *a, const CompactUnwindEntry64 *b) {
return a->functionAddress < b->functionAddress;
});
@ -317,7 +280,7 @@ template <class Ptr> void UnwindInfoSectionImpl<Ptr>::finalize() {
// Count frequencies of the folded encodings
EncodingMap encodingFrequencies;
for (const CompactUnwindEntry<Ptr> *cuPtrEntry : cuPtrVector)
for (const CompactUnwindEntry64 *cuPtrEntry : cuPtrVector)
encodingFrequencies[cuPtrEntry->encoding]++;
// Make a vector of encodings, sorted by descending frequency
@ -353,7 +316,7 @@ template <class Ptr> void UnwindInfoSectionImpl<Ptr>::finalize() {
// If more entries fit in the regular format, we use that.
for (size_t i = 0; i < cuPtrVector.size();) {
secondLevelPages.emplace_back();
SecondLevelPage &page = secondLevelPages.back();
UnwindInfoSection::SecondLevelPage &page = secondLevelPages.back();
page.entryIndex = i;
uintptr_t functionAddressMax =
cuPtrVector[i]->functionAddress + COMPRESSED_ENTRY_FUNC_OFFSET_MASK;
@ -363,7 +326,7 @@ template <class Ptr> void UnwindInfoSectionImpl<Ptr>::finalize() {
sizeof(unwind_info_compressed_second_level_page_header) /
sizeof(uint32_t);
while (wordsRemaining >= 1 && i < cuPtrVector.size()) {
const CompactUnwindEntry<Ptr> *cuPtr = cuPtrVector[i];
const CompactUnwindEntry64 *cuPtr = cuPtrVector[i];
if (cuPtr->functionAddress >= functionAddressMax) {
break;
} else if (commonEncodingIndexes.count(cuPtr->encoding) ||
@ -396,7 +359,7 @@ template <class Ptr> void UnwindInfoSectionImpl<Ptr>::finalize() {
}
}
for (const CompactUnwindEntry<Ptr> *cu : cuPtrVector) {
for (const CompactUnwindEntry64 *cu : cuPtrVector) {
uint32_t functionOffset = cu->functionAddress - in.header->addr;
functionToLsdaIndex[functionOffset] = lsdaEntries.size();
if (cu->lsda != 0)
@ -419,8 +382,7 @@ template <class Ptr> void UnwindInfoSectionImpl<Ptr>::finalize() {
// All inputs are relocated and output addresses are known, so write!
template <class Ptr>
void UnwindInfoSectionImpl<Ptr>::writeTo(uint8_t *buf) const {
void UnwindInfoSection::writeTo(uint8_t *buf) const {
// section header
auto *uip = reinterpret_cast<unwind_info_section_header *>(buf);
uip->version = 1;
@ -441,8 +403,7 @@ void UnwindInfoSectionImpl<Ptr>::writeTo(uint8_t *buf) const {
// Personalities
for (const uint32_t &personality : personalities)
*i32p++ =
in.got->addr + (personality - 1) * target->wordSize - in.header->addr;
*i32p++ = in.got->addr + (personality - 1) * target->wordSize;
// Level-1 index
uint32_t lsdaOffset =
@ -461,7 +422,7 @@ void UnwindInfoSectionImpl<Ptr>::writeTo(uint8_t *buf) const {
l2PagesOffset += SECOND_LEVEL_PAGE_BYTES;
}
// Level-1 sentinel
const CompactUnwindEntry<Ptr> &cuEnd = cuVector.back();
const CompactUnwindEntry64 &cuEnd = cuVector.back();
iep->functionOffset = cuEnd.functionAddress + cuEnd.functionLength;
iep->secondLevelPagesSectionOffset = 0;
iep->lsdaIndexArraySectionOffset =
@ -494,7 +455,7 @@ void UnwindInfoSectionImpl<Ptr>::writeTo(uint8_t *buf) const {
p2p->encodingsCount = page.localEncodings.size();
auto *ep = reinterpret_cast<uint32_t *>(&p2p[1]);
for (size_t i = 0; i < page.entryCount; i++) {
const CompactUnwindEntry<Ptr> *cuep = cuPtrVector[page.entryIndex + i];
const CompactUnwindEntry64 *cuep = cuPtrVector[page.entryIndex + i];
auto it = commonEncodingIndexes.find(cuep->encoding);
if (it == commonEncodingIndexes.end())
it = page.localEncodingIndexes.find(cuep->encoding);
@ -513,7 +474,7 @@ void UnwindInfoSectionImpl<Ptr>::writeTo(uint8_t *buf) const {
p2p->entryCount = page.entryCount;
auto *ep = reinterpret_cast<uint32_t *>(&p2p[1]);
for (size_t i = 0; i < page.entryCount; i++) {
const CompactUnwindEntry<Ptr> *cuep = cuPtrVector[page.entryIndex + i];
const CompactUnwindEntry64 *cuep = cuPtrVector[page.entryIndex + i];
*ep++ = cuep->functionAddress;
*ep++ = cuep->encoding;
}
@ -521,10 +482,3 @@ void UnwindInfoSectionImpl<Ptr>::writeTo(uint8_t *buf) const {
pp += SECOND_LEVEL_PAGE_WORDS;
}
}
// Factory: instantiate the unwind-info writer templated on the target's
// pointer width (8-byte word size => 64-bit Ptr, otherwise 32-bit).
UnwindInfoSection *macho::makeUnwindInfoSection() {
  if (target->wordSize == 8)
    return make<UnwindInfoSectionImpl<uint64_t>>();
  return make<UnwindInfoSectionImpl<uint32_t>>();
}

View File

@ -17,30 +17,66 @@
#include <vector>
// In 2020, we mostly care about 64-bit targets: x86_64 and arm64
// In-memory mirror of one __LD,__compact_unwind record for 64-bit targets;
// pointer-width fields (functionAddress, personality, lsda) are 64 bits.
struct CompactUnwindEntry64 {
  uint64_t functionAddress; // start of the range this entry covers
  uint32_t functionLength;  // byte length of the covered range
  compact_unwind_encoding_t encoding; // compact unwind encoding bits
  uint64_t personality; // personality function reference; presumably 0 when absent — confirm against producer
  uint64_t lsda;        // LSDA reference; 0 means the entry has no LSDA
};
// FIXME(gkm): someday we might care about 32-bit targets: x86 & arm
// 32-bit counterpart of CompactUnwindEntry64: same record layout, but the
// pointer-width fields shrink to 32 bits. Currently unused (see FIXME above).
struct CompactUnwindEntry32 {
  uint32_t functionAddress; // start of the range this entry covers
  uint32_t functionLength;  // byte length of the covered range
  compact_unwind_encoding_t encoding; // compact unwind encoding bits
  uint32_t personality; // personality function reference; presumably 0 when absent — confirm against producer
  uint32_t lsda;        // LSDA reference; 0 means the entry has no LSDA
};
namespace lld {
namespace macho {
class UnwindInfoSection : public SyntheticSection {
public:
bool isNeeded() const override { return compactUnwindSection != nullptr; }
UnwindInfoSection();
uint64_t getSize() const override { return unwindInfoSize; }
virtual void prepareRelocations(InputSection *) = 0;
bool isNeeded() const override;
void finalize() override;
void writeTo(uint8_t *buf) const override;
void setCompactUnwindSection(MergedOutputSection *cuSection) {
compactUnwindSection = cuSection;
}
protected:
UnwindInfoSection()
: SyntheticSection(segment_names::text, section_names::unwindInfo) {
align = 4;
}
using EncodingMap = llvm::DenseMap<compact_unwind_encoding_t, size_t>;
struct SecondLevelPage {
uint32_t kind;
size_t entryIndex;
size_t entryCount;
size_t byteCount;
std::vector<compact_unwind_encoding_t> localEncodings;
EncodingMap localEncodingIndexes;
};
private:
std::vector<std::pair<compact_unwind_encoding_t, size_t>> commonEncodings;
EncodingMap commonEncodingIndexes;
// Indices of personality functions within the GOT.
std::vector<uint32_t> personalities;
std::vector<unwind_info_section_header_lsda_index_entry> lsdaEntries;
// Map of function offset (from the image base) to an index within the LSDA
// array.
llvm::DenseMap<uint32_t, uint32_t> functionToLsdaIndex;
std::vector<CompactUnwindEntry64> cuVector;
std::vector<CompactUnwindEntry64 *> cuPtrVector;
std::vector<SecondLevelPage> secondLevelPages;
MergedOutputSection *compactUnwindSection = nullptr;
uint64_t level2PagesOffset = 0;
uint64_t unwindInfoSize = 0;
};
UnwindInfoSection *makeUnwindInfoSection();
void prepareCompactUnwind(InputSection *isec);
} // namespace macho

View File

@ -71,6 +71,7 @@ public:
SymtabSection *symtabSection = nullptr;
IndirectSymtabSection *indirectSymtabSection = nullptr;
CodeSignatureSection *codeSignatureSection = nullptr;
UnwindInfoSection *unwindInfoSection = nullptr;
FunctionStartsSection *functionStartsSection = nullptr;
LCUuid *uuidCommand = nullptr;
@ -233,12 +234,10 @@ private:
};
class LCMain : public LoadCommand {
uint32_t getSize() const override {
return sizeof(structs::entry_point_command);
}
uint32_t getSize() const override { return sizeof(entry_point_command); }
void writeTo(uint8_t *buf) const override {
auto *c = reinterpret_cast<structs::entry_point_command *>(buf);
auto *c = reinterpret_cast<entry_point_command *>(buf);
c->cmd = LC_MAIN;
c->cmdsize = getSize();
@ -516,7 +515,7 @@ void Writer::scanRelocations() {
TimeTraceScope timeScope("Scan relocations");
for (InputSection *isec : inputSections) {
if (isec->segname == segment_names::ld) {
in.unwindInfo->prepareRelocations(isec);
prepareCompactUnwind(isec);
continue;
}
@ -797,6 +796,7 @@ template <class LP> void Writer::createOutputSections() {
TimeTraceScope timeScope("Create output sections");
// First, create hidden sections
stringTableSection = make<StringTableSection>();
unwindInfoSection = make<UnwindInfoSection>(); // TODO(gkm): only when no -r
symtabSection = makeSymtabSection<LP>(*stringTableSection);
indirectSymtabSection = make<IndirectSymtabSection>();
if (config->adhocCodesign)
@ -828,9 +828,9 @@ template <class LP> void Writer::createOutputSections() {
for (const auto &it : mergedOutputSections) {
StringRef segname = it.first.first;
MergedOutputSection *osec = it.second;
if (segname == segment_names::ld) {
if (unwindInfoSection && segname == segment_names::ld) {
assert(osec->name == section_names::compactUnwind);
in.unwindInfo->setCompactUnwindSection(osec);
unwindInfoSection->setCompactUnwindSection(osec);
} else {
getOrCreateOutputSegment(segname)->addOutputSection(osec);
}
@ -991,7 +991,6 @@ template <class LP> void macho::createSyntheticSections() {
in.stubs = make<StubsSection>();
in.stubHelper = make<StubHelperSection>();
in.imageLoaderCache = make<ImageLoaderCacheSection>();
in.unwindInfo = makeUnwindInfoSection();
}
OutputSection *macho::firstTLVDataSection = nullptr;

View File

@ -1,14 +0,0 @@
--- !tapi-tbd
tbd-version: 4
targets: [ armv7k-watchos, arm64_32-watchos ]
uuids:
- target: armv7k-watchos
value: 00000000-0000-0000-0000-000000000001
- target: arm64_32-watchos
value: 00000000-0000-0000-0000-000000000002
install-name: '/usr/lib/libSystem.dylib'
current-version: 1.0.0
exports:
- targets: [ arm64_32-watchos, armv7k-watchos ]
symbols: [ dyld_stub_binder ]
...

View File

@ -1,14 +0,0 @@
--- !tapi-tbd
tbd-version: 4
targets: [ armv7k-watchos, arm64_32-watchos ]
uuids:
- target: armv7k-watchos
value: 00000000-0000-0000-0000-000000000001
- target: arm64_32-watchos
value: 00000000-0000-0000-0000-000000000002
install-name: '/usr/lib/libc++.dylib'
current-version: 1.0.0
reexported-libraries:
- targets: [ arm64_32-watchos, armv7k-watchos ]
libraries: [ '/usr/lib/libc++abi.dylib' ]
...

View File

@ -1,14 +0,0 @@
--- !tapi-tbd
tbd-version: 4
targets: [ armv7k-watchos, arm64_32-watchos ]
uuids:
- target: armv7k-watchos
value: 00000000-0000-0000-0000-000000000001
- target: arm64_32-watchos
value: 00000000-0000-0000-0000-000000000002
install-name: '/usr/lib/libc++abi.dylib'
current-version: 1.0.0
exports:
- targets: [ arm64_32-watchos, armv7k-watchos ]
symbols: [ ___gxx_personality_v0 ]
...

View File

@ -1,48 +0,0 @@
# REQUIRES: aarch64
# RUN: rm -rf %t; split-file %s %t
# RUN: llvm-mc -filetype=obj -triple=arm64_32-apple-darwin %t/main.s -o %t/main.o
# RUN: llvm-mc -filetype=obj -triple=arm64_32-apple-darwin %t/foobar.s -o %t/foobar.o
# RUN: %lld-watchos -lSystem -arch arm64_32 -o %t/static %t/main.o %t/foobar.o
# RUN: llvm-objdump --macho -d --no-show-raw-insn --syms %t/static | FileCheck %s --check-prefix=STATIC
# RUN: %lld-watchos -lSystem -arch arm64_32 -dylib -o %t/libfoo.dylib %t/foobar.o
# RUN: %lld-watchos -lSystem -arch arm64_32 -o %t/main %t/main.o %t/libfoo.dylib
# RUN: llvm-objdump --macho -d --no-show-raw-insn --section-headers %t/main | FileCheck %s --check-prefix=DYLIB
# STATIC-LABEL: _main:
# STATIC-NEXT: adrp x8, [[#]] ; 0x[[#%x,PAGE:]]
# STATIC-NEXT: add x8, x8, #[[#%u,FOO_OFF:]]
# STATIC-NEXT: adrp x8, [[#]] ; 0x[[#PAGE]]
# STATIC-NEXT: add x8, x8, #[[#%u,BAR_OFF:]]
# STATIC-NEXT: ret
# STATIC-LABEL: SYMBOL TABLE:
# STATIC-DAG: {{0*}}[[#%x,PAGE+FOO_OFF]] g F __TEXT,__text _foo
# STATIC-DAG: {{0*}}[[#%x,PAGE+BAR_OFF]] g F __TEXT,__text _bar
# DYLIB-LABEL: _main:
# DYLIB-NEXT: adrp x8, [[#]] ; 0x[[#%x,GOT:]]
# DYLIB-NEXT: ldr w8, [x8, #4]
# DYLIB-NEXT: adrp x8, [[#]] ; 0x[[#GOT]]
# DYLIB-NEXT: ldr w8, [x8]
# DYLIB-NEXT: ret
# DYLIB-NEXT: Sections:
# DYLIB-NEXT: Idx Name Size VMA Type
# DYLIB: [[#]] __got 00000008 [[#%.8x,GOT]] DATA
#--- main.s
.globl _main, _foo, _bar
.p2align 2
_main:
adrp x8, _foo@GOTPAGE
ldr w8, [x8, _foo@GOTPAGEOFF]
adrp x8, _bar@GOTPAGE
ldr w8, [x8, _bar@GOTPAGEOFF]
ret
#--- foobar.s
.globl _foo, _bar
_foo:
_bar:

View File

@ -1,60 +0,0 @@
# REQUIRES: aarch64
## FIXME: This test is very similar to arm64-stubs.s, but has been split into a
## separate file because llvm-objdump doesn't correctly symbolize arm64_32. In
## particular, the "literal pool symbol address" comments are missing (PR49944).
# RUN: rm -rf %t; split-file %s %t
# RUN: llvm-mc -filetype=obj -triple=arm64_32-apple-watchos %t/foo.s -o %t/foo.o
# RUN: llvm-mc -filetype=obj -triple=arm64_32-apple-watchos %t/bar.s -o %t/bar.o
# RUN: llvm-mc -filetype=obj -triple=arm64_32-apple-watchos %t/test.s -o %t/test.o
# RUN: %lld-watchos -dylib -install_name @executable_path/libfoo.dylib %t/foo.o -o %t/libfoo.dylib
# RUN: %lld-watchos -dylib -install_name @executable_path/libbar.dylib %t/bar.o -o %t/libbar.dylib
# RUN: %lld-watchos -lSystem %t/libfoo.dylib %t/libbar.dylib %t/test.o -o %t/test
# RUN: llvm-objdump --macho -d --no-show-raw-insn --section="__TEXT,__stubs" --section="__TEXT,__stub_helper" %t/test | FileCheck %s
# CHECK: _main:
# CHECK-NEXT: bl 0x[[#%x,FOO:]] ; symbol stub for: _foo
# CHECK-NEXT: bl 0x[[#%x,BAR:]] ; symbol stub for: _bar
# CHECK-NEXT: ret
# CHECK-LABEL: Contents of (__TEXT,__stubs) section
# CHECK-NEXT: [[#BAR]]: adrp x16
# CHECK-NEXT: ldr w16, [x16{{.*}}]
# CHECK-NEXT: br x16
# CHECK-NEXT: [[#FOO]]: adrp x16
# CHECK-NEXT: ldr w16, [x16{{.*}}]
# CHECK-NEXT: br x16
# CHECK-LABEL: Contents of (__TEXT,__stub_helper) section
# CHECK-NEXT: [[#%x,HELPER_HEADER:]]: adrp x17
# CHECK-NEXT: add x17, x17
# CHECK-NEXT: stp x16, x17, [sp, #-16]!
# CHECK-NEXT: adrp x16
# CHECK-NEXT: ldr w16, [x16]
# CHECK-NEXT: br x16
# CHECK-NEXT: ldr w16, 0x[[#%x,BAR_BIND_OFF_ADDR:]]
# CHECK-NEXT: b 0x[[#HELPER_HEADER]]
# CHECK-NEXT: [[#BAR_BIND_OFF_ADDR]]: udf #0
# CHECK-NEXT: ldr w16, 0x[[#%x,FOO_BIND_OFF_ADDR:]]
# CHECK-NEXT: b 0x[[#HELPER_HEADER]]
# CHECK-NEXT: [[#FOO_BIND_OFF_ADDR]]: udf #11
#--- foo.s
.globl _foo
_foo:
#--- bar.s
.globl _bar
_bar:
#--- test.s
.text
.globl _main
.p2align 2
_main:
bl _foo
bl _bar
ret

View File

@ -3,23 +3,16 @@
# RUN: llvm-mc -filetype=obj -triple=x86_64-apple-darwin19.0.0 %t/my-personality.s -o %t/x86_64-my-personality.o
# RUN: llvm-mc -filetype=obj -triple=x86_64-apple-darwin19.0.0 %t/main.s -o %t/x86_64-main.o
# RUN: %lld -arch x86_64 -pie -lSystem -lc++ %t/x86_64-my-personality.o %t/x86_64-main.o -o %t/x86_64-personality-first
# RUN: llvm-objdump --macho --unwind-info --syms --indirect-symbols --rebase %t/x86_64-personality-first | FileCheck %s --check-prefixes=FIRST,CHECK -D#%x,BASE=0x100000000
# RUN: llvm-objdump --macho --unwind-info --syms --indirect-symbols --rebase %t/x86_64-personality-first | FileCheck %s --check-prefixes=FIRST,CHECK
# RUN: %lld -arch x86_64 -pie -lSystem -lc++ %t/x86_64-main.o %t/x86_64-my-personality.o -o %t/x86_64-personality-second
# RUN: llvm-objdump --macho --unwind-info --syms --indirect-symbols --rebase %t/x86_64-personality-second | FileCheck %s --check-prefixes=SECOND,CHECK -D#%x,BASE=0x100000000
# RUN: llvm-objdump --macho --unwind-info --syms --indirect-symbols --rebase %t/x86_64-personality-second | FileCheck %s --check-prefixes=SECOND,CHECK
# RUN: llvm-mc -filetype=obj -triple=arm64-apple-darwin19.0.0 %t/my-personality.s -o %t/arm64-my-personality.o
# RUN: llvm-mc -filetype=obj -triple=arm64-apple-darwin19.0.0 %t/main.s -o %t/arm64-main.o
# RUN: %lld -arch arm64 -pie -lSystem -lc++ %t/arm64-my-personality.o %t/arm64-main.o -o %t/arm64-personality-first
# RUN: llvm-objdump --macho --unwind-info --syms --indirect-symbols --rebase %t/arm64-personality-first | FileCheck %s --check-prefixes=FIRST,CHECK -D#%x,BASE=0x100000000
# RUN: llvm-objdump --macho --unwind-info --syms --indirect-symbols --rebase %t/arm64-personality-first | FileCheck %s --check-prefixes=FIRST,CHECK
# RUN: %lld -arch arm64 -pie -lSystem -lc++ %t/arm64-main.o %t/arm64-my-personality.o -o %t/arm64-personality-second
# RUN: llvm-objdump --macho --unwind-info --syms --indirect-symbols --rebase %t/arm64-personality-second | FileCheck %s --check-prefixes=SECOND,CHECK -D#%x,BASE=0x100000000
# RUN: llvm-mc -filetype=obj -triple=arm64_32-apple-watchos %t/my-personality.s -o %t/arm64-32-my-personality.o
# RUN: llvm-mc -filetype=obj -triple=arm64_32-apple-watchos %t/main.s -o %t/arm64-32-main.o
# RUN: %lld-watchos -pie -lSystem -lc++ %t/arm64-32-my-personality.o %t/arm64-32-main.o -o %t/arm64-32-personality-first
# RUN: llvm-objdump --macho --unwind-info --syms --indirect-symbols --rebase %t/arm64-32-personality-first | FileCheck %s --check-prefixes=FIRST,CHECK -D#%x,BASE=0x4000
# RUN: %lld-watchos -pie -lSystem -lc++ %t/arm64-32-main.o %t/arm64-32-my-personality.o -o %t/arm64-32-personality-second
# RUN: llvm-objdump --macho --unwind-info --syms --indirect-symbols --rebase %t/arm64-32-personality-second | FileCheck %s --check-prefixes=SECOND,CHECK -D#%x,BASE=0x4000
# RUN: llvm-objdump --macho --unwind-info --syms --indirect-symbols --rebase %t/arm64-personality-second | FileCheck %s --check-prefixes=SECOND,CHECK
# FIRST: Indirect symbols for (__DATA_CONST,__got)
# FIRST-NEXT: address index name
@ -39,16 +32,16 @@
# CHECK: Contents of __unwind_info section:
# CHECK: Personality functions: (count = 2)
# CHECK-DAG: personality[{{[0-9]+}}]: 0x{{0*}}[[#MY_PERSONALITY-BASE]]
# CHECK-DAG: personality[{{[0-9]+}}]: 0x{{0*}}[[#GXX_PERSONALITY-BASE]]
# CHECK-DAG: personality[{{[0-9]+}}]: 0x{{0*}}[[#MY_PERSONALITY-0x100000000]]
# CHECK-DAG: personality[{{[0-9]+}}]: 0x{{0*}}[[#GXX_PERSONALITY-0x100000000]]
# CHECK: LSDA descriptors:
# CHECK-DAG: function offset=0x[[#%.8x,FOO-BASE]], LSDA offset=0x[[#%.8x,EXCEPTION0-BASE]]
# CHECK-DAG: function offset=0x[[#%.8x,MAIN-BASE]], LSDA offset=0x[[#%.8x,EXCEPTION1-BASE]]
# CHECK-DAG: function offset=0x{{0*}}[[#FOO-0x100000000]], LSDA offset=0x{{0*}}[[#EXCEPTION0-0x100000000]]
# CHECK-DAG: function offset=0x{{0*}}[[#MAIN-0x100000000]], LSDA offset=0x{{0*}}[[#EXCEPTION1-0x100000000]]
## Check that we do not add rebase opcodes to the compact unwind section.
# CHECK: Rebase table:
# CHECK-NEXT: segment section address type
# CHECK-NEXT: __DATA_CONST __got 0x{{[0-9A-F]*}} pointer
# CHECK-NEXT: __DATA_CONST __got 0x{{[0-9a-f]*}} pointer
# CHECK-NOT: __TEXT
#--- my-personality.s

View File

@ -1,27 +1,19 @@
# REQUIRES: x86, aarch64
# RUN: rm -rf %t && mkdir -p %t
# RUN: llvm-mc -filetype=obj -triple=x86_64-apple-darwin %s -o %t/x86-64-test.o
# RUN: llvm-mc -filetype=obj -triple=x86_64-apple-darwin %s -o %t/x86_64-test.o
# RUN: llvm-mc -filetype=obj -triple=arm64-apple-darwin %s -o %t/arm64-test.o
# RUN: llvm-mc -filetype=obj -triple=arm64_32-apple-watchos %s -o %t/arm64-32-test.o
# RUN: %lld -arch x86_64 -platform_version macos 10.5.0 11.0 -o %t/x86-64-executable %t/x86-64-test.o
# RUN: %lld -arch x86_64 -platform_version macos 10.5.0 11.0 -o %t/x86-64-executable %t/x86_64-test.o
# RUN: %lld -arch arm64 -o %t/arm64-executable %t/arm64-test.o
# RUN: %lld-watchos -o %t/arm64-32-executable %t/arm64-32-test.o
# RUN: %lld -arch x86_64 -dylib -o %t/x86-64-dylib %t/x86-64-test.o
# RUN: %lld -arch x86_64 -dylib -o %t/x86-64-dylib %t/x86_64-test.o
# RUN: %lld -arch arm64 -dylib -o %t/arm64-dylib %t/arm64-test.o
# RUN: %lld-watchos -dylib -o %t/arm64-32-dylib %t/arm64-32-test.o
# RUN: llvm-objdump --macho --private-header %t/x86-64-executable | FileCheck %s -DCPU=X86_64 -DCAPS=LIB64
# RUN: llvm-objdump --macho --private-header %t/arm64-executable | FileCheck %s -DCPU=ARM64 -DCAPS=0x00
# RUN: llvm-objdump --macho --private-header %t/arm64-32-executable | FileCheck %s --check-prefix=ARM64-32
# RUN: llvm-objdump --macho --private-header %t/x86-64-dylib | FileCheck %s -DCPU=X86_64 -DCAPS=0x00
# RUN: llvm-objdump --macho --private-header %t/arm64-dylib | FileCheck %s -DCPU=ARM64 -DCAPS=0x00
# RUN: llvm-objdump --macho --private-header %t/arm64-32-dylib | FileCheck %s --check-prefix=ARM64-32
# RUN: llvm-objdump --macho --private-header %t/x86-64-executable | FileCheck %s -DCAPS=LIB64
# RUN: llvm-objdump --macho --private-header %t/arm64-executable | FileCheck %s -DCAPS=0x00
# RUN: llvm-objdump --macho --private-header %t/x86-64-dylib | FileCheck %s -DCAPS=0x00
# RUN: llvm-objdump --macho --private-header %t/arm64-dylib | FileCheck %s -DCAPS=0x00
# CHECK: magic cputype cpusubtype caps filetype {{.*}} flags
# CHECK-NEXT: MH_MAGIC_64 [[CPU]] ALL [[CAPS]] {{.*}} NOUNDEFS {{.*}} TWOLEVEL
# ARM64-32: magic cputype cpusubtype caps filetype {{.*}} flags
# ARM64-32-NEXT: MH_MAGIC ARM64_32 V8 0x00 {{.*}} NOUNDEFS {{.*}} TWOLEVEL
# CHECK-NEXT: MH_MAGIC_64 {{.*}} ALL [[CAPS]] {{.*}} NOUNDEFS {{.*}} TWOLEVEL
.globl _main
_main:

View File

@ -2,20 +2,12 @@
import os
# We specify the most commonly-used archs and platform versions in our tests
# here. Tests which need different settings can just append to this, as only
# the last value will be used.
# We specify the most commonly-used arch and platform version in our tests here
# Tests which need different settings can just append to this, as only the last
# value will be used.
#
# Note however that this does not apply to `-syslibroot`: each instance of that
# flag will append to the set of library roots. As such, we define a separate
# alias for each platform.
config.substitutions.append(('%lld-watchos',
'ld64.lld -fatal_warnings -arch arm64_32 -platform_version watchos 7.0 8.0 -syslibroot ' +
os.path.join(config.test_source_root, "MachO", "Inputs", "WatchOS.sdk")))
# Since most of our tests are written around x86_64, we give this platform the
# shortest substitution of "%lld".
# flag will append to the set of library roots.
lld = ('ld64.lld -arch x86_64 -platform_version macos 10.0 11.0 -syslibroot ' +
os.path.join(config.test_source_root, "MachO", "Inputs", "MacOSX.sdk"))
config.substitutions.append(('%lld', lld + ' -fatal_warnings'))

View File

@ -1,28 +1,19 @@
# REQUIRES: x86, aarch64
# RUN: rm -rf %t; mkdir -p %t
# RUN: llvm-mc -filetype=obj -triple=x86_64-apple-darwin %s -o %t/x86_64.o
# RUN: llvm-mc -filetype=obj -triple=arm64_32-apple-watchos %s -o %t/arm64-32.o
# RUN: %lld -o %t/x86_64 %t/x86_64.o
# RUN: %lld-watchos -o %t/arm64_32 %t/arm64-32.o
# RUN: llvm-readobj --macho-segment %t/x86_64 > %t/x86_64.out
# RUN: echo "Total file size" >> %t/x86_64.out
# RUN: wc -c %t/x86_64 >> %t/x86_64.out
# RUN: FileCheck %s -DSUFFIX=_64 -DPAGEZERO_SIZE=0x100000000 -DTEXT_ADDR=0x100000000 < %t/x86_64.out
# RUN: llvm-readobj --macho-segment %t/arm64_32 > %t/arm64-32.out
# RUN: echo "Total file size" >> %t/arm64-32.out
# RUN: wc -c %t/arm64_32 >> %t/arm64-32.out
# RUN: FileCheck %s -DSUFFIX= -DPAGEZERO_SIZE=0x1000 -DTEXT_ADDR=0x4000 < %t/arm64-32.out
# REQUIRES: x86
# RUN: llvm-mc -filetype=obj -triple=x86_64-apple-darwin %s -o %t.o
# RUN: %lld -o %t %t.o
# RUN: llvm-readobj --macho-segment %t > %t.out
# RUN: echo "Total file size" >> %t.out
# RUN: wc -c %t >> %t.out
# RUN: FileCheck %s < %t.out
## These two segments must always be present at the start of an executable.
# CHECK-NOT: Segment {
# CHECK: Segment {
# CHECK-NEXT: Cmd: LC_SEGMENT[[SUFFIX]]{{$}}
# CHECK-NEXT: Cmd: LC_SEGMENT_64
# CHECK-NEXT: Name: __PAGEZERO
# CHECK-NEXT: Size:
# CHECK-NEXT: Size: 72
# CHECK-NEXT: vmaddr: 0x0
# CHECK-NEXT: vmsize: [[PAGEZERO_SIZE]]
# CHECK-NEXT: vmsize: 0x100000000
# CHECK-NEXT: fileoff: 0
# CHECK-NEXT: filesize: 0
## The kernel won't execute a binary with the wrong protections for __PAGEZERO.
@ -32,10 +23,10 @@
# CHECK-NEXT: flags: 0x0
# CHECK-NEXT: }
# CHECK-NEXT: Segment {
# CHECK-NEXT: Cmd: LC_SEGMENT[[SUFFIX]]{{$}}
# CHECK-NEXT: Cmd: LC_SEGMENT_64
# CHECK-NEXT: Name: __TEXT
# CHECK-NEXT: Size:
# CHECK-NEXT: vmaddr: [[TEXT_ADDR]]
# CHECK-NEXT: Size: 152
# CHECK-NEXT: vmaddr: 0x100000000
# CHECK-NEXT: vmsize:
## dyld3 assumes that the __TEXT segment starts from the file header
# CHECK-NEXT: fileoff: 0
@ -47,7 +38,7 @@
# CHECK-NEXT: }
## Check that we handle max-length names correctly.
# CHECK: Cmd: LC_SEGMENT[[SUFFIX]]{{$}}
# CHECK: Cmd: LC_SEGMENT_64
# CHECK-NEXT: Name: maxlen_16ch_name
## This segment must always be present at the end of an executable, and cover
@ -60,7 +51,7 @@
# CHECK-NEXT: filesize: [[#%u, LINKEDIT_SIZE:]]
# CHECK-NEXT: maxprot: r--
# CHECK-NEXT: initprot: r--
# CHECK-NOT: Cmd: LC_SEGMENT[[SUFFIX]]{{$}}
# CHECK-NOT: Cmd: LC_SEGMENT_64
# CHECK-LABEL: Total file size
# CHECK-NEXT: [[#%u, LINKEDIT_OFF + LINKEDIT_SIZE]]
@ -68,6 +59,7 @@
.text
.global _main
_main:
mov $0, %rax
ret
.section maxlen_16ch_name,foo