llvm-project/lld/ELF/Driver.cpp

//===- Driver.cpp ---------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// The driver drives the entire linking process. It is responsible for
// parsing command line options and doing whatever it is instructed to do.
//
// One notable difference between LLD's driver and those of other linkers
// is that LLD's driver is agnostic of the host operating system.
// Other linkers usually have implicit default values (such as a dynamic
// linker path or library paths) for each host OS.
//
// We don't think implicit default values are useful because they are
// usually explicitly specified by the compiler driver. They can even
// be harmful when you are doing cross-linking. Therefore, in LLD, we
// simply trust the compiler driver to pass all required options and
// don't try to fill in defaults on our side.
//
//===----------------------------------------------------------------------===//
#include "Driver.h"
#include "Config.h"
#include "ICF.h"
#include "InputFiles.h"
#include "InputSection.h"
#include "LinkerScript.h"
#include "MarkLive.h"
#include "OutputSections.h"
#include "ScriptParser.h"
#include "SymbolTable.h"
#include "Symbols.h"
#include "SyntheticSections.h"
#include "Target.h"
#include "Writer.h"
#include "lld/Common/Args.h"
#include "lld/Common/Driver.h"
#include "lld/Common/ErrorHandler.h"
#include "lld/Common/Filesystem.h"
#include "lld/Common/Memory.h"
#include "lld/Common/Strings.h"
#include "lld/Common/TargetOptionsCommandFlags.h"
#include "lld/Common/Version.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/LTO/LTO.h"
#include "llvm/Remarks/HotnessThresholdParser.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compression.h"
#include "llvm/Support/GlobPattern.h"
#include "llvm/Support/LEB128.h"
#include "llvm/Support/Parallel.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/TarWriter.h"
#include "llvm/Support/TargetSelect.h"
#include "llvm/Support/TimeProfiler.h"
#include "llvm/Support/raw_ostream.h"
#include <cstdlib>
#include <utility>
using namespace llvm;
using namespace llvm::ELF;
using namespace llvm::object;
using namespace llvm::sys;
using namespace llvm::support;
using namespace lld;
using namespace lld::elf;
std::unique_ptr<Configuration> elf::config;
std::unique_ptr<LinkerDriver> elf::driver;
static void setConfigs(opt::InputArgList &args);
static void readConfigs(opt::InputArgList &args);
bool elf::link(ArrayRef<const char *> args, bool canExitEarly,
raw_ostream &stdoutOS, raw_ostream &stderrOS) {
lld::stdoutOS = &stdoutOS;
lld::stderrOS = &stderrOS;
errorHandler().cleanupCallback = []() {
freeArena();
inputSections.clear();
outputSections.clear();
archiveFiles.clear();
binaryFiles.clear();
bitcodeFiles.clear();
lazyBitcodeFiles.clear();
objectFiles.clear();
sharedFiles.clear();
backwardReferences.clear();
whyExtract.clear();
tar = nullptr;
memset(&in, 0, sizeof(in));
partitions = {Partition()};
SharedFile::vernauxNum = 0;
};
errorHandler().logName = args::getFilenameWithoutExe(args[0]);
errorHandler().errorLimitExceededMsg =
"too many errors emitted, stopping now (use "
"-error-limit=0 to see all errors)";
errorHandler().exitEarly = canExitEarly;
stderrOS.enable_colors(stderrOS.has_colors());
config = std::make_unique<Configuration>();
driver = std::make_unique<LinkerDriver>();
script = std::make_unique<LinkerScript>();
symtab = std::make_unique<SymbolTable>();
partitions = {Partition()};
config->progName = args[0];
driver->linkerMain(args);
// Exit immediately if we don't need to return to the caller.
// This saves time because the overhead of calling destructors
// for all globally-allocated objects is not negligible.
if (canExitEarly)
exitLld(errorCount() ? 1 : 0);
bool ret = errorCount() == 0;
if (!canExitEarly)
errorHandler().reset();
return ret;
}
// Parses a linker -m option.
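// For example, "-m elf_x86_64" selects a 64-bit little-endian x86-64 output
// (ELF64LEKind, EM_X86_64), and a "_fbsd" suffix, as in "aarch64linux_fbsd",
// additionally selects the FreeBSD OSABI.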
static std::tuple<ELFKind, uint16_t, uint8_t> parseEmulation(StringRef emul) {
uint8_t osabi = 0;
StringRef s = emul;
if (s.endswith("_fbsd")) {
s = s.drop_back(5);
osabi = ELFOSABI_FREEBSD;
}
std::pair<ELFKind, uint16_t> ret =
StringSwitch<std::pair<ELFKind, uint16_t>>(s)
.Cases("aarch64elf", "aarch64linux", {ELF64LEKind, EM_AARCH64})
.Cases("aarch64elfb", "aarch64linuxb", {ELF64BEKind, EM_AARCH64})
.Cases("armelf", "armelf_linux_eabi", {ELF32LEKind, EM_ARM})
.Case("elf32_x86_64", {ELF32LEKind, EM_X86_64})
.Cases("elf32btsmip", "elf32btsmipn32", {ELF32BEKind, EM_MIPS})
.Cases("elf32ltsmip", "elf32ltsmipn32", {ELF32LEKind, EM_MIPS})
.Case("elf32lriscv", {ELF32LEKind, EM_RISCV})
.Cases("elf32ppc", "elf32ppclinux", {ELF32BEKind, EM_PPC})
.Cases("elf32lppc", "elf32lppclinux", {ELF32LEKind, EM_PPC})
.Case("elf64btsmip", {ELF64BEKind, EM_MIPS})
.Case("elf64ltsmip", {ELF64LEKind, EM_MIPS})
.Case("elf64lriscv", {ELF64LEKind, EM_RISCV})
.Case("elf64ppc", {ELF64BEKind, EM_PPC64})
.Case("elf64lppc", {ELF64LEKind, EM_PPC64})
.Cases("elf_amd64", "elf_x86_64", {ELF64LEKind, EM_X86_64})
.Case("elf_i386", {ELF32LEKind, EM_386})
.Case("elf_iamcu", {ELF32LEKind, EM_IAMCU})
.Case("elf64_sparc", {ELF64BEKind, EM_SPARCV9})
.Case("msp430elf", {ELF32LEKind, EM_MSP430})
.Default({ELFNoneKind, EM_NONE});
if (ret.first == ELFNoneKind)
error("unknown emulation: " + emul);
if (ret.second == EM_MSP430)
osabi = ELFOSABI_STANDALONE;
return std::make_tuple(ret.first, ret.second, osabi);
}
// Returns slices of MB by parsing MB as an archive file.
// Each slice consists of a member file in the archive.
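// The second element of each returned pair is the member's byte offset
// within the archive, which callers can use to identify the member.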
std::vector<std::pair<MemoryBufferRef, uint64_t>> static getArchiveMembers(
MemoryBufferRef mb) {
std::unique_ptr<Archive> file =
CHECK(Archive::create(mb),
mb.getBufferIdentifier() + ": failed to parse archive");
std::vector<std::pair<MemoryBufferRef, uint64_t>> v;
Error err = Error::success();
bool addToTar = file->isThin() && tar;
for (const Archive::Child &c : file->children(err)) {
MemoryBufferRef mbref =
CHECK(c.getMemoryBufferRef(),
mb.getBufferIdentifier() +
": could not get the buffer for a child of the archive");
if (addToTar)
tar->append(relativeToRoot(check(c.getFullName())), mbref.getBuffer());
v.push_back(std::make_pair(mbref, c.getChildOffset()));
}
if (err)
fatal(mb.getBufferIdentifier() + ": Archive::children failed: " +
toString(std::move(err)));
// Take ownership of memory buffers created for members of thin archives.
for (std::unique_ptr<MemoryBuffer> &mb : file->takeThinBuffers())
make<std::unique_ptr<MemoryBuffer>>(std::move(mb));
return v;
}
// Opens a file and creates a file object. The path has to be resolved already.
void LinkerDriver::addFile(StringRef path, bool withLOption) {
using namespace sys::fs;
Optional<MemoryBufferRef> buffer = readFile(path);
if (!buffer.hasValue())
return;
MemoryBufferRef mbref = *buffer;
if (config->formatBinary) {
files.push_back(make<BinaryFile>(mbref));
return;
}
switch (identify_magic(mbref.getBuffer())) {
case file_magic::unknown:
readLinkerScript(mbref);
return;
case file_magic::archive: {
if (inWholeArchive) {
for (const auto &p : getArchiveMembers(mbref))
files.push_back(createObjectFile(p.first, path, p.second));
return;
}
std::unique_ptr<Archive> file =
CHECK(Archive::create(mbref), path + ": failed to parse archive");
// If an archive file has no symbol table, it is likely that a user
// is attempting LTO and using a default ar command that doesn't
// understand the LLVM bitcode file. It is a pretty common error, so
// we'll handle it as if it had a symbol table.
if (!file->isEmpty() && !file->hasSymbolTable()) {
// Check if all members are bitcode files. If not, ignore, which is the
// default action without the LTO hack described above.
for (const std::pair<MemoryBufferRef, uint64_t> &p :
getArchiveMembers(mbref))
if (identify_magic(p.first.getBuffer()) != file_magic::bitcode) {
error(path + ": archive has no index; run ranlib to add one");
return;
}
for (const std::pair<MemoryBufferRef, uint64_t> &p :
getArchiveMembers(mbref))
files.push_back(createLazyFile(p.first, path, p.second));
return;
}
// Handle the regular case.
files.push_back(make<ArchiveFile>(std::move(file)));
return;
}
case file_magic::elf_shared_object:
if (config->isStatic || config->relocatable) {
error("attempted static link of dynamic object " + path);
return;
}
// Shared objects are identified by their soname, which is DT_SONAME if
// specified and otherwise falls back to the filename. If a file was
// specified by -lfoo, the directory part is ignored. Note that path may
// be a temporary and cannot be stored into SharedFile::soName.
path = mbref.getBufferIdentifier();
files.push_back(
make<SharedFile>(mbref, withLOption ? path::filename(path) : path));
return;
case file_magic::bitcode:
case file_magic::elf_relocatable:
if (inLib)
files.push_back(createLazyFile(mbref, "", 0));
else
files.push_back(createObjectFile(mbref));
break;
default:
error(path + ": unknown file type");
}
}
// Adds a given library by searching for it in the input search paths.
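// For example, "-lfoo" normally makes the linker look for "libfoo.so" or
// "libfoo.a" in each search path; see searchLibrary() for the exact rules,
// including the ":filename" form that requests an exact file name.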
void LinkerDriver::addLibrary(StringRef name) {
if (Optional<std::string> path = searchLibrary(name))
addFile(*path, /*withLOption=*/true);
else
error("unable to find library -l" + name, ErrorTag::LibNotFound, {name});
}
// This function is called on startup. We need this for LTO since
// LTO calls LLVM functions to compile bitcode files to native code.
// Technically this can be delayed until we read bitcode files, but
// we don't bother to do it lazily because the initialization is fast.
static void initLLVM() {
InitializeAllTargets();
InitializeAllTargetMCs();
InitializeAllAsmPrinters();
InitializeAllAsmParsers();
}
// Some command line options or some combinations of them are not allowed.
// This function checks for such errors.
static void checkOptions() {
// The MIPS ABI as of 2016 does not support the GNU-style symbol lookup
// table which is a relatively new feature.
if (config->emachine == EM_MIPS && config->gnuHash)
error("the .gnu.hash section is not compatible with the MIPS target");
if (config->fixCortexA53Errata843419 && config->emachine != EM_AARCH64)
error("--fix-cortex-a53-843419 is only supported on AArch64 targets");
if (config->fixCortexA8 && config->emachine != EM_ARM)
error("--fix-cortex-a8 is only supported on ARM targets");
if (config->tocOptimize && config->emachine != EM_PPC64)
error("--toc-optimize is only supported on PowerPC64 targets");
if (config->pcRelOptimize && config->emachine != EM_PPC64)
error("--pcrel-optimize is only supported on PowerPC64 targets");
if (config->pie && config->shared)
error("-shared and -pie may not be used together");
if (!config->shared && !config->filterList.empty())
error("-F may not be used without -shared");
if (!config->shared && !config->auxiliaryList.empty())
error("-f may not be used without -shared");
if (!config->relocatable && !config->defineCommon)
error("-no-define-common not supported in non relocatable output");
if (config->strip == StripPolicy::All && config->emitRelocs)
error("--strip-all and --emit-relocs may not be used together");
if (config->zText && config->zIfuncNoplt)
error("-z text and -z ifunc-noplt may not be used together");
if (config->relocatable) {
if (config->shared)
error("-r and -shared may not be used together");
if (config->gdbIndex)
error("-r and --gdb-index may not be used together");
if (config->icf != ICFLevel::None)
error("-r and --icf may not be used together");
if (config->pie)
error("-r and -pie may not be used together");
if (config->exportDynamic)
error("-r and --export-dynamic may not be used together");
}
if (config->executeOnly) {
if (config->emachine != EM_AARCH64)
error("--execute-only is only supported on AArch64 targets");
if (config->singleRoRx && !script->hasSectionsCommand)
error("--execute-only and --no-rosegment cannot be used together");
}
if (config->zRetpolineplt && config->zForceIbt)
error("-z force-ibt may not be used with -z retpolineplt");
if (config->emachine != EM_AARCH64) {
if (config->zPacPlt)
error("-z pac-plt only supported on AArch64");
if (config->zForceBti)
error("-z force-bti only supported on AArch64");
if (config->zBtiReport != "none")
error("-z bti-report only supported on AArch64");
}
if (config->emachine != EM_386 && config->emachine != EM_X86_64 &&
config->zCetReport != "none")
error("-z cet-report only supported on X86 and X86_64");
}
static const char *getReproduceOption(opt::InputArgList &args) {
if (auto *arg = args.getLastArg(OPT_reproduce))
return arg->getValue();
return getenv("LLD_REPRODUCE");
}
static bool hasZOption(opt::InputArgList &args, StringRef key) {
for (auto *arg : args.filtered(OPT_z))
if (key == arg->getValue())
return true;
return false;
}
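// Returns true if "-z <k1>" was specified, false if "-z <k2>" was specified,
// and Default if neither was given. When both appear, the one that comes
// last on the command line wins (hence the reverse iteration).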
static bool getZFlag(opt::InputArgList &args, StringRef k1, StringRef k2,
bool Default) {
for (auto *arg : args.filtered_reverse(OPT_z)) {
if (k1 == arg->getValue())
return true;
if (k2 == arg->getValue())
return false;
}
return Default;
}
static SeparateSegmentKind getZSeparate(opt::InputArgList &args) {
for (auto *arg : args.filtered_reverse(OPT_z)) {
StringRef v = arg->getValue();
if (v == "noseparate-code")
return SeparateSegmentKind::None;
if (v == "separate-code")
return SeparateSegmentKind::Code;
if (v == "separate-loadable-segments")
return SeparateSegmentKind::Loadable;
}
return SeparateSegmentKind::None;
}
static GnuStackKind getZGnuStack(opt::InputArgList &args) {
for (auto *arg : args.filtered_reverse(OPT_z)) {
if (StringRef("execstack") == arg->getValue())
return GnuStackKind::Exec;
if (StringRef("noexecstack") == arg->getValue())
return GnuStackKind::NoExec;
if (StringRef("nognustack") == arg->getValue())
return GnuStackKind::None;
}
return GnuStackKind::NoExec;
}
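// Parses -z start-stop-visibility=<value>, which controls the ELF visibility
// of the synthesized __start_<section> and __stop_<section> symbols.
// The default is protected.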
static uint8_t getZStartStopVisibility(opt::InputArgList &args) {
for (auto *arg : args.filtered_reverse(OPT_z)) {
std::pair<StringRef, StringRef> kv = StringRef(arg->getValue()).split('=');
if (kv.first == "start-stop-visibility") {
if (kv.second == "default")
return STV_DEFAULT;
else if (kv.second == "internal")
return STV_INTERNAL;
else if (kv.second == "hidden")
return STV_HIDDEN;
else if (kv.second == "protected")
return STV_PROTECTED;
error("unknown -z start-stop-visibility= value: " + StringRef(kv.second));
}
}
return STV_PROTECTED;
}
static bool isKnownZFlag(StringRef s) {
return s == "combreloc" || s == "copyreloc" || s == "defs" ||
s == "execstack" || s == "force-bti" || s == "force-ibt" ||
s == "global" || s == "hazardplt" || s == "ifunc-noplt" ||
s == "initfirst" || s == "interpose" ||
s == "keep-text-section-prefix" || s == "lazy" || s == "muldefs" ||
s == "separate-code" || s == "separate-loadable-segments" ||
s == "start-stop-gc" || s == "nocombreloc" || s == "nocopyreloc" ||
s == "nodefaultlib" || s == "nodelete" || s == "nodlopen" ||
s == "noexecstack" || s == "nognustack" ||
s == "nokeep-text-section-prefix" || s == "norelro" ||
s == "noseparate-code" || s == "nostart-stop-gc" || s == "notext" ||
s == "now" || s == "origin" || s == "pac-plt" || s == "rel" ||
s == "rela" || s == "relro" || s == "retpolineplt" ||
s == "rodynamic" || s == "shstk" || s == "text" || s == "undefs" ||
s == "wxneeded" || s.startswith("common-page-size=") ||
s.startswith("bti-report=") || s.startswith("cet-report=") ||
s.startswith("dead-reloc-in-nonalloc=") ||
s.startswith("max-page-size=") || s.startswith("stack-size=") ||
s.startswith("start-stop-visibility=");
}
// Report a warning for an unknown -z option.
static void checkZOptions(opt::InputArgList &args) {
for (auto *arg : args.filtered(OPT_z))
if (!isKnownZFlag(arg->getValue()))
warn("unknown -z value: " + StringRef(arg->getValue()));
}
void LinkerDriver::linkerMain(ArrayRef<const char *> argsArr) {
ELFOptTable parser;
opt::InputArgList args = parser.parse(argsArr.slice(1));
// Interpret the flags early because error()/warn() depend on them.
errorHandler().errorLimit = args::getInteger(args, OPT_error_limit, 20);
errorHandler().fatalWarnings =
args.hasFlag(OPT_fatal_warnings, OPT_no_fatal_warnings, false);
checkZOptions(args);
// Handle -help
if (args.hasArg(OPT_help)) {
printHelp();
return;
}
// Handle -v or -version.
//
// A note about "compatible with GNU linkers" message: this is a hack for
// scripts generated by GNU Libtool up to 2021-10 to recognize LLD as
// a GNU compatible linker. See
// <https://lists.gnu.org/archive/html/libtool/2017-01/msg00007.html>.
//
// This is a somewhat ugly hack, but in reality, we had no choice other
// than doing this. Considering the very long release cycle of Libtool,
// it is not easy to improve it to recognize LLD as a GNU-compatible
// linker in a timely manner. Even if we could, there are still a
// lot of "configure" scripts out there that were generated by old versions
// of Libtool. We cannot convince every software developer to migrate to
// the latest version and regenerate their scripts. So we have this hack.
if (args.hasArg(OPT_v) || args.hasArg(OPT_version))
message(getLLDVersion() + " (compatible with GNU linkers)");
if (const char *path = getReproduceOption(args)) {
// Note that --reproduce is a debug option so you can ignore it
// if you are trying to understand the whole picture of the code.
Expected<std::unique_ptr<TarWriter>> errOrWriter =
TarWriter::create(path, path::stem(path));
if (errOrWriter) {
tar = std::move(*errOrWriter);
tar->append("response.txt", createResponseFile(args));
tar->append("version.txt", getLLDVersion() + "\n");
StringRef ltoSampleProfile = args.getLastArgValue(OPT_lto_sample_profile);
if (!ltoSampleProfile.empty())
readFile(ltoSampleProfile);
} else {
error("--reproduce: " + toString(errOrWriter.takeError()));
}
}
readConfigs(args);
// The behavior of -v or --version is a bit strange, but this is
// needed for compatibility with GNU linkers.
if (args.hasArg(OPT_v) && !args.hasArg(OPT_INPUT))
return;
if (args.hasArg(OPT_version))
return;
// Initialize time trace profiler.
if (config->timeTraceEnabled)
timeTraceProfilerInitialize(config->timeTraceGranularity, config->progName);
{
llvm::TimeTraceScope timeScope("ExecuteLinker");
initLLVM();
createFiles(args);
if (errorCount())
return;
inferMachineType();
setConfigs(args);
checkOptions();
if (errorCount())
return;
// The Target instance handles target-specific stuff, such as applying
// relocations or writing a PLT section. It also contains target-dependent
// values such as a default image base address.
target = getTarget();
switch (config->ekind) {
case ELF32LEKind:
link<ELF32LE>(args);
break;
case ELF32BEKind:
link<ELF32BE>(args);
break;
case ELF64LEKind:
link<ELF64LE>(args);
break;
case ELF64BEKind:
link<ELF64BE>(args);
break;
default:
llvm_unreachable("unknown Config->EKind");
}
}
if (config->timeTraceEnabled) {
checkError(timeTraceProfilerWrite(
args.getLastArgValue(OPT_time_trace_file_eq).str(),
config->outputFile));
timeTraceProfilerCleanup();
}
}
static std::string getRpath(opt::InputArgList &args) {
std::vector<StringRef> v = args::getStrings(args, OPT_rpath);
return llvm::join(v.begin(), v.end(), ":");
}
// Determines what we should do if there are remaining unresolved
// symbols after the name resolution.
static void setUnresolvedSymbolPolicy(opt::InputArgList &args) {
UnresolvedPolicy errorOrWarn = args.hasFlag(OPT_error_unresolved_symbols,
OPT_warn_unresolved_symbols, true)
? UnresolvedPolicy::ReportError
: UnresolvedPolicy::Warn;
// -shared implies --unresolved-symbols=ignore-all because missing
// symbols are likely to be resolved at runtime.
bool diagRegular = !config->shared, diagShlib = !config->shared;
for (const opt::Arg *arg : args) {
switch (arg->getOption().getID()) {
case OPT_unresolved_symbols: {
StringRef s = arg->getValue();
if (s == "ignore-all") {
diagRegular = false;
diagShlib = false;
} else if (s == "ignore-in-object-files") {
diagRegular = false;
diagShlib = true;
} else if (s == "ignore-in-shared-libs") {
diagRegular = true;
diagShlib = false;
} else if (s == "report-all") {
diagRegular = true;
diagShlib = true;
} else {
error("unknown --unresolved-symbols value: " + s);
}
break;
}
case OPT_no_undefined:
diagRegular = true;
break;
case OPT_z:
if (StringRef(arg->getValue()) == "defs")
diagRegular = true;
else if (StringRef(arg->getValue()) == "undefs")
diagRegular = false;
break;
case OPT_allow_shlib_undefined:
diagShlib = false;
break;
case OPT_no_allow_shlib_undefined:
diagShlib = true;
break;
}
}
config->unresolvedSymbols =
diagRegular ? errorOrWarn : UnresolvedPolicy::Ignore;
config->unresolvedSymbolsInShlib =
diagShlib ? errorOrWarn : UnresolvedPolicy::Ignore;
}
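// Parses --target2=<value>, which selects how the ARM R_ARM_TARGET2
// relocation is interpreted: as a relative, absolute, or GOT-relative
// relocation. The default is "got-rel".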
static Target2Policy getTarget2(opt::InputArgList &args) {
StringRef s = args.getLastArgValue(OPT_target2, "got-rel");
if (s == "rel")
return Target2Policy::Rel;
if (s == "abs")
return Target2Policy::Abs;
if (s == "got-rel")
return Target2Policy::GotRel;
error("unknown --target2 option: " + s);
return Target2Policy::GotRel;
}
static bool isOutputFormatBinary(opt::InputArgList &args) {
StringRef s = args.getLastArgValue(OPT_oformat, "elf");
if (s == "binary")
return true;
if (!s.startswith("elf"))
error("unknown --oformat value: " + s);
return false;
}
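// Parses --discard-all, --discard-locals and --discard-none, which control
// whether local symbols are kept in the output symbol table. If several of
// these options are given, the last one wins.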
static DiscardPolicy getDiscard(opt::InputArgList &args) {
auto *arg =
args.getLastArg(OPT_discard_all, OPT_discard_locals, OPT_discard_none);
if (!arg)
return DiscardPolicy::Default;
if (arg->getOption().getID() == OPT_discard_all)
return DiscardPolicy::All;
if (arg->getOption().getID() == OPT_discard_locals)
return DiscardPolicy::Locals;
return DiscardPolicy::None;
}
static StringRef getDynamicLinker(opt::InputArgList &args) {
auto *arg = args.getLastArg(OPT_dynamic_linker, OPT_no_dynamic_linker);
if (!arg)
return "";
if (arg->getOption().getID() == OPT_no_dynamic_linker) {
// --no-dynamic-linker suppresses undefined weak symbols in .dynsym
config->noDynamicLinker = true;
return "";
}
return arg->getValue();
}
static ICFLevel getICF(opt::InputArgList &args) {
auto *arg = args.getLastArg(OPT_icf_none, OPT_icf_safe, OPT_icf_all);
if (!arg || arg->getOption().getID() == OPT_icf_none)
return ICFLevel::None;
if (arg->getOption().getID() == OPT_icf_safe)
return ICFLevel::Safe;
return ICFLevel::All;
}
static StripPolicy getStrip(opt::InputArgList &args) {
if (args.hasArg(OPT_relocatable))
return StripPolicy::None;
auto *arg = args.getLastArg(OPT_strip_all, OPT_strip_debug);
if (!arg)
return StripPolicy::None;
if (arg->getOption().getID() == OPT_strip_all)
return StripPolicy::All;
return StripPolicy::Debug;
}
static uint64_t parseSectionAddress(StringRef s, opt::InputArgList &args,
const opt::Arg &arg) {
uint64_t va = 0;
if (s.startswith("0x"))
s = s.drop_front(2);
if (!to_integer(s, va, 16))
error("invalid argument: " + arg.getAsString(args));
return va;
}
static StringMap<uint64_t> getSectionStartMap(opt::InputArgList &args) {
StringMap<uint64_t> ret;
for (auto *arg : args.filtered(OPT_section_start)) {
StringRef name;
StringRef addr;
std::tie(name, addr) = StringRef(arg->getValue()).split('=');
ret[name] = parseSectionAddress(addr, args, *arg);
}
if (auto *arg = args.getLastArg(OPT_Ttext))
ret[".text"] = parseSectionAddress(arg->getValue(), args, *arg);
if (auto *arg = args.getLastArg(OPT_Tdata))
ret[".data"] = parseSectionAddress(arg->getValue(), args, *arg);
if (auto *arg = args.getLastArg(OPT_Tbss))
ret[".bss"] = parseSectionAddress(arg->getValue(), args, *arg);
return ret;
}
static SortSectionPolicy getSortSection(opt::InputArgList &args) {
StringRef s = args.getLastArgValue(OPT_sort_section);
if (s == "alignment")
return SortSectionPolicy::Alignment;
if (s == "name")
return SortSectionPolicy::Name;
if (!s.empty())
error("unknown --sort-section rule: " + s);
return SortSectionPolicy::Default;
}
static OrphanHandlingPolicy getOrphanHandling(opt::InputArgList &args) {
StringRef s = args.getLastArgValue(OPT_orphan_handling, "place");
if (s == "warn")
return OrphanHandlingPolicy::Warn;
if (s == "error")
return OrphanHandlingPolicy::Error;
if (s != "place")
error("unknown --orphan-handling mode: " + s);
return OrphanHandlingPolicy::Place;
}
// Parse --build-id or --build-id=<style>. We handle "tree" as a
// synonym for "sha1" because all our hash functions including
// --build-id=sha1 are actually tree hashes for performance reasons.
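// Accepted styles are "fast", "md5", "sha1" (or "tree"), "uuid", "none",
// and an arbitrary hex string, e.g. --build-id=0x12345678.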
static std::pair<BuildIdKind, std::vector<uint8_t>>
getBuildId(opt::InputArgList &args) {
auto *arg = args.getLastArg(OPT_build_id, OPT_build_id_eq);
if (!arg)
return {BuildIdKind::None, {}};
if (arg->getOption().getID() == OPT_build_id)
return {BuildIdKind::Fast, {}};
StringRef s = arg->getValue();
if (s == "fast")
return {BuildIdKind::Fast, {}};
if (s == "md5")
return {BuildIdKind::Md5, {}};
if (s == "sha1" || s == "tree")
return {BuildIdKind::Sha1, {}};
if (s == "uuid")
return {BuildIdKind::Uuid, {}};
if (s.startswith("0x"))
return {BuildIdKind::Hexstring, parseHex(s.substr(2))};
if (s != "none")
error("unknown --build-id style: " + s);
return {BuildIdKind::None, {}};
}
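// Parses --pack-dyn-relocs=<format>. "android" selects the Android packed
// relocation format, "relr" selects the SHT_RELR format, and "android+relr"
// enables both.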
static std::pair<bool, bool> getPackDynRelocs(opt::InputArgList &args) {
StringRef s = args.getLastArgValue(OPT_pack_dyn_relocs, "none");
if (s == "android")
return {true, false};
if (s == "relr")
return {false, true};
if (s == "android+relr")
return {true, true};
if (s != "none")
error("unknown --pack-dyn-relocs format: " + s);
return {false, false};
}
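// Parses a call graph profile (typically passed via
// --call-graph-ordering-file). Each line consists of three space-separated
// fields: the "from" symbol, the "to" symbol, and an integer weight,
// e.g. "caller callee 100".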
static void readCallGraph(MemoryBufferRef mb) {
// Build a map from symbol name to section
DenseMap<StringRef, Symbol *> map;
for (ELFFileBase *file : objectFiles)
for (Symbol *sym : file->getSymbols())
map[sym->getName()] = sym;
auto findSection = [&](StringRef name) -> InputSectionBase * {
Symbol *sym = map.lookup(name);
if (!sym) {
if (config->warnSymbolOrdering)
warn(mb.getBufferIdentifier() + ": no such symbol: " + name);
return nullptr;
}
maybeWarnUnorderableSymbol(sym);
if (Defined *dr = dyn_cast_or_null<Defined>(sym))
return dyn_cast_or_null<InputSectionBase>(dr->section);
return nullptr;
};
for (StringRef line : args::getLines(mb)) {
SmallVector<StringRef, 3> fields;
line.split(fields, ' ');
uint64_t count;
if (fields.size() != 3 || !to_integer(fields[2], count)) {
error(mb.getBufferIdentifier() + ": parse error");
return;
}
if (InputSectionBase *from = findSection(fields[0]))
if (InputSectionBase *to = findSection(fields[1]))
config->callGraphProfile[std::make_pair(from, to)] += count;
}
}
// If SHT_LLVM_CALL_GRAPH_PROFILE and its relocation section exist, returns
// true and populates cgProfile and symbolIndices.
template <class ELFT>
static bool
processCallGraphRelocations(SmallVector<uint32_t, 32> &symbolIndices,
ArrayRef<typename ELFT::CGProfile> &cgProfile,
ObjFile<ELFT> *inputObj) {
symbolIndices.clear();
const ELFFile<ELFT> &obj = inputObj->getObj();
ArrayRef<Elf_Shdr_Impl<ELFT>> objSections =
CHECK(obj.sections(), "could not retrieve object sections");
if (inputObj->cgProfileSectionIndex == SHN_UNDEF)
return false;
cgProfile =
check(obj.template getSectionContentsAsArray<typename ELFT::CGProfile>(
objSections[inputObj->cgProfileSectionIndex]));
for (size_t i = 0, e = objSections.size(); i < e; ++i) {
const Elf_Shdr_Impl<ELFT> &sec = objSections[i];
if (sec.sh_info == inputObj->cgProfileSectionIndex) {
if (sec.sh_type == SHT_RELA) {
ArrayRef<typename ELFT::Rela> relas =
CHECK(obj.relas(sec), "could not retrieve cg profile rela section");
for (const typename ELFT::Rela &rel : relas)
symbolIndices.push_back(rel.getSymbol(config->isMips64EL));
break;
}
if (sec.sh_type == SHT_REL) {
ArrayRef<typename ELFT::Rel> rels =
CHECK(obj.rels(sec), "could not retrieve cg profile rel section");
for (const typename ELFT::Rel &rel : rels)
symbolIndices.push_back(rel.getSymbol(config->isMips64EL));
break;
}
}
}
if (symbolIndices.empty())
warn("SHT_LLVM_CALL_GRAPH_PROFILE exists, but relocation section doesn't");
return !symbolIndices.empty();
}
template <class ELFT> static void readCallGraphsFromObjectFiles() {
SmallVector<uint32_t, 32> symbolIndices;
ArrayRef<typename ELFT::CGProfile> cgProfile;
for (auto file : objectFiles) {
auto *obj = cast<ObjFile<ELFT>>(file);
if (!processCallGraphRelocations(symbolIndices, cgProfile, obj))
continue;
if (symbolIndices.size() != cgProfile.size() * 2)
fatal("number of relocations doesn't match Weights");
for (uint32_t i = 0, size = cgProfile.size(); i < size; ++i) {
const Elf_CGProfile_Impl<ELFT> &cgpe = cgProfile[i];
uint32_t fromIndex = symbolIndices[i * 2];
uint32_t toIndex = symbolIndices[i * 2 + 1];
auto *fromSym = dyn_cast<Defined>(&obj->getSymbol(fromIndex));
auto *toSym = dyn_cast<Defined>(&obj->getSymbol(toIndex));
if (!fromSym || !toSym)
continue;
auto *from = dyn_cast_or_null<InputSectionBase>(fromSym->section);
auto *to = dyn_cast_or_null<InputSectionBase>(toSym->section);
if (from && to)
config->callGraphProfile[{from, to}] += cgpe.cgp_weight;
}
}
}
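// Parses --compress-debug-sections=<value>. Only "zlib" and "none" are
// accepted, and "zlib" additionally requires LLVM to be built with zlib
// support.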
static bool getCompressDebugSections(opt::InputArgList &args) {
StringRef s = args.getLastArgValue(OPT_compress_debug_sections, "none");
if (s == "none")
return false;
if (s != "zlib")
error("unknown --compress-debug-sections value: " + s);
if (!zlib::isAvailable())
error("--compress-debug-sections: zlib is not available");
return true;
}
static StringRef getAliasSpelling(opt::Arg *arg) {
if (const opt::Arg *alias = arg->getAlias())
return alias->getSpelling();
return arg->getSpelling();
}
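// Parses an option whose value is a pair of strings separated by a
// semicolon, e.g. --thinlto-prefix-replace="old;new". An error is reported
// when the separator is missing.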
static std::pair<StringRef, StringRef> getOldNewOptions(opt::InputArgList &args,
unsigned id) {
auto *arg = args.getLastArg(id);
if (!arg)
return {"", ""};
StringRef s = arg->getValue();
std::pair<StringRef, StringRef> ret = s.split(';');
if (ret.second.empty())
error(getAliasSpelling(arg) + " expects 'old;new' format, but got " + s);
return ret;
}
// Parse the symbol ordering file and warn for any duplicate entries.
static std::vector<StringRef> getSymbolOrderingFile(MemoryBufferRef mb) {
SetVector<StringRef> names;
for (StringRef s : args::getLines(mb))
if (!names.insert(s) && config->warnSymbolOrdering)
warn(mb.getBufferIdentifier() + ": duplicate ordered symbol: " + s);
return names.takeVector();
}
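// Returns true if the dynamic relocation format is Elf_Rela (with explicit
// addends) rather than Elf_Rel. The psABI default can be overridden with
// -z rel or -z rela.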
static bool getIsRela(opt::InputArgList &args) {
// If -z rel or -z rela is specified, use the last option.
for (auto *arg : args.filtered_reverse(OPT_z)) {
StringRef s(arg->getValue());
if (s == "rel")
return false;
if (s == "rela")
return true;
}
// Otherwise, use the psABI-defined relocation entry format.
uint16_t m = config->emachine;
return m == EM_AARCH64 || m == EM_AMDGPU || m == EM_HEXAGON || m == EM_PPC ||
m == EM_PPC64 || m == EM_RISCV || m == EM_X86_64;
}
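// Parses a single LLVM command-line option (e.g. one given via -mllvm or
// -plugin-opt=) by forwarding it to cl::ParseCommandLineOptions, reporting
// msg on failure.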
static void parseClangOption(StringRef opt, const Twine &msg) {
std::string err;
raw_string_ostream os(err);
const char *argv[] = {config->progName.data(), opt.data()};
if (cl::ParseCommandLineOptions(2, argv, "", &os))
return;
os.flush();
error(msg + ": " + StringRef(err).trim());
}
// Checks the parameter of the -z bti-report and -z cet-report options.
static bool isValidReportString(StringRef arg) {
return arg == "none" || arg == "warning" || arg == "error";
}
// Initializes Config members from the command line options.
static void readConfigs(opt::InputArgList &args) {
errorHandler().verbose = args.hasArg(OPT_verbose);
errorHandler().vsDiagnostics =
args.hasArg(OPT_visual_studio_diagnostics_format, false);
config->allowMultipleDefinition =
args.hasFlag(OPT_allow_multiple_definition,
OPT_no_allow_multiple_definition, false) ||
hasZOption(args, "muldefs");
config->auxiliaryList = args::getStrings(args, OPT_auxiliary);
if (opt::Arg *arg =
args.getLastArg(OPT_Bno_symbolic, OPT_Bsymbolic_non_weak_functions,
OPT_Bsymbolic_functions, OPT_Bsymbolic)) {
if (arg->getOption().matches(OPT_Bsymbolic_non_weak_functions))
config->bsymbolic = BsymbolicKind::NonWeakFunctions;
else if (arg->getOption().matches(OPT_Bsymbolic_functions))
config->bsymbolic = BsymbolicKind::Functions;
else if (arg->getOption().matches(OPT_Bsymbolic))
config->bsymbolic = BsymbolicKind::All;
}
config->checkSections =
args.hasFlag(OPT_check_sections, OPT_no_check_sections, true);
config->chroot = args.getLastArgValue(OPT_chroot);
config->compressDebugSections = getCompressDebugSections(args);
config->cref = args.hasArg(OPT_cref);
config->defineCommon = args.hasFlag(OPT_define_common, OPT_no_define_common,
!args.hasArg(OPT_relocatable));
config->optimizeBBJumps =
args.hasFlag(OPT_optimize_bb_jumps, OPT_no_optimize_bb_jumps, false);
config->demangle = args.hasFlag(OPT_demangle, OPT_no_demangle, true);
config->dependencyFile = args.getLastArgValue(OPT_dependency_file);
config->dependentLibraries = args.hasFlag(OPT_dependent_libraries, OPT_no_dependent_libraries, true);
config->disableVerify = args.hasArg(OPT_disable_verify);
config->discard = getDiscard(args);
config->dwoDir = args.getLastArgValue(OPT_plugin_opt_dwo_dir_eq);
config->dynamicLinker = getDynamicLinker(args);
config->ehFrameHdr =
args.hasFlag(OPT_eh_frame_hdr, OPT_no_eh_frame_hdr, false);
config->emitLLVM = args.hasArg(OPT_plugin_opt_emit_llvm, false);
config->emitRelocs = args.hasArg(OPT_emit_relocs);
config->callGraphProfileSort = args.hasFlag(
OPT_call_graph_profile_sort, OPT_no_call_graph_profile_sort, true);
config->enableNewDtags =
args.hasFlag(OPT_enable_new_dtags, OPT_disable_new_dtags, true);
config->entry = args.getLastArgValue(OPT_entry);
errorHandler().errorHandlingScript =
args.getLastArgValue(OPT_error_handling_script);
config->executeOnly =
args.hasFlag(OPT_execute_only, OPT_no_execute_only, false);
config->exportDynamic =
args.hasFlag(OPT_export_dynamic, OPT_no_export_dynamic, false);
config->filterList = args::getStrings(args, OPT_filter);
config->fini = args.getLastArgValue(OPT_fini, "_fini");
config->fixCortexA53Errata843419 = args.hasArg(OPT_fix_cortex_a53_843419) &&
!args.hasArg(OPT_relocatable);
config->fixCortexA8 =
args.hasArg(OPT_fix_cortex_a8) && !args.hasArg(OPT_relocatable);
config->fortranCommon =
args.hasFlag(OPT_fortran_common, OPT_no_fortran_common, true);
config->gcSections = args.hasFlag(OPT_gc_sections, OPT_no_gc_sections, false);
config->gnuUnique = args.hasFlag(OPT_gnu_unique, OPT_no_gnu_unique, true);
config->gdbIndex = args.hasFlag(OPT_gdb_index, OPT_no_gdb_index, false);
config->icf = getICF(args);
config->ignoreDataAddressEquality =
args.hasArg(OPT_ignore_data_address_equality);
config->ignoreFunctionAddressEquality =
args.hasArg(OPT_ignore_function_address_equality);
config->init = args.getLastArgValue(OPT_init, "_init");
config->ltoAAPipeline = args.getLastArgValue(OPT_lto_aa_pipeline);
config->ltoCSProfileGenerate = args.hasArg(OPT_lto_cs_profile_generate);
config->ltoCSProfileFile = args.getLastArgValue(OPT_lto_cs_profile_file);
config->ltoPGOWarnMismatch = args.hasFlag(OPT_lto_pgo_warn_mismatch,
OPT_no_lto_pgo_warn_mismatch, true);
config->ltoDebugPassManager = args.hasArg(OPT_lto_debug_pass_manager);
config->ltoEmitAsm = args.hasArg(OPT_lto_emit_asm);
config->ltoNewPassManager =
args.hasFlag(OPT_no_lto_legacy_pass_manager, OPT_lto_legacy_pass_manager,
LLVM_ENABLE_NEW_PASS_MANAGER);
config->ltoNewPmPasses = args.getLastArgValue(OPT_lto_newpm_passes);
config->ltoWholeProgramVisibility =
args.hasFlag(OPT_lto_whole_program_visibility,
OPT_no_lto_whole_program_visibility, false);
config->ltoo = args::getInteger(args, OPT_lto_O, 2);
config->ltoObjPath = args.getLastArgValue(OPT_lto_obj_path_eq);
config->ltoPartitions = args::getInteger(args, OPT_lto_partitions, 1);
config->ltoSampleProfile = args.getLastArgValue(OPT_lto_sample_profile);
config->ltoBasicBlockSections =
args.getLastArgValue(OPT_lto_basic_block_sections);
config->ltoUniqueBasicBlockSectionNames =
args.hasFlag(OPT_lto_unique_basic_block_section_names,
OPT_no_lto_unique_basic_block_section_names, false);
config->mapFile = args.getLastArgValue(OPT_Map);
config->mipsGotSize = args::getInteger(args, OPT_mips_got_size, 0xfff0);
config->mergeArmExidx =
args.hasFlag(OPT_merge_exidx_entries, OPT_no_merge_exidx_entries, true);
config->mmapOutputFile =
args.hasFlag(OPT_mmap_output_file, OPT_no_mmap_output_file, true);
config->nmagic = args.hasFlag(OPT_nmagic, OPT_no_nmagic, false);
config->noinhibitExec = args.hasArg(OPT_noinhibit_exec);
config->nostdlib = args.hasArg(OPT_nostdlib);
config->oFormatBinary = isOutputFormatBinary(args);
config->omagic = args.hasFlag(OPT_omagic, OPT_no_omagic, false);
config->optRemarksFilename = args.getLastArgValue(OPT_opt_remarks_filename);
// Parse the remarks hotness threshold. Valid values are an integer or 'auto'.
if (auto *arg = args.getLastArg(OPT_opt_remarks_hotness_threshold)) {
auto resultOrErr = remarks::parseHotnessThresholdOption(arg->getValue());
if (!resultOrErr)
error(arg->getSpelling() + ": invalid argument '" + arg->getValue() +
"', only integer or 'auto' is supported");
else
config->optRemarksHotnessThreshold = *resultOrErr;
}
config->optRemarksPasses = args.getLastArgValue(OPT_opt_remarks_passes);
config->optRemarksWithHotness = args.hasArg(OPT_opt_remarks_with_hotness);
config->optRemarksFormat = args.getLastArgValue(OPT_opt_remarks_format);
config->optimize = args::getInteger(args, OPT_O, 1);
config->orphanHandling = getOrphanHandling(args);
config->outputFile = args.getLastArgValue(OPT_o);
config->pie = args.hasFlag(OPT_pie, OPT_no_pie, false);
config->printIcfSections =
args.hasFlag(OPT_print_icf_sections, OPT_no_print_icf_sections, false);
config->printGcSections =
args.hasFlag(OPT_print_gc_sections, OPT_no_print_gc_sections, false);
config->printArchiveStats = args.getLastArgValue(OPT_print_archive_stats);
config->printSymbolOrder =
args.getLastArgValue(OPT_print_symbol_order);
config->relax = args.hasFlag(OPT_relax, OPT_no_relax, true);
config->rpath = getRpath(args);
config->relocatable = args.hasArg(OPT_relocatable);
config->saveTemps = args.hasArg(OPT_save_temps);
config->searchPaths = args::getStrings(args, OPT_library_path);
config->sectionStartMap = getSectionStartMap(args);
config->shared = args.hasArg(OPT_shared);
config->singleRoRx = !args.hasFlag(OPT_rosegment, OPT_no_rosegment, true);
config->soName = args.getLastArgValue(OPT_soname);
config->sortSection = getSortSection(args);
config->splitStackAdjustSize = args::getInteger(args, OPT_split_stack_adjust_size, 16384);
config->strip = getStrip(args);
config->sysroot = args.getLastArgValue(OPT_sysroot);
config->target1Rel = args.hasFlag(OPT_target1_rel, OPT_target1_abs, false);
config->target2 = getTarget2(args);
config->thinLTOCacheDir = args.getLastArgValue(OPT_thinlto_cache_dir);
config->thinLTOCachePolicy = CHECK(
parseCachePruningPolicy(args.getLastArgValue(OPT_thinlto_cache_policy)),
"--thinlto-cache-policy: invalid cache policy");
config->thinLTOEmitImportsFiles = args.hasArg(OPT_thinlto_emit_imports_files);
config->thinLTOIndexOnly = args.hasArg(OPT_thinlto_index_only) ||
args.hasArg(OPT_thinlto_index_only_eq);
config->thinLTOIndexOnlyArg = args.getLastArgValue(OPT_thinlto_index_only_eq);
config->thinLTOObjectSuffixReplace =
getOldNewOptions(args, OPT_thinlto_object_suffix_replace_eq);
config->thinLTOPrefixReplace =
getOldNewOptions(args, OPT_thinlto_prefix_replace_eq);
config->thinLTOModulesToCompile =
args::getStrings(args, OPT_thinlto_single_module_eq);
config->timeTraceEnabled = args.hasArg(OPT_time_trace);
config->timeTraceGranularity =
args::getInteger(args, OPT_time_trace_granularity, 500);
config->trace = args.hasArg(OPT_trace);
config->undefined = args::getStrings(args, OPT_undefined);
config->undefinedVersion =
args.hasFlag(OPT_undefined_version, OPT_no_undefined_version, true);
config->unique = args.hasArg(OPT_unique);
config->useAndroidRelrTags = args.hasFlag(
OPT_use_android_relr_tags, OPT_no_use_android_relr_tags, false);
config->warnBackrefs =
args.hasFlag(OPT_warn_backrefs, OPT_no_warn_backrefs, false);
config->warnCommon = args.hasFlag(OPT_warn_common, OPT_no_warn_common, false);
config->warnSymbolOrdering =
args.hasFlag(OPT_warn_symbol_ordering, OPT_no_warn_symbol_ordering, true);
config->whyExtract = args.getLastArgValue(OPT_why_extract);
config->zCombreloc = getZFlag(args, "combreloc", "nocombreloc", true);
config->zCopyreloc = getZFlag(args, "copyreloc", "nocopyreloc", true);
config->zForceBti = hasZOption(args, "force-bti");
config->zForceIbt = hasZOption(args, "force-ibt");
config->zGlobal = hasZOption(args, "global");
config->zGnustack = getZGnuStack(args);
config->zHazardplt = hasZOption(args, "hazardplt");
config->zIfuncNoplt = hasZOption(args, "ifunc-noplt");
config->zInitfirst = hasZOption(args, "initfirst");
config->zInterpose = hasZOption(args, "interpose");
config->zKeepTextSectionPrefix = getZFlag(
args, "keep-text-section-prefix", "nokeep-text-section-prefix", false);
config->zNodefaultlib = hasZOption(args, "nodefaultlib");
config->zNodelete = hasZOption(args, "nodelete");
config->zNodlopen = hasZOption(args, "nodlopen");
config->zNow = getZFlag(args, "now", "lazy", false);
config->zOrigin = hasZOption(args, "origin");
config->zPacPlt = hasZOption(args, "pac-plt");
config->zRelro = getZFlag(args, "relro", "norelro", true);
Introduce the "retpoline" x86 mitigation technique for variant #2 of the speculative execution vulnerabilities disclosed today, specifically identified by CVE-2017-5715, "Branch Target Injection", and is one of the two halves to Spectre.. Summary: First, we need to explain the core of the vulnerability. Note that this is a very incomplete description, please see the Project Zero blog post for details: https://googleprojectzero.blogspot.com/2018/01/reading-privileged-memory-with-side.html The basis for branch target injection is to direct speculative execution of the processor to some "gadget" of executable code by poisoning the prediction of indirect branches with the address of that gadget. The gadget in turn contains an operation that provides a side channel for reading data. Most commonly, this will look like a load of secret data followed by a branch on the loaded value and then a load of some predictable cache line. The attacker then uses timing of the processors cache to determine which direction the branch took *in the speculative execution*, and in turn what one bit of the loaded value was. Due to the nature of these timing side channels and the branch predictor on Intel processors, this allows an attacker to leak data only accessible to a privileged domain (like the kernel) back into an unprivileged domain. The goal is simple: avoid generating code which contains an indirect branch that could have its prediction poisoned by an attacker. In many cases, the compiler can simply use directed conditional branches and a small search tree. LLVM already has support for lowering switches in this way and the first step of this patch is to disable jump-table lowering of switches and introduce a pass to rewrite explicit indirectbr sequences into a switch over integers. However, there is no fully general alternative to indirect calls. We introduce a new construct we call a "retpoline" to implement indirect calls in a non-speculatable way. It can be thought of loosely as a trampoline for indirect calls which uses the RET instruction on x86. Further, we arrange for a specific call->ret sequence which ensures the processor predicts the return to go to a controlled, known location. The retpoline then "smashes" the return address pushed onto the stack by the call with the desired target of the original indirect call. The result is a predicted return to the next instruction after a call (which can be used to trap speculative execution within an infinite loop) and an actual indirect branch to an arbitrary address. On 64-bit x86 ABIs, this is especially easily done in the compiler by using a guaranteed scratch register to pass the target into this device. For 32-bit ABIs there isn't a guaranteed scratch register and so several different retpoline variants are introduced to use a scratch register if one is available in the calling convention and to otherwise use direct stack push/pop sequences to pass the target address. This "retpoline" mitigation is fully described in the following blog post: https://support.google.com/faqs/answer/7625886 We also support a target feature that disables emission of the retpoline thunk by the compiler to allow for custom thunks if users want them. These are particularly useful in environments like kernels that routinely do hot-patching on boot and want to hot-patch their thunk to different code sequences. They can write this custom thunk and use `-mretpoline-external-thunk` *in addition* to `-mretpoline`. 
In this case, on x86-64 thu thunk names must be: ``` __llvm_external_retpoline_r11 ``` or on 32-bit: ``` __llvm_external_retpoline_eax __llvm_external_retpoline_ecx __llvm_external_retpoline_edx __llvm_external_retpoline_push ``` And the target of the retpoline is passed in the named register, or in the case of the `push` suffix on the top of the stack via a `pushl` instruction. There is one other important source of indirect branches in x86 ELF binaries: the PLT. These patches also include support for LLD to generate PLT entries that perform a retpoline-style indirection. The only other indirect branches remaining that we are aware of are from precompiled runtimes (such as crt0.o and similar). The ones we have found are not really attackable, and so we have not focused on them here, but eventually these runtimes should also be replicated for retpoline-ed configurations for completeness. For kernels or other freestanding or fully static executables, the compiler switch `-mretpoline` is sufficient to fully mitigate this particular attack. For dynamic executables, you must compile *all* libraries with `-mretpoline` and additionally link the dynamic executable and all shared libraries with LLD and pass `-z retpolineplt` (or use similar functionality from some other linker). We strongly recommend also using `-z now` as non-lazy binding allows the retpoline-mitigated PLT to be substantially smaller. When manually apply similar transformations to `-mretpoline` to the Linux kernel we observed very small performance hits to applications running typical workloads, and relatively minor hits (approximately 2%) even for extremely syscall-heavy applications. This is largely due to the small number of indirect branches that occur in performance sensitive paths of the kernel. When using these patches on statically linked applications, especially C++ applications, you should expect to see a much more dramatic performance hit. For microbenchmarks that are switch, indirect-, or virtual-call heavy we have seen overheads ranging from 10% to 50%. However, real-world workloads exhibit substantially lower performance impact. Notably, techniques such as PGO and ThinLTO dramatically reduce the impact of hot indirect calls (by speculatively promoting them to direct calls) and allow optimized search trees to be used to lower switches. If you need to deploy these techniques in C++ applications, we *strongly* recommend that you ensure all hot call targets are statically linked (avoiding PLT indirection) and use both PGO and ThinLTO. Well tuned servers using all of these techniques saw 5% - 10% overhead from the use of retpoline. We will add detailed documentation covering these components in subsequent patches, but wanted to make the core functionality available as soon as possible. Happy for more code review, but we'd really like to get these patches landed and backported ASAP for obvious reasons. We're planning to backport this to both 6.0 and 5.0 release streams and get a 5.0 release with just this cherry picked ASAP for distros and vendors. This patch is the work of a number of people over the past month: Eric, Reid, Rui, and myself. I'm mailing it out as a single commit due to the time sensitive nature of landing this and the need to backport it. Huge thanks to everyone who helped out here, and everyone at Intel who helped out in discussions about how to craft this. Also, credit goes to Paul Turner (at Google, but not an LLVM contributor) for much of the underlying retpoline design. 
Reviewers: echristo, rnk, ruiu, craig.topper, DavidKreitzer Subscribers: sanjoy, emaste, mcrosier, mgorny, mehdi_amini, hiraditya, llvm-commits Differential Revision: https://reviews.llvm.org/D41723 llvm-svn: 323155
2018-01-23 06:05:25 +08:00
config->zRetpolineplt = hasZOption(args, "retpolineplt");
config->zRodynamic = hasZOption(args, "rodynamic");
config->zSeparate = getZSeparate(args);
config->zShstk = hasZOption(args, "shstk");
config->zStackSize = args::getZOptionValue(args, OPT_z, "stack-size", 0);
config->zStartStopGC =
getZFlag(args, "start-stop-gc", "nostart-stop-gc", true);
config->zStartStopVisibility = getZStartStopVisibility(args);
config->zText = getZFlag(args, "text", "notext", true);
config->zWxneeded = hasZOption(args, "wxneeded");
setUnresolvedSymbolPolicy(args);
config->power10Stubs = args.getLastArgValue(OPT_power10_stubs_eq) != "no";
if (opt::Arg *arg = args.getLastArg(OPT_eb, OPT_el)) {
if (arg->getOption().matches(OPT_eb))
config->optEB = true;
else
config->optEL = true;
}
for (opt::Arg *arg : args.filtered(OPT_shuffle_sections)) {
constexpr StringRef errPrefix = "--shuffle-sections=: ";
std::pair<StringRef, StringRef> kv = StringRef(arg->getValue()).split('=');
if (kv.first.empty() || kv.second.empty()) {
error(errPrefix + "expected <section_glob>=<seed>, but got '" +
arg->getValue() + "'");
continue;
}
// Signed so that <section_glob>=-1 is allowed.
int64_t v;
if (!to_integer(kv.second, v))
error(errPrefix + "expected an integer, but got '" + kv.second + "'");
else if (Expected<GlobPattern> pat = GlobPattern::create(kv.first))
config->shuffleSections.emplace_back(std::move(*pat), uint32_t(v));
else
error(errPrefix + toString(pat.takeError()));
}
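// As an illustration (the glob and seed below are made up, not taken from the
// source), a plausible use is --shuffle-sections='.text.*=12345', which
// records the pattern/seed pair above so matching input sections are shuffled
// with that seed; a negative seed such as -1 is also accepted because the
// value is parsed as a signed integer.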
auto reports = {std::make_pair("bti-report", &config->zBtiReport),
std::make_pair("cet-report", &config->zCetReport)};
for (opt::Arg *arg : args.filtered(OPT_z)) {
std::pair<StringRef, StringRef> option =
StringRef(arg->getValue()).split('=');
for (auto reportArg : reports) {
if (option.first != reportArg.first)
continue;
if (!isValidReportString(option.second)) {
error(Twine("-z ") + reportArg.first + "= parameter " + option.second +
" is not recognized");
continue;
}
*reportArg.second = option.second;
}
}
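// For example (hypothetical invocations), "-z bti-report=warning" or
// "-z cet-report=error" set zBtiReport/zCetReport above; the accepted value
// strings are whatever isValidReportString allows (typically "none",
// "warning" and "error").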
for (opt::Arg *arg : args.filtered(OPT_z)) {
std::pair<StringRef, StringRef> option =
StringRef(arg->getValue()).split('=');
if (option.first != "dead-reloc-in-nonalloc")
continue;
constexpr StringRef errPrefix = "-z dead-reloc-in-nonalloc=: ";
std::pair<StringRef, StringRef> kv = option.second.split('=');
if (kv.first.empty() || kv.second.empty()) {
error(errPrefix + "expected <section_glob>=<value>");
continue;
}
uint64_t v;
if (!to_integer(kv.second, v))
error(errPrefix + "expected a non-negative integer, but got '" +
kv.second + "'");
else if (Expected<GlobPattern> pat = GlobPattern::create(kv.first))
config->deadRelocInNonAlloc.emplace_back(std::move(*pat), v);
else
error(errPrefix + toString(pat.takeError()));
}
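// Illustrative example (the glob and value are assumptions, not from the
// source): "-z dead-reloc-in-nonalloc=.debug_loc=1" records the pair above so
// that relocations in matching non-SHF_ALLOC sections which reference
// discarded symbols are resolved to the given value.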
cl::ResetAllOptionOccurrences();
// Parse LTO options.
if (auto *arg = args.getLastArg(OPT_plugin_opt_mcpu_eq))
parseClangOption(saver.save("-mcpu=" + StringRef(arg->getValue())),
arg->getSpelling());
for (opt::Arg *arg : args.filtered(OPT_plugin_opt_eq_minus))
parseClangOption(std::string("-") + arg->getValue(), arg->getSpelling());
// GCC collect2 passes -plugin-opt=path/to/lto-wrapper with an absolute or
// relative path. Just ignore it. If the value does not end with "lto-wrapper",
// consider it an unsupported LLVMgold.so option and report an error.
for (opt::Arg *arg : args.filtered(OPT_plugin_opt_eq))
if (!StringRef(arg->getValue()).endswith("lto-wrapper"))
error(arg->getSpelling() + ": unknown plugin option '" + arg->getValue() +
"'");
// Parse -mllvm options.
for (auto *arg : args.filtered(OPT_mllvm))
parseClangOption(arg->getValue(), arg->getSpelling());
// --threads= takes a positive integer and provides the default value for
// --thinlto-jobs=.
if (auto *arg = args.getLastArg(OPT_threads)) {
StringRef v(arg->getValue());
unsigned threads = 0;
if (!llvm::to_integer(v, threads, 0) || threads == 0)
error(arg->getSpelling() + ": expected a positive integer, but got '" +
arg->getValue() + "'");
parallel::strategy = hardware_concurrency(threads);
config->thinLTOJobs = v;
}
if (auto *arg = args.getLastArg(OPT_thinlto_jobs))
config->thinLTOJobs = arg->getValue();
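// For example, "--threads=8" both caps lld's own parallelism and becomes the
// default ThinLTO job count, while an explicit "--thinlto-jobs=" (handled
// just above) overrides only the latter.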
if (config->ltoo > 3)
error("invalid optimization level for LTO: " + Twine(config->ltoo));
if (config->ltoPartitions == 0)
error("--lto-partitions: number of threads must be > 0");
if (!get_threadpool_strategy(config->thinLTOJobs))
error("--thinlto-jobs: invalid job count: " + config->thinLTOJobs);
if (config->splitStackAdjustSize < 0)
error("--split-stack-adjust-size: size must be >= 0");
// The text segment is traditionally the first segment, whose address equals
// the base address. However, lld places the R PT_LOAD first. -Ttext-segment
// is an old-fashioned option that does not play well with lld's layout.
// Suggest --image-base as a likely alternative.
if (args.hasArg(OPT_Ttext_segment))
error("-Ttext-segment is not supported. Use --image-base if you "
"intend to set the base address");
// Parse ELF{32,64}{LE,BE} and CPU type.
if (auto *arg = args.getLastArg(OPT_m)) {
StringRef s = arg->getValue();
std::tie(config->ekind, config->emachine, config->osabi) =
parseEmulation(s);
config->mipsN32Abi =
(s.startswith("elf32btsmipn32") || s.startswith("elf32ltsmipn32"));
config->emulation = s;
}
// Parse --hash-style={sysv,gnu,both}.
if (auto *arg = args.getLastArg(OPT_hash_style)) {
StringRef s = arg->getValue();
if (s == "sysv")
config->sysvHash = true;
else if (s == "gnu")
config->gnuHash = true;
else if (s == "both")
config->sysvHash = config->gnuHash = true;
else
error("unknown --hash-style: " + s);
}
if (args.hasArg(OPT_print_map))
config->mapFile = "-";
// Page alignment can be disabled by the -n (--nmagic) and -N (--omagic)
// options. As PT_GNU_RELRO relies on paging, do not create it when paging is
// disabled.
if (config->nmagic || config->omagic)
config->zRelro = false;
std::tie(config->buildId, config->buildIdVector) = getBuildId(args);
std::tie(config->androidPackDynRelocs, config->relrPackDynRelocs) =
getPackDynRelocs(args);
if (auto *arg = args.getLastArg(OPT_symbol_ordering_file)) {
if (args.hasArg(OPT_call_graph_ordering_file))
error("--symbol-ordering-file and --call-graph-order-file "
"may not be used together");
if (Optional<MemoryBufferRef> buffer = readFile(arg->getValue())) {
config->symbolOrderingFile = getSymbolOrderingFile(*buffer);
// Also disable call graph profile sorting to prevent LLD from reordering
// symbols according to the CGProfile section.
config->callGraphProfileSort = false;
}
}
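// A sketch of typical usage (the file name is hypothetical):
// "--symbol-ordering-file=order.txt", where the file lists one symbol name
// per line in the desired layout order. Command line parsing only records the
// list here; the actual reordering is applied later by the writer.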
assert(config->versionDefinitions.empty());
config->versionDefinitions.push_back(
{"local", (uint16_t)VER_NDX_LOCAL, {}, {}});
config->versionDefinitions.push_back(
{"global", (uint16_t)VER_NDX_GLOBAL, {}, {}});
// If --retain-symbols-file is used, we'll keep only the symbols listed in
// the file and discard all others.
if (auto *arg = args.getLastArg(OPT_retain_symbols_file)) {
config->versionDefinitions[VER_NDX_LOCAL].nonLocalPatterns.push_back(
{"*", /*isExternCpp=*/false, /*hasWildcard=*/true});
if (Optional<MemoryBufferRef> buffer = readFile(arg->getValue()))
for (StringRef s : args::getLines(*buffer))
config->versionDefinitions[VER_NDX_GLOBAL].nonLocalPatterns.push_back(
{s, /*isExternCpp=*/false, /*hasWildcard=*/false});
}
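// The net effect of the code above is equivalent to a version script of the
// form "{ global: <each listed symbol>; local: *; };": every name read from
// the file stays global and everything else is demoted to local.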
for (opt::Arg *arg : args.filtered(OPT_warn_backrefs_exclude)) {
StringRef pattern(arg->getValue());
if (Expected<GlobPattern> pat = GlobPattern::create(pattern))
config->warnBackrefsExclude.push_back(std::move(*pat));
else
error(arg->getSpelling() + ": " + toString(pat.takeError()));
}
// For -no-pie and -pie, --export-dynamic-symbol specifies defined symbols
// which should be exported. For -shared, references to matched non-local
// STV_DEFAULT symbols are not bound to definitions within the shared object,
// even if other options express a symbolic intention: -Bsymbolic,
// -Bsymbolic-functions (if STT_FUNC), --dynamic-list.
for (auto *arg : args.filtered(OPT_export_dynamic_symbol))
config->dynamicList.push_back(
{arg->getValue(), /*isExternCpp=*/false,
/*hasWildcard=*/hasWildcard(arg->getValue())});
// --export-dynamic-symbol-list specifies a list of --export-dynamic-symbol
// patterns. --dynamic-list is --export-dynamic-symbol-list plus
// -Bsymbolic-like semantics.
config->symbolic =
config->bsymbolic == BsymbolicKind::All || args.hasArg(OPT_dynamic_list);
for (auto *arg :
args.filtered(OPT_dynamic_list, OPT_export_dynamic_symbol_list))
if (Optional<MemoryBufferRef> buffer = readFile(arg->getValue()))
readDynamicList(*buffer);
for (auto *arg : args.filtered(OPT_version_script))
if (Optional<std::string> path = searchScript(arg->getValue())) {
if (Optional<MemoryBufferRef> buffer = readFile(*path))
readVersionScript(*buffer);
} else {
error(Twine("cannot find version script ") + arg->getValue());
}
}
// Some Config members do not directly correspond to any particular
// command line options, but are computed from other Config values.
// This function initializes such members. See Config.h for the details
// of these values.
static void setConfigs(opt::InputArgList &args) {
ELFKind k = config->ekind;
uint16_t m = config->emachine;
config->copyRelocs = (config->relocatable || config->emitRelocs);
config->is64 = (k == ELF64LEKind || k == ELF64BEKind);
config->isLE = (k == ELF32LEKind || k == ELF64LEKind);
config->endianness = config->isLE ? endianness::little : endianness::big;
config->isMips64EL = (k == ELF64LEKind && m == EM_MIPS);
config->isPic = config->pie || config->shared;
config->picThunk = args.hasArg(OPT_pic_veneer, config->isPic);
config->wordsize = config->is64 ? 8 : 4;
// ELF defines two different ways to store relocation addends as shown below:
//
// Rel: Addends are stored to the location where relocations are applied. It
// cannot pack the full range of addend values for all relocation types, but
// this only affects relocation types that we don't support emitting as
// dynamic relocations (see getDynRel).
// Rela: Addends are stored as part of the relocation entry.
//
// In other words, Rela makes it easy to read addends at the price of an
// extra 4 or 8 bytes for each relocation entry.
//
// We pick the format for dynamic relocations according to the psABI for each
// processor, but a contrary choice can be made if the dynamic loader
// supports it.
config->isRela = getIsRela(args);
// If the output uses REL relocations we must store the dynamic relocation
// addends to the output sections. We also store addends for RELA relocations
// if --apply-dynamic-relocs is used.
// We default to not writing the addends when using RELA relocations since
// any standard-conforming tool can find them in r_addend.
config->writeAddends = args.hasFlag(OPT_apply_dynamic_relocs,
OPT_no_apply_dynamic_relocs, false) ||
!config->isRela;
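// For illustration: on a RELA target such as x86-64 the addend lives in
// r_addend, so section contents are rewritten only when
// --apply-dynamic-relocs is given; on a REL target such as 32-bit ARM the
// addend can only live in the relocated section itself, which is why
// writeAddends is forced on above.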
// Validation of dynamic relocation addends is on by default for assertions
// builds (for supported targets) and disabled otherwise. Ideally we would
// enable the debug checks for all targets, but currently not all targets
// have support for reading Elf_Rel addends, so we only enable for a subset.
#ifndef NDEBUG
bool checkDynamicRelocsDefault = m == EM_ARM || m == EM_386 || m == EM_MIPS ||
m == EM_X86_64 || m == EM_RISCV;
#else
bool checkDynamicRelocsDefault = false;
#endif
config->checkDynamicRelocs =
args.hasFlag(OPT_check_dynamic_relocations,
OPT_no_check_dynamic_relocations, checkDynamicRelocsDefault);
config->tocOptimize =
args.hasFlag(OPT_toc_optimize, OPT_no_toc_optimize, m == EM_PPC64);
config->pcRelOptimize =
args.hasFlag(OPT_pcrel_optimize, OPT_no_pcrel_optimize, m == EM_PPC64);
}
static bool isFormatBinary(StringRef s) {
if (s == "binary")
return true;
if (s == "elf" || s == "default")
return false;
error("unknown --format value: " + s +
" (supported formats: elf, default, binary)");
return false;
}
void LinkerDriver::createFiles(opt::InputArgList &args) {
llvm::TimeTraceScope timeScope("Load input files");
// For --{push,pop}-state.
std::vector<std::tuple<bool, bool, bool>> stack;
// Iterate over argv to process input files and positional arguments.
InputFile::isInGroup = false;
for (auto *arg : args) {
switch (arg->getOption().getID()) {
case OPT_library:
addLibrary(arg->getValue());
break;
case OPT_INPUT:
addFile(arg->getValue(), /*withLOption=*/false);
break;
case OPT_defsym: {
StringRef from;
StringRef to;
std::tie(from, to) = StringRef(arg->getValue()).split('=');
if (from.empty() || to.empty())
error("--defsym: syntax error: " + StringRef(arg->getValue()));
else
readDefsym(from, MemoryBufferRef(to, "--defsym"));
break;
}
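// For example, "--defsym=newsym=oldsym" (names are placeholders) is split
// here into a from/to pair and then handled like the linker script
// assignment "newsym = oldsym;".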
case OPT_script:
if (Optional<std::string> path = searchScript(arg->getValue())) {
if (Optional<MemoryBufferRef> mb = readFile(*path))
readLinkerScript(*mb);
break;
}
error(Twine("cannot find linker script ") + arg->getValue());
break;
case OPT_as_needed:
config->asNeeded = true;
break;
case OPT_format:
config->formatBinary = isFormatBinary(arg->getValue());
break;
case OPT_no_as_needed:
config->asNeeded = false;
break;
case OPT_Bstatic:
case OPT_omagic:
case OPT_nmagic:
config->isStatic = true;
break;
case OPT_Bdynamic:
config->isStatic = false;
break;
case OPT_whole_archive:
inWholeArchive = true;
break;
case OPT_no_whole_archive:
inWholeArchive = false;
break;
case OPT_just_symbols:
if (Optional<MemoryBufferRef> mb = readFile(arg->getValue())) {
files.push_back(createObjectFile(*mb));
files.back()->justSymbols = true;
}
break;
case OPT_start_group:
if (InputFile::isInGroup)
error("nested --start-group");
InputFile::isInGroup = true;
break;
case OPT_end_group:
if (!InputFile::isInGroup)
error("stray --end-group");
InputFile::isInGroup = false;
++InputFile::nextGroupId;
break;
case OPT_start_lib:
if (inLib)
error("nested --start-lib");
if (InputFile::isInGroup)
error("may not nest --start-lib in --start-group");
inLib = true;
InputFile::isInGroup = true;
break;
case OPT_end_lib:
if (!inLib)
error("stray --end-lib");
inLib = false;
InputFile::isInGroup = false;
++InputFile::nextGroupId;
break;
case OPT_push_state:
stack.emplace_back(config->asNeeded, config->isStatic, inWholeArchive);
break;
case OPT_pop_state:
if (stack.empty()) {
error("unbalanced --push-state/--pop-state");
break;
}
std::tie(config->asNeeded, config->isStatic, inWholeArchive) = stack.back();
stack.pop_back();
break;
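// A typical (illustrative) pattern is
// "--push-state --as-needed -lfoo --pop-state", where the library name is a
// placeholder: libfoo is linked with as-needed semantics, and the saved
// --as-needed/-Bstatic/--whole-archive state is restored for the inputs that
// follow.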
}
}
if (files.empty() && errorCount() == 0)
error("no input files");
}
// If -m <machine_type> was not given, infer it from object files.
void LinkerDriver::inferMachineType() {
if (config->ekind != ELFNoneKind)
return;
for (InputFile *f : files) {
if (f->ekind == ELFNoneKind)
continue;
config->ekind = f->ekind;
config->emachine = f->emachine;
config->osabi = f->osabi;
config->mipsN32Abi = config->emachine == EM_MIPS && isMipsN32Abi(f);
return;
}
error("target emulation unknown: -m or at least one .o file required");
}
// Parse -z max-page-size=<value>. The default value is defined by
// each target.
static uint64_t getMaxPageSize(opt::InputArgList &args) {
uint64_t val = args::getZOptionValue(args, OPT_z, "max-page-size",
target->defaultMaxPageSize);
if (!isPowerOf2_64(val))
error("max-page-size: value isn't a power of 2");
if (config->nmagic || config->omagic) {
if (val != target->defaultMaxPageSize)
warn("-z max-page-size set, but paging disabled by omagic or nmagic");
return 1;
}
return val;
}
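// Example (value chosen for illustration): "-z max-page-size=0x10000"
// selects a 64 KiB maximum page size; the value must be a power of two, and
// with -n/-N paging is disabled, so the function above returns 1 instead.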
// Parse -z common-page-size=<value>. The default value is defined by
// each target.
static uint64_t getCommonPageSize(opt::InputArgList &args) {
uint64_t val = args::getZOptionValue(args, OPT_z, "common-page-size",
target->defaultCommonPageSize);
if (!isPowerOf2_64(val))
error("common-page-size: value isn't a power of 2");
if (config->nmagic || config->omagic) {
if (val != target->defaultCommonPageSize)
warn("-z common-page-size set, but paging disabled by omagic or nmagic");
return 1;
}
// commonPageSize can't be larger than maxPageSize.
if (val > config->maxPageSize)
val = config->maxPageSize;
return val;
}
// Parses --image-base option.
static Optional<uint64_t> getImageBase(opt::InputArgList &args) {
// Because we are using "config->maxPageSize" here, this function has to be
// called after the variable is initialized.
auto *arg = args.getLastArg(OPT_image_base);
if (!arg)
return None;
StringRef s = arg->getValue();
uint64_t v;
if (!to_integer(s, v)) {
error("--image-base: number expected, but got " + s);
return 0;
}
if ((v % config->maxPageSize) != 0)
warn("--image-base: address isn't multiple of page size: " + s);
return v;
}
// Parses `--exclude-libs=lib,lib,...`.
// The library names may be delimited by commas or colons.
static DenseSet<StringRef> getExcludeLibs(opt::InputArgList &args) {
DenseSet<StringRef> ret;
for (auto *arg : args.filtered(OPT_exclude_libs)) {
StringRef s = arg->getValue();
for (;;) {
size_t pos = s.find_first_of(",:");
if (pos == StringRef::npos)
break;
ret.insert(s.substr(0, pos));
s = s.substr(pos + 1);
}
ret.insert(s);
}
return ret;
}
// Handles the --exclude-libs option. If a static library file is specified
// by --exclude-libs, all public symbols from the archive become private
// unless otherwise specified by a version script.
// A special library name "ALL" means all archive files.
//
// This is not a popular option, but some programs such as bionic libc use it.
static void excludeLibs(opt::InputArgList &args) {
DenseSet<StringRef> libs = getExcludeLibs(args);
bool all = libs.count("ALL");
auto visit = [&](InputFile *file) {
if (!file->archiveName.empty())
if (all || libs.count(path::filename(file->archiveName)))
for (Symbol *sym : file->getSymbols())
if (!sym->isUndefined() && !sym->isLocal() && sym->file == file)
sym->versionId = VER_NDX_LOCAL;
};
for (ELFFileBase *file : objectFiles)
visit(file);
for (BitcodeFile *file : bitcodeFiles)
visit(file);
}
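// For instance (archive names are placeholders), "--exclude-libs=ALL" or
// "--exclude-libs=libgcc.a:libunwind.a" marks every defined symbol coming
// from the matching archives with VER_NDX_LOCAL, so those symbols are not
// exported to the dynamic symbol table.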
// Force sym to be entered in the output.
static void handleUndefined(Symbol *sym, const char *option) {
// Since a symbol may not be used inside the program, LTO may
// eliminate it. Mark the symbol as "used" to prevent it.
sym->isUsedInRegularObj = true;
if (!sym->isLazy())
return;
sym->extract();
if (!config->whyExtract.empty())
whyExtract.emplace_back(option, sym->file, *sym);
}
// As an extension to GNU linkers, lld supports a variant of `-u`
// which accepts wildcard patterns. All symbols that match a given
// pattern are handled as if they were given by `-u`.
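//
// For illustration (hypothetical symbol names): --undefined-glob 'mod_*'
// acts as if -u had been passed for every known symbol whose name starts
// with "mod_", e.g. mod_init and mod_exit.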
static void handleUndefinedGlob(StringRef arg) {
Expected<GlobPattern> pat = GlobPattern::create(arg);
if (!pat) {
error("--undefined-glob: " + toString(pat.takeError()));
return;
}
// Calling sym->extract() in the loop is not safe because it may add new
// symbols to the symbol table, invalidating the current iterator.
std::vector<Symbol *> syms;
for (Symbol *sym : symtab->symbols())
if (pat->match(sym->getName()))
syms.push_back(sym);
for (Symbol *sym : syms)
handleUndefined(sym, "--undefined-glob");
}
static void handleLibcall(StringRef name) {
Symbol *sym = symtab->find(name);
if (!sym || !sym->isLazy())
return;
MemoryBufferRef mb;
if (auto *lo = dyn_cast<LazyObject>(sym))
mb = lo->file->mb;
else
mb = cast<LazyArchive>(sym)->getMemberBuffer();
if (isBitcode(mb))
sym->extract();
}
// Handle --dependency-file=<path>. If that option is given, lld creates a
// file at a given path with the following contents:
//
// <output-file>: <input-file> ...
//
// <input-file>:
//
// where <output-file> is a pathname of an output file and <input-file>
// ... is a list of pathnames of all input files. The `make` command can read
// a file in the above format and interpret it as dependency info. We write
// phony targets for every <input-file> to avoid an error when that file is
// removed.
//
// This option is useful if you want your final executable to depend
// on all input files, including system libraries. Here is why.
//
// When you write a Makefile, you usually write it so that the final
// executable depends on all user-generated object files. Normally, you
// don't make your executable depend on system libraries (such as libc)
// because you don't know the exact paths of libraries, even though system
// libraries that are linked to your executable statically are technically a
// part of your program. By using the --dependency-file option, you can make
// lld dump dependency info so that you can maintain exact dependencies
// easily.
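//
// For illustration only (hypothetical file names), a link such as
//
//   ld.lld main.o /usr/lib/libfoo.a -o prog --dependency-file=prog.d
//
// would produce a prog.d roughly like:
//
//   prog: \
//    main.o \
//    /usr/lib/libfoo.a
//
//   main.o:
//
//   /usr/lib/libfoo.a: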
static void writeDependencyFile() {
std::error_code ec;
raw_fd_ostream os(config->dependencyFile, ec, sys::fs::OF_None);
if (ec) {
error("cannot open " + config->dependencyFile + ": " + ec.message());
return;
}
// We use the same escape rules as Clang/GCC which are accepted by Make/Ninja:
// * A space is escaped by a backslash which itself must be escaped.
// * A hash sign is escaped by a single backslash.
// * $ is escaped as $$.
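// For example, a (hypothetical) input named "dir name/a#b$c.o" is written
// as "dir\ name/a\#b$$c.o" on a Unix-like host.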
auto printFilename = [](raw_fd_ostream &os, StringRef filename) {
llvm::SmallString<256> nativePath;
llvm::sys::path::native(filename.str(), nativePath);
llvm::sys::path::remove_dots(nativePath, /*remove_dot_dot=*/true);
for (unsigned i = 0, e = nativePath.size(); i != e; ++i) {
if (nativePath[i] == '#') {
os << '\\';
} else if (nativePath[i] == ' ') {
os << '\\';
unsigned j = i;
while (j > 0 && nativePath[--j] == '\\')
os << '\\';
} else if (nativePath[i] == '$') {
os << '$';
}
os << nativePath[i];
}
};
os << config->outputFile << ":";
for (StringRef path : config->dependencyFiles) {
os << " \\\n ";
printFilename(os, path);
}
os << "\n";
for (StringRef path : config->dependencyFiles) {
os << "\n";
printFilename(os, path);
os << ":\n";
}
}
// Replaces common symbols with defined symbols that reside in .bss sections.
// This function is called after all symbol names are resolved. As a
// result, the passes after the symbol resolution won't see any
// symbols of type CommonSymbol.
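//
// For illustration (user code, not lld): with a C compiler in -fcommon mode,
// a tentative definition such as "int counter;" appearing in several
// translation units produces CommonSymbols; the surviving one is turned
// here into a Defined symbol backed by a BssSection named "COMMON".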
static void replaceCommonSymbols() {
llvm::TimeTraceScope timeScope("Replace common symbols");
for (Symbol *sym : symtab->symbols()) {
auto *s = dyn_cast<CommonSymbol>(sym);
if (!s)
continue;
auto *bss = make<BssSection>("COMMON", s->size, s->alignment);
bss->file = s->file;
bss->markDead();
inputSections.push_back(bss);
s->replace(Defined{s->file, s->getName(), s->binding, s->stOther, s->type,
/*value=*/0, s->size, bss});
}
}
// If all references to a DSO happen to be weak, the DSO is not added
// to DT_NEEDED. If that happens, we need to eliminate shared symbols
// created from the DSO. Otherwise, they become dangling references
// that point to a non-existent DSO.
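//
// For illustration (hypothetical): if libfoo.so is only referenced through
// weak declarations such as "__attribute__((weak)) extern void f();", the
// DSO may end up not needed, and f is demoted back to an undefined weak
// symbol here.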
static void demoteSharedSymbols() {
llvm::TimeTraceScope timeScope("Demote shared symbols");
for (Symbol *sym : symtab->symbols()) {
auto *s = dyn_cast<SharedSymbol>(sym);
if (!((s && !s->getFile().isNeeded) ||
(sym->isLazy() && sym->isUsedInRegularObj)))
continue;
bool used = sym->used;
sym->replace(
Undefined{nullptr, sym->getName(), STB_WEAK, sym->stOther, sym->type});
sym->used = used;
sym->versionId = VER_NDX_GLOBAL;
}
}
// The section referred to by `s` is considered address-significant. Set the
// keepUnique flag on the section if appropriate.
static void markAddrsig(Symbol *s) {
if (auto *d = dyn_cast_or_null<Defined>(s))
if (d->section)
// We don't need to keep text sections unique under --icf=all even if they
// are address-significant.
if (config->icf == ICFLevel::Safe || !(d->section->flags & SHF_EXECINSTR))
d->section->keepUnique = true;
}
// Record sections that define symbols mentioned in --keep-unique <symbol>
// and symbols referred to by address-significance tables. These sections are
// ineligible for ICF.
template <class ELFT>
static void findKeepUniqueSections(opt::InputArgList &args) {
for (auto *arg : args.filtered(OPT_keep_unique)) {
StringRef name = arg->getValue();
auto *d = dyn_cast_or_null<Defined>(symtab->find(name));
if (!d || !d->section) {
warn("could not find symbol " + name + " to keep unique");
continue;
}
d->section->keepUnique = true;
}
// --icf=all --ignore-data-address-equality means that we can ignore
// the dynsym and address-significance tables entirely.
if (config->icf == ICFLevel::All && config->ignoreDataAddressEquality)
return;
// Symbols in the dynsym could be address-significant in other executables
// or DSOs, so we conservatively mark them as address-significant.
for (Symbol *sym : symtab->symbols())
if (sym->includeInDynsym())
markAddrsig(sym);
// Visit the address-significance table in each object file and mark each
// referenced symbol as address-significant.
for (InputFile *f : objectFiles) {
auto *obj = cast<ObjFile<ELFT>>(f);
ArrayRef<Symbol *> syms = obj->getSymbols();
if (obj->addrsigSec) {
ArrayRef<uint8_t> contents =
check(obj->getObj().getSectionContents(*obj->addrsigSec));
const uint8_t *cur = contents.begin();
while (cur != contents.end()) {
unsigned size;
const char *err;
uint64_t symIndex = decodeULEB128(cur, &size, contents.end(), &err);
if (err)
fatal(toString(f) + ": could not decode addrsig section: " + err);
markAddrsig(syms[symIndex]);
cur += size;
}
} else {
// If an object file does not have an address-significance table,
// conservatively mark all of its symbols as address-significant.
for (Symbol *s : syms)
markAddrsig(s);
}
}
}
// This function reads a symbol partition specification section. These sections
// are used to control which partition a symbol is allocated to. See
// https://lld.llvm.org/Partitions.html for more details on partitions.
template <typename ELFT>
static void readSymbolPartitionSection(InputSectionBase *s) {
// Read the relocation that refers to the partition's entry point symbol.
Symbol *sym;
const RelsOrRelas<ELFT> rels = s->template relsOrRelas<ELFT>();
if (rels.areRelocsRel())
sym = &s->getFile<ELFT>()->getRelocTargetSym(rels.rels[0]);
else
sym = &s->getFile<ELFT>()->getRelocTargetSym(rels.relas[0]);
if (!isa<Defined>(sym) || !sym->includeInDynsym())
return;
StringRef partName = reinterpret_cast<const char *>(s->data().data());
for (Partition &part : partitions) {
if (part.name == partName) {
sym->partition = part.getNumber();
return;
}
}
// Forbid partitions from being used on incompatible targets, and forbid them
// from being used together with various linker features that assume a single
// set of output sections.
if (script->hasSectionsCommand)
error(toString(s->file) +
": partitions cannot be used with the SECTIONS command");
if (script->hasPhdrsCommands())
error(toString(s->file) +
": partitions cannot be used with the PHDRS command");
if (!config->sectionStartMap.empty())
error(toString(s->file) + ": partitions cannot be used with "
"--section-start, -Ttext, -Tdata or -Tbss");
if (config->emachine == EM_MIPS)
error(toString(s->file) + ": partitions cannot be used on this target");
// Impose a limit of no more than 254 partitions. This limit comes from the
// sizes of the Partition fields in InputSectionBase and Symbol, as well as
// the amount of space devoted to the partition number in RankFlags.
if (partitions.size() == 254)
fatal("may not have more than 254 partitions");
partitions.emplace_back();
Partition &newPart = partitions.back();
newPart.name = partName;
sym->partition = newPart.getNumber();
}
static Symbol *addUndefined(StringRef name) {
return symtab->addSymbol(
Undefined{nullptr, name, STB_GLOBAL, STV_DEFAULT, 0});
}
static Symbol *addUnusedUndefined(StringRef name,
uint8_t binding = STB_GLOBAL) {
Undefined sym{nullptr, name, binding, STV_DEFAULT, 0};
sym.isUsedInRegularObj = false;
return symtab->addSymbol(sym);
}
// This function is where all the optimizations of link-time
// optimization take place. When LTO is in use, some input files are
// not in native object file format but in the LLVM bitcode format.
// This function compiles bitcode files into a few big native files
// using LLVM functions and replaces bitcode symbols with the results.
// Because all bitcode files that the program consists of are passed to
// the compiler at once, it can do a whole-program optimization.
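//
// For illustration (hypothetical inputs): in "ld.lld a.o b.o -o prog" where
// b.o is LLVM bitcode, b.o is compiled here into native code (together with
// any other bitcode inputs) and the resulting objects join the link as if
// they had been passed on the command line.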
template <class ELFT> void LinkerDriver::compileBitcodeFiles() {
llvm::TimeTraceScope timeScope("LTO");
// Compile bitcode files and replace bitcode symbols.
lto.reset(new BitcodeCompiler);
for (BitcodeFile *file : bitcodeFiles)
lto->add(*file);
for (InputFile *file : lto->compile()) {
auto *obj = cast<ObjFile<ELFT>>(file);
obj->parse(/*ignoreComdats=*/true);
// Parse '@' in symbol names for non-relocatable output.
if (!config->relocatable)
for (Symbol *sym : obj->getGlobalSymbols())
sym->parseSymbolVersion();
objectFiles.push_back(obj);
}
}
// The --wrap option is a feature to rename symbols so that you can write
// wrappers for existing functions. If you pass `--wrap=foo`, all
// occurrences of symbol `foo` are resolved to `__wrap_foo` (so, you are
// expected to write `__wrap_foo` function as a wrapper). The original
// symbol becomes accessible as `__real_foo`, so you can call that from your
// wrapper.
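//
// For illustration (user code, not part of lld): with --wrap=malloc, a
// wrapper could be written as
//
//   void *__wrap_malloc(size_t size) {
//     puts("malloc was called");   // any instrumentation the user wants
//     return __real_malloc(size);  // forward to the original allocator
//   }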
//
// This data structure is instantiated for each --wrap option.
struct WrappedSymbol {
Symbol *sym;
Symbol *real;
Symbol *wrap;
};
// Handles --wrap option.
//
// This function instantiates wrapper symbols. At this point, they seem
// like they are not being used at all, so we explicitly set some flags so
// that LTO won't eliminate them.
static std::vector<WrappedSymbol> addWrappedSymbols(opt::InputArgList &args) {
std::vector<WrappedSymbol> v;
DenseSet<StringRef> seen;
for (auto *arg : args.filtered(OPT_wrap)) {
StringRef name = arg->getValue();
if (!seen.insert(name).second)
continue;
Symbol *sym = symtab->find(name);
if (!sym)
continue;
Symbol *real = addUnusedUndefined(saver.save("__real_" + name));
Symbol *wrap =
addUnusedUndefined(saver.save("__wrap_" + name), sym->binding);
v.push_back({sym, real, wrap});
// We want to tell LTO not to inline symbols to be overwritten
// because LTO doesn't know the final symbol contents after renaming.
real->canInline = false;
sym->canInline = false;
// Tell LTO not to eliminate these symbols.
sym->isUsedInRegularObj = true;
// If sym is referenced in any object file, bitcode file or shared object,
// retain wrap which is the redirection target of sym. If the object file
// defining sym has sym references, we cannot easily distinguish the case
// from cases where sym is not referenced. Retain wrap because we choose to
// wrap sym references regardless of whether sym is defined
// (https://sourceware.org/bugzilla/show_bug.cgi?id=26358).
if (sym->referenced || sym->isDefined())
wrap->isUsedInRegularObj = true;
}
return v;
}
// Do renaming for --wrap and foo@v1 by updating pointers to symbols.
//
// When this function is executed, only InputFiles and symbol table
// contain pointers to symbol objects. We visit them to replace pointers,
// so that wrapped symbols are swapped as instructed by the command line.
static void redirectSymbols(ArrayRef<WrappedSymbol> wrapped) {
llvm::TimeTraceScope timeScope("Redirect symbols");
DenseMap<Symbol *, Symbol *> map;
for (const WrappedSymbol &w : wrapped) {
map[w.sym] = w.wrap;
map[w.real] = w.sym;
}
for (Symbol *sym : symtab->symbols()) {
// Enumerate symbols with a non-default version (foo@v1).
StringRef name = sym->getName();
const char *suffix1 = sym->getVersionSuffix();
if (suffix1[0] != '@' || suffix1[1] == '@')
continue;
// Check the existing symbol foo. We have two special cases to handle:
//
// * There is a definition of foo@v1 and foo@@v1.
// * There is a definition of foo@v1 and foo.
Defined *sym2 = dyn_cast_or_null<Defined>(symtab->find(name));
if (!sym2)
continue;
const char *suffix2 = sym2->getVersionSuffix();
if (suffix2[0] == '@' && suffix2[1] == '@' &&
strcmp(suffix1 + 1, suffix2 + 2) == 0) {
// foo@v1 and foo@@v1 should be merged, so redirect foo@v1 to foo@@v1.
map.try_emplace(sym, sym2);
// If both foo@v1 and foo@@v1 are defined and non-weak, report a duplicate
// definition error.
sym2->resolve(*sym);
// Eliminate foo@v1 from the symbol table.
sym->symbolKind = Symbol::PlaceholderKind;
} else if (auto *sym1 = dyn_cast<Defined>(sym)) {
if (sym2->versionId > VER_NDX_GLOBAL
? config->versionDefinitions[sym2->versionId].name == suffix1 + 1
: sym1->section == sym2->section && sym1->value == sym2->value) {
// Due to an assembler design flaw, if foo is defined, .symver foo,
// foo@v1 defines both foo and foo@v1. Unless foo is bound to a
// different version, GNU ld makes foo@v1 canonical and eliminates foo.
// Emulate its behavior, otherwise we would have foo or foo@@v1 beside
// foo@v1. foo@v1 and foo combining does not apply if they are not
// defined in the same place.
map.try_emplace(sym2, sym);
sym2->symbolKind = Symbol::PlaceholderKind;
}
}
}
if (map.empty())
return;
// Update pointers in input files.
parallelForEach(objectFiles, [&](InputFile *file) {
MutableArrayRef<Symbol *> syms = file->getMutableSymbols();
for (size_t i = 0, e = syms.size(); i != e; ++i)
if (Symbol *s = map.lookup(syms[i]))
syms[i] = s;
});
// Update pointers in the symbol table.
for (const WrappedSymbol &w : wrapped)
symtab->wrap(w.sym, w.real, w.wrap);
}
static void checkAndReportMissingFeature(StringRef config, uint32_t features,
uint32_t mask, const Twine &report) {
if (!(features & mask)) {
if (config == "error")
error(report);
else if (config == "warning")
warn(report);
}
}
// To enable CET (x86's hardware-assisted control flow enforcement), each
// source file must be compiled with -fcf-protection. Object files compiled
// with the flag contain feature flags indicating that they are compatible
// with CET. We enable the feature only when all object files are compatible
// with CET.
//
// This is also the case with AArch64's BTI and PAC, which use a similar
// GNU_PROPERTY_AARCH64_FEATURE_1_AND mechanism.
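//
// For illustration (exact compiler flags may vary): objects built with
// "clang -fcf-protection=full" (x86 IBT/SHSTK) or
// "clang -mbranch-protection=standard" (AArch64 BTI and PAC-RET) carry the
// corresponding GNU property bits, which are AND-ed together below.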
template <class ELFT> static uint32_t getAndFeatures() {
if (config->emachine != EM_386 && config->emachine != EM_X86_64 &&
config->emachine != EM_AARCH64)
return 0;
uint32_t ret = -1;
for (InputFile *f : objectFiles) {
uint32_t features = cast<ObjFile<ELFT>>(f)->andFeatures;
checkAndReportMissingFeature(
config->zBtiReport, features, GNU_PROPERTY_AARCH64_FEATURE_1_BTI,
toString(f) + ": -z bti-report: file does not have "
"GNU_PROPERTY_AARCH64_FEATURE_1_BTI property");
checkAndReportMissingFeature(
config->zCetReport, features, GNU_PROPERTY_X86_FEATURE_1_IBT,
toString(f) + ": -z cet-report: file does not have "
"GNU_PROPERTY_X86_FEATURE_1_IBT property");
checkAndReportMissingFeature(
config->zCetReport, features, GNU_PROPERTY_X86_FEATURE_1_SHSTK,
toString(f) + ": -z cet-report: file does not have "
"GNU_PROPERTY_X86_FEATURE_1_SHSTK property");
if (config->zForceBti && !(features & GNU_PROPERTY_AARCH64_FEATURE_1_BTI)) {
features |= GNU_PROPERTY_AARCH64_FEATURE_1_BTI;
if (config->zBtiReport == "none")
warn(toString(f) + ": -z force-bti: file does not have "
"GNU_PROPERTY_AARCH64_FEATURE_1_BTI property");
} else if (config->zForceIbt &&
!(features & GNU_PROPERTY_X86_FEATURE_1_IBT)) {
if (config->zCetReport == "none")
warn(toString(f) + ": -z force-ibt: file does not have "
"GNU_PROPERTY_X86_FEATURE_1_IBT property");
features |= GNU_PROPERTY_X86_FEATURE_1_IBT;
}
if (config->zPacPlt && !(features & GNU_PROPERTY_AARCH64_FEATURE_1_PAC)) {
warn(toString(f) + ": -z pac-plt: file does not have "
"GNU_PROPERTY_AARCH64_FEATURE_1_PAC property");
features |= GNU_PROPERTY_AARCH64_FEATURE_1_PAC;
}
ret &= features;
}
// Force enable Shadow Stack.
if (config->zShstk)
ret |= GNU_PROPERTY_X86_FEATURE_1_SHSTK;
return ret;
}
// Do actual linking. Note that when this function is called,
// all linker scripts have already been parsed.
template <class ELFT> void LinkerDriver::link(opt::InputArgList &args) {
llvm::TimeTraceScope timeScope("Link", StringRef("LinkerDriver::Link"));
// If a --hash-style option was not given, set to a default value,
// which varies depending on the target.
if (!args.hasArg(OPT_hash_style)) {
if (config->emachine == EM_MIPS)
config->sysvHash = true;
else
config->sysvHash = config->gnuHash = true;
}
// Default output filename is "a.out" by the Unix tradition.
if (config->outputFile.empty())
config->outputFile = "a.out";
// Fail early if the output file or map file is not writable. If a user has a
// long link, e.g. due to a large LTO link, they do not wish to run it and
// find that it failed because there was a mistake in their command-line.
{
llvm::TimeTraceScope timeScope("Create output files");
if (auto e = tryCreateFile(config->outputFile))
error("cannot open output file " + config->outputFile + ": " +
e.message());
if (auto e = tryCreateFile(config->mapFile))
error("cannot open map file " + config->mapFile + ": " + e.message());
if (auto e = tryCreateFile(config->whyExtract))
error("cannot open --why-extract= file " + config->whyExtract + ": " +
e.message());
}
if (errorCount())
return;
// Use the default entry point name if no name was given via the command
// line or linker scripts. For some reason, the MIPS entry point name is
// different from the others.
config->warnMissingEntry =
(!config->entry.empty() || (!config->shared && !config->relocatable));
if (config->entry.empty() && !config->relocatable)
config->entry = (config->emachine == EM_MIPS) ? "__start" : "_start";
// Handle --trace-symbol.
for (auto *arg : args.filtered(OPT_trace_symbol))
symtab->insert(arg->getValue())->traced = true;
// Handle -u/--undefined before input files. If both a.a and b.so define foo,
// -u foo a.a b.so will extract a.a.
for (StringRef name : config->undefined)
addUnusedUndefined(name)->referenced = true;
// Add all files to the symbol table. This will add almost all
// symbols that we need to the symbol table. This process might
// add files to the link via autolinking; these files are always
// appended to the Files vector.
{
llvm::TimeTraceScope timeScope("Parse input files");
for (size_t i = 0; i < files.size(); ++i) {
llvm::TimeTraceScope timeScope("Parse input files", files[i]->getName());
parseFile(files[i]);
}
}
// Now that we have every file, we can decide if we will need a
// dynamic symbol table.
// We need one if we were asked to export dynamic symbols or if we are
// producing a shared library.
// We also need one if any shared libraries are used and for pie executables
// (probably because the dynamic linker needs it).
config->hasDynSymTab =
!sharedFiles.empty() || config->isPic || config->exportDynamic;
// Some symbols (such as __ehdr_start) are defined lazily only when there
// are undefined symbols for them, so we add these to trigger that logic.
for (StringRef name : script->referencedSymbols)
addUndefined(name);
// Prevent LTO from removing any definition referenced by -u.
for (StringRef name : config->undefined)
if (Defined *sym = dyn_cast_or_null<Defined>(symtab->find(name)))
sym->isUsedInRegularObj = true;
// If an entry symbol is in a static archive, pull out that file now.
if (Symbol *sym = symtab->find(config->entry))
handleUndefined(sym, "--entry");
// Handle the `--undefined-glob <pattern>` options.
for (StringRef pat : args::getStrings(args, OPT_undefined_glob))
handleUndefinedGlob(pat);
// Mark -init and -fini symbols so that the LTO doesn't eliminate them.
if (Symbol *sym = dyn_cast_or_null<Defined>(symtab->find(config->init)))
sym->isUsedInRegularObj = true;
if (Symbol *sym = dyn_cast_or_null<Defined>(symtab->find(config->fini)))
sym->isUsedInRegularObj = true;
// If any of our inputs are bitcode files, the LTO code generator may create
// references to certain library functions that might not be explicit in the
// bitcode file's symbol table. If any of those library functions are defined
// in a bitcode file in an archive member, we need to arrange to use LTO to
// compile those archive members by adding them to the link beforehand.
//
// However, adding all libcall symbols to the link can have undesired
// consequences. For example, the libgcc implementation of
// __sync_val_compare_and_swap_8 on 32-bit ARM pulls in an .init_array entry
// that aborts the program if the Linux kernel does not support 64-bit
// atomics, which would prevent the program from running even if it does not
// use 64-bit atomics.
//
// Therefore, we only add libcall symbols to the link before LTO if we have
// to, i.e. if the symbol's definition is in bitcode. Any other required
// libcall symbols will be added to the link after LTO when we add the LTO
// object file to the link.
if (!bitcodeFiles.empty())
for (auto *s : lto::LTO::getRuntimeLibcallSymbols())
handleLibcall(s);
// Return if there were name resolution errors.
if (errorCount())
return;
// We want to declare linker script's symbols early,
// so that we can version them.
// They also might be exported if referenced by DSOs.
script->declareSymbols();
// Handle --exclude-libs. This is before scanVersionScript() due to a
// workaround for Android ndk: for a defined versioned symbol in an archive
// without a version node in the version script, Android does not expect a
// 'has undefined version' error in -shared --exclude-libs=ALL mode (PR36295).
// GNU ld errors in this case.
if (args.hasArg(OPT_exclude_libs))
excludeLibs(args);
// Create elfHeader early. We need a dummy section in
// addReservedSymbols to mark the created symbols as not absolute.
Out::elfHeader = make<OutputSection>("", 0, SHF_ALLOC);
std::vector<WrappedSymbol> wrapped = addWrappedSymbols(args);
// We need to create some reserved symbols such as _end. Create them.
if (!config->relocatable)
addReservedSymbols();
// Apply version scripts.
//
// For a relocatable output, version scripts don't make sense, and
// parsing a symbol version string (e.g. dropping "@ver1" from a symbol
// name "foo@ver1") rather do harm, so we don't call this if -r is given.
if (!config->relocatable) {
llvm::TimeTraceScope timeScope("Process symbol versions");
symtab->scanVersionScript();
}
// Do link-time optimization if given files are LLVM bitcode files.
// This compiles bitcode files into real object files.
//
// With this the symbol table should be complete. After this, no new names
// except a few linker-synthesized ones will be added to the symbol table.
compileBitcodeFiles<ELFT>();
// Handle --exclude-libs again because lto.tmp may reference additional
// libcalls symbols defined in an excluded archive. This may override
// versionId set by scanVersionScript().
if (args.hasArg(OPT_exclude_libs))
excludeLibs(args);
// Symbol resolution finished. Report backward reference problems.
reportBackrefs();
if (errorCount())
return;
// If --thinlto-index-only is given, we should create only "index
// files" and not object files. Index file creation is already done
// in compileBitcodeFiles, so we are done if that's the case.
// Likewise, --plugin-opt=emit-llvm and --plugin-opt=emit-asm are the
// options to create output files in bitcode or assembly code
// respectively. No object files are generated.
// Also bail out here when only certain thinLTO modules are specified for
  // compilation. The intermediate object files are the expected output.
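  // (For example, a distributed ThinLTO build may run the linker with
  // --thinlto-index-only just to produce per-module *.thinlto.bc index files;
  // native code generation and the final link happen in later steps.)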
if (config->thinLTOIndexOnly || config->emitLLVM || config->ltoEmitAsm ||
!config->thinLTOModulesToCompile.empty())
return;
// Apply symbol renames for --wrap and combine foo@v1 and foo@@v1.
redirectSymbols(wrapped);
{
llvm::TimeTraceScope timeScope("Aggregate sections");
    // We now have a complete list of input files; beyond this point, no new
    // files are added.
// Aggregate all input sections into one place.
for (InputFile *f : objectFiles)
for (InputSectionBase *s : f->getSections())
if (s && s != &InputSection::discarded)
inputSections.push_back(s);
for (BinaryFile *f : binaryFiles)
for (InputSectionBase *s : f->getSections())
inputSections.push_back(cast<InputSection>(s));
}
{
llvm::TimeTraceScope timeScope("Strip sections");
llvm::erase_if(inputSections, [](InputSectionBase *s) {
if (s->type == SHT_LLVM_SYMPART) {
readSymbolPartitionSection<ELFT>(s);
return true;
}
      // We do not want to emit debug sections if --strip-all
      // or --strip-debug is given.
if (config->strip == StripPolicy::None)
return false;
if (isDebugSection(*s))
return true;
if (auto *isec = dyn_cast<InputSection>(s))
if (InputSectionBase *rel = isec->getRelocatedSection())
if (isDebugSection(*rel))
return true;
return false;
});
}
// Since we now have a complete set of input files, we can create
// a .d file to record build dependencies.
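  // The file named by --dependency-file= uses Makefile syntax, roughly
  // "output: input1 input2 ...", so a build system can rerun the link when
  // any input changes.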
if (!config->dependencyFile.empty())
writeDependencyFile();
// Now that the number of partitions is fixed, save a pointer to the main
// partition.
mainPart = &partitions[0];
  // Read .note.gnu.property sections from input object files, which
  // contain hints to tweak the linker's and loader's behavior.
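  // The aggregated value is the bitwise AND of the feature bits (e.g.
  // GNU_PROPERTY_AARCH64_FEATURE_1_BTI or GNU_PROPERTY_X86_FEATURE_1_IBT)
  // across all inputs, so a feature is in effect only when every input
  // object opts in, unless overridden on the command line.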
config->andFeatures = getAndFeatures<ELFT>();
// The Target instance handles target-specific stuff, such as applying
// relocations or writing a PLT section. It also contains target-dependent
// values such as a default image base address.
target = getTarget();
config->eflags = target->calcEFlags();
  // maxPageSize (sometimes called the ABI page size) is the maximum page size
  // that the output can be run on. For example, if the OS can use 4k or 64k
  // page sizes, then maxPageSize must be 64k for the output to be usable on
  // both. All important alignment decisions must use this value.
config->maxPageSize = getMaxPageSize(args);
  // commonPageSize is the most common page size that the output will be run
  // on. For example, if an OS can use 4k or 64k page sizes and 4k is more
  // common than 64k, then commonPageSize is set to 4k. commonPageSize can be
  // used for optimizations such as DATA_SEGMENT_ALIGN in linker scripts.
  // LLD's use of it is limited to writing trap instructions on the last
  // executable segment.
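  // Both page sizes can be set explicitly, e.g. with
  // "-z max-page-size=0x10000 -z common-page-size=0x1000" for a target that
  // must also work on 64k-page kernels but usually runs with 4k pages.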
config->commonPageSize = getCommonPageSize(args);
config->imageBase = getImageBase(args);
if (config->emachine == EM_ARM) {
// FIXME: These warnings can be removed when lld only uses these features
// when the input objects have been compiled with an architecture that
// supports them.
    if (!config->armHasBlx)
warn("lld uses blx instruction, no object with architecture supporting "
"feature detected");
}
// This adds a .comment section containing a version string.
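  // The string typically has the form "Linker: LLD <version>" and can be
  // inspected with "readelf -p .comment" on the output file.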
if (!config->relocatable)
inputSections.push_back(createCommentSection());
// Replace common symbols with regular symbols.
replaceCommonSymbols();
  // Split SHF_MERGE and .eh_frame sections into pieces in preparation for
  // garbage collection.
splitSections<ELFT>();
  // Garbage collection and removal of shared symbols from unused shared
  // objects.
markLive<ELFT>();
demoteSharedSymbols();
// Make copies of any input sections that need to be copied into each
// partition.
copySectionsIntoPartitions();
// Create synthesized sections such as .got and .plt. This is called before
// processSectionCommands() so that they can be placed by SECTIONS commands.
createSyntheticSections<ELFT>();
// Some input sections that are used for exception handling need to be moved
// into synthetic sections. Do that now so that they aren't assigned to
// output sections in the usual way.
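  // For example, input .eh_frame pieces are collected into a synthetic
  // .eh_frame section (and, on ARM, .ARM.exidx sections are combined
  // similarly) rather than being mapped one-to-one to output sections.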
if (!config->relocatable)
combineEhSections();
{
llvm::TimeTraceScope timeScope("Assign sections");
// Create output sections described by SECTIONS commands.
script->processSectionCommands();
// Linker scripts control how input sections are assigned to output
// sections. Input sections that were not handled by scripts are called
// "orphans", and they are assigned to output sections by the default rule.
// Process that.
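    // For example, with "SECTIONS { .text : { *(.text*) } }", an input
    // .rodata section matches no rule; it becomes an orphan and is placed
    // near an output section with similar attributes.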
script->addOrphanSections();
}
{
llvm::TimeTraceScope timeScope("Merge/finalize input sections");
// Migrate InputSectionDescription::sectionBases to sections. This includes
// merging MergeInputSections into a single MergeSyntheticSection. From this
// point onwards InputSectionDescription::sections should be used instead of
// sectionBases.
for (SectionCommand *cmd : script->sectionCommands)
if (auto *sec = dyn_cast<OutputSection>(cmd))
sec->finalizeInputSections();
llvm::erase_if(inputSections, [](InputSectionBase *s) {
return isa<MergeInputSection>(s);
});
}
// Two input sections with different output sections should not be folded.
// ICF runs after processSectionCommands() so that we know the output sections.
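  // (For illustration, two sections with byte-identical contents and
  // identical relocations, e.g. duplicate template instantiations, can be
  // folded into one by --icf=all.)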
if (config->icf != ICFLevel::None) {
findKeepUniqueSections<ELFT>(args);
doIcf<ELFT>();
}
  // Read the call graph now that we know what was GCed or ICFed.
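  // A --call-graph-ordering-file lists one edge per line, roughly
  // "<from symbol> <to symbol> <weight>"; call graph profiles embedded in
  // object files (.llvm.call-graph-profile sections) are read as well.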
if (config->callGraphProfileSort) {
if (auto *arg = args.getLastArg(OPT_call_graph_ordering_file))
if (Optional<MemoryBufferRef> buffer = readFile(arg->getValue()))
readCallGraph(*buffer);
readCallGraphsFromObjectFiles<ELFT>();
}
// Write the result to the file.
writeResult<ELFT>();
}