2015-09-23 02:19:46 +08:00
|
|
|
//===- Target.cpp ---------------------------------------------------------===//
|
|
|
|
//
|
|
|
|
// The LLVM Linker
|
|
|
|
//
|
|
|
|
// This file is distributed under the University of Illinois Open Source
|
|
|
|
// License. See LICENSE.TXT for details.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
2015-10-14 03:51:57 +08:00
|
|
|
//
|
2015-10-16 03:52:27 +08:00
|
|
|
// Machine-specific things, such as applying relocations, creation of
|
|
|
|
// GOT or PLT entries, etc., are handled in this file.
|
|
|
|
//
|
|
|
|
// Refer the ELF spec for the single letter varaibles, S, A or P, used
|
|
|
|
// in this file. SA is S+A.
|
2015-10-14 03:51:57 +08:00
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
2015-09-23 02:19:46 +08:00
|
|
|
|
|
|
|
#include "Target.h"
|
2015-09-23 04:54:08 +08:00
|
|
|
#include "Error.h"
|
2015-10-09 04:06:07 +08:00
|
|
|
#include "OutputSections.h"
|
2015-09-30 07:22:16 +08:00
|
|
|
#include "Symbols.h"
|
2015-09-23 02:19:46 +08:00
|
|
|
|
|
|
|
#include "llvm/ADT/ArrayRef.h"
|
2015-09-23 04:54:08 +08:00
|
|
|
#include "llvm/Object/ELF.h"
|
2015-09-23 02:19:46 +08:00
|
|
|
#include "llvm/Support/Endian.h"
|
|
|
|
#include "llvm/Support/ELF.h"
|
|
|
|
|
|
|
|
using namespace llvm;
|
2015-09-23 04:54:08 +08:00
|
|
|
using namespace llvm::object;
|
2015-09-24 22:16:02 +08:00
|
|
|
using namespace llvm::support::endian;
|
2015-09-23 02:19:46 +08:00
|
|
|
using namespace llvm::ELF;
|
|
|
|
|
|
|
|
namespace lld {
|
|
|
|
namespace elf2 {
|
|
|
|
|
2016-02-12 05:18:01 +08:00
|
|
|
TargetInfo *Target;
|
2015-09-23 02:19:46 +08:00
|
|
|
|
2015-11-10 05:43:00 +08:00
|
|
|
// Read-modify-write helper: adds V to the 32-bit value stored at P,
// using byte order E for both the load and the store.
template <endianness E> static void add32(void *P, int32_t V) {
  uint32_t Current = read32<E>(P);
  write32<E>(P, Current + V);
}
|
2015-10-15 05:30:32 +08:00
|
|
|
|
2015-11-10 05:43:00 +08:00
|
|
|
static void add32le(uint8_t *P, int32_t V) { add32<support::little>(P, V); }
|
|
|
|
static void or32le(uint8_t *P, int32_t V) { write32le(P, read32le(P) | V); }
|
2015-10-15 05:30:32 +08:00
|
|
|
|
2015-11-26 17:49:44 +08:00
|
|
|
template <unsigned N> static void checkInt(int64_t V, uint32_t Type) {
|
|
|
|
if (isInt<N>(V))
|
|
|
|
return;
|
|
|
|
StringRef S = getELFRelocationTypeName(Config->EMachine, Type);
|
2016-02-02 07:28:21 +08:00
|
|
|
error("Relocation " + S + " out of range");
|
2015-11-26 17:49:44 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
template <unsigned N> static void checkUInt(uint64_t V, uint32_t Type) {
|
|
|
|
if (isUInt<N>(V))
|
|
|
|
return;
|
|
|
|
StringRef S = getELFRelocationTypeName(Config->EMachine, Type);
|
2016-02-02 07:28:21 +08:00
|
|
|
error("Relocation " + S + " out of range");
|
2015-11-26 17:49:44 +08:00
|
|
|
}
|
|
|
|
|
2015-11-26 18:05:24 +08:00
|
|
|
template <unsigned N> static void checkIntUInt(uint64_t V, uint32_t Type) {
|
|
|
|
if (isInt<N>(V) || isUInt<N>(V))
|
|
|
|
return;
|
|
|
|
StringRef S = getELFRelocationTypeName(Config->EMachine, Type);
|
2016-02-02 07:28:21 +08:00
|
|
|
error("Relocation " + S + " out of range");
|
2015-11-26 18:05:24 +08:00
|
|
|
}
|
|
|
|
|
2015-11-26 17:49:44 +08:00
|
|
|
template <unsigned N> static void checkAlignment(uint64_t V, uint32_t Type) {
|
|
|
|
if ((V & (N - 1)) == 0)
|
|
|
|
return;
|
|
|
|
StringRef S = getELFRelocationTypeName(Config->EMachine, Type);
|
2016-02-02 07:28:21 +08:00
|
|
|
error("Improper alignment for relocation " + S);
|
2015-11-26 17:49:44 +08:00
|
|
|
}
|
|
|
|
|
2015-12-21 18:12:06 +08:00
|
|
|
template <class ELFT> bool isGnuIFunc(const SymbolBody &S) {
|
2015-12-24 08:47:42 +08:00
|
|
|
if (auto *SS = dyn_cast<DefinedElf<ELFT>>(&S))
|
2015-12-21 18:12:06 +08:00
|
|
|
return SS->Sym.getType() == STT_GNU_IFUNC;
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2015-10-15 05:30:32 +08:00
|
|
|
namespace {
|
|
|
|
// Target hooks for 32-bit x86 (i386): PLT/GOT entry emission, relocation
// application, and TLS relaxations (GD->IE, GD->LE, LD->LE, IE->LE),
// implemented by the private relocateTls* helpers below.
class X86TargetInfo final : public TargetInfo {
public:
  X86TargetInfo();
  void writeGotPltHeader(uint8_t *Buf) const override;
  unsigned getDynRel(unsigned Type) const override;
  unsigned getTlsGotRel(unsigned Type) const override;
  bool isTlsLocalDynamicRel(unsigned Type) const override;
  bool isTlsGlobalDynamicRel(unsigned Type) const override;
  bool isTlsDynRel(unsigned Type, const SymbolBody &S) const override;
  void writeGotPlt(uint8_t *Buf, uint64_t Plt) const override;
  void writePltZero(uint8_t *Buf) const override;
  void writePlt(uint8_t *Buf, uint64_t GotEntryAddr, uint64_t PltEntryAddr,
                int32_t Index, unsigned RelOff) const override;
  bool needsCopyRel(uint32_t Type, const SymbolBody &S) const override;
  bool needsDynRelative(unsigned Type) const override;
  bool needsGot(uint32_t Type, SymbolBody &S) const override;
  PltNeed needsPlt(uint32_t Type, const SymbolBody &S) const override;
  void relocateOne(uint8_t *Loc, uint8_t *BufEnd, uint32_t Type, uint64_t P,
                   uint64_t SA, uint64_t ZA = 0,
                   uint8_t *PairedLoc = nullptr) const override;
  bool canRelaxTls(unsigned Type, const SymbolBody *S) const override;
  unsigned relaxTls(uint8_t *Loc, uint8_t *BufEnd, uint32_t Type, uint64_t P,
                    uint64_t SA, const SymbolBody *S) const override;
  bool isGotRelative(uint32_t Type) const override;

private:
  // In-place TLS access-model rewrites; each patches the instruction
  // bytes around Loc. See Drepper's TLS document referenced at the
  // definitions below.
  void relocateTlsLdToLe(uint8_t *Loc, uint8_t *BufEnd, uint64_t P,
                         uint64_t SA) const;
  void relocateTlsGdToIe(uint8_t *Loc, uint8_t *BufEnd, uint64_t P,
                         uint64_t SA) const;
  void relocateTlsGdToLe(uint8_t *Loc, uint8_t *BufEnd, uint64_t P,
                         uint64_t SA) const;
  void relocateTlsIeToLe(unsigned Type, uint8_t *Loc, uint8_t *BufEnd,
                         uint64_t P, uint64_t SA) const;
};
|
|
|
|
|
|
|
|
// Target hooks for x86-64: PLT/GOT emission, relocation application,
// and TLS relaxations (LD->LE, GD->LE, GD->IE, IE->LE).
class X86_64TargetInfo final : public TargetInfo {
public:
  X86_64TargetInfo();
  unsigned getTlsGotRel(unsigned Type) const override;
  bool isTlsLocalDynamicRel(unsigned Type) const override;
  bool isTlsGlobalDynamicRel(unsigned Type) const override;
  bool isTlsDynRel(unsigned Type, const SymbolBody &S) const override;
  void writeGotPltHeader(uint8_t *Buf) const override;
  void writeGotPlt(uint8_t *Buf, uint64_t Plt) const override;
  void writePltZero(uint8_t *Buf) const override;
  void writePlt(uint8_t *Buf, uint64_t GotEntryAddr, uint64_t PltEntryAddr,
                int32_t Index, unsigned RelOff) const override;
  bool needsCopyRel(uint32_t Type, const SymbolBody &S) const override;
  bool needsGot(uint32_t Type, SymbolBody &S) const override;
  PltNeed needsPlt(uint32_t Type, const SymbolBody &S) const override;
  void relocateOne(uint8_t *Loc, uint8_t *BufEnd, uint32_t Type, uint64_t P,
                   uint64_t SA, uint64_t ZA = 0,
                   uint8_t *PairedLoc = nullptr) const override;
  bool isRelRelative(uint32_t Type) const override;
  bool canRelaxTls(unsigned Type, const SymbolBody *S) const override;
  bool isSizeRel(uint32_t Type) const override;
  unsigned relaxTls(uint8_t *Loc, uint8_t *BufEnd, uint32_t Type, uint64_t P,
                    uint64_t SA, const SymbolBody *S) const override;

private:
  // TLS access-model rewrites (patch instruction bytes around Loc).
  void relocateTlsLdToLe(uint8_t *Loc, uint8_t *BufEnd, uint64_t P,
                         uint64_t SA) const;
  void relocateTlsGdToLe(uint8_t *Loc, uint8_t *BufEnd, uint64_t P,
                         uint64_t SA) const;
  void relocateTlsGdToIe(uint8_t *Loc, uint8_t *BufEnd, uint64_t P,
                         uint64_t SA) const;
  void relocateTlsIeToLe(uint8_t *Loc, uint8_t *BufEnd, uint64_t P,
                         uint64_t SA) const;
};
|
|
|
|
|
2016-01-12 03:45:33 +08:00
|
|
|
// Target hooks for 32-bit PowerPC. Only relocation application and
// the PC-relative classification are overridden.
class PPCTargetInfo final : public TargetInfo {
public:
  PPCTargetInfo();
  void relocateOne(uint8_t *Loc, uint8_t *BufEnd, uint32_t Type, uint64_t P,
                   uint64_t SA, uint64_t ZA = 0,
                   uint8_t *PairedLoc = nullptr) const override;
  bool isRelRelative(uint32_t Type) const override;
};
|
|
|
|
|
2015-10-15 05:30:32 +08:00
|
|
|
// Target hooks for 64-bit PowerPC (ELFv1-style PLT stubs).
class PPC64TargetInfo final : public TargetInfo {
public:
  PPC64TargetInfo();
  void writePlt(uint8_t *Buf, uint64_t GotEntryAddr, uint64_t PltEntryAddr,
                int32_t Index, unsigned RelOff) const override;
  bool needsGot(uint32_t Type, SymbolBody &S) const override;
  PltNeed needsPlt(uint32_t Type, const SymbolBody &S) const override;
  void relocateOne(uint8_t *Loc, uint8_t *BufEnd, uint32_t Type, uint64_t P,
                   uint64_t SA, uint64_t ZA = 0,
                   uint8_t *PairedLoc = nullptr) const override;
  bool isRelRelative(uint32_t Type) const override;
};
|
|
|
|
|
|
|
|
// Target hooks for AArch64, including TLS GD->LE and IE->LE relaxations.
class AArch64TargetInfo final : public TargetInfo {
public:
  AArch64TargetInfo();
  unsigned getDynRel(unsigned Type) const override;
  bool isTlsGlobalDynamicRel(unsigned Type) const override;
  void writeGotPlt(uint8_t *Buf, uint64_t Plt) const override;
  void writePltZero(uint8_t *Buf) const override;
  void writePlt(uint8_t *Buf, uint64_t GotEntryAddr, uint64_t PltEntryAddr,
                int32_t Index, unsigned RelOff) const override;
  unsigned getTlsGotRel(unsigned Type) const override;
  bool isTlsDynRel(unsigned Type, const SymbolBody &S) const override;
  bool needsCopyRel(uint32_t Type, const SymbolBody &S) const override;
  bool needsGot(uint32_t Type, SymbolBody &S) const override;
  PltNeed needsPlt(uint32_t Type, const SymbolBody &S) const override;
  void relocateOne(uint8_t *Loc, uint8_t *BufEnd, uint32_t Type, uint64_t P,
                   uint64_t SA, uint64_t ZA = 0,
                   uint8_t *PairedLoc = nullptr) const override;
  unsigned relaxTls(uint8_t *Loc, uint8_t *BufEnd, uint32_t Type, uint64_t P,
                    uint64_t SA, const SymbolBody *S) const override;
  bool canRelaxTls(unsigned Type, const SymbolBody *S) const override;

private:
  void relocateTlsGdToLe(unsigned Type, uint8_t *Loc, uint8_t *BufEnd,
                         uint64_t P, uint64_t SA) const;
  void relocateTlsIeToLe(unsigned Type, uint8_t *Loc, uint8_t *BufEnd,
                         uint64_t P, uint64_t SA) const;

  // Fixed size reserved for the thread control block in the static TLS
  // layout used by the relaxation arithmetic.
  static const uint64_t TcbSize = 16;
};
|
|
|
|
|
2016-01-07 11:59:08 +08:00
|
|
|
// Minimal target hooks for AMDGPU: only relocation application is
// overridden; all other behavior comes from the TargetInfo defaults.
class AMDGPUTargetInfo final : public TargetInfo {
public:
  AMDGPUTargetInfo() {}
  void relocateOne(uint8_t *Loc, uint8_t *BufEnd, uint32_t Type, uint64_t P,
                   uint64_t SA, uint64_t ZA = 0,
                   uint8_t *PairedLoc = nullptr) const override;
};
|
|
|
|
|
2015-10-15 05:30:32 +08:00
|
|
|
// Target hooks for MIPS, templated on the ELF flavor (32-bit LE/BE are
// instantiated in createTarget below).
template <class ELFT> class MipsTargetInfo final : public TargetInfo {
public:
  MipsTargetInfo();
  unsigned getDynRel(unsigned Type) const override;
  void writeGotPlt(uint8_t *Buf, uint64_t Plt) const override;
  void writePltZero(uint8_t *Buf) const override;
  void writePlt(uint8_t *Buf, uint64_t GotEntryAddr, uint64_t PltEntryAddr,
                int32_t Index, unsigned RelOff) const override;
  void writeGotHeader(uint8_t *Buf) const override;
  bool needsCopyRel(uint32_t Type, const SymbolBody &S) const override;
  bool needsGot(uint32_t Type, SymbolBody &S) const override;
  PltNeed needsPlt(uint32_t Type, const SymbolBody &S) const override;
  // Note: this override names the symbol-address parameter S rather
  // than SA as the other targets do.
  void relocateOne(uint8_t *Loc, uint8_t *BufEnd, uint32_t Type, uint64_t P,
                   uint64_t S, uint64_t ZA = 0,
                   uint8_t *PairedLoc = nullptr) const override;
  bool isHintRel(uint32_t Type) const override;
  bool isRelRelative(uint32_t Type) const override;
};
|
|
|
|
} // anonymous namespace
|
|
|
|
|
2015-10-14 00:08:15 +08:00
|
|
|
// Instantiates the TargetInfo subclass matching the configured output
// machine. The caller owns the returned object. Unknown machines (and
// unsupported MIPS flavors) are fatal errors.
TargetInfo *createTarget() {
  switch (Config->EMachine) {
  case EM_386:
    return new X86TargetInfo();
  case EM_AARCH64:
    return new AArch64TargetInfo();
  case EM_AMDGPU:
    return new AMDGPUTargetInfo();
  case EM_MIPS:
    // Only the 32-bit little- and big-endian flavors are supported.
    if (Config->EKind == ELF32LEKind)
      return new MipsTargetInfo<ELF32LE>();
    if (Config->EKind == ELF32BEKind)
      return new MipsTargetInfo<ELF32BE>();
    fatal("Unsupported MIPS target");
  case EM_PPC:
    return new PPCTargetInfo();
  case EM_PPC64:
    return new PPC64TargetInfo();
  case EM_X86_64:
    return new X86_64TargetInfo();
  }
  fatal("Unknown target machine");
}
|
|
|
|
|
2015-09-23 02:19:46 +08:00
|
|
|
// Out-of-line virtual destructor anchors the vtable in this file.
TargetInfo::~TargetInfo() {}

// Default: no TLS relocation can be relaxed; targets that support TLS
// optimizations override this.
bool TargetInfo::canRelaxTls(unsigned Type, const SymbolBody *S) const {
  return false;
}
|
|
|
|
|
2015-11-10 16:39:27 +08:00
|
|
|
// Base virtual address of the output image: zero when producing a
// shared object, otherwise the target's VAStart constant.
uint64_t TargetInfo::getVAStart() const {
  if (Config->Shared)
    return 0;
  return VAStart;
}
|
|
|
|
|
2015-12-17 09:18:40 +08:00
|
|
|
// Default: no relocation type requires a copy relocation; targets with
// copy-relocation semantics (e.g. x86) override this.
bool TargetInfo::needsCopyRel(uint32_t Type, const SymbolBody &S) const {
  return false;
}

// Default: no relocation type is treated as a dynamic TLS relocation.
bool TargetInfo::isTlsDynRel(unsigned Type, const SymbolBody &S) const {
  return false;
}
|
2015-10-06 03:30:12 +08:00
|
|
|
|
2016-01-29 12:05:09 +08:00
|
|
|
// Default classifications; individual targets override where needed.
bool TargetInfo::isGotRelative(uint32_t Type) const { return false; }

bool TargetInfo::isHintRel(uint32_t Type) const { return false; }

// Most relocations are assumed position-relative unless overridden.
bool TargetInfo::isRelRelative(uint32_t Type) const { return true; }

bool TargetInfo::isSizeRel(uint32_t Type) const { return false; }

// Default: no relocation type requires a GOT entry.
bool TargetInfo::needsGot(uint32_t Type, SymbolBody &S) const { return false; }
|
2016-01-29 12:05:09 +08:00
|
|
|
|
2016-02-12 23:47:37 +08:00
|
|
|
// Default: no relocation type requires a PLT entry.
TargetInfo::PltNeed TargetInfo::needsPlt(uint32_t Type,
                                         const SymbolBody &S) const {
  return Plt_No;
}

// Defaults for the TLS local/global-dynamic classification; overridden
// by targets that implement those models.
bool TargetInfo::isTlsLocalDynamicRel(unsigned Type) const {
  return false;
}

bool TargetInfo::isTlsGlobalDynamicRel(unsigned Type) const {
  return false;
}

// Default TLS relaxation: do nothing. The return value is the number of
// following relocations the caller should skip (targets return 1 when
// they consume the paired __tls_get_addr relocation).
unsigned TargetInfo::relaxTls(uint8_t *Loc, uint8_t *BufEnd, uint32_t Type,
                              uint64_t P, uint64_t SA,
                              const SymbolBody *S) const {
  return 0;
}
|
2015-11-24 17:00:06 +08:00
|
|
|
|
2015-09-23 05:35:51 +08:00
|
|
|
// Configures the i386 relocation-type constants and PLT geometry used
// by the generic linker machinery.
X86TargetInfo::X86TargetInfo() {
  CopyRel = R_386_COPY;
  GotRel = R_386_GLOB_DAT;
  PltRel = R_386_JUMP_SLOT;
  IRelativeRel = R_386_IRELATIVE;
  RelativeRel = R_386_RELATIVE;
  TlsGotRel = R_386_TLS_TPOFF;
  TlsModuleIndexRel = R_386_TLS_DTPMOD32;
  TlsOffsetRel = R_386_TLS_DTPOFF32;
  // i386 uses lazy PLT binding; each PLT entry and the PLT header are
  // 16 bytes (see writePlt/writePltZero below).
  UseLazyBinding = true;
  PltEntrySize = 16;
  PltZeroSize = 16;
}
|
|
|
|
|
2016-01-29 10:33:45 +08:00
|
|
|
// The first .got.plt word holds the address of the _DYNAMIC section.
void X86TargetInfo::writeGotPltHeader(uint8_t *Buf) const {
  write32le(Buf, Out<ELF32LE>::Dynamic->getVA());
}

void X86TargetInfo::writeGotPlt(uint8_t *Buf, uint64_t Plt) const {
  // Entries in .got.plt initially points back to the corresponding
  // PLT entries with a fixed offset to skip the first instruction.
  write32le(Buf, Plt + 6);
}
|
2015-09-23 02:19:46 +08:00
|
|
|
|
2016-01-29 10:33:45 +08:00
|
|
|
unsigned X86TargetInfo::getDynRel(unsigned Type) const {
|
2015-11-26 04:41:53 +08:00
|
|
|
if (Type == R_386_TLS_LE)
|
|
|
|
return R_386_TLS_TPOFF;
|
|
|
|
if (Type == R_386_TLS_LE_32)
|
|
|
|
return R_386_TLS_TPOFF32;
|
|
|
|
return Type;
|
|
|
|
}
|
|
|
|
|
2016-01-29 09:49:32 +08:00
|
|
|
unsigned X86TargetInfo::getTlsGotRel(unsigned Type) const {
|
2015-12-17 17:32:21 +08:00
|
|
|
if (Type == R_386_TLS_IE)
|
|
|
|
return Type;
|
2016-01-29 09:49:32 +08:00
|
|
|
return TlsGotRel;
|
2015-12-17 17:32:21 +08:00
|
|
|
}
|
|
|
|
|
2016-02-12 21:43:03 +08:00
|
|
|
// General-dynamic TLS model is requested via R_386_TLS_GD.
bool X86TargetInfo::isTlsGlobalDynamicRel(unsigned Type) const {
  return Type == R_386_TLS_GD;
}

// Local-dynamic TLS model is requested via R_386_TLS_LDM.
bool X86TargetInfo::isTlsLocalDynamicRel(unsigned Type) const {
  return Type == R_386_TLS_LDM;
}
|
|
|
|
|
2016-01-29 10:33:45 +08:00
|
|
|
bool X86TargetInfo::isTlsDynRel(unsigned Type, const SymbolBody &S) const {
|
2015-12-02 17:58:20 +08:00
|
|
|
if (Type == R_386_TLS_LE || Type == R_386_TLS_LE_32 ||
|
|
|
|
Type == R_386_TLS_GOTIE)
|
2015-11-26 04:41:53 +08:00
|
|
|
return Config->Shared;
|
2015-12-17 17:32:21 +08:00
|
|
|
if (Type == R_386_TLS_IE)
|
|
|
|
return canBePreempted(&S, true);
|
2015-12-09 17:55:54 +08:00
|
|
|
return Type == R_386_TLS_GD;
|
2015-11-26 04:41:53 +08:00
|
|
|
}
|
|
|
|
|
2016-01-29 11:51:49 +08:00
|
|
|
// Emits PLT entry 0 (the 16-byte lazy-resolution header).
void X86TargetInfo::writePltZero(uint8_t *Buf) const {
  // Executable files and shared object files have
  // separate procedure linkage tables.
  if (Config->Shared) {
    // PIC form: GOT is addressed relative to %ebx, so no absolute
    // addresses need to be patched in.
    const uint8_t V[] = {
        0xff, 0xb3, 0x04, 0x00, 0x00, 0x00, // pushl 4(%ebx)
        0xff, 0xa3, 0x08, 0x00, 0x00, 0x00, // jmp *8(%ebx)
        0x90, 0x90, 0x90, 0x90              // nop; nop; nop; nop
    };
    memcpy(Buf, V, sizeof(V));
    return;
  }

  // Non-PIC form: the absolute addresses of GOT+4 and GOT+8 are patched
  // into the instruction immediates below.
  const uint8_t PltData[] = {
      0xff, 0x35, 0x00, 0x00, 0x00, 0x00, // pushl (GOT+4)
      0xff, 0x25, 0x00, 0x00, 0x00, 0x00, // jmp *(GOT+8)
      0x90, 0x90, 0x90, 0x90              // nop; nop; nop; nop
  };
  memcpy(Buf, PltData, sizeof(PltData));
  uint32_t Got = Out<ELF32LE>::GotPlt->getVA();
  write32le(Buf + 2, Got + 4);
  write32le(Buf + 8, Got + 8);
}
|
|
|
|
|
2016-01-29 12:15:02 +08:00
|
|
|
// Emits one 16-byte PLT entry: an indirect jump through the GOT slot,
// then the lazy-binding push/jump sequence back to PLT entry 0.
void X86TargetInfo::writePlt(uint8_t *Buf, uint64_t GotEntryAddr,
                             uint64_t PltEntryAddr, int32_t Index,
                             unsigned RelOff) const {
  const uint8_t Inst[] = {
      0xff, 0x00, 0x00, 0x00, 0x00, 0x00, // jmp *foo_in_GOT|*foo@GOT(%ebx)
      0x68, 0x00, 0x00, 0x00, 0x00,       // pushl $reloc_offset
      0xe9, 0x00, 0x00, 0x00, 0x00        // jmp .PLT0@PC
  };
  memcpy(Buf, Inst, sizeof(Inst));

  // jmp *foo@GOT(%ebx) or jmp *foo_in_GOT
  // The ModRM byte selects %ebx-relative (PIC) vs absolute addressing.
  Buf[1] = Config->Shared ? 0xa3 : 0x25;
  uint32_t Got = UseLazyBinding ? Out<ELF32LE>::GotPlt->getVA()
                                : Out<ELF32LE>::Got->getVA();
  write32le(Buf + 2, Config->Shared ? GotEntryAddr - Got : GotEntryAddr);
  write32le(Buf + 7, RelOff);
  // PC-relative displacement back to PLT entry 0; the -16 accounts for
  // the address of the instruction following the 16-byte entry.
  write32le(Buf + 12, -Index * PltEntrySize - PltZeroSize - 16);
}
|
|
|
|
|
2015-12-17 09:18:40 +08:00
|
|
|
// A copy relocation is needed when an absolute data relocation
// (R_386_32/16/8) refers to an STT_OBJECT symbol from a shared library.
bool X86TargetInfo::needsCopyRel(uint32_t Type, const SymbolBody &S) const {
  if (Type != R_386_32 && Type != R_386_16 && Type != R_386_8)
    return false;
  auto *SS = dyn_cast<SharedSymbol<ELF32LE>>(&S);
  return SS && SS->Sym.getType() == STT_OBJECT;
}
|
|
|
|
|
2016-02-09 23:11:01 +08:00
|
|
|
// Decides whether this relocation requires a GOT entry for S.
bool X86TargetInfo::needsGot(uint32_t Type, SymbolBody &S) const {
  // GD needs a GOT entry only when it will be relaxed to IE for a
  // preemptible symbol (otherwise it relaxes to LE with no GOT use).
  // NOTE(review): this line dispatches through the global Target while
  // the branch below calls canRelaxTls directly — presumably
  // intentional, but worth confirming.
  if (S.isTls() && Type == R_386_TLS_GD)
    return Target->canRelaxTls(Type, &S) && canBePreempted(&S, true);
  // IE/GOTIE need a GOT entry unless they can be relaxed to LE.
  if (Type == R_386_TLS_GOTIE || Type == R_386_TLS_IE)
    return !canRelaxTls(Type, &S);
  return Type == R_386_GOT32 || needsPlt(Type, S);
}
|
|
|
|
|
2016-02-12 23:47:37 +08:00
|
|
|
// A PLT entry is required for ifunc symbols, for PLT-relative calls to
// preemptible symbols, and for PC-relative references to symbols that
// live in a shared object.
TargetInfo::PltNeed X86TargetInfo::needsPlt(uint32_t Type,
                                            const SymbolBody &S) const {
  if (isGnuIFunc<ELF32LE>(S))
    return Plt_Explicit;
  if (Type == R_386_PLT32 && canBePreempted(&S, true))
    return Plt_Explicit;
  if (Type == R_386_PC32 && S.isShared())
    return Plt_Explicit;
  return Plt_No;
}
|
|
|
|
|
2015-12-21 18:00:12 +08:00
|
|
|
bool X86TargetInfo::isGotRelative(uint32_t Type) const {
  // This relocation does not require got entry,
  // but it is relative to got and needs it to be created.
  // Here we request for that.
  return Type == R_386_GOTOFF;
}
|
|
|
|
|
2015-10-23 10:40:46 +08:00
|
|
|
// Applies a single i386 relocation at Loc. P is the place (address being
// relocated), SA is S+A (symbol value plus addend). ZA and PairedLoc are
// unused on this target.
void X86TargetInfo::relocateOne(uint8_t *Loc, uint8_t *BufEnd, uint32_t Type,
                                uint64_t P, uint64_t SA, uint64_t ZA,
                                uint8_t *PairedLoc) const {
  switch (Type) {
  case R_386_32:
    // Absolute 32-bit: add S+A into the in-place addend.
    add32le(Loc, SA);
    break;
  case R_386_GOT32: {
    // Offset from the end of the GOT (getVA() + entries*4) to the slot.
    uint64_t V = SA - Out<ELF32LE>::Got->getVA() -
                 Out<ELF32LE>::Got->getNumEntries() * 4;
    checkInt<32>(V, Type);
    add32le(Loc, V);
    break;
  }
  case R_386_GOTOFF:
    // Offset of the symbol from the GOT base.
    add32le(Loc, SA - Out<ELF32LE>::Got->getVA());
    break;
  case R_386_GOTPC:
    // PC-relative offset to the GOT base (plus addend).
    add32le(Loc, SA + Out<ELF32LE>::Got->getVA() - P);
    break;
  case R_386_PC32:
  case R_386_PLT32:
    // PC-relative 32-bit.
    add32le(Loc, SA - P);
    break;
  case R_386_TLS_GD:
  case R_386_TLS_LDM:
  case R_386_TLS_TPOFF: {
    // GOT-end-relative offset, written (not added) into the field.
    uint64_t V = SA - Out<ELF32LE>::Got->getVA() -
                 Out<ELF32LE>::Got->getNumEntries() * 4;
    checkInt<32>(V, Type);
    write32le(Loc, V);
    break;
  }
  case R_386_TLS_IE:
  case R_386_TLS_LDO_32:
    write32le(Loc, SA);
    break;
  case R_386_TLS_LE:
    // Negative offset from the end of the static TLS block.
    write32le(Loc, SA - Out<ELF32LE>::TlsPhdr->p_memsz);
    break;
  case R_386_TLS_LE_32:
    // Same offset with the opposite sign convention.
    write32le(Loc, Out<ELF32LE>::TlsPhdr->p_memsz - SA);
    break;
  default:
    fatal("unrecognized reloc " + Twine(Type));
  }
}
|
|
|
|
|
2016-01-29 08:20:12 +08:00
|
|
|
// True if the TLS access model requested by Type can be statically
// relaxed. Relaxation never applies when producing a shared object or
// when the symbol is not a TLS symbol.
bool X86TargetInfo::canRelaxTls(unsigned Type, const SymbolBody *S) const {
  if (Config->Shared || (S && !S->isTls()))
    return false;
  // LD/GD always relax in an executable; IE/GOTIE relax only when the
  // symbol cannot be preempted at runtime.
  return Type == R_386_TLS_LDO_32 || Type == R_386_TLS_LDM ||
         Type == R_386_TLS_GD ||
         (Type == R_386_TLS_IE && !canBePreempted(S, true)) ||
         (Type == R_386_TLS_GOTIE && !canBePreempted(S, true));
}

// In a shared object, R_386_TLS_IE needs an additional dynamic
// relocation of the RELATIVE flavor.
bool X86TargetInfo::needsDynRelative(unsigned Type) const {
  return Config->Shared && Type == R_386_TLS_IE;
}
|
|
|
|
|
2016-01-29 10:33:45 +08:00
|
|
|
// Dispatches the TLS relaxation for one relocation. Returns how many of
// the following relocations the caller should skip (1 when the paired
// __tls_get_addr call relocation is consumed by the rewrite).
unsigned X86TargetInfo::relaxTls(uint8_t *Loc, uint8_t *BufEnd, uint32_t Type,
                                 uint64_t P, uint64_t SA,
                                 const SymbolBody *S) const {
  switch (Type) {
  case R_386_TLS_GD:
    // GD relaxes to IE for preemptible symbols, to LE otherwise.
    if (canBePreempted(S, true))
      relocateTlsGdToIe(Loc, BufEnd, P, SA);
    else
      relocateTlsGdToLe(Loc, BufEnd, P, SA);
    // The next relocation should be against __tls_get_addr, so skip it
    return 1;
  case R_386_TLS_GOTIE:
  case R_386_TLS_IE:
    relocateTlsIeToLe(Type, Loc, BufEnd, P, SA);
    return 0;
  case R_386_TLS_LDM:
    relocateTlsLdToLe(Loc, BufEnd, P, SA);
    // The next relocation should be against __tls_get_addr, so skip it
    return 1;
  case R_386_TLS_LDO_32:
    // After LD->LE relaxation the offset is applied as a plain LE
    // relocation.
    relocateOne(Loc, BufEnd, R_386_TLS_LE, P, SA);
    return 0;
  }
  llvm_unreachable("Unknown TLS optimization");
}
|
|
|
|
|
|
|
|
// "Ulrich Drepper, ELF Handling For Thread-Local Storage" (5.1
// IA-32 Linker Optimizations, http://www.akkadia.org/drepper/tls.pdf) shows
// how GD can be optimized to IE:
//   leal x@tlsgd(, %ebx, 1),
//   call __tls_get_addr@plt
// Is converted to:
//   movl %gs:0, %eax
//   addl x@gotntpoff(%ebx), %eax
void X86TargetInfo::relocateTlsGdToIe(uint8_t *Loc, uint8_t *BufEnd, uint64_t P,
                                      uint64_t SA) const {
  const uint8_t Inst[] = {
      0x65, 0xa1, 0x00, 0x00, 0x00, 0x00, // movl %gs:0, %eax
      0x03, 0x83, 0x00, 0x00, 0x00, 0x00  // addl 0(%ebx), %eax
  };
  // Loc points into the middle of the leal; the replacement sequence
  // starts 3 bytes earlier.
  memcpy(Loc - 3, Inst, sizeof(Inst));
  // Patch the addl immediate with the GOT-end-relative slot offset.
  relocateOne(Loc + 5, BufEnd, R_386_32, P,
              SA - Out<ELF32LE>::Got->getVA() -
                  Out<ELF32LE>::Got->getNumEntries() * 4);
}
|
|
|
|
|
|
|
|
// GD can be optimized to LE:
//   leal x@tlsgd(, %ebx, 1),
//   call __tls_get_addr@plt
// Can be converted to:
//   movl %gs:0,%eax
//   addl $x@ntpoff,%eax
// But gold emits subl $foo@tpoff,%eax instead of addl.
// These instructions are completely equal in behavior.
// This method generates subl to be consistent with gold.
void X86TargetInfo::relocateTlsGdToLe(uint8_t *Loc, uint8_t *BufEnd, uint64_t P,
                                      uint64_t SA) const {
  const uint8_t Inst[] = {
      0x65, 0xa1, 0x00, 0x00, 0x00, 0x00, // movl %gs:0, %eax
      0x81, 0xe8, 0x00, 0x00, 0x00, 0x00  // subl 0(%ebx), %eax
  };
  // The rewrite starts 3 bytes before the relocated field.
  memcpy(Loc - 3, Inst, sizeof(Inst));
  // subl uses the positive distance from the symbol to the end of the
  // static TLS block (hence p_memsz - SA rather than SA - p_memsz).
  relocateOne(Loc + 5, BufEnd, R_386_32, P,
              Out<ELF32LE>::TlsPhdr->p_memsz - SA);
}
|
|
|
|
|
|
|
|
// LD can be optimized to LE:
//   leal foo(%reg),%eax
//   call ___tls_get_addr
// Is converted to:
//   movl %gs:0,%eax
//   nop
//   leal 0(%esi,1),%esi
void X86TargetInfo::relocateTlsLdToLe(uint8_t *Loc, uint8_t *BufEnd, uint64_t P,
                                      uint64_t SA) const {
  // Replacement is pure padding plus the %gs:0 load; no relocation needs
  // to be applied afterwards (P and SA are unused).
  const uint8_t Inst[] = {
      0x65, 0xa1, 0x00, 0x00, 0x00, 0x00, // movl %gs:0,%eax
      0x90,                               // nop
      0x8d, 0x74, 0x26, 0x00              // leal 0(%esi,1),%esi
  };
  // The rewrite starts 2 bytes before the relocated operand.
  memcpy(Loc - 2, Inst, sizeof(Inst));
}
|
|
|
|
|
2015-12-17 17:32:21 +08:00
|
|
|
// In some conditions, relocations can be optimized to avoid using GOT.
// This function does that for Initial Exec to Local Exec case.
// Read "ELF Handling For Thread-Local Storage, 5.1
// IA-32 Linker Optimizations" (http://www.akkadia.org/drepper/tls.pdf)
// by Ulrich Drepper for details.
void X86TargetInfo::relocateTlsIeToLe(unsigned Type, uint8_t *Loc,
                                      uint8_t *BufEnd, uint64_t P,
                                      uint64_t SA) const {
  // Ulrich's document section 6.2 says that @gotntpoff can
  // be used with MOVL or ADDL instructions.
  // @indntpoff is similar to @gotntpoff, but for use in
  // position dependent code.
  uint8_t *Inst = Loc - 2;                // opcode byte of the instruction
  uint8_t *Op = Loc - 1;                  // ModR/M byte (or 0xa1 short form)
  uint8_t Reg = (Loc[-1] >> 3) & 7;       // destination register field
  bool IsMov = *Inst == 0x8b;             // 0x8b = MOVL r32, r/m32
  if (Type == R_386_TLS_IE) {
    // For R_386_TLS_IE relocation we perform the next transformations:
    // MOVL foo@INDNTPOFF,%EAX is transformed to MOVL $foo,%EAX
    // MOVL foo@INDNTPOFF,%REG is transformed to MOVL $foo,%REG
    // ADDL foo@INDNTPOFF,%REG is transformed to ADDL $foo,%REG
    // First one is special because when EAX is used the sequence is 5 bytes
    // long, otherwise it is 6 bytes.
    if (*Op == 0xa1) {
      // 5-byte EAX short form: just swap the opcode for MOVL $imm,%eax.
      *Op = 0xb8;
    } else {
      // 6-byte form: rewrite opcode to an immediate-operand instruction
      // and turn the ModR/M byte into register-direct addressing.
      *Inst = IsMov ? 0xc7 : 0x81;
      *Op = 0xc0 | ((*Op >> 3) & 7);
    }
  } else {
    // R_386_TLS_GOTIE relocation can be optimized to
    // R_386_TLS_LE so that it does not use GOT.
    // "MOVL foo@GOTTPOFF(%RIP), %REG" is transformed to "MOVL $foo, %REG".
    // "ADDL foo@GOTNTPOFF(%RIP), %REG" is transformed to "LEAL foo(%REG), %REG"
    // Note: gold converts to ADDL instead of LEAL.
    *Inst = IsMov ? 0xc7 : 0x8d;
    if (IsMov)
      *Op = 0xc0 | ((*Op >> 3) & 7);
    else
      *Op = 0x80 | Reg | (Reg << 3);
  }
  // Finally patch the operand with the LE offset.
  relocateOne(Loc, BufEnd, R_386_TLS_LE, P, SA);
}
|
|
|
|
|
2015-09-23 05:35:51 +08:00
|
|
|
X86_64TargetInfo::X86_64TargetInfo() {
  // Dynamic relocation types emitted by the writer for this target.
  CopyRel = R_X86_64_COPY;
  GotRel = R_X86_64_GLOB_DAT;
  PltRel = R_X86_64_JUMP_SLOT;
  RelativeRel = R_X86_64_RELATIVE;
  IRelativeRel = R_X86_64_IRELATIVE;
  // TLS-related dynamic relocation types.
  TlsGotRel = R_X86_64_TPOFF64;
  TlsModuleIndexRel = R_X86_64_DTPMOD64;
  TlsOffsetRel = R_X86_64_DTPOFF64;
  // PLT layout: lazy binding with a 16-byte PLT[0] and 16-byte entries
  // (see writePltZero/writePlt).
  UseLazyBinding = true;
  PltEntrySize = 16;
  PltZeroSize = 16;
}
|
|
|
|
|
2016-01-29 10:33:45 +08:00
|
|
|
void X86_64TargetInfo::writeGotPltHeader(uint8_t *Buf) const {
  // The first .got.plt slot holds the virtual address of the dynamic
  // section.
  write64le(Buf, Out<ELF64LE>::Dynamic->getVA());
}
|
|
|
|
|
2016-01-29 10:33:45 +08:00
|
|
|
void X86_64TargetInfo::writeGotPlt(uint8_t *Buf, uint64_t Plt) const {
  // See comments in X86TargetInfo::writeGotPlt.
  // Plt + 6 points just past the 6-byte jmpq at the start of the PLT
  // entry (see writePlt), so the first call falls through to the pushq
  // and into the lazy resolver.
  write32le(Buf, Plt + 6);
}
|
|
|
|
|
2016-01-29 11:51:49 +08:00
|
|
|
void X86_64TargetInfo::writePltZero(uint8_t *Buf) const {
  // PLT[0]: pushes the module id (GOT+8) and jumps to the lazy resolver
  // address stored at GOT+16.
  const uint8_t PltData[] = {
      0xff, 0x35, 0x00, 0x00, 0x00, 0x00, // pushq GOT+8(%rip)
      0xff, 0x25, 0x00, 0x00, 0x00, 0x00, // jmp *GOT+16(%rip)
      0x0f, 0x1f, 0x40, 0x00              // nopl 0x0(rax)
  };
  memcpy(Buf, PltData, sizeof(PltData));
  uint64_t Got = Out<ELF64LE>::GotPlt->getVA();
  uint64_t Plt = Out<ELF64LE>::Plt->getVA();
  // Fill in the RIP-relative displacements: each operand field sits 2
  // bytes into its 6-byte instruction, and RIP is the address of the
  // following instruction when the displacement is applied, hence the
  // +2 / +4 adjustments.
  write32le(Buf + 2, Got - Plt + 2); // GOT+8
  write32le(Buf + 8, Got - Plt + 4); // GOT+16
}
|
2015-09-23 02:19:46 +08:00
|
|
|
|
2016-01-29 12:15:02 +08:00
|
|
|
void X86_64TargetInfo::writePlt(uint8_t *Buf, uint64_t GotEntryAddr,
                                uint64_t PltEntryAddr, int32_t Index,
                                unsigned RelOff) const {
  // Standard lazy-binding PLT entry: jump through the GOT slot; on the
  // first call the slot points back here (see writeGotPlt), so control
  // falls through to push the relocation index and enter PLT[0].
  const uint8_t Inst[] = {
      0xff, 0x25, 0x00, 0x00, 0x00, 0x00, // jmpq *got(%rip)
      0x68, 0x00, 0x00, 0x00, 0x00,       // pushq <relocation index>
      0xe9, 0x00, 0x00, 0x00, 0x00        // jmpq plt[0]
  };
  memcpy(Buf, Inst, sizeof(Inst));

  // RIP-relative displacement to the GOT entry (RIP = entry start + 6).
  write32le(Buf + 2, GotEntryAddr - PltEntryAddr - 6);
  write32le(Buf + 7, Index);
  // Backward jump to PLT[0]: the entry sits PltZeroSize + Index *
  // PltEntrySize bytes after PLT[0], and the jmp's RIP is 16 bytes into
  // the entry.
  write32le(Buf + 12, -Index * PltEntrySize - PltZeroSize - 16);
}
|
|
|
|
|
2015-12-17 09:18:40 +08:00
|
|
|
bool X86_64TargetInfo::needsCopyRel(uint32_t Type, const SymbolBody &S) const {
|
2015-10-29 00:48:58 +08:00
|
|
|
if (Type == R_X86_64_32S || Type == R_X86_64_32 || Type == R_X86_64_PC32 ||
|
|
|
|
Type == R_X86_64_64)
|
|
|
|
if (auto *SS = dyn_cast<SharedSymbol<ELF64LE>>(&S))
|
|
|
|
return SS->Sym.getType() == STT_OBJECT;
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2016-02-09 23:11:01 +08:00
|
|
|
bool X86_64TargetInfo::needsGot(uint32_t Type, SymbolBody &S) const {
|
2015-12-04 19:20:13 +08:00
|
|
|
if (Type == R_X86_64_TLSGD)
|
2016-01-29 08:20:12 +08:00
|
|
|
return Target->canRelaxTls(Type, &S) && canBePreempted(&S, true);
|
2015-11-24 17:00:06 +08:00
|
|
|
if (Type == R_X86_64_GOTTPOFF)
|
2016-01-29 08:20:12 +08:00
|
|
|
return !canRelaxTls(Type, &S);
|
2016-01-29 10:33:45 +08:00
|
|
|
return Type == R_X86_64_GOTPCREL || needsPlt(Type, S);
|
2015-09-23 02:19:46 +08:00
|
|
|
}
|
|
|
|
|
2016-02-11 19:14:46 +08:00
|
|
|
// Maps a TLS relocation that requires a GOT entry to the relocation used
// to address that entry.
unsigned X86_64TargetInfo::getTlsGotRel(unsigned Type) const {
  // No other types of TLS relocations requiring GOT should
  // reach here.
  assert(Type == R_X86_64_GOTTPOFF);
  // The GOT slot is addressed PC-relatively.
  return R_X86_64_PC32;
}
|
|
|
|
|
2016-02-12 21:43:03 +08:00
|
|
|
bool X86_64TargetInfo::isTlsGlobalDynamicRel(unsigned Type) const {
|
|
|
|
return Type == R_X86_64_TLSGD;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool X86_64TargetInfo::isTlsLocalDynamicRel(unsigned Type) const {
|
|
|
|
return Type == R_X86_64_TLSLD;
|
|
|
|
}
|
|
|
|
|
2016-01-29 10:33:45 +08:00
|
|
|
bool X86_64TargetInfo::isTlsDynRel(unsigned Type, const SymbolBody &S) const {
|
2015-12-04 19:20:13 +08:00
|
|
|
return Type == R_X86_64_GOTTPOFF || Type == R_X86_64_TLSGD;
|
2015-11-26 04:41:53 +08:00
|
|
|
}
|
|
|
|
|
2016-02-12 23:47:37 +08:00
|
|
|
// Decides whether a reference of relocation Type to S must go through a
// PLT entry, and whether the reference itself is redirected to the PLT
// (Plt_Explicit) or only the symbol's address is (Plt_Implicit).
TargetInfo::PltNeed X86_64TargetInfo::needsPlt(uint32_t Type,
                                               const SymbolBody &S) const {
  // A copy relocation keeps the data in the executable, so no PLT.
  if (needsCopyRel(Type, S))
    return Plt_No;
  // ifunc symbols are always called through their resolver's PLT entry.
  if (isGnuIFunc<ELF64LE>(S))
    return Plt_Explicit;

  switch (Type) {
  default:
    return Plt_No;
  case R_X86_64_32:
  case R_X86_64_64:
  case R_X86_64_PC32:
    // This relocation is defined to have a value of (S + A - P).
    // The problems start when a non PIC program calls a function in a shared
    // library.
    // In an ideal world, we could just report an error saying the relocation
    // can overflow at runtime.
    // In the real world with glibc, crt1.o has a R_X86_64_PC32 pointing to
    // libc.so.
    //
    // The general idea on how to handle such cases is to create a PLT entry
    // and use that as the function value.
    //
    // For the static linking part, we just return true and everything else
    // will use the PLT entry as the address.
    //
    // The remaining problem is making sure pointer equality still works. We
    // need the help of the dynamic linker for that. We let it know that we have
    // a direct reference to a so symbol by creating an undefined symbol with a
    // non zero st_value. Seeing that, the dynamic linker resolves the symbol to
    // the value of the symbol we created. This is true even for got entries, so
    // pointer equality is maintained. To avoid an infinite loop, the only entry
    // that points to the real function is a dedicated got entry used by the
    // plt. That is identified by special relocation types (R_X86_64_JUMP_SLOT,
    // R_386_JMP_SLOT, etc).
    if (!S.isShared())
      return Plt_No;
    return Plt_Implicit;
  case R_X86_64_PLT32:
    // An explicit PLT-call relocation needs the PLT only when the callee
    // is preemptible at runtime.
    if (canBePreempted(&S, true))
      return Plt_Explicit;
    return Plt_No;
  }
}
|
2015-09-23 04:54:08 +08:00
|
|
|
|
2015-10-06 03:30:12 +08:00
|
|
|
bool X86_64TargetInfo::isRelRelative(uint32_t Type) const {
|
|
|
|
switch (Type) {
|
|
|
|
default:
|
|
|
|
return false;
|
2015-11-11 09:27:58 +08:00
|
|
|
case R_X86_64_DTPOFF32:
|
2015-11-11 09:28:11 +08:00
|
|
|
case R_X86_64_DTPOFF64:
|
2015-12-01 16:41:20 +08:00
|
|
|
case R_X86_64_PC8:
|
|
|
|
case R_X86_64_PC16:
|
|
|
|
case R_X86_64_PC32:
|
|
|
|
case R_X86_64_PC64:
|
|
|
|
case R_X86_64_PLT32:
|
2015-10-06 03:30:12 +08:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-01-29 10:33:45 +08:00
|
|
|
bool X86_64TargetInfo::isSizeRel(uint32_t Type) const {
|
2016-01-08 08:13:23 +08:00
|
|
|
return Type == R_X86_64_SIZE32 || Type == R_X86_64_SIZE64;
|
2015-12-11 16:59:37 +08:00
|
|
|
}
|
|
|
|
|
2016-01-29 08:20:12 +08:00
|
|
|
bool X86_64TargetInfo::canRelaxTls(unsigned Type, const SymbolBody *S) const {
|
2015-12-17 08:04:18 +08:00
|
|
|
if (Config->Shared || (S && !S->isTls()))
|
2015-11-24 17:00:06 +08:00
|
|
|
return false;
|
2015-12-04 19:20:13 +08:00
|
|
|
return Type == R_X86_64_TLSGD || Type == R_X86_64_TLSLD ||
|
|
|
|
Type == R_X86_64_DTPOFF32 ||
|
2015-11-26 05:46:05 +08:00
|
|
|
(Type == R_X86_64_GOTTPOFF && !canBePreempted(S, true));
|
|
|
|
}
|
|
|
|
|
|
|
|
// "Ulrich Drepper, ELF Handling For Thread-Local Storage" (5.5
// x86-x64 linker optimizations, http://www.akkadia.org/drepper/tls.pdf) shows
// how LD can be optimized to LE:
//   leaq bar@tlsld(%rip), %rdi
//   callq __tls_get_addr@PLT
//   leaq bar@dtpoff(%rax), %rcx
// Is converted to:
//  .word 0x6666
//  .byte 0x66
//  mov %fs:0,%rax
//  leaq bar@tpoff(%rax), %rcx
void X86_64TargetInfo::relocateTlsLdToLe(uint8_t *Loc, uint8_t *BufEnd,
                                         uint64_t P, uint64_t SA) const {
  // The replacement is padding prefixes plus the %fs:0 load; no operand
  // needs patching here (P and SA are unused).
  const uint8_t Inst[] = {
      0x66, 0x66,                                          //.word 0x6666
      0x66,                                                //.byte 0x66
      0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00 // mov %fs:0,%rax
  };
  // The rewrite starts 3 bytes before the relocated operand.
  memcpy(Loc - 3, Inst, sizeof(Inst));
}
|
|
|
|
|
|
|
|
// "Ulrich Drepper, ELF Handling For Thread-Local Storage" (5.5
// x86-x64 linker optimizations, http://www.akkadia.org/drepper/tls.pdf) shows
// how GD can be optimized to LE:
//  .byte 0x66
//  leaq x@tlsgd(%rip), %rdi
//  .word 0x6666
//  rex64
//  call __tls_get_addr@plt
// Is converted to:
//  mov %fs:0x0,%rax
//  lea x@tpoff,%rax
void X86_64TargetInfo::relocateTlsGdToLe(uint8_t *Loc, uint8_t *BufEnd,
                                         uint64_t P, uint64_t SA) const {
  const uint8_t Inst[] = {
      0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00, // mov %fs:0x0,%rax
      0x48, 0x8d, 0x80, 0x00, 0x00, 0x00, 0x00              // lea x@tpoff,%rax
  };
  // The rewrite starts 4 bytes before the relocated operand.
  memcpy(Loc - 4, Inst, sizeof(Inst));
  // Patch the lea displacement (at Loc + 8) with the TP-relative offset.
  relocateOne(Loc + 8, BufEnd, R_X86_64_TPOFF32, P, SA);
}
|
|
|
|
|
2015-12-04 19:20:13 +08:00
|
|
|
// "Ulrich Drepper, ELF Handling For Thread-Local Storage" (5.5
// x86-x64 linker optimizations, http://www.akkadia.org/drepper/tls.pdf) shows
// how GD can be optimized to IE:
//  .byte 0x66
//  leaq x@tlsgd(%rip), %rdi
//  .word 0x6666
//  rex64
//  call __tls_get_addr@plt
// Is converted to:
//  mov %fs:0x0,%rax
//  addq x@tpoff,%rax
void X86_64TargetInfo::relocateTlsGdToIe(uint8_t *Loc, uint8_t *BufEnd,
                                         uint64_t P, uint64_t SA) const {
  const uint8_t Inst[] = {
      0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00, // mov %fs:0x0,%rax
      0x48, 0x03, 0x05, 0x00, 0x00, 0x00, 0x00              // addq x@tpoff,%rax
  };
  // The rewrite starts 4 bytes before the relocated operand.
  memcpy(Loc - 4, Inst, sizeof(Inst));
  // Patch the addq displacement PC-relatively; the place is P + 12
  // because the displacement field ends 12 bytes past P in the rewritten
  // sequence.
  relocateOne(Loc + 8, BufEnd, R_X86_64_PC32, P + 12, SA);
}
|
|
|
|
|
2015-11-24 17:00:06 +08:00
|
|
|
// In some conditions, R_X86_64_GOTTPOFF relocation can be optimized to
// R_X86_64_TPOFF32 so that it does not use GOT.
// This function does that. Read "ELF Handling For Thread-Local Storage,
// 5.5 x86-x64 linker optimizations" (http://www.akkadia.org/drepper/tls.pdf)
// by Ulrich Drepper for details.
void X86_64TargetInfo::relocateTlsIeToLe(uint8_t *Loc, uint8_t *BufEnd,
                                         uint64_t P, uint64_t SA) const {
  // Ulrich's document section 6.5 says that @gottpoff(%rip) must be
  // used in MOVQ or ADDQ instructions only.
  // "MOVQ foo@GOTTPOFF(%RIP), %REG" is transformed to "MOVQ $foo, %REG".
  // "ADDQ foo@GOTTPOFF(%RIP), %REG" is transformed to "LEAQ foo(%REG), %REG"
  // (if the register is not RSP/R12) or "ADDQ $foo, %RSP".
  // Opcodes info can be found at http://ref.x86asm.net/coder64.html#x48.
  uint8_t *Prefix = Loc - 3;              // REX prefix byte
  uint8_t *Inst = Loc - 2;                // opcode byte
  uint8_t *RegSlot = Loc - 1;             // ModR/M byte
  uint8_t Reg = Loc[-1] >> 3;             // destination register field
  bool IsMov = *Inst == 0x8b;             // 0x8b = MOVQ r64, r/m64
  bool RspAdd = !IsMov && Reg == 4;       // ADDQ targeting %rsp/%r12
  // r12 and rsp registers requires special handling.
  // Problem is that for other registers, for example leaq 0xXXXXXXXX(%r11),%r11
  // result out is 7 bytes: 4d 8d 9b XX XX XX XX,
  // but leaq 0xXXXXXXXX(%r12),%r12 is 8 bytes: 4d 8d a4 24 XX XX XX XX.
  // The same true for rsp. So we convert to addq for them, saving 1 byte that
  // we dont have.
  if (RspAdd)
    *Inst = 0x81;
  else
    *Inst = IsMov ? 0xc7 : 0x8d;
  // Adjust the REX prefix: the memory operand's index/base extension bits
  // change meaning once the operand becomes register-direct.
  if (*Prefix == 0x4c)
    *Prefix = (IsMov || RspAdd) ? 0x49 : 0x4d;
  *RegSlot = (IsMov || RspAdd) ? (0xc0 | Reg) : (0x80 | Reg | (Reg << 3));
  // Patch the operand with the TP-relative offset.
  relocateOne(Loc, BufEnd, R_X86_64_TPOFF32, P, SA);
}
|
|
|
|
|
2015-11-26 05:46:05 +08:00
|
|
|
// This function applies a TLS relocation with an optimization as described
// in the Ulrich's document. As a result of rewriting instructions at the
// relocation target, relocations immediately follow the TLS relocation (which
// would be applied to rewritten instructions) may have to be skipped.
// This function returns a number of relocations that need to be skipped.
unsigned X86_64TargetInfo::relaxTls(uint8_t *Loc, uint8_t *BufEnd,
                                    uint32_t Type, uint64_t P, uint64_t SA,
                                    const SymbolBody *S) const {
  switch (Type) {
  case R_X86_64_DTPOFF32:
    // After LD-to-LE relaxation, the dtpoff operand becomes a plain
    // TP-relative offset.
    relocateOne(Loc, BufEnd, R_X86_64_TPOFF32, P, SA);
    return 0;
  case R_X86_64_GOTTPOFF:
    relocateTlsIeToLe(Loc, BufEnd, P, SA);
    return 0;
  case R_X86_64_TLSGD: {
    // GD relaxes only to IE when the symbol is preemptible; otherwise
    // all the way to LE.
    if (canBePreempted(S, true))
      relocateTlsGdToIe(Loc, BufEnd, P, SA);
    else
      relocateTlsGdToLe(Loc, BufEnd, P, SA);
    // The next relocation should be against __tls_get_addr, so skip it
    return 1;
  }
  case R_X86_64_TLSLD:
    relocateTlsLdToLe(Loc, BufEnd, P, SA);
    // The next relocation should be against __tls_get_addr, so skip it
    return 1;
  }
  llvm_unreachable("Unknown TLS optimization");
}
|
|
|
|
|
2015-10-23 10:40:46 +08:00
|
|
|
// Applies a single x86-64 relocation at Loc. P is the address of the
// place, SA is S+A (symbol value plus addend), and ZA is the value
// written by the SIZE relocations (presumably the symbol size plus
// addend -- confirm against the caller). PairedLoc is unused on this
// target. Unknown types are a fatal error.
void X86_64TargetInfo::relocateOne(uint8_t *Loc, uint8_t *BufEnd, uint32_t Type,
                                   uint64_t P, uint64_t SA, uint64_t ZA,
                                   uint8_t *PairedLoc) const {
  switch (Type) {
  case R_X86_64_32:
    // Zero-extended 32-bit value; must fit in an unsigned 32-bit field.
    checkUInt<32>(SA, Type);
    write32le(Loc, SA);
    break;
  case R_X86_64_32S:
    // Sign-extended 32-bit value; must fit in a signed 32-bit field.
    checkInt<32>(SA, Type);
    write32le(Loc, SA);
    break;
  case R_X86_64_64:
  case R_X86_64_DTPOFF64:
    write64le(Loc, SA);
    break;
  case R_X86_64_DTPOFF32:
    write32le(Loc, SA);
    break;
  case R_X86_64_GOTPCREL:
  case R_X86_64_PC32:
  case R_X86_64_PLT32:
  case R_X86_64_TLSGD:
  case R_X86_64_TLSLD:
    // PC-relative forms: the stored value is SA - P.
    write32le(Loc, SA - P);
    break;
  case R_X86_64_SIZE32:
    write32le(Loc, ZA);
    break;
  case R_X86_64_SIZE64:
    write64le(Loc, ZA);
    break;
  case R_X86_64_TPOFF32: {
    // Offset relative to the end of the TLS segment (SA - p_memsz);
    // must fit in a signed 32-bit field.
    uint64_t Val = SA - Out<ELF64LE>::TlsPhdr->p_memsz;
    checkInt<32>(Val, Type);
    write32le(Loc, Val);
    break;
  }
  default:
    fatal("unrecognized reloc " + Twine(Type));
  }
}
|
|
|
|
|
2015-10-13 04:56:18 +08:00
|
|
|
// Relocation masks following the #lo(value), #hi(value), #ha(value),
// #higher(value), #highera(value), #highest(value), and #highesta(value)
// macros defined in section 4.5.1. Relocation Types of the PPC-elf64abi
// document. Each extracts one 16-bit slice of a 64-bit value; the "a"
// (adjusted) forms add 0x8000 first so that the slice compensates for
// sign-extension of the half below it.
static uint16_t applyPPCLo(uint64_t V) { return static_cast<uint16_t>(V); }
static uint16_t applyPPCHi(uint64_t V) {
  return static_cast<uint16_t>(V >> 16);
}
static uint16_t applyPPCHa(uint64_t V) {
  return static_cast<uint16_t>((V + 0x8000) >> 16);
}
static uint16_t applyPPCHigher(uint64_t V) {
  return static_cast<uint16_t>(V >> 32);
}
static uint16_t applyPPCHighera(uint64_t V) {
  return static_cast<uint16_t>((V + 0x8000) >> 32);
}
static uint16_t applyPPCHighest(uint64_t V) {
  return static_cast<uint16_t>(V >> 48);
}
static uint16_t applyPPCHighesta(uint64_t V) {
  return static_cast<uint16_t>((V + 0x8000) >> 48);
}
|
|
|
|
|
2016-01-12 03:45:33 +08:00
|
|
|
// 32-bit PowerPC target; uses the TargetInfo defaults unchanged.
PPCTargetInfo::PPCTargetInfo() {}
|
|
|
|
// No PPC32 relocation type is currently treated as base-independent.
bool PPCTargetInfo::isRelRelative(uint32_t Type) const { return false; }
|
|
|
|
|
|
|
|
// Applies a single 32-bit PowerPC (big-endian) relocation at Loc. Only
// the ADDR16 HA/LO forms are implemented; anything else is a fatal
// error. ZA and PairedLoc are unused on this target.
void PPCTargetInfo::relocateOne(uint8_t *Loc, uint8_t *BufEnd, uint32_t Type,
                                uint64_t P, uint64_t SA, uint64_t ZA,
                                uint8_t *PairedLoc) const {
  switch (Type) {
  case R_PPC_ADDR16_HA:
    // #ha(value): high half, adjusted for the sign of the low half.
    write16be(Loc, applyPPCHa(SA));
    break;
  case R_PPC_ADDR16_LO:
    // #lo(value): low 16 bits.
    write16be(Loc, applyPPCLo(SA));
    break;
  default:
    fatal("unrecognized reloc " + Twine(Type));
  }
}
|
|
|
|
|
2015-09-23 04:54:08 +08:00
|
|
|
PPC64TargetInfo::PPC64TargetInfo() {
  // Dynamic relocation types for this target.
  GotRel = R_PPC64_GLOB_DAT;
  RelativeRel = R_PPC64_RELATIVE;
  // Each PLT entry is an 8-instruction (32-byte) stub; see writePlt.
  PltEntrySize = 32;

  // We need 64K pages (at least under glibc/Linux, the loader won't
  // set different permissions on a finer granularity than that).
  PageSize = 65536;

  // The PPC64 ELF ABI v1 spec, says:
  //
  // It is normally desirable to put segments with different characteristics
  // in separate 256 Mbyte portions of the address space, to give the
  // operating system full paging flexibility in the 64-bit address space.
  //
  // And because the lowest non-zero 256M boundary is 0x10000000, PPC64 linkers
  // use 0x10000000 as the starting address.
  VAStart = 0x10000000;
}
|
2015-10-13 04:56:18 +08:00
|
|
|
|
2015-10-17 05:55:40 +08:00
|
|
|
uint64_t getPPC64TocBase() {
|
2015-10-13 04:56:18 +08:00
|
|
|
// The TOC consists of sections .got, .toc, .tocbss, .plt in that
|
|
|
|
// order. The TOC starts where the first of these sections starts.
|
|
|
|
|
|
|
|
// FIXME: This obviously does not do the right thing when there is no .got
|
|
|
|
// section, but there is a .toc or .tocbss section.
|
|
|
|
uint64_t TocVA = Out<ELF64BE>::Got->getVA();
|
|
|
|
if (!TocVA)
|
|
|
|
TocVA = Out<ELF64BE>::Plt->getVA();
|
|
|
|
|
|
|
|
// Per the ppc64-elf-linux ABI, The TOC base is TOC value plus 0x8000
|
|
|
|
// thus permitting a full 64 Kbytes segment. Note that the glibc startup
|
|
|
|
// code (crt1.o) assumes that you can get from the TOC base to the
|
|
|
|
// start of the .toc section with only a single (signed) 16-bit relocation.
|
|
|
|
return TocVA + 0x8000;
|
|
|
|
}
|
|
|
|
|
2016-01-29 12:15:02 +08:00
|
|
|
// Writes a 32-byte PPC64 PLT call stub that loads the target function
// descriptor through the GOT entry, TOC-relatively.
void PPC64TargetInfo::writePlt(uint8_t *Buf, uint64_t GotEntryAddr,
                               uint64_t PltEntryAddr, int32_t Index,
                               unsigned RelOff) const {
  // TOC-relative offset of the GOT entry, split into ha/lo halves below.
  uint64_t Off = GotEntryAddr - getPPC64TocBase();

  // FIXME: What we should do, in theory, is get the offset of the function
  // descriptor in the .opd section, and use that as the offset from %r2 (the
  // TOC-base pointer). Instead, we have the GOT-entry offset, and that will
  // be a pointer to the function descriptor in the .opd section. Using
  // this scheme is simpler, but requires an extra indirection per PLT dispatch.

  write32be(Buf, 0xf8410028);                       // std %r2, 40(%r1)
  write32be(Buf + 4, 0x3d620000 | applyPPCHa(Off)); // addis %r11, %r2, X@ha
  write32be(Buf + 8, 0xe98b0000 | applyPPCLo(Off)); // ld %r12, X@l(%r11)
  write32be(Buf + 12, 0xe96c0000);                  // ld %r11,0(%r12)
  write32be(Buf + 16, 0x7d6903a6);                  // mtctr %r11
  write32be(Buf + 20, 0xe84c0008);                  // ld %r2,8(%r12)
  write32be(Buf + 24, 0xe96c0010);                  // ld %r11,16(%r12)
  write32be(Buf + 28, 0x4e800420);                  // bctr
}
|
|
|
|
|
2016-02-09 23:11:01 +08:00
|
|
|
bool PPC64TargetInfo::needsGot(uint32_t Type, SymbolBody &S) const {
|
2016-01-29 10:33:45 +08:00
|
|
|
if (needsPlt(Type, S))
|
2015-10-13 04:56:18 +08:00
|
|
|
return true;
|
|
|
|
|
|
|
|
switch (Type) {
|
|
|
|
default: return false;
|
|
|
|
case R_PPC64_GOT16:
|
|
|
|
case R_PPC64_GOT16_DS:
|
2015-12-01 16:41:20 +08:00
|
|
|
case R_PPC64_GOT16_HA:
|
|
|
|
case R_PPC64_GOT16_HI:
|
|
|
|
case R_PPC64_GOT16_LO:
|
2015-10-13 04:56:18 +08:00
|
|
|
case R_PPC64_GOT16_LO_DS:
|
|
|
|
return true;
|
|
|
|
}
|
2015-09-30 07:22:16 +08:00
|
|
|
}
|
2015-10-13 04:56:18 +08:00
|
|
|
|
2016-02-12 23:47:37 +08:00
|
|
|
TargetInfo::PltNeed PPC64TargetInfo::needsPlt(uint32_t Type,
                                              const SymbolBody &S) const {
  // Only R_PPC64_REL24 function calls to preemptible symbols need to be
  // redirected through a PLT stub.
  if (Type != R_PPC64_REL24)
    return Plt_No;
  return canBePreempted(&S, false) ? Plt_Explicit : Plt_No;
}
|
2015-10-13 04:56:18 +08:00
|
|
|
|
2015-10-13 04:58:52 +08:00
|
|
|
bool PPC64TargetInfo::isRelRelative(uint32_t Type) const {
|
|
|
|
switch (Type) {
|
|
|
|
default:
|
|
|
|
return true;
|
[ELF2/PPC64] Invert PPC64TargetInfo::isRelRelative's default
When I initially implemented PPC64TargetInfo::isRelRelative, I included a fixed
set of relative relocations, and made the default false. In retrospect, this
seems unwise in two respects: First, most PPC64 relocations are relative
(either to the base address, the TOC, etc.). Second, most relocation targets
are not appropriate for R_PPC64_RELATIVE (which writes a 64-bit absolute
address). Thus, back off, and include only those relocations for which we test
(or soon will), and are obviously appropriate for R_PPC64_RELATIVE.
llvm-svn: 250540
2015-10-17 03:01:50 +08:00
|
|
|
case R_PPC64_ADDR64:
|
2015-12-01 16:41:20 +08:00
|
|
|
case R_PPC64_TOC:
|
[ELF2/PPC64] Invert PPC64TargetInfo::isRelRelative's default
When I initially implemented PPC64TargetInfo::isRelRelative, I included a fixed
set of relative relocations, and made the default false. In retrospect, this
seems unwise in two respects: First, most PPC64 relocations are relative
(either to the base address, the TOC, etc.). Second, most relocation targets
are not appropriate for R_PPC64_RELATIVE (which writes a 64-bit absolute
address). Thus, back off, and include only those relocations for which we test
(or soon will), and are obviously appropriate for R_PPC64_RELATIVE.
llvm-svn: 250540
2015-10-17 03:01:50 +08:00
|
|
|
return false;
|
2015-10-13 04:58:52 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-10-23 10:40:46 +08:00
|
|
|
void PPC64TargetInfo::relocateOne(uint8_t *Loc, uint8_t *BufEnd, uint32_t Type,
|
2015-12-11 16:59:37 +08:00
|
|
|
uint64_t P, uint64_t SA, uint64_t ZA,
|
2015-12-02 05:24:45 +08:00
|
|
|
uint8_t *PairedLoc) const {
|
2015-10-13 04:56:18 +08:00
|
|
|
uint64_t TB = getPPC64TocBase();
|
|
|
|
|
|
|
|
// For a TOC-relative relocation, adjust the addend and proceed in terms of
|
|
|
|
// the corresponding ADDR16 relocation type.
|
2015-09-23 05:12:55 +08:00
|
|
|
switch (Type) {
|
2015-10-16 02:19:39 +08:00
|
|
|
case R_PPC64_TOC16: Type = R_PPC64_ADDR16; SA -= TB; break;
|
|
|
|
case R_PPC64_TOC16_DS: Type = R_PPC64_ADDR16_DS; SA -= TB; break;
|
2015-12-01 16:41:20 +08:00
|
|
|
case R_PPC64_TOC16_HA: Type = R_PPC64_ADDR16_HA; SA -= TB; break;
|
|
|
|
case R_PPC64_TOC16_HI: Type = R_PPC64_ADDR16_HI; SA -= TB; break;
|
2015-10-16 02:19:39 +08:00
|
|
|
case R_PPC64_TOC16_LO: Type = R_PPC64_ADDR16_LO; SA -= TB; break;
|
|
|
|
case R_PPC64_TOC16_LO_DS: Type = R_PPC64_ADDR16_LO_DS; SA -= TB; break;
|
2015-10-13 04:56:18 +08:00
|
|
|
default: break;
|
|
|
|
}
|
|
|
|
|
|
|
|
switch (Type) {
|
2015-12-01 16:41:20 +08:00
|
|
|
case R_PPC64_ADDR14: {
|
|
|
|
checkAlignment<4>(SA, Type);
|
|
|
|
// Preserve the AA/LK bits in the branch instruction
|
|
|
|
uint8_t AALK = Loc[3];
|
|
|
|
write16be(Loc + 2, (AALK & 3) | (SA & 0xfffc));
|
|
|
|
break;
|
|
|
|
}
|
2015-10-13 04:56:18 +08:00
|
|
|
case R_PPC64_ADDR16:
|
2015-11-26 17:49:44 +08:00
|
|
|
checkInt<16>(SA, Type);
|
2015-10-23 10:40:46 +08:00
|
|
|
write16be(Loc, SA);
|
2015-10-13 04:56:18 +08:00
|
|
|
break;
|
|
|
|
case R_PPC64_ADDR16_DS:
|
2015-11-26 17:49:44 +08:00
|
|
|
checkInt<16>(SA, Type);
|
2015-10-23 10:40:46 +08:00
|
|
|
write16be(Loc, (read16be(Loc) & 3) | (SA & ~3));
|
2015-10-13 04:56:18 +08:00
|
|
|
break;
|
2015-12-01 16:41:20 +08:00
|
|
|
case R_PPC64_ADDR16_HA:
|
|
|
|
write16be(Loc, applyPPCHa(SA));
|
2015-10-13 04:56:18 +08:00
|
|
|
break;
|
|
|
|
case R_PPC64_ADDR16_HI:
|
2015-10-23 10:40:46 +08:00
|
|
|
write16be(Loc, applyPPCHi(SA));
|
2015-10-13 04:56:18 +08:00
|
|
|
break;
|
|
|
|
case R_PPC64_ADDR16_HIGHER:
|
2015-10-23 10:40:46 +08:00
|
|
|
write16be(Loc, applyPPCHigher(SA));
|
2015-09-23 05:12:55 +08:00
|
|
|
break;
|
2015-10-13 04:56:18 +08:00
|
|
|
case R_PPC64_ADDR16_HIGHERA:
|
2015-10-23 10:40:46 +08:00
|
|
|
write16be(Loc, applyPPCHighera(SA));
|
2015-10-13 04:56:18 +08:00
|
|
|
break;
|
|
|
|
case R_PPC64_ADDR16_HIGHEST:
|
2015-10-23 10:40:46 +08:00
|
|
|
write16be(Loc, applyPPCHighest(SA));
|
2015-10-13 04:56:18 +08:00
|
|
|
break;
|
|
|
|
case R_PPC64_ADDR16_HIGHESTA:
|
2015-10-23 10:40:46 +08:00
|
|
|
write16be(Loc, applyPPCHighesta(SA));
|
2015-10-13 04:56:18 +08:00
|
|
|
break;
|
2015-12-01 16:41:20 +08:00
|
|
|
case R_PPC64_ADDR16_LO:
|
|
|
|
write16be(Loc, applyPPCLo(SA));
|
2015-10-13 04:56:18 +08:00
|
|
|
break;
|
2015-12-01 16:41:20 +08:00
|
|
|
case R_PPC64_ADDR16_LO_DS:
|
|
|
|
write16be(Loc, (read16be(Loc) & 3) | (applyPPCLo(SA) & ~3));
|
2015-10-13 04:56:18 +08:00
|
|
|
break;
|
2015-12-01 16:41:20 +08:00
|
|
|
case R_PPC64_ADDR32:
|
|
|
|
checkInt<32>(SA, Type);
|
|
|
|
write32be(Loc, SA);
|
|
|
|
break;
|
|
|
|
case R_PPC64_ADDR64:
|
|
|
|
write64be(Loc, SA);
|
2015-10-13 04:56:18 +08:00
|
|
|
break;
|
|
|
|
case R_PPC64_REL16_HA:
|
2015-10-23 10:40:46 +08:00
|
|
|
write16be(Loc, applyPPCHa(SA - P));
|
2015-10-13 04:56:18 +08:00
|
|
|
break;
|
2015-12-01 16:41:20 +08:00
|
|
|
case R_PPC64_REL16_HI:
|
|
|
|
write16be(Loc, applyPPCHi(SA - P));
|
|
|
|
break;
|
|
|
|
case R_PPC64_REL16_LO:
|
|
|
|
write16be(Loc, applyPPCLo(SA - P));
|
2015-10-13 04:56:18 +08:00
|
|
|
break;
|
|
|
|
case R_PPC64_REL24: {
|
2015-10-17 08:48:20 +08:00
|
|
|
// If we have an undefined weak symbol, we might get here with a symbol
|
|
|
|
// address of zero. That could overflow, but the code must be unreachable,
|
|
|
|
// so don't bother doing anything at all.
|
|
|
|
if (!SA)
|
|
|
|
break;
|
|
|
|
|
[ELF2/PPC64] Resolve local-call relocations using the correct function-descriptor values
Under PPC64 ELF v1 ABI, the symbols associated with each function name don't
point directly to the code in the .text section (or similar), but rather to a
function descriptor structure in a special data section named .opd. The
elements in the .opd structure include a pointer to the actual code, and a the
relevant TOC base value. Both of these are themselves set by relocations.
When we have a local call, we need the relevant relocation to refer directly to
the target code, not to the function-descriptor in the .opd section. Only when
we have a .plt stub do we care about the address of the .opd function
descriptor itself.
So we make a few changes here:
1. Always write .opd first, so that its relocated data values are available
for later use when writing the text sections. Record a pointer to the .opd
structure, and its corresponding buffer.
2. When processing a relative branch relocation under ppc64, if the
destination points into the .opd section, read the code pointer out of the
function descriptor structure and use that instead.
This this, I can link, and run, a dynamically-compiled "hello world"
application on big-Endian PPC64/Linux (ELF v1 ABI) using lld.
llvm-svn: 250122
2015-10-13 07:16:53 +08:00
|
|
|
uint64_t PltStart = Out<ELF64BE>::Plt->getVA();
|
|
|
|
uint64_t PltEnd = PltStart + Out<ELF64BE>::Plt->getSize();
|
2015-10-16 03:39:36 +08:00
|
|
|
bool InPlt = PltStart <= SA && SA < PltEnd;
|
[ELF2/PPC64] Resolve local-call relocations using the correct function-descriptor values
Under PPC64 ELF v1 ABI, the symbols associated with each function name don't
point directly to the code in the .text section (or similar), but rather to a
function descriptor structure in a special data section named .opd. The
elements in the .opd structure include a pointer to the actual code, and a the
relevant TOC base value. Both of these are themselves set by relocations.
When we have a local call, we need the relevant relocation to refer directly to
the target code, not to the function-descriptor in the .opd section. Only when
we have a .plt stub do we care about the address of the .opd function
descriptor itself.
So we make a few changes here:
1. Always write .opd first, so that its relocated data values are available
for later use when writing the text sections. Record a pointer to the .opd
structure, and its corresponding buffer.
2. When processing a relative branch relocation under ppc64, if the
destination points into the .opd section, read the code pointer out of the
function descriptor structure and use that instead.
This this, I can link, and run, a dynamically-compiled "hello world"
application on big-Endian PPC64/Linux (ELF v1 ABI) using lld.
llvm-svn: 250122
2015-10-13 07:16:53 +08:00
|
|
|
|
|
|
|
if (!InPlt && Out<ELF64BE>::Opd) {
|
|
|
|
// If this is a local call, and we currently have the address of a
|
|
|
|
// function-descriptor, get the underlying code address instead.
|
|
|
|
uint64_t OpdStart = Out<ELF64BE>::Opd->getVA();
|
|
|
|
uint64_t OpdEnd = OpdStart + Out<ELF64BE>::Opd->getSize();
|
2015-10-16 03:39:36 +08:00
|
|
|
bool InOpd = OpdStart <= SA && SA < OpdEnd;
|
[ELF2/PPC64] Resolve local-call relocations using the correct function-descriptor values
Under PPC64 ELF v1 ABI, the symbols associated with each function name don't
point directly to the code in the .text section (or similar), but rather to a
function descriptor structure in a special data section named .opd. The
elements in the .opd structure include a pointer to the actual code, and a the
relevant TOC base value. Both of these are themselves set by relocations.
When we have a local call, we need the relevant relocation to refer directly to
the target code, not to the function-descriptor in the .opd section. Only when
we have a .plt stub do we care about the address of the .opd function
descriptor itself.
So we make a few changes here:
1. Always write .opd first, so that its relocated data values are available
for later use when writing the text sections. Record a pointer to the .opd
structure, and its corresponding buffer.
2. When processing a relative branch relocation under ppc64, if the
destination points into the .opd section, read the code pointer out of the
function descriptor structure and use that instead.
This this, I can link, and run, a dynamically-compiled "hello world"
application on big-Endian PPC64/Linux (ELF v1 ABI) using lld.
llvm-svn: 250122
2015-10-13 07:16:53 +08:00
|
|
|
|
|
|
|
if (InOpd)
|
2015-10-16 03:39:36 +08:00
|
|
|
SA = read64be(&Out<ELF64BE>::OpdBuf[SA - OpdStart]);
|
[ELF2/PPC64] Resolve local-call relocations using the correct function-descriptor values
Under PPC64 ELF v1 ABI, the symbols associated with each function name don't
point directly to the code in the .text section (or similar), but rather to a
function descriptor structure in a special data section named .opd. The
elements in the .opd structure include a pointer to the actual code, and a the
relevant TOC base value. Both of these are themselves set by relocations.
When we have a local call, we need the relevant relocation to refer directly to
the target code, not to the function-descriptor in the .opd section. Only when
we have a .plt stub do we care about the address of the .opd function
descriptor itself.
So we make a few changes here:
1. Always write .opd first, so that its relocated data values are available
for later use when writing the text sections. Record a pointer to the .opd
structure, and its corresponding buffer.
2. When processing a relative branch relocation under ppc64, if the
destination points into the .opd section, read the code pointer out of the
function descriptor structure and use that instead.
This this, I can link, and run, a dynamically-compiled "hello world"
application on big-Endian PPC64/Linux (ELF v1 ABI) using lld.
llvm-svn: 250122
2015-10-13 07:16:53 +08:00
|
|
|
}
|
|
|
|
|
2015-10-13 04:56:18 +08:00
|
|
|
uint32_t Mask = 0x03FFFFFC;
|
2015-11-26 17:49:44 +08:00
|
|
|
checkInt<24>(SA - P, Type);
|
2015-10-23 10:40:46 +08:00
|
|
|
write32be(Loc, (read32be(Loc) & ~Mask) | ((SA - P) & Mask));
|
2015-10-13 05:19:18 +08:00
|
|
|
|
2015-10-23 10:40:46 +08:00
|
|
|
uint32_t Nop = 0x60000000;
|
|
|
|
if (InPlt && Loc + 8 <= BufEnd && read32be(Loc + 4) == Nop)
|
|
|
|
write32be(Loc + 4, 0xe8410028); // ld %r2, 40(%r1)
|
2015-10-13 04:56:18 +08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
case R_PPC64_REL32:
|
2015-11-26 17:49:44 +08:00
|
|
|
checkInt<32>(SA - P, Type);
|
2015-10-23 10:40:46 +08:00
|
|
|
write32be(Loc, SA - P);
|
2015-10-13 04:56:18 +08:00
|
|
|
break;
|
|
|
|
case R_PPC64_REL64:
|
2015-10-23 10:40:46 +08:00
|
|
|
write64be(Loc, SA - P);
|
2015-10-13 04:56:18 +08:00
|
|
|
break;
|
2015-10-17 05:55:40 +08:00
|
|
|
case R_PPC64_TOC:
|
2015-10-23 10:40:46 +08:00
|
|
|
write64be(Loc, SA);
|
2015-09-23 05:12:55 +08:00
|
|
|
break;
|
|
|
|
default:
|
ELF: Rename error -> fatal and redefine error as a non-noreturn function.
In many situations, we don't want to exit at the first error even in the
process model. For example, it is better to report all undefined symbols
rather than reporting the first one that the linker picked up randomly.
In order to handle such errors, we don't need to wrap everything with
ErrorOr (thanks for David Blaikie for pointing this out!) Instead, we
can set a flag to record the fact that we found an error and keep it
going until it reaches a reasonable checkpoint.
This idea should be applicable to other places. For example, we can
ignore broken relocations and check for errors after visiting all relocs.
In this patch, I rename error to fatal, and introduce another version of
error which doesn't call exit. That function instead sets HasError to true.
Once HasError becomes true, it stays true, so that we know that there
was an error if it is true.
I think introducing a non-noreturn error reporting function is by itself
a good idea, and it looks to me that this also provides a gradual path
towards lld-as-a-library (or at least embed-lld-to-your-program) without
sacrificing code readability with lots of ErrorOr's.
http://reviews.llvm.org/D16641
llvm-svn: 259069
2016-01-29 02:40:06 +08:00
|
|
|
fatal("unrecognized reloc " + Twine(Type));
|
2015-09-23 05:12:55 +08:00
|
|
|
}
|
|
|
|
}
|
2015-09-23 05:24:52 +08:00
|
|
|
|
2015-11-18 02:01:30 +08:00
|
|
|
// Configure AArch64-specific dynamic relocation types and PLT geometry.
AArch64TargetInfo::AArch64TargetInfo() {
  // Dynamic relocation types.
  CopyRel = R_AARCH64_COPY;
  IRelativeRel = R_AARCH64_IRELATIVE;
  GotRel = R_AARCH64_GLOB_DAT;
  PltRel = R_AARCH64_JUMP_SLOT;
  TlsGotRel = R_AARCH64_TLS_TPREL64;
  TlsModuleIndexRel = R_AARCH64_TLS_DTPMOD64;
  TlsOffsetRel = R_AARCH64_TLS_DTPREL64;
  // PLT layout: a 32-byte header followed by 16-byte entries, resolved
  // lazily at first call.
  UseLazyBinding = true;
  PltEntrySize = 16;
  PltZeroSize = 32;
}
|
|
|
|
|
2016-02-12 21:43:03 +08:00
|
|
|
bool AArch64TargetInfo::isTlsGlobalDynamicRel(unsigned Type) const {
|
|
|
|
return Type == R_AARCH64_TLSDESC_ADR_PAGE21 ||
|
|
|
|
Type == R_AARCH64_TLSDESC_LD64_LO12_NC ||
|
|
|
|
Type == R_AARCH64_TLSDESC_ADD_LO12_NC ||
|
|
|
|
Type == R_AARCH64_TLSDESC_CALL;
|
|
|
|
}
|
|
|
|
|
2016-01-29 10:33:45 +08:00
|
|
|
unsigned AArch64TargetInfo::getDynRel(unsigned Type) const {
|
2015-12-05 14:20:24 +08:00
|
|
|
if (Type == R_AARCH64_ABS32 || Type == R_AARCH64_ABS64)
|
|
|
|
return Type;
|
|
|
|
StringRef S = getELFRelocationTypeName(EM_AARCH64, Type);
|
2016-02-02 07:28:21 +08:00
|
|
|
error("Relocation " + S + " cannot be used when making a shared object; "
|
2015-12-05 14:20:24 +08:00
|
|
|
"recompile with -fPIC.");
|
2016-02-02 07:28:21 +08:00
|
|
|
// Keep it going with a dummy value so that we can find more reloc errors.
|
|
|
|
return R_AARCH64_ABS32;
|
2015-12-05 14:20:24 +08:00
|
|
|
}
|
|
|
|
|
2016-01-29 10:33:45 +08:00
|
|
|
void AArch64TargetInfo::writeGotPlt(uint8_t *Buf, uint64_t Plt) const {
|
2015-11-18 02:01:30 +08:00
|
|
|
write64le(Buf, Out<ELF64LE>::Plt->getVA());
|
|
|
|
}
|
2015-10-20 16:54:27 +08:00
|
|
|
|
2016-01-29 11:51:49 +08:00
|
|
|
void AArch64TargetInfo::writePltZero(uint8_t *Buf) const {
|
2015-11-18 02:01:30 +08:00
|
|
|
const uint8_t PltData[] = {
|
|
|
|
0xf0, 0x7b, 0xbf, 0xa9, // stp x16, x30, [sp,#-16]!
|
|
|
|
0x10, 0x00, 0x00, 0x90, // adrp x16, Page(&(.plt.got[2]))
|
|
|
|
0x11, 0x02, 0x40, 0xf9, // ldr x17, [x16, Offset(&(.plt.got[2]))]
|
|
|
|
0x10, 0x02, 0x00, 0x91, // add x16, x16, Offset(&(.plt.got[2]))
|
|
|
|
0x20, 0x02, 0x1f, 0xd6, // br x17
|
|
|
|
0x1f, 0x20, 0x03, 0xd5, // nop
|
|
|
|
0x1f, 0x20, 0x03, 0xd5, // nop
|
|
|
|
0x1f, 0x20, 0x03, 0xd5 // nop
|
|
|
|
};
|
|
|
|
memcpy(Buf, PltData, sizeof(PltData));
|
|
|
|
|
2016-01-29 11:51:49 +08:00
|
|
|
uint64_t Got = Out<ELF64LE>::GotPlt->getVA();
|
|
|
|
uint64_t Plt = Out<ELF64LE>::Plt->getVA();
|
|
|
|
relocateOne(Buf + 4, Buf + 8, R_AARCH64_ADR_PREL_PG_HI21, Plt + 4, Got + 16);
|
|
|
|
relocateOne(Buf + 8, Buf + 12, R_AARCH64_LDST64_ABS_LO12_NC, Plt + 8,
|
|
|
|
Got + 16);
|
|
|
|
relocateOne(Buf + 12, Buf + 16, R_AARCH64_ADD_ABS_LO12_NC, Plt + 12,
|
|
|
|
Got + 16);
|
2015-11-18 02:01:30 +08:00
|
|
|
}
|
|
|
|
|
2016-01-29 12:15:02 +08:00
|
|
|
void AArch64TargetInfo::writePlt(uint8_t *Buf, uint64_t GotEntryAddr,
|
|
|
|
uint64_t PltEntryAddr, int32_t Index,
|
|
|
|
unsigned RelOff) const {
|
2015-11-18 02:01:30 +08:00
|
|
|
const uint8_t Inst[] = {
|
|
|
|
0x10, 0x00, 0x00, 0x90, // adrp x16, Page(&(.plt.got[n]))
|
|
|
|
0x11, 0x02, 0x40, 0xf9, // ldr x17, [x16, Offset(&(.plt.got[n]))]
|
|
|
|
0x10, 0x02, 0x00, 0x91, // add x16, x16, Offset(&(.plt.got[n]))
|
|
|
|
0x20, 0x02, 0x1f, 0xd6 // br x17
|
|
|
|
};
|
|
|
|
memcpy(Buf, Inst, sizeof(Inst));
|
|
|
|
|
|
|
|
relocateOne(Buf, Buf + 4, R_AARCH64_ADR_PREL_PG_HI21, PltEntryAddr,
|
|
|
|
GotEntryAddr);
|
|
|
|
relocateOne(Buf + 4, Buf + 8, R_AARCH64_LDST64_ABS_LO12_NC, PltEntryAddr + 4,
|
|
|
|
GotEntryAddr);
|
|
|
|
relocateOne(Buf + 8, Buf + 12, R_AARCH64_ADD_ABS_LO12_NC, PltEntryAddr + 8,
|
|
|
|
GotEntryAddr);
|
|
|
|
}
|
|
|
|
|
2016-01-29 09:49:32 +08:00
|
|
|
unsigned AArch64TargetInfo::getTlsGotRel(unsigned Type) const {
|
2016-02-11 19:14:46 +08:00
|
|
|
assert(Type == R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 ||
|
|
|
|
Type == R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
|
2016-01-13 21:04:46 +08:00
|
|
|
return Type;
|
|
|
|
}
|
|
|
|
|
2016-01-29 10:33:45 +08:00
|
|
|
bool AArch64TargetInfo::isTlsDynRel(unsigned Type, const SymbolBody &S) const {
|
2016-02-12 21:43:03 +08:00
|
|
|
return Type == R_AARCH64_TLSDESC_ADR_PAGE21 ||
|
|
|
|
Type == R_AARCH64_TLSDESC_LD64_LO12_NC ||
|
|
|
|
Type == R_AARCH64_TLSDESC_ADD_LO12_NC ||
|
|
|
|
Type == R_AARCH64_TLSDESC_CALL ||
|
|
|
|
Type == R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 ||
|
2016-01-13 21:04:46 +08:00
|
|
|
Type == R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC;
|
|
|
|
}
|
|
|
|
|
2015-12-17 09:18:40 +08:00
|
|
|
bool AArch64TargetInfo::needsCopyRel(uint32_t Type, const SymbolBody &S) const {
|
2015-12-03 16:05:35 +08:00
|
|
|
if (Config->Shared)
|
|
|
|
return false;
|
|
|
|
switch (Type) {
|
|
|
|
default:
|
|
|
|
return false;
|
|
|
|
case R_AARCH64_ABS16:
|
|
|
|
case R_AARCH64_ABS32:
|
|
|
|
case R_AARCH64_ABS64:
|
|
|
|
case R_AARCH64_ADD_ABS_LO12_NC:
|
|
|
|
case R_AARCH64_ADR_PREL_LO21:
|
|
|
|
case R_AARCH64_ADR_PREL_PG_HI21:
|
|
|
|
case R_AARCH64_LDST8_ABS_LO12_NC:
|
2016-01-15 09:49:51 +08:00
|
|
|
case R_AARCH64_LDST16_ABS_LO12_NC:
|
2015-12-03 16:05:35 +08:00
|
|
|
case R_AARCH64_LDST32_ABS_LO12_NC:
|
|
|
|
case R_AARCH64_LDST64_ABS_LO12_NC:
|
2016-01-14 09:30:21 +08:00
|
|
|
case R_AARCH64_LDST128_ABS_LO12_NC:
|
2015-12-03 16:05:35 +08:00
|
|
|
if (auto *SS = dyn_cast<SharedSymbol<ELF64LE>>(&S))
|
|
|
|
return SS->Sym.getType() == STT_OBJECT;
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-02-09 23:11:01 +08:00
|
|
|
bool AArch64TargetInfo::needsGot(uint32_t Type, SymbolBody &S) const {
|
2016-01-13 21:04:46 +08:00
|
|
|
switch (Type) {
|
|
|
|
case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
|
|
|
|
case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
|
|
|
|
case R_AARCH64_ADR_GOT_PAGE:
|
|
|
|
case R_AARCH64_LD64_GOT_LO12_NC:
|
|
|
|
return true;
|
|
|
|
default:
|
2016-01-29 10:33:45 +08:00
|
|
|
return needsPlt(Type, S);
|
2016-01-13 21:04:46 +08:00
|
|
|
}
|
2015-09-30 07:22:16 +08:00
|
|
|
}
|
2015-11-18 02:01:30 +08:00
|
|
|
|
2016-02-12 23:47:37 +08:00
|
|
|
// A PLT entry is needed for IFUNC symbols and for branch relocations whose
// target may be preempted at runtime.
TargetInfo::PltNeed AArch64TargetInfo::needsPlt(uint32_t Type,
                                                const SymbolBody &S) const {
  if (isGnuIFunc<ELF64LE>(S))
    return Plt_Explicit;
  switch (Type) {
  case R_AARCH64_CALL26:
  case R_AARCH64_CONDBR19:
  case R_AARCH64_JUMP26:
  case R_AARCH64_TSTBR14:
    return canBePreempted(&S, true) ? Plt_Explicit : Plt_No;
  default:
    return Plt_No;
  }
}
|
2015-09-27 16:45:38 +08:00
|
|
|
|
2016-02-12 21:43:03 +08:00
|
|
|
static void updateAArch64Addr(uint8_t *L, uint64_t Imm) {
|
2015-10-03 06:00:42 +08:00
|
|
|
uint32_t ImmLo = (Imm & 0x3) << 29;
|
|
|
|
uint32_t ImmHi = ((Imm & 0x1FFFFC) >> 2) << 5;
|
|
|
|
uint64_t Mask = (0x3 << 29) | (0x7FFFF << 5);
|
2015-10-07 02:54:43 +08:00
|
|
|
write32le(L, (read32le(L) & ~Mask) | ImmLo | ImmHi);
|
2015-10-03 06:00:42 +08:00
|
|
|
}
|
|
|
|
|
2016-02-12 21:43:03 +08:00
|
|
|
// OR the low 12 bits of Imm into the immediate field (bits 21:10) of the
// ADD instruction at L. Assumes those instruction bits start out zero.
static inline void updateAArch64Add(uint8_t *L, uint64_t Imm) {
  or32le(L, (Imm & 0xFFF) << 10);
}
|
|
|
|
|
2015-10-03 06:13:51 +08:00
|
|
|
// Page(Expr) is the page address of the expression Expr, defined as
// (Expr & ~0xFFF). (This applies even if the machine page size supported
// by the platform has a different value.)
static uint64_t getAArch64Page(uint64_t Expr) {
  return Expr & ~static_cast<uint64_t>(0xFFF);
}
|
|
|
|
|
2015-10-23 10:40:46 +08:00
|
|
|
void AArch64TargetInfo::relocateOne(uint8_t *Loc, uint8_t *BufEnd,
|
2015-12-02 05:24:45 +08:00
|
|
|
uint32_t Type, uint64_t P, uint64_t SA,
|
2015-12-11 16:59:37 +08:00
|
|
|
uint64_t ZA, uint8_t *PairedLoc) const {
|
2015-09-27 16:45:38 +08:00
|
|
|
switch (Type) {
|
2015-10-04 08:59:16 +08:00
|
|
|
case R_AARCH64_ABS16:
|
2015-11-26 18:05:24 +08:00
|
|
|
checkIntUInt<16>(SA, Type);
|
2015-10-23 10:40:46 +08:00
|
|
|
write16le(Loc, SA);
|
2015-10-04 08:59:16 +08:00
|
|
|
break;
|
|
|
|
case R_AARCH64_ABS32:
|
2015-11-26 18:05:24 +08:00
|
|
|
checkIntUInt<32>(SA, Type);
|
2015-10-23 10:40:46 +08:00
|
|
|
write32le(Loc, SA);
|
2015-10-04 08:59:16 +08:00
|
|
|
break;
|
|
|
|
case R_AARCH64_ABS64:
|
2015-10-23 10:40:46 +08:00
|
|
|
write64le(Loc, SA);
|
2015-10-04 08:59:16 +08:00
|
|
|
break;
|
2015-10-04 03:56:07 +08:00
|
|
|
case R_AARCH64_ADD_ABS_LO12_NC:
|
2015-10-17 05:06:55 +08:00
|
|
|
// This relocation stores 12 bits and there's no instruction
|
|
|
|
// to do it. Instead, we do a 32 bits store of the value
|
2015-10-23 10:40:46 +08:00
|
|
|
// of r_addend bitwise-or'ed Loc. This assumes that the addend
|
|
|
|
// bits in Loc are zero.
|
|
|
|
or32le(Loc, (SA & 0xFFF) << 10);
|
2015-10-04 03:56:07 +08:00
|
|
|
break;
|
2015-12-01 16:41:20 +08:00
|
|
|
case R_AARCH64_ADR_GOT_PAGE: {
|
|
|
|
uint64_t X = getAArch64Page(SA) - getAArch64Page(P);
|
|
|
|
checkInt<33>(X, Type);
|
2016-02-12 21:43:03 +08:00
|
|
|
updateAArch64Addr(Loc, (X >> 12) & 0x1FFFFF); // X[32:12]
|
2015-12-01 16:41:20 +08:00
|
|
|
break;
|
|
|
|
}
|
2015-10-07 03:57:01 +08:00
|
|
|
case R_AARCH64_ADR_PREL_LO21: {
|
2015-10-16 02:19:39 +08:00
|
|
|
uint64_t X = SA - P;
|
2015-11-26 17:49:44 +08:00
|
|
|
checkInt<21>(X, Type);
|
2016-02-12 21:43:03 +08:00
|
|
|
updateAArch64Addr(Loc, X & 0x1FFFFF);
|
2015-09-27 16:45:38 +08:00
|
|
|
break;
|
2015-10-07 03:57:01 +08:00
|
|
|
}
|
2016-01-13 21:04:46 +08:00
|
|
|
case R_AARCH64_ADR_PREL_PG_HI21:
|
|
|
|
case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21: {
|
2015-10-16 02:19:39 +08:00
|
|
|
uint64_t X = getAArch64Page(SA) - getAArch64Page(P);
|
2015-11-26 17:49:44 +08:00
|
|
|
checkInt<33>(X, Type);
|
2016-02-12 21:43:03 +08:00
|
|
|
updateAArch64Addr(Loc, (X >> 12) & 0x1FFFFF); // X[32:12]
|
2015-10-03 06:00:42 +08:00
|
|
|
break;
|
2015-10-07 03:57:01 +08:00
|
|
|
}
|
2015-12-01 16:41:20 +08:00
|
|
|
case R_AARCH64_CALL26:
|
|
|
|
case R_AARCH64_JUMP26: {
|
2015-11-13 11:26:59 +08:00
|
|
|
uint64_t X = SA - P;
|
2015-11-26 17:49:44 +08:00
|
|
|
checkInt<28>(X, Type);
|
2015-11-13 11:26:59 +08:00
|
|
|
or32le(Loc, (X & 0x0FFFFFFC) >> 2);
|
|
|
|
break;
|
|
|
|
}
|
2016-01-11 22:22:00 +08:00
|
|
|
case R_AARCH64_CONDBR19: {
|
|
|
|
uint64_t X = SA - P;
|
|
|
|
checkInt<21>(X, Type);
|
|
|
|
or32le(Loc, (X & 0x1FFFFC) << 3);
|
|
|
|
break;
|
|
|
|
}
|
2015-11-24 14:48:31 +08:00
|
|
|
case R_AARCH64_LD64_GOT_LO12_NC:
|
2016-01-13 21:04:46 +08:00
|
|
|
case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
|
2015-11-26 17:49:44 +08:00
|
|
|
checkAlignment<8>(SA, Type);
|
2015-11-24 14:48:31 +08:00
|
|
|
or32le(Loc, (SA & 0xFF8) << 7);
|
|
|
|
break;
|
2016-01-14 09:30:21 +08:00
|
|
|
case R_AARCH64_LDST128_ABS_LO12_NC:
|
|
|
|
or32le(Loc, (SA & 0x0FF8) << 6);
|
|
|
|
break;
|
2016-01-15 09:49:51 +08:00
|
|
|
case R_AARCH64_LDST16_ABS_LO12_NC:
|
|
|
|
or32le(Loc, (SA & 0x0FFC) << 9);
|
|
|
|
break;
|
2015-11-21 05:35:38 +08:00
|
|
|
case R_AARCH64_LDST8_ABS_LO12_NC:
|
|
|
|
or32le(Loc, (SA & 0xFFF) << 10);
|
|
|
|
break;
|
2015-12-01 16:41:20 +08:00
|
|
|
case R_AARCH64_LDST32_ABS_LO12_NC:
|
|
|
|
or32le(Loc, (SA & 0xFFC) << 8);
|
|
|
|
break;
|
|
|
|
case R_AARCH64_LDST64_ABS_LO12_NC:
|
|
|
|
or32le(Loc, (SA & 0xFF8) << 7);
|
|
|
|
break;
|
2015-10-30 03:55:59 +08:00
|
|
|
case R_AARCH64_PREL16:
|
2015-11-26 18:05:24 +08:00
|
|
|
checkIntUInt<16>(SA - P, Type);
|
2015-10-30 03:55:59 +08:00
|
|
|
write16le(Loc, SA - P);
|
|
|
|
break;
|
|
|
|
case R_AARCH64_PREL32:
|
2015-11-26 18:05:24 +08:00
|
|
|
checkIntUInt<32>(SA - P, Type);
|
2015-10-30 03:55:59 +08:00
|
|
|
write32le(Loc, SA - P);
|
|
|
|
break;
|
2015-10-29 00:14:18 +08:00
|
|
|
case R_AARCH64_PREL64:
|
|
|
|
write64le(Loc, SA - P);
|
|
|
|
break;
|
2016-01-11 22:27:05 +08:00
|
|
|
case R_AARCH64_TSTBR14: {
|
|
|
|
uint64_t X = SA - P;
|
|
|
|
checkInt<16>(X, Type);
|
|
|
|
or32le(Loc, (X & 0xFFFC) << 3);
|
|
|
|
break;
|
|
|
|
}
|
2016-02-12 21:43:03 +08:00
|
|
|
case R_AARCH64_TLSLE_ADD_TPREL_HI12: {
|
|
|
|
uint64_t V = llvm::alignTo(TcbSize, Out<ELF64LE>::TlsPhdr->p_align) + SA;
|
|
|
|
checkInt<24>(V, Type);
|
|
|
|
updateAArch64Add(Loc, (V & 0xFFF000) >> 12);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC: {
|
|
|
|
uint64_t V = llvm::alignTo(TcbSize, Out<ELF64LE>::TlsPhdr->p_align) + SA;
|
|
|
|
updateAArch64Add(Loc, V & 0xFFF);
|
|
|
|
break;
|
|
|
|
}
|
2015-09-27 16:45:38 +08:00
|
|
|
default:
|
ELF: Rename error -> fatal and redefine error as a non-noreturn function.
In many situations, we don't want to exit at the first error even in the
process model. For example, it is better to report all undefined symbols
rather than reporting the first one that the linker picked up randomly.
In order to handle such errors, we don't need to wrap everything with
ErrorOr (thanks for David Blaikie for pointing this out!) Instead, we
can set a flag to record the fact that we found an error and keep it
going until it reaches a reasonable checkpoint.
This idea should be applicable to other places. For example, we can
ignore broken relocations and check for errors after visiting all relocs.
In this patch, I rename error to fatal, and introduce another version of
error which doesn't call exit. That function instead sets HasError to true.
Once HasError becomes true, it stays true, so that we know that there
was an error if it is true.
I think introducing a non-noreturn error reporting function is by itself
a good idea, and it looks to me that this also provides a gradual path
towards lld-as-a-library (or at least embed-lld-to-your-program) without
sacrificing code readability with lots of ErrorOr's.
http://reviews.llvm.org/D16641
llvm-svn: 259069
2016-01-29 02:40:06 +08:00
|
|
|
fatal("unrecognized reloc " + Twine(Type));
|
2015-09-27 16:45:38 +08:00
|
|
|
}
|
|
|
|
}
|
2015-09-29 13:34:03 +08:00
|
|
|
|
2016-02-12 21:43:03 +08:00
|
|
|
bool AArch64TargetInfo::canRelaxTls(unsigned Type, const SymbolBody *S) const {
|
|
|
|
if (Config->Shared || (S && !S->isTls()))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// Global-Dynamic relocs can be relaxed to Initial-Exec if the target is
|
|
|
|
// an executable. And if the target is local it can also be fully relaxed to
|
|
|
|
// Local-Exec.
|
|
|
|
if (isTlsGlobalDynamicRel(Type))
|
|
|
|
return !canBePreempted(S, true);
|
|
|
|
|
|
|
|
// Initial-Exec relocs can be relaxed to Local-Exec if the target is a local
|
|
|
|
// symbol.
|
|
|
|
if (Type == R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 ||
|
|
|
|
Type == R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC)
|
|
|
|
return !canBePreempted(S, true);
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
unsigned AArch64TargetInfo::relaxTls(uint8_t *Loc, uint8_t *BufEnd,
|
|
|
|
uint32_t Type, uint64_t P, uint64_t SA,
|
|
|
|
const SymbolBody *S) const {
|
|
|
|
switch (Type) {
|
|
|
|
case R_AARCH64_TLSDESC_ADR_PAGE21:
|
|
|
|
case R_AARCH64_TLSDESC_LD64_LO12_NC:
|
|
|
|
case R_AARCH64_TLSDESC_ADD_LO12_NC:
|
|
|
|
case R_AARCH64_TLSDESC_CALL: {
|
|
|
|
if (canBePreempted(S, true))
|
|
|
|
fatal("Unsupported TLS optimization");
|
|
|
|
uint64_t X = S ? S->getVA<ELF64LE>() : SA;
|
|
|
|
relocateTlsGdToLe(Type, Loc, BufEnd, P, X);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
|
|
|
|
case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
|
|
|
|
relocateTlsIeToLe(Type, Loc, BufEnd, P, S->getVA<ELF64LE>());
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
llvm_unreachable("Unknown TLS optimization");
|
|
|
|
}
|
|
|
|
|
|
|
|
// Global-Dynamic relocations can be relaxed to Local-Exec if both binary is
|
|
|
|
// an executable and target is final (can notbe preempted).
|
|
|
|
void AArch64TargetInfo::relocateTlsGdToLe(unsigned Type, uint8_t *Loc,
|
|
|
|
uint8_t *BufEnd, uint64_t P,
|
|
|
|
uint64_t SA) const {
|
|
|
|
// TLSDESC Global-Dynamic relocation are in the form:
|
|
|
|
// adrp x0, :tlsdesc:v [R_AARCH64_TLSDESC_ADR_PAGE21]
|
|
|
|
// ldr x1, [x0, #:tlsdesc_lo12:v [R_AARCH64_TLSDESC_LD64_LO12_NC]
|
|
|
|
// add x0, x0, :tlsdesc_los:v [_AARCH64_TLSDESC_ADD_LO12_NC]
|
|
|
|
// .tlsdesccall [R_AARCH64_TLSDESC_CALL]
|
|
|
|
// And it can optimized to:
|
|
|
|
// movz x0, #0x0, lsl #16
|
|
|
|
// movk x0, #0x10
|
|
|
|
// nop
|
|
|
|
// nop
|
|
|
|
|
|
|
|
uint64_t TPOff = llvm::alignTo(TcbSize, Out<ELF64LE>::TlsPhdr->p_align);
|
|
|
|
uint64_t X = SA + TPOff;
|
|
|
|
checkUInt<32>(X, Type);
|
|
|
|
|
|
|
|
uint32_t NewInst;
|
|
|
|
switch (Type) {
|
|
|
|
case R_AARCH64_TLSDESC_ADD_LO12_NC:
|
|
|
|
case R_AARCH64_TLSDESC_CALL:
|
|
|
|
// nop
|
|
|
|
NewInst = 0xd503201f;
|
|
|
|
break;
|
|
|
|
case R_AARCH64_TLSDESC_ADR_PAGE21:
|
|
|
|
// movz
|
|
|
|
NewInst = 0xd2a00000 | (((X >> 16) & 0xffff) << 5);
|
|
|
|
break;
|
|
|
|
case R_AARCH64_TLSDESC_LD64_LO12_NC:
|
|
|
|
// movk
|
|
|
|
NewInst = 0xf2800000 | ((X & 0xffff) << 5);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
llvm_unreachable("Unsupported Relocation for TLS GD to LE relax");
|
|
|
|
}
|
|
|
|
write32le(Loc, NewInst);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Initial-Exec relocations can be relaxed to Local-Exec if symbol is final
|
|
|
|
// (can not be preempted).
|
|
|
|
void AArch64TargetInfo::relocateTlsIeToLe(unsigned Type, uint8_t *Loc,
|
|
|
|
uint8_t *BufEnd, uint64_t P,
|
|
|
|
uint64_t SA) const {
|
|
|
|
uint64_t TPOff = llvm::alignTo(TcbSize, Out<ELF64LE>::TlsPhdr->p_align);
|
|
|
|
uint64_t X = SA + TPOff;
|
|
|
|
checkUInt<32>(X, Type);
|
|
|
|
|
|
|
|
uint32_t Inst = read32le (Loc);
|
|
|
|
uint32_t NewInst;
|
|
|
|
if (Type == R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21) {
|
|
|
|
// Generate movz.
|
|
|
|
unsigned RegNo = (Inst & 0x1f);
|
|
|
|
NewInst = (0xd2a00000 | RegNo) | (((X >> 16) & 0xffff) << 5);
|
|
|
|
} else if (Type == R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC) {
|
|
|
|
// Generate movk
|
|
|
|
unsigned RegNo = (Inst & 0x1f);
|
|
|
|
NewInst = (0xf2800000 | RegNo) | ((X & 0xffff) << 5);
|
|
|
|
} else {
|
|
|
|
llvm_unreachable("Invalid Relocation for TLS IE to LE Relax");
|
|
|
|
}
|
|
|
|
write32le(Loc, NewInst);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2016-01-08 04:34:16 +08:00
|
|
|
// Implementing relocations for AMDGPU is low priority since most
|
|
|
|
// programs don't use relocations now. Thus, this function is not
|
|
|
|
// actually called (relocateOne is called for each relocation).
|
|
|
|
// That's why the AMDGPU port works without implementing this function.
|
2016-01-07 11:59:08 +08:00
|
|
|
void AMDGPUTargetInfo::relocateOne(uint8_t *Loc, uint8_t *BufEnd, uint32_t Type,
|
|
|
|
uint64_t P, uint64_t SA, uint64_t ZA,
|
|
|
|
uint8_t *PairedLoc) const {
|
|
|
|
llvm_unreachable("not implemented");
|
|
|
|
}
|
|
|
|
|
2015-10-14 22:24:46 +08:00
|
|
|
// Configure MIPS-specific dynamic relocation types, page size, and PLT/GOT
// geometry.
template <class ELFT> MipsTargetInfo<ELFT>::MipsTargetInfo() {
  // Both the GOT and .got.plt reserve two header entries; MIPS uses a
  // 64 KiB page.
  GotHeaderEntriesNum = 2;
  GotPltHeaderEntriesNum = 2;
  PageSize = 65536;
  // PLT layout: a 32-byte header followed by 16-byte entries, resolved
  // lazily at first call.
  PltEntrySize = 16;
  PltZeroSize = 32;
  UseLazyBinding = true;
  // Dynamic relocation types.
  CopyRel = R_MIPS_COPY;
  PltRel = R_MIPS_JUMP_SLOT;
  RelativeRel = R_MIPS_REL32;
}
|
|
|
|
|
|
|
|
template <class ELFT>
|
2016-01-29 10:33:45 +08:00
|
|
|
unsigned MipsTargetInfo<ELFT>::getDynRel(unsigned Type) const {
|
2016-01-15 05:34:50 +08:00
|
|
|
if (Type == R_MIPS_32 || Type == R_MIPS_64)
|
|
|
|
return R_MIPS_REL32;
|
|
|
|
StringRef S = getELFRelocationTypeName(EM_MIPS, Type);
|
2016-02-02 07:28:21 +08:00
|
|
|
error("Relocation " + S + " cannot be used when making a shared object; "
|
2016-01-15 05:34:50 +08:00
|
|
|
"recompile with -fPIC.");
|
2016-02-02 07:28:21 +08:00
|
|
|
// Keep it going with a dummy value so that we can find more reloc errors.
|
|
|
|
return R_MIPS_32;
|
2015-11-06 15:43:03 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
template <class ELFT>
|
2016-01-29 10:33:45 +08:00
|
|
|
void MipsTargetInfo<ELFT>::writeGotHeader(uint8_t *Buf) const {
|
2015-12-04 04:57:45 +08:00
|
|
|
typedef typename ELFFile<ELFT>::Elf_Off Elf_Off;
|
2016-01-30 06:55:38 +08:00
|
|
|
typedef typename ELFFile<ELFT>::uintX_t uintX_t;
|
|
|
|
|
|
|
|
// Set the MSB of the second GOT slot. This is not required by any
|
|
|
|
// MIPS ABI documentation, though.
|
|
|
|
//
|
|
|
|
// There is a comment in glibc saying that "The MSB of got[1] of a
|
|
|
|
// gnu object is set to identify gnu objects," and in GNU gold it
|
|
|
|
// says "the second entry will be used by some runtime loaders".
|
|
|
|
// But how this field is being used is unclear.
|
|
|
|
//
|
|
|
|
// We are not really willing to mimic other linkers behaviors
|
|
|
|
// without understanding why they do that, but because all files
|
|
|
|
// generated by GNU tools have this special GOT value, and because
|
|
|
|
// we've been doing this for years, it is probably a safe bet to
|
|
|
|
// keep doing this for now. We really need to revisit this to see
|
|
|
|
// if we had to do this.
|
2015-11-06 15:43:03 +08:00
|
|
|
auto *P = reinterpret_cast<Elf_Off *>(Buf);
|
2016-01-30 06:55:38 +08:00
|
|
|
P[1] = uintX_t(1) << (ELFT::Is64Bits ? 63 : 31);
|
2015-09-29 13:34:03 +08:00
|
|
|
}
|
|
|
|
|
2016-02-11 03:57:19 +08:00
|
|
|
template <class ELFT>
|
|
|
|
void MipsTargetInfo<ELFT>::writeGotPlt(uint8_t *Buf, uint64_t Plt) const {
|
|
|
|
write32<ELFT::TargetEndianness>(Buf, Out<ELFT>::Plt->getVA());
|
|
|
|
}
|
|
|
|
|
|
|
|
// Compute the MIPS %hi(V) value: the upper 16 bits of V, rounded so
// that (%hi(V) << 16) plus the sign-extended low 16 bits of V yields V.
static uint16_t mipsHigh(uint64_t V) {
  uint64_t Rounded = V + 0x8000;
  return static_cast<uint16_t>(Rounded >> 16);
}
|
|
|
|
|
|
|
|
// Apply a PC-relative MIPS relocation whose value field occupies the low
// BSIZE bits of the instruction word and is scaled by 2^SHIFT
// (e.g. R_MIPS_PC16 uses BSIZE=16, SHIFT=2).
template <endianness E, uint8_t BSIZE, uint8_t SHIFT>
static void applyMipsPcReloc(uint8_t *Loc, uint32_t Type, uint64_t P,
                             uint64_t S) {
  // Mask covering the BSIZE-bit value field.
  uint32_t Mask = 0xffffffff >> (32 - BSIZE);
  uint32_t Instr = read32<E>(Loc);
  // Extract the implicit addend stored in the instruction: un-scale it
  // back up by SHIFT and sign-extend from BSIZE+SHIFT bits.
  int64_t A = SignExtend64<BSIZE + SHIFT>((Instr & Mask) << SHIFT);
  // Scaled relocations require the target to be aligned to the scale.
  if (SHIFT > 0)
    checkAlignment<(1 << SHIFT)>(S + A, Type);
  int64_t V = S + A - P;
  // The PC-relative result must be representable in BSIZE+SHIFT bits.
  checkInt<BSIZE + SHIFT>(V, Type);
  write32<E>(Loc, (Instr & ~Mask) | ((V >> SHIFT) & Mask));
}
|
|
|
|
|
|
|
|
template <endianness E>
|
|
|
|
static void applyMipsHi16Reloc(uint8_t *Loc, uint64_t S, int64_t A) {
|
|
|
|
uint32_t Instr = read32<E>(Loc);
|
|
|
|
write32<E>(Loc, (Instr & 0xffff0000) | mipsHigh(S + A));
|
|
|
|
}
|
|
|
|
|
|
|
|
// Write the 32-byte MIPS PLT header (PLT0). The stub loads the word
// stored in GOTPLT[0] into $25 and jalr's to it, after computing the
// calling PLT entry's index in $24.
// NOTE(review): GOTPLT[0] presumably holds the runtime loader's lazy
// resolver address at run time — confirm against the dynamic linker.
template <class ELFT>
void MipsTargetInfo<ELFT>::writePltZero(uint8_t *Buf) const {
  const endianness E = ELFT::TargetEndianness;
  write32<E>(Buf, 0x3c1c0000);      // lui $28, %hi(&GOTPLT[0])
  write32<E>(Buf + 4, 0x8f990000);  // lw $25, %lo(&GOTPLT[0])($28)
  write32<E>(Buf + 8, 0x279c0000);  // addiu $28, $28, %lo(&GOTPLT[0])
  write32<E>(Buf + 12, 0x031cc023); // subu $24, $24, $28
  write32<E>(Buf + 16, 0x03e07825); // move $15, $31
  write32<E>(Buf + 20, 0x0018c082); // srl $24, $24, 2
  write32<E>(Buf + 24, 0x0320f809); // jalr $25
  write32<E>(Buf + 28, 0x2718fffe); // subu $24, $24, 2
  uint64_t Got = Out<ELFT>::GotPlt->getVA();
  uint64_t Plt = Out<ELFT>::Plt->getVA();
  // Now that the .got.plt address is known, patch the %hi immediate of
  // the lui and the %lo immediates of the lw and addiu above.
  applyMipsHi16Reloc<E>(Buf, Got, 0);
  relocateOne(Buf + 4, Buf + 8, R_MIPS_LO16, Plt + 4, Got);
  relocateOne(Buf + 8, Buf + 12, R_MIPS_LO16, Plt + 8, Got);
}
|
|
|
|
|
|
|
|
// Write one 16-byte MIPS PLT entry: load the target address from this
// entry's .got.plt slot into $25 and jump to it; $24 receives the slot
// address (used by the resolver path). Index and RelOff are unused by
// the MIPS implementation.
template <class ELFT>
void MipsTargetInfo<ELFT>::writePlt(uint8_t *Buf, uint64_t GotEntryAddr,
                                    uint64_t PltEntryAddr, int32_t Index,
                                    unsigned RelOff) const {
  const endianness E = ELFT::TargetEndianness;
  write32<E>(Buf, 0x3c0f0000);      // lui   $15, %hi(.got.plt entry)
  write32<E>(Buf + 4, 0x8df90000);  // l[wd] $25, %lo(.got.plt entry)($15)
  write32<E>(Buf + 8, 0x03200008);  // jr    $25
  write32<E>(Buf + 12, 0x25f80000); // addiu $24, $15, %lo(.got.plt entry)
  // Patch the %hi immediate of the lui and the %lo immediates of the
  // lw and addiu with the .got.plt entry address.
  applyMipsHi16Reloc<E>(Buf, GotEntryAddr, 0);
  relocateOne(Buf + 4, Buf + 8, R_MIPS_LO16, PltEntryAddr + 4, GotEntryAddr);
  relocateOne(Buf + 12, Buf + 16, R_MIPS_LO16, PltEntryAddr + 8, GotEntryAddr);
}
|
|
|
|
|
2016-02-08 18:05:13 +08:00
|
|
|
template <class ELFT>
|
|
|
|
bool MipsTargetInfo<ELFT>::needsCopyRel(uint32_t Type,
|
|
|
|
const SymbolBody &S) const {
|
|
|
|
if (Config->Shared)
|
|
|
|
return false;
|
|
|
|
if (Type == R_MIPS_HI16 || Type == R_MIPS_LO16 || isRelRelative(Type))
|
|
|
|
if (auto *SS = dyn_cast<SharedSymbol<ELFT>>(&S))
|
|
|
|
return SS->Sym.getType() == STT_OBJECT;
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2015-10-14 22:24:46 +08:00
|
|
|
template <class ELFT>
|
2016-02-09 23:11:01 +08:00
|
|
|
bool MipsTargetInfo<ELFT>::needsGot(uint32_t Type, SymbolBody &S) const {
|
2016-02-11 03:57:19 +08:00
|
|
|
return needsPlt(Type, S) || Type == R_MIPS_GOT16 || Type == R_MIPS_CALL16;
|
2015-09-30 07:22:16 +08:00
|
|
|
}
|
2015-09-29 13:34:03 +08:00
|
|
|
|
2015-10-14 22:24:46 +08:00
|
|
|
template <class ELFT>
|
2016-02-12 23:47:37 +08:00
|
|
|
TargetInfo::PltNeed MipsTargetInfo<ELFT>::needsPlt(uint32_t Type,
|
|
|
|
const SymbolBody &S) const {
|
2016-02-11 03:57:19 +08:00
|
|
|
if (needsCopyRel(Type, S))
|
2016-02-12 23:47:37 +08:00
|
|
|
return Plt_No;
|
2016-02-11 03:57:19 +08:00
|
|
|
if (Type == R_MIPS_26 && canBePreempted(&S, false))
|
2016-02-12 23:47:37 +08:00
|
|
|
return Plt_Explicit;
|
2016-02-11 03:57:19 +08:00
|
|
|
if (Type == R_MIPS_HI16 || Type == R_MIPS_LO16 || isRelRelative(Type))
|
2016-02-12 23:47:37 +08:00
|
|
|
if (S.isShared())
|
|
|
|
return Plt_Explicit;
|
|
|
|
return Plt_No;
|
2015-09-30 07:22:16 +08:00
|
|
|
}
|
2015-09-29 13:34:03 +08:00
|
|
|
|
2015-10-14 22:24:46 +08:00
|
|
|
template <class ELFT>
|
2015-10-23 10:40:46 +08:00
|
|
|
void MipsTargetInfo<ELFT>::relocateOne(uint8_t *Loc, uint8_t *BufEnd,
|
2016-02-10 18:08:35 +08:00
|
|
|
uint32_t Type, uint64_t P, uint64_t S,
|
2015-12-11 16:59:37 +08:00
|
|
|
uint64_t ZA, uint8_t *PairedLoc) const {
|
2015-11-10 05:43:00 +08:00
|
|
|
const endianness E = ELFT::TargetEndianness;
|
2015-10-12 23:10:02 +08:00
|
|
|
switch (Type) {
|
|
|
|
case R_MIPS_32:
|
2016-02-10 18:08:35 +08:00
|
|
|
add32<E>(Loc, S);
|
2015-10-12 23:10:02 +08:00
|
|
|
break;
|
2016-02-11 03:57:19 +08:00
|
|
|
case R_MIPS_26: {
|
|
|
|
uint32_t Instr = read32<E>(Loc);
|
|
|
|
// FIXME (simon): If the relocation target symbol is not a PLT entry
|
|
|
|
// we should use another expression for calculation:
|
|
|
|
// ((A << 2) | (P & 0xf0000000)) >> 2
|
|
|
|
S += SignExtend64<28>((Instr & 0x3ffffff) << 2);
|
|
|
|
write32<E>(Loc, (Instr & ~0x3ffffff) | (S >> 2));
|
|
|
|
break;
|
|
|
|
}
|
2015-12-04 04:59:51 +08:00
|
|
|
case R_MIPS_CALL16:
|
|
|
|
case R_MIPS_GOT16: {
|
2016-02-10 18:08:35 +08:00
|
|
|
int64_t V = S - getMipsGpAddr<ELFT>();
|
2015-12-04 04:59:51 +08:00
|
|
|
if (Type == R_MIPS_GOT16)
|
|
|
|
checkInt<16>(V, Type);
|
|
|
|
write32<E>(Loc, (read32<E>(Loc) & 0xffff0000) | (V & 0xffff));
|
|
|
|
break;
|
|
|
|
}
|
2015-12-25 21:02:13 +08:00
|
|
|
case R_MIPS_GPREL16: {
|
|
|
|
uint32_t Instr = read32<E>(Loc);
|
2016-02-10 18:08:35 +08:00
|
|
|
int64_t V = S + SignExtend64<16>(Instr & 0xffff) - getMipsGpAddr<ELFT>();
|
2015-12-25 21:02:13 +08:00
|
|
|
checkInt<16>(V, Type);
|
|
|
|
write32<E>(Loc, (Instr & 0xffff0000) | (V & 0xffff));
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
case R_MIPS_GPREL32:
|
2016-02-10 18:08:35 +08:00
|
|
|
write32<E>(Loc, S + int32_t(read32<E>(Loc)) - getMipsGpAddr<ELFT>());
|
2015-12-25 21:02:13 +08:00
|
|
|
break;
|
2015-12-02 05:24:45 +08:00
|
|
|
case R_MIPS_HI16: {
|
|
|
|
uint32_t Instr = read32<E>(Loc);
|
|
|
|
if (PairedLoc) {
|
|
|
|
uint64_t AHL = ((Instr & 0xffff) << 16) +
|
2015-12-04 04:57:45 +08:00
|
|
|
SignExtend64<16>(read32<E>(PairedLoc) & 0xffff);
|
2016-02-11 03:57:19 +08:00
|
|
|
applyMipsHi16Reloc<E>(Loc, S, AHL);
|
2015-12-02 05:24:45 +08:00
|
|
|
} else {
|
|
|
|
warning("Can't find matching R_MIPS_LO16 relocation for R_MIPS_HI16");
|
2016-02-11 03:57:19 +08:00
|
|
|
applyMipsHi16Reloc<E>(Loc, S, 0);
|
2015-12-02 05:24:45 +08:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
2015-12-13 14:49:14 +08:00
|
|
|
case R_MIPS_JALR:
|
|
|
|
// Ignore this optimization relocation for now
|
|
|
|
break;
|
2015-12-02 05:24:45 +08:00
|
|
|
case R_MIPS_LO16: {
|
|
|
|
uint32_t Instr = read32<E>(Loc);
|
2015-12-04 04:57:45 +08:00
|
|
|
int64_t AHL = SignExtend64<16>(Instr & 0xffff);
|
2016-02-10 18:08:35 +08:00
|
|
|
write32<E>(Loc, (Instr & 0xffff0000) | ((S + AHL) & 0xffff));
|
2015-12-02 05:24:45 +08:00
|
|
|
break;
|
|
|
|
}
|
2015-12-22 01:36:40 +08:00
|
|
|
case R_MIPS_PC16:
|
2016-02-10 18:08:35 +08:00
|
|
|
applyMipsPcReloc<E, 16, 2>(Loc, Type, P, S);
|
2015-12-22 01:36:40 +08:00
|
|
|
break;
|
|
|
|
case R_MIPS_PC19_S2:
|
2016-02-10 18:08:35 +08:00
|
|
|
applyMipsPcReloc<E, 19, 2>(Loc, Type, P, S);
|
2015-12-22 01:36:40 +08:00
|
|
|
break;
|
|
|
|
case R_MIPS_PC21_S2:
|
2016-02-10 18:08:35 +08:00
|
|
|
applyMipsPcReloc<E, 21, 2>(Loc, Type, P, S);
|
2015-12-22 01:36:40 +08:00
|
|
|
break;
|
|
|
|
case R_MIPS_PC26_S2:
|
2016-02-10 18:08:35 +08:00
|
|
|
applyMipsPcReloc<E, 26, 2>(Loc, Type, P, S);
|
2016-02-04 20:31:39 +08:00
|
|
|
break;
|
|
|
|
case R_MIPS_PC32:
|
2016-02-10 18:08:35 +08:00
|
|
|
applyMipsPcReloc<E, 32, 0>(Loc, Type, P, S);
|
2015-12-22 01:36:40 +08:00
|
|
|
break;
|
|
|
|
case R_MIPS_PCHI16: {
|
|
|
|
uint32_t Instr = read32<E>(Loc);
|
|
|
|
if (PairedLoc) {
|
|
|
|
uint64_t AHL = ((Instr & 0xffff) << 16) +
|
|
|
|
SignExtend64<16>(read32<E>(PairedLoc) & 0xffff);
|
2016-02-10 18:08:35 +08:00
|
|
|
write32<E>(Loc, (Instr & 0xffff0000) | mipsHigh(S + AHL - P));
|
2015-12-22 01:36:40 +08:00
|
|
|
} else {
|
|
|
|
warning("Can't find matching R_MIPS_PCLO16 relocation for R_MIPS_PCHI16");
|
2016-02-10 18:08:35 +08:00
|
|
|
write32<E>(Loc, (Instr & 0xffff0000) | mipsHigh(S - P));
|
2015-12-22 01:36:40 +08:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
case R_MIPS_PCLO16: {
|
|
|
|
uint32_t Instr = read32<E>(Loc);
|
|
|
|
int64_t AHL = SignExtend64<16>(Instr & 0xffff);
|
2016-02-10 18:08:35 +08:00
|
|
|
write32<E>(Loc, (Instr & 0xffff0000) | ((S + AHL - P) & 0xffff));
|
2015-12-22 01:36:40 +08:00
|
|
|
break;
|
|
|
|
}
|
2015-10-12 23:10:02 +08:00
|
|
|
default:
|
ELF: Rename error -> fatal and redefine error as a non-noreturn function.
In many situations, we don't want to exit at the first error even in the
process model. For example, it is better to report all undefined symbols
rather than reporting the first one that the linker picked up randomly.
In order to handle such errors, we don't need to wrap everything with
ErrorOr (thanks for David Blaikie for pointing this out!) Instead, we
can set a flag to record the fact that we found an error and keep it
going until it reaches a reasonable checkpoint.
This idea should be applicable to other places. For example, we can
ignore broken relocations and check for errors after visiting all relocs.
In this patch, I rename error to fatal, and introduce another version of
error which doesn't call exit. That function instead sets HasError to true.
Once HasError becomes true, it stays true, so that we know that there
was an error if it is true.
I think introducing a non-noreturn error reporting function is by itself
a good idea, and it looks to me that this also provides a gradual path
towards lld-as-a-library (or at least embed-lld-to-your-program) without
sacrificing code readability with lots of ErrorOr's.
http://reviews.llvm.org/D16641
llvm-svn: 259069
2016-01-29 02:40:06 +08:00
|
|
|
fatal("unrecognized reloc " + Twine(Type));
|
2015-10-12 23:10:02 +08:00
|
|
|
}
|
|
|
|
}
|
2015-11-06 15:43:03 +08:00
|
|
|
|
2016-01-15 04:42:09 +08:00
|
|
|
template <class ELFT>
|
2016-01-29 10:33:45 +08:00
|
|
|
bool MipsTargetInfo<ELFT>::isHintRel(uint32_t Type) const {
|
2016-01-15 04:42:09 +08:00
|
|
|
return Type == R_MIPS_JALR;
|
|
|
|
}
|
|
|
|
|
2015-12-22 01:36:40 +08:00
|
|
|
template <class ELFT>
|
|
|
|
bool MipsTargetInfo<ELFT>::isRelRelative(uint32_t Type) const {
|
|
|
|
switch (Type) {
|
|
|
|
default:
|
|
|
|
return false;
|
|
|
|
case R_MIPS_PC16:
|
|
|
|
case R_MIPS_PC19_S2:
|
|
|
|
case R_MIPS_PC21_S2:
|
|
|
|
case R_MIPS_PC26_S2:
|
2016-02-04 20:31:39 +08:00
|
|
|
case R_MIPS_PC32:
|
2015-12-22 01:36:40 +08:00
|
|
|
case R_MIPS_PCHI16:
|
|
|
|
case R_MIPS_PCLO16:
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-12-24 16:41:12 +08:00
|
|
|
// _gp is a MIPS-specific ABI-defined symbol which points to
|
|
|
|
// a location that is relative to GOT. This function returns
|
|
|
|
// the value for the symbol.
|
2015-12-04 04:57:45 +08:00
|
|
|
template <class ELFT> typename ELFFile<ELFT>::uintX_t getMipsGpAddr() {
|
2015-12-24 16:41:12 +08:00
|
|
|
unsigned GPOffset = 0x7ff0;
|
|
|
|
if (uint64_t V = Out<ELFT>::Got->getVA())
|
|
|
|
return V + GPOffset;
|
|
|
|
return 0;
|
2015-11-06 15:43:03 +08:00
|
|
|
}
|
|
|
|
|
2016-01-30 07:59:15 +08:00
|
|
|
// Explicit template instantiations for every supported ELF
// configuration (32/64-bit, little/big-endian).
template bool isGnuIFunc<ELF32LE>(const SymbolBody &S);
template bool isGnuIFunc<ELF32BE>(const SymbolBody &S);
template bool isGnuIFunc<ELF64LE>(const SymbolBody &S);
template bool isGnuIFunc<ELF64BE>(const SymbolBody &S);

template uint32_t getMipsGpAddr<ELF32LE>();
template uint32_t getMipsGpAddr<ELF32BE>();
template uint64_t getMipsGpAddr<ELF64LE>();
template uint64_t getMipsGpAddr<ELF64BE>();
|
2015-09-23 02:19:46 +08:00
|
|
|
}
|
|
|
|
}
|