RuntimeDyldELF: refactor AArch64 relocations. NFC.
llvm-svn: 290606
parent de1f4a4496
commit 5240a305a4
@@ -28,9 +28,34 @@
 using namespace llvm;
 using namespace llvm::object;
+using namespace llvm::support::endian;
 
 #define DEBUG_TYPE "dyld"
 
+static void or32le(void *P, int32_t V) { write32le(P, read32le(P) | V); }
+
+static void or32AArch64Imm(void *L, uint64_t Imm) {
+  or32le(L, (Imm & 0xFFF) << 10);
+}
+
+template <class T> static void write(bool isBE, void *P, T V) {
+  isBE ? write<T, support::big>(P, V) : write<T, support::little>(P, V);
+}
+
+static void write32AArch64Addr(void *L, uint64_t Imm) {
+  uint32_t ImmLo = (Imm & 0x3) << 29;
+  uint32_t ImmHi = (Imm & 0x1FFFFC) << 3;
+  uint64_t Mask = (0x3 << 29) | (0x1FFFFC << 3);
+  write32le(L, (read32le(L) & ~Mask) | ImmLo | ImmHi);
+}
+
+// Return the bits [Start, End] from Val shifted Start bits.
+// For instance, getBits(0xF0, 4, 8) returns 0xF.
+static uint64_t getBits(uint64_t Val, int Start, int End) {
+  uint64_t Mask = ((uint64_t)1 << (End + 1 - Start)) - 1;
+  return (Val >> Start) & Mask;
+}
+
 namespace {
 
 template <class ELFT> class DyldELFObject : public ELFObjectFile<ELFT> {
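
The helpers above centralize the read-modify-write pattern that the old code repeated in every relocation case. Below is a standalone sketch (not part of the commit) that mirrors or32le, or32AArch64Imm, and getBits to show how they compose; the local read32le/write32le stand-ins for llvm::support::endian and the sample LDR encoding are assumptions made only so the snippet compiles on its own.

#include <cassert>
#include <cstdint>
#include <cstring>

// Stand-ins for llvm::support::endian::read32le/write32le; for brevity this
// assumes a little-endian host.
static uint32_t read32le(const void *P) {
  uint32_t V;
  std::memcpy(&V, P, sizeof(V));
  return V;
}
static void write32le(void *P, uint32_t V) { std::memcpy(P, &V, sizeof(V)); }

// Same shapes as the helpers in the diff above.
static void or32le(void *P, int32_t V) { write32le(P, read32le(P) | V); }
static void or32AArch64Imm(void *L, uint64_t Imm) {
  or32le(L, (Imm & 0xFFF) << 10);
}
static uint64_t getBits(uint64_t Val, int Start, int End) {
  uint64_t Mask = ((uint64_t)1 << (End + 1 - Start)) - 1;
  return (Val >> Start) & Mask;
}

int main() {
  // The documented example: bits [4, 8] of 0xF0 are 0xF.
  assert(getBits(0xF0, 4, 8) == 0xF);

  // R_AARCH64_LDST64_ABS_LO12_NC-style use: bits 11:3 of the value are
  // OR'd into the imm12 field (bits 21:10) of a hypothetical LDR X0, [X0].
  uint32_t Insn = 0xF9400000;
  or32AArch64Imm(&Insn, getBits(0x12345678, 3, 11));
  assert(((Insn >> 10) & 0xFFF) == ((0x12345678u >> 3) & 0x1FF));
  return 0;
}
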
@@ -339,25 +364,14 @@ void RuntimeDyldELF::resolveAArch64Relocation(const SectionEntry &Section,
   default:
     llvm_unreachable("Relocation type not implemented yet!");
     break;
-  case ELF::R_AARCH64_ABS64: {
-    uint64_t *TargetPtr =
-        reinterpret_cast<uint64_t *>(Section.getAddressWithOffset(Offset));
-    if (isBE)
-      support::ubig64_t::ref{TargetPtr} = Value + Addend;
-    else
-      support::ulittle64_t::ref{TargetPtr} = Value + Addend;
+  case ELF::R_AARCH64_ABS64:
+    write(isBE, TargetPtr, Value + Addend);
     break;
-  }
   case ELF::R_AARCH64_PREL32: {
     uint64_t Result = Value + Addend - FinalAddress;
     assert(static_cast<int64_t>(Result) >= INT32_MIN &&
            static_cast<int64_t>(Result) <= UINT32_MAX);
-    if (isBE)
-      support::ubig32_t::ref{TargetPtr} =
-          static_cast<uint32_t>(Result & 0xffffffffU);
-    else
-      support::ulittle32_t::ref{TargetPtr} =
-          static_cast<uint32_t>(Result & 0xffffffffU);
+    write(isBE, TargetPtr, static_cast<uint32_t>(Result & 0xffffffffU));
     break;
   }
   case ELF::R_AARCH64_CALL26: // fallthrough
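
The write(isBE, ...) helper replaces the per-case if (isBE)/else pairs with a single endianness dispatch. Here is a minimal sketch of that dispatch using plain byte stores instead of llvm::support::endian, so it stands alone; the writeBytes name is an assumption for illustration, not LLVM API.

#include <cassert>
#include <cstdint>

template <class T> static void writeBytes(bool isBE, void *P, T V) {
  auto *Out = static_cast<uint8_t *>(P);
  for (unsigned I = 0; I != sizeof(T); ++I) {
    // Most-significant byte first for big-endian, last for little-endian.
    unsigned Shift = isBE ? 8 * (sizeof(T) - 1 - I) : 8 * I;
    Out[I] = static_cast<uint8_t>(V >> Shift);
  }
}

int main() {
  uint8_t Buf[8] = {};
  // R_AARCH64_ABS64 stores the full 64-bit result in the target's byte order.
  writeBytes<uint64_t>(/*isBE=*/false, Buf, 0x1122334455667788ULL);
  assert(Buf[0] == 0x88 && Buf[7] == 0x11); // little-endian layout
  writeBytes<uint64_t>(/*isBE=*/true, Buf, 0x1122334455667788ULL);
  assert(Buf[0] == 0x11 && Buf[7] == 0x88); // big-endian layout
  return 0;
}
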
@@ -365,132 +379,56 @@ void RuntimeDyldELF::resolveAArch64Relocation(const SectionEntry &Section,
     // Operation: S+A-P. Set Call or B immediate value to bits fff_fffc of the
     // calculation.
     uint64_t BranchImm = Value + Addend - FinalAddress;
-    uint32_t TargetValue = support::ulittle32_t::ref{TargetPtr};
 
     // "Check that -2^27 <= result < 2^27".
     assert(isInt<28>(BranchImm));
-
-    // AArch64 code is emitted with .rela relocations. The data already in any
-    // bits affected by the relocation on entry is garbage.
-    TargetValue &= 0xfc000000U;
-    // Immediate goes in bits 25:0 of B and BL.
-    TargetValue |= static_cast<uint32_t>(BranchImm & 0xffffffcU) >> 2;
-    support::ulittle32_t::ref{TargetPtr} = TargetValue;
+    or32le(TargetPtr, (BranchImm & 0x0FFFFFFC) >> 2);
     break;
   }
-  case ELF::R_AARCH64_MOVW_UABS_G3: {
-    uint64_t Result = Value + Addend;
-    uint32_t TargetValue = support::ulittle32_t::ref{TargetPtr};
-
-    // AArch64 code is emitted with .rela relocations. The data already in any
-    // bits affected by the relocation on entry is garbage.
-    TargetValue &= 0xffe0001fU;
-    // Immediate goes in bits 20:5 of MOVZ/MOVK instruction
-    TargetValue |= ((Result & 0xffff000000000000ULL) >> (48 - 5));
-    // Shift must be "lsl #48", in bits 22:21
-    assert((TargetValue >> 21 & 0x3) == 3 && "invalid shift for relocation");
-    support::ulittle32_t::ref{TargetPtr} = TargetValue;
+  case ELF::R_AARCH64_MOVW_UABS_G3:
+    or32le(TargetPtr, ((Value + Addend) & 0xFFFF000000000000) >> 43);
     break;
-  }
-  case ELF::R_AARCH64_MOVW_UABS_G2_NC: {
-    uint64_t Result = Value + Addend;
-    uint32_t TargetValue = support::ulittle32_t::ref{TargetPtr};
-
-    // AArch64 code is emitted with .rela relocations. The data already in any
-    // bits affected by the relocation on entry is garbage.
-    TargetValue &= 0xffe0001fU;
-    // Immediate goes in bits 20:5 of MOVZ/MOVK instruction
-    TargetValue |= ((Result & 0xffff00000000ULL) >> (32 - 5));
-    // Shift must be "lsl #32", in bits 22:21
-    assert((TargetValue >> 21 & 0x3) == 2 && "invalid shift for relocation");
-    support::ulittle32_t::ref{TargetPtr} = TargetValue;
+  case ELF::R_AARCH64_MOVW_UABS_G2_NC:
+    or32le(TargetPtr, ((Value + Addend) & 0xFFFF00000000) >> 27);
     break;
-  }
-  case ELF::R_AARCH64_MOVW_UABS_G1_NC: {
-    uint64_t Result = Value + Addend;
-    uint32_t TargetValue = support::ulittle32_t::ref{TargetPtr};
-
-    // AArch64 code is emitted with .rela relocations. The data already in any
-    // bits affected by the relocation on entry is garbage.
-    TargetValue &= 0xffe0001fU;
-    // Immediate goes in bits 20:5 of MOVZ/MOVK instruction
-    TargetValue |= ((Result & 0xffff0000U) >> (16 - 5));
-    // Shift must be "lsl #16", in bits 22:21
-    assert((TargetValue >> 21 & 0x3) == 1 && "invalid shift for relocation");
-    support::ulittle32_t::ref{TargetPtr} = TargetValue;
+  case ELF::R_AARCH64_MOVW_UABS_G1_NC:
+    or32le(TargetPtr, ((Value + Addend) & 0xFFFF0000) >> 11);
     break;
-  }
-  case ELF::R_AARCH64_MOVW_UABS_G0_NC: {
-    uint64_t Result = Value + Addend;
-    uint32_t TargetValue = support::ulittle32_t::ref{TargetPtr};
-
-    // AArch64 code is emitted with .rela relocations. The data already in any
-    // bits affected by the relocation on entry is garbage.
-    TargetValue &= 0xffe0001fU;
-    // Immediate goes in bits 20:5 of MOVZ/MOVK instruction
-    TargetValue |= ((Result & 0xffffU) << 5);
-    // Shift must be "lsl #0", in bits 22:21.
-    assert((TargetValue >> 21 & 0x3) == 0 && "invalid shift for relocation");
-    support::ulittle32_t::ref{TargetPtr} = TargetValue;
+  case ELF::R_AARCH64_MOVW_UABS_G0_NC:
+    or32le(TargetPtr, ((Value + Addend) & 0xFFFF) << 5);
     break;
-  }
   case ELF::R_AARCH64_ADR_PREL_PG_HI21: {
     // Operation: Page(S+A) - Page(P)
    uint64_t Result =
         ((Value + Addend) & ~0xfffULL) - (FinalAddress & ~0xfffULL);
-    uint32_t TargetValue = support::ulittle32_t::ref{TargetPtr};
 
     // Check that -2^32 <= X < 2^32
     assert(isInt<33>(Result) && "overflow check failed for relocation");
-
-    // AArch64 code is emitted with .rela relocations. The data already in any
-    // bits affected by the relocation on entry is garbage.
-    TargetValue &= 0x9f00001fU;
-    // Immediate goes in bits 30:29 + 5:23 of ADRP instruction, taken
-    // from bits 32:12 of X.
-    TargetValue |= ((Result & 0x3000U) << (29 - 12));
-    TargetValue |= ((Result & 0x1ffffc000ULL) >> (14 - 5));
-    support::ulittle32_t::ref{TargetPtr} = TargetValue;
+    write32AArch64Addr(TargetPtr, Result >> 12);
     break;
   }
-  case ELF::R_AARCH64_ADD_ABS_LO12_NC: {
+  case ELF::R_AARCH64_ADD_ABS_LO12_NC:
     // Operation: S + A
-    uint64_t Result = Value + Addend;
-
-    // Immediate goes in bits 21:10 of LD/ST instruction, taken
-    // from bits 11:0 of X
-    *TargetPtr |= ((Result & 0xfff) << 10);
+    or32AArch64Imm(TargetPtr, Value + Addend);
     break;
-  }
-  case ELF::R_AARCH64_LDST32_ABS_LO12_NC: {
+  case ELF::R_AARCH64_LDST32_ABS_LO12_NC:
     // Operation: S + A
-    uint64_t Result = Value + Addend;
-    uint32_t TargetValue = support::ulittle32_t::ref{TargetPtr};
-
-    // AArch64 code is emitted with .rela relocations. The data already in any
-    // bits affected by the relocation on entry is garbage.
-    TargetValue &= 0xffc003ffU;
-    // Immediate goes in bits 21:10 of LD/ST instruction, taken
-    // from bits 11:2 of X
-    TargetValue |= ((Result & 0xffc) << (10 - 2));
-    support::ulittle32_t::ref{TargetPtr} = TargetValue;
+    or32AArch64Imm(TargetPtr, getBits(Value + Addend, 2, 11));
     break;
-  }
-  case ELF::R_AARCH64_LDST64_ABS_LO12_NC: {
+  case ELF::R_AARCH64_LDST64_ABS_LO12_NC:
     // Operation: S + A
-    uint64_t Result = Value + Addend;
-    uint32_t TargetValue = support::ulittle32_t::ref{TargetPtr};
-
-    // AArch64 code is emitted with .rela relocations. The data already in any
-    // bits affected by the relocation on entry is garbage.
-    TargetValue &= 0xffc003ffU;
-    // Immediate goes in bits 21:10 of LD/ST instruction, taken
-    // from bits 11:3 of X
-    TargetValue |= ((Result & 0xff8) << (10 - 3));
-    support::ulittle32_t::ref{TargetPtr} = TargetValue;
+    or32AArch64Imm(TargetPtr, getBits(Value + Addend, 3, 11));
     break;
-  }
   }
 }
 
 void RuntimeDyldELF::resolveARMRelocation(const SectionEntry &Section,
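
The condensed one-liners in this hunk fold two shifts into one: extracting a 16-bit chunk at bit N of the value and placing it at bit 5 of a MOVZ/MOVK immediate becomes a single shift by N - 5 (hence 43, 27, and 11 for G3, G2_NC, and G1_NC). Below is a standalone sketch that checks this arithmetic, the CALL26 branch field, and the ADRP immlo/immhi split performed by write32AArch64Addr; all values are arbitrary, chosen only for illustration.

#include <cassert>
#include <cstdint>

int main() {
  uint64_t V = 0xABCD123456789ABCULL;

  // MOVZ/MOVK immediates occupy bits 20:5 of the instruction.
  assert((((V & 0xFFFF000000000000ULL) >> 43) >> 5) == (V >> 48));        // G3
  assert((((V & 0xFFFF00000000ULL) >> 27) >> 5) == ((V >> 32) & 0xFFFF)); // G2_NC
  assert((((V & 0xFFFF0000ULL) >> 11) >> 5) == ((V >> 16) & 0xFFFF));     // G1_NC
  assert((((V & 0xFFFFULL) << 5) >> 5) == (V & 0xFFFF));                  // G0_NC

  // R_AARCH64_CALL26: word-aligned offset, bits 27:2 of S+A-P, stored in
  // bits 25:0 of B/BL. A -0x40 byte offset encodes as -16 words.
  int64_t BranchImm = -0x40;
  assert(((static_cast<uint64_t>(BranchImm) & 0x0FFFFFFC) >> 2) == 0x3FFFFF0);

  // write32AArch64Addr splits a 21-bit page delta (Result >> 12) into
  // ADRP's immlo (bits 30:29) and immhi (bits 23:5) fields.
  uint64_t Imm = 0x1ABCDE; // 21-bit page count
  uint32_t ImmLo = (Imm & 0x3) << 29;
  uint32_t ImmHi = (Imm & 0x1FFFFC) << 3;
  assert((ImmLo >> 29) == (Imm & 0x3));
  assert(((ImmHi >> 5) << 2) == (Imm & 0x1FFFFC));
  return 0;
}
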