Fix R_AARCH64_MOVW_UABS_G3 relocation
Summary: The relocation is missing a mask, so an address with non-zero bits in
positions 47:43 can overwrite the register number. (This frequently shows up as
the target register being changed to xzr.)

Reviewers: t.p.northover, lhames

Subscribers: davide, aemerson, rengolin, llvm-commits

Differential Revision: https://reviews.llvm.org/D27609

llvm-svn: 289880
commit 8f8cdd00da (parent d69b9414b3)
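For illustration, a minimal standalone C++ sketch of the failure mode (a hypothetical demo program, not code from the patch; the instruction word and address mirror the tests added below): without masking the address to bits 63:48 first, the raw shift by 43 lets address bits 47:43 land in bits 4:0 of the MOVZ/MOVK word, which is the destination register field.

// Hypothetical standalone demo (not part of the patch): shows how the unmasked
// shift corrupts the Rd field of "movz x0, #:abs_g3:f" for f = 0x0123456789abcdef.
#include <cassert>
#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t Addr = 0x0123456789abcdefULL; // same value as -dummy-extern f=...
  const uint32_t Insn = 0xd2e00000U;           // movz x0, #0, lsl #48 (Rd = x0)

  // Buggy: raw shift lets address bits 47:43 reach bits 4:0 (the register number).
  uint32_t Buggy = (Insn & 0xffe0001fU) | static_cast<uint32_t>(Addr >> (48 - 5));
  // Fixed: mask to bits 63:48 first, so only the G3 chunk lands in bits 20:5.
  uint32_t Fixed = (Insn & 0xffe0001fU) |
                   static_cast<uint32_t>((Addr & 0xffff000000000000ULL) >> (48 - 5));

  std::printf("buggy word 0x%08x (Rd = %u), fixed word 0x%08x (Rd = %u)\n",
              Buggy, Buggy & 0x1f, Fixed, Fixed & 0x1f);
  assert((Fixed & 0x1f) == 0);  // destination register stays x0
  assert(Fixed == 0xd2e02460U); // matches the little-endian test expectation below
  return 0;
}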
@@ -972,7 +972,7 @@ unsigned ELFObjectFile<ELFT>::getArch() const {
   case ELF::EM_X86_64:
     return Triple::x86_64;
   case ELF::EM_AARCH64:
-    return Triple::aarch64;
+    return IsLittleEndian ? Triple::aarch64 : Triple::aarch64_be;
   case ELF::EM_ARM:
     return Triple::arm;
   case ELF::EM_AVR:
@@ -86,6 +86,7 @@ private:
         return RelocToApply();
       }
     case Triple::aarch64:
+    case Triple::aarch64_be:
       switch (RelocType) {
       case llvm::ELF::R_AARCH64_ABS32:
         return visitELF_AARCH64_ABS32(R, Value);
@@ -325,6 +325,8 @@ void RuntimeDyldELF::resolveAArch64Relocation(const SectionEntry &Section,
   uint32_t *TargetPtr =
       reinterpret_cast<uint32_t *>(Section.getAddressWithOffset(Offset));
   uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+  // Data should use target endian. Code should always use little endian.
+  bool isBE = Arch == Triple::aarch64_be;
 
   DEBUG(dbgs() << "resolveAArch64Relocation, LocalAddress: 0x"
                << format("%llx", Section.getAddressWithOffset(Offset))
@@ -340,14 +342,22 @@ void RuntimeDyldELF::resolveAArch64Relocation(const SectionEntry &Section,
   case ELF::R_AARCH64_ABS64: {
     uint64_t *TargetPtr =
         reinterpret_cast<uint64_t *>(Section.getAddressWithOffset(Offset));
-    *TargetPtr = Value + Addend;
+    if (isBE)
+      support::ubig64_t::ref{TargetPtr} = Value + Addend;
+    else
+      support::ulittle64_t::ref{TargetPtr} = Value + Addend;
     break;
   }
   case ELF::R_AARCH64_PREL32: {
     uint64_t Result = Value + Addend - FinalAddress;
     assert(static_cast<int64_t>(Result) >= INT32_MIN &&
            static_cast<int64_t>(Result) <= UINT32_MAX);
-    *TargetPtr = static_cast<uint32_t>(Result & 0xffffffffU);
+    if (isBE)
+      support::ubig32_t::ref{TargetPtr} =
+          static_cast<uint32_t>(Result & 0xffffffffU);
+    else
+      support::ulittle32_t::ref{TargetPtr} =
+          static_cast<uint32_t>(Result & 0xffffffffU);
     break;
   }
   case ELF::R_AARCH64_CALL26: // fallthrough
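The hunk above routes the data relocations (R_AARCH64_ABS64, R_AARCH64_PREL32) through endianness-aware references so big-endian targets get correctly ordered stores, while the instruction relocations in the next hunk are always written little-endian. A rough standalone equivalent of such an explicit-endian store (plain C++, illustrative only, not the llvm::support API) could look like this:

// Hypothetical helper: store a 32-bit value into Dst with an explicit byte
// order, regardless of the host's native endianness.
#include <cstdint>
#include <cstring>

static void writeWord32(void *Dst, uint32_t Value, bool BigEndian) {
  uint8_t Bytes[4];
  for (int I = 0; I < 4; ++I) {
    // Big endian: byte 0 carries bits 31:24; little endian: byte 0 carries bits 7:0.
    int Shift = BigEndian ? 8 * (3 - I) : 8 * I;
    Bytes[I] = static_cast<uint8_t>(Value >> Shift);
  }
  std::memcpy(Dst, Bytes, sizeof(Bytes)); // memcpy sidesteps alignment/aliasing issues
}

In the patch this corresponds to picking support::ubig32_t::ref or support::ulittle32_t::ref based on isBE for data, and always support::ulittle32_t::ref when patching instruction words.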
@@ -355,104 +365,120 @@ void RuntimeDyldELF::resolveAArch64Relocation(const SectionEntry &Section,
     // Operation: S+A-P. Set Call or B immediate value to bits fff_fffc of the
     // calculation.
     uint64_t BranchImm = Value + Addend - FinalAddress;
+    uint32_t TargetValue = support::ulittle32_t::ref{TargetPtr};
 
     // "Check that -2^27 <= result < 2^27".
     assert(isInt<28>(BranchImm));
 
     // AArch64 code is emitted with .rela relocations. The data already in any
     // bits affected by the relocation on entry is garbage.
-    *TargetPtr &= 0xfc000000U;
+    TargetValue &= 0xfc000000U;
     // Immediate goes in bits 25:0 of B and BL.
-    *TargetPtr |= static_cast<uint32_t>(BranchImm & 0xffffffcU) >> 2;
+    TargetValue |= static_cast<uint32_t>(BranchImm & 0xffffffcU) >> 2;
+    support::ulittle32_t::ref{TargetPtr} = TargetValue;
     break;
   }
   case ELF::R_AARCH64_MOVW_UABS_G3: {
     uint64_t Result = Value + Addend;
+    uint32_t TargetValue = support::ulittle32_t::ref{TargetPtr};
 
     // AArch64 code is emitted with .rela relocations. The data already in any
     // bits affected by the relocation on entry is garbage.
-    *TargetPtr &= 0xffe0001fU;
+    TargetValue &= 0xffe0001fU;
     // Immediate goes in bits 20:5 of MOVZ/MOVK instruction
-    *TargetPtr |= Result >> (48 - 5);
+    TargetValue |= ((Result & 0xffff000000000000ULL) >> (48 - 5));
     // Shift must be "lsl #48", in bits 22:21
-    assert((*TargetPtr >> 21 & 0x3) == 3 && "invalid shift for relocation");
+    assert((TargetValue >> 21 & 0x3) == 3 && "invalid shift for relocation");
+    support::ulittle32_t::ref{TargetPtr} = TargetValue;
     break;
   }
   case ELF::R_AARCH64_MOVW_UABS_G2_NC: {
     uint64_t Result = Value + Addend;
+    uint32_t TargetValue = support::ulittle32_t::ref{TargetPtr};
 
     // AArch64 code is emitted with .rela relocations. The data already in any
     // bits affected by the relocation on entry is garbage.
-    *TargetPtr &= 0xffe0001fU;
+    TargetValue &= 0xffe0001fU;
     // Immediate goes in bits 20:5 of MOVZ/MOVK instruction
-    *TargetPtr |= ((Result & 0xffff00000000ULL) >> (32 - 5));
+    TargetValue |= ((Result & 0xffff00000000ULL) >> (32 - 5));
     // Shift must be "lsl #32", in bits 22:21
-    assert((*TargetPtr >> 21 & 0x3) == 2 && "invalid shift for relocation");
+    assert((TargetValue >> 21 & 0x3) == 2 && "invalid shift for relocation");
+    support::ulittle32_t::ref{TargetPtr} = TargetValue;
     break;
   }
   case ELF::R_AARCH64_MOVW_UABS_G1_NC: {
     uint64_t Result = Value + Addend;
+    uint32_t TargetValue = support::ulittle32_t::ref{TargetPtr};
 
     // AArch64 code is emitted with .rela relocations. The data already in any
     // bits affected by the relocation on entry is garbage.
-    *TargetPtr &= 0xffe0001fU;
+    TargetValue &= 0xffe0001fU;
     // Immediate goes in bits 20:5 of MOVZ/MOVK instruction
-    *TargetPtr |= ((Result & 0xffff0000U) >> (16 - 5));
+    TargetValue |= ((Result & 0xffff0000U) >> (16 - 5));
     // Shift must be "lsl #16", in bits 22:21
-    assert((*TargetPtr >> 21 & 0x3) == 1 && "invalid shift for relocation");
+    assert((TargetValue >> 21 & 0x3) == 1 && "invalid shift for relocation");
+    support::ulittle32_t::ref{TargetPtr} = TargetValue;
     break;
   }
   case ELF::R_AARCH64_MOVW_UABS_G0_NC: {
     uint64_t Result = Value + Addend;
+    uint32_t TargetValue = support::ulittle32_t::ref{TargetPtr};
 
     // AArch64 code is emitted with .rela relocations. The data already in any
     // bits affected by the relocation on entry is garbage.
-    *TargetPtr &= 0xffe0001fU;
+    TargetValue &= 0xffe0001fU;
     // Immediate goes in bits 20:5 of MOVZ/MOVK instruction
-    *TargetPtr |= ((Result & 0xffffU) << 5);
+    TargetValue |= ((Result & 0xffffU) << 5);
     // Shift must be "lsl #0", in bits 22:21.
-    assert((*TargetPtr >> 21 & 0x3) == 0 && "invalid shift for relocation");
+    assert((TargetValue >> 21 & 0x3) == 0 && "invalid shift for relocation");
+    support::ulittle32_t::ref{TargetPtr} = TargetValue;
     break;
   }
   case ELF::R_AARCH64_ADR_PREL_PG_HI21: {
     // Operation: Page(S+A) - Page(P)
     uint64_t Result =
         ((Value + Addend) & ~0xfffULL) - (FinalAddress & ~0xfffULL);
+    uint32_t TargetValue = support::ulittle32_t::ref{TargetPtr};
 
     // Check that -2^32 <= X < 2^32
     assert(isInt<33>(Result) && "overflow check failed for relocation");
 
     // AArch64 code is emitted with .rela relocations. The data already in any
     // bits affected by the relocation on entry is garbage.
-    *TargetPtr &= 0x9f00001fU;
+    TargetValue &= 0x9f00001fU;
     // Immediate goes in bits 30:29 + 5:23 of ADRP instruction, taken
     // from bits 32:12 of X.
-    *TargetPtr |= ((Result & 0x3000U) << (29 - 12));
-    *TargetPtr |= ((Result & 0x1ffffc000ULL) >> (14 - 5));
+    TargetValue |= ((Result & 0x3000U) << (29 - 12));
+    TargetValue |= ((Result & 0x1ffffc000ULL) >> (14 - 5));
+    support::ulittle32_t::ref{TargetPtr} = TargetValue;
     break;
   }
   case ELF::R_AARCH64_LDST32_ABS_LO12_NC: {
     // Operation: S + A
     uint64_t Result = Value + Addend;
+    uint32_t TargetValue = support::ulittle32_t::ref{TargetPtr};
 
     // AArch64 code is emitted with .rela relocations. The data already in any
     // bits affected by the relocation on entry is garbage.
-    *TargetPtr &= 0xffc003ffU;
+    TargetValue &= 0xffc003ffU;
     // Immediate goes in bits 21:10 of LD/ST instruction, taken
     // from bits 11:2 of X
-    *TargetPtr |= ((Result & 0xffc) << (10 - 2));
+    TargetValue |= ((Result & 0xffc) << (10 - 2));
+    support::ulittle32_t::ref{TargetPtr} = TargetValue;
     break;
   }
   case ELF::R_AARCH64_LDST64_ABS_LO12_NC: {
     // Operation: S + A
     uint64_t Result = Value + Addend;
+    uint32_t TargetValue = support::ulittle32_t::ref{TargetPtr};
 
     // AArch64 code is emitted with .rela relocations. The data already in any
     // bits affected by the relocation on entry is garbage.
-    *TargetPtr &= 0xffc003ffU;
+    TargetValue &= 0xffc003ffU;
     // Immediate goes in bits 21:10 of LD/ST instruction, taken
     // from bits 11:3 of X
-    *TargetPtr |= ((Result & 0xff8) << (10 - 3));
+    TargetValue |= ((Result & 0xff8) << (10 - 3));
+    support::ulittle32_t::ref{TargetPtr} = TargetValue;
     break;
   }
   }
@@ -0,0 +1,34 @@
+# RUN: llvm-mc -triple=aarch64_be-none-linux-gnu -filetype=obj -o %T/be-reloc.o %s
+# RUN: llvm-rtdyld -triple=aarch64_be-none-linux-gnu -verify -dummy-extern f=0x0123456789abcdef -check=%s %T/be-reloc.o
+
+        .text
+        .globl  g
+        .p2align  2
+        .type   g,@function
+g:
+# R_AARCH64_MOVW_UABS_G3
+        movz    x0, #:abs_g3:f
+# R_AARCH64_MOVW_UABS_G2_NC
+        movk    x0, #:abs_g2_nc:f
+# R_AARCH64_MOVW_UABS_G1_NC
+        movk    x0, #:abs_g1_nc:f
+# R_AARCH64_MOVW_UABS_G0_NC
+        movk    x0, #:abs_g0_nc:f
+        ret
+.Lfunc_end0:
+        .size   g, .Lfunc_end0-g
+
+        .type   k,@object
+        .data
+        .globl  k
+        .p2align  3
+k:
+        .xword  f
+        .size   k, 8
+
+# LE instructions read as BE
+# rtdyld-check: *{4}(g) = 0x6024e0d2
+# rtdyld-check: *{4}(g + 4) = 0xe0acc8f2
+# rtdyld-check: *{4}(g + 8) = 0x6035b1f2
+# rtdyld-check: *{4}(g + 12) = 0xe0bd99f2
+# rtdyld-check: *{8}k = f
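As the "LE instructions read as BE" comment notes, the expected words in this big-endian test are just the byte-swapped forms of the little-endian encodings checked in the companion test below. A quick standalone cross-check (illustrative only, not part of the test suite):

// Hypothetical check: the BE expectations above are the byte-swapped LE
// encodings from the little-endian test that follows.
#include <cstdint>

static constexpr uint32_t bswap32(uint32_t V) {
  return (V >> 24) | ((V >> 8) & 0x0000ff00U) | ((V << 8) & 0x00ff0000U) | (V << 24);
}

int main() {
  static_assert(bswap32(0xd2e02460U) == 0x6024e0d2U, "movz x0, #:abs_g3:f");
  static_assert(bswap32(0xf2c8ace0U) == 0xe0acc8f2U, "movk x0, #:abs_g2_nc:f");
  static_assert(bswap32(0xf2b13560U) == 0x6035b1f2U, "movk x0, #:abs_g1_nc:f");
  static_assert(bswap32(0xf299bde0U) == 0xe0bd99f2U, "movk x0, #:abs_g0_nc:f");
  return 0;
}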
@@ -0,0 +1,33 @@
+# RUN: llvm-mc -triple=arm64-none-linux-gnu -filetype=obj -o %T/reloc.o %s
+# RUN: llvm-rtdyld -triple=arm64-none-linux-gnu -verify -dummy-extern f=0x0123456789abcdef -check=%s %T/reloc.o
+
+        .text
+        .globl  g
+        .p2align  2
+        .type   g,@function
+g:
+# R_AARCH64_MOVW_UABS_G3
+        movz    x0, #:abs_g3:f
+# R_AARCH64_MOVW_UABS_G2_NC
+        movk    x0, #:abs_g2_nc:f
+# R_AARCH64_MOVW_UABS_G1_NC
+        movk    x0, #:abs_g1_nc:f
+# R_AARCH64_MOVW_UABS_G0_NC
+        movk    x0, #:abs_g0_nc:f
+        ret
+.Lfunc_end0:
+        .size   g, .Lfunc_end0-g
+
+        .type   k,@object
+        .data
+        .globl  k
+        .p2align  3
+k:
+        .xword  f
+        .size   k, 8
+
+# rtdyld-check: *{4}(g) = 0xd2e02460
+# rtdyld-check: *{4}(g + 4) = 0xf2c8ace0
+# rtdyld-check: *{4}(g + 8) = 0xf2b13560
+# rtdyld-check: *{4}(g + 12) = 0xf299bde0
+# rtdyld-check: *{8}k = f