//===- Chunks.cpp ---------------------------------------------------------===//
//
// The LLVM Linker
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "Chunks.h"
#include "InputFiles.h"
#include "Symbols.h"
#include "Writer.h"
#include "SymbolTable.h"
#include "lld/Common/ErrorHandler.h"
#include "llvm/ADT/Twine.h"
#include "llvm/BinaryFormat/COFF.h"
#include "llvm/Object/COFF.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>

using namespace llvm;
using namespace llvm::object;
using namespace llvm::support::endian;
using namespace llvm::COFF;
using llvm::support::ulittle32_t;

namespace lld {
namespace coff {

SectionChunk::SectionChunk(ObjFile *F, const coff_section *H)
    : Chunk(SectionKind), Repl(this), Header(H), File(F),
      Relocs(File->getCOFFObj()->getRelocations(Header)) {
  // Initialize SectionName.
  File->getCOFFObj()->getSectionName(Header, SectionName);

  Alignment = Header->getAlignment();

  // If linker GC is disabled, every chunk starts out alive. If linker GC is
  // enabled, treat non-comdat sections as roots. Generally optimized object
  // files will be built with -ffunction-sections or /Gy, so most things worth
  // stripping will be in a comdat.
  Live = !Config->DoGC || !isCOMDAT();
}

// Initialize the RelocTargets vector, to allow redirecting certain relocations
// to a thunk instead of the actual symbol the relocation's symbol table index
// indicates.
void SectionChunk::readRelocTargets() {
  assert(RelocTargets.empty());
  RelocTargets.reserve(Relocs.size());
  for (const coff_relocation &Rel : Relocs)
    RelocTargets.push_back(File->getSymbol(Rel.SymbolTableIndex));
}

// Reset RelocTargets to their original targets before thunks were added.
void SectionChunk::resetRelocTargets() {
  for (size_t I = 0, E = Relocs.size(); I < E; ++I)
    RelocTargets[I] = File->getSymbol(Relocs[I].SymbolTableIndex);
}

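// Helpers for applying relocations. COFF relocations have no explicit addend;
// the addend is whatever is already stored at the location being patched, so
// the add* helpers read the existing value, add V, and write the sum back,
// while the or* helpers OR bits into an existing instruction encoding.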
static void add16(uint8_t *P, int16_t V) { write16le(P, read16le(P) + V); }
static void add32(uint8_t *P, int32_t V) { write32le(P, read32le(P) + V); }
static void add64(uint8_t *P, int64_t V) { write64le(P, read64le(P) + V); }
static void or16(uint8_t *P, uint16_t V) { write16le(P, read16le(P) | V); }
static void or32(uint8_t *P, uint32_t V) { write32le(P, read32le(P) | V); }

// Verify that given sections are appropriate targets for SECREL
// relocations. This check is relaxed because unfortunately debug
// sections have section-relative relocations against absolute symbols.
static bool checkSecRel(const SectionChunk *Sec, OutputSection *OS) {
  if (OS)
    return true;
  if (Sec->isCodeView())
    return false;
  error("SECREL relocation cannot be applied to absolute symbols");
  return false;
}

static void applySecRel(const SectionChunk *Sec, uint8_t *Off,
                        OutputSection *OS, uint64_t S) {
  if (!checkSecRel(Sec, OS))
    return;
  uint64_t SecRel = S - OS->getRVA();
  if (SecRel > UINT32_MAX) {
    error("overflow in SECREL relocation in section: " + Sec->getSectionName());
    return;
  }
  add32(Off, SecRel);
}

static void applySecIdx(uint8_t *Off, OutputSection *OS) {
  // An absolute symbol doesn't have a section index, but a section index
  // relocation against an absolute symbol should be resolved to one plus the
  // last output section index. This is required for compatibility with MSVC.
  if (OS)
    add16(Off, OS->SectionIndex);
  else
    add16(Off, DefinedAbsolute::NumOutputSections + 1);
}

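// In the applyRel* functions below, Off points at the location being patched,
// S is the RVA of the relocation target, P is the RVA of the location being
// patched (used by PC-relative relocations), and OS is the target symbol's
// output section (used by SECREL and SECTION relocations).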
void SectionChunk::applyRelX64(uint8_t *Off, uint16_t Type, OutputSection *OS,
                               uint64_t S, uint64_t P) const {
  switch (Type) {
  case IMAGE_REL_AMD64_ADDR32: add32(Off, S + Config->ImageBase); break;
  case IMAGE_REL_AMD64_ADDR64: add64(Off, S + Config->ImageBase); break;
  case IMAGE_REL_AMD64_ADDR32NB: add32(Off, S); break;
  case IMAGE_REL_AMD64_REL32: add32(Off, S - P - 4); break;
  case IMAGE_REL_AMD64_REL32_1: add32(Off, S - P - 5); break;
  case IMAGE_REL_AMD64_REL32_2: add32(Off, S - P - 6); break;
  case IMAGE_REL_AMD64_REL32_3: add32(Off, S - P - 7); break;
  case IMAGE_REL_AMD64_REL32_4: add32(Off, S - P - 8); break;
  case IMAGE_REL_AMD64_REL32_5: add32(Off, S - P - 9); break;
  case IMAGE_REL_AMD64_SECTION: applySecIdx(Off, OS); break;
  case IMAGE_REL_AMD64_SECREL: applySecRel(this, Off, OS, S); break;
  default:
    error("unsupported relocation type 0x" + Twine::utohexstr(Type) + " in " +
          toString(File));
  }
}

void SectionChunk::applyRelX86(uint8_t *Off, uint16_t Type, OutputSection *OS,
                               uint64_t S, uint64_t P) const {
  switch (Type) {
  case IMAGE_REL_I386_ABSOLUTE: break;
  case IMAGE_REL_I386_DIR32: add32(Off, S + Config->ImageBase); break;
  case IMAGE_REL_I386_DIR32NB: add32(Off, S); break;
  case IMAGE_REL_I386_REL32: add32(Off, S - P - 4); break;
  case IMAGE_REL_I386_SECTION: applySecIdx(Off, OS); break;
  case IMAGE_REL_I386_SECREL: applySecRel(this, Off, OS, S); break;
  default:
    error("unsupported relocation type 0x" + Twine::utohexstr(Type) + " in " +
          toString(File));
  }
}

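// A Thumb-2 MOVW or MOVT instruction encodes its 16-bit immediate split
// across the two halfwords of the instruction as imm4:i:imm3:imm8 (imm4 and
// i in the first halfword, imm3 and imm8 in the second). applyMOV writes a
// 16-bit value into those fields; readMOV reassembles one from them.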
static void applyMOV(uint8_t *Off, uint16_t V) {
  write16le(Off, (read16le(Off) & 0xfbf0) | ((V & 0x800) >> 1) | ((V >> 12) & 0xf));
  write16le(Off + 2, (read16le(Off + 2) & 0x8f00) | ((V & 0x700) << 4) | (V & 0xff));
}

static uint16_t readMOV(uint8_t *Off, bool MOVT) {
  uint16_t Op1 = read16le(Off);
  if ((Op1 & 0xfbf0) != (MOVT ? 0xf2c0 : 0xf240))
    error("unexpected instruction in " + Twine(MOVT ? "MOVT" : "MOVW") +
          " instruction in MOV32T relocation");
  uint16_t Op2 = read16le(Off + 2);
  if ((Op2 & 0x8000) != 0)
    error("unexpected instruction in " + Twine(MOVT ? "MOVT" : "MOVW") +
          " instruction in MOV32T relocation");
  return (Op2 & 0x00ff) | ((Op2 >> 4) & 0x0700) | ((Op1 << 1) & 0x0800) |
         ((Op1 & 0x000f) << 12);
}

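// A MOV32T relocation covers a MOVW/MOVT pair materializing a 32-bit value.
// The immediate currently encoded in the pair acts as the addend: it is read
// out, added to V, and the result is written back with the low half going
// into the MOVW and the high half into the MOVT.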
void applyMOV32T(uint8_t *Off, uint32_t V) {
  uint16_t ImmW = readMOV(Off, false);    // read MOVW operand
  uint16_t ImmT = readMOV(Off + 4, true); // read MOVT operand
  uint32_t Imm = ImmW | (ImmT << 16);
  V += Imm;                               // add the immediate offset
  applyMOV(Off, V);           // set MOVW operand
  applyMOV(Off + 4, V >> 16); // set MOVT operand
}

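// The Thumb-2 branch fixups below write a halfword-aligned signed offset into
// a branch instruction: the sign bit S and the high offset bits go into the
// first halfword, J1/J2 and the low eleven offset bits into the second (for
// the 24-bit form, J1/J2 are derived from high offset bits and the sign bit).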
static void applyBranch20T(uint8_t *Off, int32_t V) {
  if (!isInt<21>(V))
    error("relocation out of range");
  uint32_t S = V < 0 ? 1 : 0;
  uint32_t J1 = (V >> 19) & 1;
  uint32_t J2 = (V >> 18) & 1;
  or16(Off, (S << 10) | ((V >> 12) & 0x3f));
  or16(Off + 2, (J1 << 13) | (J2 << 11) | ((V >> 1) & 0x7ff));
}

void applyBranch24T(uint8_t *Off, int32_t V) {
  if (!isInt<25>(V))
    error("relocation out of range");
  uint32_t S = V < 0 ? 1 : 0;
  uint32_t J1 = ((~V >> 23) & 1) ^ S;
  uint32_t J2 = ((~V >> 22) & 1) ^ S;
  or16(Off, (S << 10) | ((V >> 12) & 0x3ff));
  // Clear out the J1 and J2 bits which may be set.
  write16le(Off + 2, (read16le(Off + 2) & 0xd000) | (J1 << 13) | (J2 << 11) | ((V >> 1) & 0x7ff));
}

void SectionChunk::applyRelARM(uint8_t *Off, uint16_t Type, OutputSection *OS,
                               uint64_t S, uint64_t P) const {
  // Pointer to thumb code must have the LSB set.
  uint64_t SX = S;
  if (OS && (OS->Header.Characteristics & IMAGE_SCN_MEM_EXECUTE))
    SX |= 1;
  switch (Type) {
  case IMAGE_REL_ARM_ADDR32: add32(Off, SX + Config->ImageBase); break;
  case IMAGE_REL_ARM_ADDR32NB: add32(Off, SX); break;
  case IMAGE_REL_ARM_MOV32T: applyMOV32T(Off, SX + Config->ImageBase); break;
  case IMAGE_REL_ARM_BRANCH20T: applyBranch20T(Off, SX - P - 4); break;
  case IMAGE_REL_ARM_BRANCH24T: applyBranch24T(Off, SX - P - 4); break;
  case IMAGE_REL_ARM_BLX23T: applyBranch24T(Off, SX - P - 4); break;
  case IMAGE_REL_ARM_SECTION: applySecIdx(Off, OS); break;
  case IMAGE_REL_ARM_SECREL: applySecRel(this, Off, OS, S); break;
  default:
    error("unsupported relocation type 0x" + Twine::utohexstr(Type) + " in " +
          toString(File));
  }
}

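// An ADR/ADRP instruction stores a 21-bit immediate split into immlo (bits
// 30:29) and immhi (bits 23:5). As an illustrative example, with Shift == 12
// (ADRP), a target at S == 0x143002000 referenced from P == 0x143000000 is a
// page delta of 2, which is what ends up in that immediate field.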
// Interpret the existing immediate value as a byte offset to the
// target symbol, then update the instruction with the immediate as
// the page offset from the current instruction to the target.
void applyArm64Addr(uint8_t *Off, uint64_t S, uint64_t P, int Shift) {
  uint32_t Orig = read32le(Off);
  uint64_t Imm = ((Orig >> 29) & 0x3) | ((Orig >> 3) & 0x1FFFFC);
  S += Imm;
  Imm = (S >> Shift) - (P >> Shift);
  uint32_t ImmLo = (Imm & 0x3) << 29;
  uint32_t ImmHi = (Imm & 0x1FFFFC) << 3;
  uint64_t Mask = (0x3 << 29) | (0x1FFFFC << 3);
  write32le(Off, (Orig & ~Mask) | ImmLo | ImmHi);
}

// Update the immediate field in an AArch64 ldr, str, or add instruction.
// Optionally limit the range of the written immediate by one or more bits
// (RangeLimit).
void applyArm64Imm(uint8_t *Off, uint64_t Imm, uint32_t RangeLimit) {
  uint32_t Orig = read32le(Off);
  Imm += (Orig >> 10) & 0xFFF;
  Orig &= ~(0xFFF << 10);
  write32le(Off, Orig | ((Imm & (0xFFF >> RangeLimit)) << 10));
}

// Add the 12 bit page offset to the existing immediate.
// Ldr/str instructions store the opcode immediate scaled
// by the load/store size (giving a larger range for larger
// loads/stores). The immediate is always (both before and after
// fixing up the relocation) stored scaled similarly.
// Even if larger loads/stores have a larger range, limit the
// effective offset to 12 bit, since it is intended to be a
// page offset.
static void applyArm64Ldr(uint8_t *Off, uint64_t Imm) {
  uint32_t Orig = read32le(Off);
  uint32_t Size = Orig >> 30;
  // 0x04000000 indicates SIMD/FP registers
  // 0x00800000 indicates 128 bit
  if ((Orig & 0x4800000) == 0x4800000)
    Size += 4;
  if ((Imm & ((1 << Size) - 1)) != 0)
    error("misaligned ldr/str offset");
  applyArm64Imm(Off, Imm >> Size, Size);
}

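// The ARM64 SECREL helpers below apply pieces of a section-relative offset:
// LOW12A writes bits [11:0] into an add immediate, HIGH12A writes bits
// [23:12] into an add immediate, and LOW12L writes bits [11:0] into an
// ldr/str immediate (scaled by the access size).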
static void applySecRelLow12A(const SectionChunk *Sec, uint8_t *Off,
                              OutputSection *OS, uint64_t S) {
  if (checkSecRel(Sec, OS))
    applyArm64Imm(Off, (S - OS->getRVA()) & 0xfff, 0);
}

static void applySecRelHigh12A(const SectionChunk *Sec, uint8_t *Off,
                               OutputSection *OS, uint64_t S) {
  if (!checkSecRel(Sec, OS))
    return;
  uint64_t SecRel = (S - OS->getRVA()) >> 12;
  if (0xfff < SecRel) {
    error("overflow in SECREL_HIGH12A relocation in section: " +
          Sec->getSectionName());
    return;
  }
  applyArm64Imm(Off, SecRel & 0xfff, 0);
}

static void applySecRelLdr(const SectionChunk *Sec, uint8_t *Off,
                           OutputSection *OS, uint64_t S) {
  if (checkSecRel(Sec, OS))
    applyArm64Ldr(Off, (S - OS->getRVA()) & 0xfff);
}

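// The AArch64 branch fixups below OR a word-aligned offset into the
// instruction's immediate field: B/BL carry a 26-bit immediate at bits 25:0,
// B.cond/CBZ/CBNZ a 19-bit immediate at bits 23:5, and TBZ/TBNZ a 14-bit
// immediate at bits 18:5.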
void applyArm64Branch26(uint8_t *Off, int64_t V) {
  if (!isInt<28>(V))
    error("relocation out of range");
  or32(Off, (V & 0x0FFFFFFC) >> 2);
}

static void applyArm64Branch19(uint8_t *Off, int64_t V) {
  if (!isInt<21>(V))
    error("relocation out of range");
  or32(Off, (V & 0x001FFFFC) << 3);
}

static void applyArm64Branch14(uint8_t *Off, int64_t V) {
  if (!isInt<16>(V))
    error("relocation out of range");
  or32(Off, (V & 0x0000FFFC) << 3);
}

void SectionChunk::applyRelARM64(uint8_t *Off, uint16_t Type, OutputSection *OS,
                                 uint64_t S, uint64_t P) const {
  switch (Type) {
  case IMAGE_REL_ARM64_PAGEBASE_REL21: applyArm64Addr(Off, S, P, 12); break;
  case IMAGE_REL_ARM64_REL21: applyArm64Addr(Off, S, P, 0); break;
  case IMAGE_REL_ARM64_PAGEOFFSET_12A: applyArm64Imm(Off, S & 0xfff, 0); break;
  case IMAGE_REL_ARM64_PAGEOFFSET_12L: applyArm64Ldr(Off, S & 0xfff); break;
  case IMAGE_REL_ARM64_BRANCH26: applyArm64Branch26(Off, S - P); break;
  case IMAGE_REL_ARM64_BRANCH19: applyArm64Branch19(Off, S - P); break;
  case IMAGE_REL_ARM64_BRANCH14: applyArm64Branch14(Off, S - P); break;
  case IMAGE_REL_ARM64_ADDR32: add32(Off, S + Config->ImageBase); break;
  case IMAGE_REL_ARM64_ADDR32NB: add32(Off, S); break;
  case IMAGE_REL_ARM64_ADDR64: add64(Off, S + Config->ImageBase); break;
  case IMAGE_REL_ARM64_SECREL: applySecRel(this, Off, OS, S); break;
  case IMAGE_REL_ARM64_SECREL_LOW12A: applySecRelLow12A(this, Off, OS, S); break;
  case IMAGE_REL_ARM64_SECREL_HIGH12A: applySecRelHigh12A(this, Off, OS, S); break;
  case IMAGE_REL_ARM64_SECREL_LOW12L: applySecRelLdr(this, Off, OS, S); break;
  case IMAGE_REL_ARM64_SECTION: applySecIdx(Off, OS); break;
  default:
    error("unsupported relocation type 0x" + Twine::utohexstr(Type) + " in " +
          toString(File));
  }
}

static void maybeReportRelocationToDiscarded(const SectionChunk *FromChunk,
                                             Defined *Sym,
                                             const coff_relocation &Rel) {
  // Don't report these errors when the relocation comes from a debug info
  // section or in mingw mode. MinGW mode object files (built by GCC) can
  // have leftover sections with relocations against discarded comdat
  // sections. Such sections are left as is, with relocations untouched.
  if (FromChunk->isCodeView() || FromChunk->isDWARF() || Config->MinGW)
    return;

  // Get the name of the symbol. If it's null, it was discarded early, so we
  // have to go back to the object file.
  ObjFile *File = FromChunk->File;
  StringRef Name;
  if (Sym) {
    Name = Sym->getName();
  } else {
    COFFSymbolRef COFFSym =
        check(File->getCOFFObj()->getSymbol(Rel.SymbolTableIndex));
    File->getCOFFObj()->getSymbolName(COFFSym, Name);
  }

  error("relocation against symbol in discarded section: " + Name +
        getSymbolLocations(File, Rel.SymbolTableIndex));
}

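// Copy this chunk's data to its assigned position in the output buffer, then
// apply each of its relocations in place, skipping (and, where appropriate,
// diagnosing) relocations against discarded sections.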
void SectionChunk::writeTo(uint8_t *Buf) const {
  if (!hasData())
    return;
  // Copy section contents from source object file to output file.
  ArrayRef<uint8_t> A = getContents();
  if (!A.empty())
    memcpy(Buf + OutputSectionOff, A.data(), A.size());

  // Apply relocations.
  size_t InputSize = getSize();
  for (size_t I = 0, E = Relocs.size(); I < E; I++) {
    const coff_relocation &Rel = Relocs[I];

    // Check for an invalid relocation offset. This check isn't perfect, because
    // we don't have the relocation size, which is only known after checking the
    // machine and relocation type. As a result, a relocation may overwrite the
    // beginning of the following input section.
    if (Rel.VirtualAddress >= InputSize) {
      error("relocation points beyond the end of its parent section");
      continue;
    }

    uint8_t *Off = Buf + OutputSectionOff + Rel.VirtualAddress;

    // Use the potentially remapped Symbol instead of the one that the
    // relocation points to.
    auto *Sym = dyn_cast_or_null<Defined>(RelocTargets[I]);

    // Get the output section of the symbol for this relocation. The output
    // section is needed to compute SECREL and SECTION relocations used in debug
    // info.
    Chunk *C = Sym ? Sym->getChunk() : nullptr;
    OutputSection *OS = C ? C->getOutputSection() : nullptr;

    // Skip the relocation if it refers to a discarded section, and diagnose it
    // as an error if appropriate. If a symbol was discarded early, it may be
    // null. If it was discarded late, the output section will be null, unless
    // it was an absolute or synthetic symbol.
    if (!Sym ||
        (!OS && !isa<DefinedAbsolute>(Sym) && !isa<DefinedSynthetic>(Sym))) {
      maybeReportRelocationToDiscarded(this, Sym, Rel);
      continue;
    }

    uint64_t S = Sym->getRVA();

    // Compute the RVA of the relocation for relative relocations.
    uint64_t P = RVA + Rel.VirtualAddress;
    switch (Config->Machine) {
    case AMD64:
      applyRelX64(Off, Rel.Type, OS, S, P);
      break;
    case I386:
      applyRelX86(Off, Rel.Type, OS, S, P);
      break;
    case ARMNT:
      applyRelARM(Off, Rel.Type, OS, S, P);
      break;
    case ARM64:
      applyRelARM64(Off, Rel.Type, OS, S, P);
      break;
    default:
      llvm_unreachable("unknown machine type");
    }
  }
}

void SectionChunk::addAssociative(SectionChunk *Child) {
  AssocChildren.push_back(Child);
}

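// Compute the base relocation type (for the .reloc section) required by a
// relocation, or IMAGE_REL_BASED_ABSOLUTE if it doesn't embed an absolute
// address and therefore needs no base relocation.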
static uint8_t getBaserelType(const coff_relocation &Rel) {
  switch (Config->Machine) {
  case AMD64:
    if (Rel.Type == IMAGE_REL_AMD64_ADDR64)
      return IMAGE_REL_BASED_DIR64;
    return IMAGE_REL_BASED_ABSOLUTE;
  case I386:
    if (Rel.Type == IMAGE_REL_I386_DIR32)
      return IMAGE_REL_BASED_HIGHLOW;
    return IMAGE_REL_BASED_ABSOLUTE;
  case ARMNT:
    if (Rel.Type == IMAGE_REL_ARM_ADDR32)
      return IMAGE_REL_BASED_HIGHLOW;
    if (Rel.Type == IMAGE_REL_ARM_MOV32T)
      return IMAGE_REL_BASED_ARM_MOV32T;
    return IMAGE_REL_BASED_ABSOLUTE;
  case ARM64:
    if (Rel.Type == IMAGE_REL_ARM64_ADDR64)
      return IMAGE_REL_BASED_DIR64;
    return IMAGE_REL_BASED_ABSOLUTE;
  default:
    llvm_unreachable("unknown machine type");
  }
}

// Windows-specific.
// Collect all locations that contain absolute addresses, which need to be
// fixed by the loader if load-time relocation is needed.
// Only called when base relocation is enabled.
void SectionChunk::getBaserels(std::vector<Baserel> *Res) {
  for (size_t I = 0, E = Relocs.size(); I < E; I++) {
    const coff_relocation &Rel = Relocs[I];
    uint8_t Ty = getBaserelType(Rel);
    if (Ty == IMAGE_REL_BASED_ABSOLUTE)
      continue;
    // Use the potentially remapped Symbol instead of the one that the
    // relocation points to.
    Symbol *Target = RelocTargets[I];
    if (!Target || isa<DefinedAbsolute>(Target))
      continue;
    Res->emplace_back(RVA + Rel.VirtualAddress, Ty);
  }
}

// MinGW specific.
// Check whether a static relocation of type Type can be deferred and
// handled at runtime as a pseudo relocation (for references to a module
// local variable, which turned out to actually need to be imported from
// another DLL). This returns the size the relocation is supposed to update,
// in bits, or 0 if the relocation cannot be handled as a runtime pseudo
// relocation.
static int getRuntimePseudoRelocSize(uint16_t Type) {
  // Only relocations that either contain an absolute address or a plain
  // relative offset can be handled, since the runtime pseudo reloc
  // implementation adds 8/16/32/64 bit values to a memory address.
  //
  // Given a pseudo relocation entry,
  //
  // typedef struct {
  //   DWORD sym;
  //   DWORD target;
  //   DWORD flags;
  // } runtime_pseudo_reloc_item_v2;
  //
  // the runtime relocation performs this adjustment:
  //     *(base + .target) += *(base + .sym) - (base + .sym)
  //
  // This works for both absolute addresses (IMAGE_REL_*_ADDR32/64,
  // IMAGE_REL_I386_DIR32), where the memory location initially contains
  // the address of the IAT slot, and for relative addresses (IMAGE_REL*_REL32),
  // where the memory location originally contains the relative offset to the
  // IAT slot.
  //
  // This requires the target address to be writable, either directly out of
  // the image, or temporarily changed at runtime with VirtualProtect.
  // Since this only operates on direct address values, it doesn't work for
  // ARM/ARM64 relocations, other than the plain ADDR32/ADDR64 relocations.
  switch (Config->Machine) {
  case AMD64:
    switch (Type) {
    case IMAGE_REL_AMD64_ADDR64:
      return 64;
    case IMAGE_REL_AMD64_ADDR32:
    case IMAGE_REL_AMD64_REL32:
    case IMAGE_REL_AMD64_REL32_1:
    case IMAGE_REL_AMD64_REL32_2:
    case IMAGE_REL_AMD64_REL32_3:
    case IMAGE_REL_AMD64_REL32_4:
    case IMAGE_REL_AMD64_REL32_5:
      return 32;
    default:
      return 0;
    }
  case I386:
    switch (Type) {
    case IMAGE_REL_I386_DIR32:
    case IMAGE_REL_I386_REL32:
      return 32;
    default:
      return 0;
    }
  case ARMNT:
    switch (Type) {
    case IMAGE_REL_ARM_ADDR32:
      return 32;
    default:
      return 0;
    }
  case ARM64:
    switch (Type) {
    case IMAGE_REL_ARM64_ADDR64:
      return 64;
    case IMAGE_REL_ARM64_ADDR32:
      return 32;
    default:
      return 0;
    }
  default:
    llvm_unreachable("unknown machine type");
  }
}

// MinGW specific.
|
|
|
|
// Append information to the provided vector about all relocations that
|
|
|
|
// need to be handled at runtime as runtime pseudo relocations (references
|
|
|
|
// to a module local variable, which turned out to actually need to be
|
|
|
|
// imported from another DLL).
|
|
|
|
void SectionChunk::getRuntimePseudoRelocs(
|
|
|
|
std::vector<RuntimePseudoReloc> &Res) {
|
|
|
|
for (const coff_relocation &Rel : Relocs) {
|
2018-09-26 14:13:39 +08:00
|
|
|
auto *Target =
|
|
|
|
dyn_cast_or_null<Defined>(File->getSymbol(Rel.SymbolTableIndex));
|
[COFF] Support MinGW automatic dllimport of data
Normally, in order to reference exported data symbols from a different
DLL, the declarations need to have the dllimport attribute, in order to
use the __imp_<var> symbol (which contains the address of the actual
variable) instead of the variable itself directly. This isn't an issue
in the same way for functions, since any reference to the function without
the dllimport attribute will end up as a reference to a thunk which loads
the actual target function from the import address table (IAT).
GNU ld, in MinGW environments, supports automatically importing data
symbols from DLLs, even if the references didn't have the appropriate
dllimport attribute. Since the PE/COFF format doesn't support the kind
of relocations that this would require, MinGW's CRT startup code
has a custom framework of its own for manually fixing the missing
relocations once the module is loaded and the target addresses in the IAT
are known.
For this to work, the linker (originally in GNU ld) creates a list of
remaining references needing fixup, which the runtime processes on
startup before handing over control to user code.
While this feature is rather controversial, it's one of the main features
allowing Unix-style libraries to be used on Windows without any extra
porting effort.
Some sort of automatic fixing of data imports is also necessary for the
Itanium C++ ABI on Windows (as clang implements it right now) for importing
vtable pointers in certain cases; see D43184 for some discussion on that.
The runtime pseudo relocation handler supports 8/16/32/64 bit addresses,
either PC relative references (like IMAGE_REL_*_REL32*) or absolute
references (IMAGE_REL_AMD64_ADDR64, IMAGE_REL_AMD64_ADDR32,
IMAGE_REL_I386_DIR32). On linking, the relocation is handled as a
relocation against the corresponding IAT slot. For the absolute references,
a normal base relocation is created, to update the embedded address
in case the image is loaded at a different address.
The list of runtime pseudo relocations contains the RVA of the
imported symbol (the IAT slot), the RVA of the location the relocation
should be applied to, and the size of the memory location. When the
relocations are fixed at runtime, the difference between the actual
IAT slot value and the IAT slot address is added to the reference,
doing the right thing for both absolute and relative references.
With this patch alone, things work fine for i386 binaries, and mostly
for x86_64 binaries, with feature parity with GNU ld. Despite this,
there are a few gotchas:
- References to data from within code work fine on both x86 architectures,
since their relocations consist of plain 32 or 64 bit absolute/relative
references. On ARM and AArch64, references to data don't consist of
a plain 32 or 64 bit embedded address or offset in the code. On ARMNT,
it's usually a MOVW+MOVT instruction pair represented by an
IMAGE_REL_ARM_MOV32T relocation (each instruction containing 16 bits of
the target address); on AArch64, it's usually an ADRP+ADD/LDR/STR
instruction pair with an even more complex encoding, storing a PC
relative address (with a range of +/- 4 GB). This could theoretically
be remedied by extending the runtime pseudo relocation handler with new
relocation types, to support these instruction encodings. This isn't an
issue for GCC/GNU ld since they don't support Windows on ARMNT/AArch64.
- For x86_64, if references in code are encoded as 32 bit PC relative
offsets, the runtime relocation will fail if the target turns out to be
out of range for a 32 bit offset.
- Fixing up the relocations at runtime requires making sections writable
if necessary, with the VirtualProtect function. In Windows Store/UWP apps,
this function is forbidden.
These limitations are addressed by a few later patches in lld and llvm.
Differential Revision: https://reviews.llvm.org/D50917
llvm-svn: 340726
2018-08-27 16:43:31 +08:00
|
|
|
if (!Target || !Target->IsRuntimePseudoReloc)
|
|
|
|
continue;
|
|
|
|
int SizeInBits = getRuntimePseudoRelocSize(Rel.Type);
|
|
|
|
if (SizeInBits == 0) {
|
|
|
|
error("unable to automatically import from " + Target->getName() +
|
|
|
|
" with relocation type " +
|
|
|
|
File->getCOFFObj()->getRelocationTypeName(Rel.Type) + " in " +
|
|
|
|
toString(File));
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
// SizeInBits is used to initialize the Flags field; currently no
|
|
|
|
// other flags are defined.
|
|
|
|
Res.emplace_back(
|
|
|
|
RuntimePseudoReloc(Target, this, Rel.VirtualAddress, SizeInBits));
|
|
|
|
}
|
|
|
|
}
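// As a rough illustration of the consumer side (a sketch, not lld or
// mingw-w64 code): given one v2 entry {SymRVA, TargetRVA, SizeInBits} and a
// module base pointer `Base` (both names are illustrative), the runtime
// handler effectively does
//
//   uint8_t *Slot = Base + SymRVA;             // the IAT slot
//   uint8_t *Loc  = Base + TargetRVA;          // the reference to patch
//   uint64_t Delta = *(uintptr_t *)Slot - (uintptr_t)Slot;
//   if (SizeInBits == 32)
//     *(uint32_t *)Loc += (uint32_t)Delta;
//   else if (SizeInBits == 64)
//     *(uint64_t *)Loc += Delta;
//
// i.e. the difference between the value stored in the IAT slot and the slot's
// own address is added to the reference, which works for both absolute and
// PC-relative references as described in the commit message above.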
|
|
|
|
|
2015-05-29 03:09:30 +08:00
|
|
|
bool SectionChunk::hasData() const {
|
|
|
|
return !(Header->Characteristics & IMAGE_SCN_CNT_UNINITIALIZED_DATA);
|
|
|
|
}
|
|
|
|
|
2018-04-20 04:03:24 +08:00
|
|
|
uint32_t SectionChunk::getOutputCharacteristics() const {
|
2018-04-21 05:23:16 +08:00
|
|
|
return Header->Characteristics & (PermMask | TypeMask);
|
2015-05-29 03:09:30 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
bool SectionChunk::isCOMDAT() const {
|
|
|
|
return Header->Characteristics & IMAGE_SCN_LNK_COMDAT;
|
|
|
|
}
|
|
|
|
|
2015-06-26 03:10:58 +08:00
|
|
|
void SectionChunk::printDiscardedMessage() const {
|
2015-09-17 05:30:40 +08:00
|
|
|
// Removed by dead-stripping. If it's removed by ICF, ICF already
|
|
|
|
// printed out the name, so don't repeat that here.
|
2017-11-28 09:30:07 +08:00
|
|
|
if (Sym && this == Repl)
|
|
|
|
message("Discarded " + Sym->getName());
|
2015-05-29 03:09:30 +08:00
|
|
|
}
|
|
|
|
|
2015-06-24 08:00:52 +08:00
|
|
|
StringRef SectionChunk::getDebugName() {
|
2015-08-21 15:01:10 +08:00
|
|
|
if (Sym)
|
|
|
|
return Sym->getName();
|
|
|
|
return "";
|
2015-06-24 08:00:52 +08:00
|
|
|
}
|
|
|
|
|
2015-06-26 01:56:36 +08:00
|
|
|
ArrayRef<uint8_t> SectionChunk::getContents() const {
|
|
|
|
ArrayRef<uint8_t> A;
|
|
|
|
File->getCOFFObj()->getSectionContents(Header, A);
|
|
|
|
return A;
|
|
|
|
}
|
|
|
|
|
2015-09-22 03:36:51 +08:00
|
|
|
void SectionChunk::replace(SectionChunk *Other) {
|
2018-05-15 02:36:51 +08:00
|
|
|
Alignment = std::max(Alignment, Other->Alignment);
|
2015-09-26 00:20:24 +08:00
|
|
|
Other->Repl = Repl;
|
2015-09-22 03:36:51 +08:00
|
|
|
Other->Live = false;
|
2015-06-24 12:36:52 +08:00
|
|
|
}
|
|
|
|
|
2018-10-05 20:56:46 +08:00
|
|
|
uint32_t SectionChunk::getSectionNumber() const {
|
|
|
|
DataRefImpl R;
|
|
|
|
R.p = reinterpret_cast<uintptr_t>(Header);
|
|
|
|
SectionRef S(R, File->getCOFFObj());
|
|
|
|
return S.getIndex() + 1;
|
|
|
|
}
|
|
|
|
|
2015-06-08 11:17:07 +08:00
|
|
|
CommonChunk::CommonChunk(const COFFSymbolRef S) : Sym(S) {
|
2015-06-20 15:25:45 +08:00
|
|
|
// Common symbols are aligned on natural boundaries up to 32 bytes.
|
|
|
|
// This is what MSVC link.exe does.
|
2017-09-14 05:54:55 +08:00
|
|
|
Alignment = std::min(uint64_t(32), PowerOf2Ceil(Sym.getValue()));
|
2017-08-15 03:07:27 +08:00
|
|
|
}
|
|
|
|
|
2018-04-20 04:03:24 +08:00
|
|
|
uint32_t CommonChunk::getOutputCharacteristics() const {
|
2015-05-29 03:09:30 +08:00
|
|
|
return IMAGE_SCN_CNT_UNINITIALIZED_DATA | IMAGE_SCN_MEM_READ |
|
|
|
|
IMAGE_SCN_MEM_WRITE;
|
|
|
|
}
|
|
|
|
|
2015-09-20 07:28:57 +08:00
|
|
|
void StringChunk::writeTo(uint8_t *Buf) const {
|
2015-08-14 11:30:59 +08:00
|
|
|
memcpy(Buf + OutputSectionOff, Str.data(), Str.size());
|
2018-12-01 00:34:56 +08:00
|
|
|
Buf[OutputSectionOff + Str.size()] = '\0';
|
2015-05-29 03:45:43 +08:00
|
|
|
}
|
|
|
|
|
2015-07-25 09:16:06 +08:00
|
|
|
ImportThunkChunkX64::ImportThunkChunkX64(Defined *S) : ImpSymbol(S) {
|
2015-06-27 02:28:56 +08:00
|
|
|
// The Intel Optimization Manual says that all branch targets
|
|
|
|
// should be 16-byte aligned. The MSVC linker does this too.
|
2017-09-14 05:54:55 +08:00
|
|
|
Alignment = 16;
|
2015-06-27 02:28:56 +08:00
|
|
|
}
|
|
|
|
|
2015-09-20 07:28:57 +08:00
|
|
|
void ImportThunkChunkX64::writeTo(uint8_t *Buf) const {
|
2015-08-14 11:30:59 +08:00
|
|
|
memcpy(Buf + OutputSectionOff, ImportThunkX86, sizeof(ImportThunkX86));
|
2015-07-25 09:16:06 +08:00
|
|
|
// The first two bytes are the opcode of an indirect JMP instruction. Fill in
// its operand: a 32-bit displacement relative to the end of the instruction,
// which is why getSize() is subtracted below.
|
2015-08-14 11:30:59 +08:00
|
|
|
write32le(Buf + OutputSectionOff + 2, ImpSymbol->getRVA() - RVA - getSize());
|
2015-07-25 09:16:06 +08:00
|
|
|
}
|
|
|
|
|
2015-07-25 09:44:32 +08:00
|
|
|
void ImportThunkChunkX86::getBaserels(std::vector<Baserel> *Res) {
|
|
|
|
Res->emplace_back(getRVA() + 2);
|
2015-07-15 08:25:38 +08:00
|
|
|
}
|
|
|
|
|
2015-09-20 07:28:57 +08:00
|
|
|
void ImportThunkChunkX86::writeTo(uint8_t *Buf) const {
|
2015-08-14 11:30:59 +08:00
|
|
|
memcpy(Buf + OutputSectionOff, ImportThunkX86, sizeof(ImportThunkX86));
|
2015-06-06 12:07:39 +08:00
|
|
|
// The first two bytes are the opcode of an indirect JMP instruction. Fill in
// its operand: the absolute virtual address of the IAT slot, which is why a
// base relocation is emitted at offset 2 (see getBaserels above).
|
2015-08-14 11:30:59 +08:00
|
|
|
write32le(Buf + OutputSectionOff + 2,
|
|
|
|
ImpSymbol->getRVA() + Config->ImageBase);
|
2015-05-29 03:09:30 +08:00
|
|
|
}
|
|
|
|
|
2015-07-25 11:39:29 +08:00
|
|
|
void ImportThunkChunkARM::getBaserels(std::vector<Baserel> *Res) {
|
|
|
|
Res->emplace_back(getRVA(), IMAGE_REL_BASED_ARM_MOV32T);
|
|
|
|
}
|
|
|
|
|
2015-09-20 07:28:57 +08:00
|
|
|
void ImportThunkChunkARM::writeTo(uint8_t *Buf) const {
|
2015-08-14 11:30:59 +08:00
|
|
|
memcpy(Buf + OutputSectionOff, ImportThunkARM, sizeof(ImportThunkARM));
|
2015-07-25 11:39:29 +08:00
|
|
|
// Fill in the MOVW and MOVT immediate operands.
|
2015-08-14 11:30:59 +08:00
|
|
|
applyMOV32T(Buf + OutputSectionOff, ImpSymbol->getRVA() + Config->ImageBase);
|
2015-07-25 11:39:29 +08:00
|
|
|
}
|
|
|
|
|
2017-07-11 15:22:44 +08:00
|
|
|
void ImportThunkChunkARM64::writeTo(uint8_t *Buf) const {
|
|
|
|
int64_t Off = ImpSymbol->getRVA() & 0xfff;
|
|
|
|
memcpy(Buf + OutputSectionOff, ImportThunkARM64, sizeof(ImportThunkARM64));
|
2018-05-04 14:06:27 +08:00
|
|
|
applyArm64Addr(Buf + OutputSectionOff, ImpSymbol->getRVA(), RVA, 12);
|
2017-07-11 15:22:44 +08:00
|
|
|
applyArm64Ldr(Buf + OutputSectionOff + 4, Off);
|
|
|
|
}
|
|
|
|
|
2018-09-25 18:59:29 +08:00
|
|
|
// A Thumb2, PIC, non-interworking range extension thunk.
|
|
|
|
const uint8_t ArmThunk[] = {
|
|
|
|
0x40, 0xf2, 0x00, 0x0c, // P: movw ip,:lower16:S - (P + (L1-P) + 4)
|
|
|
|
0xc0, 0xf2, 0x00, 0x0c, // movt ip,:upper16:S - (P + (L1-P) + 4)
|
|
|
|
0xe7, 0x44, // L1: add pc, ip
|
|
|
|
};
|
|
|
|
|
|
|
|
size_t RangeExtensionThunk::getSize() const {
|
|
|
|
assert(Config->Machine == ARMNT);
|
|
|
|
return sizeof(ArmThunk);
|
|
|
|
}
|
|
|
|
|
|
|
|
void RangeExtensionThunk::writeTo(uint8_t *Buf) const {
|
|
|
|
assert(Config->Machine == ARMNT);
|
|
|
|
uint64_t Offset = Target->getRVA() - RVA - 12;
|
|
|
|
memcpy(Buf + OutputSectionOff, ArmThunk, sizeof(ArmThunk));
|
|
|
|
applyMOV32T(Buf + OutputSectionOff, uint32_t(Offset));
|
|
|
|
}
|
|
|
|
|
2015-07-25 09:44:32 +08:00
|
|
|
void LocalImportChunk::getBaserels(std::vector<Baserel> *Res) {
|
|
|
|
Res->emplace_back(getRVA());
|
2015-07-03 04:33:50 +08:00
|
|
|
}
|
|
|
|
|
2018-10-12 01:45:58 +08:00
|
|
|
size_t LocalImportChunk::getSize() const { return Config->Wordsize; }
|
2015-07-10 05:15:58 +08:00
|
|
|
|
2015-09-20 07:28:57 +08:00
|
|
|
void LocalImportChunk::writeTo(uint8_t *Buf) const {
|
2015-07-10 05:15:58 +08:00
|
|
|
if (Config->is64()) {
|
2015-08-14 11:30:59 +08:00
|
|
|
write64le(Buf + OutputSectionOff, Sym->getRVA() + Config->ImageBase);
|
2015-07-10 05:15:58 +08:00
|
|
|
} else {
|
2015-08-14 11:30:59 +08:00
|
|
|
write32le(Buf + OutputSectionOff, Sym->getRVA() + Config->ImageBase);
|
2015-07-10 05:15:58 +08:00
|
|
|
}
|
2015-07-03 04:33:50 +08:00
|
|
|
}
|
|
|
|
|
2018-02-06 09:58:26 +08:00
|
|
|
void RVATableChunk::writeTo(uint8_t *Buf) const {
|
2015-08-14 11:30:59 +08:00
|
|
|
ulittle32_t *Begin = reinterpret_cast<ulittle32_t *>(Buf + OutputSectionOff);
|
2015-07-25 07:51:14 +08:00
|
|
|
size_t Cnt = 0;
|
2018-02-06 09:58:26 +08:00
|
|
|
for (const ChunkAndOffset &CO : Syms)
|
|
|
|
Begin[Cnt++] = CO.InputChunk->getRVA() + CO.Offset;
|
2015-07-25 07:51:14 +08:00
|
|
|
std::sort(Begin, Begin + Cnt);
|
2018-02-06 09:58:26 +08:00
|
|
|
assert(std::unique(Begin, Begin + Cnt) == Begin + Cnt &&
|
|
|
|
"RVA tables should be de-duplicated");
|
2015-07-25 07:51:14 +08:00
|
|
|
}
|
|
|
|
|
[COFF] Support MinGW automatic dllimport of data
2018-08-27 16:43:31 +08:00
|
|
|
// MinGW specific, for the "automatic import of variables from DLLs" feature.
|
|
|
|
size_t PseudoRelocTableChunk::getSize() const {
|
|
|
|
if (Relocs.empty())
|
|
|
|
return 0;
|
|
|
|
return 12 + 12 * Relocs.size();
|
|
|
|
}
|
|
|
|
|
|
|
|
// MinGW specific.
|
|
|
|
void PseudoRelocTableChunk::writeTo(uint8_t *Buf) const {
|
|
|
|
if (Relocs.empty())
|
|
|
|
return;
|
|
|
|
|
|
|
|
ulittle32_t *Table = reinterpret_cast<ulittle32_t *>(Buf + OutputSectionOff);
|
|
|
|
// This is the list header, to signal the runtime pseudo relocation v2
|
|
|
|
// format.
|
|
|
|
Table[0] = 0;
|
|
|
|
Table[1] = 0;
|
|
|
|
Table[2] = 1;
|
|
|
|
|
|
|
|
size_t Idx = 3;
|
|
|
|
for (const RuntimePseudoReloc &RPR : Relocs) {
|
|
|
|
Table[Idx + 0] = RPR.Sym->getRVA();
|
|
|
|
Table[Idx + 1] = RPR.Target->getRVA() + RPR.TargetOffset;
|
|
|
|
Table[Idx + 2] = RPR.Flags;
|
|
|
|
Idx += 3;
|
|
|
|
}
|
|
|
|
}
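// Viewed as data, the table written above is equivalent to this layout
// (illustrative only; lld does not define such structs):
//
//   struct PseudoRelocHeaderV2 { uint32_t Zero1, Zero2, Version; }; // 0, 0, 1
//   struct PseudoRelocEntryV2  { uint32_t SymRVA, TargetRVA, Flags; };
//
// one header followed by Relocs.size() entries, matching the
// 12 + 12 * Relocs.size() bytes returned by getSize().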
|
|
|
|
|
2017-04-25 11:31:10 +08:00
|
|
|
// Windows-specific. This class represents a block in .reloc section.
|
|
|
|
// The format is described below.
|
|
|
|
//
|
|
|
|
// On Windows, each DLL is linked against a fixed base address and
|
|
|
|
// usually loaded to that address. However, if there's already another
|
|
|
|
// DLL that overlaps, the loader has to relocate it. To do that, DLLs
|
|
|
|
// contain .reloc sections which contain offsets that need to be fixed
|
2017-04-27 04:20:05 +08:00
|
|
|
// up at runtime. If the loader finds that a DLL cannot be loaded to its
|
2017-04-25 11:31:10 +08:00
|
|
|
// desired base address, it loads it somewhere else, and adds <actual
|
|
|
|
// base address> - <desired base address> to each offset that is
|
2017-04-27 04:20:05 +08:00
|
|
|
// specified by the .reloc section. In ELF terms, .reloc sections
|
|
|
|
// contain relative relocations in REL format (as opposed to RELA.)
|
2017-04-25 11:31:10 +08:00
|
|
|
//
|
2017-04-27 04:20:05 +08:00
|
|
|
// This already significantly reduces the size of relocations compared
|
|
|
|
// to ELF .rel.dyn, but Windows does more to reduce it (probably because
|
|
|
|
// it was invented for PCs in the late '80s or early '90s.) Offsets in
|
|
|
|
// .reloc are grouped by 4 KB page (the low 12 bits of an RVA are the
// offset within the page), and
|
|
|
|
// offsets sharing the same page address are stored consecutively to
|
|
|
|
// represent them with less space. This is very similar to the page
|
|
|
|
// table which is grouped by (multiple stages of) pages.
|
2017-04-25 11:31:10 +08:00
|
|
|
//
|
2017-04-27 04:20:05 +08:00
|
|
|
// For example, let's say we have 0x00030, 0x00500, 0x00700, 0x00A00,
|
|
|
|
// 0x20004, and 0x20008 in a .reloc section for x64. The uppermost 4
|
|
|
|
// bits of each entry encode the relocation type, IMAGE_REL_BASED_DIR64
// (0xA). In the section, they
|
|
|
|
// are represented like this:
|
2017-04-25 11:31:10 +08:00
|
|
|
//
|
|
|
|
// 0x00000 -- page address (4 bytes)
|
|
|
|
// 16 -- size of this block (4 bytes)
|
2017-04-27 04:20:05 +08:00
|
|
|
// 0xA030 -- entries (2 bytes each)
|
|
|
|
// 0xA500
|
|
|
|
// 0xA700
|
|
|
|
// 0xAA00
|
2017-04-25 11:31:10 +08:00
|
|
|
// 0x20000 -- page address (4 bytes)
|
|
|
|
// 12 -- size of this block (4 bytes)
|
2017-04-27 04:20:05 +08:00
|
|
|
// 0xA004 -- entries (2 bytes each)
|
|
|
|
// 0xA008
|
2017-04-25 11:31:10 +08:00
|
|
|
//
|
2017-04-27 04:20:05 +08:00
|
|
|
// Usually we have a lot of relocations for each page, so the number of
|
2017-04-27 03:50:49 +08:00
|
|
|
// bytes for one .reloc entry is close to 2 bytes on average.
|
2015-07-25 09:44:32 +08:00
|
|
|
BaserelChunk::BaserelChunk(uint32_t Page, Baserel *Begin, Baserel *End) {
|
2015-06-15 09:23:58 +08:00
|
|
|
// The block header consists of a 4-byte page RVA and a 4-byte block size.
|
|
|
|
// Each entry is 2 bytes; the last entry may be a zero padding entry
// (type IMAGE_REL_BASED_ABSOLUTE, which the loader ignores), added so that
// the block size is a multiple of 4.
|
2016-01-15 04:53:50 +08:00
|
|
|
Data.resize(alignTo((End - Begin) * 2 + 8, 4));
|
2015-06-15 09:23:58 +08:00
|
|
|
uint8_t *P = Data.data();
|
|
|
|
write32le(P, Page);
|
|
|
|
write32le(P + 4, Data.size());
|
|
|
|
P += 8;
|
2015-07-25 09:44:32 +08:00
|
|
|
for (Baserel *I = Begin; I != End; ++I) {
|
|
|
|
write16le(P, (I->Type << 12) | (I->RVA - Page));
|
2015-06-15 09:23:58 +08:00
|
|
|
P += 2;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-09-20 07:28:57 +08:00
|
|
|
void BaserelChunk::writeTo(uint8_t *Buf) const {
|
2015-08-14 11:30:59 +08:00
|
|
|
memcpy(Buf + OutputSectionOff, Data.data(), Data.size());
|
2015-06-15 09:23:58 +08:00
|
|
|
}
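// For illustration (this is the loader's job, not lld's): applying one block
// written above amounts to roughly the following, where `Image` is the module
// base, `P` points at the block, and `ActualBase`/`PreferredBase` are the
// actual and preferred image bases (all names are illustrative):
//
//   uint32_t PageRVA   = read32le(P);
//   uint32_t BlockSize = read32le(P + 4);
//   uint64_t Delta     = ActualBase - PreferredBase;
//   for (uint8_t *E = P + 8; E != P + BlockSize; E += 2) {
//     uint16_t Entry = read16le(E);
//     uint32_t RVA   = PageRVA + (Entry & 0xfff);
//     switch (Entry >> 12) {
//     case IMAGE_REL_BASED_HIGHLOW:
//       write32le(Image + RVA, read32le(Image + RVA) + (uint32_t)Delta);
//       break;
//     case IMAGE_REL_BASED_DIR64:
//       write64le(Image + RVA, read64le(Image + RVA) + Delta);
//       break;
//     case IMAGE_REL_BASED_ABSOLUTE:
//       break; // padding entry, ignored
//     }
//   }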
|
|
|
|
|
2015-07-25 09:44:32 +08:00
|
|
|
uint8_t Baserel::getDefaultType() {
|
2015-07-26 05:54:50 +08:00
|
|
|
switch (Config->Machine) {
|
|
|
|
case AMD64:
|
2017-11-06 15:02:33 +08:00
|
|
|
case ARM64:
|
2015-07-25 09:44:32 +08:00
|
|
|
return IMAGE_REL_BASED_DIR64;
|
2015-07-26 05:54:50 +08:00
|
|
|
case I386:
|
2017-07-26 04:00:37 +08:00
|
|
|
case ARMNT:
|
2015-07-25 09:44:32 +08:00
|
|
|
return IMAGE_REL_BASED_HIGHLOW;
|
|
|
|
default:
|
|
|
|
llvm_unreachable("unknown machine type");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-03-16 05:14:02 +08:00
|
|
|
std::map<uint32_t, MergeChunk *> MergeChunk::Instances;
|
|
|
|
|
|
|
|
MergeChunk::MergeChunk(uint32_t Alignment)
|
|
|
|
: Builder(StringTableBuilder::RAW, Alignment) {
|
|
|
|
this->Alignment = Alignment;
|
|
|
|
}
|
|
|
|
|
|
|
|
void MergeChunk::addSection(SectionChunk *C) {
|
|
|
|
auto *&MC = Instances[C->Alignment];
|
|
|
|
if (!MC)
|
|
|
|
MC = make<MergeChunk>(C->Alignment);
|
|
|
|
MC->Sections.push_back(C);
|
|
|
|
}
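// Note: identical section contents added to the same MergeChunk collapse to a
// single copy; StringTableBuilder deduplicates equal strings, and
// finalizeContents() below points every live input chunk at the offset of its
// (possibly shared) copy in the builder's output.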
|
|
|
|
|
|
|
|
void MergeChunk::finalizeContents() {
|
2018-09-25 18:59:29 +08:00
|
|
|
if (!Finalized) {
|
|
|
|
for (SectionChunk *C : Sections)
|
|
|
|
if (C->Live)
|
|
|
|
Builder.add(toStringRef(C->getContents()));
|
|
|
|
Builder.finalize();
|
|
|
|
Finalized = true;
|
|
|
|
}
|
2018-03-16 05:14:02 +08:00
|
|
|
|
|
|
|
for (SectionChunk *C : Sections) {
|
2018-08-31 15:45:20 +08:00
|
|
|
if (!C->Live)
|
2018-03-16 05:14:02 +08:00
|
|
|
continue;
|
|
|
|
size_t Off = Builder.getOffset(toStringRef(C->getContents()));
|
|
|
|
C->setOutputSection(Out);
|
|
|
|
C->setRVA(RVA + Off);
|
|
|
|
C->OutputSectionOff = OutputSectionOff + Off;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-04-20 04:03:24 +08:00
|
|
|
uint32_t MergeChunk::getOutputCharacteristics() const {
|
2018-03-16 05:14:02 +08:00
|
|
|
return IMAGE_SCN_MEM_READ | IMAGE_SCN_CNT_INITIALIZED_DATA;
|
|
|
|
}
|
|
|
|
|
|
|
|
size_t MergeChunk::getSize() const {
|
|
|
|
return Builder.getSize();
|
|
|
|
}
|
|
|
|
|
|
|
|
void MergeChunk::writeTo(uint8_t *Buf) const {
|
|
|
|
Builder.write(Buf + OutputSectionOff);
|
|
|
|
}
|
|
|
|
|
[COFF] Provide __CTOR_LIST__ and __DTOR_LIST__ symbols for MinGW
MinGW uses this kind of list terminator symbol for traversing
the constructor/destructor lists. These list terminators are
actual pointer entries in the lists, with the values 0 and
(uintptr_t)-1 (instead of just symbols pointing to the start/end
of the list).
(This mechanism exists in both the mingw-w64 CRT startup code and
in libgcc; normally the mingw-w64 one is used, but a DLL build of
libgcc uses the libgcc one. Therefore it's not trivial to change
the mechanism without lots of cross-project synchronization and
potentially invalidating some combinations of old/new versions
of them.)
When mingw-w64 has been used with lld so far, the CRT startup object
files have provided these symbols, ending up with different,
incompatible builds of the CRT startup object files depending on
whether binutils or lld is going to be used.
In order to avoid the need for different configurations of the CRT startup
object files depending on which linker is to be used, provide these symbols
in lld instead. (Mingw-w64 checks at build time whether the linker
provides these symbols or not.) This unifies this particular detail
between the two linkers.
This does disallow the use of the very latest lld with older versions
of mingw-w64 (the configure check for the list was added recently;
earlier it simply checked whether the CRT was built with gcc or clang),
and requires rebuilding the mingw-w64 CRT. But the number of users of
lld+mingw is still low enough that such a change should be tolerable,
and it unifies this aspect of the toolchains, easing interoperability
between the toolchains in the future.
The actual test for this feature is added in ctors_dtors_priority.s,
but a number of other tests that checked absolute output addresses
are updated.
Differential Revision: https://reviews.llvm.org/D52053
llvm-svn: 342294
2018-09-15 06:26:59 +08:00
|
|
|
// MinGW specific.
|
2018-10-12 01:45:58 +08:00
|
|
|
size_t AbsolutePointerChunk::getSize() const { return Config->Wordsize; }
|
[COFF] Provide __CTOR_LIST__ and __DTOR_LIST__ symbols for MinGW
2018-09-15 06:26:59 +08:00
|
|
|
|
|
|
|
void AbsolutePointerChunk::writeTo(uint8_t *Buf) const {
|
|
|
|
if (Config->is64()) {
|
|
|
|
write64le(Buf + OutputSectionOff, Value);
|
|
|
|
} else {
|
|
|
|
write32le(Buf + OutputSectionOff, Value);
|
|
|
|
}
|
|
|
|
}
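// For context, a sketch of the consumer side (not lld code), assuming the
// conventional GNU layout where __CTOR_LIST__[0] is (uintptr_t)-1 and the
// list ends with a 0 entry:
//
//   typedef void (*CtorFn)(void);
//   extern CtorFn __CTOR_LIST__[];
//   size_t N = 0;
//   while ((uintptr_t)__CTOR_LIST__[N + 1] != 0)
//     ++N;                        // count entries up to the 0 terminator
//   while (N > 0)
//     __CTOR_LIST__[N--]();       // run constructors
//
// The exact traversal order and bookkeeping differ between the mingw-w64 CRT
// and libgcc implementations mentioned in the commit message above.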
|
|
|
|
|
2015-05-29 03:09:30 +08:00
|
|
|
} // namespace coff
|
|
|
|
} // namespace lld
|