//===- AMDGPU.cpp ---------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "InputFiles.h"
|
2017-06-17 01:32:43 +08:00
|
|
|
#include "Symbols.h"
|
|
|
|
#include "Target.h"
|
|
|
|
#include "lld/Common/ErrorHandler.h"
|
2022-02-08 13:53:34 +08:00
|
|
|
#include "llvm/BinaryFormat/ELF.h"
|
2017-06-17 01:32:43 +08:00
|
|
|
#include "llvm/Support/Endian.h"
|
|
|
|
|
|
|
|
using namespace llvm;
|
|
|
|
using namespace llvm::object;
|
|
|
|
using namespace llvm::support::endian;
|
|
|
|
using namespace llvm::ELF;
|
2020-05-15 13:18:58 +08:00
|
|
|
using namespace lld;
|
|
|
|
using namespace lld::elf;
|
2017-06-17 01:32:43 +08:00
|
|
|
|
|
|
|
namespace {
// Target hooks for the AMDGPU (amdhsa) ELF target.
class AMDGPU final : public TargetInfo {
private:
  // Merge e_flags of all input files for code-object V2/V3, where the
  // whole e_flags word must match exactly across inputs.
  uint32_t calcEFlagsV3() const;
  // Merge e_flags for code-object V4+, where the mach field must match
  // and each feature field (xnack, sramecc) is merged per its own rules.
  uint32_t calcEFlagsV4() const;

public:
  AMDGPU();
  uint32_t calcEFlags() const override;
  void relocate(uint8_t *loc, const Relocation &rel,
                uint64_t val) const override;
  RelExpr getRelExpr(RelType type, const Symbol &s,
                     const uint8_t *loc) const override;
  RelType getDynRel(RelType type) const override;
};
} // namespace
AMDGPU::AMDGPU() {
  // Dynamic relocation for addresses that are an offset from the load base.
  relativeRel = R_AMDGPU_RELATIVE64;
  // GOT slots hold 64-bit absolute addresses.
  gotRel = R_AMDGPU_ABS64;
  // Dynamic relocation that resolves to a symbol's absolute address
  // (the only type getDynRel passes through).
  symbolicRel = R_AMDGPU_ABS64;
}
// Read the ELF header flags of an input object. AMDGPU objects are
// always 64-bit little-endian, so the cast is unconditional.
static uint32_t getEFlags(InputFile *file) {
  const auto &hdr = cast<ObjFile<ELF64LE>>(file)->getObj().getHeader();
  return hdr.e_flags;
}
uint32_t AMDGPU::calcEFlagsV3() const {
|
2017-10-25 03:40:03 +08:00
|
|
|
uint32_t ret = getEFlags(objectFiles[0]);
|
2017-10-25 03:05:32 +08:00
|
|
|
|
|
|
|
// Verify that all input files have the same e_flags.
|
2017-10-25 03:40:03 +08:00
|
|
|
for (InputFile *f : makeArrayRef(objectFiles).slice(1)) {
|
|
|
|
if (ret == getEFlags(f))
|
|
|
|
continue;
|
|
|
|
error("incompatible e_flags: " + toString(f));
|
|
|
|
return 0;
|
2017-10-25 03:05:32 +08:00
|
|
|
}
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2021-03-25 01:39:47 +08:00
|
|
|
uint32_t AMDGPU::calcEFlagsV4() const {
|
|
|
|
uint32_t retMach = getEFlags(objectFiles[0]) & EF_AMDGPU_MACH;
|
|
|
|
uint32_t retXnack = getEFlags(objectFiles[0]) & EF_AMDGPU_FEATURE_XNACK_V4;
|
|
|
|
uint32_t retSramEcc =
|
|
|
|
getEFlags(objectFiles[0]) & EF_AMDGPU_FEATURE_SRAMECC_V4;
|
|
|
|
|
|
|
|
// Verify that all input files have compatible e_flags (same mach, all
|
|
|
|
// features in the same category are either ANY, ANY and ON, or ANY and OFF).
|
|
|
|
for (InputFile *f : makeArrayRef(objectFiles).slice(1)) {
|
|
|
|
if (retMach != (getEFlags(f) & EF_AMDGPU_MACH)) {
|
|
|
|
error("incompatible mach: " + toString(f));
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (retXnack == EF_AMDGPU_FEATURE_XNACK_UNSUPPORTED_V4 ||
|
|
|
|
(retXnack != EF_AMDGPU_FEATURE_XNACK_ANY_V4 &&
|
|
|
|
(getEFlags(f) & EF_AMDGPU_FEATURE_XNACK_V4)
|
|
|
|
!= EF_AMDGPU_FEATURE_XNACK_ANY_V4)) {
|
|
|
|
if (retXnack != (getEFlags(f) & EF_AMDGPU_FEATURE_XNACK_V4)) {
|
|
|
|
error("incompatible xnack: " + toString(f));
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
if (retXnack == EF_AMDGPU_FEATURE_XNACK_ANY_V4)
|
|
|
|
retXnack = getEFlags(f) & EF_AMDGPU_FEATURE_XNACK_V4;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (retSramEcc == EF_AMDGPU_FEATURE_SRAMECC_UNSUPPORTED_V4 ||
|
|
|
|
(retSramEcc != EF_AMDGPU_FEATURE_SRAMECC_ANY_V4 &&
|
|
|
|
(getEFlags(f) & EF_AMDGPU_FEATURE_SRAMECC_V4) !=
|
|
|
|
EF_AMDGPU_FEATURE_SRAMECC_ANY_V4)) {
|
|
|
|
if (retSramEcc != (getEFlags(f) & EF_AMDGPU_FEATURE_SRAMECC_V4)) {
|
|
|
|
error("incompatible sramecc: " + toString(f));
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
if (retSramEcc == EF_AMDGPU_FEATURE_SRAMECC_ANY_V4)
|
|
|
|
retSramEcc = getEFlags(f) & EF_AMDGPU_FEATURE_SRAMECC_V4;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return retMach | retXnack | retSramEcc;
|
|
|
|
}
|
|
|
|
|
|
|
|
uint32_t AMDGPU::calcEFlags() const {
|
2022-04-09 01:22:23 +08:00
|
|
|
if (objectFiles.empty())
|
|
|
|
return 0;
|
2021-03-25 01:39:47 +08:00
|
|
|
|
|
|
|
uint8_t abiVersion = cast<ObjFile<ELF64LE>>(objectFiles[0])->getObj()
|
|
|
|
.getHeader().e_ident[EI_ABIVERSION];
|
|
|
|
switch (abiVersion) {
|
|
|
|
case ELFABIVERSION_AMDGPU_HSA_V2:
|
|
|
|
case ELFABIVERSION_AMDGPU_HSA_V3:
|
|
|
|
return calcEFlagsV3();
|
|
|
|
case ELFABIVERSION_AMDGPU_HSA_V4:
|
2022-03-22 04:54:08 +08:00
|
|
|
case ELFABIVERSION_AMDGPU_HSA_V5:
|
2021-03-25 01:39:47 +08:00
|
|
|
return calcEFlagsV4();
|
|
|
|
default:
|
|
|
|
error("unknown abi version: " + Twine(abiVersion));
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-01-23 13:39:16 +08:00
|
|
|
void AMDGPU::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
|
|
|
|
switch (rel.type) {
|
2017-06-17 01:32:43 +08:00
|
|
|
case R_AMDGPU_ABS32:
|
|
|
|
case R_AMDGPU_GOTPCREL:
|
|
|
|
case R_AMDGPU_GOTPCREL32_LO:
|
|
|
|
case R_AMDGPU_REL32:
|
|
|
|
case R_AMDGPU_REL32_LO:
|
|
|
|
write32le(loc, val);
|
|
|
|
break;
|
|
|
|
case R_AMDGPU_ABS64:
|
2018-06-12 05:42:53 +08:00
|
|
|
case R_AMDGPU_REL64:
|
2017-06-17 01:32:43 +08:00
|
|
|
write64le(loc, val);
|
|
|
|
break;
|
|
|
|
case R_AMDGPU_GOTPCREL32_HI:
|
|
|
|
case R_AMDGPU_REL32_HI:
|
|
|
|
write32le(loc, val >> 32);
|
|
|
|
break;
|
2021-07-14 02:28:00 +08:00
|
|
|
case R_AMDGPU_REL16: {
|
|
|
|
int64_t simm = (static_cast<int64_t>(val) - 4) / 4;
|
|
|
|
checkInt(loc, simm, 16, rel);
|
|
|
|
write16le(loc, simm);
|
|
|
|
break;
|
|
|
|
}
|
2017-06-17 01:32:43 +08:00
|
|
|
default:
|
2019-02-15 02:02:20 +08:00
|
|
|
llvm_unreachable("unknown relocation");
|
2017-06-17 01:32:43 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-11-04 05:21:47 +08:00
|
|
|
RelExpr AMDGPU::getRelExpr(RelType type, const Symbol &s,
                           const uint8_t *loc) const {
  switch (type) {
  // Absolute symbol value.
  case R_AMDGPU_ABS32:
  case R_AMDGPU_ABS64:
    return R_ABS;
  // PC-relative to the relocated location.
  case R_AMDGPU_REL16:
  case R_AMDGPU_REL32:
  case R_AMDGPU_REL32_LO:
  case R_AMDGPU_REL32_HI:
  case R_AMDGPU_REL64:
    return R_PC;
  // PC-relative reference to the symbol's GOT entry.
  case R_AMDGPU_GOTPCREL:
  case R_AMDGPU_GOTPCREL32_LO:
  case R_AMDGPU_GOTPCREL32_HI:
    return R_GOT_PC;
  default:
    error(getErrorLocation(loc) + "unknown relocation (" + Twine(type) +
          ") against symbol " + toString(s));
    return R_NONE;
  }
}
[ELF][ARM][AARCH64][MIPS][PPC] Simplify the logic to create R_*_RELATIVE for absolute relocation types in writable sections
Summary:
Our rule to create R_*_RELATIVE for absolute relocation types were
loose. D63121 made it stricter but it failed to create R_*_RELATIVE for
R_ARM_TARGET1 and R_PPC64_TOC. rLLD363236 worked around that by
reinstating the original behavior for ARM and PPC64.
This patch is an attempt to simplify the logic.
Note, in ld.bfd, R_ARM_TARGET2 --target2=abs also creates
R_ARM_RELATIVE. This seems a very uncommon scenario (moreover,
--target2=got-rel is the default), so I do not implement any logic
related to it.
Also, delete R_AARCH64_ABS32 from AArch64::getDynRel. We don't have
working ILP32 support yet. Allowing it would create an incorrect
R_AARCH64_RELATIVE.
For MIPS, the (if SymbolRel, then RelativeRel) code is to keep its
behavior unchanged.
Note, in ppc64-abs64-dyn.s, R_PPC64_TOC gets an incorrect addend because
computeAddend() doesn't compute the correct address. We seem to have the
wrong behavior for a long time. The important thing seems that a dynamic
relocation R_PPC64_TOC should not be created as the dynamic loader will
error R_PPC64_TOC is not supported.
Reviewers: atanasyan, grimar, peter.smith, ruiu, sfertile, espindola
Reviewed By: ruiu
Differential Revision: https://reviews.llvm.org/D63383
llvm-svn: 363928
2019-06-20 22:00:08 +08:00
|
|
|
RelType AMDGPU::getDynRel(RelType type) const {
|
|
|
|
if (type == R_AMDGPU_ABS64)
|
|
|
|
return type;
|
|
|
|
return R_AMDGPU_NONE;
|
|
|
|
}
|
|
|
|
|
2020-05-15 13:18:58 +08:00
|
|
|
// Return the lazily-constructed singleton describing the AMDGPU target.
TargetInfo *elf::getAMDGPUTargetInfo() {
  static AMDGPU singleton;
  return &singleton;
}