[mac/lld] Fix scale computation for vector ops in PAGEOFF12 relocations

With this, llvm-tblgen no longer tries (and fails) to allocate 7953 petabytes
when it runs during the build. Instead, `check-llvm` with lld/mac as host
linker now completes without any failures on an m1 mac.

This vector op handling code matches what happens in:
- ld64's OutputFile::applyFixUps() in OutputFile.cpp for kindStoreARM64PageOff12
- lld.ld64.darwinold's offset12KindFromInstruction() in
  lld/lib/ReaderWriter/MachO/ArchHandler_arm64.cpp for offset12scale16
- RuntimeDyld's decodeAddend() in
  llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOAArch64.h for
  ARM64_RELOC_PAGEOFF12

Fixes PR49444.

Differential Revision: https://reviews.llvm.org/D98053
This commit is contained in:
Nico Weber 2021-03-05 11:17:08 -05:00
parent 2ec43e4167
commit 210cc0738b
2 changed files with 21 additions and 3 deletions

View File

@ -127,8 +127,14 @@ inline uint64_t encodePage21(uint64_t base, uint64_t va) {
// | | imm12 | |
// +-------------------+-----------------------+-------------------+
inline uint64_t encodePageOff12(uint64_t base, uint64_t va) {
int scale = ((base & 0x3b000000) == 0x39000000) ? base >> 30 : 0;
inline uint64_t encodePageOff12(uint32_t base, uint64_t va) {
int scale = 0;
if ((base & 0x3b00'0000) == 0x3900'0000) { // load/store
scale = base >> 30;
if (scale == 0 && (base & 0x0480'0000) == 0x0480'0000) // vector op?
scale = 4;
}
// TODO(gkm): extract embedded addend and warn if != 0
// uint64_t addend = ((base & 0x003FFC00) >> 10);
return (base | bitField(va, scale, 12 - scale, 10));

View File

@ -15,6 +15,8 @@
## PAGE21 relocations are aligned to 4096 bytes
# CHECK-NEXT: adrp x2, [[#]] ; 0x[[#BAZ+4096-128]]
# CHECK-NEXT: ldr x2, [x2, #128]
# CHECK-NEXT: adrp x3, 8 ; 0x8000
# CHECK-NEXT: ldr q0, [x3, #144]
# CHECK-NEXT: ret
# CHECK-LABEL: Contents of (__DATA_CONST,__const) section
@ -22,7 +24,7 @@
# CHECK: [[#PTR_2]] {{0*}}[[#BAZ+123]] 00000000 00000000 00000000
.text
.globl _foo, _bar, _baz
.globl _foo, _bar, _baz, _quux
.p2align 2
_foo:
## Generates ARM64_RELOC_BRANCH26 and ARM64_RELOC_ADDEND
@ -31,6 +33,11 @@ _foo:
adrp x2, _baz@PAGE + 4097
## Generates ARM64_RELOC_PAGEOFF12
ldr x2, [x2, _baz@PAGEOFF]
## Generates ARM64_RELOC_PAGE21
adrp x3, _quux@PAGE
## Generates ARM64_RELOC_PAGEOFF12 with internal slide 4
ldr q0, [x3, _quux@PAGEOFF]
ret
.p2align 2
@ -42,6 +49,11 @@ _bar:
_baz:
.space 1
.p2align 4
_quux:
.quad 0
.quad 80
.section __DATA_CONST,__const
## These generate ARM64_RELOC_UNSIGNED symbol relocations. llvm-mc seems to
## generate UNSIGNED section relocations only for compact unwind sections, so