Make tp offset computation target independent.

This adds direct support for computing offsets from the thread pointer
for both TLS variants. Of the architectures we support, variant 1 is used
only by aarch64 (though that does not seem to be documented anywhere).

llvm-svn: 270243
Rafael Espindola 2016-05-20 17:41:09 +00:00
parent a553012874
commit 8818ca69dc
3 changed files with 36 additions and 33 deletions
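Context for the diffs below: the heart of the change is the thread-pointer (TP) offset rule that the target-independent expression code now applies for both TLS layouts. The following is a minimal standalone sketch of that rule, not lld code; the helper and parameter names are invented for illustration.

#include <cstdint>

// Round Value up to the next multiple of Align (a power of two), mirroring
// what llvm::alignTo does for the TCB size in the patch.
static uint64_t alignTo(uint64_t Value, uint64_t Align) {
  return (Value + Align - 1) & ~(Align - 1);
}

// TP offset of a TLS symbol, as computed after this change.
//   SymOff:     the symbol's offset from the start of the PT_TLS segment
//   TcbSize:    nonzero selects variant 1 (TCB sits between TP and the data)
//   TlsAlign:   p_align of the PT_TLS segment
//   TlsMemSize: p_memsz of the PT_TLS segment
static uint64_t tpOffset(uint64_t SymOff, uint64_t TcbSize, uint64_t TlsAlign,
                         uint64_t TlsMemSize) {
  if (TcbSize) // variant 1, e.g. aarch64 with TcbSize = 16
    return SymOff + alignTo(TcbSize, TlsAlign);
  // Variant 2, e.g. x86 and x86-64: the data sits below TP, so the result
  // wraps around to a negative offset when interpreted as a signed value.
  return SymOff - TlsMemSize;
}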

View File

@@ -180,14 +180,17 @@ getSymVA(uint32_t Type, typename ELFT::uint A, typename ELFT::uint P,
     return Body.getGotVA<ELFT>() + A - P;
   case R_GOTONLY_PC:
     return Out<ELFT>::Got->getVA() + A - P;
+  case R_RELAX_TLS_GD_TO_LE:
+  case R_RELAX_TLS_IE_TO_LE:
+  case R_RELAX_TLS_LD_TO_LE:
   case R_TLS:
+    if (Target->TcbSize)
+      return Body.getVA<ELFT>(A) +
+             alignTo(Target->TcbSize, Out<ELFT>::TlsPhdr->p_align);
     return Body.getVA<ELFT>(A) - Out<ELFT>::TlsPhdr->p_memsz;
   case R_NEG_TLS:
     return Out<ELF32LE>::TlsPhdr->p_memsz - Body.getVA<ELFT>(A);
   case R_ABS:
-  case R_RELAX_TLS_GD_TO_LE:
-  case R_RELAX_TLS_IE_TO_LE:
-  case R_RELAX_TLS_LD_TO_LE:
     return Body.getVA<ELFT>(A);
   case R_GOT_OFF:
     return Body.getGotOffset<ELFT>() + A;

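To make the two branches of the R_TLS case above concrete, here is a small compile-time check with made-up numbers (a PT_TLS segment with p_align = 16 and p_memsz = 0x20, and a symbol at offset 0x8; none of these values come from the patch).

#include <cstdint>

constexpr uint64_t Align = 16, MemSize = 0x20, SymOff = 0x8;

// Variant 1 (aarch64, TcbSize = 16): the data sits above TP, after the TCB
// padded out to the segment alignment.
static_assert(SymOff + ((16 + Align - 1) & ~(Align - 1)) == 0x18, "variant 1");

// Variant 2 (x86/x86-64, TcbSize = 0): the data sits below TP, so the offset
// is negative.
static_assert(static_cast<int64_t>(SymOff - MemSize) == -0x18, "variant 2");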
View File

@@ -150,9 +150,6 @@ public:
   void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
   void relaxTlsGdToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
   void relaxTlsIeToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
-private:
-  static const uint64_t TcbSize = 16;
 };
 class AMDGPUTargetInfo final : public TargetInfo {
@@ -403,7 +400,7 @@ void X86TargetInfo::relaxTlsGdToLe(uint8_t *Loc, uint32_t Type,
       0x81, 0xe8, 0x00, 0x00, 0x00, 0x00 // subl 0(%ebx), %eax
   };
   memcpy(Loc - 3, Inst, sizeof(Inst));
-  relocateOne(Loc + 5, R_386_32, Out<ELF32LE>::TlsPhdr->p_memsz - Val);
+  relocateOne(Loc + 5, R_386_32, -Val);
 }
 void X86TargetInfo::relaxTlsGdToIe(uint8_t *Loc, uint32_t Type,
@@ -460,13 +457,13 @@ void X86TargetInfo::relaxTlsIeToLe(uint8_t *Loc, uint32_t Type,
     else
       *Op = 0x80 | Reg | (Reg << 3);
   }
-  relocateOne(Loc, R_386_TLS_LE, Val - Out<ELF32LE>::TlsPhdr->p_memsz);
+  relocateOne(Loc, R_386_TLS_LE, Val);
 }
 void X86TargetInfo::relaxTlsLdToLe(uint8_t *Loc, uint32_t Type,
                                    uint64_t Val) const {
   if (Type == R_386_TLS_LDO_32) {
-    relocateOne(Loc, R_386_TLS_LE, Val - Out<ELF32LE>::TlsPhdr->p_memsz);
+    relocateOne(Loc, R_386_TLS_LE, Val);
     return;
   }
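A note on the i386 relaxations above: the relax hooks now receive a Val that getSymVA has already biased to a TP offset (for variant 2, Val == SymOff - p_memsz), so the per-target p_memsz arithmetic can simply disappear. The sketch below, with invented names and numbers, checks that the old and new expressions written by the GD-to-LE relaxation are the same value.

#include <cassert>
#include <cstdint>

int main() {
  uint64_t SymOff = 0x10;  // symbol offset within the PT_TLS segment (made up)
  uint64_t MemSize = 0x30; // p_memsz of the PT_TLS segment (made up)

  uint64_t OldVal = SymOff;           // what relaxTlsGdToLe used to be passed
  uint64_t NewVal = SymOff - MemSize; // what it is passed now (R_TLS bias)

  // Old code wrote p_memsz - Val; new code writes -Val. Same value either way.
  assert(MemSize - OldVal == 0 - NewVal);
  return 0;
}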
@@ -605,8 +602,7 @@ void X86_64TargetInfo::relaxTlsGdToLe(uint8_t *Loc, uint32_t Type,
       0x48, 0x8d, 0x80, 0x00, 0x00, 0x00, 0x00 // lea x@tpoff,%rax
   };
   memcpy(Loc - 4, Inst, sizeof(Inst));
-  relocateOne(Loc + 8, R_X86_64_TPOFF32,
-              Val + 4 - Out<ELF64LE>::TlsPhdr->p_memsz);
+  relocateOne(Loc + 8, R_X86_64_TPOFF32, Val + 4);
 }
 void X86_64TargetInfo::relaxTlsGdToIe(uint8_t *Loc, uint32_t Type,
@@ -658,7 +654,7 @@ void X86_64TargetInfo::relaxTlsIeToLe(uint8_t *Loc, uint32_t Type,
   if (*Prefix == 0x4c)
     *Prefix = (IsMov || RspAdd) ? 0x49 : 0x4d;
   *RegSlot = (IsMov || RspAdd) ? (0xc0 | Reg) : (0x80 | Reg | (Reg << 3));
-  relocateOne(Loc, R_X86_64_TPOFF32, Val + 4 - Out<ELF64LE>::TlsPhdr->p_memsz);
+  relocateOne(Loc, R_X86_64_TPOFF32, Val + 4);
 }
 void X86_64TargetInfo::relaxTlsLdToLe(uint8_t *Loc, uint32_t Type,
@@ -673,11 +669,11 @@ void X86_64TargetInfo::relaxTlsLdToLe(uint8_t *Loc, uint32_t Type,
   // mov %fs:0,%rax
   // leaq bar@tpoff(%rax), %rcx
   if (Type == R_X86_64_DTPOFF64) {
-    write64le(Loc, Val - Out<ELF64LE>::TlsPhdr->p_memsz);
+    write64le(Loc, Val);
     return;
   }
   if (Type == R_X86_64_DTPOFF32) {
-    relocateOne(Loc, R_X86_64_TPOFF32, Val - Out<ELF64LE>::TlsPhdr->p_memsz);
+    relocateOne(Loc, R_X86_64_TPOFF32, Val);
     return;
   }
@@ -936,6 +932,10 @@ AArch64TargetInfo::AArch64TargetInfo() {
   TlsOffsetRel = R_AARCH64_TLS_DTPREL64;
   PltEntrySize = 16;
   PltZeroSize = 32;
+  // It doesn't seem to be documented anywhere, but tls on aarch64 uses variant
+  // 1 of the tls structures and the tcb size is 16.
+  TcbSize = 16;
 }
 RelExpr AArch64TargetInfo::getRelExpr(uint32_t Type,
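The constructor above now publishes the TCB size through the base class instead of the private constant that the earlier hunk in this file removes. One reason the raw size (rather than a pre-aligned offset) is stored: the variant 1 bias is alignTo(TcbSize, p_align), and p_align belongs to the output PT_TLS segment, which is only known at layout time. A small illustration, not lld code:

#include <cstdint>

constexpr uint64_t alignTo(uint64_t Value, uint64_t Align) {
  return (Value + Align - 1) & ~(Align - 1);
}

// With TcbSize = 16, the TLS data starts alignTo(16, p_align) bytes past TP.
static_assert(alignTo(16, 8) == 16, "p_align <= 16: data right after the TCB");
static_assert(alignTo(16, 64) == 64, "larger p_align pushes the data out");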
@@ -943,6 +943,11 @@ RelExpr AArch64TargetInfo::getRelExpr(uint32_t Type,
   switch (Type) {
   default:
     return R_ABS;
+  case R_AARCH64_TLSLE_ADD_TPREL_HI12:
+  case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
+    return R_TLS;
   case R_AARCH64_CALL26:
   case R_AARCH64_CONDBR19:
   case R_AARCH64_JUMP26:
@@ -1138,17 +1143,13 @@ void AArch64TargetInfo::relocateOne(uint8_t *Loc, uint32_t Type,
     checkInt<16>(Val, Type);
     or32le(Loc, (Val & 0xFFFC) << 3);
     break;
-  case R_AARCH64_TLSLE_ADD_TPREL_HI12: {
-    uint64_t V = llvm::alignTo(TcbSize, Out<ELF64LE>::TlsPhdr->p_align) + Val;
-    checkInt<24>(V, Type);
-    updateAArch64Add(Loc, (V & 0xFFF000) >> 12);
+  case R_AARCH64_TLSLE_ADD_TPREL_HI12:
+    checkInt<24>(Val, Type);
+    updateAArch64Add(Loc, (Val & 0xFFF000) >> 12);
     break;
-  }
-  case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC: {
-    uint64_t V = llvm::alignTo(TcbSize, Out<ELF64LE>::TlsPhdr->p_align) + Val;
-    updateAArch64Add(Loc, V & 0xFFF);
+  case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
+    updateAArch64Add(Loc, Val & 0xFFF);
     break;
-  }
   default:
     fatal("unrecognized reloc " + Twine(Type));
   }
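With the bias folded into Val by getSymVA, the TPREL handlers above reduce to pure bit slicing: the HI12 add (which the CPU shifts left by 12) gets bits [23:12] of the offset and the LO12 add gets bits [11:0]. A worked example with a made-up offset:

#include <cstdint>

constexpr uint64_t Val = 0x12345; // hypothetical TP offset
static_assert(((Val & 0xFFF000) >> 12) == 0x12, "immediate for TPREL_HI12");
static_assert((Val & 0xFFF) == 0x345, "immediate for TPREL_LO12_NC");
static_assert((0x12u << 12) + 0x345u == Val, "the two adds recombine the offset");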
@@ -1166,9 +1167,7 @@ void AArch64TargetInfo::relaxTlsGdToLe(uint8_t *Loc, uint32_t Type,
   // movk x0, #0x10
   // nop
   // nop
-  uint64_t TPOff = llvm::alignTo(TcbSize, Out<ELF64LE>::TlsPhdr->p_align);
-  uint64_t X = Val + TPOff;
-  checkUInt<32>(X, Type);
+  checkUInt<32>(Val, Type);
   uint32_t NewInst;
   switch (Type) {
@@ -1179,11 +1178,11 @@ void AArch64TargetInfo::relaxTlsGdToLe(uint8_t *Loc, uint32_t Type,
     break;
   case R_AARCH64_TLSDESC_ADR_PAGE21:
     // movz
-    NewInst = 0xd2a00000 | (((X >> 16) & 0xffff) << 5);
+    NewInst = 0xd2a00000 | (((Val >> 16) & 0xffff) << 5);
     break;
   case R_AARCH64_TLSDESC_LD64_LO12_NC:
     // movk
-    NewInst = 0xf2800000 | ((X & 0xffff) << 5);
+    NewInst = 0xf2800000 | ((Val & 0xffff) << 5);
     break;
   default:
     llvm_unreachable("unsupported Relocation for TLS GD to LE relax");
@@ -1193,20 +1192,18 @@ void AArch64TargetInfo::relaxTlsGdToLe(uint8_t *Loc, uint32_t Type,
 void AArch64TargetInfo::relaxTlsIeToLe(uint8_t *Loc, uint32_t Type,
                                        uint64_t Val) const {
-  uint64_t TPOff = llvm::alignTo(TcbSize, Out<ELF64LE>::TlsPhdr->p_align);
-  uint64_t X = Val + TPOff;
-  checkUInt<32>(X, Type);
+  checkUInt<32>(Val, Type);
   uint32_t Inst = read32le(Loc);
   uint32_t NewInst;
   if (Type == R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21) {
     // Generate movz.
     unsigned RegNo = (Inst & 0x1f);
-    NewInst = (0xd2a00000 | RegNo) | (((X >> 16) & 0xffff) << 5);
+    NewInst = (0xd2a00000 | RegNo) | (((Val >> 16) & 0xffff) << 5);
   } else if (Type == R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC) {
     // Generate movk
     unsigned RegNo = (Inst & 0x1f);
-    NewInst = (0xf2800000 | RegNo) | ((X & 0xffff) << 5);
+    NewInst = (0xf2800000 | RegNo) | ((Val & 0xffff) << 5);
   } else {
     llvm_unreachable("invalid Relocation for TLS IE to LE Relax");
   }
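Likewise for the IE-to-LE (and GD-to-LE) relaxations above: the 32-bit TP offset is materialized with a movz/movk pair, movz carrying bits [31:16] (hw = 1, i.e. shifted left by 16) and movk carrying bits [15:0]. A small check with a made-up offset:

#include <cstdint>

constexpr uint64_t Val = 0x00010010; // hypothetical TP offset, fits in 32 bits
static_assert(((Val >> 16) & 0xffff) == 0x0001, "movz immediate");
static_assert((Val & 0xffff) == 0x0010, "movk immediate");
static_assert(((0x0001u << 16) | 0x0010u) == Val, "the pair reassembles the offset");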

View File

@@ -83,6 +83,9 @@ public:
   // to support lazy loading.
   unsigned GotPltHeaderEntriesNum = 3;
+  // Set to 0 for variant 2
+  unsigned TcbSize = 0;
   uint32_t ThunkSize = 0;
   virtual void relaxTlsGdToIe(uint8_t *Loc, uint32_t Type, uint64_t Val) const;
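Finally, the new TargetInfo field above is what makes the computation target independent: a target that uses variant 1 only has to set TcbSize in its constructor, while the default of 0 keeps the variant 2 rule. A hypothetical sketch of that opt-in, with invented type names and no relation to lld's actual class hierarchy:

// Not lld code; both type names below are invented for illustration.
struct TargetInfoSketch {
  unsigned TcbSize = 0; // 0: variant 2 (offset = addr - p_memsz)
};

struct Variant1TargetSketch : TargetInfoSketch {
  Variant1TargetSketch() { TcbSize = 16; } // nonzero: variant 1, e.g. aarch64
};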