tsan: Enable 48-bit VMA support on aarch64

This patch adds 48-bit VMA support for tsan on aarch64.  As with the
current aarch64 mappings, the 48-bit VMA layout also supports PIE
executables.  This constrains the mapping mechanism, because the PIE
address range (usually 0aaaaXXXXXXXX) makes it harder to create a
mask/xor value that covers all memory regions.  I think it is possible
to create a larger application VMA range by either dropping PIE support
or tuning the current ranges.
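
To make the mask/xor constraint concrete, here is a minimal standalone
sketch (not part of the commit) of how the shadow address is derived,
assuming the runtime's usual mask/xor formula and its 8-byte-cell /
4-word shadow parameters; the sample PIE address is hypothetical:

    // Sketch only: kAppMemMsk/kAppMemXor copied from Mapping48 below;
    // kShadowCell and kShadowCnt assumed from the tsan runtime.
    #include <cstdint>
    #include <cstdio>

    typedef uint64_t uptr;

    static const uptr kShadowCell = 8;  // app bytes per shadow cell
    static const uptr kShadowCnt  = 4;  // shadow words kept per cell
    static const uptr kAppMemMsk  = 0x0fff800000000ull;
    static const uptr kAppMemXor  = 0x0000800000000ull;

    // Drop the region-selecting high bits, flip the xor bit, then
    // scale into the shadow range.
    static uptr MemToShadow(uptr x) {
      return ((x & ~(kAppMemMsk | (kShadowCell - 1))) ^ kAppMemXor)
             * kShadowCnt;
    }

    int main() {
      uptr pie_addr = 0x0aaaa12345678ull;  // hypothetical mid-range address
      // Lands inside [kShadowBeg, kShadowEnd) = [0x0002000000000,
      // 0x0004000000000) of Mapping48.
      printf("app %llx -> shadow %llx\n",
             (unsigned long long)pie_addr,
             (unsigned long long)MemToShadow(pie_addr));
      return 0;
    }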

It also slightly changes the way addresses are packed in the SyncVar
structure: previously it assumed x86_64 (47 bits) as the maximum VMA
range.  Since the ID is 14 bits wide, shifting by 48 bits should be ok.
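
A small sketch (not part of the commit) of the new packing, mirroring
the tsan_sync.h hunk below; GetLsb is assumed to return the n
least-significant bits, as in sanitizer_common, and the values are
hypothetical.  With the old 47-bit packing, bit 47 of a 48-bit VMA
address would have been clipped:

    #include <cassert>
    #include <cstdint>

    typedef uint64_t u64;
    typedef uint64_t uptr;

    static u64 GetLsb(u64 v, int bits) {
      return v & (((u64)1 << bits) - 1);
    }

    // 48 lsb hold the address; the bits above hold the low part of uid.
    static u64 MakeId(uptr addr, u64 uid) {
      return GetLsb((u64)addr | (uid << 48), 60);
    }

    static uptr SplitId(u64 id, u64 *uid) {
      *uid = id >> 48;
      return (uptr)GetLsb(id, 48);
    }

    int main() {
      u64 uid = 0;
      // A full 48-bit address now survives the round trip unclipped.
      uptr addr = SplitId(MakeId(0x0aaaa12345678ull, 0x2bc), &uid);
      assert(addr == 0x0aaaa12345678ull);
      assert(uid == 0x2bc);
      return 0;
    }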

Tested on x86_64, ppc64le and aarch64 (39-bit and 48-bit VMA).

llvm-svn: 277137
Adhemerval Zanella 2016-07-29 12:45:35 +00:00
parent cbaca42a03
commit 4f9de1e7bf
4 changed files with 85 additions and 42 deletions

compiler-rt/lib/tsan/rtl/tsan_platform.h

@@ -169,6 +169,27 @@ struct Mapping42 {
   static const uptr kVdsoBeg       = 0x37f00000000ull;
 };
 
+struct Mapping48 {
+  static const uptr kLoAppMemBeg   = 0x0000000001000ull;
+  static const uptr kLoAppMemEnd   = 0x0000200000000ull;
+  static const uptr kShadowBeg     = 0x0002000000000ull;
+  static const uptr kShadowEnd     = 0x0004000000000ull;
+  static const uptr kMetaShadowBeg = 0x0005000000000ull;
+  static const uptr kMetaShadowEnd = 0x0006000000000ull;
+  static const uptr kMidAppMemBeg  = 0x0aaaa00000000ull;
+  static const uptr kMidAppMemEnd  = 0x0aaaf00000000ull;
+  static const uptr kMidShadowOff  = 0x0aaa800000000ull;
+  static const uptr kTraceMemBeg   = 0x0f06000000000ull;
+  static const uptr kTraceMemEnd   = 0x0f06200000000ull;
+  static const uptr kHeapMemBeg    = 0x0ffff00000000ull;
+  static const uptr kHeapMemEnd    = 0x0ffff00000000ull;
+  static const uptr kHiAppMemBeg   = 0x0ffff00000000ull;
+  static const uptr kHiAppMemEnd   = 0x1000000000000ull;
+  static const uptr kAppMemMsk     = 0x0fff800000000ull;
+  static const uptr kAppMemXor     = 0x0000800000000ull;
+  static const uptr kVdsoBeg       = 0xffff000000000ull;
+};
+
 // Indicates the runtime will define the memory regions at runtime.
 #define TSAN_RUNTIME_VMA 1
 // Indicates that mapping defines a mid range memory segment.
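
Note that kHeapMemBeg equals kHeapMemEnd above: the heap region is
deliberately empty, which the tsan_rtl.cc hunk below accounts for.  A
hypothetical compile-time check of the layout (not in the commit):

    #include <cstdint>
    typedef uint64_t uptr;

    static const uptr kLoAppMemEnd   = 0x0000200000000ull;
    static const uptr kShadowBeg     = 0x0002000000000ull;
    static const uptr kShadowEnd     = 0x0004000000000ull;
    static const uptr kMetaShadowBeg = 0x0005000000000ull;
    static const uptr kHeapMemBeg    = 0x0ffff00000000ull;
    static const uptr kHeapMemEnd    = 0x0ffff00000000ull;

    static_assert(kLoAppMemEnd <= kShadowBeg, "low app mem below shadow");
    static_assert(kShadowEnd <= kMetaShadowBeg, "shadow below meta shadow");
    // Empty by design: aarch64 does not use the 64-bit allocator.
    static_assert(kHeapMemBeg == kHeapMemEnd, "heap region is empty");

    int main() { return 0; }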
@@ -362,11 +383,13 @@ uptr MappingImpl(void) {
 template<int Type>
 uptr MappingArchImpl(void) {
 #ifdef __aarch64__
-  if (vmaSize == 39)
-    return MappingImpl<Mapping39, Type>();
-  else
-    return MappingImpl<Mapping42, Type>();
+  switch (vmaSize) {
+    case 39: return MappingImpl<Mapping39, Type>();
+    case 42: return MappingImpl<Mapping42, Type>();
+    case 48: return MappingImpl<Mapping48, Type>();
+  }
+  DCHECK(0);
+  return 0;
 #elif defined(__powerpc64__)
   if (vmaSize == 44)
     return MappingImpl<Mapping44, Type>();
@@ -513,11 +536,13 @@ bool IsAppMemImpl(uptr mem) {
 ALWAYS_INLINE
 bool IsAppMem(uptr mem) {
 #ifdef __aarch64__
-  if (vmaSize == 39)
-    return IsAppMemImpl<Mapping39>(mem);
-  else
-    return IsAppMemImpl<Mapping42>(mem);
+  switch (vmaSize) {
+    case 39: return IsAppMemImpl<Mapping39>(mem);
+    case 42: return IsAppMemImpl<Mapping42>(mem);
+    case 48: return IsAppMemImpl<Mapping48>(mem);
+  }
+  DCHECK(0);
+  return false;
 #elif defined(__powerpc64__)
   if (vmaSize == 44)
     return IsAppMemImpl<Mapping44>(mem);
@@ -538,11 +563,13 @@ bool IsShadowMemImpl(uptr mem) {
 ALWAYS_INLINE
 bool IsShadowMem(uptr mem) {
 #ifdef __aarch64__
-  if (vmaSize == 39)
-    return IsShadowMemImpl<Mapping39>(mem);
-  else
-    return IsShadowMemImpl<Mapping42>(mem);
+  switch (vmaSize) {
+    case 39: return IsShadowMemImpl<Mapping39>(mem);
+    case 42: return IsShadowMemImpl<Mapping42>(mem);
+    case 48: return IsShadowMemImpl<Mapping48>(mem);
+  }
+  DCHECK(0);
+  return false;
 #elif defined(__powerpc64__)
   if (vmaSize == 44)
     return IsShadowMemImpl<Mapping44>(mem);
@@ -563,11 +590,13 @@ bool IsMetaMemImpl(uptr mem) {
 ALWAYS_INLINE
 bool IsMetaMem(uptr mem) {
 #ifdef __aarch64__
-  if (vmaSize == 39)
-    return IsMetaMemImpl<Mapping39>(mem);
-  else
-    return IsMetaMemImpl<Mapping42>(mem);
+  switch (vmaSize) {
+    case 39: return IsMetaMemImpl<Mapping39>(mem);
+    case 42: return IsMetaMemImpl<Mapping42>(mem);
+    case 48: return IsMetaMemImpl<Mapping48>(mem);
+  }
+  DCHECK(0);
+  return false;
 #elif defined(__powerpc64__)
   if (vmaSize == 44)
     return IsMetaMemImpl<Mapping44>(mem);
@@ -598,11 +627,13 @@ uptr MemToShadowImpl(uptr x) {
 ALWAYS_INLINE
 uptr MemToShadow(uptr x) {
 #ifdef __aarch64__
-  if (vmaSize == 39)
-    return MemToShadowImpl<Mapping39>(x);
-  else
-    return MemToShadowImpl<Mapping42>(x);
+  switch (vmaSize) {
+    case 39: return MemToShadowImpl<Mapping39>(x);
+    case 42: return MemToShadowImpl<Mapping42>(x);
+    case 48: return MemToShadowImpl<Mapping48>(x);
+  }
+  DCHECK(0);
+  return 0;
 #elif defined(__powerpc64__)
   if (vmaSize == 44)
     return MemToShadowImpl<Mapping44>(x);
@@ -631,11 +662,13 @@ u32 *MemToMetaImpl(uptr x) {
 ALWAYS_INLINE
 u32 *MemToMeta(uptr x) {
 #ifdef __aarch64__
-  if (vmaSize == 39)
-    return MemToMetaImpl<Mapping39>(x);
-  else
-    return MemToMetaImpl<Mapping42>(x);
+  switch (vmaSize) {
+    case 39: return MemToMetaImpl<Mapping39>(x);
+    case 42: return MemToMetaImpl<Mapping42>(x);
+    case 48: return MemToMetaImpl<Mapping48>(x);
+  }
+  DCHECK(0);
+  return 0;
 #elif defined(__powerpc64__)
   if (vmaSize == 44)
     return MemToMetaImpl<Mapping44>(x);
@@ -674,11 +707,13 @@ uptr ShadowToMemImpl(uptr s) {
 ALWAYS_INLINE
 uptr ShadowToMem(uptr s) {
 #ifdef __aarch64__
-  if (vmaSize == 39)
-    return ShadowToMemImpl<Mapping39>(s);
-  else
-    return ShadowToMemImpl<Mapping42>(s);
+  switch (vmaSize) {
+    case 39: return ShadowToMemImpl<Mapping39>(s);
+    case 42: return ShadowToMemImpl<Mapping42>(s);
+    case 48: return ShadowToMemImpl<Mapping48>(s);
+  }
+  DCHECK(0);
+  return 0;
 #elif defined(__powerpc64__)
   if (vmaSize == 44)
     return ShadowToMemImpl<Mapping44>(s);
@@ -707,11 +742,13 @@ uptr GetThreadTraceImpl(int tid) {
 ALWAYS_INLINE
 uptr GetThreadTrace(int tid) {
 #ifdef __aarch64__
-  if (vmaSize == 39)
-    return GetThreadTraceImpl<Mapping39>(tid);
-  else
-    return GetThreadTraceImpl<Mapping42>(tid);
+  switch (vmaSize) {
+    case 39: return GetThreadTraceImpl<Mapping39>(tid);
+    case 42: return GetThreadTraceImpl<Mapping42>(tid);
+    case 48: return GetThreadTraceImpl<Mapping48>(tid);
+  }
+  DCHECK(0);
+  return 0;
 #elif defined(__powerpc64__)
   if (vmaSize == 44)
     return GetThreadTraceImpl<Mapping44>(tid);
@@ -735,11 +772,13 @@ uptr GetThreadTraceHeaderImpl(int tid) {
 ALWAYS_INLINE
 uptr GetThreadTraceHeader(int tid) {
 #ifdef __aarch64__
-  if (vmaSize == 39)
-    return GetThreadTraceHeaderImpl<Mapping39>(tid);
-  else
-    return GetThreadTraceHeaderImpl<Mapping42>(tid);
+  switch (vmaSize) {
+    case 39: return GetThreadTraceHeaderImpl<Mapping39>(tid);
+    case 42: return GetThreadTraceHeaderImpl<Mapping42>(tid);
+    case 48: return GetThreadTraceHeaderImpl<Mapping48>(tid);
+  }
+  DCHECK(0);
+  return 0;
 #elif defined(__powerpc64__)
   if (vmaSize == 44)
     return GetThreadTraceHeaderImpl<Mapping44>(tid);

compiler-rt/lib/tsan/rtl/tsan_platform_linux.cc

@@ -208,9 +208,9 @@ void InitializePlatformEarly() {
   vmaSize =
     (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);
 #if defined(__aarch64__)
-  if (vmaSize != 39 && vmaSize != 42) {
+  if (vmaSize != 39 && vmaSize != 42 && vmaSize != 48) {
     Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
-    Printf("FATAL: Found %d - Supported 39 and 42\n", vmaSize);
+    Printf("FATAL: Found %d - Supported 39, 42 and 48\n", vmaSize);
     Die();
   }
 #elif defined(__powerpc64__)
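
For illustration (not part of the commit), the probe above works because
the stack is mapped near the top of the userspace VMA, so the index of
the most significant set bit of a frame address, plus one, yields the
VMA width.  A hypothetical equivalent using a GCC/Clang builtin:

    #include <cstdio>

    int main() {
      int local = 0;
      unsigned long frame = (unsigned long)&local;  // ~GET_CURRENT_FRAME()
      // MostSignificantSetBitIndex(frame) + 1 == 64 - clz(frame).
      int vmaSize = 64 - __builtin_clzl(frame);
      printf("detected VMA size: %d bits\n", vmaSize);
      return 0;
    }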

compiler-rt/lib/tsan/rtl/tsan_rtl.cc

@@ -287,6 +287,10 @@ void MapThreadTrace(uptr addr, uptr size, const char *name) {
 static void CheckShadowMapping() {
   uptr beg, end;
   for (int i = 0; GetUserRegion(i, &beg, &end); i++) {
+    // Skip cases for empty regions (heap definition for architectures that
+    // do not use 64-bit allocator).
+    if (beg == end)
+      continue;
     VPrintf(3, "checking shadow region %p-%p\n", beg, end);
     for (uptr p0 = beg; p0 <= end; p0 += (end - beg) / 4) {
       for (int x = -1; x <= 1; x++) {

compiler-rt/lib/tsan/rtl/tsan_sync.h

@@ -50,16 +50,16 @@ struct SyncVar {
   void Reset(Processor *proc);
 
   u64 GetId() const {
-    // 47 lsb is addr, then 14 bits is low part of uid, then 3 zero bits.
-    return GetLsb((u64)addr | (uid << 47), 61);
+    // 48 lsb is addr, then 14 bits is low part of uid, then 2 zero bits.
+    return GetLsb((u64)addr | (uid << 48), 60);
   }
   bool CheckId(u64 uid) const {
     CHECK_EQ(uid, GetLsb(uid, 14));
     return GetLsb(this->uid, 14) == uid;
   }
   static uptr SplitId(u64 id, u64 *uid) {
-    *uid = id >> 47;
-    return (uptr)GetLsb(id, 47);
+    *uid = id >> 48;
+    return (uptr)GetLsb(id, 48);
   }
 };