[compiler-rt] [tsan] Unify aarch64 mapping

This patch unifies the 39- and 42-bit VMA support for AArch64 by using an
external memory read to check the runtime-detected VMA and select the proper
mapping and transformation.  Although slower, this allows the same
instrumented binary to be independent of the kernel.

Along with this change, the patch also fixes some 42-bit failures seen with
ASLR disabled by increasing the upper high app memory threshold, and corrects
the 42-bit madvise range used when large pages are not set.

llvm-svn: 254151
Adhemerval Zanella 2015-11-26 13:10:47 +00:00
parent 4c86a1d97b
commit 066c5f0f92
8 changed files with 524 additions and 253 deletions
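
The core of the change: the per-platform mapping constants move into Mapping
structs (Mapping39/Mapping42 on AArch64), and every accessor dispatches on a
runtime-detected vmaSize.  A minimal standalone sketch of that dispatch
pattern, with illustrative names and values rather than the actual TSan
headers:

#include <cstdint>
typedef uint64_t uptr;

// Per-VMA layouts bundled as compile-time constants (values illustrative).
struct Mapping39 { static const uptr kShadowBeg = 0x0800000000ull; };
struct Mapping42 { static const uptr kShadowBeg = 0x10000000000ull; };

uptr vmaSize;  // detected once at startup

template <typename Mapping>
uptr ShadowBegImpl() { return Mapping::kShadowBeg; }

// Public accessors branch once on the detected VMA size; within each
// branch the constants are still compile-time values.
inline uptr ShadowBeg() {
  return vmaSize == 39 ? ShadowBegImpl<Mapping39>()
                       : ShadowBegImpl<Mapping42>();
}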


@@ -41,21 +41,23 @@ C/C++ on linux/x86_64 and freebsd/x86_64
7e00 0000 0000 - 7e80 0000 0000: -
7e80 0000 0000 - 8000 0000 0000: modules and main thread stack
*/
const uptr kMetaShadowBeg = 0x300000000000ull;
const uptr kMetaShadowEnd = 0x400000000000ull;
const uptr kTraceMemBeg = 0x600000000000ull;
const uptr kTraceMemEnd = 0x620000000000ull;
const uptr kShadowBeg = 0x020000000000ull;
const uptr kShadowEnd = 0x100000000000ull;
const uptr kHeapMemBeg = 0x7d0000000000ull;
const uptr kHeapMemEnd = 0x7e0000000000ull;
const uptr kLoAppMemBeg = 0x000000001000ull;
const uptr kLoAppMemEnd = 0x010000000000ull;
const uptr kHiAppMemBeg = 0x7e8000000000ull;
const uptr kHiAppMemEnd = 0x800000000000ull;
const uptr kAppMemMsk = 0x7c0000000000ull;
const uptr kAppMemXor = 0x020000000000ull;
const uptr kVdsoBeg = 0xf000000000000000ull;
struct Mapping {
static const uptr kMetaShadowBeg = 0x300000000000ull;
static const uptr kMetaShadowEnd = 0x400000000000ull;
static const uptr kTraceMemBeg = 0x600000000000ull;
static const uptr kTraceMemEnd = 0x620000000000ull;
static const uptr kShadowBeg = 0x020000000000ull;
static const uptr kShadowEnd = 0x100000000000ull;
static const uptr kHeapMemBeg = 0x7d0000000000ull;
static const uptr kHeapMemEnd = 0x7e0000000000ull;
static const uptr kLoAppMemBeg = 0x000000001000ull;
static const uptr kLoAppMemEnd = 0x010000000000ull;
static const uptr kHiAppMemBeg = 0x7e8000000000ull;
static const uptr kHiAppMemEnd = 0x800000000000ull;
static const uptr kAppMemMsk = 0x7c0000000000ull;
static const uptr kAppMemXor = 0x020000000000ull;
static const uptr kVdsoBeg = 0xf000000000000000ull;
};
#elif defined(__mips64)
/*
C/C++ on linux/mips64
@@ -71,53 +73,64 @@ fe00 0000 00 - ff00 0000 00: heap
ff00 0000 00 - ff80 0000 00: -
ff80 0000 00 - ffff ffff ff: modules and main thread stack
*/
const uptr kMetaShadowBeg = 0x3000000000ull;
const uptr kMetaShadowEnd = 0x4000000000ull;
const uptr kTraceMemBeg = 0x6000000000ull;
const uptr kTraceMemEnd = 0x6200000000ull;
const uptr kShadowBeg = 0x1400000000ull;
const uptr kShadowEnd = 0x2400000000ull;
const uptr kHeapMemBeg = 0xfe00000000ull;
const uptr kHeapMemEnd = 0xff00000000ull;
const uptr kLoAppMemBeg = 0x0100000000ull;
const uptr kLoAppMemEnd = 0x0200000000ull;
const uptr kHiAppMemBeg = 0xff80000000ull;
const uptr kHiAppMemEnd = 0xffffffffffull;
const uptr kAppMemMsk = 0xfc00000000ull;
const uptr kAppMemXor = 0x0400000000ull;
const uptr kVdsoBeg = 0xfffff00000ull;
struct Mapping {
static const uptr kMetaShadowBeg = 0x3000000000ull;
static const uptr kMetaShadowEnd = 0x4000000000ull;
static const uptr kTraceMemBeg = 0x6000000000ull;
static const uptr kTraceMemEnd = 0x6200000000ull;
static const uptr kShadowBeg = 0x1400000000ull;
static const uptr kShadowEnd = 0x2400000000ull;
static const uptr kHeapMemBeg = 0xfe00000000ull;
static const uptr kHeapMemEnd = 0xff00000000ull;
static const uptr kLoAppMemBeg = 0x0100000000ull;
static const uptr kLoAppMemEnd = 0x0200000000ull;
static const uptr kHiAppMemBeg = 0xff80000000ull;
static const uptr kHiAppMemEnd = 0xffffffffffull;
static const uptr kAppMemMsk = 0xfc00000000ull;
static const uptr kAppMemXor = 0x0400000000ull;
static const uptr kVdsoBeg = 0xfffff00000ull;
};
#elif defined(__aarch64__)
# if SANITIZER_AARCH64_VMA == 39
// AArch64 supports multiple VMA sizes, which lead to multiple address
// transformation functions.  To support these multiple VMA transformations
// and mappings, the TSan runtime for AArch64 uses an external memory read
// (vmaSize) to select which mapping to use.  Although slower, this lets the
// same instrumented binary run on multiple kernels.
/*
C/C++ on linux/aarch64 (39-bit VMA)
0000 4000 00 - 0200 0000 00: main binary
2000 0000 00 - 4000 0000 00: shadow memory
4000 0000 00 - 5000 0000 00: metainfo
5000 0000 00 - 6000 0000 00: -
0000 0010 00 - 0100 0000 00: main binary
0100 0000 00 - 0800 0000 00: -
0800 0000 00 - 1F00 0000 00: shadow memory
1C00 0000 00 - 3100 0000 00: -
3100 0000 00 - 3400 0000 00: metainfo
3400 0000 00 - 6000 0000 00: -
6000 0000 00 - 6200 0000 00: traces
6200 0000 00 - 7d00 0000 00: -
7d00 0000 00 - 7e00 0000 00: heap
7e00 0000 00 - 7fff ffff ff: modules and main thread stack
7c00 0000 00 - 7d00 0000 00: heap
7d00 0000 00 - 7fff ffff ff: modules and main thread stack
*/
const uptr kLoAppMemBeg = 0x0000400000ull;
const uptr kLoAppMemEnd = 0x0200000000ull;
const uptr kShadowBeg = 0x2000000000ull;
const uptr kShadowEnd = 0x4000000000ull;
const uptr kMetaShadowBeg = 0x4000000000ull;
const uptr kMetaShadowEnd = 0x5000000000ull;
const uptr kTraceMemBeg = 0x6000000000ull;
const uptr kTraceMemEnd = 0x6200000000ull;
const uptr kHeapMemBeg = 0x7d00000000ull;
const uptr kHeapMemEnd = 0x7e00000000ull;
const uptr kHiAppMemBeg = 0x7e00000000ull;
const uptr kHiAppMemEnd = 0x7fffffffffull;
const uptr kAppMemMsk = 0x7800000000ull;
const uptr kAppMemXor = 0x0800000000ull;
const uptr kVdsoBeg = 0x7f00000000ull;
# elif SANITIZER_AARCH64_VMA == 42
struct Mapping39 {
static const uptr kLoAppMemBeg = 0x0000001000ull;
static const uptr kLoAppMemEnd = 0x0100000000ull;
static const uptr kShadowBeg = 0x0800000000ull;
static const uptr kShadowEnd = 0x1F00000000ull;
static const uptr kMetaShadowBeg = 0x3100000000ull;
static const uptr kMetaShadowEnd = 0x3400000000ull;
static const uptr kTraceMemBeg = 0x6000000000ull;
static const uptr kTraceMemEnd = 0x6200000000ull;
static const uptr kHeapMemBeg = 0x7c00000000ull;
static const uptr kHeapMemEnd = 0x7d00000000ull;
static const uptr kHiAppMemBeg = 0x7d00000000ull;
static const uptr kHiAppMemEnd = 0x7fffffffffull;
static const uptr kAppMemMsk = 0x7800000000ull;
static const uptr kAppMemXor = 0x0200000000ull;
static const uptr kVdsoBeg = 0x7f00000000ull;
};
/*
C/C++ on linux/aarch64 (42-bit VMA)
00000 4000 00 - 01000 0000 00: main binary
00000 0010 00 - 01000 0000 00: main binary
01000 0000 00 - 10000 0000 00: -
10000 0000 00 - 20000 0000 00: shadow memory
20000 0000 00 - 26000 0000 00: -
@@ -126,73 +139,30 @@ C/C++ on linux/aarch64 (42-bit VMA)
36200 0000 00 - 36240 0000 00: traces
36240 0000 00 - 3e000 0000 00: -
3e000 0000 00 - 3f000 0000 00: heap
3c000 0000 00 - 3ff00 0000 00: -
3ff00 0000 00 - 3ffff f000 00: modules and main thread stack
3f000 0000 00 - 3ffff ffff ff: modules and main thread stack
*/
const uptr kLoAppMemBeg = 0x00000400000ull;
const uptr kLoAppMemEnd = 0x01000000000ull;
const uptr kShadowBeg = 0x10000000000ull;
const uptr kShadowEnd = 0x20000000000ull;
const uptr kMetaShadowBeg = 0x26000000000ull;
const uptr kMetaShadowEnd = 0x28000000000ull;
const uptr kTraceMemBeg = 0x36200000000ull;
const uptr kTraceMemEnd = 0x36400000000ull;
const uptr kHeapMemBeg = 0x3e000000000ull;
const uptr kHeapMemEnd = 0x3f000000000ull;
const uptr kHiAppMemBeg = 0x3ff00000000ull;
const uptr kHiAppMemEnd = 0x3fffff00000ull;
const uptr kAppMemMsk = 0x3c000000000ull;
const uptr kAppMemXor = 0x04000000000ull;
const uptr kVdsoBeg = 0x37f00000000ull;
# endif
#endif
ALWAYS_INLINE
bool IsAppMem(uptr mem) {
return (mem >= kHeapMemBeg && mem < kHeapMemEnd) ||
(mem >= kLoAppMemBeg && mem < kLoAppMemEnd) ||
(mem >= kHiAppMemBeg && mem < kHiAppMemEnd);
}
ALWAYS_INLINE
bool IsShadowMem(uptr mem) {
return mem >= kShadowBeg && mem <= kShadowEnd;
}
ALWAYS_INLINE
bool IsMetaMem(uptr mem) {
return mem >= kMetaShadowBeg && mem <= kMetaShadowEnd;
}
ALWAYS_INLINE
uptr MemToShadow(uptr x) {
DCHECK(IsAppMem(x));
return (((x) & ~(kAppMemMsk | (kShadowCell - 1)))
^ kAppMemXor) * kShadowCnt;
}
ALWAYS_INLINE
u32 *MemToMeta(uptr x) {
DCHECK(IsAppMem(x));
return (u32*)(((((x) & ~(kAppMemMsk | (kMetaShadowCell - 1)))
^ kAppMemXor) / kMetaShadowCell * kMetaShadowSize) | kMetaShadowBeg);
}
ALWAYS_INLINE
uptr ShadowToMem(uptr s) {
CHECK(IsShadowMem(s));
if (s >= MemToShadow(kLoAppMemBeg) && s <= MemToShadow(kLoAppMemEnd - 1))
return (s / kShadowCnt) ^ kAppMemXor;
else
return ((s / kShadowCnt) ^ kAppMemXor) | kAppMemMsk;
}
static USED uptr UserRegions[] = {
kLoAppMemBeg, kLoAppMemEnd,
kHiAppMemBeg, kHiAppMemEnd,
kHeapMemBeg, kHeapMemEnd,
struct Mapping42 {
static const uptr kLoAppMemBeg = 0x00000001000ull;
static const uptr kLoAppMemEnd = 0x01000000000ull;
static const uptr kShadowBeg = 0x10000000000ull;
static const uptr kShadowEnd = 0x20000000000ull;
static const uptr kMetaShadowBeg = 0x26000000000ull;
static const uptr kMetaShadowEnd = 0x28000000000ull;
static const uptr kTraceMemBeg = 0x36200000000ull;
static const uptr kTraceMemEnd = 0x36400000000ull;
static const uptr kHeapMemBeg = 0x3e000000000ull;
static const uptr kHeapMemEnd = 0x3f000000000ull;
static const uptr kHiAppMemBeg = 0x3f000000000ull;
static const uptr kHiAppMemEnd = 0x3ffffffffffull;
static const uptr kAppMemMsk = 0x3c000000000ull;
static const uptr kAppMemXor = 0x04000000000ull;
static const uptr kVdsoBeg = 0x37f00000000ull;
};
// Indicates the runtime will define the memory regions at runtime.
#define TSAN_RUNTIME_VMA 1
#endif
#elif defined(SANITIZER_GO) && !SANITIZER_WINDOWS
/* Go on linux, darwin and freebsd
@@ -208,51 +178,15 @@ static USED uptr UserRegions[] = {
6200 0000 0000 - 8000 0000 0000: -
*/
const uptr kMetaShadowBeg = 0x300000000000ull;
const uptr kMetaShadowEnd = 0x400000000000ull;
const uptr kTraceMemBeg = 0x600000000000ull;
const uptr kTraceMemEnd = 0x620000000000ull;
const uptr kShadowBeg = 0x200000000000ull;
const uptr kShadowEnd = 0x238000000000ull;
const uptr kAppMemBeg = 0x000000001000ull;
const uptr kAppMemEnd = 0x00e000000000ull;
ALWAYS_INLINE
bool IsAppMem(uptr mem) {
return mem >= kAppMemBeg && mem < kAppMemEnd;
}
ALWAYS_INLINE
bool IsShadowMem(uptr mem) {
return mem >= kShadowBeg && mem <= kShadowEnd;
}
ALWAYS_INLINE
bool IsMetaMem(uptr mem) {
return mem >= kMetaShadowBeg && mem <= kMetaShadowEnd;
}
ALWAYS_INLINE
uptr MemToShadow(uptr x) {
DCHECK(IsAppMem(x));
return ((x & ~(kShadowCell - 1)) * kShadowCnt) | kShadowBeg;
}
ALWAYS_INLINE
u32 *MemToMeta(uptr x) {
DCHECK(IsAppMem(x));
return (u32*)(((x & ~(kMetaShadowCell - 1)) / \
kMetaShadowCell * kMetaShadowSize) | kMetaShadowBeg);
}
ALWAYS_INLINE
uptr ShadowToMem(uptr s) {
CHECK(IsShadowMem(s));
return (s & ~kShadowBeg) / kShadowCnt;
}
static USED uptr UserRegions[] = {
kAppMemBeg, kAppMemEnd,
struct Mapping {
static const uptr kMetaShadowBeg = 0x300000000000ull;
static const uptr kMetaShadowEnd = 0x400000000000ull;
static const uptr kTraceMemBeg = 0x600000000000ull;
static const uptr kTraceMemEnd = 0x620000000000ull;
static const uptr kShadowBeg = 0x200000000000ull;
static const uptr kShadowEnd = 0x238000000000ull;
static const uptr kAppMemBeg = 0x000000001000ull;
static const uptr kAppMemEnd = 0x00e000000000ull;
};
#elif defined(SANITIZER_GO) && SANITIZER_WINDOWS
@@ -269,77 +203,381 @@ static USED uptr UserRegions[] = {
07d0 0000 0000 - 8000 0000 0000: -
*/
const uptr kMetaShadowBeg = 0x076000000000ull;
const uptr kMetaShadowEnd = 0x07d000000000ull;
const uptr kTraceMemBeg = 0x056000000000ull;
const uptr kTraceMemEnd = 0x076000000000ull;
const uptr kShadowBeg = 0x010000000000ull;
const uptr kShadowEnd = 0x050000000000ull;
const uptr kAppMemBeg = 0x000000001000ull;
const uptr kAppMemEnd = 0x00e000000000ull;
ALWAYS_INLINE
bool IsAppMem(uptr mem) {
return mem >= kAppMemBeg && mem < kAppMemEnd;
struct Mapping {
static const uptr kMetaShadowBeg = 0x076000000000ull;
static const uptr kMetaShadowEnd = 0x07d000000000ull;
static const uptr kTraceMemBeg = 0x056000000000ull;
static const uptr kTraceMemEnd = 0x076000000000ull;
static const uptr kShadowBeg = 0x010000000000ull;
static const uptr kShadowEnd = 0x050000000000ull;
static const uptr kAppMemBeg = 0x000000001000ull;
static const uptr kAppMemEnd = 0x00e000000000ull;
}
ALWAYS_INLINE
bool IsShadowMem(uptr mem) {
return mem >= kShadowBeg && mem <= kShadowEnd;
}
ALWAYS_INLINE
bool IsMetaMem(uptr mem) {
return mem >= kMetaShadowBeg && mem <= kMetaShadowEnd;
}
ALWAYS_INLINE
uptr MemToShadow(uptr x) {
DCHECK(IsAppMem(x));
return ((x & ~(kShadowCell - 1)) * kShadowCnt) + kShadowBeg;
}
ALWAYS_INLINE
u32 *MemToMeta(uptr x) {
DCHECK(IsAppMem(x));
return (u32*)(((x & ~(kMetaShadowCell - 1)) / \
kMetaShadowCell * kMetaShadowSize) | kMetaShadowBeg);
}
ALWAYS_INLINE
uptr ShadowToMem(uptr s) {
CHECK(IsShadowMem(s));
// FIXME(dvyukov): this is most likely wrong as the mapping is not bijection.
return (s - kShadowBeg) / kShadowCnt;
}
static USED uptr UserRegions[] = {
kAppMemBeg, kAppMemEnd,
};
#else
# error "Unknown platform"
#endif
#ifdef TSAN_RUNTIME_VMA
extern uptr vmaSize;
#endif
enum MappingType {
MAPPING_LO_APP_BEG,
MAPPING_LO_APP_END,
MAPPING_HI_APP_BEG,
MAPPING_HI_APP_END,
MAPPING_HEAP_BEG,
MAPPING_HEAP_END,
MAPPING_APP_BEG,
MAPPING_APP_END,
MAPPING_SHADOW_BEG,
MAPPING_SHADOW_END,
MAPPING_META_SHADOW_BEG,
MAPPING_META_SHADOW_END,
MAPPING_TRACE_BEG,
MAPPING_TRACE_END,
MAPPING_VDSO_BEG,
};
template<typename Mapping, int Type>
uptr MappingImpl(void) {
switch (Type) {
#ifndef SANITIZER_GO
case MAPPING_LO_APP_BEG: return Mapping::kLoAppMemBeg;
case MAPPING_LO_APP_END: return Mapping::kLoAppMemEnd;
case MAPPING_HI_APP_BEG: return Mapping::kHiAppMemBeg;
case MAPPING_HI_APP_END: return Mapping::kHiAppMemEnd;
case MAPPING_HEAP_BEG: return Mapping::kHeapMemBeg;
case MAPPING_HEAP_END: return Mapping::kHeapMemEnd;
case MAPPING_VDSO_BEG: return Mapping::kVdsoBeg;
#else
case MAPPING_APP_BEG: return Mapping::kAppMemBeg;
case MAPPING_APP_END: return Mapping::kAppMemEnd;
#endif
case MAPPING_SHADOW_BEG: return Mapping::kShadowBeg;
case MAPPING_SHADOW_END: return Mapping::kShadowEnd;
case MAPPING_META_SHADOW_BEG: return Mapping::kMetaShadowBeg;
case MAPPING_META_SHADOW_END: return Mapping::kMetaShadowEnd;
case MAPPING_TRACE_BEG: return Mapping::kTraceMemBeg;
case MAPPING_TRACE_END: return Mapping::kTraceMemEnd;
}
}
template<int Type>
uptr MappingArchImpl(void) {
#ifdef __aarch64__
if (vmaSize == 39)
return MappingImpl<Mapping39, Type>();
else
return MappingImpl<Mapping42, Type>();
DCHECK(0);
#else
return MappingImpl<Mapping, Type>();
#endif
}
#ifndef SANITIZER_GO
ALWAYS_INLINE
uptr LoAppMemBeg(void) {
return MappingArchImpl<MAPPING_LO_APP_BEG>();
}
ALWAYS_INLINE
uptr LoAppMemEnd(void) {
return MappingArchImpl<MAPPING_LO_APP_END>();
}
ALWAYS_INLINE
uptr HeapMemBeg(void) {
return MappingArchImpl<MAPPING_HEAP_BEG>();
}
ALWAYS_INLINE
uptr HeapMemEnd(void) {
return MappingArchImpl<MAPPING_HEAP_END>();
}
ALWAYS_INLINE
uptr HiAppMemBeg(void) {
return MappingArchImpl<MAPPING_HI_APP_BEG>();
}
ALWAYS_INLINE
uptr HiAppMemEnd(void) {
return MappingArchImpl<MAPPING_HI_APP_END>();
}
ALWAYS_INLINE
uptr VdsoBeg(void) {
return MappingArchImpl<MAPPING_VDSO_BEG>();
}
#else
ALWAYS_INLINE
uptr AppMemBeg(void) {
return MappingArchImpl<MAPPING_APP_BEG>();
}
ALWAYS_INLINE
uptr AppMemEnd(void) {
return MappingArchImpl<MAPPING_APP_END>();
}
#endif
static inline
bool GetUserRegion(int i, uptr *start, uptr *end) {
switch (i) {
default:
return false;
#ifndef SANITIZER_GO
case 0:
*start = LoAppMemBeg();
*end = LoAppMemEnd();
return true;
case 1:
*start = HiAppMemBeg();
*end = HiAppMemEnd();
return true;
case 2:
*start = HeapMemBeg();
*end = HeapMemEnd();
return true;
#else
case 0:
*start = AppMemBeg();
*end = AppMemEnd();
return true;
#endif
}
}
ALWAYS_INLINE
uptr ShadowBeg(void) {
return MappingArchImpl<MAPPING_SHADOW_BEG>();
}
ALWAYS_INLINE
uptr ShadowEnd(void) {
return MappingArchImpl<MAPPING_SHADOW_END>();
}
ALWAYS_INLINE
uptr MetaShadowBeg(void) {
return MappingArchImpl<MAPPING_META_SHADOW_BEG>();
}
ALWAYS_INLINE
uptr MetaShadowEnd(void) {
return MappingArchImpl<MAPPING_META_SHADOW_END>();
}
ALWAYS_INLINE
uptr TraceMemBeg(void) {
return MappingArchImpl<MAPPING_TRACE_BEG>();
}
ALWAYS_INLINE
uptr TraceMemEnd(void) {
return MappingArchImpl<MAPPING_TRACE_END>();
}
template<typename Mapping>
bool IsAppMemImpl(uptr mem) {
#ifndef SANITIZER_GO
return (mem >= Mapping::kHeapMemBeg && mem < Mapping::kHeapMemEnd) ||
(mem >= Mapping::kLoAppMemBeg && mem < Mapping::kLoAppMemEnd) ||
(mem >= Mapping::kHiAppMemBeg && mem < Mapping::kHiAppMemEnd);
#else
return mem >= Mapping::kAppMemBeg && mem < Mapping::kAppMemEnd;
#endif
}
ALWAYS_INLINE
bool IsAppMem(uptr mem) {
#ifdef __aarch64__
if (vmaSize == 39)
return IsAppMemImpl<Mapping39>(mem);
else
return IsAppMemImpl<Mapping42>(mem);
DCHECK(0);
#else
return IsAppMemImpl<Mapping>(mem);
#endif
}
template<typename Mapping>
bool IsShadowMemImpl(uptr mem) {
return mem >= Mapping::kShadowBeg && mem <= Mapping::kShadowEnd;
}
ALWAYS_INLINE
bool IsShadowMem(uptr mem) {
#ifdef __aarch64__
if (vmaSize == 39)
return IsShadowMemImpl<Mapping39>(mem);
else
return IsShadowMemImpl<Mapping42>(mem);
DCHECK(0);
#else
return IsShadowMemImpl<Mapping>(mem);
#endif
}
template<typename Mapping>
bool IsMetaMemImpl(uptr mem) {
return mem >= Mapping::kMetaShadowBeg && mem <= Mapping::kMetaShadowEnd;
}
ALWAYS_INLINE
bool IsMetaMem(uptr mem) {
#ifdef __aarch64__
if (vmaSize == 39)
return IsMetaMemImpl<Mapping39>(mem);
else
return IsMetaMemImpl<Mapping42>(mem);
DCHECK(0);
#else
return IsMetaMemImpl<Mapping>(mem);
#endif
}
template<typename Mapping>
uptr MemToShadowImpl(uptr x) {
DCHECK(IsAppMem(x));
#ifndef SANITIZER_GO
return (((x) & ~(Mapping::kAppMemMsk | (kShadowCell - 1)))
^ Mapping::kAppMemXor) * kShadowCnt;
#else
return ((x & ~(kShadowCell - 1)) * kShadowCnt) | Mapping::kShadowBeg;
#endif
}
ALWAYS_INLINE
uptr MemToShadow(uptr x) {
#ifdef __aarch64__
if (vmaSize == 39)
return MemToShadowImpl<Mapping39>(x);
else
return MemToShadowImpl<Mapping42>(x);
DCHECK(0);
#else
return MemToShadowImpl<Mapping>(x);
#endif
}
template<typename Mapping>
u32 *MemToMetaImpl(uptr x) {
DCHECK(IsAppMem(x));
#ifndef SANITIZER_GO
return (u32*)(((((x) & ~(Mapping::kAppMemMsk | (kMetaShadowCell - 1)))
^ Mapping::kAppMemXor) / kMetaShadowCell * kMetaShadowSize)
| Mapping::kMetaShadowBeg);
#else
return (u32*)(((x & ~(kMetaShadowCell - 1)) / \
kMetaShadowCell * kMetaShadowSize) | Mapping::kMetaShadowBeg);
#endif
}
ALWAYS_INLINE
u32 *MemToMeta(uptr x) {
#ifdef __aarch64__
if (vmaSize == 39)
return MemToMetaImpl<Mapping39>(x);
else
return MemToMetaImpl<Mapping42>(x);
DCHECK(0);
#else
return MemToMetaImpl<Mapping>(x);
#endif
}
template<typename Mapping>
uptr ShadowToMemImpl(uptr s) {
DCHECK(IsShadowMem(s));
#ifndef SANITIZER_GO
if (s >= MemToShadow(Mapping::kLoAppMemBeg)
&& s <= MemToShadow(Mapping::kLoAppMemEnd - 1))
return (s / kShadowCnt) ^ Mapping::kAppMemXor;
else
return ((s / kShadowCnt) ^ Mapping::kAppMemXor) | Mapping::kAppMemMsk;
#else
# ifndef SANITIZER_WINDOWS
return (s & ~Mapping::kShadowBeg) / kShadowCnt;
# else
// FIXME(dvyukov): this is most likely wrong as the mapping is not bijection.
return (s - Mapping::kShadowBeg) / kShadowCnt;
# endif // SANITIZER_WINDOWS
#endif
}
ALWAYS_INLINE
uptr ShadowToMem(uptr s) {
#ifdef __aarch64__
if (vmaSize == 39)
return ShadowToMemImpl<Mapping39>(s);
else
return ShadowToMemImpl<Mapping42>(s);
DCHECK(0);
#else
return ShadowToMemImpl<Mapping>(s);
#endif
}
// The additional page is to catch shadow stack overflow as paging fault.
// Windows wants 64K alignment for mmaps.
const uptr kTotalTraceSize = (kTraceSize * sizeof(Event) + sizeof(Trace)
+ (64 << 10) + (64 << 10) - 1) & ~((64 << 10) - 1);
uptr ALWAYS_INLINE GetThreadTrace(int tid) {
uptr p = kTraceMemBeg + (uptr)tid * kTotalTraceSize;
DCHECK_LT(p, kTraceMemEnd);
template<typename Mapping>
uptr GetThreadTraceImpl(int tid) {
uptr p = Mapping::kTraceMemBeg + (uptr)tid * kTotalTraceSize;
DCHECK_LT(p, Mapping::kTraceMemEnd);
return p;
}
uptr ALWAYS_INLINE GetThreadTraceHeader(int tid) {
uptr p = kTraceMemBeg + (uptr)tid * kTotalTraceSize
ALWAYS_INLINE
uptr GetThreadTrace(int tid) {
#ifdef __aarch64__
if (vmaSize == 39)
return GetThreadTraceImpl<Mapping39>(tid);
else
return GetThreadTraceImpl<Mapping42>(tid);
DCHECK(0);
#else
return GetThreadTraceImpl<Mapping>(tid);
#endif
}
template<typename Mapping>
uptr GetThreadTraceHeaderImpl(int tid) {
uptr p = Mapping::kTraceMemBeg + (uptr)tid * kTotalTraceSize
+ kTraceSize * sizeof(Event);
DCHECK_LT(p, kTraceMemEnd);
DCHECK_LT(p, Mapping::kTraceMemEnd);
return p;
}
ALWAYS_INLINE
uptr GetThreadTraceHeader(int tid) {
#ifdef __aarch64__
if (vmaSize == 39)
return GetThreadTraceHeaderImpl<Mapping39>(tid);
else
return GetThreadTraceHeaderImpl<Mapping42>(tid);
DCHECK(0);
#else
return GetThreadTraceHeaderImpl<Mapping>(tid);
#endif
}
void InitializePlatform();
void InitializePlatformEarly();
void CheckAndProtect();
void InitializeShadowMemoryPlatform();
void FlushShadowMemory();
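
For reference, the direct-mapping transform strips the region-selector bits
with kAppMemMsk, xors with kAppMemXor, and scales by kShadowCnt.  A worked
example against the Mapping39 constants above, assuming kShadowCell = 8 and
kShadowCnt = 4 as defined in tsan_defs.h:

// MemToShadow on a Mapping39 heap address:
//   x                       = 0x7c00001000  (in [kHeapMemBeg, kHeapMemEnd))
//   x & ~(kAppMemMsk | 7)   = 0x0400001000  (strip region-selector bits)
//   ... ^ kAppMemXor        = 0x0600001000
//   ... * kShadowCnt        = 0x1800004000  (in [kShadowBeg, kShadowEnd))
// ShadowToMem then divides by kShadowCnt, xors kAppMemXor back, and ORs in
// kAppMemMsk for addresses above the low app range, recovering 0x7c00001000.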


@@ -67,6 +67,11 @@ namespace __tsan {
static uptr g_data_start;
static uptr g_data_end;
#ifdef TSAN_RUNTIME_VMA
// Runtime detected VMA size.
uptr vmaSize;
#endif
enum {
MemTotal = 0,
MemShadow = 1,
@@ -82,22 +87,22 @@ enum {
void FillProfileCallback(uptr p, uptr rss, bool file,
uptr *mem, uptr stats_size) {
mem[MemTotal] += rss;
if (p >= kShadowBeg && p < kShadowEnd)
if (p >= ShadowBeg() && p < ShadowEnd())
mem[MemShadow] += rss;
else if (p >= kMetaShadowBeg && p < kMetaShadowEnd)
else if (p >= MetaShadowBeg() && p < MetaShadowEnd())
mem[MemMeta] += rss;
#ifndef SANITIZER_GO
else if (p >= kHeapMemBeg && p < kHeapMemEnd)
else if (p >= HeapMemBeg() && p < HeapMemEnd())
mem[MemHeap] += rss;
else if (p >= kLoAppMemBeg && p < kLoAppMemEnd)
else if (p >= LoAppMemBeg() && p < LoAppMemEnd())
mem[file ? MemFile : MemMmap] += rss;
else if (p >= kHiAppMemBeg && p < kHiAppMemEnd)
else if (p >= HiAppMemBeg() && p < HiAppMemEnd())
mem[file ? MemFile : MemMmap] += rss;
#else
else if (p >= kAppMemBeg && p < kAppMemEnd)
else if (p >= AppMemBeg() && p < AppMemEnd())
mem[file ? MemFile : MemMmap] += rss;
#endif
else if (p >= kTraceMemBeg && p < kTraceMemEnd)
else if (p >= TraceMemBeg() && p < TraceMemEnd())
mem[MemTrace] += rss;
else
mem[MemOther] += rss;
@@ -121,7 +126,7 @@ void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) {
void FlushShadowMemoryCallback(
const SuspendedThreadsList &suspended_threads_list,
void *argument) {
FlushUnneededShadowMemory(kShadowBeg, kShadowEnd - kShadowBeg);
FlushUnneededShadowMemory(ShadowBeg(), ShadowEnd() - ShadowBeg());
}
#endif
@@ -235,6 +240,18 @@ static void InitDataSeg() {
#endif // #ifndef SANITIZER_GO
void InitializePlatformEarly() {
#ifdef TSAN_RUNTIME_VMA
vmaSize =
(MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);
if (vmaSize != 39 && vmaSize != 42) {
Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
Printf("FATAL: Found %d - Supported 39 and 42\n", vmaSize);
Die();
}
#endif
}
void InitializePlatform() {
DisableCoreDumperIfNecessary();
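
The VMA detection above reads the address of the current stack frame: the
main thread stack sits near the top of the user address space, so the index
of its most significant set bit, plus one, gives the VMA width.  A rough
standalone equivalent of that probe (not the sanitizer_common helpers):

#include <cstdio>
#include <cstdint>

int main() {
  int local;                              // any stack object will do
  uintptr_t frame = (uintptr_t)&local;
  int vma = 64 - __builtin_clzll(frame);  // MSB index + 1
  printf("detected VMA: %d-bit\n", vma);  // e.g. 39 or 42 on AArch64
  return 0;
}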


@@ -151,6 +151,9 @@ static void my_pthread_introspection_hook(unsigned int event, pthread_t thread,
}
#endif
void InitializePlatformEarly() {
}
void InitializePlatform() {
DisableCoreDumperIfNecessary();
#ifndef SANITIZER_GO


@@ -27,11 +27,12 @@ namespace __tsan {
void InitializeShadowMemory() {
// Map memory shadow.
uptr shadow =
(uptr)MmapFixedNoReserve(kShadowBeg, kShadowEnd - kShadowBeg, "shadow");
if (shadow != kShadowBeg) {
(uptr)MmapFixedNoReserve(ShadowBeg(), ShadowEnd() - ShadowBeg(),
"shadow");
if (shadow != ShadowBeg()) {
Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n");
Printf("FATAL: Make sure to compile with -fPIE and "
"to link with -pie (%p, %p).\n", shadow, kShadowBeg);
"to link with -pie (%p, %p).\n", shadow, ShadowBeg());
Die();
}
// This memory range is used for thread stacks and large user mmaps.
@@ -46,29 +47,38 @@ void InitializeShadowMemory() {
const uptr kMadviseRangeBeg = 0xff00000000ull;
const uptr kMadviseRangeSize = 0x0100000000ull;
#elif defined(__aarch64__)
const uptr kMadviseRangeBeg = 0x7e00000000ull;
const uptr kMadviseRangeSize = 0x0100000000ull;
uptr kMadviseRangeBeg = 0;
uptr kMadviseRangeSize = 0;
if (vmaSize == 39) {
kMadviseRangeBeg = 0x7d00000000ull;
kMadviseRangeSize = 0x0300000000ull;
} else if (vmaSize == 42) {
kMadviseRangeBeg = 0x3f000000000ull;
kMadviseRangeSize = 0x01000000000ull;
} else {
DCHECK(0);
}
#endif
NoHugePagesInRegion(MemToShadow(kMadviseRangeBeg),
kMadviseRangeSize * kShadowMultiplier);
// Meta shadow is compressing and we don't flush it,
// so it makes sense to mark it as NOHUGEPAGE to not over-allocate memory.
// On one program it reduces memory consumption from 5GB to 2.5GB.
NoHugePagesInRegion(kMetaShadowBeg, kMetaShadowEnd - kMetaShadowBeg);
NoHugePagesInRegion(MetaShadowBeg(), MetaShadowEnd() - MetaShadowBeg());
if (common_flags()->use_madv_dontdump)
DontDumpShadowMemory(kShadowBeg, kShadowEnd - kShadowBeg);
DontDumpShadowMemory(ShadowBeg(), ShadowEnd() - ShadowBeg());
DPrintf("memory shadow: %zx-%zx (%zuGB)\n",
kShadowBeg, kShadowEnd,
(kShadowEnd - kShadowBeg) >> 30);
ShadowBeg(), ShadowEnd(),
(ShadowEnd() - ShadowBeg()) >> 30);
// Map meta shadow.
uptr meta_size = kMetaShadowEnd - kMetaShadowBeg;
uptr meta_size = MetaShadowEnd() - MetaShadowBeg();
uptr meta =
(uptr)MmapFixedNoReserve(kMetaShadowBeg, meta_size, "meta shadow");
if (meta != kMetaShadowBeg) {
(uptr)MmapFixedNoReserve(MetaShadowBeg(), meta_size, "meta shadow");
if (meta != MetaShadowBeg()) {
Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n");
Printf("FATAL: Make sure to compile with -fPIE and "
"to link with -pie (%p, %p).\n", meta, kMetaShadowBeg);
"to link with -pie (%p, %p).\n", meta, MetaShadowBeg());
Die();
}
if (common_flags()->use_madv_dontdump)
@@ -97,25 +107,25 @@ void CheckAndProtect() {
while (proc_maps.Next(&p, &end, 0, 0, 0, &prot)) {
if (IsAppMem(p))
continue;
if (p >= kHeapMemEnd &&
if (p >= HeapMemEnd() &&
p < HeapEnd())
continue;
if (prot == 0) // Zero page or mprotected.
continue;
if (p >= kVdsoBeg) // vdso
if (p >= VdsoBeg()) // vdso
break;
Printf("FATAL: ThreadSanitizer: unexpected memory mapping %p-%p\n", p, end);
Die();
}
ProtectRange(kLoAppMemEnd, kShadowBeg);
ProtectRange(kShadowEnd, kMetaShadowBeg);
ProtectRange(kMetaShadowEnd, kTraceMemBeg);
ProtectRange(LoAppMemEnd(), ShadowBeg());
ProtectRange(ShadowEnd(), MetaShadowBeg());
ProtectRange(MetaShadowEnd(), TraceMemBeg());
// Memory for traces is mapped lazily in MapThreadTrace.
// Protect the whole range for now, so that user does not map something here.
ProtectRange(kTraceMemBeg, kTraceMemEnd);
ProtectRange(kTraceMemEnd, kHeapMemBeg);
ProtectRange(HeapEnd(), kHiAppMemBeg);
ProtectRange(TraceMemBeg(), TraceMemEnd());
ProtectRange(TraceMemEnd(), HeapMemBeg());
ProtectRange(HeapEnd(), HiAppMemBeg());
}
#endif
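
The kMadviseRangeBeg/kMadviseRangeSize pairs above are fed through
MemToShadow into NoHugePagesInRegion, which on Linux amounts to an
madvise(MADV_NOHUGEPAGE) over the shadow of the thread-stack region.  A
hedged sketch of the underlying call (the real helper lives in
sanitizer_common):

#include <sys/mman.h>

// Keep the kernel from backing this range with transparent huge pages,
// so sparsely-touched shadow is not rounded up to 2MB pages.
static void DisableHugePages(void *beg, size_t size) {
  madvise(beg, size, MADV_NOHUGEPAGE);
}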


@@ -31,6 +31,9 @@ void FlushShadowMemory() {
void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) {
}
void InitializePlatformEarly() {
}
void InitializePlatform() {
}


@@ -273,8 +273,8 @@ void MapShadow(uptr addr, uptr size) {
void MapThreadTrace(uptr addr, uptr size, const char *name) {
DPrintf("#0: Mapping trace at %p-%p(0x%zx)\n", addr, addr + size, size);
CHECK_GE(addr, kTraceMemBeg);
CHECK_LE(addr + size, kTraceMemEnd);
CHECK_GE(addr, TraceMemBeg());
CHECK_LE(addr + size, TraceMemEnd());
CHECK_EQ(addr, addr & ~((64 << 10) - 1)); // windows wants 64K alignment
uptr addr1 = (uptr)MmapFixedNoReserve(addr, size, name);
if (addr1 != addr) {
@@ -285,9 +285,8 @@ void MapThreadTrace(uptr addr, uptr size, const char *name) {
}
static void CheckShadowMapping() {
for (uptr i = 0; i < ARRAY_SIZE(UserRegions); i += 2) {
const uptr beg = UserRegions[i];
const uptr end = UserRegions[i + 1];
uptr beg, end;
for (int i = 0; GetUserRegion(i, &beg, &end); i++) {
VPrintf(3, "checking shadow region %p-%p\n", beg, end);
for (uptr p0 = beg; p0 <= end; p0 += (end - beg) / 4) {
for (int x = -1; x <= 1; x++) {
@@ -322,7 +321,7 @@ void Initialize(ThreadState *thr) {
const char *options = GetEnv(kTsanOptionsEnv);
CacheBinaryName();
InitializeFlags(&ctx->flags, options);
CheckVMASize();
InitializePlatformEarly();
#ifndef SANITIZER_GO
InitializeAllocator();
ReplaceSystemMalloc();


@@ -66,7 +66,8 @@ typedef SizeClassAllocator32<kAllocatorSpace, kAllocatorSize, 0,
CompactSizeClassMap, kAllocatorRegionSizeLog, ByteMap,
MapUnmapCallback> PrimaryAllocator;
#else
typedef SizeClassAllocator64<kHeapMemBeg, kHeapMemEnd - kHeapMemBeg, 0,
typedef SizeClassAllocator64<Mapping::kHeapMemBeg,
Mapping::kHeapMemEnd - Mapping::kHeapMemBeg, 0,
DefaultSizeClassMap, MapUnmapCallback> PrimaryAllocator;
#endif
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
@@ -761,7 +762,7 @@ void ALWAYS_INLINE TraceAddEvent(ThreadState *thr, FastState fs,
#ifndef SANITIZER_GO
uptr ALWAYS_INLINE HeapEnd() {
return kHeapMemEnd + PrimaryAllocator::AdditionalSize();
return HeapMemEnd() + PrimaryAllocator::AdditionalSize();
}
#endif


@@ -57,7 +57,7 @@ void print_address(void *address) {
else if (vma == 42)
format = "0x%011lx";
else {
fprintf(stderr, "unsupported vma: %ul\n", vma);
fprintf(stderr, "unsupported vma: %lu\n", vma);
exit(1);
}