Revert "[sanitizer] Introduce ReservedAddressRange to sanitizer_common"

This reverts commit r315493 which is failing to build on sanitizer-windows.

llvm-svn: 315494
Petr Hosek 2017-10-11 19:29:14 +00:00
parent 2bd0123afc
commit c530f497b8
5 changed files with 0 additions and 180 deletions
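
The reverted change fails to build on the sanitizer-windows bot; one likely culprit is visible in the Windows hunk below (the one built around VirtualAlloc), where ReservedAddressRange::Map repeats the parameter types at the call site of MmapFixedOrDie, which is not valid C++. A reduced sketch of that problem, using a simplified stand-in for the sanitizer_common types rather than code from either commit:

// Reduced illustration only; not part of this commit.
typedef unsigned long uptr;                         // simplified stand-in for __sanitizer::uptr
void *MmapFixedOrDie(uptr fixed_addr, uptr size);   // mirrors the declaration shown below

uptr Map(uptr fixed_addr, uptr size) {
  // The reverted patch wrote the call with types repeated at the call site,
  // which does not parse:
  //   return reinterpret_cast<uptr>(MmapFixedOrDie(uptr fixed_addr, uptr size));
  // A well-formed call passes only the argument names:
  return reinterpret_cast<uptr>(MmapFixedOrDie(fixed_addr, size));
}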

@@ -128,20 +128,6 @@ void CheckVMASize();
void RunMallocHooks(const void *ptr, uptr size);
void RunFreeHooks(const void *ptr);
class ReservedAddressRange {
 public:
  uptr Init(uptr size, const char *name = nullptr, uptr fixed_addr = 0);
  uptr Map(uptr fixed_addr, uptr size, bool tolerate_enomem = false);
  void Unmap(uptr addr, uptr size);
  const void *base() { return base_; }
  const uptr size() { return size_; }
 private:
  void* base_;
  uptr size_;
  const char* name_;
};
typedef void (*fill_profile_f)(uptr start, uptr rss, bool file,
                               /*out*/uptr *stats, uptr stats_size);
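
For reference, the class declared in the hunk above is meant to be used reserve-then-map: Init reserves address space, Map commits pages inside the reservation, and Unmap releases pages from either end of it. A minimal caller sketch, assuming the pass-through semantics of the reverted r315493 (not code from either commit):

// Hypothetical caller, shown only to illustrate the removed interface.
void ReserveAndCommitExample() {
  __sanitizer::ReservedAddressRange range;
  // Reserve 64 KiB of address space (no access permissions yet).
  __sanitizer::uptr base = range.Init(0x10000, "example-range");
  // Commit the whole reservation as read/write pages.
  __sanitizer::uptr mapped = range.Map(base, 0x10000);
  // Release one page from the front; base() and size() are adjusted.
  range.Unmap(mapped, __sanitizer::GetPageSizeCached());
}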

@@ -236,37 +236,6 @@ void *MmapOrDieOnFatalError(uptr size, const char *mem_type) {
  return DoAnonymousMmapOrDie(size, mem_type, false, false);
}
uptr ReservedAddressRange::Init(uptr init_size, const char* name = nullptr,
                                uptr fixed_addr = uptr(0)) {
  base_ = MmapNoAccess(init_size);
  size_ = size;
  name_ = name;
  return reinterpret_cast<uptr>(base_);
}
// Uses fixed_addr for now.
// Will use offset instead once we've implemented this function for real.
uptr ReservedAddressRange::Map(uptr fixed_addr, uptr map_size,
                               bool tolerate_enomem = true) {
  return reinterpret_cast<uptr>(MmapFixedOrDie(fixed_addr, size,
                                               tolerate_enomem));
}
void ReservedAddressRange::Unmap(uptr addr, uptr size) {
  void* addr_as_void = reinterpret_cast<void*>(addr);
  uptr base_as_uptr = reinterpret_cast<uptr>(base_);
  // Only unmap at the beginning or end of the range.
  CHECK_EQ((addr_as_void == base_) || (addr + size == base_as_uptr + size_),
           true);
  // Detect overflows.
  CHECK_LE(size, (base_as_uptr + size_) - addr);
  UnmapOrDie(reinterpret_cast<void*>(addr), size);
  if (addr_as_void == base_) {
    base_ = reinterpret_cast<void*>(addr + size);
  }
  size_ = size_ - size;
}
// MmapNoAccess and MmapFixedOrDie are used only by sanitizer_allocator.
// Instead of doing exactly what they say, we make MmapNoAccess actually
// just allocate a VMAR to reserve the address space. Then MmapFixedOrDie

@@ -337,42 +337,6 @@ void *MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name) {
  return (void *)p;
}
uptr ReservedAddressRange::Init(uptr size, const char *name, uptr fixed_addr) {
  if (fixed_addr) {
    base_ = MmapFixedNoAccess(fixed_addr, size, name);
  } else {
    base_ = MmapNoAccess(size);
  }
  size_ = size;
  name_ = name;
  return reinterpret_cast<uptr>(base_);
}
// Uses fixed_addr for now.
// Will use offset instead once we've implemented this function for real.
uptr ReservedAddressRange::Map(uptr fixed_addr, uptr size,
                               bool tolerate_enomem) {
  if (tolerate_enomem) {
    return reinterpret_cast<uptr>(MmapFixedOrDieOnFatalError(fixed_addr, size));
  }
  return reinterpret_cast<uptr>(MmapFixedOrDie(fixed_addr, size));
}
void ReservedAddressRange::Unmap(uptr addr, uptr size) {
  void* addr_as_void = reinterpret_cast<void*>(addr);
  uptr base_as_uptr = reinterpret_cast<uptr>(base_);
  // Only unmap at the beginning or end of the range.
  CHECK_EQ((addr_as_void == base_) || (addr + size == base_as_uptr + size_),
           true);
  // Detect overflows.
  CHECK_LE(size, (base_as_uptr + size_) - addr);
  UnmapOrDie(reinterpret_cast<void*>(addr), size);
  if (addr_as_void == base_) {
    base_ = reinterpret_cast<void*>(addr + size);
  }
  size_ = size_ - size;
}
void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) {
  int fd = name ? GetNamedMappingFd(name, size) : -1;
  unsigned flags = MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE;

@@ -235,31 +235,6 @@ void *MmapFixedOrDie(uptr fixed_addr, uptr size) {
  return p;
}
// Uses fixed_addr for now.
// Will use offset instead once we've implemented this function for real.
uptr ReservedAddressRange::Map(uptr fixed_addr, uptr size,
                               bool tolerate_enomem) {
  if (tolerate_enomem) {
    return reinterpret_cast<uptr>(MmapFixedOrDieOnFatalError(fixed_addr, size));
  }
  return reinterpret_cast<uptr>(MmapFixedOrDie(uptr fixed_addr, uptr size));
}
void ReservedAddressRange::Unmap(uptr addr, uptr size) {
  void* addr_as_void = reinterpret_cast<void*>(addr);
  uptr base_as_uptr = reinterpret_cast<uptr>(base_);
  // Only unmap at the beginning or end of the range.
  CHECK_EQ((addr_as_void == base_) || (addr + size == base_as_uptr + size_),
           true);
  // Detect overflows.
  CHECK_LE(size, (base_as_uptr + size_) - addr);
  UnmapOrDie(reinterpret_cast<void*>(addr), size);
  if (addr_as_void == base_) {
    base_ = reinterpret_cast<void*>(addr + size);
  }
  size_ = size_ - size;
}
void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size) {
  void *p = VirtualAlloc((LPVOID)fixed_addr, size,
                         MEM_COMMIT, PAGE_READWRITE);
@@ -277,18 +252,6 @@ void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
  return MmapOrDie(size, mem_type);
}
uptr ReservedAddressRange::Init(uptr size, const char *name, uptr fixed_addr) {
  if (fixed_addr) {
    base_ = MmapFixedNoAccess(fixed_addr, size, name);
  } else {
    base_ = MmapNoAccess(size);
  }
  size_ = size;
  name_ = name;
  return reinterpret_cast<uptr>(base_);
}
void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) {
  (void)name; // unsupported
  void *res = VirtualAlloc((LPVOID)fixed_addr, size,

@@ -320,66 +320,4 @@ TEST(SanitizerCommon, GetRandom) {
}
#endif
TEST(SanitizerCommon, ReservedAddressRangeInit) {
  uptr init_size = 0xffff;
  ReservedAddressRange address_range;
  uptr res = address_range.Init(init_size);
  CHECK_NE(res, (void*)-1);
  UnmapOrDie((void*)res, init_size);
  // Should be able to map into the same space now.
  ReservedAddressRange address_range2;
  uptr res2 = address_range2.Init(init_size, nullptr, res);
  CHECK_EQ(res, res2);
  // TODO(flowerhack): Once this is switched to the "real" implementation
  // (rather than passing through to MmapNoAccess*), enforce and test "no
  // double initializations allowed"
}
TEST(SanitizerCommon, ReservedAddressRangeMap) {
  constexpr uptr init_size = 0xffff;
  ReservedAddressRange address_range;
  uptr res = address_range.Init(init_size);
  CHECK_NE(res, (void*) -1);
  // Valid mappings should succeed.
  CHECK_EQ(res, address_range.Map(res, init_size));
  // Valid mappings should be readable.
  unsigned char buffer[init_size];
  memcpy(buffer, &res, sizeof(buffer));
  // Invalid mappings should fail.
  EXPECT_DEATH(address_range.Map(res, 0), ".*");
  // TODO(flowerhack): Once this is switched to the "real" implementation, make
  // sure you can only mmap into offsets in the Init range.
}
TEST(SanitizerCommon, ReservedAddressRangeUnmap) {
  uptr PageSize = GetPageSizeCached();
  uptr init_size = PageSize * 4;
  ReservedAddressRange address_range;
  uptr base_addr = address_range.Init(init_size);
  CHECK_NE(base_addr, (void*)-1);
  CHECK_EQ(base_addr, address_range.Map(base_addr, init_size));
  // Unmapping at the beginning should succeed.
  address_range.Unmap(base_addr, PageSize);
  CHECK_EQ(base_addr + PageSize, address_range.base());
  CHECK_EQ(init_size - PageSize, address_range.size());
  // Unmapping at the end should succeed.
  uptr old_size = address_range.size();
  void* old_base = address_range.base();
  uptr new_start = reinterpret_cast<uptr>(address_range.base()) +
                   address_range.size() - PageSize;
  address_range.Unmap(new_start, PageSize);
  CHECK_EQ(old_size - PageSize, address_range.size());
  CHECK_EQ(old_base, address_range.base());
  // Unmapping in the middle of the ReservedAddressRange should fail.
  EXPECT_DEATH(address_range.Unmap(base_addr + 0xf, 0xff), ".*");
}
} // namespace __sanitizer