llvm-project/compiler-rt/lib/hwasan/hwasan_allocator.h

//===-- hwasan_allocator.h --------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of HWAddressSanitizer.
//
//===----------------------------------------------------------------------===//
#ifndef HWASAN_ALLOCATOR_H
#define HWASAN_ALLOCATOR_H

#include "hwasan.h"
#include "hwasan_interface_internal.h"
#include "hwasan_mapping.h"
#include "hwasan_poisoning.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_ring_buffer.h"

#if !defined(__aarch64__) && !defined(__x86_64__)
#error Unsupported platform
#endif
namespace __hwasan {

struct Metadata {
  u32 requested_size_low;
  u32 requested_size_high : 31;
  u32 right_aligned : 1;
  u32 alloc_context_id;
  u64 get_requested_size() {
    return (static_cast<u64>(requested_size_high) << 32) + requested_size_low;
  }
  void set_requested_size(u64 size) {
    requested_size_low = size & ((1ul << 32) - 1);
    requested_size_high = size >> 32;
  }
};
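
// Illustrative sketch (not part of the original header): the requested size
// is split into a 32-bit low half and a 31-bit high half so the struct fits
// the allocator's fixed metadata slot while still describing allocations
// above 4 GiB. A round trip looks like:
//
//   Metadata m;
//   m.set_requested_size(0x123456789ULL);
//   // m.requested_size_low  == 0x23456789
//   // m.requested_size_high == 0x1
//   // m.get_requested_size() == 0x123456789ULL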
struct HwasanMapUnmapCallback {
  void OnMap(uptr p, uptr size) const { UpdateMemoryUsage(); }
  void OnUnmap(uptr p, uptr size) const {
    // We are about to unmap a chunk of user memory. The same range may later
    // be returned to the user via mmap() or reused as another thread's stack,
    // so make it accessible through a zero-tagged pointer.
    TagMemory(p, size, 0);
  }
};
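
// Sketch of the callback's effect (hypothetical values): after
//
//   HwasanMapUnmapCallback().OnUnmap(p, size);
//
// the shadow for [p, p + size) holds tag 0, so a plain untagged pointer
// obtained from a later mmap() of the same range passes the tag check.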
static const uptr kMaxAllowedMallocSize = 1UL << 40;  // 1T

struct AP64 {
  // ~0ULL tells SizeClassAllocator64 to choose the space base dynamically
  // via mmap rather than at a fixed address.
  static const uptr kSpaceBeg = ~0ULL;
#if defined(HWASAN_ALIASING_MODE)
  static const uptr kSpaceSize = 1ULL << kAddressTagShift;
#else
  static const uptr kSpaceSize = 0x2000000000ULL;
#endif
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef __sanitizer::VeryDenseSizeClassMap SizeClassMap;
  using AddressSpaceView = LocalAddressSpaceView;
  typedef HwasanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
};

typedef SizeClassAllocator64<AP64> PrimaryAllocator;
typedef CombinedAllocator<PrimaryAllocator> Allocator;
typedef Allocator::AllocatorCache AllocatorCache;
void AllocatorSwallowThreadLocalCache(AllocatorCache *cache);
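
// How the pieces compose, sketched (the real call sites live in
// hwasan_allocator.cpp; `allocator` here is a hypothetical instance):
//
//   static Allocator allocator;
//   AllocatorCache *cache = ...;  // per-thread; drained via
//                                 // AllocatorSwallowThreadLocalCache()
//   void *p = allocator.Allocate(cache, size, alignment);
//   Metadata *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(p));
//   allocator.Deallocate(cache, p);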
class HwasanChunkView {
 public:
  HwasanChunkView() : block_(0), metadata_(nullptr) {}
  HwasanChunkView(uptr block, Metadata *metadata)
      : block_(block), metadata_(metadata) {}
  bool IsAllocated() const;  // Checks if the memory is currently allocated
  uptr Beg() const;          // First byte of user memory
  uptr End() const;          // Last byte of user memory
  uptr UsedSize() const;     // Size requested by the user
  uptr ActualSize() const;   // Size allocated by the allocator.
  u32 GetAllocStackId() const;
  bool FromSmallHeap() const;

 private:
  uptr block_;
  Metadata *const metadata_;
};
HwasanChunkView FindHeapChunkByAddress(uptr address);
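
// Typical lookup during error reporting, sketched (`untagged_addr` and the
// reporting step are illustrative; the real usage lives in the report code):
//
//   HwasanChunkView chunk = FindHeapChunkByAddress(untagged_addr);
//   if (chunk.IsAllocated() &&
//       untagged_addr >= chunk.Beg() && untagged_addr < chunk.End()) {
//     // Report using chunk.GetAllocStackId(), chunk.UsedSize(), ...
//   }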
// Information about one (de)allocation that happened in the past.
// These are recorded in a thread-local ring buffer.
// TODO: this is currently 24 bytes (20 bytes + alignment).
// Compress it to 16 bytes or extend it to be more useful.
struct HeapAllocationRecord {
  uptr tagged_addr;
  u32 alloc_context_id;
  u32 free_context_id;
  u32 requested_size;
};
typedef RingBuffer<HeapAllocationRecord> HeapAllocationsRingBuffer;
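
// Sketch of recording a free (the real logic lives in hwasan_allocator.cpp;
// the field values are illustrative):
//
//   HeapAllocationsRingBuffer *rb = ...;  // per-thread buffer
//   rb->push({tagged_addr, alloc_context_id, free_context_id,
//             static_cast<u32>(size)});
//
// Once the buffer wraps, the oldest records are overwritten, so reports can
// only describe recent (de)allocations.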
void GetAllocatorStats(AllocatorStatCounters s);
inline bool InTaggableRegion(uptr addr) {
#if defined(HWASAN_ALIASING_MODE)
  // Aliases are mapped next to shadow so that the upper bits match the
  // shadow base.
  return (addr >> kTaggableRegionCheckShift) ==
         (GetShadowOffset() >> kTaggableRegionCheckShift);
#endif
  return true;
}
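
// Callers typically guard tagging with this check, e.g. (sketch):
//
//   if (InTaggableRegion(reinterpret_cast<uptr>(user_ptr)))
//     TagMemory(reinterpret_cast<uptr>(user_ptr), size, tag);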
} // namespace __hwasan
#endif // HWASAN_ALLOCATOR_H