[Support] Renamed member 'Size' to 'AllocatedSize' in MemoryBlock and OwningMemoryBlock.
Rename member 'Size' to 'AllocatedSize' in order to provide a hint that the
allocated size may differ from the requested size. Comments are added to
clarify this point. Updated the InMemoryBuffer in FileOutputBuffer.cpp to
track the requested buffer size.

Patch by Machiel van Hooren. Thanks Machiel!

https://reviews.llvm.org/D61599

llvm-svn: 361195
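File headers were lost in extraction; the hunks below appear to touch Support/Memory.h, the JITLink and SectionMemoryManager allocators, FileOutputBuffer.cpp, the Unix and Windows Memory.inc implementations, llvm-rtdyld, and the Memory unit tests, but treat those attributions as inferred. The distinction the rename encodes is easy to restate outside LLVM. A minimal, self-contained sketch (hypothetical types, not the LLVM classes): the allocator hands back a block that only knows how much was actually mapped, so a consumer that cares about the requested length must record it itself, exactly as InMemoryBuffer now does.

    #include <cstddef>
    #include <cstdio>

    // Hypothetical stand-in for the renamed MemoryBlock: it reports only the
    // allocated size, which may exceed what the caller asked for.
    struct Block {
      void *Address = nullptr;
      size_t AllocatedSize = 0;
      size_t allocatedSize() const { return AllocatedSize; }
    };

    // A consumer in the spirit of InMemoryBuffer: it must carry the requested
    // size itself, because the block no longer answers that question.
    struct Consumer {
      Block Buffer;
      size_t RequestedSize; // tracked separately from Buffer.allocatedSize()
    };

    int main() {
      Consumer C{Block{nullptr, 4096}, 100}; // 100 bytes asked, one page mapped
      std::printf("requested=%zu allocated=%zu\n", C.RequestedSize,
                  C.Buffer.allocatedSize());
    }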
commit 93d2bdda6b (parent 52fa90a348)
@@ -31,14 +31,17 @@ namespace sys {
   /// Memory block abstraction.
   class MemoryBlock {
   public:
-    MemoryBlock() : Address(nullptr), Size(0) { }
-    MemoryBlock(void *addr, size_t size) : Address(addr), Size(size) { }
+    MemoryBlock() : Address(nullptr), AllocatedSize(0) {}
+    MemoryBlock(void *addr, size_t allocatedSize)
+        : Address(addr), AllocatedSize(allocatedSize) {}
     void *base() const { return Address; }
-    size_t size() const { return Size; }
+    /// The size as it was allocated. This is always greater or equal to the
+    /// size that was originally requested.
+    size_t allocatedSize() const { return AllocatedSize; }

   private:
     void *Address; ///< Address of first byte of memory area
-    size_t Size; ///< Size, in bytes of the memory area
+    size_t AllocatedSize; ///< Size, in bytes of the memory area
     unsigned Flags = 0;
     friend class Memory;
   };

@@ -139,7 +142,9 @@ namespace sys {
       Memory::releaseMappedMemory(M);
     }
     void *base() const { return M.base(); }
-    size_t size() const { return M.size(); }
+    /// The size as it was allocated. This is always greater or equal to the
+    /// size that was originally requested.
+    size_t allocatedSize() const { return M.allocatedSize(); }
     MemoryBlock getMemoryBlock() const { return M; }
   private:
     MemoryBlock M;
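A minimal caller of the renamed header API might look like the following; a sketch assuming post-patch headers, with the flag combination chosen arbitrarily:

    #include "llvm/Support/Memory.h"
    #include <system_error>
    using namespace llvm;

    int main() {
      std::error_code EC;
      // Request 100 bytes; the returned block may cover a whole page or more.
      sys::MemoryBlock MB = sys::Memory::allocateMappedMemory(
          100, /*NearBlock=*/nullptr,
          sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC);
      if (EC)
        return 1;
      // allocatedSize() is the mapped size, >= the 100 bytes requested.
      size_t Mapped = MB.allocatedSize();
      (void)Mapped;
      return sys::Memory::releaseMappedMemory(MB) ? 1 : 0;
    }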
@@ -154,7 +154,7 @@ InProcessMemoryManager::allocate(const SegmentsRequestMap &Request) {
     MutableArrayRef<char> getWorkingMemory(ProtectionFlags Seg) override {
       assert(SegBlocks.count(Seg) && "No allocation for segment");
       return {static_cast<char *>(SegBlocks[Seg].base()),
-              SegBlocks[Seg].size()};
+              SegBlocks[Seg].allocatedSize()};
     }
     JITTargetAddress getTargetMemory(ProtectionFlags Seg) override {
       assert(SegBlocks.count(Seg) && "No allocation for segment");

@@ -178,7 +178,8 @@ InProcessMemoryManager::allocate(const SegmentsRequestMap &Request) {
       if (auto EC = sys::Memory::protectMappedMemory(Block, Prot))
         return errorCodeToError(EC);
       if (Prot & sys::Memory::MF_EXEC)
-        sys::Memory::InvalidateInstructionCache(Block.base(), Block.size());
+        sys::Memory::InvalidateInstructionCache(Block.base(),
+                                                Block.allocatedSize());
     }
     return Error::success();
   }
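The working-memory accessor above hands out the whole slab. Restated on a bare sys::MemoryBlock (a sketch of the same pattern, not the JITLink interface itself):

    #include "llvm/ADT/ArrayRef.h"
    #include "llvm/Support/Memory.h"
    using namespace llvm;

    // Expose a mapped block as a writable span. Using allocatedSize() means
    // the span covers the full mapping, including any page-rounding slack.
    MutableArrayRef<char> workingMemory(sys::MemoryBlock &Block) {
      return {static_cast<char *>(Block.base()), Block.allocatedSize()};
    }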
@@ -64,9 +64,9 @@ uint8_t *SectionMemoryManager::allocateSection(
   // Look in the list of free memory regions and use a block there if one
   // is available.
   for (FreeMemBlock &FreeMB : MemGroup.FreeMem) {
-    if (FreeMB.Free.size() >= RequiredSize) {
+    if (FreeMB.Free.allocatedSize() >= RequiredSize) {
       Addr = (uintptr_t)FreeMB.Free.base();
-      uintptr_t EndOfBlock = Addr + FreeMB.Free.size();
+      uintptr_t EndOfBlock = Addr + FreeMB.Free.allocatedSize();
       // Align the address.
       Addr = (Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1);

@@ -115,7 +115,7 @@ uint8_t *SectionMemoryManager::allocateSection(
   // Remember that we allocated this memory
   MemGroup.AllocatedMem.push_back(MB);
   Addr = (uintptr_t)MB.base();
-  uintptr_t EndOfBlock = Addr + MB.size();
+  uintptr_t EndOfBlock = Addr + MB.allocatedSize();

   // Align the address.
   Addr = (Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1);

@@ -177,7 +177,7 @@ static sys::MemoryBlock trimBlockToPageSize(sys::MemoryBlock M) {
   size_t StartOverlap =
       (PageSize - ((uintptr_t)M.base() % PageSize)) % PageSize;

-  size_t TrimmedSize = M.size();
+  size_t TrimmedSize = M.allocatedSize();
   TrimmedSize -= StartOverlap;
   TrimmedSize -= TrimmedSize % PageSize;

@@ -185,8 +185,9 @@ static sys::MemoryBlock trimBlockToPageSize(sys::MemoryBlock M) {
                            TrimmedSize);

   assert(((uintptr_t)Trimmed.base() % PageSize) == 0);
-  assert((Trimmed.size() % PageSize) == 0);
-  assert(M.base() <= Trimmed.base() && Trimmed.size() <= M.size());
+  assert((Trimmed.allocatedSize() % PageSize) == 0);
+  assert(M.base() <= Trimmed.base() &&
+         Trimmed.allocatedSize() <= M.allocatedSize());

   return Trimmed;
 }
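The trimBlockToPageSize() arithmetic is worth seeing in isolation. A standalone sketch with an assumed 4 KiB page and raw integers in place of MemoryBlock:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    struct Span { uintptr_t Base; size_t Size; };

    // Shrink [Base, Base+AllocatedSize) to the largest page-aligned subrange.
    Span trimToPageSize(uintptr_t Base, size_t AllocatedSize) {
      const size_t PageSize = 4096; // assumed; the real code queries the OS
      size_t StartOverlap = (PageSize - (Base % PageSize)) % PageSize;
      assert(AllocatedSize >= StartOverlap && "block too small to trim");
      size_t TrimmedSize = AllocatedSize - StartOverlap;
      TrimmedSize -= TrimmedSize % PageSize; // drop the partial trailing page
      Span T{Base + StartOverlap, TrimmedSize};
      assert(T.Base % PageSize == 0 && T.Size % PageSize == 0);
      return T;
    }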
@@ -209,9 +210,10 @@ SectionMemoryManager::applyMemoryGroupPermissions(MemoryGroup &MemGroup,
   }

   // Remove all blocks which are now empty
-  MemGroup.FreeMem.erase(
-      remove_if(MemGroup.FreeMem,
-                [](FreeMemBlock &FreeMB) { return FreeMB.Free.size() == 0; }),
-      MemGroup.FreeMem.end());
+  MemGroup.FreeMem.erase(remove_if(MemGroup.FreeMem,
+                                   [](FreeMemBlock &FreeMB) {
+                                     return FreeMB.Free.allocatedSize() == 0;
+                                   }),
+                         MemGroup.FreeMem.end());

   return std::error_code();
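The reflowed erase() call above is the standard erase/remove_if idiom; here it is on a stand-in FreeMemBlock:

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    struct FreeMemBlock {
      size_t AllocatedSize = 0; // stand-in for FreeMB.Free.allocatedSize()
    };

    // Drop every free block whose underlying region has shrunk to nothing.
    void removeEmptyBlocks(std::vector<FreeMemBlock> &FreeMem) {
      FreeMem.erase(std::remove_if(FreeMem.begin(), FreeMem.end(),
                                   [](const FreeMemBlock &FreeMB) {
                                     return FreeMB.AllocatedSize == 0;
                                   }),
                    FreeMem.end());
    }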
@@ -219,7 +221,8 @@ SectionMemoryManager::applyMemoryGroupPermissions(MemoryGroup &MemGroup,

 void SectionMemoryManager::invalidateInstructionCache() {
   for (sys::MemoryBlock &Block : CodeMem.PendingMem)
-    sys::Memory::InvalidateInstructionCache(Block.base(), Block.size());
+    sys::Memory::InvalidateInstructionCache(Block.base(),
+                                            Block.allocatedSize());
 }

 SectionMemoryManager::~SectionMemoryManager() {

@@ -242,11 +245,7 @@ public:
   allocateMappedMemory(SectionMemoryManager::AllocationPurpose Purpose,
                        size_t NumBytes, const sys::MemoryBlock *const NearBlock,
                        unsigned Flags, std::error_code &EC) override {
-    // allocateMappedMemory calls mmap(2). We round up a request size
-    // to page size to get extra space for free.
-    static const size_t PageSize = sys::Process::getPageSizeEstimate();
-    size_t ReqBytes = (NumBytes + PageSize - 1) & ~(PageSize - 1);
-    return sys::Memory::allocateMappedMemory(ReqBytes, NearBlock, Flags, EC);
+    return sys::Memory::allocateMappedMemory(NumBytes, NearBlock, Flags, EC);
   }

   std::error_code protectMappedMemory(const sys::MemoryBlock &Block,
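The deleted pre-rounding in the default mapper is the point of the patch: sys::Memory::allocateMappedMemory now rounds internally and reports the result through allocatedSize(), so a second round-up on top would be redundant. A sketch of the simplified call (hypothetical free function, real LLVM entry point):

    #include "llvm/Support/Memory.h"
    #include <system_error>
    using namespace llvm;

    // Forward the raw request; the allocator does the page rounding and the
    // returned block's allocatedSize() tells us what we actually got.
    sys::MemoryBlock mapperAllocate(size_t NumBytes, std::error_code &EC) {
      return sys::Memory::allocateMappedMemory(
          NumBytes, /*NearBlock=*/nullptr,
          sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC);
    }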
@@ -75,20 +75,22 @@ private:
 // output file on commit(). This is used only when we cannot use OnDiskBuffer.
 class InMemoryBuffer : public FileOutputBuffer {
 public:
-  InMemoryBuffer(StringRef Path, MemoryBlock Buf, unsigned Mode)
-      : FileOutputBuffer(Path), Buffer(Buf), Mode(Mode) {}
+  InMemoryBuffer(StringRef Path, MemoryBlock Buf, std::size_t BufSize,
+                 unsigned Mode)
+      : FileOutputBuffer(Path), Buffer(Buf), BufferSize(BufSize),
+        Mode(Mode) {}

   uint8_t *getBufferStart() const override { return (uint8_t *)Buffer.base(); }

   uint8_t *getBufferEnd() const override {
-    return (uint8_t *)Buffer.base() + Buffer.size();
+    return (uint8_t *)Buffer.base() + BufferSize;
   }

-  size_t getBufferSize() const override { return Buffer.size(); }
+  size_t getBufferSize() const override { return BufferSize; }

   Error commit() override {
     if (FinalPath == "-") {
-      llvm::outs() << StringRef((const char *)Buffer.base(), Buffer.size());
+      llvm::outs() << StringRef((const char *)Buffer.base(), BufferSize);
       llvm::outs().flush();
       return Error::success();
     }

@@ -100,12 +102,14 @@ public:
         openFileForWrite(FinalPath, FD, CD_CreateAlways, OF_None, Mode))
       return errorCodeToError(EC);
     raw_fd_ostream OS(FD, /*shouldClose=*/true, /*unbuffered=*/true);
-    OS << StringRef((const char *)Buffer.base(), Buffer.size());
+    OS << StringRef((const char *)Buffer.base(), BufferSize);
     return Error::success();
   }

 private:
+  // Buffer may actually contain a larger memory block than BufferSize
   OwningMemoryBlock Buffer;
+  size_t BufferSize;
   unsigned Mode;
 };
 } // namespace

@@ -117,7 +121,7 @@ createInMemoryBuffer(StringRef Path, size_t Size, unsigned Mode) {
       Size, nullptr, sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC);
   if (EC)
     return errorCodeToError(EC);
-  return llvm::make_unique<InMemoryBuffer>(Path, MB, Mode);
+  return llvm::make_unique<InMemoryBuffer>(Path, MB, Size, Mode);
 }

 static Expected<std::unique_ptr<FileOutputBuffer>>
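The shape of the InMemoryBuffer change, reduced to stand-in types (a sketch, not the LLVM classes): the buffer's end and size come from the requested length, never from the mapping, so page-rounding slack cannot leak into the committed file.

    #include <cstddef>
    #include <cstdint>

    struct OwnedBlock {
      void *Base = nullptr;
      size_t AllocatedSize = 0; // may exceed the requested size
    };

    class InMemoryBufferSketch {
    public:
      InMemoryBufferSketch(OwnedBlock Buf, std::size_t BufSize)
          : Buffer(Buf), BufferSize(BufSize) {}

      uint8_t *bufferStart() const {
        return static_cast<uint8_t *>(Buffer.Base);
      }
      // Derived from BufferSize, not Buffer.AllocatedSize: only the bytes the
      // caller asked for are ever written out.
      uint8_t *bufferEnd() const { return bufferStart() + BufferSize; }
      size_t bufferSize() const { return BufferSize; }

    private:
      OwnedBlock Buffer; // may hold a larger memory block than BufferSize
      size_t BufferSize;
    };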
@@ -43,8 +43,8 @@ raw_ostream &operator<<(raw_ostream &OS, const Memory::ProtectionFlags &PF) {

 raw_ostream &operator<<(raw_ostream &OS, const MemoryBlock &MB) {
   return OS << "[ " << MB.base() << " .. "
-            << (void *)((char *)MB.base() + MB.size()) << " ] (" << MB.size()
-            << " bytes)";
+            << (void *)((char *)MB.base() + MB.allocatedSize()) << " ] ("
+            << MB.allocatedSize() << " bytes)";
 }

 } // end namespace sys

@@ -117,13 +117,15 @@ Memory::allocateMappedMemory(size_t NumBytes,
   // Use any near hint and the page size to set a page-aligned starting address
   uintptr_t Start = NearBlock ? reinterpret_cast<uintptr_t>(NearBlock->base()) +
-                                      NearBlock->size() : 0;
+                                      NearBlock->allocatedSize() : 0;
   static const size_t PageSize = Process::getPageSizeEstimate();
+  const size_t NumPages = (NumBytes+PageSize-1)/PageSize;

   if (Start && Start % PageSize)
     Start += PageSize - Start % PageSize;

   // FIXME: Handle huge page requests (MF_HUGE_HINT).
-  void *Addr = ::mmap(reinterpret_cast<void *>(Start), NumBytes, Protect,
+  void *Addr = ::mmap(reinterpret_cast<void *>(Start), PageSize*NumPages, Protect,
                       MMFlags, fd, 0);
   if (Addr == MAP_FAILED) {
     if (NearBlock) { //Try again without a near hint

@@ -146,7 +148,7 @@ Memory::allocateMappedMemory(size_t NumBytes,

   MemoryBlock Result;
   Result.Address = Addr;
-  Result.Size = NumBytes;
+  Result.AllocatedSize = PageSize*NumPages;
   Result.Flags = PFlags;

   // Rely on protectMappedMemory to invalidate instruction cache.

@@ -161,14 +163,14 @@ Memory::allocateMappedMemory(size_t NumBytes,

 std::error_code
 Memory::releaseMappedMemory(MemoryBlock &M) {
-  if (M.Address == nullptr || M.Size == 0)
+  if (M.Address == nullptr || M.AllocatedSize == 0)
     return std::error_code();

-  if (0 != ::munmap(M.Address, M.Size))
+  if (0 != ::munmap(M.Address, M.AllocatedSize))
     return std::error_code(errno, std::generic_category());

   M.Address = nullptr;
-  M.Size = 0;
+  M.AllocatedSize = 0;

   return std::error_code();
 }

@@ -176,7 +178,7 @@ Memory::releaseMappedMemory(MemoryBlock &M) {
 std::error_code
 Memory::protectMappedMemory(const MemoryBlock &M, unsigned Flags) {
   static const size_t PageSize = Process::getPageSizeEstimate();
-  if (M.Address == nullptr || M.Size == 0)
+  if (M.Address == nullptr || M.AllocatedSize == 0)
     return std::error_code();

   if (!Flags)

@@ -184,7 +186,7 @@ Memory::protectMappedMemory(const MemoryBlock &M, unsigned Flags) {

   int Protect = getPosixProtectionFlags(Flags);
   uintptr_t Start = alignAddr((uint8_t *)M.Address - PageSize + 1, PageSize);
-  uintptr_t End = alignAddr((uint8_t *)M.Address + M.Size, PageSize);
+  uintptr_t End = alignAddr((uint8_t *)M.Address + M.AllocatedSize, PageSize);

   bool InvalidateCache = (Flags & MF_EXEC);

@@ -197,7 +199,7 @@ Memory::protectMappedMemory(const MemoryBlock &M, unsigned Flags) {
     if (Result != 0)
       return std::error_code(errno, std::generic_category());

-    Memory::InvalidateInstructionCache(M.Address, M.Size);
+    Memory::InvalidateInstructionCache(M.Address, M.AllocatedSize);
     InvalidateCache = false;
   }
 #endif

@@ -208,7 +210,7 @@ Memory::protectMappedMemory(const MemoryBlock &M, unsigned Flags) {
     return std::error_code(errno, std::generic_category());

   if (InvalidateCache)
-    Memory::InvalidateInstructionCache(M.Address, M.Size);
+    Memory::InvalidateInstructionCache(M.Address, M.AllocatedSize);

   return std::error_code();
 }
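The Unix path's contract after this change, mirrored as a plain POSIX round-trip (Linux/BSD mmap flags assumed): map whole pages, remember the mapped size, and release with that same size.

    #include <cstddef>
    #include <cstdio>
    #include <sys/mman.h>
    #include <unistd.h>

    int main() {
      const size_t NumBytes = 100; // requested
      const size_t PageSize = static_cast<size_t>(sysconf(_SC_PAGESIZE));
      const size_t NumPages = (NumBytes + PageSize - 1) / PageSize;
      const size_t AllocatedSize = PageSize * NumPages; // what mmap maps
      void *Addr = ::mmap(nullptr, AllocatedSize, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (Addr == MAP_FAILED)
        return 1;
      std::printf("mapped %zu bytes for a %zu-byte request\n", AllocatedSize,
                  NumBytes);
      // munmap must be given the mapped size, which AllocatedSize now records.
      return ::munmap(Addr, AllocatedSize);
    }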
@@ -125,7 +125,7 @@ MemoryBlock Memory::allocateMappedMemory(size_t NumBytes,
   size_t NumBlocks = (NumBytes + Granularity - 1) / Granularity;

   uintptr_t Start = NearBlock ? reinterpret_cast<uintptr_t>(NearBlock->base()) +
-                                NearBlock->size()
+                                NearBlock->allocatedSize()
                               : 0;

   // If the requested address is not aligned to the allocation granularity,

@@ -149,7 +149,7 @@ MemoryBlock Memory::allocateMappedMemory(size_t NumBytes,

   MemoryBlock Result;
   Result.Address = PA;
-  Result.Size = NumBytes;
+  Result.AllocatedSize = AllocSize;
   Result.Flags = (Flags & ~MF_HUGE_HINT) | (HugePages ? MF_HUGE_HINT : 0);

   if (Flags & MF_EXEC)

@@ -159,31 +159,31 @@ MemoryBlock Memory::allocateMappedMemory(size_t NumBytes,
 }

 std::error_code Memory::releaseMappedMemory(MemoryBlock &M) {
-  if (M.Address == 0 || M.Size == 0)
+  if (M.Address == 0 || M.AllocatedSize == 0)
     return std::error_code();

   if (!VirtualFree(M.Address, 0, MEM_RELEASE))
     return mapWindowsError(::GetLastError());

   M.Address = 0;
-  M.Size = 0;
+  M.AllocatedSize = 0;

   return std::error_code();
 }

 std::error_code Memory::protectMappedMemory(const MemoryBlock &M,
                                             unsigned Flags) {
-  if (M.Address == 0 || M.Size == 0)
+  if (M.Address == 0 || M.AllocatedSize == 0)
     return std::error_code();

   DWORD Protect = getWindowsProtectionFlags(Flags);

   DWORD OldFlags;
-  if (!VirtualProtect(M.Address, M.Size, Protect, &OldFlags))
+  if (!VirtualProtect(M.Address, M.AllocatedSize, Protect, &OldFlags))
     return mapWindowsError(::GetLastError());

   if (Flags & MF_EXEC)
-    Memory::InvalidateInstructionCache(M.Address, M.Size);
+    Memory::InvalidateInstructionCache(M.Address, M.AllocatedSize);

   return std::error_code();
 }
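On Windows the rounding unit is the allocation granularity (commonly 64 KiB), not the page size, which is why AllocatedSize can be much larger than the request there. The NumBlocks computation above, in isolation (granularity assumed; the real code asks the OS):

    #include <cstddef>

    size_t windowsAllocSize(size_t NumBytes) {
      const size_t Granularity = 64 * 1024; // assumed allocation granularity
      const size_t NumBlocks = (NumBytes + Granularity - 1) / Granularity;
      return NumBlocks * Granularity; // stored as MemoryBlock::AllocatedSize
    }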
@@ -644,7 +644,7 @@ static void remapSectionsAndSymbols(const llvm::Triple &TargetTriple,
       // reason (e.g. zero byte COFF sections). Don't include those sections in
       // the allocation map.
       if (LoadAddr != 0)
-        AlreadyAllocated[LoadAddr] = (*Tmp)->MB.size();
+        AlreadyAllocated[LoadAddr] = (*Tmp)->MB.allocatedSize();
       Worklist.erase(Tmp);
     }
   }

@@ -668,13 +668,14 @@ static void remapSectionsAndSymbols(const llvm::Triple &TargetTriple,
     uint64_t NextSectionAddr = TargetAddrStart;

     for (const auto &Alloc : AlreadyAllocated)
-      if (NextSectionAddr + CurEntry->MB.size() + TargetSectionSep <= Alloc.first)
+      if (NextSectionAddr + CurEntry->MB.allocatedSize() + TargetSectionSep <=
+              Alloc.first)
         break;
       else
         NextSectionAddr = Alloc.first + Alloc.second + TargetSectionSep;

     Dyld.mapSectionAddress(CurEntry->MB.base(), NextSectionAddr);
-    AlreadyAllocated[NextSectionAddr] = CurEntry->MB.size();
+    AlreadyAllocated[NextSectionAddr] = CurEntry->MB.allocatedSize();
   }

   // Add dummy symbols to the memory manager.
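The placement loop above searches a map of existing allocations for the first gap that fits. Standalone (stand-in types; AlreadyAllocated maps start address to allocated size, as in llvm-rtdyld, and std::map iterates in key order):

    #include <cstdint>
    #include <map>

    uint64_t findSlot(const std::map<uint64_t, uint64_t> &AlreadyAllocated,
                      uint64_t TargetAddrStart, uint64_t Size, uint64_t Sep) {
      uint64_t Next = TargetAddrStart;
      for (const auto &Alloc : AlreadyAllocated)
        if (Next + Size + Sep <= Alloc.first)
          break; // the section fits before this existing allocation
        else
          Next = Alloc.first + Alloc.second + Sep; // skip past it
      return Next;
    }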
@@ -76,9 +76,9 @@ protected:
       return true;

     if (M1.base() > M2.base())
-      return (unsigned char *)M2.base() + M2.size() > M1.base();
+      return (unsigned char *)M2.base() + M2.allocatedSize() > M1.base();

-    return (unsigned char *)M1.base() + M1.size() > M2.base();
+    return (unsigned char *)M1.base() + M1.allocatedSize() > M2.base();
   }

   unsigned Flags;
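The test helper's overlap check, restated on raw pointers (a sketch of the same logic, not the fixture method; the equal-base case is assumed from the `return true;` context above):

    #include <cstddef>

    bool doesOverlap(void *B1, size_t S1, void *B2, size_t S2) {
      auto *P1 = static_cast<unsigned char *>(B1);
      auto *P2 = static_cast<unsigned char *>(B2);
      if (P1 == P2)
        return true; // same base always overlaps
      if (P1 > P2)
        return P2 + S2 > P1; // the lower block must reach past P1 to overlap
      return P1 + S1 > P2;
    }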
@@ -100,7 +100,7 @@ TEST_P(MappedMemoryTest, AllocAndRelease) {
   EXPECT_EQ(std::error_code(), EC);

   EXPECT_NE((void*)nullptr, M1.base());
-  EXPECT_LE(sizeof(int), M1.size());
+  EXPECT_LE(sizeof(int), M1.allocatedSize());

   EXPECT_FALSE(Memory::releaseMappedMemory(M1));
 }

@@ -116,7 +116,7 @@ TEST_P(MappedMemoryTest, AllocAndReleaseHuge) {
   // returned, if large pages aren't available.

   EXPECT_NE((void *)nullptr, M1.base());
-  EXPECT_LE(sizeof(int), M1.size());
+  EXPECT_LE(sizeof(int), M1.allocatedSize());

   EXPECT_FALSE(Memory::releaseMappedMemory(M1));
 }

@@ -132,11 +132,11 @@ TEST_P(MappedMemoryTest, MultipleAllocAndRelease) {
   EXPECT_EQ(std::error_code(), EC);

   EXPECT_NE((void*)nullptr, M1.base());
-  EXPECT_LE(16U, M1.size());
+  EXPECT_LE(16U, M1.allocatedSize());
   EXPECT_NE((void*)nullptr, M2.base());
-  EXPECT_LE(64U, M2.size());
+  EXPECT_LE(64U, M2.allocatedSize());
   EXPECT_NE((void*)nullptr, M3.base());
-  EXPECT_LE(32U, M3.size());
+  EXPECT_LE(32U, M3.allocatedSize());

   EXPECT_FALSE(doesOverlap(M1, M2));
   EXPECT_FALSE(doesOverlap(M2, M3));

@@ -147,7 +147,7 @@ TEST_P(MappedMemoryTest, MultipleAllocAndRelease) {
   MemoryBlock M4 = Memory::allocateMappedMemory(16, nullptr, Flags, EC);
   EXPECT_EQ(std::error_code(), EC);
   EXPECT_NE((void*)nullptr, M4.base());
-  EXPECT_LE(16U, M4.size());
+  EXPECT_LE(16U, M4.allocatedSize());
   EXPECT_FALSE(Memory::releaseMappedMemory(M4));
   EXPECT_FALSE(Memory::releaseMappedMemory(M2));
 }

@@ -164,7 +164,7 @@ TEST_P(MappedMemoryTest, BasicWrite) {
   EXPECT_EQ(std::error_code(), EC);

   EXPECT_NE((void*)nullptr, M1.base());
-  EXPECT_LE(sizeof(int), M1.size());
+  EXPECT_LE(sizeof(int), M1.allocatedSize());

   int *a = (int*)M1.base();
   *a = 1;

@@ -196,11 +196,11 @@ TEST_P(MappedMemoryTest, MultipleWrite) {
   EXPECT_FALSE(doesOverlap(M1, M3));

   EXPECT_NE((void*)nullptr, M1.base());
-  EXPECT_LE(1U * sizeof(int), M1.size());
+  EXPECT_LE(1U * sizeof(int), M1.allocatedSize());
   EXPECT_NE((void*)nullptr, M2.base());
-  EXPECT_LE(8U * sizeof(int), M2.size());
+  EXPECT_LE(8U * sizeof(int), M2.allocatedSize());
   EXPECT_NE((void*)nullptr, M3.base());
-  EXPECT_LE(4U * sizeof(int), M3.size());
+  EXPECT_LE(4U * sizeof(int), M3.allocatedSize());

   int *x = (int*)M1.base();
   *x = 1;

@@ -224,7 +224,7 @@ TEST_P(MappedMemoryTest, MultipleWrite) {
                                                 Flags, EC);
   EXPECT_EQ(std::error_code(), EC);
   EXPECT_NE((void*)nullptr, M4.base());
-  EXPECT_LE(64U * sizeof(int), M4.size());
+  EXPECT_LE(64U * sizeof(int), M4.allocatedSize());
   x = (int*)M4.base();
   *x = 4;
   EXPECT_EQ(4, *x);

@@ -255,11 +255,11 @@ TEST_P(MappedMemoryTest, EnabledWrite) {
   EXPECT_EQ(std::error_code(), EC);

   EXPECT_NE((void*)nullptr, M1.base());
-  EXPECT_LE(2U * sizeof(int), M1.size());
+  EXPECT_LE(2U * sizeof(int), M1.allocatedSize());
   EXPECT_NE((void*)nullptr, M2.base());
-  EXPECT_LE(8U * sizeof(int), M2.size());
+  EXPECT_LE(8U * sizeof(int), M2.allocatedSize());
   EXPECT_NE((void*)nullptr, M3.base());
-  EXPECT_LE(4U * sizeof(int), M3.size());
+  EXPECT_LE(4U * sizeof(int), M3.allocatedSize());

   EXPECT_FALSE(Memory::protectMappedMemory(M1, getTestableEquivalent(Flags)));
   EXPECT_FALSE(Memory::protectMappedMemory(M2, getTestableEquivalent(Flags)));

@@ -289,7 +289,7 @@ TEST_P(MappedMemoryTest, EnabledWrite) {
   MemoryBlock M4 = Memory::allocateMappedMemory(16, nullptr, Flags, EC);
   EXPECT_EQ(std::error_code(), EC);
   EXPECT_NE((void*)nullptr, M4.base());
-  EXPECT_LE(16U, M4.size());
+  EXPECT_LE(16U, M4.allocatedSize());
   EXPECT_EQ(std::error_code(),
             Memory::protectMappedMemory(M4, getTestableEquivalent(Flags)));
   x = (int*)M4.base();

@@ -310,11 +310,11 @@ TEST_P(MappedMemoryTest, SuccessiveNear) {
   EXPECT_EQ(std::error_code(), EC);

   EXPECT_NE((void*)nullptr, M1.base());
-  EXPECT_LE(16U, M1.size());
+  EXPECT_LE(16U, M1.allocatedSize());
   EXPECT_NE((void*)nullptr, M2.base());
-  EXPECT_LE(64U, M2.size());
+  EXPECT_LE(64U, M2.allocatedSize());
   EXPECT_NE((void*)nullptr, M3.base());
-  EXPECT_LE(32U, M3.size());
+  EXPECT_LE(32U, M3.allocatedSize());

   EXPECT_FALSE(doesOverlap(M1, M2));
   EXPECT_FALSE(doesOverlap(M2, M3));

@@ -337,11 +337,11 @@ TEST_P(MappedMemoryTest, DuplicateNear) {
   EXPECT_EQ(std::error_code(), EC);

   EXPECT_NE((void*)nullptr, M1.base());
-  EXPECT_LE(16U, M1.size());
+  EXPECT_LE(16U, M1.allocatedSize());
   EXPECT_NE((void*)nullptr, M2.base());
-  EXPECT_LE(64U, M2.size());
+  EXPECT_LE(64U, M2.allocatedSize());
   EXPECT_NE((void*)nullptr, M3.base());
-  EXPECT_LE(32U, M3.size());
+  EXPECT_LE(32U, M3.allocatedSize());

   EXPECT_FALSE(Memory::releaseMappedMemory(M1));
   EXPECT_FALSE(Memory::releaseMappedMemory(M3));

@@ -360,11 +360,11 @@ TEST_P(MappedMemoryTest, ZeroNear) {
   EXPECT_EQ(std::error_code(), EC);

   EXPECT_NE((void*)nullptr, M1.base());
-  EXPECT_LE(16U, M1.size());
+  EXPECT_LE(16U, M1.allocatedSize());
   EXPECT_NE((void*)nullptr, M2.base());
-  EXPECT_LE(64U, M2.size());
+  EXPECT_LE(64U, M2.allocatedSize());
   EXPECT_NE((void*)nullptr, M3.base());
-  EXPECT_LE(32U, M3.size());
+  EXPECT_LE(32U, M3.allocatedSize());

   EXPECT_FALSE(doesOverlap(M1, M2));
   EXPECT_FALSE(doesOverlap(M2, M3));

@@ -387,11 +387,11 @@ TEST_P(MappedMemoryTest, ZeroSizeNear) {
   EXPECT_EQ(std::error_code(), EC);

   EXPECT_NE((void*)nullptr, M1.base());
-  EXPECT_LE(16U, M1.size());
+  EXPECT_LE(16U, M1.allocatedSize());
   EXPECT_NE((void*)nullptr, M2.base());
-  EXPECT_LE(64U, M2.size());
+  EXPECT_LE(64U, M2.allocatedSize());
   EXPECT_NE((void*)nullptr, M3.base());
-  EXPECT_LE(32U, M3.size());
+  EXPECT_LE(32U, M3.allocatedSize());

   EXPECT_FALSE(doesOverlap(M1, M2));
   EXPECT_FALSE(doesOverlap(M2, M3));

@@ -410,7 +410,7 @@ TEST_P(MappedMemoryTest, UnalignedNear) {
   EXPECT_EQ(std::error_code(), EC);

   EXPECT_NE((void*)nullptr, M1.base());
-  EXPECT_LE(sizeof(int), M1.size());
+  EXPECT_LE(sizeof(int), M1.allocatedSize());

   EXPECT_FALSE(Memory::releaseMappedMemory(M1));
 }
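Taken together, the updated tests assert a lower bound rather than an exact size, because the allocator may round up. A representative case, as a sketch (gtest plus the post-patch API; not one of the tests in this commit):

    #include "llvm/Support/Memory.h"
    #include "gtest/gtest.h"
    using namespace llvm;

    TEST(MappedMemorySketch, AllocatedSizeIsLowerBounded) {
      std::error_code EC;
      sys::MemoryBlock M1 = sys::Memory::allocateMappedMemory(
          16, nullptr, sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC);
      EXPECT_EQ(std::error_code(), EC);
      EXPECT_NE((void *)nullptr, M1.base());
      // The mapping may be a full page, so only a lower bound is guaranteed.
      EXPECT_LE(16U, M1.allocatedSize());
      EXPECT_FALSE(sys::Memory::releaseMappedMemory(M1));
    }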