For stepping performance I added the ability to outlaw all memory accesses
to the __PAGEZERO segment on Darwin. The dynamic loader now correctly doesn't
slide __PAGEZERO and also registers it as an invalid region of memory. This
allows us to avoid making any memory requests from the local or remote debug
session for addresses in this region. Stepping performance can improve when
uninitialized local variables point into __PAGEZERO, since we won't even make
the memory read or write request.

llvm-svn: 151128
Greg Clayton 2012-02-22 04:37:26 +00:00
parent 337cfaf757
commit a9f40ad80a
7 changed files with 162 additions and 42 deletions
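
The core idea is a short-circuit check in front of every target memory
request. The sketch below is not code from this commit; it is a minimal
illustration, with made-up names, of rejecting accesses that fall inside a
known-inaccessible range before any read or write is issued:

#include <algorithm>
#include <cstdint>
#include <vector>

// Hypothetical stand-in for the invalid-range bookkeeping: keep the
// ranges sorted by base address and test membership before issuing
// a read or write to the (possibly remote) debuggee.
struct AddrRange { uint64_t base; uint64_t size; };

class InvalidRegionFilter {
public:
    void Add(uint64_t base, uint64_t size) {
        m_ranges.push_back({base, size});
        std::sort(m_ranges.begin(), m_ranges.end(),
                  [](const AddrRange &a, const AddrRange &b) {
                      return a.base < b.base;
                  });
    }
    // True if addr lies inside any registered range, in which case the
    // memory request is short-circuited instead of sent to the target.
    bool Contains(uint64_t addr) const {
        for (const AddrRange &r : m_ranges)
            if (addr >= r.base && addr - r.base < r.size)
                return true;
        return false;
    }
private:
    std::vector<AddrRange> m_ranges;
};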


@@ -188,6 +188,17 @@ namespace lldb_private {
m_entries.push_back (entry);
}
bool
RemoveEntryAtIndex (uint32_t idx)
{
if (idx < m_entries.size())
{
m_entries.erase (m_entries.begin() + idx);
return true;
}
return false;
}
void
Sort ()
{


@@ -16,10 +16,10 @@
#include <vector>
// Other libraries and framework includes
//#include "llvm/ADT/BitVector.h"
// Project includes
#include "lldb/lldb-private.h"
#include "lldb/Core/RangeMap.h"
#include "lldb/Host/Mutex.h"
namespace lldb_private {
@@ -54,16 +54,24 @@ namespace lldb_private {
{
return m_cache_line_byte_size;
}
void
AddInvalidRange (lldb::addr_t base_addr, lldb::addr_t byte_size);
bool
RemoveInvalidRange (lldb::addr_t base_addr, lldb::addr_t byte_size);
protected:
typedef std::map<lldb::addr_t, lldb::DataBufferSP> collection;
typedef std::map<lldb::addr_t, lldb::DataBufferSP> BlockMap;
typedef RangeArray<lldb::addr_t, lldb::addr_t, 4> InvalidRanges;
//------------------------------------------------------------------
// Classes that inherit from MemoryCache can see and modify these
//------------------------------------------------------------------
Process &m_process;
uint32_t m_cache_line_byte_size;
Mutex m_cache_mutex;
collection m_cache;
Mutex m_mutex;
BlockMap m_cache;
InvalidRanges m_invalid_ranges;
private:
DISALLOW_COPY_AND_ASSIGN (MemoryCache);
};
@@ -132,7 +140,6 @@ namespace lldb_private {
const uint32_t m_chunk_size; // The size of chunks that the memory at m_addr is divided up into
typedef std::map<uint32_t, uint32_t> OffsetToChunkSize;
OffsetToChunkSize m_offset_to_chunk_size;
//llvm::BitVector m_allocated;
};

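For context on the BlockMap rename above: the cache is keyed by
cache-line-aligned addresses, and both Flush and Read (shown later in
MemoryCache.cpp) rely on the same alignment arithmetic. A quick
self-contained check, using an arbitrary address:

#include <cassert>
#include <cstdint>

int main() {
    const uint64_t cache_line_byte_size = 512; // default set in the constructor
    const uint64_t addr = 0x100003a7f;
    // The line that owns addr, and the offset of addr within that line.
    const uint64_t line_base = addr - (addr % cache_line_byte_size);
    const uint64_t offset = addr - line_base;
    assert(line_base % cache_line_byte_size == 0);
    assert(offset < cache_line_byte_size);
    return 0;
}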

@@ -1297,6 +1297,8 @@ public:
eBroadcastInternalStateControlResume = (1<<2)
};
typedef Range<lldb::addr_t, lldb::addr_t> LoadRange;
// These two functions fill out the Broadcaster interface:
static ConstString &GetStaticBroadcasterClass ();
@@ -3091,6 +3093,23 @@ public:
void
SetSTDIOFileDescriptor (int file_descriptor);
//------------------------------------------------------------------
// Add a permanent region of memory that should never be read or
// written to. This can be used to ensure that memory reads or writes
// to certain areas of memory never end up being sent to the
// DoReadMemory or DoWriteMemory functions, which can improve
// performance.
//------------------------------------------------------------------
void
AddInvalidMemoryRegion (const LoadRange &region);
//------------------------------------------------------------------
// Remove a permanent region of memory, previously added with
// AddInvalidMemoryRegion, that should never be read or written to.
//------------------------------------------------------------------
bool
RemoveInvalidMemoryRange (const LoadRange &region);
protected:
//------------------------------------------------------------------
// NextEventAction provides a way to register an action on the next

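A likely call site for the new pair of methods, sketched as a fragment
that assumes the declarations above (the 4 GiB size matches a typical
64-bit __PAGEZERO, but is only an example):

#include "lldb/Target/Process.h"

// Sketch: register the first 4 GiB as unreadable/unwritable, the way a
// dynamic loader plugin might for a 64-bit main executable's __PAGEZERO.
static void MarkPageZeroInvalid(lldb_private::Process &process)
{
    lldb_private::Process::LoadRange pagezero (0, 0x100000000ull);
    process.AddInvalidMemoryRegion (pagezero);
    // If the range ever needs to be dropped again (the exact base and
    // size must match for the removal to succeed):
    // process.RemoveInvalidMemoryRange (pagezero);
}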

@@ -100,12 +100,6 @@
ReferencedContainer = "container:lldb.xcodeproj">
</BuildableReference>
</BuildableProductRunnable>
<CommandLineArguments>
<CommandLineArgument
argument = "/private/tmp/a.out"
isEnabled = "YES">
</CommandLineArgument>
</CommandLineArguments>
<EnvironmentVariables>
<EnvironmentVariable
key = "LLDB_LAUNCH_FLAG_DISABLE_ASLR"


@@ -433,12 +433,23 @@ DynamicLoaderMacOSXDYLD::UpdateImageLoadAddress (Module *module, DYLDImageInfo&
SectionList *section_list = image_object_file->GetSectionList ();
if (section_list)
{
std::vector<uint32_t> inaccessible_segment_indexes;
// We now know the slide amount, so go through all sections
// and update the load addresses with the correct values.
uint32_t num_segments = info.segments.size();
for (uint32_t i=0; i<num_segments; ++i)
{
// Only load a segment if it has protections. Things like
// __PAGEZERO don't have any protections, and they shouldn't
// be slid.
SectionSP section_sp(section_list->FindSectionByName(info.segments[i].name));
if (info.segments[i].maxprot == 0)
{
inaccessible_segment_indexes.push_back(i);
}
else
{
const addr_t new_section_load_addr = info.segments[i].vmaddr + info.slide;
static ConstString g_section_name_LINKEDIT ("__LINKEDIT");
@@ -469,6 +480,36 @@ DynamicLoaderMacOSXDYLD::UpdateImageLoadAddress (Module *module, DYLDImageInfo&
}
}
}
// If we loaded the file (it changed) and we have segments that
// are not readable or writable, add them to the invalid memory
// region cache for the process. This will typically only be
// the __PAGEZERO segment in the main executable. We might be able
// to apply this more generally to more sections that have no
// protections in the future, but for now we are going to just
// do __PAGEZERO.
if (changed && !inaccessible_segment_indexes.empty())
{
for (uint32_t i=0; i<inaccessible_segment_indexes.size(); ++i)
{
const uint32_t seg_idx = inaccessible_segment_indexes[i];
SectionSP section_sp(section_list->FindSectionByName(info.segments[seg_idx].name));
if (section_sp)
{
static ConstString g_pagezero_section_name("__PAGEZERO");
if (g_pagezero_section_name == section_sp->GetName())
{
// __PAGEZERO never slides...
const lldb::addr_t vmaddr = info.segments[seg_idx].vmaddr;
const lldb::addr_t vmsize = info.segments[seg_idx].vmsize;
Process::LoadRange pagezero_range (vmaddr, vmsize);
m_process->AddInvalidMemoryRegion(pagezero_range);
}
}
}
}
}
}
}
return changed;

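The decision above hinges entirely on maxprot == 0. Distilled into a
standalone sketch, with a toy segment type standing in for the
DYLDImageInfo segment data:

#include <cstdint>
#include <string>
#include <vector>

struct Segment { std::string name; uint64_t vmaddr; uint64_t vmsize; uint32_t maxprot; };

// Segments with no protections at all (maxprot == 0, i.e. VM_PROT_NONE)
// are never slid and should be reported as invalid memory; everything
// else gets its load address slid normally.
static std::vector<Segment> FindInaccessibleSegments(const std::vector<Segment> &segments)
{
    std::vector<Segment> result;
    for (const Segment &seg : segments)
        if (seg.maxprot == 0)
            result.push_back(seg);
    return result;
}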

@@ -26,8 +26,9 @@ using namespace lldb_private;
MemoryCache::MemoryCache(Process &process) :
m_process (process),
m_cache_line_byte_size (512),
m_cache_mutex (Mutex::eMutexTypeRecursive),
m_cache ()
m_mutex (Mutex::eMutexTypeRecursive),
m_cache (),
m_invalid_ranges ()
{
}
@@ -41,7 +42,7 @@ MemoryCache::~MemoryCache()
void
MemoryCache::Clear()
{
Mutex::Locker locker (m_cache_mutex);
Mutex::Locker locker (m_mutex);
m_cache.clear();
}
@@ -56,7 +57,7 @@ MemoryCache::Flush (addr_t addr, size_t size)
const addr_t flush_start_addr = addr - (addr % cache_line_byte_size);
const addr_t flush_end_addr = end_addr - (end_addr % cache_line_byte_size);
Mutex::Locker locker (m_cache_mutex);
Mutex::Locker locker (m_mutex);
if (m_cache.empty())
return;
@@ -64,12 +65,43 @@ MemoryCache::Flush (addr_t addr, size_t size)
for (addr_t curr_addr = flush_start_addr; curr_addr <= flush_end_addr; curr_addr += cache_line_byte_size)
{
collection::iterator pos = m_cache.find (curr_addr);
BlockMap::iterator pos = m_cache.find (curr_addr);
if (pos != m_cache.end())
m_cache.erase(pos);
}
}
void
MemoryCache::AddInvalidRange (lldb::addr_t base_addr, lldb::addr_t byte_size)
{
if (byte_size > 0)
{
Mutex::Locker locker (m_mutex);
InvalidRanges::Entry range (base_addr, byte_size);
m_invalid_ranges.Append(range);
m_invalid_ranges.Sort();
}
}
bool
MemoryCache::RemoveInvalidRange (lldb::addr_t base_addr, lldb::addr_t byte_size)
{
if (byte_size > 0)
{
Mutex::Locker locker (m_mutex);
const uint32_t idx = m_invalid_ranges.FindEntryIndexThatContains(base_addr);
if (idx != UINT32_MAX)
{
const InvalidRanges::Entry *entry = m_invalid_ranges.GetEntryAtIndex (idx);
if (entry->GetRangeBase() == base_addr && entry->GetByteSize() == byte_size)
return m_invalid_ranges.RemoveEntryAtIndex (idx);
}
}
return false;
}
size_t
MemoryCache::Read (addr_t addr,
void *dst,
@@ -83,12 +115,15 @@ MemoryCache::Read (addr_t addr,
uint8_t *dst_buf = (uint8_t *)dst;
addr_t curr_addr = addr - (addr % cache_line_byte_size);
addr_t cache_offset = addr - curr_addr;
Mutex::Locker locker (m_cache_mutex);
Mutex::Locker locker (m_mutex);
while (bytes_left > 0)
{
collection::const_iterator pos = m_cache.find (curr_addr);
collection::const_iterator end = m_cache.end ();
if (m_invalid_ranges.FindEntryThatContains(curr_addr))
return dst_len - bytes_left;
BlockMap::const_iterator pos = m_cache.find (curr_addr);
BlockMap::const_iterator end = m_cache.end ();
if (pos != end)
{

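One subtlety in the Read change above: hitting an invalid range mid-read
is not an error; it reports a partial read of dst_len - bytes_left bytes.
A toy model of that accounting (byte-at-a-time instead of
cache-line-at-a-time, purely for illustration):

#include <cassert>
#include <cstddef>

// Simulates a read of dst_len bytes that runs into an invalid range
// after bytes_until_invalid bytes: the bytes already satisfied are
// still reported to the caller.
static size_t ReadWithCutoff(size_t dst_len, size_t bytes_until_invalid)
{
    size_t bytes_left = dst_len;
    while (bytes_left > 0) {
        if (dst_len - bytes_left >= bytes_until_invalid)
            return dst_len - bytes_left; // stopped at the invalid range
        --bytes_left; // pretend one byte was copied from the cache
    }
    return dst_len; // fully satisfied
}

int main() {
    assert(ReadWithCutoff(16, 4) == 4);    // truncated at the invalid range
    assert(ReadWithCutoff(16, 100) == 16); // never reached the range
    return 0;
}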

@@ -4451,6 +4451,19 @@ Process::GetThreadStatus (Stream &strm,
return num_thread_infos_dumped;
}
void
Process::AddInvalidMemoryRegion (const LoadRange &region)
{
m_memory_cache.AddInvalidRange(region.GetRangeBase(), region.GetByteSize());
}
bool
Process::RemoveInvalidMemoryRange (const LoadRange &region)
{
return m_memory_cache.RemoveInvalidRange(region.GetRangeBase(), region.GetByteSize());
}
//--------------------------------------------------------------
// class Process::SettingsController
//--------------------------------------------------------------