[lldb] Merge RangeArray and RangeVector

The two classes are equivalent, except that:
- the former uses an llvm::SmallVector (with a configurable inline size),
  while the latter uses a std::vector;
- the former has a typo in one of its function names (RemoveEntrtAtIndex).

This patch just leaves one class, using llvm::SmallVector, and defaults
the small size to zero. This is the same thing we did with the
RangeDataVector class in D56170.
Pavel Labath 2020-02-18 15:19:08 +01:00
parent 523cae324d
commit b807a28787
7 changed files with 7 additions and 214 deletions
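For illustration only (not part of the patch): with the default N = 0, llvm::SmallVector carries no inline storage and allocates on the heap on first use, so callers of the old std::vector-backed RangeVector keep their allocation behavior, while callers of the old RangeArray can still request inline storage by passing an explicit N. A minimal standalone sketch of that point, assuming the LLVM ADT headers are available; ToyRangeVector is a made-up name, not LLDB code:

// Toy model of the merged class; only meant to show what N = 0 means.
#include "llvm/ADT/SmallVector.h"
#include <cstdint>
#include <cstdio>

template <typename B, typename S, unsigned N = 0> struct ToyRangeVector {
  struct Entry {
    B base;
    S size;
  };
  // With N = 0, SmallVector keeps no inline elements and behaves like
  // std::vector; with N > 0 the first N entries live inside the object.
  llvm::SmallVector<Entry, N> entries;

  void Append(B base, S size) { entries.push_back({base, size}); }
};

int main() {
  ToyRangeVector<uint64_t, uint64_t> heap_backed;       // like old RangeVector users
  ToyRangeVector<uint64_t, uint64_t, 2> inline_backed;  // like old RangeArray users
  heap_backed.Append(0x1000, 0x100);
  inline_backed.Append(0x2000, 0x200);
  std::printf("%zu %zu\n", heap_backed.entries.size(),
              inline_backed.entries.size());
  return 0;
}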

@@ -71,6 +71,6 @@ typedef uint32_t dw_offset_t; // Dwarf Debug Information Entry offset for any
 //#define DW_OP_APPLE_error 0xFF // Stops expression evaluation and
 //returns an error (no args)
-typedef lldb_private::RangeArray<dw_addr_t, dw_addr_t, 2> DWARFRangeList;
+typedef lldb_private::RangeVector<dw_addr_t, dw_addr_t, 2> DWARFRangeList;
 #endif // LLDB_CORE_DWARF_H

@@ -40,7 +40,7 @@ namespace lldb_private {
 /// blocks.
 class Block : public UserID, public SymbolContextScope {
 public:
-  typedef RangeArray<uint32_t, uint32_t, 1> RangeList;
+  typedef RangeVector<uint32_t, uint32_t, 1> RangeList;
   typedef RangeList::Entry Range;
   /// Construct with a User ID \a uid, \a depth.

@@ -183,7 +183,7 @@ public:
   /// The number of line table entries in this line table.
   uint32_t GetSize() const;
-  typedef lldb_private::RangeArray<lldb::addr_t, lldb::addr_t, 32>
+  typedef lldb_private::RangeVector<lldb::addr_t, lldb::addr_t, 32>
       FileAddressRanges;
   /// Gets all contiguous file address ranges for the entire line table.

@@ -45,7 +45,7 @@ public:
 protected:
   typedef std::map<lldb::addr_t, lldb::DataBufferSP> BlockMap;
-  typedef RangeArray<lldb::addr_t, lldb::addr_t, 4> InvalidRanges;
+  typedef RangeVector<lldb::addr_t, lldb::addr_t, 4> InvalidRanges;
   typedef Range<lldb::addr_t, lldb::addr_t> AddrRange;
   // Classes that inherit from MemoryCache can see and modify these
   std::recursive_mutex m_mutex;

@@ -122,220 +122,13 @@ template <typename B, typename S> struct Range {
   }
 };
 // A range array class where you get to define the type of the ranges
 // that the collection contains.
-template <typename B, typename S, unsigned N> class RangeArray {
+template <typename B, typename S, unsigned N = 0> class RangeVector {
 public:
   typedef B BaseType;
   typedef S SizeType;
   typedef Range<B, S> Entry;
   typedef llvm::SmallVector<Entry, N> Collection;
-  RangeArray() = default;
-  ~RangeArray() = default;
-  void Append(const Entry &entry) { m_entries.push_back(entry); }
-  void Append(B base, S size) { m_entries.emplace_back(base, size); }
-  bool RemoveEntrtAtIndex(uint32_t idx) {
-    if (idx < m_entries.size()) {
-      m_entries.erase(m_entries.begin() + idx);
-      return true;
-    }
-    return false;
-  }
-  void Sort() {
-    if (m_entries.size() > 1)
-      std::stable_sort(m_entries.begin(), m_entries.end());
-  }
-#ifdef ASSERT_RANGEMAP_ARE_SORTED
-  bool IsSorted() const {
-    typename Collection::const_iterator pos, end, prev;
-    for (pos = m_entries.begin(), end = m_entries.end(), prev = end; pos != end;
-         prev = pos++) {
-      if (prev != end && *pos < *prev)
-        return false;
-    }
-    return true;
-  }
-#endif
-  void CombineConsecutiveRanges() {
-#ifdef ASSERT_RANGEMAP_ARE_SORTED
-    assert(IsSorted());
-#endif
-    // Can't combine if ranges if we have zero or one range
-    if (m_entries.size() > 1) {
-      // The list should be sorted prior to calling this function
-      typename Collection::iterator pos;
-      typename Collection::iterator end;
-      typename Collection::iterator prev;
-      bool can_combine = false;
-      // First we determine if we can combine any of the Entry objects so we
-      // don't end up allocating and making a new collection for no reason
-      for (pos = m_entries.begin(), end = m_entries.end(), prev = end;
-           pos != end; prev = pos++) {
-        if (prev != end && prev->DoesAdjoinOrIntersect(*pos)) {
-          can_combine = true;
-          break;
-        }
-      }
-      // We we can combine at least one entry, then we make a new collection
-      // and populate it accordingly, and then swap it into place.
-      if (can_combine) {
-        Collection minimal_ranges;
-        for (pos = m_entries.begin(), end = m_entries.end(), prev = end;
-             pos != end; prev = pos++) {
-          if (prev != end && prev->DoesAdjoinOrIntersect(*pos))
-            minimal_ranges.back().SetRangeEnd(
-                std::max<BaseType>(prev->GetRangeEnd(), pos->GetRangeEnd()));
-          else
-            minimal_ranges.push_back(*pos);
-        }
-        // Use the swap technique in case our new vector is much smaller. We
-        // must swap when using the STL because std::vector objects never
-        // release or reduce the memory once it has been allocated/reserved.
-        m_entries.swap(minimal_ranges);
-      }
-    }
-  }
-  BaseType GetMinRangeBase(BaseType fail_value) const {
-#ifdef ASSERT_RANGEMAP_ARE_SORTED
-    assert(IsSorted());
-#endif
-    if (m_entries.empty())
-      return fail_value;
-    // m_entries must be sorted, so if we aren't empty, we grab the first
-    // range's base
-    return m_entries.front().GetRangeBase();
-  }
-  BaseType GetMaxRangeEnd(BaseType fail_value) const {
-#ifdef ASSERT_RANGEMAP_ARE_SORTED
-    assert(IsSorted());
-#endif
-    if (m_entries.empty())
-      return fail_value;
-    // m_entries must be sorted, so if we aren't empty, we grab the last
-    // range's end
-    return m_entries.back().GetRangeEnd();
-  }
-  void Slide(BaseType slide) {
-    typename Collection::iterator pos, end;
-    for (pos = m_entries.begin(), end = m_entries.end(); pos != end; ++pos)
-      pos->Slide(slide);
-  }
-  void Clear() { m_entries.clear(); }
-  bool IsEmpty() const { return m_entries.empty(); }
-  size_t GetSize() const { return m_entries.size(); }
-  const Entry *GetEntryAtIndex(size_t i) const {
-    return ((i < m_entries.size()) ? &m_entries[i] : nullptr);
-  }
-  // Clients must ensure that "i" is a valid index prior to calling this
-  // function
-  const Entry &GetEntryRef(size_t i) const { return m_entries[i]; }
-  Entry *Back() { return (m_entries.empty() ? nullptr : &m_entries.back()); }
-  const Entry *Back() const {
-    return (m_entries.empty() ? nullptr : &m_entries.back());
-  }
-  static bool BaseLessThan(const Entry &lhs, const Entry &rhs) {
-    return lhs.GetRangeBase() < rhs.GetRangeBase();
-  }
-  uint32_t FindEntryIndexThatContains(B addr) const {
-#ifdef ASSERT_RANGEMAP_ARE_SORTED
-    assert(IsSorted());
-#endif
-    if (!m_entries.empty()) {
-      Entry entry(addr, 1);
-      typename Collection::const_iterator begin = m_entries.begin();
-      typename Collection::const_iterator end = m_entries.end();
-      typename Collection::const_iterator pos =
-          std::lower_bound(begin, end, entry, BaseLessThan);
-      if (pos != end && pos->Contains(addr)) {
-        return std::distance(begin, pos);
-      } else if (pos != begin) {
-        --pos;
-        if (pos->Contains(addr))
-          return std::distance(begin, pos);
-      }
-    }
-    return UINT32_MAX;
-  }
-  const Entry *FindEntryThatContains(B addr) const {
-#ifdef ASSERT_RANGEMAP_ARE_SORTED
-    assert(IsSorted());
-#endif
-    if (!m_entries.empty()) {
-      Entry entry(addr, 1);
-      typename Collection::const_iterator begin = m_entries.begin();
-      typename Collection::const_iterator end = m_entries.end();
-      typename Collection::const_iterator pos =
-          std::lower_bound(begin, end, entry, BaseLessThan);
-      if (pos != end && pos->Contains(addr)) {
-        return &(*pos);
-      } else if (pos != begin) {
-        --pos;
-        if (pos->Contains(addr)) {
-          return &(*pos);
-        }
-      }
-    }
-    return nullptr;
-  }
-  const Entry *FindEntryThatContains(const Entry &range) const {
-#ifdef ASSERT_RANGEMAP_ARE_SORTED
-    assert(IsSorted());
-#endif
-    if (!m_entries.empty()) {
-      typename Collection::const_iterator begin = m_entries.begin();
-      typename Collection::const_iterator end = m_entries.end();
-      typename Collection::const_iterator pos =
-          std::lower_bound(begin, end, range, BaseLessThan);
-      if (pos != end && pos->Contains(range)) {
-        return &(*pos);
-      } else if (pos != begin) {
-        --pos;
-        if (pos->Contains(range)) {
-          return &(*pos);
-        }
-      }
-    }
-    return nullptr;
-  }
-protected:
-  Collection m_entries;
-};
-template <typename B, typename S> class RangeVector {
-public:
-  typedef B BaseType;
-  typedef S SizeType;
-  typedef Range<B, S> Entry;
-  typedef std::vector<Entry> Collection;
   RangeVector() = default;
   ~RangeVector() = default;
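For reference, a hypothetical usage sketch of the surviving class, exercising only the methods visible in the hunk above (Append, Sort, CombineConsecutiveRanges, FindEntryThatContains); it assumes the in-tree header path lldb/Utility/RangeMap.h and is not part of the patch:

// Standalone sketch; build against the lldb headers to try it out.
#include "lldb/Utility/RangeMap.h"
#include <cassert>
#include <cstdint>

int main() {
  // Two inline entries, the same shape the DWARFRangeList typedef now uses.
  lldb_private::RangeVector<uint64_t, uint64_t, 2> ranges;
  ranges.Append(0x2000, 0x100); // [0x2000, 0x2100)
  ranges.Append(0x1000, 0x100); // [0x1000, 0x1100)
  ranges.Append(0x1100, 0x100); // adjoins the previous range

  ranges.Sort();                     // lookups below require sorted entries
  ranges.CombineConsecutiveRanges(); // merges the two adjoining ranges

  const auto *entry = ranges.FindEntryThatContains(0x1180);
  assert(entry && entry->GetRangeBase() == 0x1000);
  assert(ranges.GetSize() == 2);
  return 0;
}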

@@ -193,7 +193,7 @@ protected:
   size_t ParseSymtab();
-  typedef lldb_private::RangeArray<uint32_t, uint32_t, 8> EncryptedFileRanges;
+  typedef lldb_private::RangeVector<uint32_t, uint32_t, 8> EncryptedFileRanges;
   EncryptedFileRanges GetEncryptedFileRanges();
   struct SegmentParsingContext;

@@ -116,7 +116,7 @@ bool MemoryCache::RemoveInvalidRange(lldb::addr_t base_addr,
       const InvalidRanges::Entry *entry = m_invalid_ranges.GetEntryAtIndex(idx);
       if (entry->GetRangeBase() == base_addr &&
           entry->GetByteSize() == byte_size)
-        return m_invalid_ranges.RemoveEntrtAtIndex(idx);
+        return m_invalid_ranges.RemoveEntryAtIndex(idx);
     }
   }
   return false;