import lldb
import lldb.formatters.Logger

# libcxx STL formatters for LLDB
# These formatters are based upon the implementation of libc++ that
# ships with current releases of OS X - they will not work for other
# implementations of the standard C++ library - and they are bound to use
# the libc++-specific namespace

# the std::string summary is just an example for your convenience
# the actual summary that LLDB uses is C++ code inside the debugger's own core

# this could probably be made more efficient, but since it only reads a handful
# of bytes at a time we probably don't need to worry too much about this for
# the time being
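
# Usage sketch (added commentary, not part of the original file): these
# formatters are typically brought into a debug session with something like
#   (lldb) command script import /path/to/libcxx.py
# which runs __lldb_init_module() at the bottom of this file and registers the
# summary and synthetic providers below under the "libcxx" type category.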


def make_string(F, L):
    strval = ''
    G = F.GetData().uint8
    for X in range(L):
        V = G[X]
        if V == 0:
            break
        strval = strval + chr(V % 256)
    return '"' + strval + '"'


# if we ever care about big-endian, these two functions might need to change


def is_short_string(value):
    return (value & 1) == 0


def extract_short_size(value):
    return (value >> 1) % 256
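
# Worked example (added for clarity; derived from the helpers above, not from
# the original comments): the low bit of the first size byte distinguishes the
# short (inline) and long (heap-allocated) string layouts, e.g.
#   is_short_string(0x0A)    -> True   (low bit clear: short/inline layout)
#   extract_short_size(0x0A) -> 5      (remaining bits hold the size)
#   is_short_string(0x0B)    -> False  (low bit set: long/heap layout)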


# some of the members of libc++ std::string are anonymous or have internal
# names that convey no external significance - we access them by index since
# this saves a name lookup that would add no information for readers of the
# code, but when possible try to use meaningful variable names


def stdstring_SummaryProvider(valobj, dict):
    logger = lldb.formatters.Logger.Logger()
    r = valobj.GetChildAtIndex(0)
    B = r.GetChildAtIndex(0)
    first = B.GetChildAtIndex(0)
    D = first.GetChildAtIndex(0)
    l = D.GetChildAtIndex(0)
    s = D.GetChildAtIndex(1)
    D20 = s.GetChildAtIndex(0)
    size_mode = D20.GetChildAtIndex(0).GetValueAsUnsigned(0)
    if is_short_string(size_mode):
        size = extract_short_size(size_mode)
        return make_string(s.GetChildAtIndex(1), size)
    else:
        data_ptr = l.GetChildAtIndex(2)
        size_vo = l.GetChildAtIndex(1)
        # the NULL terminator must be accounted for
        size = size_vo.GetValueAsUnsigned(0) + 1
        if size <= 1:  # should never be the case
            return '""'
        try:
            data = data_ptr.GetPointeeData(0, size)
        except:
            return '""'
        error = lldb.SBError()
        strval = data.GetString(error, 0)
        if error.Fail():
            return '<error:' + error.GetCString() + '>'
        else:
            return '"' + strval + '"'


class stdvector_SynthProvider:

    def __init__(self, valobj, dict):
        logger = lldb.formatters.Logger.Logger()
        self.valobj = valobj

    def num_children(self):
        logger = lldb.formatters.Logger.Logger()
        try:
            start_val = self.start.GetValueAsUnsigned(0)
            finish_val = self.finish.GetValueAsUnsigned(0)
            # Before a vector has been constructed, it will contain bad values
            # so we really need to be careful about the length we return since
            # uninitialized data can cause us to return a huge number. We need
            # to also check for any of the start, finish or end of storage
            # values being zero (NULL). If any are, then this vector has not
            # been initialized yet and we should return zero

            # Make sure nothing is NULL
            if start_val == 0 or finish_val == 0:
                return 0
            # Make sure start is less than finish
            if start_val >= finish_val:
                return 0

            num_children = (finish_val - start_val)
            if (num_children % self.data_size) != 0:
                return 0
            else:
                num_children = num_children // self.data_size
            return num_children
        except:
            return 0

    def get_child_index(self, name):
        logger = lldb.formatters.Logger.Logger()
        try:
            return int(name.lstrip('[').rstrip(']'))
        except:
            return -1

    def get_child_at_index(self, index):
        logger = lldb.formatters.Logger.Logger()
        logger >> "Retrieving child " + str(index)
        if index < 0:
            return None
        if index >= self.num_children():
            return None
        try:
            offset = index * self.data_size
            return self.start.CreateChildAtOffset(
                '[' + str(index) + ']', offset, self.data_type)
        except:
            return None

    def update(self):
        logger = lldb.formatters.Logger.Logger()
        try:
            self.start = self.valobj.GetChildMemberWithName('__begin_')
            self.finish = self.valobj.GetChildMemberWithName('__end_')
            # the purpose of this field is unclear, but it is the only field
            # whose type is clearly T* for a vector<T>
            # if this ends up not being correct, we can use the APIs to get at
            # template arguments
            data_type_finder = self.valobj.GetChildMemberWithName(
                '__end_cap_').GetChildMemberWithName('__first_')
            self.data_type = data_type_finder.GetType().GetPointeeType()
            self.data_size = self.data_type.GetByteSize()
        except:
            pass

    def has_children(self):
        return True
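
# Added note (not from the original source): LLDB drives a synthetic provider
# like the one above through a fixed protocol - update(), then num_children(),
# then get_child_at_index(i) for each requested child - so once it is
# registered (see __lldb_init_module below), a session looks roughly like:
#   (lldb) frame variable v
#   (std::__1::vector<int, ...>) v = size=3 {
#     [0] = 1
#     [1] = 2
#     [2] = 3
#   }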


# Just an example: the actual summary is produced by a summary string:
# size=${svar%#}


def stdvector_SummaryProvider(valobj, dict):
    prov = stdvector_SynthProvider(valobj, None)
    return 'size=' + str(prov.num_children())


class stdlist_entry:

    def __init__(self, entry):
        logger = lldb.formatters.Logger.Logger()
        self.entry = entry

    def _next_impl(self):
        logger = lldb.formatters.Logger.Logger()
        return stdlist_entry(self.entry.GetChildMemberWithName('__next_'))

    def _prev_impl(self):
        logger = lldb.formatters.Logger.Logger()
        return stdlist_entry(self.entry.GetChildMemberWithName('__prev_'))

    def _value_impl(self):
        logger = lldb.formatters.Logger.Logger()
        return self.entry.GetValueAsUnsigned(0)

    def _isnull_impl(self):
        logger = lldb.formatters.Logger.Logger()
        return self._value_impl() == 0

    def _sbvalue_impl(self):
        logger = lldb.formatters.Logger.Logger()
        return self.entry

    next = property(_next_impl, None)
    value = property(_value_impl, None)
    is_null = property(_isnull_impl, None)
    sbvalue = property(_sbvalue_impl, None)


class stdlist_iterator:

    def increment_node(self, node):
        logger = lldb.formatters.Logger.Logger()
        if node.is_null:
            return None
        return node.next

    def __init__(self, node):
        logger = lldb.formatters.Logger.Logger()
        # we convert the SBValue to an internal node object on entry
        self.node = stdlist_entry(node)

    def value(self):
        logger = lldb.formatters.Logger.Logger()
        return self.node.sbvalue  # and return the SBValue back on exit

    def next(self):
        logger = lldb.formatters.Logger.Logger()
        node = self.increment_node(self.node)
        if node is not None and node.sbvalue.IsValid() and not node.is_null:
            self.node = node
            return self.value()
        else:
            return None

    def advance(self, N):
        logger = lldb.formatters.Logger.Logger()
        if N < 0:
            return None
        if N == 0:
            return self.value()
        if N == 1:
            return self.next()
        while N > 0:
            self.next()
            N = N - 1
        return self.value()


class stdlist_SynthProvider:

    def __init__(self, valobj, dict):
        logger = lldb.formatters.Logger.Logger()
        self.valobj = valobj
        self.count = None

    def next_node(self, node):
        logger = lldb.formatters.Logger.Logger()
        return node.GetChildMemberWithName('__next_')

    def value(self, node):
        logger = lldb.formatters.Logger.Logger()
        return node.GetValueAsUnsigned()

    # Floyd's cycle-finding algorithm
    # try to detect if this list has a loop
    def has_loop(self):
        global _list_uses_loop_detector
        logger = lldb.formatters.Logger.Logger()
        if not _list_uses_loop_detector:
            logger >> "Asked not to use loop detection"
            return False
        slow = stdlist_entry(self.head)
        fast1 = stdlist_entry(self.head)
        fast2 = stdlist_entry(self.head)
        while slow.next.value != self.node_address:
            slow_value = slow.value
            fast1 = fast2.next
            fast2 = fast1.next
            if fast1.value == slow_value or fast2.value == slow_value:
                return True
            slow = slow.next
        return False

    def num_children(self):
        global _list_capping_size
        logger = lldb.formatters.Logger.Logger()
        if self.count is None:
            self.count = self.num_children_impl()
            if self.count > _list_capping_size:
                self.count = _list_capping_size
        return self.count

    def num_children_impl(self):
        global _list_capping_size
        logger = lldb.formatters.Logger.Logger()
        try:
            next_val = self.head.GetValueAsUnsigned(0)
            prev_val = self.tail.GetValueAsUnsigned(0)
            # After a std::list has been initialized, both next and prev will
            # be non-NULL
            if next_val == 0 or prev_val == 0:
                return 0
            if next_val == self.node_address:
                return 0
            if next_val == prev_val:
                return 1
            if self.has_loop():
                return 0
            size = 2
            current = stdlist_entry(self.head)
            while current.next.value != self.node_address:
                size = size + 1
                current = current.next
                if size > _list_capping_size:
                    return _list_capping_size
            return (size - 1)
        except:
            return 0

    def get_child_index(self, name):
        logger = lldb.formatters.Logger.Logger()
        try:
            return int(name.lstrip('[').rstrip(']'))
        except:
            return -1

    def get_child_at_index(self, index):
        logger = lldb.formatters.Logger.Logger()
        logger >> "Fetching child " + str(index)
        if index < 0:
            return None
        if index >= self.num_children():
            return None
        try:
            current = stdlist_iterator(self.head)
            current = current.advance(index)
            # we do not return __value_ because then all our children would be
            # named __value_ - we need to make a copy of __value_ with the
            # right name - unfortunate
            obj = current.GetChildMemberWithName('__value_')
            obj_data = obj.GetData()
            return self.valobj.CreateValueFromData(
                '[' + str(index) + ']', obj_data, self.data_type)
        except:
            return None

    def extract_type(self):
        logger = lldb.formatters.Logger.Logger()
        list_type = self.valobj.GetType().GetUnqualifiedType()
        if list_type.IsReferenceType():
            list_type = list_type.GetDereferencedType()
        if list_type.GetNumberOfTemplateArguments() > 0:
            data_type = list_type.GetTemplateArgumentType(0)
        else:
            data_type = None
        return data_type

    def update(self):
        logger = lldb.formatters.Logger.Logger()
        self.count = None
        try:
            impl = self.valobj.GetChildMemberWithName('__end_')
            self.node_address = self.valobj.AddressOf().GetValueAsUnsigned(0)
            self.head = impl.GetChildMemberWithName('__next_')
            self.tail = impl.GetChildMemberWithName('__prev_')
            self.data_type = self.extract_type()
            self.data_size = self.data_type.GetByteSize()
        except:
            pass

    def has_children(self):
        return True


# Just an example: the actual summary is produced by a summary string:
# size=${svar%#}


def stdlist_SummaryProvider(valobj, dict):
    prov = stdlist_SynthProvider(valobj, None)
    return 'size=' + str(prov.num_children())
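

# Added illustration (not part of the original formatter, and not used by it):
# has_loop() above is a bounded variant of Floyd's cycle detection, walking a
# slow cursor one __next_ link at a time and a fast cursor two links at a time
# so that a corrupted, circular chain of nodes is detected instead of iterating
# forever. On a plain Python linked list the same idea looks like this
# (hypothetical helper, kept module-private):
def _example_has_cycle(head):
    slow = fast = head
    while fast is not None and getattr(fast, 'next', None) is not None:
        slow = slow.next
        fast = fast.next.next
        if slow is fast:
            return True
    return False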


# a tree node - this class makes the syntax in the actual iterator nicer
# to read and maintain


class stdmap_iterator_node:

    def _left_impl(self):
        logger = lldb.formatters.Logger.Logger()
        return stdmap_iterator_node(
            self.node.GetChildMemberWithName("__left_"))

    def _right_impl(self):
        logger = lldb.formatters.Logger.Logger()
        return stdmap_iterator_node(
            self.node.GetChildMemberWithName("__right_"))

    def _parent_impl(self):
        logger = lldb.formatters.Logger.Logger()
        return stdmap_iterator_node(
            self.node.GetChildMemberWithName("__parent_"))

    def _value_impl(self):
        logger = lldb.formatters.Logger.Logger()
        return self.node.GetValueAsUnsigned(0)

    def _sbvalue_impl(self):
        logger = lldb.formatters.Logger.Logger()
        return self.node

    def _null_impl(self):
        logger = lldb.formatters.Logger.Logger()
        return self.value == 0

    def __init__(self, node):
        logger = lldb.formatters.Logger.Logger()
        self.node = node

    left = property(_left_impl, None)
    right = property(_right_impl, None)
    parent = property(_parent_impl, None)
    value = property(_value_impl, None)
    is_null = property(_null_impl, None)
    sbvalue = property(_sbvalue_impl, None)


# a Python implementation of the tree iterator used by libc++


class stdmap_iterator:

    def tree_min(self, x):
        logger = lldb.formatters.Logger.Logger()
        steps = 0
        if x.is_null:
            return None
        while not x.left.is_null:
            x = x.left
            steps += 1
            if steps > self.max_count:
                logger >> "Returning None - we overflowed"
                return None
        return x

    def tree_max(self, x):
        logger = lldb.formatters.Logger.Logger()
        if x.is_null:
            return None
        while not x.right.is_null:
            x = x.right
        return x

    def tree_is_left_child(self, x):
        logger = lldb.formatters.Logger.Logger()
        if x.is_null:
            return None
        return x.value == x.parent.left.value

    def increment_node(self, node):
        logger = lldb.formatters.Logger.Logger()
        if node.is_null:
            return None
        if not node.right.is_null:
            return self.tree_min(node.right)
        steps = 0
        while not self.tree_is_left_child(node):
            steps += 1
            if steps > self.max_count:
                logger >> "Returning None - we overflowed"
                return None
            node = node.parent
        return node.parent

    def __init__(self, node, max_count=0):
        logger = lldb.formatters.Logger.Logger()
        # we convert the SBValue to an internal node object on entry
        self.node = stdmap_iterator_node(node)
        self.max_count = max_count

    def value(self):
        logger = lldb.formatters.Logger.Logger()
        return self.node.sbvalue  # and return the SBValue back on exit

    def next(self):
        logger = lldb.formatters.Logger.Logger()
        node = self.increment_node(self.node)
        if node is not None and node.sbvalue.IsValid() and not node.is_null:
            self.node = node
            return self.value()
        else:
            return None

    def advance(self, N):
        logger = lldb.formatters.Logger.Logger()
        if N < 0:
            return None
        if N == 0:
            return self.value()
        if N == 1:
            return self.next()
        while N > 0:
            if self.next() is None:
                return None
            N = N - 1
        return self.value()
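
# Added note (commentary, not from the original source): increment_node() above
# is the usual in-order successor walk for a binary search tree - if the node
# has a right subtree, the successor is tree_min() of that subtree; otherwise
# the walk climbs __parent_ links until it arrives from a left child, and that
# parent is the successor. The max_count bound only exists so that a corrupted
# tree cannot keep the walk going forever.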


class stdmap_SynthProvider:

    def __init__(self, valobj, dict):
        logger = lldb.formatters.Logger.Logger()
        self.valobj = valobj
        self.pointer_size = self.valobj.GetProcess().GetAddressByteSize()
        self.count = None

    def update(self):
        logger = lldb.formatters.Logger.Logger()
        self.count = None
        try:
            # we will set this to True if we find out that discovering a node
            # in the map takes more steps than the overall size of the RB tree
            # if this gets set to True, then we will merrily return None for
            # any child from that moment on
            self.garbage = False
            self.tree = self.valobj.GetChildMemberWithName('__tree_')
            self.root_node = self.tree.GetChildMemberWithName('__begin_node_')
            # this data is either lazily-calculated, or cannot be inferred at
            # this moment; we still need to mark it as None, meaning
            # "please set me ASAP"
            self.data_type = None
            self.data_size = None
            self.skip_size = None
        except:
            pass

    def num_children(self):
        global _map_capping_size
        logger = lldb.formatters.Logger.Logger()
        if self.count is None:
            self.count = self.num_children_impl()
            if self.count > _map_capping_size:
                self.count = _map_capping_size
        return self.count

    def num_children_impl(self):
        logger = lldb.formatters.Logger.Logger()
        try:
            return self.valobj.GetChildMemberWithName('__tree_').GetChildMemberWithName(
                '__pair3_').GetChildMemberWithName('__first_').GetValueAsUnsigned()
        except:
            return 0

    def has_children(self):
        return True

    def get_data_type(self):
        logger = lldb.formatters.Logger.Logger()
        if self.data_type is None or self.data_size is None:
            if self.num_children() == 0:
                return False
            deref = self.root_node.Dereference()
            if not deref.IsValid():
                return False
            value = deref.GetChildMemberWithName('__value_')
            if not value.IsValid():
                return False
            self.data_type = value.GetType()
            self.data_size = self.data_type.GetByteSize()
            self.skip_size = None
            return True
        else:
            return True

    def get_value_offset(self, node):
        logger = lldb.formatters.Logger.Logger()
        if self.skip_size is None:
            node_type = node.GetType()
            fields_count = node_type.GetNumberOfFields()
            for i in range(fields_count):
                field = node_type.GetFieldAtIndex(i)
                if field.GetName() == '__value_':
                    self.skip_size = field.GetOffsetInBytes()
                    break
        return (self.skip_size is not None)

    def get_child_index(self, name):
        logger = lldb.formatters.Logger.Logger()
        try:
            return int(name.lstrip('[').rstrip(']'))
        except:
            return -1

    def get_child_at_index(self, index):
        logger = lldb.formatters.Logger.Logger()
        logger >> "Retrieving child " + str(index)
        if index < 0:
            return None
        if index >= self.num_children():
            return None
        if self.garbage:
            logger >> "Returning None since this tree is garbage"
            return None
        try:
            iterator = stdmap_iterator(
                self.root_node, max_count=self.num_children())
            # the debug info for libc++ std::map is such that __begin_node_ has
            # a very nice and useful type out of which we can grab the
            # information we need - every other node has a less informative
            # type which omits all value information and only contains
            # housekeeping information for the RB tree
            # hence, we need to know if we are at a node != 0, so that we can
            # still get at the data
            need_to_skip = (index > 0)
            current = iterator.advance(index)
            if current is None:
                logger >> "Tree is garbage - returning None"
                self.garbage = True
                return None
            if self.get_data_type():
                if not need_to_skip:
                    current = current.Dereference()
                    obj = current.GetChildMemberWithName('__value_')
                    obj_data = obj.GetData()
                    # make sure we have a valid offset for the next items
                    self.get_value_offset(current)
                    # we do not return __value_ because then we would end up
                    # with a child named __value_ instead of [0]
                    return self.valobj.CreateValueFromData(
                        '[' + str(index) + ']', obj_data, self.data_type)
                else:
                    # FIXME we need to have accessed item 0 before accessing
                    # any other item!
                    if self.skip_size is None:
                        logger >> "You asked for item > 0 before asking for item == 0, I will fetch 0 now then retry"
                        if self.get_child_at_index(0):
                            return self.get_child_at_index(index)
                        else:
                            logger >> "item == 0 could not be found. sorry, nothing can be done here."
                            return None
                    return current.CreateChildAtOffset(
                        '[' + str(index) + ']', self.skip_size, self.data_type)
            else:
                logger >> "Unable to infer data-type - returning None (should mark tree as garbage here?)"
                return None
        except Exception as err:
            logger >> "Hit an exception: " + str(err)
            return None


# Just an example: the actual summary is produced by a summary string:
# size=${svar%#}


def stdmap_SummaryProvider(valobj, dict):
    prov = stdmap_SynthProvider(valobj, None)
    return 'size=' + str(prov.num_children())
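
# Added note (commentary, not from the original source): num_children_impl()
# above reads the element count straight out of __tree_.__pair3_.__first_,
# which - on the libc++ layout these formatters target - is where the tree
# caches its size, so sizing a map does not require walking any nodes.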


class stddeque_SynthProvider:

    def __init__(self, valobj, d):
        logger = lldb.formatters.Logger.Logger()
        logger.write("init")
        self.valobj = valobj
        self.pointer_size = self.valobj.GetProcess().GetAddressByteSize()
        self.count = None
        try:
            self.find_block_size()
        except:
            self.block_size = -1
            self.element_size = -1
        logger.write(
            "block_size=%d, element_size=%d" %
            (self.block_size, self.element_size))

    def find_block_size(self):
        # in order to use the deque we must have the block size, or else
        # it's impossible to know what memory addresses are valid
        self.element_type = self.valobj.GetType().GetTemplateArgumentType(0)
        self.element_size = self.element_type.GetByteSize()
        # The code says this, but there must be a better way:
        # template <class _Tp, class _Allocator>
        # class __deque_base {
        #    static const difference_type __block_size =
        #        sizeof(value_type) < 256 ? 4096 / sizeof(value_type) : 16;
        # }
        if self.element_size < 256:
            self.block_size = 4096 // self.element_size
        else:
            self.block_size = 16
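
    # Added worked example (not in the original comments): with the rule above,
    # an 8-byte element type gives a block size of 4096 // 8 == 512 elements
    # per row, while any element type of 256 bytes or more falls back to a
    # fixed 16 elements per row.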

    def num_children(self):
        global _deque_capping_size
        logger = lldb.formatters.Logger.Logger()
        if self.count is None:
            return 0
        return min(self.count, _deque_capping_size)

    def has_children(self):
        return True

    def get_child_index(self, name):
        logger = lldb.formatters.Logger.Logger()
        try:
            return int(name.lstrip('[').rstrip(']'))
        except:
            return -1

    def get_child_at_index(self, index):
        logger = lldb.formatters.Logger.Logger()
        logger.write("Fetching child " + str(index))
        if index < 0 or self.count is None:
            return None
        if index >= self.num_children():
            return None
        try:
            i, j = divmod(self.start + index, self.block_size)
            return self.first.CreateValueFromExpression(
                '[' + str(index) + ']', '*(*(%s + %d) + %d)' %
                (self.first.get_expr_path(), i, j))
        except:
            return None

    def update(self):
        logger = lldb.formatters.Logger.Logger()
        try:
            # A deque is effectively a two-dimensional array with fixed width.
            # 'map' contains pointers to the rows of this array. The
            # full memory area allocated by the deque is delimited
            # by 'first' and 'end_cap'. However, only a subset of this
            # memory contains valid data since a deque may have some slack
            # at the front and back in order to have O(1) insertion at
            # both ends. The rows in active use are delimited by
            # 'begin' and 'end'.
            #
            # To find the elements that are actually constructed, the 'start'
            # variable tells which element in this NxM array is the 0th
            # one, and the 'size' element gives the number of elements
            # in the deque.
            count = self.valobj.GetChildMemberWithName(
                '__size_').GetChildMemberWithName('__first_').GetValueAsUnsigned(0)
            # give up now if we can't access memory reliably
            if self.block_size < 0:
                logger.write("block_size < 0")
                return
            map_ = self.valobj.GetChildMemberWithName('__map_')
            start = self.valobj.GetChildMemberWithName(
                '__start_').GetValueAsUnsigned(0)
            first = map_.GetChildMemberWithName('__first_')
            map_first = first.GetValueAsUnsigned(0)
            map_begin = map_.GetChildMemberWithName(
                '__begin_').GetValueAsUnsigned(0)
            map_end = map_.GetChildMemberWithName(
                '__end_').GetValueAsUnsigned(0)
            map_endcap = map_.GetChildMemberWithName(
                '__end_cap_').GetChildMemberWithName('__first_').GetValueAsUnsigned(0)
            # check consistency
            if not map_first <= map_begin <= map_end <= map_endcap:
                logger.write("map pointers are not monotonic")
                return
            total_rows, junk = divmod(
                map_endcap - map_first, self.pointer_size)
            if junk:
                logger.write("endcap-first doesn't align correctly")
                return
            active_rows, junk = divmod(map_end - map_begin, self.pointer_size)
            if junk:
                logger.write("end-begin doesn't align correctly")
                return
            start_row, junk = divmod(map_begin - map_first, self.pointer_size)
            if junk:
                logger.write("begin-first doesn't align correctly")
                return
            if not start_row * \
                    self.block_size <= start < (start_row + 1) * self.block_size:
                logger.write("0th element must be in the 'begin' row")
                return
            end_row = start_row + active_rows
            if not count:
                if active_rows:
                    logger.write("empty deque but begin!=end")
                    return
            elif not (end_row - 1) * self.block_size <= start + count < end_row * self.block_size:
                logger.write("nth element must be before the 'end' row")
                return
            logger.write(
                "update success: count=%r, start=%r, first=%r" %
                (count, start, first))
            # if consistent, save all we really need:
            self.count = count
            self.start = start
            self.first = first
        except:
            self.count = None
            self.start = None
            self.map_first = None
            self.map_begin = None
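
# Added worked example (commentary, not from the original source): with
# block_size == 512 and start == 510, deque element [5] is found via
# divmod(510 + 5, 512) == (1, 3), i.e. row 1 of the deque's map and column 3
# within that row - which is exactly the '*(*(first + i) + j)' expression that
# stddeque_SynthProvider.get_child_at_index() builds.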


class stdsharedptr_SynthProvider:

    def __init__(self, valobj, d):
        logger = lldb.formatters.Logger.Logger()
        logger.write("init")
        self.valobj = valobj
        # self.element_ptr_type = self.valobj.GetType().GetTemplateArgumentType(0).GetPointerType()
        self.ptr = None
        self.cntrl = None
        process = valobj.GetProcess()
        self.endianness = process.GetByteOrder()
        self.pointer_size = process.GetAddressByteSize()
        self.count_type = valobj.GetType().GetBasicType(lldb.eBasicTypeUnsignedLong)

    def num_children(self):
        return 1

    def has_children(self):
        return True

    def get_child_index(self, name):
        if name == "__ptr_":
            return 0
        if name == "count":
            return 1
        if name == "weak_count":
            return 2
        return -1

    def get_child_at_index(self, index):
        if index == 0:
            return self.ptr
        if index == 1:
            if self.cntrl is None:
                count = 0
            else:
                count = 1 + \
                    self.cntrl.GetChildMemberWithName('__shared_owners_').GetValueAsSigned()
            return self.valobj.CreateValueFromData(
                "count", lldb.SBData.CreateDataFromUInt64Array(
                    self.endianness, self.pointer_size, [count]), self.count_type)
        if index == 2:
            if self.cntrl is None:
                count = 0
            else:
                count = 1 + \
                    self.cntrl.GetChildMemberWithName('__shared_weak_owners_').GetValueAsSigned()
            return self.valobj.CreateValueFromData(
                "weak_count", lldb.SBData.CreateDataFromUInt64Array(
                    self.endianness, self.pointer_size, [count]), self.count_type)
        return None

    def update(self):
        logger = lldb.formatters.Logger.Logger()
        self.ptr = self.valobj.GetChildMemberWithName(
            '__ptr_')  # .Cast(self.element_ptr_type)
        cntrl = self.valobj.GetChildMemberWithName('__cntrl_')
        if cntrl.GetValueAsUnsigned(0):
            self.cntrl = cntrl.Dereference()
        else:
            self.cntrl = None
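
# Added note (an assumption about the control-block layout, not stated in the
# original source): __shared_owners_ and __shared_weak_owners_ appear to store
# the respective counts minus one, which is why get_child_at_index() reports
# them as 1 + GetValueAsSigned(); a null __cntrl_ pointer is reported as a
# count of 0.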


# we can use two different categories for old and new formatters - the type
# names are different enough that there should be no confusion
# talking with a libc++ developer: "std::__1::class_name is set in stone
# until we decide to change the ABI. That shouldn't happen within a 5 year
# time frame"


def __lldb_init_module(debugger, dict):
    debugger.HandleCommand(
        'type summary add -F libcxx.stdstring_SummaryProvider "std::__1::string" -w libcxx')
    debugger.HandleCommand(
        'type summary add -F libcxx.stdstring_SummaryProvider "std::__1::basic_string<char, class std::__1::char_traits<char>, class std::__1::allocator<char> >" -w libcxx')
    debugger.HandleCommand(
        'type synthetic add -l libcxx.stdvector_SynthProvider -x "^(std::__1::)vector<.+>$" -w libcxx')
    debugger.HandleCommand(
        'type summary add -F libcxx.stdvector_SummaryProvider -e -x "^(std::__1::)vector<.+>$" -w libcxx')
    debugger.HandleCommand(
        'type synthetic add -l libcxx.stdlist_SynthProvider -x "^(std::__1::)list<.+>$" -w libcxx')
    debugger.HandleCommand(
        'type summary add -F libcxx.stdlist_SummaryProvider -e -x "^(std::__1::)list<.+>$" -w libcxx')
    debugger.HandleCommand(
        'type synthetic add -l libcxx.stdmap_SynthProvider -x "^(std::__1::)map<.+> >$" -w libcxx')
    debugger.HandleCommand(
        'type summary add -F libcxx.stdmap_SummaryProvider -e -x "^(std::__1::)map<.+> >$" -w libcxx')
    debugger.HandleCommand("type category enable libcxx")
    debugger.HandleCommand(
        'type synthetic add -l libcxx.stddeque_SynthProvider -x "^(std::__1::)deque<.+>$" -w libcxx')
    debugger.HandleCommand(
        'type synthetic add -l libcxx.stdsharedptr_SynthProvider -x "^(std::__1::)shared_ptr<.+>$" -w libcxx')
    # turns out the structs look the same, so weak_ptr can be handled the same!
    debugger.HandleCommand(
        'type synthetic add -l libcxx.stdsharedptr_SynthProvider -x "^(std::__1::)weak_ptr<.+>$" -w libcxx')


_map_capping_size = 255
_list_capping_size = 255
_list_uses_loop_detector = True
_deque_capping_size = 255
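
# Added usage note (not part of the original file): these module-level knobs
# cap how many children the list/map/deque providers will claim and toggle the
# list loop detector. Assuming the module was imported under the name
# "libcxx", they can be tweaked from a live session, e.g.
#   (lldb) script libcxx._list_capping_size = 1024
#   (lldb) script libcxx._list_uses_loop_detector = False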