2012-08-28 02:49:12 +08:00
|
|
|
#!/usr/bin/env python
|
|
|
|
# A tool to parse ASTMatchers.h and update the documentation in
|
|
|
|
# ../LibASTMatchersReference.html automatically. Run from the
|
|
|
|
# directory in which this file is located to update the docs.
|
|
|
|
|
|
|
|
import collections
|
|
|
|
import re
|
2018-12-19 21:46:13 +08:00
|
|
|
try:
|
|
|
|
from urllib.request import urlopen
|
|
|
|
except ImportError:
|
|
|
|
from urllib2 import urlopen
|
2012-08-28 02:49:12 +08:00
|
|
|
|
2021-10-09 01:44:55 +08:00
|
|
|
# The doxygen class index is fetched once at startup; it is used by esc() to
# decide whether a node class has a documentation page worth linking to.
CLASS_INDEX_PAGE_URL = 'https://clang.llvm.org/doxygen/classes.html'

try:
  # Decode to text: on Python 3 urlopen().read() returns bytes, and the page
  # is later probed with `str in CLASS_INDEX_PAGE`, which would raise
  # TypeError on a bytes object.
  CLASS_INDEX_PAGE = urlopen(CLASS_INDEX_PAGE_URL).read().decode('utf-8')
except Exception as e:
  raise Exception('Unable to get %s: %s' % (CLASS_INDEX_PAGE_URL, e))
|
|
|
|
|
2012-08-28 02:49:12 +08:00
|
|
|
# Path of the ASTMatchers.h header to parse, relative to the directory this
# script is run from (see the header comment above).
MATCHERS_FILE = '../../include/clang/ASTMatchers/ASTMatchers.h'

# Each matcher is documented in one row of the form:
# result | name | argA
# The subsequent row contains the documentation and is hidden by default,
# becoming visible via javascript when the user clicks the matcher name.
TD_TEMPLATE="""
<tr><td>%(result)s</td><td class="name" onclick="toggle('%(id)s')"><a name="%(id)sAnchor">%(name)s</a></td><td>%(args)s</td></tr>
<tr><td colspan="4" class="doc" id="%(id)s"><pre>%(comment)s</pre></td></tr>
"""

# We categorize the matchers into these three categories in the reference:
# maps from a lookup key (see add_matcher) to the rendered html row pair.
node_matchers = {}
narrowing_matchers = {}
traversal_matchers = {}

# We output multiple rows per matcher if the matcher can be used on multiple
# node types. Thus, we need a new id per row to control the documentation
# pop-up. ids[name] keeps track of those ids.
ids = collections.defaultdict(int)

# Cache for doxygen urls we have already verified, mapping url -> bool
# (whether the page is referenced from the class index page).
doxygen_probes = {}
|
|
|
|
|
|
|
|
def esc(text):
  """Escape any html in the given text.

  Additionally, occurrences of Matcher&lt;Foo&gt; are turned into links to
  Foo's doxygen page when that page exists.
  """
  # The replacement strings must be html entities; substituting a character
  # with itself would make this function a no-op and leave raw html in the
  # generated reference.
  text = re.sub(r'&', '&amp;', text)
  text = re.sub(r'<', '&lt;', text)
  text = re.sub(r'>', '&gt;', text)
  def link_if_exists(m):
    """Wrap a likely AST node name in a link to its clang docs.

    We want to do this only if the page exists, in which case it will be
    referenced from the class index page.
    """
    name = m.group(1)
    url = 'https://clang.llvm.org/doxygen/classclang_1_1%s.html' % name
    if url not in doxygen_probes:
      search_str = 'href="classclang_1_1%s.html"' % name
      doxygen_probes[url] = search_str in CLASS_INDEX_PAGE
      if not doxygen_probes[url]:
        print('Did not find %s in class index page' % name)
    if doxygen_probes[url]:
      return r'Matcher&lt;<a href="%s">%s</a>&gt;' % (url, name)
    else:
      return m.group(0)
  # At this point '<'/'>' have been escaped, so the pattern must match the
  # entity form; a literal 'Matcher<...>' can no longer occur in text.
  text = re.sub(
      r'Matcher&lt;([^\*&]+)&gt;', link_if_exists, text)
  return text
|
|
|
|
|
|
|
|
def extract_result_types(comment):
  """Extracts a list of result types from the given comment.

  We allow annotations in the comment of the matcher to specify what
  nodes a matcher can match on. Those comments have the form:
    Usable as: Any Matcher | (Matcher<T1>[, Matcher<t2>[, ...]])

  Returns ['*'] in case of 'Any Matcher', or ['T1', 'T2', ...].
  Returns None if no 'Usable as' specification could be parsed.
  """
  if re.search(r'Usable as: Any Matcher[\s\n]*$', comment, re.S):
    return ['*']
  # Peel trailing 'Matcher<T>' entries off the end of the comment, one at a
  # time, collecting the type names as we go.
  collected = []
  while True:
    m = re.match(r'^(.*)Matcher<([^>]+)>\s*,?[\s\n]*$', comment, re.S)
    if m is None:
      break
    collected.append(m.group(2))
    comment = m.group(1)
  # The list is valid only if what remains ends in the 'Usable as:' prefix.
  if re.search(r'Usable as:\s*$', comment):
    return collected
  return None
|
|
|
|
|
|
|
|
def strip_doxygen(comment):
  """Returns the given comment without \\-escaped words."""
  # A line consisting of nothing but a doxygen command is dropped entirely.
  comment = re.sub(r'^\\[^\s]+\n', r'', comment, flags=re.M)

  # A doxygen \see command becomes plain "See also:" text.
  # FIXME: it would be better to turn this into a link to the target instead.
  comment = re.sub(r'\\see', r'See also:', comment)

  # Any remaining doxygen command is stripped together with the whitespace
  # that follows it.
  return re.sub(r'\\[^\s]+\s+', r'', comment)
|
|
|
|
|
|
|
|
def unify_arguments(args):
  """Gets rid of anything the user doesn't care about in the argument list."""
  # The substitutions are order-sensitive: '&' must be dropped before the
  # bare-matcher-name rewrite, and 'BindableMatcher' must collapse to
  # 'Matcher' before the 'const Matcher' cleanup.
  simplifications = (
      (r'internal::', r''),
      (r'extern const\s+(.*)&', r'\1 '),
      (r'&', r' '),
      (r'(^|\s)M\d?(\s)', r'\1Matcher<*>\2'),
      (r'BindableMatcher', r'Matcher'),
      (r'const Matcher', r'Matcher'),
  )
  for pattern, replacement in simplifications:
    args = re.sub(pattern, replacement, args)
  return args
|
|
|
|
|
2020-02-25 07:59:45 +08:00
|
|
|
def unify_type(result_type):
  """Gets rid of anything the user doesn't care about in the type name."""
  # 'internal::Matcher<T>' / 'internal::BindableMatcher<T>' collapse to 'T';
  # anything else is passed through untouched.
  return re.sub(
      r'^internal::(Bindable)?Matcher<([a-zA-Z_][a-zA-Z0-9_]*)>$',
      r'\2', result_type)
|
|
|
|
|
2012-08-28 02:49:12 +08:00
|
|
|
def add_matcher(result_type, name, args, comment, is_dyncast=False):
  """Adds a matcher to one of our categories.

  Renders one visible table row plus its hidden documentation row (via
  TD_TEMPLATE) and stores the html in node_matchers, narrowing_matchers or
  traversal_matchers, keyed so that duplicate definitions collapse.
  """
  if name == 'id':
    # FIXME: Figure out whether we want to support the 'id' matcher.
    return
  matcher_id = '%s%d' % (name, ids[name])
  ids[name] += 1
  args = unify_arguments(args)
  result_type = unify_type(result_type)

  docs_result_type = esc('Matcher<%s>' % result_type)

  if name == 'mapAnyOf':
    args = "nodeMatcherFunction..."
    docs_result_type = "<em>unspecified</em>"

  matcher_html = TD_TEMPLATE % {
    'result': docs_result_type,
    'name': name,
    'args': esc(args),
    'comment': esc(strip_doxygen(comment)),
    'id': matcher_id,
  }
  # 'category' instead of 'dict': don't shadow the builtin.
  if is_dyncast:
    category = node_matchers
    lookup = result_type + name
  # Use a heuristic to figure out whether a matcher is a narrowing or
  # traversal matcher. By default, matchers that take other matchers as
  # arguments (and are not node matchers) do traversal. We specifically
  # exclude known narrowing matchers that also take other matchers as
  # arguments.
  elif ('Matcher<' not in args or
        name in ['allOf', 'anyOf', 'anything', 'unless', 'mapAnyOf']):
    category = narrowing_matchers
    lookup = result_type + name + esc(args)
  else:
    category = traversal_matchers
    lookup = result_type + name + esc(args)

  # When the same lookup key occurs twice, keep the entry with the longer
  # html, i.e. the one carrying the more detailed documentation.
  if category.get(lookup) is None or len(category.get(lookup)) < len(matcher_html):
    category[lookup] = matcher_html
|
2012-08-28 02:49:12 +08:00
|
|
|
|
|
|
|
def act_on_decl(declaration, comment, allowed_types):
  """Parse the matcher out of the given declaration and comment.

  If 'allowed_types' is set, it contains a list of node types the matcher
  can match on, as extracted from the static type asserts in the matcher
  definition.

  The declaration is matched against each known matcher-definition shape in
  turn; the first shape that matches wins and one add_matcher() call is made
  per result type.
  """
  if declaration.strip():

    if re.match(r'^\s?(#|namespace|using)', declaration): return

    # Node matchers are defined by writing:
    #   VariadicDynCastAllOfMatcher<ResultType, ArgumentType> name;
    m = re.match(r""".*Variadic(?:DynCast)?AllOfMatcher\s*<
                       \s*([^\s,]+)\s*(?:,
                       \s*([^\s>]+)\s*)?>
                       \s*([^\s;]+)\s*;\s*$""", declaration, flags=re.X)
    if m:
      result, inner, name = m.groups()
      if not inner:
        inner = result
      add_matcher(result, name, 'Matcher<%s>...' % inner,
                  comment, is_dyncast=True)
      return

    # Special case of type matchers:
    #   AstTypeMatcher<ArgumentType> name
    m = re.match(r""".*AstTypeMatcher\s*<
                       \s*([^\s>]+)\s*>
                       \s*([^\s;]+)\s*;\s*$""", declaration, flags=re.X)
    if m:
      inner, name = m.groups()
      add_matcher('Type', name, 'Matcher<%s>...' % inner,
                  comment, is_dyncast=True)
      # FIXME: re-enable once we have implemented casting on the TypeLoc
      # hierarchy.
      # add_matcher('TypeLoc', '%sLoc' % name, 'Matcher<%sLoc>...' % inner,
      #             comment, is_dyncast=True)
      return

    # Parse the various matcher definition macros.
    # (Raw string: the pattern contains \( and \s escapes.)
    m = re.match(r""".*AST_TYPE(LOC)?_TRAVERSE_MATCHER(?:_DECL)?\(
                       \s*([^\s,]+\s*),
                       \s*(?:[^\s,]+\s*),
                       \s*AST_POLYMORPHIC_SUPPORTED_TYPES\(([^)]*)\)
                     \)\s*;\s*$""", declaration, flags=re.X)
    if m:
      loc, name, results = m.groups()[0:3]
      result_types = [r.strip() for r in results.split(',')]

      comment_result_types = extract_result_types(comment)
      if (comment_result_types and
          sorted(result_types) != sorted(comment_result_types)):
        raise Exception('Inconsistent documentation for: %s' % name)
      for result_type in result_types:
        add_matcher(result_type, name, 'Matcher<Type>', comment)
      # if loc:
      #   add_matcher('%sLoc' % result_type, '%sLoc' % name, 'Matcher<TypeLoc>',
      #               comment)
      return

    m = re.match(r"""^\s*AST_POLYMORPHIC_MATCHER(_P)?(.?)(?:_OVERLOAD)?\(
                       \s*([^\s,]+)\s*,
                       \s*AST_POLYMORPHIC_SUPPORTED_TYPES\(([^)]*)\)
                       (?:,\s*([^\s,]+)\s*
                          ,\s*([^\s,]+)\s*)?
                       (?:,\s*([^\s,]+)\s*
                          ,\s*([^\s,]+)\s*)?
                       (?:,\s*\d+\s*)?
                     \)\s*{\s*$""", declaration, flags=re.X)

    if m:
      p, n, name, results = m.groups()[0:4]
      args = m.groups()[4:]
      result_types = [r.strip() for r in results.split(',')]
      if allowed_types and allowed_types != result_types:
        raise Exception('Inconsistent documentation for: %s' % name)
      if n not in ['', '2']:
        raise Exception('Cannot parse "%s"' % declaration)
      args = ', '.join('%s %s' % (args[i], args[i+1])
                       for i in range(0, len(args), 2) if args[i])
      for result_type in result_types:
        add_matcher(result_type, name, args, comment)
      return

    m = re.match(r"""^\s*AST_POLYMORPHIC_MATCHER_REGEX(?:_OVERLOAD)?\(
                       \s*([^\s,]+)\s*,
                       \s*AST_POLYMORPHIC_SUPPORTED_TYPES\(([^)]*)\),
                       \s*([^\s,]+)\s*
                       (?:,\s*\d+\s*)?
                     \)\s*{\s*$""", declaration, flags=re.X)

    if m:
      name, results, arg_name = m.groups()[0:3]
      result_types = [r.strip() for r in results.split(',')]
      if allowed_types and allowed_types != result_types:
        raise Exception('Inconsistent documentation for: %s' % name)
      arg = "StringRef %s, Regex::RegexFlags Flags = NoFlags" % arg_name
      comment += """
If the matcher is used in clang-query, RegexFlags parameter
should be passed as a quoted string. e.g: "NoFlags".
Flags can be combined with '|' example \"IgnoreCase | BasicRegex\"
"""
      for result_type in result_types:
        add_matcher(result_type, name, arg, comment)
      return

    m = re.match(r"""^\s*AST_MATCHER_FUNCTION(_P)?(.?)(?:_OVERLOAD)?\(
                       (?:\s*([^\s,]+)\s*,)?
                       \s*([^\s,]+)\s*
                       (?:,\s*([^\s,]+)\s*
                          ,\s*([^\s,]+)\s*)?
                       (?:,\s*([^\s,]+)\s*
                          ,\s*([^\s,]+)\s*)?
                       (?:,\s*\d+\s*)?
                     \)\s*{\s*$""", declaration, flags=re.X)
    if m:
      p, n, result, name = m.groups()[0:4]
      args = m.groups()[4:]
      if n not in ['', '2']:
        raise Exception('Cannot parse "%s"' % declaration)
      args = ', '.join('%s %s' % (args[i], args[i+1])
                       for i in range(0, len(args), 2) if args[i])
      add_matcher(result, name, args, comment)
      return

    m = re.match(r"""^\s*AST_MATCHER(_P)?(.?)(?:_OVERLOAD)?\(
                       (?:\s*([^\s,]+)\s*,)?
                       \s*([^\s,]+)\s*
                       (?:,\s*([^,]+)\s*
                          ,\s*([^\s,]+)\s*)?
                       (?:,\s*([^\s,]+)\s*
                          ,\s*([^\s,]+)\s*)?
                       (?:,\s*\d+\s*)?
                     \)\s*{""", declaration, flags=re.X)
    if m:
      p, n, result, name = m.groups()[0:4]
      args = m.groups()[4:]
      if not result:
        if not allowed_types:
          raise Exception('Did not find allowed result types for: %s' % name)
        result_types = allowed_types
      else:
        result_types = [result]
      if n not in ['', '2']:
        raise Exception('Cannot parse "%s"' % declaration)
      args = ', '.join('%s %s' % (args[i], args[i+1])
                       for i in range(0, len(args), 2) if args[i])
      for result_type in result_types:
        add_matcher(result_type, name, args, comment)
      return

    m = re.match(r"""^\s*AST_MATCHER_REGEX(?:_OVERLOAD)?\(
                       \s*([^\s,]+)\s*,
                       \s*([^\s,]+)\s*,
                       \s*([^\s,]+)\s*
                       (?:,\s*\d+\s*)?
                     \)\s*{""", declaration, flags=re.X)
    if m:
      result, name, arg_name = m.groups()[0:3]
      if not result:
        if not allowed_types:
          raise Exception('Did not find allowed result types for: %s' % name)
        result_types = allowed_types
      else:
        result_types = [result]
      arg = "StringRef %s, Regex::RegexFlags Flags = NoFlags" % arg_name
      comment += """
If the matcher is used in clang-query, RegexFlags parameter
should be passed as a quoted string. e.g: "NoFlags".
Flags can be combined with '|' example \"IgnoreCase | BasicRegex\"
"""

      for result_type in result_types:
        add_matcher(result_type, name, arg, comment)
      return

    # Parse ArgumentAdapting matchers.
    m = re.match(
        r"""^.*ArgumentAdaptingMatcherFunc<.*>\s*
              ([a-zA-Z]*);$""",
        declaration, flags=re.X)
    if m:
      name = m.groups()[0]
      add_matcher('*', name, 'Matcher<*>', comment)
      return

    # Parse Variadic functions.
    m = re.match(
        r"""^.*internal::VariadicFunction\s*<\s*([^,]+),\s*([^,]+),\s*[^>]+>\s*
              ([a-zA-Z]*);$""",
        declaration, flags=re.X)
    if m:
      result, arg, name = m.groups()[:3]
      add_matcher(result, name, '%s, ..., %s' % (arg, arg), comment)
      return

    m = re.match(
        r"""^.*internal::VariadicFunction\s*<\s*
              internal::PolymorphicMatcher<[\S\s]+
              AST_POLYMORPHIC_SUPPORTED_TYPES\(([^)]*)\),\s*(.*);$""",
        declaration, flags=re.X)

    if m:
      results, trailing = m.groups()
      # Split the argument type and the variable name off the tail of the
      # template argument list by textual rsplit rather than regex.
      trailing, name = trailing.rsplit(">", 1)
      name = name.strip()
      trailing, _ = trailing.rsplit(",", 1)
      _, arg = trailing.rsplit(",", 1)
      arg = arg.strip()

      result_types = [r.strip() for r in results.split(',')]
      for result_type in result_types:
        add_matcher(result_type, name, '%s, ..., %s' % (arg, arg), comment)
      return

    # Parse Variadic operator matchers.
    m = re.match(
        r"""^.*VariadicOperatorMatcherFunc\s*<\s*([^,]+),\s*([^\s]+)\s*>\s*
              ([a-zA-Z]*);$""",
        declaration, flags=re.X)
    if m:
      min_args, max_args, name = m.groups()[:3]
      if max_args == '1':
        add_matcher('*', name, 'Matcher<*>', comment)
        return
      elif max_args == 'std::numeric_limits<unsigned>::max()':
        add_matcher('*', name, 'Matcher<*>, ..., Matcher<*>', comment)
        return

    m = re.match(
        r"""^.*MapAnyOfMatcher<.*>\s*
              ([a-zA-Z]*);$""",
        declaration, flags=re.X)
    if m:
      name = m.groups()[0]
      add_matcher('*', name, 'Matcher<*>...Matcher<*>', comment)
      return

    # Parse free standing matcher functions, like:
    #   Matcher<ResultType> Name(Matcher<ArgumentType> InnerMatcher) {
    m = re.match(r"""^\s*(?:template\s+<\s*(?:class|typename)\s+(.+)\s*>\s+)?
                     (.*)\s+
                     ([^\s\(]+)\s*\(
                     (.*)
                     \)\s*{""", declaration, re.X)
    if m:
      template_name, result, name, args = m.groups()
      if template_name:
        matcherTemplateArgs = re.findall(r'Matcher<\s*(%s)\s*>' % template_name, args)
        templateArgs = re.findall(r'(?:^|[\s,<])(%s)(?:$|[\s,>])' % template_name, args)
        if len(matcherTemplateArgs) < len(templateArgs):
          # The template name is used naked, so don't replace with '*' later on.
          template_name = None
        else:
          args = re.sub(r'(^|[\s,<])%s($|[\s,>])' % template_name, r'\1*\2', args)
      args = ', '.join(p.strip() for p in args.split(','))
      m = re.match(r'(?:^|.*\s+)internal::(?:Bindable)?Matcher<([^>]+)>$', result)
      if m:
        result_types = [m.group(1)]
        # '== 1' rather than 'is 1': identity comparison on ints only works
        # by CPython small-int caching and warns on Python >= 3.8.
        if template_name and len(result_types) == 1 and result_types[0] == template_name:
          result_types = ['*']
      else:
        result_types = extract_result_types(comment)
      if not result_types:
        if not comment:
          # Only overloads don't have their own doxygen comments; ignore those.
          print('Ignoring "%s"' % name)
        else:
          print('Cannot determine result type for "%s"' % name)
      else:
        for result_type in result_types:
          add_matcher(result_type, name, args, comment)
    else:
      print('*** Unparsable: "' + declaration + '" ***')
|
2012-08-28 02:49:12 +08:00
|
|
|
|
|
|
|
def sort_table(matcher_type, matcher_map):
  """Returns the sorted html table for the given row map.

  The rows are emitted in lookup-key order and wrapped in the START/END
  marker comments that delimit the section in the reference html.
  """
  rows = ''.join(matcher_map[key] + '\n' for key in sorted(matcher_map))
  return ('<!-- START_%(type)s_MATCHERS -->\n' +
          '%(table)s' +
          '<!--END_%(type)s_MATCHERS -->') % {
    'type': matcher_type,
    'table': rows,
  }
|
|
|
|
|
|
|
|
# Parse the ast matchers.
# We alternate between two modes:
# body = True: We parse the definition of a matcher. We need
#   to parse the full definition before adding a matcher, as the
#   definition might contain static asserts that specify the result
#   type.
# body = False: We parse the comments and declaration of the matcher.
comment = ''
declaration = ''
allowed_types = []
body = False
# Use a context manager so the header file handle is closed deterministically
# (the previous bare open() leaked it until garbage collection).
with open(MATCHERS_FILE) as matchers_file:
  for line in matchers_file.read().splitlines():
    if body:
      if line.strip() and line[0] == '}':
        # End of a matcher body: flush the accumulated declaration.
        if declaration:
          act_on_decl(declaration, comment, allowed_types)
          comment = ''
          declaration = ''
          allowed_types = []
        body = False
      else:
        # Inside a body, collect result types from static type asserts.
        m = re.search(r'is_base_of<([^,]+), NodeType>', line)
        if m and m.group(1):
          allowed_types += [m.group(1)]
      continue
    if line.strip() and line.lstrip()[0] == '/':
      # Comment line: strip the leading '//'-style prefix.
      comment += re.sub(r'^/+\s?', '', line) + '\n'
    else:
      declaration += ' ' + line
      # A declaration ends at a blank line, a ';', or an opening '{' that is
      # not part of an '= {' initializer.
      if ((not line.strip()) or
          line.rstrip()[-1] == ';' or
          (line.rstrip()[-1] == '{' and line.rstrip()[-3:] != '= {')):
        if line.strip() and line.rstrip()[-1] == '{':
          body = True
        else:
          act_on_decl(declaration, comment, allowed_types)
          comment = ''
          declaration = ''
          allowed_types = []
|
|
|
|
|
|
|
|
node_matcher_table = sort_table('DECL', node_matchers)
narrowing_matcher_table = sort_table('NARROWING', narrowing_matchers)
traversal_matcher_table = sort_table('TRAVERSAL', traversal_matchers)

# Splice the regenerated tables into the reference document, replacing
# everything between the corresponding START/END marker comments.
# NOTE(review): the tables are used as re.sub replacement strings, so a stray
# backslash in matcher documentation could be misread as a group reference —
# pre-existing behavior, verify if it ever bites.
with open('../LibASTMatchersReference.html') as reference_file:
  reference = reference_file.read()
reference = re.sub(r'<!-- START_DECL_MATCHERS.*END_DECL_MATCHERS -->',
                   node_matcher_table, reference, flags=re.S)
reference = re.sub(r'<!-- START_NARROWING_MATCHERS.*END_NARROWING_MATCHERS -->',
                   narrowing_matcher_table, reference, flags=re.S)
reference = re.sub(r'<!-- START_TRAVERSAL_MATCHERS.*END_TRAVERSAL_MATCHERS -->',
                   traversal_matcher_table, reference, flags=re.S)

# The output is opened in binary mode to preserve line endings, so the text
# must be encoded explicitly: writing str to a 'wb' stream raises TypeError
# on Python 3.
with open('../LibASTMatchersReference.html', 'wb') as output:
  output.write(reference.encode('utf-8'))
|
|
|
|
|