#!/usr/bin/env python
# A tool to parse ASTMatchers.h and update the documentation in
# ../LibASTMatchersReference.html automatically. Run from the
# directory in which this file is located to update the docs.

import collections
import re
import urllib2

MATCHERS_FILE = '../../include/clang/ASTMatchers/ASTMatchers.h'

# Each matcher is documented in one row of the form:
#   result | name | argA
# The subsequent row contains the documentation and is hidden by default,
# becoming visible via javascript when the user clicks the matcher name.
TD_TEMPLATE="""
<tr><td>%(result)s</td><td class="name" onclick="toggle('%(id)s')"><a name="%(id)sAnchor">%(name)s</a></td><td>%(args)s</td></tr>
<tr><td colspan="4" class="doc" id="%(id)s"><pre>%(comment)s</pre></td></tr>
"""

# We categorize the matchers into these three categories in the reference:
node_matchers = {}
narrowing_matchers = {}
traversal_matchers = {}

# We output multiple rows per matcher if the matcher can be used on multiple
# node types. Thus, we need a new id per row to control the documentation
# pop-up. ids[name] keeps track of those ids.
ids = collections.defaultdict(int)

# Cache for doxygen urls we have already verified.
doxygen_probes = {}

def esc(text):
  """Escape any html in the given text."""
  text = re.sub(r'&', '&amp;', text)
  text = re.sub(r'<', '&lt;', text)
  text = re.sub(r'>', '&gt;', text)
  def link_if_exists(m):
    name = m.group(1)
    url = 'http://clang.llvm.org/doxygen/classclang_1_1%s.html' % name
    if url not in doxygen_probes:
      try:
        print 'Probing %s...' % url
        urllib2.urlopen(url)
        doxygen_probes[url] = True
      except:
        doxygen_probes[url] = False
    if doxygen_probes[url]:
      return r'Matcher&lt;<a href="%s">%s</a>&gt;' % (url, name)
    else:
      return m.group(0)
  text = re.sub(
      r'Matcher&lt;([^\*&]+)&gt;', link_if_exists, text)
  return text

def extract_result_types(comment):
  """Extracts a list of result types from the given comment.

     We allow annotations in the comment of the matcher to specify what
     nodes a matcher can match on. Those comments have the form:
       Usable as: Any Matcher | (Matcher<T1>[, Matcher<t2>[, ...]])

     Returns ['*'] in case of 'Any Matcher', or ['T1', 'T2', ...].
     Returns the empty list if no 'Usable as' specification could be
     parsed.
  """
  result_types = []
  m = re.search(r'Usable as: Any Matcher[\s\n]*$', comment, re.S)
  if m:
    return ['*']
  while True:
    m = re.match(r'^(.*)Matcher<([^>]+)>\s*,?[\s\n]*$', comment, re.S)
    if not m:
      if re.search(r'Usable as:\s*$', comment):
        return result_types
      else:
        return None
    result_types += [m.group(2)]
    comment = m.group(1)

def strip_doxygen(comment):
  """Returns the given comment without \-escaped words."""
  # If there is only a doxygen keyword in the line, delete the whole line.
  comment = re.sub(r'^\\[^\s]+\n', r'', comment, flags=re.M)

  # If there is a doxygen \see command, change the \see prefix into "See also:".
  # FIXME: it would be better to turn this into a link to the target instead.
  comment = re.sub(r'\\see', r'See also:', comment)

  # Delete the doxygen command and the following whitespace.
  comment = re.sub(r'\\[^\s]+\s+', r'', comment)
  return comment

def unify_arguments(args):
  """Gets rid of anything the user doesn't care about in the argument list."""
  args = re.sub(r'internal::', r'', args)
  args = re.sub(r'extern const\s+(.*)&', r'\1 ', args)
  args = re.sub(r'&', r' ', args)
  args = re.sub(r'(^|\s)M\d?(\s)', r'\1Matcher<*>\2', args)
  return args

def add_matcher(result_type, name, args, comment, is_dyncast=False):
  """Adds a matcher to one of our categories."""
  if name == 'id':
    # FIXME: Figure out whether we want to support the 'id' matcher.
    return
  matcher_id = '%s%d' % (name, ids[name])
  ids[name] += 1
  args = unify_arguments(args)
  matcher_html = TD_TEMPLATE % {
    'result': esc('Matcher<%s>' % result_type),
    'name': name,
    'args': esc(args),
    'comment': esc(strip_doxygen(comment)),
    'id': matcher_id,
  }
  if is_dyncast:
    node_matchers[result_type + name] = matcher_html
  # Use a heuristic to figure out whether a matcher is a narrowing or
  # traversal matcher. By default, matchers that take other matchers as
  # arguments (and are not node matchers) do traversal. We specifically
  # exclude known narrowing matchers that also take other matchers as
  # arguments.
  elif ('Matcher<' not in args or
        name in ['allOf', 'anyOf', 'anything', 'unless']):
    narrowing_matchers[result_type + name + esc(args)] = matcher_html
  else:
    traversal_matchers[result_type + name + esc(args)] = matcher_html

def act_on_decl(declaration, comment, allowed_types):
  """Parse the matcher out of the given declaration and comment.

     If 'allowed_types' is set, it contains a list of node types the matcher
     can match on, as extracted from the static type asserts in the matcher
     definition.
  """
  if declaration.strip():
    # Node matchers are defined by writing:
    #   VariadicDynCastAllOfMatcher<ResultType, ArgumentType> name;
    m = re.match(r""".*Variadic(?:DynCast)?AllOfMatcher\s*<
                       \s*([^\s,]+)\s*(?:,
                       \s*([^\s>]+)\s*)?>
                       \s*([^\s;]+)\s*;\s*$""", declaration, flags=re.X)
    if m:
      result, inner, name = m.groups()
      if not inner:
        inner = result
      add_matcher(result, name, 'Matcher<%s>...' % inner,
                  comment, is_dyncast=True)
      return

    # Special case of type matchers:
    #   AstTypeMatcher<ArgumentType> name
    m = re.match(r""".*AstTypeMatcher\s*<
                       \s*([^\s>]+)\s*>
                     \s*([^\s;]+)\s*;\s*$""", declaration, flags=re.X)
    if m:
      inner, name = m.groups()
      add_matcher('Type', name, 'Matcher<%s>...' % inner,
                  comment, is_dyncast=True)
      # FIXME: re-enable once we have implemented casting on the TypeLoc
      # hierarchy.
      # add_matcher('TypeLoc', '%sLoc' % name, 'Matcher<%sLoc>...' % inner,
      #             comment, is_dyncast=True)
      return

    # Parse the various matcher definition macros.
    m = re.match(r""".*AST_TYPE(LOC)?_TRAVERSE_MATCHER(?:_DECL)?\(
                       \s*([^\s,]+\s*),
                       \s*(?:[^\s,]+\s*),
                       \s*AST_POLYMORPHIC_SUPPORTED_TYPES\(([^)]*)\)
                     \)\s*;\s*$""", declaration, flags=re.X)
    if m:
      loc, name, results = m.groups()[0:3]
      result_types = [r.strip() for r in results.split(',')]

      comment_result_types = extract_result_types(comment)
      if (comment_result_types and
          sorted(result_types) != sorted(comment_result_types)):
        raise Exception('Inconsistent documentation for: %s' % name)
      for result_type in result_types:
        add_matcher(result_type, name, 'Matcher<Type>', comment)
        if loc:
          add_matcher('%sLoc' % result_type, '%sLoc' % name, 'Matcher<TypeLoc>',
                      comment)
      return
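
    # Parse AST_POLYMORPHIC_MATCHER* macro definitions. The node types the
    # matcher applies to are listed explicitly via
    # AST_POLYMORPHIC_SUPPORTED_TYPES.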
    m = re.match(r"""^\s*AST_POLYMORPHIC_MATCHER(_P)?(.?)(?:_OVERLOAD)?\(
                          \s*([^\s,]+)\s*,
                          \s*AST_POLYMORPHIC_SUPPORTED_TYPES\(([^)]*)\)
                       (?:,\s*([^\s,]+)\s*
                          ,\s*([^\s,]+)\s*)?
                       (?:,\s*([^\s,]+)\s*
                          ,\s*([^\s,]+)\s*)?
                       (?:,\s*\d+\s*)?
                      \)\s*{\s*$""", declaration, flags=re.X)
    if m:
      p, n, name, results = m.groups()[0:4]
      args = m.groups()[4:]
      result_types = [r.strip() for r in results.split(',')]
      if allowed_types and allowed_types != result_types:
        raise Exception('Inconsistent documentation for: %s' % name)
      if n not in ['', '2']:
        raise Exception('Cannot parse "%s"' % declaration)
      args = ', '.join('%s %s' % (args[i], args[i+1])
                       for i in range(0, len(args), 2) if args[i])
      for result_type in result_types:
        add_matcher(result_type, name, args, comment)
      return
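
    # Parse AST_MATCHER_FUNCTION* macro definitions.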
    m = re.match(r"""^\s*AST_MATCHER_FUNCTION(_P)?(.?)(?:_OVERLOAD)?\(
                       (?:\s*([^\s,]+)\s*,)?
                          \s*([^\s,]+)\s*
                       (?:,\s*([^\s,]+)\s*
                          ,\s*([^\s,]+)\s*)?
                       (?:,\s*([^\s,]+)\s*
                          ,\s*([^\s,]+)\s*)?
                       (?:,\s*\d+\s*)?
                      \)\s*{\s*$""", declaration, flags=re.X)
    if m:
      p, n, result, name = m.groups()[0:4]
      args = m.groups()[4:]
      if n not in ['', '2']:
        raise Exception('Cannot parse "%s"' % declaration)
      args = ', '.join('%s %s' % (args[i], args[i+1])
                       for i in range(0, len(args), 2) if args[i])
      add_matcher(result, name, args, comment)
      return
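
    # Parse AST_MATCHER* macro definitions. The result type is optional; when
    # it is missing, the node types collected from the static asserts in the
    # macro body ('allowed_types') are used instead.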
    m = re.match(r"""^\s*AST_MATCHER(_P)?(.?)(?:_OVERLOAD)?\(
                       (?:\s*([^\s,]+)\s*,)?
                          \s*([^\s,]+)\s*
                       (?:,\s*([^,]+)\s*
                          ,\s*([^\s,]+)\s*)?
                       (?:,\s*([^\s,]+)\s*
                          ,\s*([^\s,]+)\s*)?
                       (?:,\s*\d+\s*)?
                      \)\s*{""", declaration, flags=re.X)
    if m:
      p, n, result, name = m.groups()[0:4]
      args = m.groups()[4:]
      if not result:
        if not allowed_types:
          raise Exception('Did not find allowed result types for: %s' % name)
        result_types = allowed_types
      else:
        result_types = [result]
      if n not in ['', '2']:
        raise Exception('Cannot parse "%s"' % declaration)
      args = ', '.join('%s %s' % (args[i], args[i+1])
                       for i in range(0, len(args), 2) if args[i])
      for result_type in result_types:
        add_matcher(result_type, name, args, comment)
      return

    # Parse ArgumentAdapting matchers.
    m = re.match(
        r"""^.*ArgumentAdaptingMatcherFunc<.*>\s*
              ([a-zA-Z]*);$""",
        declaration, flags=re.X)
    if m:
      name = m.groups()[0]
      add_matcher('*', name, 'Matcher<*>', comment)
      return

    # Parse Variadic functions.
    m = re.match(
        r"""^.*internal::VariadicFunction\s*<\s*([^,]+),\s*([^,]+),\s*[^>]+>\s*
              ([a-zA-Z]*);$""",
        declaration, flags=re.X)
    if m:
      result, arg, name = m.groups()[:3]
      add_matcher(result, name, '%s, ..., %s' % (arg, arg), comment)
      return

    # Parse Variadic operator matchers.
    m = re.match(
        r"""^.*VariadicOperatorMatcherFunc\s*<\s*([^,]+),\s*([^\s]+)\s*>\s*
              ([a-zA-Z]*);$""",
        declaration, flags=re.X)
    if m:
      min_args, max_args, name = m.groups()[:3]
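      # A maximum of one argument means the operator takes a single inner
      # matcher; an unbounded maximum means it takes a variadic list of them.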
      if max_args == '1':
        add_matcher('*', name, 'Matcher<*>', comment)
        return
      elif max_args == 'std::numeric_limits<unsigned>::max()':
        add_matcher('*', name, 'Matcher<*>, ..., Matcher<*>', comment)
        return

    # Parse free standing matcher functions, like:
    #   Matcher<ResultType> Name(Matcher<ArgumentType> InnerMatcher) {
    m = re.match(r"""^\s*(.*)\s+
                     ([^\s\(]+)\s*\(
                     (.*)
                     \)\s*{""", declaration, re.X)
    if m:
      result, name, args = m.groups()
      args = ', '.join(p.strip() for p in args.split(','))
      m = re.match(r'.*\s+internal::(Bindable)?Matcher<([^>]+)>$', result)
      if m:
        result_types = [m.group(2)]
      else:
        result_types = extract_result_types(comment)
      if not result_types:
        if not comment:
          # Only overloads don't have their own doxygen comments; ignore those.
          print 'Ignoring "%s"' % name
        else:
          print 'Cannot determine result type for "%s"' % name
      else:
        for result_type in result_types:
          add_matcher(result_type, name, args, comment)
    else:
      print '*** Unparsable: "' + declaration + '" ***'

def sort_table(matcher_type, matcher_map):
  """Returns the sorted html table for the given row map."""
  table = ''
  for key in sorted(matcher_map.keys()):
    table += matcher_map[key] + '\n'
  return ('<!-- START_%(type)s_MATCHERS -->\n' +
          '%(table)s' +
          '<!--END_%(type)s_MATCHERS -->') % {
    'type': matcher_type,
    'table': table,
  }

# Parse the ast matchers.
# We alternate between two modes:
# body = True: We parse the definition of a matcher. We need
#   to parse the full definition before adding a matcher, as the
#   definition might contain static asserts that specify the result
#   type.
# body = False: We parse the comments and declaration of the matcher.
comment = ''
declaration = ''
allowed_types = []
body = False
for line in open(MATCHERS_FILE).read().splitlines():
  if body:
    if line.strip() and line[0] == '}':
      if declaration:
        act_on_decl(declaration, comment, allowed_types)
        comment = ''
        declaration = ''
        allowed_types = []
      body = False
    else:
      m = re.search(r'is_base_of<([^,]+), NodeType>', line)
      if m and m.group(1):
        allowed_types += [m.group(1)]
    continue
  if line.strip() and line.lstrip()[0] == '/':
    comment += re.sub(r'/+\s?', '', line) + '\n'
  else:
    declaration += ' ' + line
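    # A blank line or a trailing ';' completes the declaration; a trailing '{'
    # (unless it is an '= {' initializer) starts a body we parse separately.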
    if ((not line.strip()) or
        line.rstrip()[-1] == ';' or
        (line.rstrip()[-1] == '{' and line.rstrip()[-3:] != '= {')):
      if line.strip() and line.rstrip()[-1] == '{':
        body = True
      else:
        act_on_decl(declaration, comment, allowed_types)
        comment = ''
        declaration = ''
        allowed_types = []

node_matcher_table = sort_table('DECL', node_matchers)
narrowing_matcher_table = sort_table('NARROWING', narrowing_matchers)
traversal_matcher_table = sort_table('TRAVERSAL', traversal_matchers)

reference = open('../LibASTMatchersReference.html').read()
reference = re.sub(r'<!-- START_DECL_MATCHERS.*END_DECL_MATCHERS -->',
                   node_matcher_table, reference, flags=re.S)
reference = re.sub(r'<!-- START_NARROWING_MATCHERS.*END_NARROWING_MATCHERS -->',
                   narrowing_matcher_table, reference, flags=re.S)
reference = re.sub(r'<!-- START_TRAVERSAL_MATCHERS.*END_TRAVERSAL_MATCHERS -->',
                   traversal_matcher_table, reference, flags=re.S)

with open('../LibASTMatchersReference.html', 'wb') as output:
  output.write(reference)