scripts: autopep8

Daniel Hahler 2019-07-29 04:43:28 +02:00
parent bae02eb396
commit 97ce776e7b
5 changed files with 360 additions and 327 deletions
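
The hunks below are mechanical autopep8 reformatting (indentation, continuation-line alignment, blank lines, and splitting one-liners). The commit does not record the exact invocation, so the driver below is only a sketch; the options and the file list are assumptions.

# Sketch only: the autopep8 options and target glob are guesses, not taken
# from the commit.
import glob

import autopep8  # third-party package: pip install autopep8


def reformat(path):
    with open(path, encoding='utf-8') as f:
        source = f.read()
    # fix_code() returns PEP 8-formatted source; aggressive=1 allows fixes
    # beyond pure whitespace (for example rewriting lambda assignments).
    fixed = autopep8.fix_code(source, options={'aggressive': 1})
    if fixed != source:
        with open(path, 'w', encoding='utf-8') as f:
            f.write(fixed)


for script in glob.glob('scripts/*.py'):  # assumed target set
    reformat(script)

Running something like this over the repository's Python scripts and committing the result yields the whitespace-dominated diff shown below.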

View File

@@ -9,58 +9,58 @@ from argparse import ArgumentParser
GENERATED_INCLUDE_RE = re.compile(
    r'^\s*#\s*include\s*"([/a-z_0-9.]+\.generated\.h)"(\s+//.*)?$')


def main(argv):
    argparser = ArgumentParser()
    argparser.add_argument('--generated-includes-dir', action='append',
                           help='Directory where generated includes are located.')
    argparser.add_argument('--file', type=open, help='File to check.')
    argparser.add_argument('iwyu_args', nargs='*',
                           help='IWYU arguments, must go after --.')
    args = argparser.parse_args(argv)

    with args.file:
        include_dirs = []

        iwyu = Popen(['include-what-you-use', '-xc'] + args.iwyu_args + ['/dev/stdin'],
                     stdin=PIPE, stdout=PIPE, stderr=PIPE)

        for line in args.file:
            match = GENERATED_INCLUDE_RE.match(line)
            if match:
                for d in args.generated_includes_dir:
                    try:
                        f = open(os.path.join(d, match.group(1)))
                    except IOError:
                        continue
                    else:
                        with f:
                            for generated_line in f:
                                iwyu.stdin.write(generated_line)
                        break
                else:
                    raise IOError('Failed to find {0}'.format(match.group(1)))
            else:
                iwyu.stdin.write(line)

        iwyu.stdin.close()

        out = iwyu.stdout.read()
        err = iwyu.stderr.read()

        ret = iwyu.wait()

        if ret != 2:
            print('IWYU failed with exit code {0}:'.format(ret))
            print('{0} stdout {0}'.format('=' * ((80 - len(' stdout ')) // 2)))
            print(out)
            print('{0} stderr {0}'.format('=' * ((80 - len(' stderr ')) // 2)))
            print(err)
            return 1
        return 0


if __name__ == '__main__':
    raise SystemExit(main(sys.argv[1:]))

View File

@@ -81,8 +81,8 @@ SITENAVI_PLAIN = '<p>' + SITENAVI_LINKS_PLAIN + '</p>'
SITENAVI_WEB = '<p>' + SITENAVI_LINKS_WEB + '</p>'
SITENAVI_SEARCH = '<table width="100%"><tbody><tr><td>' + SITENAVI_LINKS_WEB + \
    '</td><td style="text-align: right; max-width: 25vw"><div class="gcse-searchbox">' \
    '</div></td></tr></tbody></table><div class="gcse-searchresults"></div>'

TEXTSTART = """
<div id="d1">
@@ -100,74 +100,76 @@ FOOTER2 = """
</body>
</html>
""".format(
    generated_date='{0:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now()),
    commit='?')

RE_TAGLINE = re.compile(r'(\S+)\s+(\S+)')

PAT_WORDCHAR = '[!#-)+-{}~\xC0-\xFF]'

PAT_HEADER = r'(^.*~$)'
PAT_GRAPHIC = r'(^.* `$)'
PAT_PIPEWORD = r'(?<!\\)\|([#-)!+-~]+)\|'
PAT_STARWORD = r'\*([#-)!+-~]+)\*(?:(?=\s)|$)'
PAT_COMMAND = r'`([^` ]+)`'
PAT_OPTWORD = r"('(?:[a-z]{2,}|t_..)')"
PAT_CTRL = r'(CTRL-(?:W_)?(?:\{char\}|<[A-Za-z]+?>|.)?)'
PAT_SPECIAL = r'(<.+?>|\{.+?}|' \
    r'\[(?:range|line|count|offset|\+?cmd|[-+]?num|\+\+opt|' \
    r'arg|arguments|ident|addr|group)]|' \
    r'(?<=\s)\[[-a-z^A-Z0-9_]{2,}])'
PAT_TITLE = r'(Vim version [0-9.a-z]+|VIM REFERENCE.*)'
PAT_NOTE = r'((?<!' + PAT_WORDCHAR + r')(?:note|NOTE|Notes?):?' \
    r'(?!' + PAT_WORDCHAR + r'))'
PAT_URL = r'((?:https?|ftp)://[^\'"<> \t]+[a-zA-Z0-9/])'
PAT_WORD = r'((?<!' + PAT_WORDCHAR + r')' + PAT_WORDCHAR + r'+' \
    r'(?!' + PAT_WORDCHAR + r'))'

RE_LINKWORD = re.compile(
    PAT_OPTWORD + '|' +
    PAT_CTRL + '|' +
    PAT_SPECIAL)
RE_TAGWORD = re.compile(
    PAT_HEADER + '|' +
    PAT_GRAPHIC + '|' +
    PAT_PIPEWORD + '|' +
    PAT_STARWORD + '|' +
    PAT_COMMAND + '|' +
    PAT_OPTWORD + '|' +
    PAT_CTRL + '|' +
    PAT_SPECIAL + '|' +
    PAT_TITLE + '|' +
    PAT_NOTE + '|' +
    PAT_URL + '|' +
    PAT_WORD)
RE_NEWLINE = re.compile(r'[\r\n]')
# H1 header "=====…"
# H2 header "-----…"
RE_HRULE = re.compile(r'[-=]{3,}.*[-=]{3,3}$')
RE_EG_START = re.compile(r'(?:.* )?>$')
RE_EG_END = re.compile(r'\S')
RE_SECTION = re.compile(r'[-A-Z .][-A-Z0-9 .()]*(?=\s+\*)')
RE_STARTAG = re.compile(r'\s\*([^ \t|]+)\*(?:\s|$)')
RE_LOCAL_ADD = re.compile(r'LOCAL ADDITIONS:\s+\*local-additions\*$')


class Link(object):
    __slots__ = 'link_plain_same', 'link_pipe_same', \
                'link_plain_foreign', 'link_pipe_foreign', \
                'filename'

    def __init__(self, link_plain_same, link_plain_foreign,
                 link_pipe_same, link_pipe_foreign, filename):
        self.link_plain_same = link_plain_same
        self.link_plain_foreign = link_plain_foreign
        self.link_pipe_same = link_pipe_same
        self.link_pipe_foreign = link_pipe_foreign
        self.filename = filename


class VimH2H(object):
    def __init__(self, tags, version=None, is_web_version=True):
        self._urls = {}
        self._version = version
        self._is_web_version = is_web_version
        for line in RE_NEWLINE.split(tags):
@@ -183,6 +185,7 @@ class VimH2H(object):
    def do_add_tag(self, filename, tag):
        tag_quoted = urllib.parse.quote_plus(tag)

        def mkpart1(doc):
            return '<a href="' + doc + '#' + tag_quoted + '" class="'
        part1_same = mkpart1('')
@@ -192,16 +195,20 @@
            doc = filename + '.html'
        part1_foreign = mkpart1(doc)
        part2 = '">' + html_escape[tag] + '</a>'

        def mklinks(cssclass):
            return (part1_same + cssclass + part2,
                    part1_foreign + cssclass + part2)
        cssclass_plain = 'd'
        m = RE_LINKWORD.match(tag)
        if m:
            opt, ctrl, special = m.groups()
            if opt is not None:
                cssclass_plain = 'o'
            elif ctrl is not None:
                cssclass_plain = 'k'
            elif special is not None:
                cssclass_plain = 's'
        links_plain = mklinks(cssclass_plain)
        links_pipe = mklinks('l')
        self._urls[tag] = Link(
@@ -213,18 +220,23 @@ class VimH2H(object):
        links = self._urls.get(tag)
        if links is not None:
            if links.filename == curr_filename:
                if css_class == 'l':
                    return links.link_pipe_same
                else:
                    return links.link_plain_same
            else:
                if css_class == 'l':
                    return links.link_pipe_foreign
                else:
                    return links.link_plain_foreign
        elif css_class is not None:
            return '<span class="' + css_class + '">' + html_escape[tag] + \
                '</span>'
        else:
            return html_escape[tag]

    def to_html(self, filename, contents, encoding):
        out = []
        inexample = 0
        filename = str(filename)
@@ -247,10 +259,11 @@ class VimH2H(object):
            if inexample == 2:
                if RE_EG_END.match(line):
                    inexample = 0
                    if line[0] == '<':
                        line = line[1:]
                else:
                    out.extend(('<span class="e">', html_escape[line],
                                '</span>\n'))
                    continue
            if RE_EG_START.match(line_tabs):
                inexample = 1
@@ -266,12 +279,12 @@ class VimH2H(object):
                    out.append(html_escape[line[lastpos:pos]])
                lastpos = match.end()
                header, graphic, pipeword, starword, command, opt, ctrl, \
                    special, title, note, url, word = match.groups()
                if pipeword is not None:
                    out.append(self.maplink(pipeword, filename, 'l'))
                elif starword is not None:
                    out.extend(('<a name="', urllib.parse.quote_plus(starword),
                                '" class="t">', html_escape[starword], '</a>'))
                elif command is not None:
                    out.extend(('<span class="e">', html_escape[command],
                                '</span>'))
@@ -300,14 +313,15 @@ class VimH2H(object):
            if lastpos < len(line):
                out.append(html_escape[line[lastpos:]])
            out.append('\n')
            if inexample == 1:
                inexample = 2

        header = []
        header.append(HEAD.format(encoding=encoding, filename=filename))
        header.append(HEAD_END)
        if self._is_web_version and is_help_txt:
            vers_note = VERSION_NOTE.replace('{version}', self._version) \
                if self._version else ''
            header.append(INTRO.replace('{vers-note}', vers_note))
        if self._is_web_version:
            header.append(SITENAVI_SEARCH)
@@ -318,6 +332,7 @@ class VimH2H(object):
        header.append(TEXTSTART)
        return ''.join(chain(header, out, (FOOTER, sitenavi_footer, FOOTER2)))


class HtmlEscCache(dict):
    def __missing__(self, key):
        r = key.replace('&', '&amp;') \
@@ -326,11 +341,10 @@ class HtmlEscCache(dict):
        self[key] = r
        return r


html_escape = HtmlEscCache()


def slurp(filename):
    try:
        with open(filename, encoding='UTF-8') as f:
@@ -340,17 +354,20 @@ def slurp(filename):
        with open(filename, encoding='latin-1') as f:
            return f.read(), 'latin-1'


def usage():
    return "usage: " + sys.argv[0] + " IN_DIR OUT_DIR [BASENAMES...]"


def main():
    if len(sys.argv) < 3:
        sys.exit(usage())
    in_dir = sys.argv[1]
    out_dir = sys.argv[2]
    basenames = sys.argv[3:]

    print("Processing tags...")
    h2h = VimH2H(slurp(os.path.join(in_dir, 'tags'))[0], is_web_version=False)

    if len(basenames) == 0:
@@ -358,9 +375,9 @@ def main():
    for basename in basenames:
        if os.path.splitext(basename)[1] != '.txt' and basename != 'tags':
            print("Ignoring " + basename)
            continue
        print("Processing " + basename + "...")
        path = os.path.join(in_dir, basename)
        text, encoding = slurp(path)
        outpath = os.path.join(out_dir, basename + '.html')
@@ -368,4 +385,5 @@ def main():
        of.write(h2h.to_html(basename, text, encoding))
        of.close()


main()

View File

@@ -57,54 +57,55 @@ seen_funcs = set()
lua2dox_filter = os.path.join(base_dir, 'scripts', 'lua2dox_filter')

CONFIG = {
    'api': {
        'filename': 'api.txt',
        # String used to find the start of the generated part of the doc.
        'section_start_token': '*api-global*',
        # Section ordering.
        'section_order': [
            'vim.c',
            'buffer.c',
            'window.c',
            'tabpage.c',
            'ui.c',
        ],
        # List of files/directories for doxygen to read, separated by blanks
        'files': os.path.join(base_dir, 'src/nvim/api'),
        # file patterns used by doxygen
        'file_patterns': '*.h *.c',
        # Only function with this prefix are considered
        'func_name_prefix': 'nvim_',
        # Section name overrides.
        'section_name': {
            'vim.c': 'Global',
        },
        # Module name overrides (for Lua).
        'module_override': {},
        # Append the docs for these modules, do not start a new section.
        'append_only': [],
    },
    'lua': {
        'filename': 'if_lua.txt',
        'section_start_token': '*lua-vim*',
        'section_order': [
            'vim.lua',
            'shared.lua',
        ],
        'files': ' '.join([
            os.path.join(base_dir, 'src/nvim/lua/vim.lua'),
            os.path.join(base_dir, 'runtime/lua/vim/shared.lua'),
        ]),
        'file_patterns': '*.lua',
        'func_name_prefix': '',
        'section_name': {},
        'module_override': {
            # `shared` functions are exposed on the `vim` module.
            'shared': 'vim',
        },
        'append_only': [
            'shared.lua',
        ],
    },
}
param_exclude = (
@@ -121,6 +122,7 @@ annotation_map = {
# deprecated functions.
xrefs = set()


def debug_this(s, n):
    o = n if isinstance(n, str) else n.toprettyxml(indent=' ', newl='\n')
    name = '' if isinstance(n, str) else n.nodeName
@@ -191,7 +193,7 @@ def len_lastline(text):
    if -1 == lastnl:
        return len(text)
    if '\n' == text[-1]:
        return lastnl - (1 + text.rfind('\n', 0, lastnl))
    return len(text) - (1 + lastnl)
@@ -209,6 +211,7 @@ def is_inline(n):
            return False
    return True


def doc_wrap(text, prefix='', width=70, func=False, indent=None):
    """Wraps text to `width`.
@@ -237,8 +240,8 @@ def doc_wrap(text, prefix='', width=70, func=False, indent=None):
    if indent_only:
        prefix = indent

    tw = textwrap.TextWrapper(break_long_words=False,
                              break_on_hyphens=False,
                              width=width,
                              initial_indent=prefix,
                              subsequent_indent=indent)
@@ -287,12 +290,14 @@ def render_params(parent, width=62):
        desc_node = get_child(node, 'parameterdescription')
        if desc_node:
            desc = parse_parblock(desc_node, width=width,
                                  indent=(' ' * len(name)))
        out += '{}{}\n'.format(name, desc)
    return out.rstrip()


# Renders a node as Vim help text, recursively traversing all descendants.
def render_node(n, text, prefix='', indent='', width=62):
    text = ''
    # space_preceding = (len(text) > 0 and ' ' == text[-1][-1])
@@ -317,7 +322,9 @@ def render_node(n, text, prefix='', indent='', width=62):
        text += ' [verbatim] {}'.format(get_text(n))
    elif n.nodeName == 'listitem':
        for c in n.childNodes:
            text += indent + prefix + \
                render_node(c, text, indent=indent +
                            (' ' * len(prefix)), width=width)
    elif n.nodeName in ('para', 'heading'):
        for c in n.childNodes:
            text += render_node(c, text, indent=indent, width=width)
@@ -326,7 +333,7 @@ def render_node(n, text, prefix='', indent='', width=62):
    elif n.nodeName == 'itemizedlist':
        for c in n.childNodes:
            text += '{}\n'.format(render_node(c, text, prefix='',
                                              indent=indent, width=width))
    elif n.nodeName == 'orderedlist':
        i = 1
        for c in n.childNodes:
@@ -334,7 +341,7 @@ def render_node(n, text, prefix='', indent='', width=62):
                text += '\n'
                continue
            text += '{}\n'.format(render_node(c, text, prefix='{}. '.format(i),
                                              indent=indent, width=width))
            i = i + 1
    elif n.nodeName == 'simplesect' and 'note' == n.getAttribute('kind'):
        text += 'Note:\n '
@@ -356,6 +363,7 @@ def render_node(n, text, prefix='', indent='', width=62):
            n.nodeName, n.toprettyxml(indent=' ', newl='\n')))
    return text


def render_para(parent, indent='', width=62):
    """Renders Doxygen <para> containing arbitrary nodes.
@@ -363,7 +371,7 @@ def render_para(parent, indent='', width=62):
    """
    if is_inline(parent):
        return clean_lines(doc_wrap(render_node(parent, ''),
                                    indent=indent, width=width).strip())

    # Ordered dict of ordered lists.
    groups = collections.OrderedDict([
@@ -407,17 +415,19 @@ def render_para(parent, indent='', width=62):
    if len(groups['return']) > 0:
        chunks.append('\nReturn: ~')
        for child in groups['return']:
            chunks.append(render_node(
                child, chunks[-1][-1], indent=indent, width=width))
    if len(groups['seealso']) > 0:
        chunks.append('\nSee also: ~')
        for child in groups['seealso']:
            chunks.append(render_node(
                child, chunks[-1][-1], indent=indent, width=width))
    for child in groups['xrefs']:
        title = get_text(get_child(child, 'xreftitle'))
        xrefs.add(title)
        xrefdesc = render_para(get_child(child, 'xrefdescription'), width=width)
        chunks.append(doc_wrap(xrefdesc, prefix='{}: '.format(title),
                               width=width) + '\n')

    return clean_lines('\n'.join(chunks).strip())
@@ -587,6 +597,7 @@ def delete_lines_below(filename, tokenstr):
    with open(filename, 'wt') as fp:
        fp.writelines(lines[0:i])


def gen_docs(config):
    """Generate documentation.
@@ -619,7 +630,8 @@ def gen_docs(config):
                continue
            groupname = get_text(find_first(compound, 'name'))
            groupxml = os.path.join(base, '%s.xml' %
                                    compound.getAttribute('refid'))

            desc = find_first(minidom.parse(groupxml), 'detaileddescription')
            if desc:
@@ -635,7 +647,7 @@ def gen_docs(config):
            if filename.endswith('.c') or filename.endswith('.lua'):
                functions, deprecated = parse_source_xml(
                    os.path.join(base, '%s.xml' %
                                 compound.getAttribute('refid')), mode)

                if not functions and not deprecated:
                    continue
@@ -680,12 +692,14 @@ def gen_docs(config):
        i = 0
        for filename in CONFIG[mode]['section_order']:
            if filename not in sections:
                raise RuntimeError(
                    'found new module "{}"; update the "section_order" map'.format(filename))
            title, helptag, section_doc = sections.pop(filename)
            i += 1
            if filename not in CONFIG[mode]['append_only']:
                docs += sep
                docs += '\n%s%s' % (title,
                                    helptag.rjust(text_width - len(title)))
            docs += section_doc
            docs += '\n\n\n'
@@ -693,7 +707,7 @@ def gen_docs(config):
        docs += ' vim:tw=78:ts=8:ft=help:norl:\n'

        doc_file = os.path.join(base_dir, 'runtime', 'doc',
                                CONFIG[mode]['filename'])

        delete_lines_below(doc_file, CONFIG[mode]['section_start_token'])
        with open(doc_file, 'ab') as fp:

View File

@@ -12,27 +12,27 @@ import msgpack
class EntryTypes(Enum):
    Unknown = -1
    Missing = 0
    Header = 1
    SearchPattern = 2
    SubString = 3
    HistoryEntry = 4
    Register = 5
    Variable = 6
    GlobalMark = 7
    Jump = 8
    BufferList = 9
    LocalMark = 10
    Change = 11


def strtrans_errors(e):
    if not isinstance(e, UnicodeDecodeError):
        raise NotImplementedError('dont know how to handle {0} error'.format(
            e.__class__.__name__))
    return '<{0:x}>'.format(reduce((lambda a, b: a*0x100+b),
                                   list(e.object[e.start:e.end]))), e.end


codecs.register_error('strtrans', strtrans_errors)
@@ -56,54 +56,54 @@ ctable = {
def mnormalize(o):
    return ctable.get(type(o), idfunc)(o)


fname = sys.argv[1]
try:
    filt = sys.argv[2]
except IndexError:
    def filt(entry): return True
else:
    _filt = filt
    def filt(entry): return eval(_filt, globals(), {'entry': entry})

poswidth = len(str(os.stat(fname).st_size or 1000))


class FullEntry(dict):
    def __init__(self, val):
        self.__dict__.update(val)


with open(fname, 'rb') as fp:
    unpacker = msgpack.Unpacker(file_like=fp, read_size=1)
    max_type = max(typ.value for typ in EntryTypes)
    while True:
        try:
            pos = fp.tell()
            typ = unpacker.unpack()
        except msgpack.OutOfData:
            break
        else:
            timestamp = unpacker.unpack()
            time = datetime.fromtimestamp(timestamp)
            length = unpacker.unpack()
            if typ > max_type:
                entry = fp.read(length)
                typ = EntryTypes.Unknown
            else:
                entry = unpacker.unpack()
                typ = EntryTypes(typ)
            full_entry = FullEntry({
                'value': entry,
                'timestamp': timestamp,
                'time': time,
                'length': length,
                'pos': pos,
                'type': typ,
            })
            if not filt(full_entry):
                continue
            print('%*u %13s %s %5u %r' % (
                poswidth, pos, typ.name, time.isoformat(), length, mnormalize(entry)))
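
Aside from whitespace, the visible rewrite in the file above is the E731 fix: a lambda assigned to a name is turned into a def statement with the same behaviour. A minimal illustration with a generic name (not taken from the script):

# E731: do not assign a lambda expression, use a def.
# Before:  is_positive = lambda n: n > 0
# After the autopep8-style rewrite, behaviour is unchanged:
def is_positive(n): return n > 0


assert is_positive(1) and not is_positive(-1)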

View File

@@ -10,7 +10,7 @@ import os
DECL_KINDS = {
    CursorKind.FUNCTION_DECL,
}
@@ -18,122 +18,123 @@ Strip = namedtuple('Strip', 'start_line start_column end_line end_column')
def main(progname, cfname, only_static, move_all):
    cfname = os.path.abspath(os.path.normpath(cfname))

    hfname1 = os.path.splitext(cfname)[0] + os.extsep + 'h'
    hfname2 = os.path.splitext(cfname)[0] + '_defs' + os.extsep + 'h'

    files_to_modify = (cfname, hfname1, hfname2)

    index = Index.create()
    src_dirname = os.path.join(os.path.dirname(__file__), '..', 'src')
    src_dirname = os.path.abspath(os.path.normpath(src_dirname))
    relname = os.path.join(src_dirname, 'nvim')
    unit = index.parse(cfname, args=('-I' + src_dirname,
                                     '-DUNIX',
                                     '-DEXITFREE',
                                     '-DFEAT_USR_CMDS',
                                     '-DFEAT_CMDL_COMPL',
                                     '-DFEAT_COMPL_FUNC',
                                     '-DPROTO',
                                     '-DUSE_MCH_ERRMSG'))
    cursor = unit.cursor

    tostrip = defaultdict(OrderedDict)
    definitions = set()

    for child in cursor.get_children():
        if not (child.location and child.location.file):
            continue
        fname = os.path.abspath(os.path.normpath(child.location.file.name))
        if fname not in files_to_modify:
            continue
        if child.kind not in DECL_KINDS:
            continue
        if only_static and next(child.get_tokens()).spelling == 'static':
            continue

        if child.is_definition() and fname == cfname:
            definitions.add(child.spelling)
        else:
            stripdict = tostrip[fname]
            assert(child.spelling not in stripdict)
            stripdict[child.spelling] = Strip(
                child.extent.start.line,
                child.extent.start.column,
                child.extent.end.line,
                child.extent.end.column,
            )

    for (fname, stripdict) in tostrip.items():
        if not move_all:
            for name in set(stripdict) - definitions:
                stripdict.pop(name)

        if not stripdict:
            continue

        if fname.endswith('.h'):
            is_h_file = True
            include_line = next(reversed(stripdict.values())).start_line + 1
        else:
            is_h_file = False
            include_line = next(iter(stripdict.values())).start_line

        lines = None
        generated_existed = os.path.exists(fname + '.generated.h')
        with open(fname, 'rb') as F:
            lines = list(F)

        stripped = []

        for name, position in reversed(stripdict.items()):
            sl = slice(position.start_line - 1, position.end_line)
            if is_h_file:
                include_line -= sl.stop - sl.start
            stripped += lines[sl]
            lines[sl] = ()

        if not generated_existed:
            lines[include_line:include_line] = [
                '#ifdef INCLUDE_GENERATED_DECLARATIONS\n',
                '# include "{0}.generated.h"\n'.format(
                    os.path.relpath(fname, relname)),
                '#endif\n',
            ]

        with open(fname, 'wb') as F:
            F.writelines(lines)


if __name__ == '__main__':
    progname = sys.argv[0]
    args = sys.argv[1:]

    if not args or '--help' in args:
        print('Usage:')
        print('')
        print(' {0} [--static [--all]] file.c...'.format(progname))
        print('')
        print('Stripts all declarations from file.c, file.h and file_defs.h.')
        print('If --static argument is given then only static declarations are')
        print('stripped. Declarations are stripped only if corresponding')
        print('definition is found unless --all argument was given.')
        print('')
        print('Note: it is assumed that static declarations starts with "static"')
        print(' keyword.')
        sys.exit(0 if args else 1)

    if args[0] == '--static':
        only_static = True
        args = args[1:]
    else:
        only_static = False

    if args[0] == '--all':
        move_all = True
        args = args[1:]
    else:
        move_all = False

    for cfname in args:
        print('Processing {0}'.format(cfname))
        main(progname, cfname, only_static, move_all)