makedocbook: Adjust inline whitespace to fix flake8 warnings

Jon Turney 2022-11-01 11:35:51 +00:00
parent 2521dd48d8
commit 999925b253
No known key found for this signature in database
GPG Key ID: C7C86F0370285C81
2 changed files with 68 additions and 69 deletions
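The diffs below are whitespace-only adjustments of the kind pycodestyle (the checker behind flake8) reports. A minimal illustrative snippet, assuming default warning codes (the commit message does not name the specific codes it is fixing):

    # Illustrative only: the warning codes are assumed from pycodestyle's default
    # checks; the commit message does not name them.
    i = 0
    x = i+1                     # E226: missing whitespace around arithmetic operator -> i + 1 (only reported when enabled)
    d = {'KEY' : 'value'}       # E203: whitespace before ':' -> 'KEY': 'value'
    s = 'a,b'.replace(',','.')  # E231: missing whitespace after ',' -> replace(',', '.')
    print (s)                   # E211: whitespace before '(' -> print(s)


    def f(threshold = 1):       # E251: unexpected spaces around keyword/parameter equals -> threshold=1
        return threshold

Every changed line in the two files below falls into one of these patterns.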

View File

@@ -16,8 +16,8 @@ def main():
     first_node = True
     prev_sect = False
-    print ('<?xml version="1.0" encoding="UTF-8"?>')
-    print ('<!DOCTYPE chapter PUBLIC "-//OASIS//DTD DocBook V4.5//EN" "http://www.oasis-open.org/docbook/xml/4.5/docbookx.dtd">')
+    print('<?xml version="1.0" encoding="UTF-8"?>')
+    print('<!DOCTYPE chapter PUBLIC "-//OASIS//DTD DocBook V4.5//EN" "http://www.oasis-open.org/docbook/xml/4.5/docbookx.dtd">')
     for l in sys.stdin.readlines():
         l = l.rstrip()
@@ -29,27 +29,27 @@ def main():
             l = l.replace("@node", "", 1)
             l = l.strip()
             if first_node:
-                print ('<chapter id="%s_chapter" xmlns:xi="http://www.w3.org/2001/XInclude">' % l.lower().replace(' ', '_'))
+                print('<chapter id="%s_chapter" xmlns:xi="http://www.w3.org/2001/XInclude">' % l.lower().replace(' ', '_'))
                 first_node = False
             else:
                 if prev_sect:
-                    print ('</section>')
-                print ('<section id="%s">' % l)
+                    print('</section>')
+                print('<section id="%s">' % l)
                 prev_sect = True
         elif l.startswith("@chapter "):
             l = l.replace("@chapter ", "", 1)
-            print ('<title>%s</title>' % l)
+            print('<title>%s</title>' % l)
         elif l.startswith("@section "):
             l = l.replace("@section ", "", 1)
-            print ('<title>%s</title>' % l)
+            print('<title>%s</title>' % l)
         elif l.startswith("@include "):
             l = l.replace("@include ", "", 1)
             l = l.replace(".def", ".xml", 1)
-            print ('<xi:include href="%s"/>' % l.strip())
+            print('<xi:include href="%s"/>' % l.strip())
     if prev_sect:
-        print ('</section>')
-    print ('</chapter>')
+        print('</section>')
+    print('</chapter>')
 
-if __name__ == "__main__" :
+if __name__ == "__main__":
     main()

View File

@@ -35,7 +35,7 @@ rootelement = None  # root element of the XML tree
 refentry = None  # the current refentry
 verbose = 0
-def dump(s, stage, threshold = 1):
+def dump(s, stage, threshold=1):
     if verbose > threshold:
         print('*' * 40, file=sys.stderr)
         print(stage, file=sys.stderr)
@@ -49,7 +49,7 @@ def dump(s, stage, threshold = 1):
 def skip_whitespace_and_stars(i, src):
-    while i < len(src) and (src[i].isspace() or (src[i] == '*' and src[i+1] != '/')):
+    while i < len(src) and (src[i].isspace() or (src[i] == '*' and src[i + 1] != '/')):
         i += 1
     return i
@@ -62,7 +62,7 @@ def comment_contents_generator(src):
     i = 0
     while i < len(src) - 2:
-        if src[i] == '\n' and src[i+1] == '/' and src[i+2] == '*':
+        if src[i] == '\n' and src[i + 1] == '/' and src[i + 2] == '*':
             i = i + 3
             i = skip_whitespace_and_stars(i, src)
@@ -82,7 +82,7 @@ def comment_contents_generator(src):
                 i = skip_whitespace_and_stars(i, src)
-            elif src[i] == '*' and src[i+1] == '/':
+            elif src[i] == '*' and src[i + 1] == '/':
                 i = i + 2
                 # If we have just output \n\n, this adds another blank line.
                 # This is the only way a double blank line can occur.
@@ -265,7 +265,7 @@ def index(c, l):
     primary.text = l
     # to validate, it seems we need to maintain refentry elements in a certain order
-    refentry[:] = sorted(refentry, key = lambda x: x.tag if isinstance(x.tag, str) else '')
+    refentry[:] = sorted(refentry, key=lambda x: x.tag if isinstance(x.tag, str) else '')
     # adds another alternate refname
     refnamediv = refentry.find('refnamediv')
@@ -281,7 +281,7 @@ def index(c, l):
         print('duplicate refname %s discarded' % l, file=sys.stderr)
     # to validate, it seems we need to maintain refnamediv elements in a certain order
-    refnamediv[:] = sorted(refnamediv, key = lambda x: x.tag)
+    refnamediv[:] = sorted(refnamediv, key=lambda x: x.tag)
 # SYNOPSIS aka ANSI_SYNOPSIS
@@ -378,14 +378,13 @@ def synopsis_for_prototype(funcsynopsis, s):
 # sscanf, have very complex layout using nested tables and itemized lists, which
 # it is best to parse in order to transform correctly.
 #
 def refsect(t, s):
     refsect = lxml.etree.SubElement(refentry, 'refsect1')
     title = lxml.etree.SubElement(refsect, 'title')
     title.text = t.title()
     if verbose:
-        print('%s has %d paragraphs' % (t, len(s.split('\n\n'))) , file=sys.stderr)
+        print('%s has %d paragraphs' % (t, len(s.split('\n\n'))), file=sys.stderr)
     if verbose > 1:
         dump(s, 'before lexing')
@@ -422,25 +421,25 @@ def discarded(c, t):
     return
 command_dispatch_dict = {
-    'FUNCTION' : function,
-    'TYPEDEF' : function,  # TYPEDEF is not currently used, but described in doc.str
-    'INDEX' : index,
-    'TRAD_SYNOPSIS' : discarded,  # K&R-style synopsis, obsolete and discarded
-    'ANSI_SYNOPSIS' : synopsis,
-    'SYNOPSIS' : synopsis,
-    'DESCRIPTION' : refsect,
-    'RETURNS' : refsect,
-    'ERRORS' : refsect,
-    'PORTABILITY' : refsect,
-    'BUGS' : refsect,
-    'WARNINGS' : refsect,
-    'SEEALSO' : seealso,
-    'NOTES' : refsect,  # NOTES is not described in doc.str, so is currently discarded by makedoc, but that doesn't seem right
-    'QUICKREF' : discarded,  # The intent of QUICKREF and MATHREF is not obvious, but they don't generate any output currently
-    'MATHREF' : discarded,
-    'START' : discarded,  # a START command is inserted to contain the text before the first command
-    'END' : discarded,  # an END command is inserted merely to terminate the text for the last command in a comment block
-    'NEWPAGE' : newpage,
+    'FUNCTION': function,
+    'TYPEDEF': function,  # TYPEDEF is not currently used, but described in doc.str
+    'INDEX': index,
+    'TRAD_SYNOPSIS': discarded,  # K&R-style synopsis, obsolete and discarded
+    'ANSI_SYNOPSIS': synopsis,
+    'SYNOPSIS': synopsis,
+    'DESCRIPTION': refsect,
+    'RETURNS': refsect,
+    'ERRORS': refsect,
+    'PORTABILITY': refsect,
+    'BUGS': refsect,
+    'WARNINGS': refsect,
+    'SEEALSO': seealso,
+    'NOTES': refsect,  # NOTES is not described in doc.str, so is currently discarded by makedoc, but that doesn't seem right
+    'QUICKREF': discarded,  # The intent of QUICKREF and MATHREF is not obvious, but they don't generate any output currently
+    'MATHREF': discarded,
+    'START': discarded,  # a START command is inserted to contain the text before the first command
+    'END': discarded,  # an END command is inserted merely to terminate the text for the last command in a comment block
+    'NEWPAGE': newpage,
 }
 #
@@ -455,17 +454,17 @@ def line_markup_convert(p):
     s = s.replace('@@', '@')
     # escape characters not allowed in XML
-    s = s.replace('&','&amp;')
-    s = s.replace('<','&lt;')
-    s = s.replace('>','&gt;')
+    s = s.replace('&', '&amp;')
+    s = s.replace('<', '&lt;')
+    s = s.replace('>', '&gt;')
     # convert <<somecode>> to <code>somecode</code> and <[var]> to
     # <varname>var</varname>
     # also handle nested << <[ ]> >> correctly
-    s = s.replace('&lt;&lt;','<code>')
-    s = s.replace('&lt;[','<varname>')
-    s = s.replace(']&gt;','</varname>')
-    s = s.replace('&gt;&gt;','</code>')
+    s = s.replace('&lt;&lt;', '<code>')
+    s = s.replace('&lt;[', '<varname>')
+    s = s.replace(']&gt;', '</varname>')
+    s = s.replace('&gt;&gt;', '</code>')
     # also convert some simple texinfo markup
     # convert @emph{foo} to <emphasis>foo</emphasis>
@@ -493,18 +492,18 @@ def line_markup_convert(p):
 #
 texinfo_commands = {
-    'ifnottex' : 'IFNOTTEX',
-    'end ifnottex' : 'ENDIFNOTTEX',
-    'tex' : 'IFTEX',
-    'end tex' : 'ENDIFTEX',
-    'comment' : 'COMMENT',
-    'c ' : 'COMMENT',
-    'multitable' : 'MULTICOLUMNTABLE',
-    'end multitable' : 'ENDMULTICOLUMNTABLE',
-    'headitem' : 'MCT_HEADITEM',
-    'tab' : 'MCT_COLUMN_SEPARATOR',
-    'item' : 'MCT_ITEM',
+    'ifnottex': 'IFNOTTEX',
+    'end ifnottex': 'ENDIFNOTTEX',
+    'tex': 'IFTEX',
+    'end tex': 'ENDIFTEX',
+    'comment': 'COMMENT',
+    'c ': 'COMMENT',
+    'multitable': 'MULTICOLUMNTABLE',
+    'end multitable': 'ENDMULTICOLUMNTABLE',
+    'headitem': 'MCT_HEADITEM',
+    'tab': 'MCT_COLUMN_SEPARATOR',
+    'item': 'MCT_ITEM',
 }
 # token names
 tokens = [
@@ -575,9 +574,9 @@ def t_BLANKLINE(t):
     return t
 def t_eof(t):
-    if hasattr(t.lexer,'at_eof'):
+    if hasattr(t.lexer, 'at_eof'):
         # remove eof flag ready for lexing next input
-        delattr(t.lexer,'at_eof')
+        delattr(t.lexer, 'at_eof')
         t.lexer.lineno = 0
         return None
@@ -787,9 +786,10 @@ def p_multitable(p):
     colspec = '\n'.join(['<colspec colwidth="%s*"/>' % (c) for c in colfrac])
     header = '<thead>' + p[2] + '</thead>\n'
     body = '<tbody>' + p[3] + '</tbody>\n'
-    p[0] = '<informaltable><tgroup cols="' + str(len(colfrac)) +'">' + colspec + header + body + '</tgroup></informaltable>'
+    p[0] = '<informaltable><tgroup cols="' + str(len(colfrac)) + '">' + colspec + header + body + '</tgroup></informaltable>'
     parser_verbose(p)
 def p_error(t):
     sys.exit('parse error at line %d, token %s, next token %s' % (t.lineno, t, parser.token()))
@@ -831,10 +831,9 @@ def main(file):
 #
 #
 #
-if __name__ == '__main__' :
+if __name__ == '__main__':
     options = OptionParser()
-    options.add_option('-v', '--verbose', action='count', dest = 'verbose', default = 0)
+    options.add_option('-v', '--verbose', action='count', dest='verbose', default=0)
     (opts, args) = options.parse_args()
     verbose = opts.verbose
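
Because the change is intended to be purely whitespace, one way to sanity-check that behaviour is unchanged is to compare the parsed AST of a script before and after the edit: whitespace-only edits leave the AST identical. A minimal sketch, with hypothetical file names (not the actual repository paths):

    import ast
    import sys


    def same_ast(path_a, path_b):
        """Return True if two Python files parse to identical ASTs.

        Whitespace-only edits, like the ones in this commit, do not
        change the AST, so this is a cheap no-behaviour-change check.
        """
        with open(path_a) as f_a, open(path_b) as f_b:
            tree_a = ast.parse(f_a.read())
            tree_b = ast.parse(f_b.read())
        # ast.dump() omits line/column attributes by default, so moved or
        # removed blank lines do not affect the comparison.
        return ast.dump(tree_a) == ast.dump(tree_b)


    if __name__ == '__main__':
        # Usage: python3 check_ws_only.py old_script.py new_script.py
        print(same_ast(sys.argv[1], sys.argv[2]))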