-rw-r--r--  lv2/core/meta.ttl            2
-rwxr-xr-x  lv2specgen/lv2docgen.py      3
-rwxr-xr-x  lv2specgen/lv2specgen.py    60
-rwxr-xr-x  plugins/literasc.py        125
4 files changed, 102 insertions, 88 deletions
diff --git a/lv2/core/meta.ttl b/lv2/core/meta.ttl
index 772fc48..7611b63 100644
--- a/lv2/core/meta.ttl
+++ b/lv2/core/meta.ttl
@@ -40,6 +40,8 @@ THIS SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH R
rdfs:label "Fix or avoid new compiler and tool warnings."
] , [
rdfs:label "Add dark mode style for documentation."
+ ] , [
+ rdfs:label "Clean up and modernize Python support code."
]
]
] , [
diff --git a/lv2specgen/lv2docgen.py b/lv2specgen/lv2docgen.py
index 35237b3..c5e13a7 100755
--- a/lv2specgen/lv2docgen.py
+++ b/lv2specgen/lv2docgen.py
@@ -38,7 +38,8 @@ rdfs = rdflib.Namespace("http://www.w3.org/2000/01/rdf-schema#")
def uri_to_path(uri):
- path = uri[uri.find(":") :]
+ first_colon = uri.find(":")
+ path = uri[first_colon:]
while not path[0].isalpha():
path = path[1:]
return path
diff --git a/lv2specgen/lv2specgen.py b/lv2specgen/lv2specgen.py
index 10b7cf9..a4ccefa 100755
--- a/lv2specgen/lv2specgen.py
+++ b/lv2specgen/lv2specgen.py
@@ -239,9 +239,9 @@ def linkifyVocabIdentifiers(m, string, classlist, proplist, instalist):
def prettifyHtml(m, markup, subject, classlist, proplist, instalist):
# Syntax highlight all C code
if have_pygments:
- code_rgx = re.compile('<pre class="c-code">(.*?)</pre>', re.DOTALL)
+ code_re = re.compile('<pre class="c-code">(.*?)</pre>', re.DOTALL)
while True:
- code = code_rgx.search(markup)
+ code = code_re.search(markup)
if not code:
break
match_str = xml.sax.saxutils.unescape(code.group(1))
@@ -250,13 +250,13 @@ def prettifyHtml(m, markup, subject, classlist, proplist, instalist):
pygments.lexers.CLexer(),
pygments.formatters.HtmlFormatter(),
)
- markup = code_rgx.sub(code_str, markup, 1)
+ markup = code_re.sub(code_str, markup, 1)
# Syntax highlight all Turtle code
if have_pygments:
- code_rgx = re.compile('<pre class="turtle-code">(.*?)</pre>', re.DOTALL)
+ code_re = re.compile('<pre class="turtle-code">(.*?)</pre>', re.DOTALL)
while True:
- code = code_rgx.search(markup)
+ code = code_re.search(markup)
if not code:
break
match_str = xml.sax.saxutils.unescape(code.group(1))
@@ -265,7 +265,7 @@ def prettifyHtml(m, markup, subject, classlist, proplist, instalist):
pygments.lexers.rdf.TurtleLexer(),
pygments.formatters.HtmlFormatter(),
)
- markup = code_rgx.sub(code_str, markup, 1)
+ markup = code_re.sub(code_str, markup, 1)
# Add links to code documentation for identifiers
markup = linkifyCodeIdentifiers(markup)
@@ -371,7 +371,9 @@ def getDetailedDocumentation(m, subject, classlist, proplist, instalist):
if d:
doc = getObject(d)
if doc.datatype == lv2.Markdown:
- markup += formatDoc(m, subject, doc, classlist, proplist, instalist)
+ markup += formatDoc(
+ m, subject, doc, classlist, proplist, instalist
+ )
else:
html = getLiteralString(doc)
markup += prettifyHtml(
@@ -683,7 +685,9 @@ def extraInfo(term, m):
getTermLink(getObject(p), term, getPredicate(p)), first
)
elif isLiteral(getObject(p)):
- doc += getProperty(linkifyCodeIdentifiers(str(getObject(p))), first)
+ doc += getProperty(
+ linkifyCodeIdentifiers(str(getObject(p))), first
+ )
elif isBlank(getObject(p)):
doc += getProperty(str(blankNodeDesc(getObject(p), m)), first)
else:
@@ -762,7 +766,7 @@ def docTerms(category, list, m, classlist, proplist, instalist):
doc = ""
for term in list:
if not term.startswith(spec_ns_str):
- sys.stderr.write("warning: Skipping external term `%s'" % term)
+ sys.stderr.write("warning: Skipping external term `%s'\n" % term)
continue
t = termName(m, term)
@@ -1014,15 +1018,14 @@ def specAuthors(m, subject):
for d in sorted(dev):
if not first:
devdoc += ", "
- devdoc += '<span class="author" property="doap:developer">%s</span>' % d
+
+ devdoc += f'<span class="author" property="doap:developer">{d}</span>'
first = False
if len(dev) == 1:
- doc += (
- '<tr><th class="metahead">Developer</th><td>%s</td></tr>' % devdoc
- )
+ doc += f'<tr><th class="metahead">Developer</th><td>{devdoc}</td></tr>'
elif len(dev) > 0:
doc += (
- '<tr><th class="metahead">Developers</th><td>%s</td></tr>' % devdoc
+ f'<tr><th class="metahead">Developers</th><td>{devdoc}</td></tr>'
)
maintdoc = ""
@@ -1030,20 +1033,14 @@ def specAuthors(m, subject):
for m in sorted(maint):
if not first:
maintdoc += ", "
+
maintdoc += (
- '<span class="author" property="doap:maintainer">%s</span>' % m
+ f'<span class="author" property="doap:maintainer">{m}</span>'
)
first = False
- if len(maint) == 1:
- doc += (
- '<tr><th class="metahead">Maintainer</th><td>%s</td></tr>'
- % maintdoc
- )
- elif len(maint) > 0:
- doc += (
- '<tr><th class="metahead">Maintainers</th><td>%s</td></tr>'
- % maintdoc
- )
+ if len(maint):
+ label = "Maintainer" if len(maint) == 1 else "Maintainers"
+ doc += f'<tr><th class="metahead">{label}</th><td>{maintdoc}</td></tr>'
return doc
@@ -1183,7 +1180,10 @@ def load_tags(path, docdir):
def getChildText(elt, tagname):
"Return the content of the first child node with a certain tag name."
for e in elt.childNodes:
- if e.nodeType == xml.dom.Node.ELEMENT_NODE and e.tagName == tagname:
+ if (
+ e.nodeType == xml.dom.Node.ELEMENT_NODE
+ and e.tagName == tagname
+ ):
return e.firstChild.nodeValue
return ""
@@ -1457,7 +1457,9 @@ def specgen(
# Generate Term HTML
classlist = docTerms("Class", classlist, m, classlist, proplist, instalist)
- proplist = docTerms("Property", proplist, m, classlist, proplist, instalist)
+ proplist = docTerms(
+ "Property", proplist, m, classlist, proplist, instalist
+ )
if instances:
instlist = docTerms(
"Instance", instalist, m, classlist, proplist, instalist
@@ -1571,7 +1573,9 @@ def specgen(
etree.XMLParser(dtd_validation=True, no_network=True),
)
except Exception as e:
- sys.stderr.write("error: Validation failed for %s: %s" % (specloc, e))
+ sys.stderr.write(
+ "error: Validation failed for %s: %s\n" % (specloc, e)
+ )
finally:
os.chdir(oldcwd)
diff --git a/plugins/literasc.py b/plugins/literasc.py
index 82ee226..74b13a7 100755
--- a/plugins/literasc.py
+++ b/plugins/literasc.py
@@ -1,13 +1,16 @@
#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-#
-# Literasc, a simple literate programming tool for C, C++, and Turtle.
-# Copyright 2012 David Robillard <d@drobilla.net>
-#
-# Unlike many LP tools, this tool uses normal source code as input, there is no
-# tangle/weave and no special file format. The literate parts of the program
-# are written in comments, which are emitted as paragraphs of regular text
-# interleaved with code. Asciidoc is both the comment and output syntax.
+
+# Copyright 2012-2022 David Robillard <d@drobilla.net>
+# SPDX-License-Identifier: ISC
+
+"""
+A simple literate programming tool for C, C++, and Turtle.
+
+Unlike many LP tools, this tool uses normal source code as input; there is no
+tangle/weave and no special file format. The literate parts of the program are
+written in comments, which are emitted as paragraphs of regular text
+interleaved with code. Asciidoc is both the comment and output syntax.
+"""
import os
import re
@@ -15,74 +18,81 @@ import sys
def format_text(text):
- "Format a text (comment) fragment and return it as a marked up string"
+ "Format a text (comment) fragment and return it as a marked up string."
return "\n\n" + re.sub("\n *", "\n", text.strip()) + "\n\n"
def format_code(lang, code):
+ "Format a block of code and return it as a marked up string."
+
if code.strip() == "":
return code
- head = "[source,%s]" % lang
- sep = "-" * len(head) + "\n"
- return head + "\n" + sep + code.strip("\n") + "\n" + sep
+ head = f"[source,{lang}]"
+ code = code.strip("\n")
+ sep = "-" * len(head)
+ return "\n".join([head, sep, code, sep]) + "\n"
-def format_c_source(filename, file):
- output = "=== %s ===\n" % os.path.basename(filename)
+def format_c_source(filename, in_file):
+ "Format an annotated C source file as a marked up string."
+
+ output = f"=== {os.path.basename(filename)} ===\n"
chunk = ""
prev_c = 0
in_comment = False
in_comment_start = False
n_stars = 0
- code = ""
- for line in file:
- code += line
+ code = "".join(in_file)
# Skip initial license comment
if code[0:2] == "/*":
- code = code[code.find("*/") + 2 :]
+ end = code.find("*/") + 2
+ code = code[end:]
+
+ def last_chunk(chunk):
+ length = len(chunk) - 1
+ return chunk[0:length]
- for c in code:
- if prev_c == "/" and c == "*":
+ for char in code:
+ if prev_c == "/" and char == "*":
in_comment_start = True
n_stars = 1
elif in_comment_start:
- if c == "*":
+ if char == "*":
n_stars += 1
else:
if n_stars > 1:
- output += format_code("c", chunk[0 : len(chunk) - 1])
+ output += format_code("c", last_chunk(chunk))
chunk = ""
in_comment = True
else:
- chunk += "*" + c
+ chunk += "*" + char
in_comment_start = False
- elif in_comment and prev_c == "*" and c == "/":
+ elif in_comment and prev_c == "*" and char == "/":
if n_stars > 1:
- output += format_text(chunk[0 : len(chunk) - 1])
+ output += format_text(last_chunk(chunk))
else:
- output += format_code(
- "c", "/* " + chunk[0 : len(chunk) - 1] + "*/"
- )
+ output += format_code("c", "/* " + last_chunk(chunk) + "*/")
in_comment = False
in_comment_start = False
chunk = ""
- elif in_comment_start and c == "*":
- n_stars += 1
else:
- chunk += c
- prev_c = c
+ chunk += char
+
+ prev_c = char
return output + format_code("c", chunk)
-def format_ttl_source(filename, file):
- output = "=== %s ===\n" % os.path.basename(filename)
+def format_ttl_source(filename, in_file):
+ "Format an annotated Turtle source file as a marked up string."
+
+ output = f"=== {os.path.basename(filename)} ===\n"
in_comment = False
chunk = ""
- for line in file:
+ for line in in_file:
is_comment = line.strip().startswith("#")
if in_comment:
if is_comment:
@@ -101,36 +111,33 @@ def format_ttl_source(filename, file):
if in_comment:
return output + format_text(chunk)
- else:
- return output + format_code("turtle", chunk)
+
+ return output + format_code("turtle", chunk)
def gen(out, filenames):
- for filename in filenames:
- file = open(filename)
- if not file:
- sys.stderr.write("Failed to open file %s\n" % filename)
- continue
-
- if filename.endswith(".c") or filename.endswith(".h"):
- out.write(format_c_source(filename, file))
- elif filename.endswith(".ttl") or filename.endswith(".ttl.in"):
- out.write(format_ttl_source(filename, file))
- elif filename.endswith(".txt"):
- for line in file:
- out.write(line)
- out.write("\n")
- else:
- sys.stderr.write(
- "Unknown source format `%s'" % (filename[filename.find(".") :])
- )
+ "Write markup generated from filenames to an output file."
- file.close()
+ for filename in filenames:
+ with open(filename, "r", encoding="utf-8") as in_file:
+ if filename.endswith(".c") or filename.endswith(".h"):
+ out.write(format_c_source(filename, in_file))
+ elif filename.endswith(".ttl") or filename.endswith(".ttl.in"):
+ out.write(format_ttl_source(filename, in_file))
+ elif filename.endswith(".txt"):
+ for line in in_file:
+ out.write(line)
+ out.write("\n")
+ else:
+ sys.stderr.write(
+                    f"Unknown source format `{os.path.splitext(filename)[1]}`\n"
+ )
if __name__ == "__main__":
if len(sys.argv) < 2:
- sys.stderr.write("Usage: %s FILENAME...\n" % sys.argv[1])
+ sys.stderr.write(f"Usage: {sys.argv[0]} OUT_FILE IN_FILE...\n")
sys.exit(1)
- gen(sys.argv[1:])
+ with open(sys.argv[1], "w", encoding="utf-8") as out_file:
+ gen(out_file, sys.argv[2:])
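
For reference, here is a minimal sketch of how the reworked literasc helpers can be driven after this change. Importing the script as a module named literasc, and the file names used below, are assumptions for illustration only, not part of the commit.

# Minimal usage sketch (assumes plugins/literasc.py is importable as a module
# named "literasc"; the module name and file paths are illustrative assumptions).
import literasc

# format_code() wraps a fragment in an Asciidoc source block whose delimiter
# lines match the length of the "[source,LANG]" header:
print(literasc.format_code("c", "int x = 1;\n"))
# [source,c]
# ----------
# int x = 1;
# ----------

# gen() now takes the output stream first and the input filenames second,
# mirroring the new command line: literasc.py OUT_FILE IN_FILE...
with open("manual.adoc", "w", encoding="utf-8") as out_file:
    literasc.gen(out_file, ["plugin.c", "manifest.ttl"])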