Diffstat (limited to 'Documentation/sphinx')
-rw-r--r-- | Documentation/sphinx/automarkup.py | 293
-rw-r--r-- | Documentation/sphinx/cdomain.py | 115
-rw-r--r-- | Documentation/sphinx/kernel_abi.py | 173
-rw-r--r-- | Documentation/sphinx/kernel_feat.py | 133
-rwxr-xr-x | Documentation/sphinx/kernel_include.py | 9
-rw-r--r-- | Documentation/sphinx/kerneldoc-preamble.sty | 234
-rw-r--r-- | Documentation/sphinx/kerneldoc.py | 250
-rw-r--r-- | Documentation/sphinx/kernellog.py | 28
-rw-r--r-- | Documentation/sphinx/kfigure.py | 214
-rw-r--r-- | Documentation/sphinx/load_config.py | 9
-rwxr-xr-x | Documentation/sphinx/maintainers_include.py | 14
-rw-r--r-- | Documentation/sphinx/min_requirements.txt | 11
-rwxr-xr-x | Documentation/sphinx/parse-headers.pl | 11
-rw-r--r-- | Documentation/sphinx/requirements.txt | 7
-rwxr-xr-x | Documentation/sphinx/rstFlatTable.py | 25
-rw-r--r-- | Documentation/sphinx/templates/kernel-toc.html | 18
-rw-r--r-- | Documentation/sphinx/templates/translations.html | 15
-rw-r--r-- | Documentation/sphinx/translations.py | 99
18 files changed, 1412 insertions(+), 246 deletions(-)
diff --git a/Documentation/sphinx/automarkup.py b/Documentation/sphinx/automarkup.py index b18236370742..563033f764bb 100644 --- a/Documentation/sphinx/automarkup.py +++ b/Documentation/sphinx/automarkup.py @@ -7,12 +7,11 @@ from docutils import nodes import sphinx from sphinx import addnodes -if sphinx.version_info[0] < 2 or \ - sphinx.version_info[0] == 2 and sphinx.version_info[1] < 1: - from sphinx.environment import NoUri -else: - from sphinx.errors import NoUri +from sphinx.errors import NoUri import re +from itertools import chain + +from kernel_abi import get_kernel_abi # # Regex nastiness. Of course. @@ -21,7 +20,32 @@ import re # :c:func: block (i.e. ":c:func:`mmap()`s" flakes out), so the last # bit tries to restrict matches to things that won't create trouble. # -RE_function = re.compile(r'([\w_][\w\d_]+\(\))') +RE_function = re.compile(r'\b(([a-zA-Z_]\w+)\(\))', flags=re.ASCII) + +# +# Sphinx 3 uses a different C role for each one of struct, union, enum and +# typedef +# +RE_struct = re.compile(r'\b(struct)\s+([a-zA-Z_]\w+)', flags=re.ASCII) +RE_union = re.compile(r'\b(union)\s+([a-zA-Z_]\w+)', flags=re.ASCII) +RE_enum = re.compile(r'\b(enum)\s+([a-zA-Z_]\w+)', flags=re.ASCII) +RE_typedef = re.compile(r'\b(typedef)\s+([a-zA-Z_]\w+)', flags=re.ASCII) + +# +# Detects a reference to a documentation page of the form Documentation/... with +# an optional extension +# +RE_doc = re.compile(r'(\bDocumentation/)?((\.\./)*[\w\-/]+)\.(rst|txt)') +RE_abi_file = re.compile(r'(\bDocumentation/ABI/[\w\-/]+)') +RE_abi_symbol = re.compile(r'(\b/(sys|config|proc)/[\w\-/]+)') + +RE_namespace = re.compile(r'^\s*..\s*c:namespace::\s*(\S+)\s*$') + +# +# Reserved C words that we should skip when cross-referencing +# +Skipnames = [ 'for', 'if', 'register', 'sizeof', 'struct', 'unsigned' ] + # # Many places in the docs refer to common system calls. It is @@ -34,70 +58,245 @@ Skipfuncs = [ 'open', 'close', 'read', 'write', 'fcntl', 'mmap', 'select', 'poll', 'fork', 'execve', 'clone', 'ioctl', 'socket' ] +c_namespace = '' + # -# Find all occurrences of function() and try to replace them with -# appropriate cross references. +# Detect references to commits. # -def markup_funcs(docname, app, node): - cdom = app.env.domains['c'] +RE_git = re.compile(r'commit\s+(?P<rev>[0-9a-f]{12,40})(?:\s+\(".*?"\))?', + flags=re.IGNORECASE | re.DOTALL) + +def markup_refs(docname, app, node): t = node.astext() done = 0 repl = [ ] - for m in RE_function.finditer(t): + # + # Associate each regex with the function that will markup its matches + # + + markup_func = {RE_doc: markup_doc_ref, + RE_abi_file: markup_abi_file_ref, + RE_abi_symbol: markup_abi_ref, + RE_function: markup_func_ref_sphinx3, + RE_struct: markup_c_ref, + RE_union: markup_c_ref, + RE_enum: markup_c_ref, + RE_typedef: markup_c_ref, + RE_git: markup_git} + + match_iterators = [regex.finditer(t) for regex in markup_func] + # + # Sort all references by the starting position in text + # + sorted_matches = sorted(chain(*match_iterators), key=lambda m: m.start()) + for m in sorted_matches: # - # Include any text prior to function() as a normal text node. + # Include any text prior to match as a normal text node. 
# if m.start() > done: repl.append(nodes.Text(t[done:m.start()])) + # - # Go through the dance of getting an xref out of the C domain - # - target = m.group(1)[:-2] - target_text = nodes.Text(target + '()') - xref = None - if target not in Skipfuncs: - lit_text = nodes.literal(classes=['xref', 'c', 'c-func']) - lit_text += target_text - pxref = addnodes.pending_xref('', refdomain = 'c', - reftype = 'function', - reftarget = target, modname = None, - classname = None) - # - # XXX The Latex builder will throw NoUri exceptions here, - # work around that by ignoring them. - # - try: - xref = cdom.resolve_xref(app.env, docname, app.builder, - 'function', target, pxref, lit_text) - except NoUri: - xref = None - # - # Toss the xref into the list if we got it; otherwise just put - # the function text. + # Call the function associated with the regex that matched this text and + # append its return to the text # - if xref: - repl.append(xref) - else: - repl.append(target_text) + repl.append(markup_func[m.re](docname, app, m)) + done = m.end() if done < len(t): repl.append(nodes.Text(t[done:])) return repl +# +# Keep track of cross-reference lookups that failed so we don't have to +# do them again. +# +failed_lookups = { } +def failure_seen(target): + return (target) in failed_lookups +def note_failure(target): + failed_lookups[target] = True + +# +# In sphinx3 we can cross-reference to C macro and function, each one with its +# own C role, but both match the same regex, so we try both. +# +def markup_func_ref_sphinx3(docname, app, match): + base_target = match.group(2) + target_text = nodes.Text(match.group(0)) + possible_targets = [base_target] + # Check if this document has a namespace, and if so, try + # cross-referencing inside it first. + if c_namespace: + possible_targets.insert(0, c_namespace + "." + base_target) + + if base_target not in Skipnames: + for target in possible_targets: + if (target not in Skipfuncs) and not failure_seen(target): + lit_text = nodes.literal(classes=['xref', 'c', 'c-func']) + lit_text += target_text + xref = add_and_resolve_xref(app, docname, 'c', 'function', + target, contnode=lit_text) + if xref: + return xref + note_failure(target) + + return target_text + +def markup_c_ref(docname, app, match): + class_str = {RE_struct: 'c-struct', + RE_union: 'c-union', + RE_enum: 'c-enum', + RE_typedef: 'c-type', + } + reftype_str = {RE_struct: 'struct', + RE_union: 'union', + RE_enum: 'enum', + RE_typedef: 'type', + } + + base_target = match.group(2) + target_text = nodes.Text(match.group(0)) + possible_targets = [base_target] + # Check if this document has a namespace, and if so, try + # cross-referencing inside it first. + if c_namespace: + possible_targets.insert(0, c_namespace + "." + base_target) + + if base_target not in Skipnames: + for target in possible_targets: + if not (match.re == RE_function and target in Skipfuncs): + lit_text = nodes.literal(classes=['xref', 'c', class_str[match.re]]) + lit_text += target_text + xref = add_and_resolve_xref(app, docname, 'c', + reftype_str[match.re], target, + contnode=lit_text) + if xref: + return xref + + return target_text + +# +# Try to replace a documentation reference of the form Documentation/... 
with a +# cross reference to that page +# +def markup_doc_ref(docname, app, match): + absolute = match.group(1) + target = match.group(2) + if absolute: + target = "/" + target + + xref = add_and_resolve_xref(app, docname, 'std', 'doc', target) + if xref: + return xref + else: + return nodes.Text(match.group(0)) + +# +# Try to replace a documentation reference for ABI symbols and files +# with a cross reference to that page +# +def markup_abi_ref(docname, app, match, warning=False): + kernel_abi = get_kernel_abi() + + fname = match.group(1) + target = kernel_abi.xref(fname) + + # Kernel ABI doesn't describe such file or symbol + if not target: + if warning: + kernel_abi.log.warning("%s not found", fname) + return nodes.Text(match.group(0)) + + xref = add_and_resolve_xref(app, docname, 'std', 'ref', target) + if xref: + return xref + else: + return nodes.Text(match.group(0)) + +def add_and_resolve_xref(app, docname, domain, reftype, target, contnode=None): + # + # Go through the dance of getting an xref out of the corresponding domain + # + dom_obj = app.env.domains[domain] + pxref = addnodes.pending_xref('', refdomain = domain, reftype = reftype, + reftarget = target, modname = None, + classname = None, refexplicit = False) + + # + # XXX The Latex builder will throw NoUri exceptions here, + # work around that by ignoring them. + # + try: + xref = dom_obj.resolve_xref(app.env, docname, app.builder, reftype, + target, pxref, contnode) + except NoUri: + xref = None + + if xref: + return xref + # + # We didn't find the xref; if a container node was supplied, + # mark it as a broken xref + # + if contnode: + contnode['classes'].append("broken_xref") + return contnode + +# +# Variant of markup_abi_ref() that warns whan a reference is not found +# +def markup_abi_file_ref(docname, app, match): + return markup_abi_ref(docname, app, match, warning=True) + + +def get_c_namespace(app, docname): + source = app.env.doc2path(docname) + with open(source) as f: + for l in f: + match = RE_namespace.search(l) + if match: + return match.group(1) + return '' + +def markup_git(docname, app, match): + # While we could probably assume that we are running in a git + # repository, we can't know for sure, so let's just mechanically + # turn them into git.kernel.org links without checking their + # validity. (Maybe we can do something in the future to warn about + # these references if this is explicitly requested.) + text = match.group(0) + rev = match.group('rev') + return nodes.reference('', nodes.Text(text), + refuri=f'https://git.kernel.org/torvalds/c/{rev}') + def auto_markup(app, doctree, name): + global c_namespace + c_namespace = get_c_namespace(app, name) + def text_but_not_a_reference(node): + # The nodes.literal test catches ``literal text``, its purpose is to + # avoid adding cross-references to functions that have been explicitly + # marked with cc:func:. + if not isinstance(node, nodes.Text) or isinstance(node.parent, nodes.literal): + return False + + child_of_reference = False + parent = node.parent + while parent: + if isinstance(parent, nodes.Referential): + child_of_reference = True + break + parent = parent.parent + return not child_of_reference + # # This loop could eventually be improved on. Someday maybe we # want a proper tree traversal with a lot of awareness of which # kinds of nodes to prune. But this works well for now. 
# - # The nodes.literal test catches ``literal text``, its purpose is to - # avoid adding cross-references to functions that have been explicitly - # marked with cc:func:. - # for para in doctree.traverse(nodes.paragraph): - for node in para.traverse(nodes.Text): - if not isinstance(node.parent, nodes.literal): - node.parent.replace(node, markup_funcs(name, app, node)) + for node in para.traverse(condition=text_but_not_a_reference): + node.parent.replace(node, markup_refs(name, app, node)) def setup(app): app.connect('doctree-resolved', auto_markup) diff --git a/Documentation/sphinx/cdomain.py b/Documentation/sphinx/cdomain.py index cbac8e608dc4..3dc285dc70f5 100644 --- a/Documentation/sphinx/cdomain.py +++ b/Documentation/sphinx/cdomain.py @@ -1,6 +1,7 @@ # -*- coding: utf-8; mode: python -*- +# SPDX-License-Identifier: GPL-2.0 # pylint: disable=W0141,C0113,C0103,C0325 -u""" +""" cdomain ~~~~~~~ @@ -40,18 +41,91 @@ from sphinx import addnodes from sphinx.domains.c import c_funcptr_sig_re, c_sig_re from sphinx.domains.c import CObject as Base_CObject from sphinx.domains.c import CDomain as Base_CDomain +from itertools import chain +import re + +__version__ = '1.1' + +# Namespace to be prepended to the full name +namespace = None + +# +# Handle trivial newer c domain tags that are part of Sphinx 3.1 c domain tags +# - Store the namespace if ".. c:namespace::" tag is found +# +RE_namespace = re.compile(r'^\s*..\s*c:namespace::\s*(\S+)\s*$') + +def markup_namespace(match): + global namespace + + namespace = match.group(1) + + return "" + +# +# Handle c:macro for function-style declaration +# +RE_macro = re.compile(r'^\s*..\s*c:macro::\s*(\S+)\s+(\S.*)\s*$') +def markup_macro(match): + return ".. c:function:: " + match.group(1) + ' ' + match.group(2) + +# +# Handle newer c domain tags that are evaluated as .. c:type: for +# backward-compatibility with Sphinx < 3.0 +# +RE_ctype = re.compile(r'^\s*..\s*c:(struct|union|enum|enumerator|alias)::\s*(.*)$') + +def markup_ctype(match): + return ".. c:type:: " + match.group(2) + +# +# Handle newer c domain tags that are evaluated as :c:type: for +# backward-compatibility with Sphinx < 3.0 +# +RE_ctype_refs = re.compile(r':c:(var|struct|union|enum|enumerator)::`([^\`]+)`') +def markup_ctype_refs(match): + return ":c:type:`" + match.group(2) + '`' + +# +# Simply convert :c:expr: and :c:texpr: into a literal block. +# +RE_expr = re.compile(r':c:(expr|texpr):`([^\`]+)`') +def markup_c_expr(match): + return '\\ ``' + match.group(2) + '``\\ ' + +# +# Parse Sphinx 3.x C markups, replacing them by backward-compatible ones +# +def c_markups(app, docname, source): + result = "" + markup_func = { + RE_namespace: markup_namespace, + RE_expr: markup_c_expr, + RE_macro: markup_macro, + RE_ctype: markup_ctype, + RE_ctype_refs: markup_ctype_refs, + } + + lines = iter(source[0].splitlines(True)) + for n in lines: + match_iterators = [regex.finditer(n) for regex in markup_func] + matches = sorted(chain(*match_iterators), key=lambda m: m.start()) + for m in matches: + n = n[:m.start()] + markup_func[m.re](m) + n[m.end():] -__version__ = '1.0' + result = result + n -# Get Sphinx version -major, minor, patch = sphinx.version_info[:3] + source[0] = result + +# +# Now implements support for the cdomain namespacing logic +# def setup(app): - if (major == 1 and minor < 8): - app.override_domain(CDomain) - else: - app.add_domain(CDomain, override=True) + # Handle easy Sphinx 3.1+ simple new tags: :c:expr and .. 
c:namespace:: + app.connect('source-read', c_markups) + app.add_domain(CDomain, override=True) return dict( version = __version__, @@ -69,12 +143,14 @@ class CObject(Base_CObject): } def handle_func_like_macro(self, sig, signode): - u"""Handles signatures of function-like macros. + """Handles signatures of function-like macros. - If the objtype is 'function' and the the signature ``sig`` is a + If the objtype is 'function' and the signature ``sig`` is a function-like macro, the name of the macro is returned. Otherwise ``False`` is returned. """ + global namespace + if not self.objtype == 'function': return False @@ -96,7 +172,7 @@ class CObject(Base_CObject): if len(arglist[0].split(" ")) > 1: return False - # This is a function-like macro, it's arguments are typeless! + # This is a function-like macro, its arguments are typeless! signode += addnodes.desc_name(fullname, fullname) paramlist = addnodes.desc_parameterlist() signode += paramlist @@ -107,11 +183,16 @@ class CObject(Base_CObject): param += nodes.emphasis(argname, argname) paramlist += param + if namespace: + fullname = namespace + "." + fullname + return fullname def handle_signature(self, sig, signode): """Transform a C signature into RST nodes.""" + global namespace + fullname = self.handle_func_like_macro(sig, signode) if not fullname: fullname = super(CObject, self).handle_signature(sig, signode) @@ -122,6 +203,10 @@ class CObject(Base_CObject): else: # FIXME: handle :name: value of other declaration types? pass + else: + if namespace: + fullname = namespace + "." + fullname + return fullname def add_target_and_index(self, name, sig, signode): @@ -145,13 +230,7 @@ class CObject(Base_CObject): indextext = self.get_index_text(name) if indextext: - if major == 1 and minor < 4: - # indexnode's tuple changed in 1.4 - # https://github.com/sphinx-doc/sphinx/commit/e6a5a3a92e938fcd75866b4227db9e0524d58f7c - self.indexnode['entries'].append( - ('single', indextext, targetname, '')) - else: - self.indexnode['entries'].append( + self.indexnode['entries'].append( ('single', indextext, targetname, '', None)) class CDomain(Base_CDomain): diff --git a/Documentation/sphinx/kernel_abi.py b/Documentation/sphinx/kernel_abi.py new file mode 100644 index 000000000000..4c4375201b9e --- /dev/null +++ b/Documentation/sphinx/kernel_abi.py @@ -0,0 +1,173 @@ +# -*- coding: utf-8; mode: python -*- +# coding=utf-8 +# SPDX-License-Identifier: GPL-2.0 +# +""" + kernel-abi + ~~~~~~~~~~ + + Implementation of the ``kernel-abi`` reST-directive. + + :copyright: Copyright (C) 2016 Markus Heiser + :copyright: Copyright (C) 2016-2020 Mauro Carvalho Chehab + :maintained-by: Mauro Carvalho Chehab <mchehab+huawei@kernel.org> + :license: GPL Version 2, June 1991 see Linux/COPYING for details. + + The ``kernel-abi`` (:py:class:`KernelCmd`) directive calls the + scripts/get_abi.py script to parse the Kernel ABI files. + + Overview of directive's argument and options. + + .. code-block:: rst + + .. kernel-abi:: <ABI directory location> + :debug: + + The argument ``<ABI directory location>`` is required. It contains the + location of the ABI files to be parsed. + + ``debug`` + Inserts a code-block with the *raw* reST. Sometimes it is helpful to see + what reST is generated. 
+ +""" + +import os +import re +import sys + +from docutils import nodes, statemachine +from docutils.statemachine import ViewList +from docutils.parsers.rst import directives, Directive +from sphinx.util.docutils import switch_source_input +from sphinx.util import logging + +srctree = os.path.abspath(os.environ["srctree"]) +sys.path.insert(0, os.path.join(srctree, "scripts/lib/abi")) + +from abi_parser import AbiParser + +__version__ = "1.0" + +logger = logging.getLogger('kernel_abi') +path = os.path.join(srctree, "Documentation/ABI") + +_kernel_abi = None + +def get_kernel_abi(): + """ + Initialize kernel_abi global var, if not initialized yet. + + This is needed to avoid warnings during Sphinx module initialization. + """ + global _kernel_abi + + if not _kernel_abi: + # Parse ABI symbols only once + _kernel_abi = AbiParser(path, logger=logger) + _kernel_abi.parse_abi() + _kernel_abi.check_issues() + + return _kernel_abi + +def setup(app): + + app.add_directive("kernel-abi", KernelCmd) + return { + "version": __version__, + "parallel_read_safe": True, + "parallel_write_safe": True + } + + +class KernelCmd(Directive): + """KernelABI (``kernel-abi``) directive""" + + required_arguments = 1 + optional_arguments = 3 + has_content = False + final_argument_whitespace = True + parser = None + + option_spec = { + "debug": directives.flag, + "no-symbols": directives.flag, + "no-files": directives.flag, + } + + def run(self): + kernel_abi = get_kernel_abi() + + doc = self.state.document + if not doc.settings.file_insertion_enabled: + raise self.warning("docutils: file insertion disabled") + + env = self.state.document.settings.env + content = ViewList() + node = nodes.section() + + abi_type = self.arguments[0] + + if "no-symbols" in self.options: + show_symbols = False + else: + show_symbols = True + + if "no-files" in self.options: + show_file = False + else: + show_file = True + + tab_width = self.options.get('tab-width', + self.state.document.settings.tab_width) + + old_f = None + n = 0 + n_sym = 0 + for msg, f, ln in kernel_abi.doc(show_file=show_file, + show_symbols=show_symbols, + filter_path=abi_type): + n_sym += 1 + msg_list = statemachine.string2lines(msg, tab_width, + convert_whitespace=True) + if "debug" in self.options: + lines = [ + "", "", ".. code-block:: rst", + " :linenos:", "" + ] + for m in msg_list: + lines.append(" " + m) + else: + lines = msg_list + + for line in lines: + # sphinx counts lines from 0 + content.append(line, f, ln - 1) + n += 1 + + if f != old_f: + # Add the file to Sphinx build dependencies if the file exists + fname = os.path.join(srctree, f) + if os.path.isfile(fname): + env.note_dependency(fname) + + old_f = f + + # Sphinx doesn't like to parse big messages. 
So, let's + # add content symbol by symbol + if content: + self.do_parse(content, node) + content = ViewList() + + if show_symbols and not show_file: + logger.verbose("%s ABI: %i symbols (%i ReST lines)" % (abi_type, n_sym, n)) + elif not show_symbols and show_file: + logger.verbose("%s ABI: %i files (%i ReST lines)" % (abi_type, n_sym, n)) + else: + logger.verbose("%s ABI: %i data (%i ReST lines)" % (abi_type, n_sym, n)) + + return node.children + + def do_parse(self, content, node): + with switch_source_input(self.state, content): + self.state.nested_parse(content, 0, node, match_titles=1) diff --git a/Documentation/sphinx/kernel_feat.py b/Documentation/sphinx/kernel_feat.py new file mode 100644 index 000000000000..e3a51867f27b --- /dev/null +++ b/Documentation/sphinx/kernel_feat.py @@ -0,0 +1,133 @@ +# coding=utf-8 +# SPDX-License-Identifier: GPL-2.0 +# +""" + kernel-feat + ~~~~~~~~~~~ + + Implementation of the ``kernel-feat`` reST-directive. + + :copyright: Copyright (C) 2016 Markus Heiser + :copyright: Copyright (C) 2016-2019 Mauro Carvalho Chehab + :maintained-by: Mauro Carvalho Chehab <mchehab+samsung@kernel.org> + :license: GPL Version 2, June 1991 see Linux/COPYING for details. + + The ``kernel-feat`` (:py:class:`KernelFeat`) directive calls the + scripts/get_feat.pl script to parse the Kernel ABI files. + + Overview of directive's argument and options. + + .. code-block:: rst + + .. kernel-feat:: <ABI directory location> + :debug: + + The argument ``<ABI directory location>`` is required. It contains the + location of the ABI files to be parsed. + + ``debug`` + Inserts a code-block with the *raw* reST. Sometimes it is helpful to see + what reST is generated. + +""" + +import codecs +import os +import re +import subprocess +import sys + +from docutils import nodes, statemachine +from docutils.statemachine import ViewList +from docutils.parsers.rst import directives, Directive +from docutils.utils.error_reporting import ErrorString +from sphinx.util.docutils import switch_source_input + +__version__ = '1.0' + +def setup(app): + + app.add_directive("kernel-feat", KernelFeat) + return dict( + version = __version__ + , parallel_read_safe = True + , parallel_write_safe = True + ) + +class KernelFeat(Directive): + + """KernelFeat (``kernel-feat``) directive""" + + required_arguments = 1 + optional_arguments = 2 + has_content = False + final_argument_whitespace = True + + option_spec = { + "debug" : directives.flag + } + + def warn(self, message, **replace): + replace["fname"] = self.state.document.current_source + replace["line_no"] = replace.get("line_no", self.lineno) + message = ("%(fname)s:%(line_no)s: [kernel-feat WARN] : " + message) % replace + self.state.document.settings.env.app.warn(message, prefix="") + + def run(self): + doc = self.state.document + if not doc.settings.file_insertion_enabled: + raise self.warning("docutils: file insertion disabled") + + env = doc.settings.env + + srctree = os.path.abspath(os.environ["srctree"]) + + args = [ + os.path.join(srctree, 'scripts/get_feat.pl'), + 'rest', + '--enable-fname', + '--dir', + os.path.join(srctree, 'Documentation', self.arguments[0]), + ] + + if len(self.arguments) > 1: + args.extend(['--arch', self.arguments[1]]) + + lines = subprocess.check_output(args, cwd=os.path.dirname(doc.current_source)).decode('utf-8') + + line_regex = re.compile(r"^\.\. 
FILE (\S+)$") + + out_lines = "" + + for line in lines.split("\n"): + match = line_regex.search(line) + if match: + fname = match.group(1) + + # Add the file to Sphinx build dependencies + env.note_dependency(os.path.abspath(fname)) + else: + out_lines += line + "\n" + + nodeList = self.nestedParse(out_lines, self.arguments[0]) + return nodeList + + def nestedParse(self, lines, fname): + content = ViewList() + node = nodes.section() + + if "debug" in self.options: + code_block = "\n\n.. code-block:: rst\n :linenos:\n" + for l in lines.split("\n"): + code_block += "\n " + l + lines = code_block + "\n\n" + + for c, l in enumerate(lines.split("\n")): + content.append(l, fname, c) + + buf = self.state.memo.title_styles, self.state.memo.section_level, self.state.memo.reporter + + with switch_source_input(self.state, content): + self.state.nested_parse(content, 0, node, match_titles=1) + + return node.children diff --git a/Documentation/sphinx/kernel_include.py b/Documentation/sphinx/kernel_include.py index f523aa68a36b..1e566e87ebcd 100755 --- a/Documentation/sphinx/kernel_include.py +++ b/Documentation/sphinx/kernel_include.py @@ -1,8 +1,9 @@ #!/usr/bin/env python3 # -*- coding: utf-8; mode: python -*- +# SPDX-License-Identifier: GPL-2.0 # pylint: disable=R0903, C0330, R0914, R0912, E0401 -u""" +""" kernel-include ~~~~~~~~~~~~~~ @@ -56,9 +57,10 @@ def setup(app): class KernelInclude(Include): # ============================================================================== - u"""KernelInclude (``kernel-include``) directive""" + """KernelInclude (``kernel-include``) directive""" def run(self): + env = self.state.document.settings.env path = os.path.realpath( os.path.expandvars(self.arguments[0])) @@ -70,6 +72,8 @@ class KernelInclude(Include): self.arguments[0] = path + env.note_dependency(os.path.abspath(path)) + #return super(KernelInclude, self).run() # won't work, see HINTs in _run() return self._run() @@ -94,7 +98,6 @@ class KernelInclude(Include): # HINT: this is the only line I had to change / commented out: #path = utils.relative_path(None, path) - path = nodes.reprunicode(path) encoding = self.options.get( 'encoding', self.state.document.settings.input_encoding) e_handler=self.state.document.settings.input_encoding_error_handler diff --git a/Documentation/sphinx/kerneldoc-preamble.sty b/Documentation/sphinx/kerneldoc-preamble.sty new file mode 100644 index 000000000000..5d68395539fe --- /dev/null +++ b/Documentation/sphinx/kerneldoc-preamble.sty @@ -0,0 +1,234 @@ +% -*- coding: utf-8 -*- +% SPDX-License-Identifier: GPL-2.0 +% +% LaTeX preamble for "make latexdocs" or "make pdfdocs" including: +% - TOC width settings +% - Setting of tabulary (\tymin) +% - Headheight setting for fancyhdr +% - Fontfamily settings for CJK (Chinese, Japanese, and Korean) translations +% +% Note on the suffix of .sty: +% This is not implemented as a LaTeX style file, but as a file containing +% plain LaTeX code to be included into preamble. +% ".sty" is chosen because ".tex" would cause the build scripts to confuse +% this file with a LaTeX main file. +% +% Copyright (C) 2022 Akira Yokosawa + +% Custom width parameters for TOC +% - Redefine low-level commands defined in report.cls. +% - Indent of 2 chars is preserved for ease of comparison. 
+% Summary of changes from default params: +% Width of page number (\@pnumwidth): 1.55em -> 2.7em +% Width of chapter number: 1.5em -> 2.4em +% Indent of section number: 1.5em -> 2.4em +% Width of section number: 2.6em -> 3.2em +% Indent of subsection number: 4.1em -> 5.6em +% Width of subsection number: 3.5em -> 4.3em +% +% These params can have 4 digit page counts, 3 digit chapter counts, +% section counts of 4 digits + 1 period (e.g., 18.10), and subsection counts +% of 5 digits + 2 periods (e.g., 18.7.13). +\makeatletter +%% Redefine \@pnumwidth (page number width) +\renewcommand*\@pnumwidth{2.7em} +%% Redefine \l@chapter (chapter list entry) +\renewcommand*\l@chapter[2]{% + \ifnum \c@tocdepth >\m@ne + \addpenalty{-\@highpenalty}% + \vskip 1.0em \@plus\p@ + \setlength\@tempdima{2.4em}% + \begingroup + \parindent \z@ \rightskip \@pnumwidth + \parfillskip -\@pnumwidth + \leavevmode \bfseries + \advance\leftskip\@tempdima + \hskip -\leftskip + #1\nobreak\hfil + \nobreak\hb@xt@\@pnumwidth{\hss #2% + \kern-\p@\kern\p@}\par + \penalty\@highpenalty + \endgroup + \fi} +%% Redefine \l@section and \l@subsection +\renewcommand*\l@section{\@dottedtocline{1}{2.4em}{3.2em}} +\renewcommand*\l@subsection{\@dottedtocline{2}{5.6em}{4.3em}} +\makeatother +%% Prevent default \sphinxtableofcontentshook from overwriting above tweaks. +\renewcommand{\sphinxtableofcontentshook}{} % Empty the hook + +% Prevent column squeezing of tabulary. \tymin is set by Sphinx as: +% \setlength{\tymin}{3\fontcharwd\font`0 } +% , which is too short. +\setlength{\tymin}{20em} + +% Adjust \headheight for fancyhdr +\addtolength{\headheight}{1.6pt} +\addtolength{\topmargin}{-1.6pt} + +% Translations have Asian (CJK) characters which are only displayed if +% xeCJK is used +\usepackage{ifthen} +\newboolean{enablecjk} +\setboolean{enablecjk}{false} +\IfFontExistsTF{Noto Sans CJK SC}{ + \IfFileExists{xeCJK.sty}{ + \setboolean{enablecjk}{true} + }{} +}{} +\ifthenelse{\boolean{enablecjk}}{ + % Load xeCJK when both the Noto Sans CJK font and xeCJK.sty are available. + \usepackage{xeCJK} + % Noto CJK fonts don't provide slant shape. [AutoFakeSlant] permits + % its emulation. + % Select KR variant at the beginning of each document so that quotation + % and apostorph symbols of half-width is used in TOC of Latin documents. 
+ \IfFontExistsTF{Noto Serif CJK KR}{ + \setCJKmainfont{Noto Serif CJK KR}[AutoFakeSlant] + }{ + \setCJKmainfont{Noto Sans CJK KR}[AutoFakeSlant] + } + \setCJKsansfont{Noto Sans CJK KR}[AutoFakeSlant] + \setCJKmonofont{Noto Sans Mono CJK KR}[AutoFakeSlant] + % Teach xeCJK of half-width symbols + \xeCJKDeclareCharClass{HalfLeft}{`“,`‘} + \xeCJKDeclareCharClass{HalfRight}{`”,`’} + % CJK Language-specific font choices + %% for Simplified Chinese + \IfFontExistsTF{Noto Serif CJK SC}{ + \newCJKfontfamily[SCmain]\scmain{Noto Serif CJK SC}[AutoFakeSlant] + \newCJKfontfamily[SCserif]\scserif{Noto Serif CJK SC}[AutoFakeSlant] + }{ + \newCJKfontfamily[SCmain]\scmain{Noto Sans CJK SC}[AutoFakeSlant] + \newCJKfontfamily[SCserif]\scserif{Noto Sans CJK SC}[AutoFakeSlant] + } + \newCJKfontfamily[SCsans]\scsans{Noto Sans CJK SC}[AutoFakeSlant] + \newCJKfontfamily[SCmono]\scmono{Noto Sans Mono CJK SC}[AutoFakeSlant] + %% for Traditional Chinese + \IfFontExistsTF{Noto Serif CJK TC}{ + \newCJKfontfamily[TCmain]\tcmain{Noto Serif CJK TC}[AutoFakeSlant] + \newCJKfontfamily[TCserif]\tcserif{Noto Serif CJK TC}[AutoFakeSlant] + }{ + \newCJKfontfamily[TCmain]\tcmain{Noto Sans CJK TC}[AutoFakeSlant] + \newCJKfontfamily[TCserif]\tcserif{Noto Sans CJK TC}[AutoFakeSlant] + } + \newCJKfontfamily[TCsans]\tcsans{Noto Sans CJK TC}[AutoFakeSlant] + \newCJKfontfamily[TCmono]\tcmono{Noto Sans Mono CJK TC}[AutoFakeSlant] + %% for Korean + \IfFontExistsTF{Noto Serif CJK KR}{ + \newCJKfontfamily[KRmain]\krmain{Noto Serif CJK KR}[AutoFakeSlant] + \newCJKfontfamily[KRserif]\krserif{Noto Serif CJK KR}[AutoFakeSlant] + }{ + \newCJKfontfamily[KRmain]\krmain{Noto Sans CJK KR}[AutoFakeSlant] + \newCJKfontfamily[KRserif]\krserif{Noto Sans CJK KR}[AutoFakeSlant] + } + \newCJKfontfamily[KRsans]\krsans{Noto Sans CJK KR}[AutoFakeSlant] + \newCJKfontfamily[KRmono]\krmono{Noto Sans Mono CJK KR}[AutoFakeSlant] + %% for Japanese + \IfFontExistsTF{Noto Serif CJK JP}{ + \newCJKfontfamily[JPmain]\jpmain{Noto Serif CJK JP}[AutoFakeSlant] + \newCJKfontfamily[JPserif]\jpserif{Noto Serif CJK JP}[AutoFakeSlant] + }{ + \newCJKfontfamily[JPmain]\jpmain{Noto Sans CJK JP}[AutoFakeSlant] + \newCJKfontfamily[JPserif]\jpserif{Noto Sans CJK JP}[AutoFakeSlant] + } + \newCJKfontfamily[JPsans]\jpsans{Noto Sans CJK JP}[AutoFakeSlant] + \newCJKfontfamily[JPmono]\jpmono{Noto Sans Mono CJK JP}[AutoFakeSlant] + % Define custom macros to on/off CJK + %% One and half spacing for CJK contents + \newcommand{\kerneldocCJKon}{\makexeCJKactive\onehalfspacing} + \newcommand{\kerneldocCJKoff}{\makexeCJKinactive\singlespacing} + % Define custom macros for switching CJK font setting + %% for Simplified Chinese + \newcommand{\kerneldocBeginSC}{% + \begingroup% + \scmain% + \xeCJKDeclareCharClass{FullLeft}{`“,`‘}% Full-width in SC + \xeCJKDeclareCharClass{FullRight}{`”,`’}% Full-width in SC + \renewcommand{\CJKrmdefault}{SCserif}% + \renewcommand{\CJKsfdefault}{SCsans}% + \renewcommand{\CJKttdefault}{SCmono}% + \xeCJKsetup{CJKspace = false}% gobble white spaces by ' ' + % For CJK ascii-art alignment + \setmonofont{Noto Sans Mono CJK SC}[AutoFakeSlant]% + } + \newcommand{\kerneldocEndSC}{\endgroup} + %% for Traditional Chinese + \newcommand{\kerneldocBeginTC}{% + \begingroup% + \tcmain% + \xeCJKDeclareCharClass{FullLeft}{`“,`‘}% Full-width in TC + \xeCJKDeclareCharClass{FullRight}{`”,`’}% Full-width in TC + \renewcommand{\CJKrmdefault}{TCserif}% + \renewcommand{\CJKsfdefault}{TCsans}% + \renewcommand{\CJKttdefault}{TCmono}% + \xeCJKsetup{CJKspace = false}% gobble white spaces by ' 
' + % For CJK ascii-art alignment + \setmonofont{Noto Sans Mono CJK TC}[AutoFakeSlant]% + } + \newcommand{\kerneldocEndTC}{\endgroup} + %% for Korean + \newcommand{\kerneldocBeginKR}{% + \begingroup% + \krmain% + \renewcommand{\CJKrmdefault}{KRserif}% + \renewcommand{\CJKsfdefault}{KRsans}% + \renewcommand{\CJKttdefault}{KRmono}% + % \xeCJKsetup{CJKspace = true} % true by default + % For CJK ascii-art alignment (still misaligned for Hangul) + \setmonofont{Noto Sans Mono CJK KR}[AutoFakeSlant]% + } + \newcommand{\kerneldocEndKR}{\endgroup} + %% for Japanese + \newcommand{\kerneldocBeginJP}{% + \begingroup% + \jpmain% + \renewcommand{\CJKrmdefault}{JPserif}% + \renewcommand{\CJKsfdefault}{JPsans}% + \renewcommand{\CJKttdefault}{JPmono}% + \xeCJKsetup{CJKspace = false}% gobble white space by ' ' + % For CJK ascii-art alignment + \setmonofont{Noto Sans Mono CJK JP}[AutoFakeSlant]% + } + \newcommand{\kerneldocEndJP}{\endgroup} + + % Single spacing in literal blocks + \fvset{baselinestretch=1} + % To customize \sphinxtableofcontents + \usepackage{etoolbox} + % Inactivate CJK after tableofcontents + \apptocmd{\sphinxtableofcontents}{\kerneldocCJKoff}{}{} + \xeCJKsetup{CJKspace = true}% For inter-phrase space of Korean TOC + % Suppress extra white space at latin .. non-latin in literal blocks + \AtBeginEnvironment{sphinxVerbatim}{\CJKsetecglue{}} +}{ % Don't enable CJK + % Custom macros to on/off CJK and switch CJK fonts (Dummy) + \newcommand{\kerneldocCJKon}{} + \newcommand{\kerneldocCJKoff}{} + %% By defining \kerneldocBegin(SC|TC|KR|JP) as commands with an argument + %% and ignore the argument (#1) in their definitions, whole contents of + %% CJK chapters can be ignored. + \newcommand{\kerneldocBeginSC}[1]{% + %% Put a note on missing CJK fonts or the xecjk package in place of + %% zh_CN translation. + \begin{sphinxadmonition}{note}{Note on missing fonts and a package:} + Translations of Simplified Chinese (zh\_CN), Traditional Chinese + (zh\_TW), Korean (ko\_KR), and Japanese (ja\_JP) were skipped + due to the lack of suitable font families and/or the texlive-xecjk + package. + + If you want them, please install non-variable ``Noto Sans CJK'' + font families along with the texlive-xecjk package by following + instructions from + \sphinxcode{./scripts/sphinx-pre-install}. + Having optional non-variable ``Noto Serif CJK'' font families will + improve the looks of those translations. + \end{sphinxadmonition}} + \newcommand{\kerneldocEndSC}{} + \newcommand{\kerneldocBeginTC}[1]{} + \newcommand{\kerneldocEndTC}{} + \newcommand{\kerneldocBeginKR}[1]{} + \newcommand{\kerneldocEndKR}{} + \newcommand{\kerneldocBeginJP}[1]{} + \newcommand{\kerneldocEndJP}{} +} diff --git a/Documentation/sphinx/kerneldoc.py b/Documentation/sphinx/kerneldoc.py index 4bcbd6ae01cd..2586b4d4e494 100644 --- a/Documentation/sphinx/kerneldoc.py +++ b/Documentation/sphinx/kerneldoc.py @@ -1,4 +1,5 @@ # coding=utf-8 +# SPDX-License-Identifier: MIT # # Copyright © 2016 Intel Corporation # @@ -24,8 +25,6 @@ # Authors: # Jani Nikula <jani.nikula@intel.com> # -# Please make sure this works on both python2 and python3. 
-# import codecs import os @@ -37,21 +36,43 @@ import glob from docutils import nodes, statemachine from docutils.statemachine import ViewList from docutils.parsers.rst import directives, Directive - -# -# AutodocReporter is only good up to Sphinx 1.7 -# import sphinx +from sphinx.util.docutils import switch_source_input +from sphinx.util import logging +from pprint import pformat -Use_SSI = sphinx.__version__[:3] >= '1.7' -if Use_SSI: - from sphinx.util.docutils import switch_source_input -else: - from sphinx.ext.autodoc import AutodocReporter +srctree = os.path.abspath(os.environ["srctree"]) +sys.path.insert(0, os.path.join(srctree, "scripts/lib/kdoc")) -import kernellog +from kdoc_files import KernelFiles +from kdoc_output import RestFormat __version__ = '1.0' +kfiles = None +logger = logging.getLogger(__name__) + +def cmd_str(cmd): + """ + Helper function to output a command line that can be used to produce + the same records via command line. Helpful to debug troubles at the + script. + """ + + cmd_line = "" + + for w in cmd: + if w == "" or " " in w: + esc_cmd = "'" + w + "'" + else: + esc_cmd = w + + if cmd_line: + cmd_line += " " + esc_cmd + continue + else: + cmd_line = esc_cmd + + return cmd_line class KernelDocDirective(Directive): """Extract kernel-doc comments from the specified file""" @@ -62,21 +83,52 @@ class KernelDocDirective(Directive): 'export': directives.unchanged, 'internal': directives.unchanged, 'identifiers': directives.unchanged, + 'no-identifiers': directives.unchanged, 'functions': directives.unchanged, } has_content = False + verbose = 0 + + parse_args = {} + msg_args = {} + + def handle_args(self): - def run(self): env = self.state.document.settings.env cmd = [env.config.kerneldoc_bin, '-rst', '-enable-lineno'] filename = env.config.kerneldoc_srctree + '/' + self.arguments[0] + + # Arguments used by KernelFiles.parse() function + self.parse_args = { + "file_list": [filename], + "export_file": [] + } + + # Arguments used by KernelFiles.msg() function + self.msg_args = { + "enable_lineno": True, + "export": False, + "internal": False, + "symbol": [], + "nosymbol": [], + "no_doc_sections": False + } + export_file_patterns = [] + verbose = os.environ.get("V") + if verbose: + try: + self.verbose = int(verbose) + except ValueError: + pass + # Tell sphinx of the dependency env.note_dependency(os.path.abspath(filename)) - tab_width = self.options.get('tab-width', self.state.document.settings.tab_width) + self.tab_width = self.options.get('tab-width', + self.state.document.settings.tab_width) # 'function' is an alias of 'identifiers' if 'functions' in self.options: @@ -85,82 +137,164 @@ class KernelDocDirective(Directive): # FIXME: make this nicer and more robust against errors if 'export' in self.options: cmd += ['-export'] + self.msg_args["export"] = True export_file_patterns = str(self.options.get('export')).split() elif 'internal' in self.options: cmd += ['-internal'] + self.msg_args["internal"] = True export_file_patterns = str(self.options.get('internal')).split() elif 'doc' in self.options: - cmd += ['-function', str(self.options.get('doc'))] + func = str(self.options.get('doc')) + cmd += ['-function', func] + self.msg_args["symbol"].append(func) elif 'identifiers' in self.options: identifiers = self.options.get('identifiers').split() if identifiers: for i in identifiers: + i = i.rstrip("\\").strip() + if not i: + continue + cmd += ['-function', i] + self.msg_args["symbol"].append(i) else: cmd += ['-no-doc-sections'] + self.msg_args["no_doc_sections"] = True + 
+ if 'no-identifiers' in self.options: + no_identifiers = self.options.get('no-identifiers').split() + if no_identifiers: + for i in no_identifiers: + i = i.rstrip("\\").strip() + if not i: + continue + + cmd += ['-nosymbol', i] + self.msg_args["nosymbol"].append(i) for pattern in export_file_patterns: + pattern = pattern.rstrip("\\").strip() + if not pattern: + continue + for f in glob.glob(env.config.kerneldoc_srctree + '/' + pattern): env.note_dependency(os.path.abspath(f)) cmd += ['-export-file', f] + self.parse_args["export_file"].append(f) + + # Export file is needed by both parse and msg, as kernel-doc + # cache exports. + self.msg_args["export_file"] = self.parse_args["export_file"] cmd += [filename] - try: - kernellog.verbose(env.app, - 'calling kernel-doc \'%s\'' % (" ".join(cmd))) + return cmd - p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - out, err = p.communicate() + def run_cmd(self, cmd): + """ + Execute an external kernel-doc command. + """ - out, err = codecs.decode(out, 'utf-8'), codecs.decode(err, 'utf-8') + env = self.state.document.settings.env + node = nodes.section() - if p.returncode != 0: - sys.stderr.write(err) + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + out, err = p.communicate() - kernellog.warn(env.app, - 'kernel-doc \'%s\' failed with return code %d' % (" ".join(cmd), p.returncode)) - return [nodes.error(None, nodes.paragraph(text = "kernel-doc missing"))] - elif env.config.kerneldoc_verbosity > 0: - sys.stderr.write(err) + out, err = codecs.decode(out, 'utf-8'), codecs.decode(err, 'utf-8') - lines = statemachine.string2lines(out, tab_width, convert_whitespace=True) - result = ViewList() + if p.returncode != 0: + sys.stderr.write(err) - lineoffset = 0; - line_regex = re.compile("^#define LINENO ([0-9]+)$") - for line in lines: - match = line_regex.search(line) - if match: - # sphinx counts lines from 0 - lineoffset = int(match.group(1)) - 1 - # we must eat our comments since the upset the markup - else: - result.append(line, filename, lineoffset) - lineoffset += 1 + logger.warning("kernel-doc '%s' failed with return code %d" + % (" ".join(cmd), p.returncode)) + return [nodes.error(None, nodes.paragraph(text = "kernel-doc missing"))] + elif env.config.kerneldoc_verbosity > 0: + sys.stderr.write(err) - node = nodes.section() - self.do_parse(result, node) + filenames = self.parse_args["file_list"] + for filename in filenames: + self.parse_msg(filename, node, out, cmd) - return node.children + return node.children + + def parse_msg(self, filename, node, out, cmd): + """ + Handles a kernel-doc output for a given file + """ + + env = self.state.document.settings.env + + lines = statemachine.string2lines(out, self.tab_width, + convert_whitespace=True) + result = ViewList() + + lineoffset = 0; + line_regex = re.compile(r"^\.\. LINENO ([0-9]+)$") + for line in lines: + match = line_regex.search(line) + if match: + # sphinx counts lines from 0 + lineoffset = int(match.group(1)) - 1 + # we must eat our comments since the upset the markup + else: + doc = str(env.srcdir) + "/" + env.docname + ":" + str(self.lineno) + result.append(line, doc + ": " + filename, lineoffset) + lineoffset += 1 + + self.do_parse(result, node) + + def run_kdoc(self, cmd, kfiles): + """ + Execute kernel-doc classes directly instead of running as a separate + command. 
+ """ + + env = self.state.document.settings.env + + node = nodes.section() + + kfiles.parse(**self.parse_args) + filenames = self.parse_args["file_list"] + + for filename, out in kfiles.msg(**self.msg_args, filenames=filenames): + self.parse_msg(filename, node, out, cmd) + + return node.children + + def run(self): + global kfiles + + cmd = self.handle_args() + if self.verbose >= 1: + logger.info(cmd_str(cmd)) + + try: + if kfiles: + return self.run_kdoc(cmd, kfiles) + else: + return self.run_cmd(cmd) except Exception as e: # pylint: disable=W0703 - kernellog.warn(env.app, 'kernel-doc \'%s\' processing failed with: %s' % - (" ".join(cmd), str(e))) + logger.warning("kernel-doc '%s' processing failed with: %s" % + (cmd_str(cmd), pformat(e))) return [nodes.error(None, nodes.paragraph(text = "kernel-doc missing"))] def do_parse(self, result, node): - if Use_SSI: - with switch_source_input(self.state, result): - self.state.nested_parse(result, 0, node, match_titles=1) - else: - save = self.state.memo.title_styles, self.state.memo.section_level, self.state.memo.reporter - self.state.memo.reporter = AutodocReporter(result, self.state.memo.reporter) - self.state.memo.title_styles, self.state.memo.section_level = [], 0 - try: - self.state.nested_parse(result, 0, node, match_titles=1) - finally: - self.state.memo.title_styles, self.state.memo.section_level, self.state.memo.reporter = save + with switch_source_input(self.state, result): + self.state.nested_parse(result, 0, node, match_titles=1) + +def setup_kfiles(app): + global kfiles + + kerneldoc_bin = app.env.config.kerneldoc_bin + + if kerneldoc_bin and kerneldoc_bin.endswith("kernel-doc.py"): + print("Using Python kernel-doc") + out_style = RestFormat() + kfiles = KernelFiles(out_style=out_style, logger=logger) + else: + print(f"Using {kerneldoc_bin}") def setup(app): @@ -170,6 +304,8 @@ def setup(app): app.add_directive('kernel-doc', KernelDocDirective) + app.connect('builder-inited', setup_kfiles) + return dict( version = __version__, parallel_read_safe = True, diff --git a/Documentation/sphinx/kernellog.py b/Documentation/sphinx/kernellog.py deleted file mode 100644 index af924f51a7dc..000000000000 --- a/Documentation/sphinx/kernellog.py +++ /dev/null @@ -1,28 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -# -# Sphinx has deprecated its older logging interface, but the replacement -# only goes back to 1.6. So here's a wrapper layer to keep around for -# as long as we support 1.4. -# -import sphinx - -if sphinx.__version__[:3] >= '1.6': - UseLogging = True - from sphinx.util import logging - logger = logging.getLogger('kerneldoc') -else: - UseLogging = False - -def warn(app, message): - if UseLogging: - logger.warning(message) - else: - app.warn(message) - -def verbose(app, message): - if UseLogging: - logger.verbose(message) - else: - app.verbose(message) - - diff --git a/Documentation/sphinx/kfigure.py b/Documentation/sphinx/kfigure.py index 788704886eec..ad495c0da270 100644 --- a/Documentation/sphinx/kfigure.py +++ b/Documentation/sphinx/kfigure.py @@ -1,6 +1,7 @@ # -*- coding: utf-8; mode: python -*- +# SPDX-License-Identifier: GPL-2.0 # pylint: disable=C0103, R0903, R0912, R0915 -u""" +""" scalable figure and image handling ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -31,10 +32,13 @@ u""" * ``dot(1)``: Graphviz (https://www.graphviz.org). If Graphviz is not available, the DOT language is inserted as literal-block. + For conversion to PDF, ``rsvg-convert(1)`` of librsvg + (https://gitlab.gnome.org/GNOME/librsvg) is used when available. 
* SVG to PDF: To generate PDF, you need at least one of this tools: - ``convert(1)``: ImageMagick (https://www.imagemagick.org) + - ``inkscape(1)``: Inkscape (https://inkscape.org/) List of customizations: @@ -49,36 +53,21 @@ import os from os import path import subprocess from hashlib import sha1 -import sys - +import re from docutils import nodes from docutils.statemachine import ViewList from docutils.parsers.rst import directives from docutils.parsers.rst.directives import images import sphinx - from sphinx.util.nodes import clean_astext -from six import iteritems - -import kernellog - -PY3 = sys.version_info[0] == 3 +from sphinx.util import logging -if PY3: - _unicode = str -else: - _unicode = unicode - -# Get Sphinx version -major, minor, patch = sphinx.version_info[:3] -if major == 1 and minor > 3: - # patches.Figure only landed in Sphinx 1.4 - from sphinx.directives.patches import Figure # pylint: disable=C0413 -else: - Figure = images.Figure +Figure = images.Figure __version__ = '1.0.0' +logger = logging.getLogger('kfigure') + # simple helper # ------------- @@ -121,10 +110,20 @@ def pass_handle(self, node): # pylint: disable=W0613 # Graphviz's dot(1) support dot_cmd = None +# dot(1) -Tpdf should be used +dot_Tpdf = False # ImageMagick' convert(1) support convert_cmd = None +# librsvg's rsvg-convert(1) support +rsvg_convert_cmd = None + +# Inkscape's inkscape(1) support +inkscape_cmd = None +# Inkscape prior to 1.0 uses different command options +inkscape_ver_one = False + def setup(app): # check toolchain first @@ -167,28 +166,69 @@ def setup(app): def setupTools(app): - u""" + """ Check available build tools and log some *verbose* messages. This function is called once, when the builder is initiated. """ - global dot_cmd, convert_cmd # pylint: disable=W0603 - kernellog.verbose(app, "kfigure: check installed tools ...") + global dot_cmd, dot_Tpdf, convert_cmd, rsvg_convert_cmd # pylint: disable=W0603 + global inkscape_cmd, inkscape_ver_one # pylint: disable=W0603 + logger.verbose("kfigure: check installed tools ...") dot_cmd = which('dot') convert_cmd = which('convert') + rsvg_convert_cmd = which('rsvg-convert') + inkscape_cmd = which('inkscape') if dot_cmd: - kernellog.verbose(app, "use dot(1) from: " + dot_cmd) + logger.verbose("use dot(1) from: " + dot_cmd) + + try: + dot_Thelp_list = subprocess.check_output([dot_cmd, '-Thelp'], + stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as err: + dot_Thelp_list = err.output + pass + + dot_Tpdf_ptn = b'pdf' + dot_Tpdf = re.search(dot_Tpdf_ptn, dot_Thelp_list) else: - kernellog.warn(app, "dot(1) not found, for better output quality install " - "graphviz from https://www.graphviz.org") - if convert_cmd: - kernellog.verbose(app, "use convert(1) from: " + convert_cmd) + logger.warning( + "dot(1) not found, for better output quality install graphviz from https://www.graphviz.org" + ) + if inkscape_cmd: + logger.verbose("use inkscape(1) from: " + inkscape_cmd) + inkscape_ver = subprocess.check_output([inkscape_cmd, '--version'], + stderr=subprocess.DEVNULL) + ver_one_ptn = b'Inkscape 1' + inkscape_ver_one = re.search(ver_one_ptn, inkscape_ver) + convert_cmd = None + rsvg_convert_cmd = None + dot_Tpdf = False + else: - kernellog.warn(app, - "convert(1) not found, for SVG to PDF conversion install " - "ImageMagick (https://www.imagemagick.org)") + if convert_cmd: + logger.verbose("use convert(1) from: " + convert_cmd) + else: + logger.verbose( + "Neither inkscape(1) nor convert(1) found.\n" + "For SVG to PDF conversion, install 
either Inkscape (https://inkscape.org/) (preferred) or\n" + "ImageMagick (https://www.imagemagick.org)" + ) + + if rsvg_convert_cmd: + logger.verbose("use rsvg-convert(1) from: " + rsvg_convert_cmd) + logger.verbose("use 'dot -Tsvg' and rsvg-convert(1) for DOT -> PDF conversion") + dot_Tpdf = False + else: + logger.verbose( + "rsvg-convert(1) not found.\n" + " SVG rendering of convert(1) is done by ImageMagick-native renderer." + ) + if dot_Tpdf: + logger.verbose("use 'dot -Tpdf' for DOT -> PDF conversion") + else: + logger.verbose("use 'dot -Tsvg' and convert(1) for DOT -> PDF conversion") # integrate conversion tools @@ -222,13 +262,12 @@ def convert_image(img_node, translator, src_fname=None): # in kernel builds, use 'make SPHINXOPTS=-v' to see verbose messages - kernellog.verbose(app, 'assert best format for: ' + img_node['uri']) + logger.verbose('assert best format for: ' + img_node['uri']) if in_ext == '.dot': if not dot_cmd: - kernellog.verbose(app, - "dot from graphviz not available / include DOT raw.") + logger.verbose("dot from graphviz not available / include DOT raw.") img_node.replace_self(file2literal(src_fname)) elif translator.builder.format == 'latex': @@ -254,9 +293,12 @@ def convert_image(img_node, translator, src_fname=None): elif in_ext == '.svg': if translator.builder.format == 'latex': - if convert_cmd is None: - kernellog.verbose(app, - "no SVG to PDF conversion available / include SVG raw.") + if not inkscape_cmd and convert_cmd is None: + logger.warning( + "no SVG to PDF conversion available / include SVG raw.\n" + "Including large raw SVGs can cause xelatex error.\n" + "Install Inkscape (preferred) or ImageMagick." + ) img_node.replace_self(file2literal(src_fname)) else: dst_fname = path.join(translator.builder.outdir, fname + '.pdf') @@ -266,22 +308,28 @@ def convert_image(img_node, translator, src_fname=None): if dst_fname: # the builder needs not to copy one more time, so pop it if exists. translator.builder.images.pop(img_node['uri'], None) - _name = dst_fname[len(translator.builder.outdir) + 1:] + _name = dst_fname[len(str(translator.builder.outdir)) + 1:] if isNewer(dst_fname, src_fname): - kernellog.verbose(app, - "convert: {out}/%s already exists and is newer" % _name) + logger.verbose("convert: {out}/%s already exists and is newer" % _name) else: ok = False mkdir(path.dirname(dst_fname)) if in_ext == '.dot': - kernellog.verbose(app, 'convert DOT to: {out}/' + _name) - ok = dot2format(app, src_fname, dst_fname) + logger.verbose('convert DOT to: {out}/' + _name) + if translator.builder.format == 'latex' and not dot_Tpdf: + svg_fname = path.join(translator.builder.outdir, fname + '.svg') + ok1 = dot2format(app, src_fname, svg_fname) + ok2 = svg2pdf_by_rsvg(app, svg_fname, dst_fname) + ok = ok1 and ok2 + + else: + ok = dot2format(app, src_fname, dst_fname) elif in_ext == '.svg': - kernellog.verbose(app, 'convert SVG to: {out}/' + _name) + logger.verbose('convert SVG to: {out}/' + _name) ok = svg2pdf(app, src_fname, dst_fname) if not ok: @@ -310,27 +358,77 @@ def dot2format(app, dot_fname, out_fname): with open(out_fname, "w") as out: exit_code = subprocess.call(cmd, stdout = out) if exit_code != 0: - kernellog.warn(app, + logger.warning( "Error #%d when calling: %s" % (exit_code, " ".join(cmd))) return bool(exit_code == 0) def svg2pdf(app, svg_fname, pdf_fname): - """Converts SVG to PDF with ``convert(1)`` command. + """Converts SVG to PDF with ``inkscape(1)`` or ``convert(1)`` command. 
- Uses ``convert(1)`` from ImageMagick (https://www.imagemagick.org) for - conversion. Returns ``True`` on success and ``False`` if an error occurred. + Uses ``inkscape(1)`` from Inkscape (https://inkscape.org/) or ``convert(1)`` + from ImageMagick (https://www.imagemagick.org) for conversion. + Returns ``True`` on success and ``False`` if an error occurred. * ``svg_fname`` pathname of the input SVG file with extension (``.svg``) * ``pdf_name`` pathname of the output PDF file with extension (``.pdf``) """ cmd = [convert_cmd, svg_fname, pdf_fname] - # use stdout and stderr from parent - exit_code = subprocess.call(cmd) + cmd_name = 'convert(1)' + + if inkscape_cmd: + cmd_name = 'inkscape(1)' + if inkscape_ver_one: + cmd = [inkscape_cmd, '-o', pdf_fname, svg_fname] + else: + cmd = [inkscape_cmd, '-z', '--export-pdf=%s' % pdf_fname, svg_fname] + + try: + warning_msg = subprocess.check_output(cmd, stderr=subprocess.STDOUT) + exit_code = 0 + except subprocess.CalledProcessError as err: + warning_msg = err.output + exit_code = err.returncode + pass + if exit_code != 0: - kernellog.warn(app, "Error #%d when calling: %s" % (exit_code, " ".join(cmd))) + logger.warning("Error #%d when calling: %s" % + (exit_code, " ".join(cmd))) + if warning_msg: + logger.warning( "Warning msg from %s: %s" % + (cmd_name, str(warning_msg, 'utf-8'))) + elif warning_msg: + logger.verbose("Warning msg from %s (likely harmless):\n%s" % + (cmd_name, str(warning_msg, 'utf-8'))) + return bool(exit_code == 0) +def svg2pdf_by_rsvg(app, svg_fname, pdf_fname): + """Convert SVG to PDF with ``rsvg-convert(1)`` command. + + * ``svg_fname`` pathname of input SVG file, including extension ``.svg`` + * ``pdf_fname`` pathname of output PDF file, including extension ``.pdf`` + + Input SVG file should be the one generated by ``dot2format()``. + SVG -> PDF conversion is done by ``rsvg-convert(1)``. + + If ``rsvg-convert(1)`` is unavailable, fall back to ``svg2pdf()``. + + """ + + if rsvg_convert_cmd is None: + ok = svg2pdf(app, svg_fname, pdf_fname) + else: + cmd = [rsvg_convert_cmd, '--format=pdf', '-o', pdf_fname, svg_fname] + # use stdout and stderr from parent + exit_code = subprocess.call(cmd) + if exit_code != 0: + logger.warning("Error #%d when calling: %s" % + (exit_code, " ".join(cmd))) + ok = bool(exit_code == 0) + + return ok + # image handling # --------------------- @@ -348,7 +446,7 @@ class kernel_image(nodes.image): pass class KernelImage(images.Image): - u"""KernelImage directive + """KernelImage directive Earns everything from ``.. image::`` directive, except *remote URI* and *glob* pattern. The KernelImage wraps a image node into a @@ -384,7 +482,7 @@ class kernel_figure(nodes.figure): """Node for ``kernel-figure`` directive.""" class KernelFigure(Figure): - u"""KernelImage directive + """KernelImage directive Earns everything from ``.. figure::`` directive, except *remote URI* and *glob* pattern. The KernelFigure wraps a figure node into a kernel_figure @@ -421,15 +519,15 @@ def visit_kernel_render(self, node): app = self.builder.app srclang = node.get('srclang') - kernellog.verbose(app, 'visit kernel-render node lang: "%s"' % (srclang)) + logger.verbose('visit kernel-render node lang: "%s"' % srclang) tmp_ext = RENDER_MARKUP_EXT.get(srclang, None) if tmp_ext is None: - kernellog.warn(app, 'kernel-render: "%s" unknown / include raw.' % (srclang)) + logger.warning( 'kernel-render: "%s" unknown / include raw.' 
% srclang) return if not dot_cmd and tmp_ext == '.dot': - kernellog.verbose(app, "dot from graphviz not available / include raw.") + logger.verbose("dot from graphviz not available / include raw.") return literal_block = node[0] @@ -460,7 +558,7 @@ class kernel_render(nodes.General, nodes.Inline, nodes.Element): pass class KernelRender(Figure): - u"""KernelRender directive + """KernelRender directive Render content by external tool. Has all the options known from the *figure* directive, plus option ``caption``. If ``caption`` has a @@ -540,7 +638,7 @@ def add_kernel_figure_to_std_domain(app, doctree): docname = app.env.docname labels = std.data["labels"] - for name, explicit in iteritems(doctree.nametypes): + for name, explicit in doctree.nametypes.items(): if not explicit: continue labelid = doctree.nameids[name] diff --git a/Documentation/sphinx/load_config.py b/Documentation/sphinx/load_config.py index eeb394b39e2c..1afb0c97f06b 100644 --- a/Documentation/sphinx/load_config.py +++ b/Documentation/sphinx/load_config.py @@ -1,15 +1,16 @@ # -*- coding: utf-8; mode: python -*- +# SPDX-License-Identifier: GPL-2.0 # pylint: disable=R0903, C0330, R0914, R0912, E0401 import os import sys -from sphinx.util.pycompat import execfile_ +from sphinx.util.osutil import fs_encoding # ------------------------------------------------------------------------------ def loadConfig(namespace): # ------------------------------------------------------------------------------ - u"""Load an additional configuration file into *namespace*. + """Load an additional configuration file into *namespace*. The name of the configuration file is taken from the environment ``SPHINX_CONF``. The external configuration file extends (or overwrites) the @@ -48,7 +49,9 @@ def loadConfig(namespace): sys.stdout.write("load additional sphinx-config: %s\n" % config_file) config = namespace.copy() config['__file__'] = config_file - execfile_(config_file, config) + with open(config_file, 'rb') as f: + code = compile(f.read(), fs_encoding, 'exec') + exec(code, config) del config['__file__'] namespace.update(config) else: diff --git a/Documentation/sphinx/maintainers_include.py b/Documentation/sphinx/maintainers_include.py index dc8fed48d3c2..d31cff867436 100755 --- a/Documentation/sphinx/maintainers_include.py +++ b/Documentation/sphinx/maintainers_include.py @@ -3,7 +3,7 @@ # -*- coding: utf-8; mode: python -*- # pylint: disable=R0903, C0330, R0914, R0912, E0401 -u""" +""" maintainers-include ~~~~~~~~~~~~~~~~~~~ @@ -37,7 +37,7 @@ def setup(app): ) class MaintainersInclude(Include): - u"""MaintainersInclude (``maintainers-include``) directive""" + """MaintainersInclude (``maintainers-include``) directive""" required_arguments = 0 def parse_maintainers(self, path): @@ -61,8 +61,6 @@ class MaintainersInclude(Include): field_content = "" for line in open(path): - if sys.version_info.major == 2: - line = unicode(line, 'utf-8') # Have we reached the end of the preformatted Descriptions text? if descriptions and line.startswith('Maintainers'): descriptions = False @@ -79,7 +77,7 @@ class MaintainersInclude(Include): line = line.rstrip() # Linkify all non-wildcard refs to ReST files in Documentation/. - pat = '(Documentation/([^\s\?\*]*)\.rst)' + pat = r'(Documentation/([^\s\?\*]*)\.rst)' m = re.search(pat, line) if m: # maintainers.rst is in a subdirectory, so include "../". 
@@ -92,11 +90,11 @@ class MaintainersInclude(Include): output = "| %s" % (line.replace("\\", "\\\\")) # Look for and record field letter to field name mappings: # R: Designated *reviewer*: FullName <address@domain> - m = re.search("\s(\S):\s", line) + m = re.search(r"\s(\S):\s", line) if m: field_letter = m.group(1) if field_letter and not field_letter in fields: - m = re.search("\*([^\*]+)\*", line) + m = re.search(r"\*([^\*]+)\*", line) if m: fields[field_letter] = m.group(1) elif subsystems: @@ -114,7 +112,7 @@ class MaintainersInclude(Include): field_content = "" # Collapse whitespace in subsystem name. - heading = re.sub("\s+", " ", line) + heading = re.sub(r"\s+", " ", line) output = output + "%s\n%s" % (heading, "~" * len(heading)) field_prev = "" else: diff --git a/Documentation/sphinx/min_requirements.txt b/Documentation/sphinx/min_requirements.txt new file mode 100644 index 000000000000..96b5e0bfa3d7 --- /dev/null +++ b/Documentation/sphinx/min_requirements.txt @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: GPL-2.0 +alabaster >=0.7,<0.8 +docutils>=0.15,<0.18 +jinja2>=2.3,<3.1 +PyYAML>=5.1,<6.1 +Sphinx==3.4.3 +sphinxcontrib-applehelp==1.0.2 +sphinxcontrib-devhelp==1.0.1 +sphinxcontrib-htmlhelp==1.0.3 +sphinxcontrib-qthelp==1.0.2 +sphinxcontrib-serializinghtml==1.1.4 diff --git a/Documentation/sphinx/parse-headers.pl b/Documentation/sphinx/parse-headers.pl index c518050ffc3f..7b1458544e2e 100755 --- a/Documentation/sphinx/parse-headers.pl +++ b/Documentation/sphinx/parse-headers.pl @@ -1,4 +1,7 @@ -#!/usr/bin/perl +#!/usr/bin/env perl +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2016 by Mauro Carvalho Chehab <mchehab@kernel.org>. + use strict; use Text::Tabs; use Getopt::Long; @@ -110,7 +113,7 @@ while (<IN>) { ) { my $s = $1; - $structs{$s} = "struct :c:type:`$s`\\ "; + $structs{$s} = "struct $s\\ "; next; } } @@ -391,9 +394,9 @@ Report bugs to Mauro Carvalho Chehab <mchehab@kernel.org> =head1 COPYRIGHT -Copyright (c) 2016 by Mauro Carvalho Chehab <mchehab+samsung@kernel.org>. +Copyright (c) 2016 by Mauro Carvalho Chehab <mchehab@kernel.org>. -License GPLv2: GNU GPL version 2 <http://gnu.org/licenses/gpl.html>. +License GPLv2: GNU GPL version 2 <https://gnu.org/licenses/gpl.html>. This is free software: you are free to change and redistribute it. There is NO WARRANTY, to the extent permitted by law. diff --git a/Documentation/sphinx/requirements.txt b/Documentation/sphinx/requirements.txt index 489f6626de67..76b4255061d0 100644 --- a/Documentation/sphinx/requirements.txt +++ b/Documentation/sphinx/requirements.txt @@ -1,3 +1,4 @@ -docutils -Sphinx==2.4.4 -sphinx_rtd_theme +# SPDX-License-Identifier: GPL-2.0 +alabaster +Sphinx +pyyaml diff --git a/Documentation/sphinx/rstFlatTable.py b/Documentation/sphinx/rstFlatTable.py index 2019a55f6b18..3d19569e5728 100755 --- a/Documentation/sphinx/rstFlatTable.py +++ b/Documentation/sphinx/rstFlatTable.py @@ -1,8 +1,9 @@ #!/usr/bin/env python3 # -*- coding: utf-8; mode: python -*- +# SPDX-License-Identifier: GPL-2.0 # pylint: disable=C0330, R0903, R0912 -u""" +""" flat-table ~~~~~~~~~~ @@ -22,7 +23,7 @@ u""" * *auto span* rightmost cell of a table row over the missing cells on the right side of that table-row. With Option ``:fill-cells:`` this behavior - can changed from *auto span* to *auto fill*, which automaticly inserts + can be changed from *auto span* to *auto fill*, which automatically inserts (empty) cells instead of spanning the last cell. 
Options: @@ -42,8 +43,6 @@ u""" # imports # ============================================================================== -import sys - from docutils import nodes from docutils.parsers.rst import directives, roles from docutils.parsers.rst.directives.tables import Table @@ -55,14 +54,6 @@ from docutils.utils import SystemMessagePropagation __version__ = '1.0' -PY3 = sys.version_info[0] == 3 -PY2 = sys.version_info[0] == 2 - -if PY3: - # pylint: disable=C0103, W0622 - unicode = str - basestring = str - # ============================================================================== def setup(app): # ============================================================================== @@ -109,7 +100,7 @@ class colSpan(nodes.General, nodes.Element): pass # pylint: disable=C0103,C0321 class FlatTable(Table): # ============================================================================== - u"""FlatTable (``flat-table``) directive""" + """FlatTable (``flat-table``) directive""" option_spec = { 'name': directives.unchanged @@ -145,7 +136,7 @@ class FlatTable(Table): class ListTableBuilder(object): # ============================================================================== - u"""Builds a table from a double-stage list""" + """Builds a table from a double-stage list""" def __init__(self, directive): self.directive = directive @@ -171,7 +162,7 @@ class ListTableBuilder(object): for colwidth in colwidths: colspec = nodes.colspec(colwidth=colwidth) # FIXME: It seems, that the stub method only works well in the - # absence of rowspan (observed by the html buidler, the docutils-xml + # absence of rowspan (observed by the html builder, the docutils-xml # build seems OK). This is not extraordinary, because there exists # no table directive (except *this* flat-table) which allows to # define coexistent of rowspan and stubs (there was no use-case @@ -222,7 +213,7 @@ class ListTableBuilder(object): raise SystemMessagePropagation(error) def parseFlatTableNode(self, node): - u"""parses the node from a :py:class:`FlatTable` directive's body""" + """parses the node from a :py:class:`FlatTable` directive's body""" if len(node) != 1 or not isinstance(node[0], nodes.bullet_list): self.raiseError( @@ -235,7 +226,7 @@ class ListTableBuilder(object): self.roundOffTableDefinition() def roundOffTableDefinition(self): - u"""Round off the table definition. + """Round off the table definition. This method rounds off the table definition in :py:member:`rows`. 
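
Note: the load_config.py hunk above drops the removed sphinx.util.pycompat.execfile_() helper in favour of an explicit open/compile/exec sequence. The following is a minimal, self-contained sketch of that pattern, assuming a caller that names the extra configuration file in the SPHINX_CONF environment variable; the function and variable names here are illustrative only and are not part of the patch:

    # Minimal sketch of the open/compile/exec pattern used in the
    # load_config.py hunk above; names are illustrative, not the kernel's.
    import os

    def load_extra_config(namespace):
        """Execute an additional Sphinx config file into *namespace*."""
        config_file = os.environ.get("SPHINX_CONF")
        if not config_file or not os.path.isfile(config_file):
            return
        config = namespace.copy()
        config["__file__"] = config_file
        with open(config_file, "rb") as f:
            # compile() attaches a file name to the code object so tracebacks
            # point at the extra configuration file rather than "<string>".
            code = compile(f.read(), config_file, "exec")
        exec(code, config)
        del config["__file__"]
        namespace.update(config)
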
diff --git a/Documentation/sphinx/templates/kernel-toc.html b/Documentation/sphinx/templates/kernel-toc.html new file mode 100644 index 000000000000..41f1efbe64bb --- /dev/null +++ b/Documentation/sphinx/templates/kernel-toc.html @@ -0,0 +1,18 @@ +<!-- SPDX-License-Identifier: GPL-2.0 --> +{# Create a local TOC the kernel way #} +<p> +<h3 class="kernel-toc-contents">Contents</h3> +<input type="checkbox" class="kernel-toc-toggle" id = "kernel-toc-toggle" checked> +<label class="kernel-toc-title" for="kernel-toc-toggle"></label> + +<div class="kerneltoc" id="kerneltoc"> +{{ toctree(maxdepth=3) }} +</div> +{# hacky script to try to position the left column #} +<script type="text/javascript"> <!-- + var sbar = document.getElementsByClassName("sphinxsidebar")[0]; + let currents = document.getElementsByClassName("current") + if (currents.length) { + sbar.scrollTop = currents[currents.length - 1].offsetTop; + } + --> </script> diff --git a/Documentation/sphinx/templates/translations.html b/Documentation/sphinx/templates/translations.html new file mode 100644 index 000000000000..8df5d42d8dcd --- /dev/null +++ b/Documentation/sphinx/templates/translations.html @@ -0,0 +1,15 @@ +<!-- SPDX-License-Identifier: GPL-2.0 --> +<!-- Copyright © 2023, Oracle and/or its affiliates. --> + +{# Create a language menu for translations #} +{% if languages|length > 0: %} +<div class="language-selection"> +{{ current_language }} + +<ul> +{% for ref in languages: %} +<li><a href="{{ ref.refuri }}">{{ ref.astext() }}</a></li> +{% endfor %} +</ul> +</div> +{% endif %} diff --git a/Documentation/sphinx/translations.py b/Documentation/sphinx/translations.py new file mode 100644 index 000000000000..32c2b32b2b5e --- /dev/null +++ b/Documentation/sphinx/translations.py @@ -0,0 +1,99 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Copyright © 2023, Oracle and/or its affiliates. +# Author: Vegard Nossum <vegard.nossum@oracle.com> +# +# Add translation links to the top of the document. 
+# + +import os + +from docutils import nodes +from docutils.transforms import Transform + +import sphinx +from sphinx import addnodes +from sphinx.errors import NoUri + +all_languages = { + # English is always first + None: 'English', + + # Keep the rest sorted alphabetically + 'zh_CN': 'Chinese (Simplified)', + 'zh_TW': 'Chinese (Traditional)', + 'it_IT': 'Italian', + 'ja_JP': 'Japanese', + 'ko_KR': 'Korean', + 'sp_SP': 'Spanish', +} + +class LanguagesNode(nodes.Element): + pass + +class TranslationsTransform(Transform): + default_priority = 900 + + def apply(self): + app = self.document.settings.env.app + docname = self.document.settings.env.docname + + this_lang_code = None + components = docname.split(os.sep) + if components[0] == 'translations' and len(components) > 2: + this_lang_code = components[1] + + # normalize docname to be the untranslated one + docname = os.path.join(*components[2:]) + + new_nodes = LanguagesNode() + new_nodes['current_language'] = all_languages[this_lang_code] + + for lang_code, lang_name in all_languages.items(): + if lang_code == this_lang_code: + continue + + if lang_code is None: + target_name = docname + else: + target_name = os.path.join('translations', lang_code, docname) + + pxref = addnodes.pending_xref('', refdomain='std', + reftype='doc', reftarget='/' + target_name, modname=None, + classname=None, refexplicit=True) + pxref += nodes.Text(lang_name) + new_nodes += pxref + + self.document.insert(0, new_nodes) + +def process_languages(app, doctree, docname): + for node in doctree.traverse(LanguagesNode): + if app.builder.format not in ['html']: + node.parent.remove(node) + continue + + languages = [] + + # Iterate over the child nodes; any resolved links will have + # the type 'nodes.reference', while unresolved links will be + # type 'nodes.Text'. + languages = list(filter(lambda xref: + isinstance(xref, nodes.reference), node.children)) + + html_content = app.builder.templates.render('translations.html', + context={ + 'current_language': node['current_language'], + 'languages': languages, + }) + + node.replace_self(nodes.raw('', html_content, format='html')) + +def setup(app): + app.add_node(LanguagesNode) + app.add_transform(TranslationsTransform) + app.connect('doctree-resolved', process_languages) + + return { + 'parallel_read_safe': True, + 'parallel_write_safe': True, + } |
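
Note: the new translations.py extension renders its language menu through the builder's template bridge, so templates/translations.html must be on Sphinx's template search path. A hedged sketch of how a conf.py might wire up the extensions touched by this patch follows; the kernel's real conf.py is not part of the diff, so every path and extension name below is an assumption for illustration:

    # Hypothetical conf.py excerpt; the actual kernel configuration is not
    # shown in this patch, so paths and extension names are assumptions.
    import os
    import sys

    # Make Documentation/sphinx importable so the extensions below are found.
    sys.path.insert(0, os.path.abspath("sphinx"))

    extensions = [
        "kfigure",        # kernel-figure / kernel-image / kernel-render
        "rstFlatTable",   # flat-table directive
        "translations",   # per-document language links
    ]

    # translations.py calls app.builder.templates.render('translations.html', ...),
    # so the directory holding that template must be on the template search path.
    templates_path = [os.path.abspath("sphinx/templates")]

Note also that process_languages() keeps only the cross-references that actually resolved (nodes.reference children), so a document lacking a given translation simply omits that language from the rendered menu.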