Diffstat (limited to 'Documentation/sphinx')
-rw-r--r--   Documentation/sphinx/automarkup.py            262
-rw-r--r--   Documentation/sphinx/cdomain.py               259
-rw-r--r--   Documentation/sphinx/kernel_abi.py            194
-rwxr-xr-x   Documentation/sphinx/kernel_include.py        190
-rw-r--r--   Documentation/sphinx/kerneldoc.py             190
-rw-r--r--   Documentation/sphinx/kernellog.py              32
-rw-r--r--   Documentation/sphinx/kfigure.py               557
-rw-r--r--   Documentation/sphinx/load_config.py            59
-rwxr-xr-x   Documentation/sphinx/maintainers_include.py   197
-rw-r--r--   Documentation/sphinx/parallel-wrapper.sh       33
-rwxr-xr-x   Documentation/sphinx/parse-headers.pl         401
-rw-r--r--   Documentation/sphinx/requirements.txt           3
-rwxr-xr-x   Documentation/sphinx/rstFlatTable.py          374
13 files changed, 2751 insertions, 0 deletions
diff --git a/Documentation/sphinx/automarkup.py b/Documentation/sphinx/automarkup.py
new file mode 100644
index 000000000..3e81ebab2
--- /dev/null
+++ b/Documentation/sphinx/automarkup.py
@@ -0,0 +1,262 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright 2019 Jonathan Corbet <corbet@lwn.net>
+#
+# Apply kernel-specific tweaks after the initial document processing
+# has been done.
+#
+from docutils import nodes
+import sphinx
+from sphinx import addnodes
+if sphinx.version_info[0] < 2 or \
+ sphinx.version_info[0] == 2 and sphinx.version_info[1] < 1:
+ from sphinx.environment import NoUri
+else:
+ from sphinx.errors import NoUri
+import re
+from itertools import chain
+
+#
+# Python 2 lacks re.ASCII...
+#
+try:
+ ascii_p3 = re.ASCII
+except AttributeError:
+ ascii_p3 = 0
+
+#
+# Regex nastiness. Of course.
+# Try to identify "function()" that's not already marked up some
+# other way. Sphinx doesn't like a lot of stuff right after a
+# :c:func: block (i.e. ":c:func:`mmap()`s" flakes out), so the last
+# bit tries to restrict matches to things that won't create trouble.
+#
+RE_function = re.compile(r'\b(([a-zA-Z_]\w+)\(\))', flags=ascii_p3)
+
+#
+# Sphinx 2 uses the same :c:type role for struct, union, enum and typedef
+#
+RE_generic_type = re.compile(r'\b(struct|union|enum|typedef)\s+([a-zA-Z_]\w+)',
+ flags=ascii_p3)
+
+#
+# Sphinx 3 uses a different C role for each one of struct, union, enum and
+# typedef
+#
+RE_struct = re.compile(r'\b(struct)\s+([a-zA-Z_]\w+)', flags=ascii_p3)
+RE_union = re.compile(r'\b(union)\s+([a-zA-Z_]\w+)', flags=ascii_p3)
+RE_enum = re.compile(r'\b(enum)\s+([a-zA-Z_]\w+)', flags=ascii_p3)
+RE_typedef = re.compile(r'\b(typedef)\s+([a-zA-Z_]\w+)', flags=ascii_p3)
+
+#
+# Detects a reference to a documentation page of the form Documentation/... with
+# an optional extension
+#
+RE_doc = re.compile(r'\bDocumentation(/[\w\-_/]+)(\.\w+)*')
+
+#
+# Reserved C words that we should skip when cross-referencing
+#
+Skipnames = [ 'for', 'if', 'register', 'sizeof', 'struct', 'unsigned' ]
+
+
+#
+# Many places in the docs refer to common system calls. It is
+# pointless to try to cross-reference them and, as has been known
+# to happen, a function defined with one of these names can lead
+# to the creation of incorrect and confusing cross references. So
+# just don't even try with these names.
+#
+Skipfuncs = [ 'open', 'close', 'read', 'write', 'fcntl', 'mmap',
+ 'select', 'poll', 'fork', 'execve', 'clone', 'ioctl',
+ 'socket' ]
+
+def markup_refs(docname, app, node):
+ t = node.astext()
+ done = 0
+ repl = [ ]
+ #
+ # Associate each regex with the function that will markup its matches
+ #
+ markup_func_sphinx2 = {RE_doc: markup_doc_ref,
+ RE_function: markup_c_ref,
+ RE_generic_type: markup_c_ref}
+
+ markup_func_sphinx3 = {RE_doc: markup_doc_ref,
+ RE_function: markup_func_ref_sphinx3,
+ RE_struct: markup_c_ref,
+ RE_union: markup_c_ref,
+ RE_enum: markup_c_ref,
+ RE_typedef: markup_c_ref}
+
+ if sphinx.version_info[0] >= 3:
+ markup_func = markup_func_sphinx3
+ else:
+ markup_func = markup_func_sphinx2
+
+ match_iterators = [regex.finditer(t) for regex in markup_func]
+ #
+ # Sort all references by the starting position in text
+ #
+ sorted_matches = sorted(chain(*match_iterators), key=lambda m: m.start())
+ for m in sorted_matches:
+ #
+ # Include any text prior to match as a normal text node.
+ #
+ if m.start() > done:
+ repl.append(nodes.Text(t[done:m.start()]))
+
+ #
+ # Call the function associated with the regex that matched this text and
+ # append its return to the text
+ #
+ repl.append(markup_func[m.re](docname, app, m))
+
+ done = m.end()
+ if done < len(t):
+ repl.append(nodes.Text(t[done:]))
+ return repl
+
+#
+# In Sphinx 3 we can cross-reference C macros and functions, each with its
+# own C role, but both match the same regex, so we try both.
+#
+def markup_func_ref_sphinx3(docname, app, match):
+ class_str = ['c-func', 'c-macro']
+ reftype_str = ['function', 'macro']
+
+ cdom = app.env.domains['c']
+ #
+ # Go through the dance of getting an xref out of the C domain
+ #
+ target = match.group(2)
+ target_text = nodes.Text(match.group(0))
+ xref = None
+ if not (target in Skipfuncs or target in Skipnames):
+ for class_s, reftype_s in zip(class_str, reftype_str):
+ lit_text = nodes.literal(classes=['xref', 'c', class_s])
+ lit_text += target_text
+ pxref = addnodes.pending_xref('', refdomain = 'c',
+ reftype = reftype_s,
+ reftarget = target, modname = None,
+ classname = None)
+ #
+ # XXX The Latex builder will throw NoUri exceptions here,
+ # work around that by ignoring them.
+ #
+ try:
+ xref = cdom.resolve_xref(app.env, docname, app.builder,
+ reftype_s, target, pxref,
+ lit_text)
+ except NoUri:
+ xref = None
+
+ if xref:
+ return xref
+
+ return target_text
+
+def markup_c_ref(docname, app, match):
+ class_str = {# Sphinx 2 only
+ RE_function: 'c-func',
+ RE_generic_type: 'c-type',
+ # Sphinx 3+ only
+ RE_struct: 'c-struct',
+ RE_union: 'c-union',
+ RE_enum: 'c-enum',
+ RE_typedef: 'c-type',
+ }
+ reftype_str = {# Sphinx 2 only
+ RE_function: 'function',
+ RE_generic_type: 'type',
+ # Sphinx 3+ only
+ RE_struct: 'struct',
+ RE_union: 'union',
+ RE_enum: 'enum',
+ RE_typedef: 'type',
+ }
+
+ cdom = app.env.domains['c']
+ #
+ # Go through the dance of getting an xref out of the C domain
+ #
+ target = match.group(2)
+ target_text = nodes.Text(match.group(0))
+ xref = None
+ if not ((match.re == RE_function and target in Skipfuncs)
+ or (target in Skipnames)):
+ lit_text = nodes.literal(classes=['xref', 'c', class_str[match.re]])
+ lit_text += target_text
+ pxref = addnodes.pending_xref('', refdomain = 'c',
+ reftype = reftype_str[match.re],
+ reftarget = target, modname = None,
+ classname = None)
+ #
+ # XXX The Latex builder will throw NoUri exceptions here,
+ # work around that by ignoring them.
+ #
+ try:
+ xref = cdom.resolve_xref(app.env, docname, app.builder,
+ reftype_str[match.re], target, pxref,
+ lit_text)
+ except NoUri:
+ xref = None
+ #
+ # Return the xref if we got it; otherwise just return the plain text.
+ #
+ if xref:
+ return xref
+ else:
+ return target_text
+
+#
+# Try to replace a documentation reference of the form Documentation/... with a
+# cross reference to that page
+#
+def markup_doc_ref(docname, app, match):
+ stddom = app.env.domains['std']
+ #
+ # Go through the dance of getting an xref out of the std domain
+ #
+ target = match.group(1)
+ xref = None
+ pxref = addnodes.pending_xref('', refdomain = 'std', reftype = 'doc',
+ reftarget = target, modname = None,
+ classname = None, refexplicit = False)
+ #
+ # XXX The Latex builder will throw NoUri exceptions here,
+ # work around that by ignoring them.
+ #
+ try:
+ xref = stddom.resolve_xref(app.env, docname, app.builder, 'doc',
+ target, pxref, None)
+ except NoUri:
+ xref = None
+ #
+ # Return the xref if we got it; otherwise just return the plain text.
+ #
+ if xref:
+ return xref
+ else:
+ return nodes.Text(match.group(0))
+
+def auto_markup(app, doctree, name):
+ #
+ # This loop could eventually be improved on. Someday maybe we
+ # want a proper tree traversal with a lot of awareness of which
+ # kinds of nodes to prune. But this works well for now.
+ #
+    # The nodes.literal test catches ``literal text``; its purpose is to
+    # avoid adding cross-references to functions that have been explicitly
+    # marked with :c:func:.
+ #
+ for para in doctree.traverse(nodes.paragraph):
+ for node in para.traverse(nodes.Text):
+ if not isinstance(node.parent, nodes.literal):
+ node.parent.replace(node, markup_refs(name, app, node))
+
+def setup(app):
+ app.connect('doctree-resolved', auto_markup)
+ return {
+ 'parallel_read_safe': True,
+ 'parallel_write_safe': True,
+ }
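
As a rough standalone illustration of what the regexes above pick up, the
following sketch runs them outside Sphinx on an invented sample string (the
function name and document path are examples only)::

    import re

    ascii_p3 = re.ASCII
    RE_function = re.compile(r'\b(([a-zA-Z_]\w+)\(\))', flags=ascii_p3)
    RE_doc = re.compile(r'\bDocumentation(/[\w\-_/]+)(\.\w+)*')

    sample = "See vmalloc() and Documentation/core-api/memory-allocation.rst"
    print([m.group(0) for m in RE_function.finditer(sample)])
    # -> ['vmalloc()']
    print([m.group(0) for m in RE_doc.finditer(sample)])
    # -> ['Documentation/core-api/memory-allocation.rst']
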
diff --git a/Documentation/sphinx/cdomain.py b/Documentation/sphinx/cdomain.py
new file mode 100644
index 000000000..014a5229e
--- /dev/null
+++ b/Documentation/sphinx/cdomain.py
@@ -0,0 +1,259 @@
+# -*- coding: utf-8; mode: python -*-
+# pylint: disable=W0141,C0113,C0103,C0325
+u"""
+ cdomain
+ ~~~~~~~
+
+ Replacement for the sphinx c-domain.
+
+ :copyright: Copyright (C) 2016 Markus Heiser
+ :license: GPL Version 2, June 1991 see Linux/COPYING for details.
+
+ List of customizations:
+
+    * Moved the *duplicate C object description* warnings for function
+      declarations into the nitpicky mode. See the Sphinx documentation for
+      the config values ``nitpick`` and ``nitpick_ignore``.
+
+ * Add option 'name' to the "c:function:" directive. With option 'name' the
+ ref-name of a function can be modified. E.g.::
+
+ .. c:function:: int ioctl( int fd, int request )
+ :name: VIDIOC_LOG_STATUS
+
+      The func-name (e.g. ioctl) remains in the output but the ref-name changes
+      from 'ioctl' to 'VIDIOC_LOG_STATUS'. The function is referenced by::
+
+ * :c:func:`VIDIOC_LOG_STATUS` or
+ * :any:`VIDIOC_LOG_STATUS` (``:any:`` needs sphinx 1.3)
+
+    * Handle signatures of function-like macros well. Don't try to deduce
+      the argument types of function-like macros.
+
+"""
+
+from docutils import nodes
+from docutils.parsers.rst import directives
+
+import sphinx
+from sphinx import addnodes
+from sphinx.domains.c import c_funcptr_sig_re, c_sig_re
+from sphinx.domains.c import CObject as Base_CObject
+from sphinx.domains.c import CDomain as Base_CDomain
+from itertools import chain
+import re
+
+__version__ = '1.1'
+
+# Get Sphinx version
+major, minor, patch = sphinx.version_info[:3]
+
+# Namespace to be prepended to the full name
+namespace = None
+
+#
+# Handle trivial newer C domain tags that are part of the Sphinx 3.1 C domain
+# - Store the namespace if ".. c:namespace::" tag is found
+#
+RE_namespace = re.compile(r'^\s*..\s*c:namespace::\s*(\S+)\s*$')
+
+def markup_namespace(match):
+ global namespace
+
+ namespace = match.group(1)
+
+ return ""
+
+#
+# Handle c:macro for function-style declaration
+#
+RE_macro = re.compile(r'^\s*..\s*c:macro::\s*(\S+)\s+(\S.*)\s*$')
+def markup_macro(match):
+ return ".. c:function:: " + match.group(1) + ' ' + match.group(2)
+
+#
+# Handle newer c domain tags that are evaluated as .. c:type: for
+# backward-compatibility with Sphinx < 3.0
+#
+RE_ctype = re.compile(r'^\s*..\s*c:(struct|union|enum|enumerator|alias)::\s*(.*)$')
+
+def markup_ctype(match):
+ return ".. c:type:: " + match.group(2)
+
+#
+# Handle newer c domain tags that are evaluated as :c:type: for
+# backward-compatibility with Sphinx < 3.0
+#
+RE_ctype_refs = re.compile(r':c:(var|struct|union|enum|enumerator)::`([^\`]+)`')
+def markup_ctype_refs(match):
+ return ":c:type:`" + match.group(2) + '`'
+
+#
+# Simply convert :c:expr: and :c:texpr: into a literal block.
+#
+RE_expr = re.compile(r':c:(expr|texpr):`([^\`]+)`')
+def markup_c_expr(match):
+ return '\ ``' + match.group(2) + '``\ '
+
+#
+# Parse Sphinx 3.x C markups, replacing them by backward-compatible ones
+#
+def c_markups(app, docname, source):
+ result = ""
+ markup_func = {
+ RE_namespace: markup_namespace,
+ RE_expr: markup_c_expr,
+ RE_macro: markup_macro,
+ RE_ctype: markup_ctype,
+ RE_ctype_refs: markup_ctype_refs,
+ }
+
+ lines = iter(source[0].splitlines(True))
+ for n in lines:
+ match_iterators = [regex.finditer(n) for regex in markup_func]
+ matches = sorted(chain(*match_iterators), key=lambda m: m.start())
+ for m in matches:
+ n = n[:m.start()] + markup_func[m.re](m) + n[m.end():]
+
+ result = result + n
+
+ source[0] = result
+
+#
+# Now implements support for the cdomain namespacing logic
+#
+
+def setup(app):
+
+ # Handle easy Sphinx 3.1+ simple new tags: :c:expr and .. c:namespace::
+ app.connect('source-read', c_markups)
+
+ if (major == 1 and minor < 8):
+ app.override_domain(CDomain)
+ else:
+ app.add_domain(CDomain, override=True)
+
+ return dict(
+ version = __version__,
+ parallel_read_safe = True,
+ parallel_write_safe = True
+ )
+
+class CObject(Base_CObject):
+
+ """
+ Description of a C language object.
+ """
+ option_spec = {
+ "name" : directives.unchanged
+ }
+
+ def handle_func_like_macro(self, sig, signode):
+ u"""Handles signatures of function-like macros.
+
+        If the objtype is 'function' and the signature ``sig`` is a
+ function-like macro, the name of the macro is returned. Otherwise
+ ``False`` is returned. """
+
+ global namespace
+
+ if not self.objtype == 'function':
+ return False
+
+ m = c_funcptr_sig_re.match(sig)
+ if m is None:
+ m = c_sig_re.match(sig)
+ if m is None:
+ raise ValueError('no match')
+
+ rettype, fullname, arglist, _const = m.groups()
+ arglist = arglist.strip()
+ if rettype or not arglist:
+ return False
+
+ arglist = arglist.replace('`', '').replace('\\ ', '') # remove markup
+ arglist = [a.strip() for a in arglist.split(",")]
+
+        # does the first argument have a type?
+ if len(arglist[0].split(" ")) > 1:
+ return False
+
+        # This is a function-like macro; its arguments are typeless!
+ signode += addnodes.desc_name(fullname, fullname)
+ paramlist = addnodes.desc_parameterlist()
+ signode += paramlist
+
+ for argname in arglist:
+ param = addnodes.desc_parameter('', '', noemph=True)
+ # separate by non-breaking space in the output
+ param += nodes.emphasis(argname, argname)
+ paramlist += param
+
+ if namespace:
+ fullname = namespace + "." + fullname
+
+ return fullname
+
+ def handle_signature(self, sig, signode):
+ """Transform a C signature into RST nodes."""
+
+ global namespace
+
+ fullname = self.handle_func_like_macro(sig, signode)
+ if not fullname:
+ fullname = super(CObject, self).handle_signature(sig, signode)
+
+ if "name" in self.options:
+ if self.objtype == 'function':
+ fullname = self.options["name"]
+ else:
+ # FIXME: handle :name: value of other declaration types?
+ pass
+ else:
+ if namespace:
+ fullname = namespace + "." + fullname
+
+ return fullname
+
+ def add_target_and_index(self, name, sig, signode):
+ # for C API items we add a prefix since names are usually not qualified
+ # by a module name and so easily clash with e.g. section titles
+ targetname = 'c.' + name
+ if targetname not in self.state.document.ids:
+ signode['names'].append(targetname)
+ signode['ids'].append(targetname)
+ signode['first'] = (not self.names)
+ self.state.document.note_explicit_target(signode)
+ inv = self.env.domaindata['c']['objects']
+ if (name in inv and self.env.config.nitpicky):
+ if self.objtype == 'function':
+ if ('c:func', name) not in self.env.config.nitpick_ignore:
+ self.state_machine.reporter.warning(
+ 'duplicate C object description of %s, ' % name +
+ 'other instance in ' + self.env.doc2path(inv[name][0]),
+ line=self.lineno)
+ inv[name] = (self.env.docname, self.objtype)
+
+ indextext = self.get_index_text(name)
+ if indextext:
+ if major == 1 and minor < 4:
+ # indexnode's tuple changed in 1.4
+ # https://github.com/sphinx-doc/sphinx/commit/e6a5a3a92e938fcd75866b4227db9e0524d58f7c
+ self.indexnode['entries'].append(
+ ('single', indextext, targetname, ''))
+ else:
+ self.indexnode['entries'].append(
+ ('single', indextext, targetname, '', None))
+
+class CDomain(Base_CDomain):
+
+ """C language domain."""
+ name = 'c'
+ label = 'C'
+ directives = {
+ 'function': CObject,
+ 'member': CObject,
+ 'macro': CObject,
+ 'type': CObject,
+ 'var': CObject,
+ }
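
A standalone sketch of the rewriting that c_markups() performs on source
lines; the macro name and struct name below are invented for illustration::

    import re

    RE_macro = re.compile(r'^\s*..\s*c:macro::\s*(\S+)\s+(\S.*)\s*$')
    RE_ctype_refs = re.compile(r':c:(var|struct|union|enum|enumerator)::`([^\`]+)`')

    line = ".. c:macro:: EXAMPLE_IOCTL int request"
    m = RE_macro.match(line)
    print(".. c:function:: " + m.group(1) + ' ' + m.group(2))
    # -> .. c:function:: EXAMPLE_IOCTL int request

    print(RE_ctype_refs.sub(lambda m: ":c:type:`" + m.group(2) + '`',
                            "see :c:struct::`example_dev`"))
    # -> see :c:type:`example_dev`
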
diff --git a/Documentation/sphinx/kernel_abi.py b/Documentation/sphinx/kernel_abi.py
new file mode 100644
index 000000000..f3da859c9
--- /dev/null
+++ b/Documentation/sphinx/kernel_abi.py
@@ -0,0 +1,194 @@
+# -*- coding: utf-8; mode: python -*-
+# coding=utf-8
+# SPDX-License-Identifier: GPL-2.0
+#
+u"""
+ kernel-abi
+ ~~~~~~~~~~
+
+ Implementation of the ``kernel-abi`` reST-directive.
+
+ :copyright: Copyright (C) 2016 Markus Heiser
+ :copyright: Copyright (C) 2016-2020 Mauro Carvalho Chehab
+ :maintained-by: Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+ :license: GPL Version 2, June 1991 see Linux/COPYING for details.
+
+ The ``kernel-abi`` (:py:class:`KernelCmd`) directive calls the
+ scripts/get_abi.pl script to parse the Kernel ABI files.
+
+    Overview of the directive's argument and options.
+
+ .. code-block:: rst
+
+ .. kernel-abi:: <ABI directory location>
+ :debug:
+
+ The argument ``<ABI directory location>`` is required. It contains the
+ location of the ABI files to be parsed.
+
+ ``debug``
+ Inserts a code-block with the *raw* reST. Sometimes it is helpful to see
+ what reST is generated.
+
+"""
+
+import codecs
+import os
+import subprocess
+import sys
+import re
+import kernellog
+
+from os import path
+
+from docutils import nodes, statemachine
+from docutils.statemachine import ViewList
+from docutils.parsers.rst import directives, Directive
+from docutils.utils.error_reporting import ErrorString
+
+#
+# AutodocReporter is only good up to Sphinx 1.7
+#
+import sphinx
+
+Use_SSI = sphinx.__version__[:3] >= '1.7'
+if Use_SSI:
+ from sphinx.util.docutils import switch_source_input
+else:
+ from sphinx.ext.autodoc import AutodocReporter
+
+__version__ = '1.0'
+
+def setup(app):
+
+ app.add_directive("kernel-abi", KernelCmd)
+ return dict(
+ version = __version__
+ , parallel_read_safe = True
+ , parallel_write_safe = True
+ )
+
+class KernelCmd(Directive):
+
+ u"""KernelABI (``kernel-abi``) directive"""
+
+ required_arguments = 1
+ optional_arguments = 2
+ has_content = False
+ final_argument_whitespace = True
+
+ option_spec = {
+ "debug" : directives.flag,
+ "rst" : directives.unchanged
+ }
+
+ def run(self):
+
+ doc = self.state.document
+ if not doc.settings.file_insertion_enabled:
+ raise self.warning("docutils: file insertion disabled")
+
+ env = doc.settings.env
+ cwd = path.dirname(doc.current_source)
+ cmd = "get_abi.pl rest --enable-lineno --dir "
+ cmd += self.arguments[0]
+
+ if 'rst' in self.options:
+ cmd += " --rst-source"
+
+ srctree = path.abspath(os.environ["srctree"])
+
+ fname = cmd
+
+ # extend PATH with $(srctree)/scripts
+ path_env = os.pathsep.join([
+ srctree + os.sep + "scripts",
+ os.environ["PATH"]
+ ])
+ shell_env = os.environ.copy()
+ shell_env["PATH"] = path_env
+ shell_env["srctree"] = srctree
+
+ lines = self.runCmd(cmd, shell=True, cwd=cwd, env=shell_env)
+ nodeList = self.nestedParse(lines, self.arguments[0])
+ return nodeList
+
+ def runCmd(self, cmd, **kwargs):
+        u"""Run command ``cmd`` and return its stdout as unicode."""
+
+ try:
+ proc = subprocess.Popen(
+ cmd
+ , stdout = subprocess.PIPE
+ , stderr = subprocess.PIPE
+ , **kwargs
+ )
+ out, err = proc.communicate()
+
+ out, err = codecs.decode(out, 'utf-8'), codecs.decode(err, 'utf-8')
+
+ if proc.returncode != 0:
+ raise self.severe(
+ u"command '%s' failed with return code %d"
+ % (cmd, proc.returncode)
+ )
+ except OSError as exc:
+ raise self.severe(u"problems with '%s' directive: %s."
+ % (self.name, ErrorString(exc)))
+ return out
+
+ def nestedParse(self, lines, fname):
+ content = ViewList()
+ node = nodes.section()
+
+ if "debug" in self.options:
+ code_block = "\n\n.. code-block:: rst\n :linenos:\n"
+ for l in lines.split("\n"):
+ code_block += "\n " + l
+ lines = code_block + "\n\n"
+
+ line_regex = re.compile("^#define LINENO (\S+)\#([0-9]+)$")
+ ln = 0
+ n = 0
+ f = fname
+
+ for line in lines.split("\n"):
+ n = n + 1
+ match = line_regex.search(line)
+ if match:
+ new_f = match.group(1)
+
+                # The Sphinx parser is lazy: it stops parsing content in the
+                # middle if it is too big. So, handle it per input file
+ if new_f != f and content:
+ self.do_parse(content, node)
+ content = ViewList()
+
+ f = new_f
+
+ # sphinx counts lines from 0
+ ln = int(match.group(2)) - 1
+ else:
+ content.append(line, f, ln)
+
+ kernellog.info(self.state.document.settings.env.app, "%s: parsed %i lines" % (fname, n))
+
+ if content:
+ self.do_parse(content, node)
+
+ return node.children
+
+ def do_parse(self, content, node):
+ if Use_SSI:
+ with switch_source_input(self.state, content):
+ self.state.nested_parse(content, 0, node, match_titles=1)
+ else:
+ buf = self.state.memo.title_styles, self.state.memo.section_level, self.state.memo.reporter
+
+ self.state.memo.title_styles = []
+ self.state.memo.section_level = 0
+ self.state.memo.reporter = AutodocReporter(content, self.state.memo.reporter)
+ try:
+ self.state.nested_parse(content, 0, node, match_titles=1)
+ finally:
+ self.state.memo.title_styles, self.state.memo.section_level, self.state.memo.reporter = buf
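
A standalone sketch of how the ``#define LINENO`` markers emitted by
get_abi.pl are consumed above; the ABI file path and content are invented for
illustration::

    import re

    line_regex = re.compile(r"^#define LINENO (\S+)\#([0-9]+)$")
    sample = [
        "#define LINENO Documentation/ABI/testing/sysfs-example#42",
        "What:  /sys/class/example/value",
    ]
    for line in sample:
        match = line_regex.search(line)
        if match:
            # switch source file; Sphinx counts lines from 0
            print("file:", match.group(1), "line:", int(match.group(2)) - 1)
        else:
            print("content:", line)
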
diff --git a/Documentation/sphinx/kernel_include.py b/Documentation/sphinx/kernel_include.py
new file mode 100755
index 000000000..f523aa68a
--- /dev/null
+++ b/Documentation/sphinx/kernel_include.py
@@ -0,0 +1,190 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8; mode: python -*-
+# pylint: disable=R0903, C0330, R0914, R0912, E0401
+
+u"""
+ kernel-include
+ ~~~~~~~~~~~~~~
+
+ Implementation of the ``kernel-include`` reST-directive.
+
+ :copyright: Copyright (C) 2016 Markus Heiser
+ :license: GPL Version 2, June 1991 see linux/COPYING for details.
+
+ The ``kernel-include`` reST-directive is a replacement for the ``include``
+    directive. The ``kernel-include`` directive expands environment variables in
+    the path name and allows including files from arbitrary locations.
+
+ .. hint::
+
+ Including files from arbitrary locations (e.g. from ``/etc``) is a
+ security risk for builders. This is why the ``include`` directive from
+      docutils *prohibits* pathnames pointing to locations *above* the filesystem
+ tree where the reST document with the include directive is placed.
+
+ Substrings of the form $name or ${name} are replaced by the value of
+ environment variable name. Malformed variable names and references to
+ non-existing variables are left unchanged.
+"""
+
+# ==============================================================================
+# imports
+# ==============================================================================
+
+import os.path
+
+from docutils import io, nodes, statemachine
+from docutils.utils.error_reporting import SafeString, ErrorString
+from docutils.parsers.rst import directives
+from docutils.parsers.rst.directives.body import CodeBlock, NumberLines
+from docutils.parsers.rst.directives.misc import Include
+
+__version__ = '1.0'
+
+# ==============================================================================
+def setup(app):
+# ==============================================================================
+
+ app.add_directive("kernel-include", KernelInclude)
+ return dict(
+ version = __version__,
+ parallel_read_safe = True,
+ parallel_write_safe = True
+ )
+
+# ==============================================================================
+class KernelInclude(Include):
+# ==============================================================================
+
+ u"""KernelInclude (``kernel-include``) directive"""
+
+ def run(self):
+ path = os.path.realpath(
+ os.path.expandvars(self.arguments[0]))
+
+ # to get a bit security back, prohibit /etc:
+ if path.startswith(os.sep + "etc"):
+ raise self.severe(
+ 'Problems with "%s" directive, prohibited path: %s'
+ % (self.name, path))
+
+ self.arguments[0] = path
+
+ #return super(KernelInclude, self).run() # won't work, see HINTs in _run()
+ return self._run()
+
+ def _run(self):
+ """Include a file as part of the content of this reST file."""
+
+        # HINT: I had to copy&paste the whole Include.run method. I'm not happy
+ # with this, but due to security reasons, the Include.run method does
+ # not allow absolute or relative pathnames pointing to locations *above*
+ # the filesystem tree where the reST document is placed.
+
+ if not self.state.document.settings.file_insertion_enabled:
+ raise self.warning('"%s" directive disabled.' % self.name)
+ source = self.state_machine.input_lines.source(
+ self.lineno - self.state_machine.input_offset - 1)
+ source_dir = os.path.dirname(os.path.abspath(source))
+ path = directives.path(self.arguments[0])
+ if path.startswith('<') and path.endswith('>'):
+ path = os.path.join(self.standard_include_path, path[1:-1])
+ path = os.path.normpath(os.path.join(source_dir, path))
+
+ # HINT: this is the only line I had to change / commented out:
+ #path = utils.relative_path(None, path)
+
+ path = nodes.reprunicode(path)
+ encoding = self.options.get(
+ 'encoding', self.state.document.settings.input_encoding)
+ e_handler=self.state.document.settings.input_encoding_error_handler
+ tab_width = self.options.get(
+ 'tab-width', self.state.document.settings.tab_width)
+ try:
+ self.state.document.settings.record_dependencies.add(path)
+ include_file = io.FileInput(source_path=path,
+ encoding=encoding,
+ error_handler=e_handler)
+ except UnicodeEncodeError as error:
+ raise self.severe('Problems with "%s" directive path:\n'
+ 'Cannot encode input file path "%s" '
+ '(wrong locale?).' %
+ (self.name, SafeString(path)))
+ except IOError as error:
+ raise self.severe('Problems with "%s" directive path:\n%s.' %
+ (self.name, ErrorString(error)))
+ startline = self.options.get('start-line', None)
+ endline = self.options.get('end-line', None)
+ try:
+ if startline or (endline is not None):
+ lines = include_file.readlines()
+ rawtext = ''.join(lines[startline:endline])
+ else:
+ rawtext = include_file.read()
+ except UnicodeError as error:
+ raise self.severe('Problem with "%s" directive:\n%s' %
+ (self.name, ErrorString(error)))
+ # start-after/end-before: no restrictions on newlines in match-text,
+ # and no restrictions on matching inside lines vs. line boundaries
+ after_text = self.options.get('start-after', None)
+ if after_text:
+ # skip content in rawtext before *and incl.* a matching text
+ after_index = rawtext.find(after_text)
+ if after_index < 0:
+ raise self.severe('Problem with "start-after" option of "%s" '
+ 'directive:\nText not found.' % self.name)
+ rawtext = rawtext[after_index + len(after_text):]
+ before_text = self.options.get('end-before', None)
+ if before_text:
+ # skip content in rawtext after *and incl.* a matching text
+ before_index = rawtext.find(before_text)
+ if before_index < 0:
+ raise self.severe('Problem with "end-before" option of "%s" '
+ 'directive:\nText not found.' % self.name)
+ rawtext = rawtext[:before_index]
+
+ include_lines = statemachine.string2lines(rawtext, tab_width,
+ convert_whitespace=True)
+ if 'literal' in self.options:
+ # Convert tabs to spaces, if `tab_width` is positive.
+ if tab_width >= 0:
+ text = rawtext.expandtabs(tab_width)
+ else:
+ text = rawtext
+ literal_block = nodes.literal_block(rawtext, source=path,
+ classes=self.options.get('class', []))
+ literal_block.line = 1
+ self.add_name(literal_block)
+ if 'number-lines' in self.options:
+ try:
+ startline = int(self.options['number-lines'] or 1)
+ except ValueError:
+ raise self.error(':number-lines: with non-integer '
+ 'start value')
+ endline = startline + len(include_lines)
+ if text.endswith('\n'):
+ text = text[:-1]
+ tokens = NumberLines([([], text)], startline, endline)
+ for classes, value in tokens:
+ if classes:
+ literal_block += nodes.inline(value, value,
+ classes=classes)
+ else:
+ literal_block += nodes.Text(value, value)
+ else:
+ literal_block += nodes.Text(text, text)
+ return [literal_block]
+ if 'code' in self.options:
+ self.options['source'] = path
+ codeblock = CodeBlock(self.name,
+ [self.options.pop('code')], # arguments
+ self.options,
+ include_lines, # content
+ self.lineno,
+ self.content_offset,
+ self.block_text,
+ self.state,
+ self.state_machine)
+ return codeblock.run()
+ self.state_machine.insert_input(include_lines, path)
+ return []
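
A standalone sketch of the path handling ``kernel-include`` performs before
delegating to the stock Include machinery; ``$BUILDDIR`` and the file name are
invented for illustration::

    import os

    os.environ.setdefault("BUILDDIR", "/tmp/example-build")
    path = os.path.realpath(os.path.expandvars("$BUILDDIR/videodev2.h.rst"))
    print(path)
    # paths under /etc would be rejected with a severe error:
    print(path.startswith(os.sep + "etc"))
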
diff --git a/Documentation/sphinx/kerneldoc.py b/Documentation/sphinx/kerneldoc.py
new file mode 100644
index 000000000..e9857ab90
--- /dev/null
+++ b/Documentation/sphinx/kerneldoc.py
@@ -0,0 +1,190 @@
+# coding=utf-8
+#
+# Copyright © 2016 Intel Corporation
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice (including the next
+# paragraph) shall be included in all copies or substantial portions of the
+# Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+# Authors:
+# Jani Nikula <jani.nikula@intel.com>
+#
+# Please make sure this works on both python2 and python3.
+#
+
+import codecs
+import os
+import subprocess
+import sys
+import re
+import glob
+
+from docutils import nodes, statemachine
+from docutils.statemachine import ViewList
+from docutils.parsers.rst import directives, Directive
+
+#
+# AutodocReporter is only good up to Sphinx 1.7
+#
+import sphinx
+
+Use_SSI = sphinx.__version__[:3] >= '1.7'
+if Use_SSI:
+ from sphinx.util.docutils import switch_source_input
+else:
+ from sphinx.ext.autodoc import AutodocReporter
+
+import kernellog
+
+__version__ = '1.0'
+
+class KernelDocDirective(Directive):
+ """Extract kernel-doc comments from the specified file"""
+    required_arguments = 1
+ optional_arguments = 4
+ option_spec = {
+ 'doc': directives.unchanged_required,
+ 'export': directives.unchanged,
+ 'internal': directives.unchanged,
+ 'identifiers': directives.unchanged,
+ 'no-identifiers': directives.unchanged,
+ 'functions': directives.unchanged,
+ }
+ has_content = False
+
+ def run(self):
+ env = self.state.document.settings.env
+ cmd = [env.config.kerneldoc_bin, '-rst', '-enable-lineno']
+
+ # Pass the version string to kernel-doc, as it needs to use a different
+        # dialect, depending on what the C domain supports in each specific
+        # Sphinx version
+ cmd += ['-sphinx-version', sphinx.__version__]
+
+ filename = env.config.kerneldoc_srctree + '/' + self.arguments[0]
+ export_file_patterns = []
+
+ # Tell sphinx of the dependency
+ env.note_dependency(os.path.abspath(filename))
+
+ tab_width = self.options.get('tab-width', self.state.document.settings.tab_width)
+
+        # 'functions' is an alias of 'identifiers'
+ if 'functions' in self.options:
+ self.options['identifiers'] = self.options.get('functions')
+
+ # FIXME: make this nicer and more robust against errors
+ if 'export' in self.options:
+ cmd += ['-export']
+ export_file_patterns = str(self.options.get('export')).split()
+ elif 'internal' in self.options:
+ cmd += ['-internal']
+ export_file_patterns = str(self.options.get('internal')).split()
+ elif 'doc' in self.options:
+ cmd += ['-function', str(self.options.get('doc'))]
+ elif 'identifiers' in self.options:
+ identifiers = self.options.get('identifiers').split()
+ if identifiers:
+ for i in identifiers:
+ cmd += ['-function', i]
+ else:
+ cmd += ['-no-doc-sections']
+
+ if 'no-identifiers' in self.options:
+ no_identifiers = self.options.get('no-identifiers').split()
+ if no_identifiers:
+ for i in no_identifiers:
+ cmd += ['-nosymbol', i]
+
+ for pattern in export_file_patterns:
+ for f in glob.glob(env.config.kerneldoc_srctree + '/' + pattern):
+ env.note_dependency(os.path.abspath(f))
+ cmd += ['-export-file', f]
+
+ cmd += [filename]
+
+ try:
+ kernellog.verbose(env.app,
+ 'calling kernel-doc \'%s\'' % (" ".join(cmd)))
+
+ p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ out, err = p.communicate()
+
+ out, err = codecs.decode(out, 'utf-8'), codecs.decode(err, 'utf-8')
+
+ if p.returncode != 0:
+ sys.stderr.write(err)
+
+ kernellog.warn(env.app,
+ 'kernel-doc \'%s\' failed with return code %d' % (" ".join(cmd), p.returncode))
+ return [nodes.error(None, nodes.paragraph(text = "kernel-doc missing"))]
+ elif env.config.kerneldoc_verbosity > 0:
+ sys.stderr.write(err)
+
+ lines = statemachine.string2lines(out, tab_width, convert_whitespace=True)
+ result = ViewList()
+
+ lineoffset = 0;
+ line_regex = re.compile("^#define LINENO ([0-9]+)$")
+ for line in lines:
+ match = line_regex.search(line)
+ if match:
+ # sphinx counts lines from 0
+ lineoffset = int(match.group(1)) - 1
+                # we must eat our comments since they upset the markup
+ else:
+ doc = env.srcdir + "/" + env.docname + ":" + str(self.lineno)
+ result.append(line, doc + ": " + filename, lineoffset)
+ lineoffset += 1
+
+ node = nodes.section()
+ self.do_parse(result, node)
+
+ return node.children
+
+ except Exception as e: # pylint: disable=W0703
+ kernellog.warn(env.app, 'kernel-doc \'%s\' processing failed with: %s' %
+ (" ".join(cmd), str(e)))
+ return [nodes.error(None, nodes.paragraph(text = "kernel-doc missing"))]
+
+ def do_parse(self, result, node):
+ if Use_SSI:
+ with switch_source_input(self.state, result):
+ self.state.nested_parse(result, 0, node, match_titles=1)
+ else:
+ save = self.state.memo.title_styles, self.state.memo.section_level, self.state.memo.reporter
+ self.state.memo.reporter = AutodocReporter(result, self.state.memo.reporter)
+ self.state.memo.title_styles, self.state.memo.section_level = [], 0
+ try:
+ self.state.nested_parse(result, 0, node, match_titles=1)
+ finally:
+ self.state.memo.title_styles, self.state.memo.section_level, self.state.memo.reporter = save
+
+
+def setup(app):
+ app.add_config_value('kerneldoc_bin', None, 'env')
+ app.add_config_value('kerneldoc_srctree', None, 'env')
+ app.add_config_value('kerneldoc_verbosity', 1, 'env')
+
+ app.add_directive('kernel-doc', KernelDocDirective)
+
+ return dict(
+ version = __version__,
+ parallel_read_safe = True,
+ parallel_write_safe = True
+ )
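
Roughly the command line the directive assembles for a hypothetical
``.. kernel-doc:: include/linux/slab.h`` with ``:identifiers: kmalloc``,
assuming ``kerneldoc_bin`` points at ``scripts/kernel-doc`` (paths here are
illustrative only)::

    import sphinx

    srctree = "/path/to/linux"          # assumed kerneldoc_srctree
    cmd = [srctree + "/scripts/kernel-doc", "-rst", "-enable-lineno",
           "-sphinx-version", sphinx.__version__,
           "-function", "kmalloc",
           srctree + "/include/linux/slab.h"]
    print(" ".join(cmd))
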
diff --git a/Documentation/sphinx/kernellog.py b/Documentation/sphinx/kernellog.py
new file mode 100644
index 000000000..8ac7d274f
--- /dev/null
+++ b/Documentation/sphinx/kernellog.py
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Sphinx has deprecated its older logging interface, but the replacement
+# only goes back to 1.6. So here's a wrapper layer to keep around for
+# as long as we support 1.4.
+#
+import sphinx
+
+if sphinx.__version__[:3] >= '1.6':
+ UseLogging = True
+ from sphinx.util import logging
+ logger = logging.getLogger('kerneldoc')
+else:
+ UseLogging = False
+
+def warn(app, message):
+ if UseLogging:
+ logger.warning(message)
+ else:
+ app.warn(message)
+
+def verbose(app, message):
+ if UseLogging:
+ logger.verbose(message)
+ else:
+ app.verbose(message)
+
+def info(app, message):
+ if UseLogging:
+ logger.info(message)
+ else:
+ app.info(message)
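
Typical usage from the other extensions in this directory, assuming
``Documentation/sphinx`` is on ``sys.path`` as the docs build arranges::

    import kernellog

    def builder_inited(app):
        kernellog.verbose(app, "checking tools ...")
        kernellog.warn(app, "dot(1) not found")
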
diff --git a/Documentation/sphinx/kfigure.py b/Documentation/sphinx/kfigure.py
new file mode 100644
index 000000000..788704886
--- /dev/null
+++ b/Documentation/sphinx/kfigure.py
@@ -0,0 +1,557 @@
+# -*- coding: utf-8; mode: python -*-
+# pylint: disable=C0103, R0903, R0912, R0915
+u"""
+ scalable figure and image handling
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Sphinx extension which implements scalable image handling.
+
+ :copyright: Copyright (C) 2016 Markus Heiser
+ :license: GPL Version 2, June 1991 see Linux/COPYING for details.
+
+    The build of an image format depends on the image's source format and the
+    output's destination format. This extension implements methods to simplify
+    image handling from the author's POV. Directives like ``kernel-figure``
+    implement methods to always get the best output format even if some tools
+    are not installed. For more details take a look at ``convert_image(...)``,
+    which is the core of all conversions.
+
+ * ``.. kernel-image``: for image handling / a ``.. image::`` replacement
+
+ * ``.. kernel-figure``: for figure handling / a ``.. figure::`` replacement
+
+ * ``.. kernel-render``: for render markup / a concept to embed *render*
+ markups (or languages). Supported markups (see ``RENDER_MARKUP_EXT``)
+
+      - ``DOT``: render embedded Graphviz's **DOT**
+ - ``SVG``: render embedded Scalable Vector Graphics (**SVG**)
+ - ... *developable*
+
+ Used tools:
+
+ * ``dot(1)``: Graphviz (https://www.graphviz.org). If Graphviz is not
+ available, the DOT language is inserted as literal-block.
+
+    * SVG to PDF: To generate PDF, you need at least one of these tools:
+
+ - ``convert(1)``: ImageMagick (https://www.imagemagick.org)
+
+ List of customizations:
+
+ * generate PDF from SVG / used by PDF (LaTeX) builder
+
+ * generate SVG (html-builder) and PDF (latex-builder) from DOT files.
+ DOT: see https://www.graphviz.org/content/dot-language
+
+ """
+
+import os
+from os import path
+import subprocess
+from hashlib import sha1
+import sys
+
+from docutils import nodes
+from docutils.statemachine import ViewList
+from docutils.parsers.rst import directives
+from docutils.parsers.rst.directives import images
+import sphinx
+
+from sphinx.util.nodes import clean_astext
+from six import iteritems
+
+import kernellog
+
+PY3 = sys.version_info[0] == 3
+
+if PY3:
+ _unicode = str
+else:
+ _unicode = unicode
+
+# Get Sphinx version
+major, minor, patch = sphinx.version_info[:3]
+if major == 1 and minor > 3:
+ # patches.Figure only landed in Sphinx 1.4
+ from sphinx.directives.patches import Figure # pylint: disable=C0413
+else:
+ Figure = images.Figure
+
+__version__ = '1.0.0'
+
+# simple helper
+# -------------
+
+def which(cmd):
+ """Searches the ``cmd`` in the ``PATH`` environment.
+
+    This *which* searches the PATH for the executable ``cmd``. The first match
+    is returned; if nothing is found, ``None`` is returned.
+ """
+ envpath = os.environ.get('PATH', None) or os.defpath
+ for folder in envpath.split(os.pathsep):
+ fname = folder + os.sep + cmd
+ if path.isfile(fname):
+ return fname
+
+def mkdir(folder, mode=0o775):
+ if not path.isdir(folder):
+ os.makedirs(folder, mode)
+
+def file2literal(fname):
+ with open(fname, "r") as src:
+ data = src.read()
+ node = nodes.literal_block(data, data)
+ return node
+
+def isNewer(path1, path2):
+ """Returns True if ``path1`` is newer than ``path2``
+
+    If ``path1`` exists and is newer than ``path2``, ``True`` is returned;
+    otherwise ``False``.
+ """
+ return (path.exists(path1)
+ and os.stat(path1).st_ctime > os.stat(path2).st_ctime)
+
+def pass_handle(self, node): # pylint: disable=W0613
+ pass
+
+# setup conversion tools and sphinx extension
+# -------------------------------------------
+
+# Graphviz's dot(1) support
+dot_cmd = None
+
+# ImageMagick' convert(1) support
+convert_cmd = None
+
+
+def setup(app):
+ # check toolchain first
+ app.connect('builder-inited', setupTools)
+
+ # image handling
+ app.add_directive("kernel-image", KernelImage)
+ app.add_node(kernel_image,
+ html = (visit_kernel_image, pass_handle),
+ latex = (visit_kernel_image, pass_handle),
+ texinfo = (visit_kernel_image, pass_handle),
+ text = (visit_kernel_image, pass_handle),
+ man = (visit_kernel_image, pass_handle), )
+
+ # figure handling
+ app.add_directive("kernel-figure", KernelFigure)
+ app.add_node(kernel_figure,
+ html = (visit_kernel_figure, pass_handle),
+ latex = (visit_kernel_figure, pass_handle),
+ texinfo = (visit_kernel_figure, pass_handle),
+ text = (visit_kernel_figure, pass_handle),
+ man = (visit_kernel_figure, pass_handle), )
+
+ # render handling
+ app.add_directive('kernel-render', KernelRender)
+ app.add_node(kernel_render,
+ html = (visit_kernel_render, pass_handle),
+ latex = (visit_kernel_render, pass_handle),
+ texinfo = (visit_kernel_render, pass_handle),
+ text = (visit_kernel_render, pass_handle),
+ man = (visit_kernel_render, pass_handle), )
+
+ app.connect('doctree-read', add_kernel_figure_to_std_domain)
+
+ return dict(
+ version = __version__,
+ parallel_read_safe = True,
+ parallel_write_safe = True
+ )
+
+
+def setupTools(app):
+ u"""
+ Check available build tools and log some *verbose* messages.
+
+ This function is called once, when the builder is initiated.
+ """
+ global dot_cmd, convert_cmd # pylint: disable=W0603
+ kernellog.verbose(app, "kfigure: check installed tools ...")
+
+ dot_cmd = which('dot')
+ convert_cmd = which('convert')
+
+ if dot_cmd:
+ kernellog.verbose(app, "use dot(1) from: " + dot_cmd)
+ else:
+ kernellog.warn(app, "dot(1) not found, for better output quality install "
+ "graphviz from https://www.graphviz.org")
+ if convert_cmd:
+ kernellog.verbose(app, "use convert(1) from: " + convert_cmd)
+ else:
+ kernellog.warn(app,
+ "convert(1) not found, for SVG to PDF conversion install "
+ "ImageMagick (https://www.imagemagick.org)")
+
+
+# integrate conversion tools
+# --------------------------
+
+RENDER_MARKUP_EXT = {
+ # The '.ext' must be handled by convert_image(..) function's *in_ext* input.
+ # <name> : <.ext>
+ 'DOT' : '.dot',
+ 'SVG' : '.svg'
+}
+
+def convert_image(img_node, translator, src_fname=None):
+    """Convert an image node for the builder.
+
+    Different builders prefer different image formats, e.g. the *latex* builder
+    prefers PDF while the *html* builder prefers SVG for images.
+
+    This function handles the output image format depending on the source
+    format (of the image) and the translator's output format.
+ """
+ app = translator.builder.app
+
+ fname, in_ext = path.splitext(path.basename(img_node['uri']))
+ if src_fname is None:
+ src_fname = path.join(translator.builder.srcdir, img_node['uri'])
+ if not path.exists(src_fname):
+ src_fname = path.join(translator.builder.outdir, img_node['uri'])
+
+ dst_fname = None
+
+ # in kernel builds, use 'make SPHINXOPTS=-v' to see verbose messages
+
+ kernellog.verbose(app, 'assert best format for: ' + img_node['uri'])
+
+ if in_ext == '.dot':
+
+ if not dot_cmd:
+ kernellog.verbose(app,
+ "dot from graphviz not available / include DOT raw.")
+ img_node.replace_self(file2literal(src_fname))
+
+ elif translator.builder.format == 'latex':
+ dst_fname = path.join(translator.builder.outdir, fname + '.pdf')
+ img_node['uri'] = fname + '.pdf'
+ img_node['candidates'] = {'*': fname + '.pdf'}
+
+
+ elif translator.builder.format == 'html':
+ dst_fname = path.join(
+ translator.builder.outdir,
+ translator.builder.imagedir,
+ fname + '.svg')
+ img_node['uri'] = path.join(
+ translator.builder.imgpath, fname + '.svg')
+ img_node['candidates'] = {
+ '*': path.join(translator.builder.imgpath, fname + '.svg')}
+
+ else:
+ # all other builder formats will include DOT as raw
+ img_node.replace_self(file2literal(src_fname))
+
+ elif in_ext == '.svg':
+
+ if translator.builder.format == 'latex':
+ if convert_cmd is None:
+ kernellog.verbose(app,
+ "no SVG to PDF conversion available / include SVG raw.")
+ img_node.replace_self(file2literal(src_fname))
+ else:
+ dst_fname = path.join(translator.builder.outdir, fname + '.pdf')
+ img_node['uri'] = fname + '.pdf'
+ img_node['candidates'] = {'*': fname + '.pdf'}
+
+ if dst_fname:
+        # the builder does not need to copy it one more time, so pop it if it exists.
+ translator.builder.images.pop(img_node['uri'], None)
+ _name = dst_fname[len(translator.builder.outdir) + 1:]
+
+ if isNewer(dst_fname, src_fname):
+ kernellog.verbose(app,
+ "convert: {out}/%s already exists and is newer" % _name)
+
+ else:
+ ok = False
+ mkdir(path.dirname(dst_fname))
+
+ if in_ext == '.dot':
+ kernellog.verbose(app, 'convert DOT to: {out}/' + _name)
+ ok = dot2format(app, src_fname, dst_fname)
+
+ elif in_ext == '.svg':
+ kernellog.verbose(app, 'convert SVG to: {out}/' + _name)
+ ok = svg2pdf(app, src_fname, dst_fname)
+
+ if not ok:
+ img_node.replace_self(file2literal(src_fname))
+
+
+def dot2format(app, dot_fname, out_fname):
+ """Converts DOT file to ``out_fname`` using ``dot(1)``.
+
+ * ``dot_fname`` pathname of the input DOT file, including extension ``.dot``
+ * ``out_fname`` pathname of the output file, including format extension
+
+ The *format extension* depends on the ``dot`` command (see ``man dot``
+ option ``-Txxx``). Normally you will use one of the following extensions:
+
+ - ``.ps`` for PostScript,
+    - ``.svg`` or ``.svgz`` for Scalable Vector Graphics,
+ - ``.fig`` for XFIG graphics and
+ - ``.png`` or ``gif`` for common bitmap graphics.
+
+ """
+ out_format = path.splitext(out_fname)[1][1:]
+ cmd = [dot_cmd, '-T%s' % out_format, dot_fname]
+ exit_code = 42
+
+ with open(out_fname, "w") as out:
+ exit_code = subprocess.call(cmd, stdout = out)
+ if exit_code != 0:
+ kernellog.warn(app,
+ "Error #%d when calling: %s" % (exit_code, " ".join(cmd)))
+ return bool(exit_code == 0)
+
+def svg2pdf(app, svg_fname, pdf_fname):
+ """Converts SVG to PDF with ``convert(1)`` command.
+
+ Uses ``convert(1)`` from ImageMagick (https://www.imagemagick.org) for
+ conversion. Returns ``True`` on success and ``False`` if an error occurred.
+
+ * ``svg_fname`` pathname of the input SVG file with extension (``.svg``)
+    * ``pdf_fname``  pathname of the output PDF file with extension (``.pdf``)
+
+ """
+ cmd = [convert_cmd, svg_fname, pdf_fname]
+ # use stdout and stderr from parent
+ exit_code = subprocess.call(cmd)
+ if exit_code != 0:
+ kernellog.warn(app, "Error #%d when calling: %s" % (exit_code, " ".join(cmd)))
+ return bool(exit_code == 0)
+
+
+# image handling
+# ---------------------
+
+def visit_kernel_image(self, node): # pylint: disable=W0613
+ """Visitor of the ``kernel_image`` Node.
+
+ Handles the ``image`` child-node with the ``convert_image(...)``.
+ """
+ img_node = node[0]
+ convert_image(img_node, self)
+
+class kernel_image(nodes.image):
+ """Node for ``kernel-image`` directive."""
+ pass
+
+class KernelImage(images.Image):
+ u"""KernelImage directive
+
+    Inherits everything from the ``.. image::`` directive, except *remote URI*
+    and *glob* pattern. The KernelImage directive wraps an image node into a
+    kernel_image node. See ``visit_kernel_image``.
+ """
+
+ def run(self):
+ uri = self.arguments[0]
+ if uri.endswith('.*') or uri.find('://') != -1:
+ raise self.severe(
+ 'Error in "%s: %s": glob pattern and remote images are not allowed'
+ % (self.name, uri))
+ result = images.Image.run(self)
+ if len(result) == 2 or isinstance(result[0], nodes.system_message):
+ return result
+ (image_node,) = result
+ # wrap image node into a kernel_image node / see visitors
+ node = kernel_image('', image_node)
+ return [node]
+
+# figure handling
+# ---------------------
+
+def visit_kernel_figure(self, node): # pylint: disable=W0613
+ """Visitor of the ``kernel_figure`` Node.
+
+ Handles the ``image`` child-node with the ``convert_image(...)``.
+ """
+ img_node = node[0][0]
+ convert_image(img_node, self)
+
+class kernel_figure(nodes.figure):
+ """Node for ``kernel-figure`` directive."""
+
+class KernelFigure(Figure):
+    u"""KernelFigure directive
+
+    Inherits everything from the ``.. figure::`` directive, except *remote URI*
+    and *glob* pattern. The KernelFigure directive wraps a figure node into a
+    kernel_figure node. See ``visit_kernel_figure``.
+ """
+
+ def run(self):
+ uri = self.arguments[0]
+ if uri.endswith('.*') or uri.find('://') != -1:
+ raise self.severe(
+ 'Error in "%s: %s":'
+ ' glob pattern and remote images are not allowed'
+ % (self.name, uri))
+ result = Figure.run(self)
+ if len(result) == 2 or isinstance(result[0], nodes.system_message):
+ return result
+ (figure_node,) = result
+ # wrap figure node into a kernel_figure node / see visitors
+ node = kernel_figure('', figure_node)
+ return [node]
+
+
+# render handling
+# ---------------------
+
+def visit_kernel_render(self, node):
+ """Visitor of the ``kernel_render`` Node.
+
+    If rendering tools are available, save the markup of the ``literal_block``
+    child node into a file and replace the ``literal_block`` node with a newly
+    created ``image`` node pointing to the saved markup file. Afterwards, handle
+    the image child-node with ``convert_image(...)``.
+ """
+ app = self.builder.app
+ srclang = node.get('srclang')
+
+ kernellog.verbose(app, 'visit kernel-render node lang: "%s"' % (srclang))
+
+ tmp_ext = RENDER_MARKUP_EXT.get(srclang, None)
+ if tmp_ext is None:
+ kernellog.warn(app, 'kernel-render: "%s" unknown / include raw.' % (srclang))
+ return
+
+ if not dot_cmd and tmp_ext == '.dot':
+ kernellog.verbose(app, "dot from graphviz not available / include raw.")
+ return
+
+ literal_block = node[0]
+
+ code = literal_block.astext()
+ hashobj = code.encode('utf-8') # str(node.attributes)
+ fname = path.join('%s-%s' % (srclang, sha1(hashobj).hexdigest()))
+
+ tmp_fname = path.join(
+ self.builder.outdir, self.builder.imagedir, fname + tmp_ext)
+
+ if not path.isfile(tmp_fname):
+ mkdir(path.dirname(tmp_fname))
+ with open(tmp_fname, "w") as out:
+ out.write(code)
+
+ img_node = nodes.image(node.rawsource, **node.attributes)
+ img_node['uri'] = path.join(self.builder.imgpath, fname + tmp_ext)
+ img_node['candidates'] = {
+ '*': path.join(self.builder.imgpath, fname + tmp_ext)}
+
+ literal_block.replace_self(img_node)
+ convert_image(img_node, self, tmp_fname)
+
+
+class kernel_render(nodes.General, nodes.Inline, nodes.Element):
+ """Node for ``kernel-render`` directive."""
+ pass
+
+class KernelRender(Figure):
+ u"""KernelRender directive
+
+ Render content by external tool. Has all the options known from the
+ *figure* directive, plus option ``caption``. If ``caption`` has a
+    value, a figure node with the *caption* is inserted. If not, an image node
+    is inserted.
+
+ The KernelRender directive wraps the text of the directive into a
+ literal_block node and wraps it into a kernel_render node. See
+ ``visit_kernel_render``.
+ """
+ has_content = True
+ required_arguments = 1
+ optional_arguments = 0
+ final_argument_whitespace = False
+
+    # inherit options from 'figure'
+ option_spec = Figure.option_spec.copy()
+ option_spec['caption'] = directives.unchanged
+
+ def run(self):
+ return [self.build_node()]
+
+ def build_node(self):
+
+ srclang = self.arguments[0].strip()
+ if srclang not in RENDER_MARKUP_EXT.keys():
+ return [self.state_machine.reporter.warning(
+ 'Unknown source language "%s", use one of: %s.' % (
+ srclang, ",".join(RENDER_MARKUP_EXT.keys())),
+ line=self.lineno)]
+
+ code = '\n'.join(self.content)
+ if not code.strip():
+ return [self.state_machine.reporter.warning(
+ 'Ignoring "%s" directive without content.' % (
+ self.name),
+ line=self.lineno)]
+
+ node = kernel_render()
+ node['alt'] = self.options.get('alt','')
+ node['srclang'] = srclang
+ literal_node = nodes.literal_block(code, code)
+ node += literal_node
+
+ caption = self.options.get('caption')
+ if caption:
+ # parse caption's content
+ parsed = nodes.Element()
+ self.state.nested_parse(
+ ViewList([caption], source=''), self.content_offset, parsed)
+ caption_node = nodes.caption(
+ parsed[0].rawsource, '', *parsed[0].children)
+ caption_node.source = parsed[0].source
+ caption_node.line = parsed[0].line
+
+ figure_node = nodes.figure('', node)
+ for k,v in self.options.items():
+ figure_node[k] = v
+ figure_node += caption_node
+
+ node = figure_node
+
+ return node
+
+def add_kernel_figure_to_std_domain(app, doctree):
+ """Add kernel-figure anchors to 'std' domain.
+
+ The ``StandardDomain.process_doc(..)`` method does not know how to resolve
+ the caption (label) of ``kernel-figure`` directive (it only knows about
+ standard nodes, e.g. table, figure etc.). Without any additional handling
+    this will result in an 'undefined label' for kernel-figures.
+
+ This handle adds labels of kernel-figure to the 'std' domain labels.
+ """
+
+ std = app.env.domains["std"]
+ docname = app.env.docname
+ labels = std.data["labels"]
+
+ for name, explicit in iteritems(doctree.nametypes):
+ if not explicit:
+ continue
+ labelid = doctree.nameids[name]
+ if labelid is None:
+ continue
+ node = doctree.ids[labelid]
+
+ if node.tagname == 'kernel_figure':
+ for n in node.next_node():
+ if n.tagname == 'caption':
+ sectname = clean_astext(n)
+ # add label to std domain
+ labels[name] = docname, labelid, sectname
+ break
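
The two conversions wrapped above boil down to external commands along these
lines; the file names are invented and the tools must be installed for the
sketch to run::

    import subprocess

    with open("example.dot", "w") as f:
        f.write("digraph G { a -> b }\n")

    # what dot2format() runs for an SVG target
    with open("example.svg", "w") as out:
        subprocess.call(["dot", "-Tsvg", "example.dot"], stdout=out)

    # what svg2pdf() runs for the LaTeX/PDF builder
    subprocess.call(["convert", "example.svg", "example.pdf"])
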
diff --git a/Documentation/sphinx/load_config.py b/Documentation/sphinx/load_config.py
new file mode 100644
index 000000000..8b416bfd7
--- /dev/null
+++ b/Documentation/sphinx/load_config.py
@@ -0,0 +1,59 @@
+# -*- coding: utf-8; mode: python -*-
+# pylint: disable=R0903, C0330, R0914, R0912, E0401
+
+import os
+import sys
+from sphinx.util.osutil import fs_encoding
+
+# ------------------------------------------------------------------------------
+def loadConfig(namespace):
+# ------------------------------------------------------------------------------
+
+ u"""Load an additional configuration file into *namespace*.
+
+ The name of the configuration file is taken from the environment
+ ``SPHINX_CONF``. The external configuration file extends (or overwrites) the
+    configuration values from the original ``conf.py``. With this you are able to
+ maintain *build themes*. """
+
+ config_file = os.environ.get("SPHINX_CONF", None)
+ if (config_file is not None
+ and os.path.normpath(namespace["__file__"]) != os.path.normpath(config_file) ):
+ config_file = os.path.abspath(config_file)
+
+ # Let's avoid one conf.py file just due to latex_documents
+ start = config_file.find('Documentation/')
+ if start >= 0:
+ start = config_file.find('/', start + 1)
+
+ end = config_file.rfind('/')
+ if start >= 0 and end > 0:
+ dir = config_file[start + 1:end]
+
+ print("source directory: %s" % dir)
+ new_latex_docs = []
+ latex_documents = namespace['latex_documents']
+
+ for l in latex_documents:
+ if l[0].find(dir + '/') == 0:
+ has = True
+ fn = l[0][len(dir) + 1:]
+ new_latex_docs.append((fn, l[1], l[2], l[3], l[4]))
+ break
+
+ namespace['latex_documents'] = new_latex_docs
+
+ # If there is an extra conf.py file, load it
+ if os.path.isfile(config_file):
+ sys.stdout.write("load additional sphinx-config: %s\n" % config_file)
+ config = namespace.copy()
+ config['__file__'] = config_file
+ with open(config_file, 'rb') as f:
+ code = compile(f.read(), fs_encoding, 'exec')
+ exec(code, config)
+ del config['__file__']
+ namespace.update(config)
+ else:
+ config = namespace.copy()
+ config['tags'].add("subproject")
+ namespace.update(config)
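
The function is meant to be called at the very end of a ``conf.py``; a minimal
sketch (``SPHINX_CONF`` is set by the documentation Makefile when building a
sub-project, and the call is a no-op when it is unset)::

    import os
    import sys

    sys.path.insert(0, os.path.abspath("Documentation/sphinx"))  # assumed layout
    from load_config import loadConfig

    latex_documents = []        # a real conf.py defines this and much more
    loadConfig(globals())       # pulls in $SPHINX_CONF if it points at a file
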
diff --git a/Documentation/sphinx/maintainers_include.py b/Documentation/sphinx/maintainers_include.py
new file mode 100755
index 000000000..dc8fed48d
--- /dev/null
+++ b/Documentation/sphinx/maintainers_include.py
@@ -0,0 +1,197 @@
+#!/usr/bin/env python
+# SPDX-License-Identifier: GPL-2.0
+# -*- coding: utf-8; mode: python -*-
+# pylint: disable=R0903, C0330, R0914, R0912, E0401
+
+u"""
+ maintainers-include
+ ~~~~~~~~~~~~~~~~~~~
+
+ Implementation of the ``maintainers-include`` reST-directive.
+
+ :copyright: Copyright (C) 2019 Kees Cook <keescook@chromium.org>
+ :license: GPL Version 2, June 1991 see linux/COPYING for details.
+
+ The ``maintainers-include`` reST-directive performs extensive parsing
+ specific to the Linux kernel's standard "MAINTAINERS" file, in an
+ effort to avoid needing to heavily mark up the original plain text.
+"""
+
+import sys
+import re
+import os.path
+
+from docutils import statemachine
+from docutils.utils.error_reporting import ErrorString
+from docutils.parsers.rst import Directive
+from docutils.parsers.rst.directives.misc import Include
+
+__version__ = '1.0'
+
+def setup(app):
+ app.add_directive("maintainers-include", MaintainersInclude)
+ return dict(
+ version = __version__,
+ parallel_read_safe = True,
+ parallel_write_safe = True
+ )
+
+class MaintainersInclude(Include):
+ u"""MaintainersInclude (``maintainers-include``) directive"""
+ required_arguments = 0
+
+ def parse_maintainers(self, path):
+ """Parse all the MAINTAINERS lines into ReST for human-readability"""
+
+ result = list()
+ result.append(".. _maintainers:")
+ result.append("")
+
+ # Poor man's state machine.
+ descriptions = False
+ maintainers = False
+ subsystems = False
+
+ # Field letter to field name mapping.
+ field_letter = None
+ fields = dict()
+
+ prev = None
+ field_prev = ""
+ field_content = ""
+
+ for line in open(path):
+ if sys.version_info.major == 2:
+ line = unicode(line, 'utf-8')
+ # Have we reached the end of the preformatted Descriptions text?
+ if descriptions and line.startswith('Maintainers'):
+ descriptions = False
+ # Ensure a blank line following the last "|"-prefixed line.
+ result.append("")
+
+ # Start subsystem processing? This is to skip processing the text
+ # between the Maintainers heading and the first subsystem name.
+ if maintainers and not subsystems:
+ if re.search('^[A-Z0-9]', line):
+ subsystems = True
+
+ # Drop needless input whitespace.
+ line = line.rstrip()
+
+ # Linkify all non-wildcard refs to ReST files in Documentation/.
+ pat = '(Documentation/([^\s\?\*]*)\.rst)'
+ m = re.search(pat, line)
+ if m:
+ # maintainers.rst is in a subdirectory, so include "../".
+ line = re.sub(pat, ':doc:`%s <../%s>`' % (m.group(2), m.group(2)), line)
+
+ # Check state machine for output rendering behavior.
+ output = None
+ if descriptions:
+ # Escape the escapes in preformatted text.
+ output = "| %s" % (line.replace("\\", "\\\\"))
+ # Look for and record field letter to field name mappings:
+ # R: Designated *reviewer*: FullName <address@domain>
+ m = re.search("\s(\S):\s", line)
+ if m:
+ field_letter = m.group(1)
+ if field_letter and not field_letter in fields:
+ m = re.search("\*([^\*]+)\*", line)
+ if m:
+ fields[field_letter] = m.group(1)
+ elif subsystems:
+ # Skip empty lines: subsystem parser adds them as needed.
+ if len(line) == 0:
+ continue
+ # Subsystem fields are batched into "field_content"
+ if line[1] != ':':
+ # Render a subsystem entry as:
+ # SUBSYSTEM NAME
+ # ~~~~~~~~~~~~~~
+
+ # Flush pending field content.
+ output = field_content + "\n\n"
+ field_content = ""
+
+ # Collapse whitespace in subsystem name.
+ heading = re.sub("\s+", " ", line)
+ output = output + "%s\n%s" % (heading, "~" * len(heading))
+ field_prev = ""
+ else:
+ # Render a subsystem field as:
+ # :Field: entry
+ # entry...
+ field, details = line.split(':', 1)
+ details = details.strip()
+
+ # Mark paths (and regexes) as literal text for improved
+ # readability and to escape any escapes.
+ if field in ['F', 'N', 'X', 'K']:
+ # But only if not already marked :)
+ if not ':doc:' in details:
+ details = '``%s``' % (details)
+
+ # Comma separate email field continuations.
+ if field == field_prev and field_prev in ['M', 'R', 'L']:
+ field_content = field_content + ","
+
+ # Do not repeat field names, so that field entries
+ # will be collapsed together.
+ if field != field_prev:
+ output = field_content + "\n"
+ field_content = ":%s:" % (fields.get(field, field))
+ field_content = field_content + "\n\t%s" % (details)
+ field_prev = field
+ else:
+ output = line
+
+ # Re-split on any added newlines in any above parsing.
+ if output != None:
+ for separated in output.split('\n'):
+ result.append(separated)
+
+ # Update the state machine when we find heading separators.
+ if line.startswith('----------'):
+ if prev.startswith('Descriptions'):
+ descriptions = True
+ if prev.startswith('Maintainers'):
+ maintainers = True
+
+ # Retain previous line for state machine transitions.
+ prev = line
+
+ # Flush pending field contents.
+ if field_content != "":
+ for separated in field_content.split('\n'):
+ result.append(separated)
+
+ output = "\n".join(result)
+ # For debugging the pre-rendered results...
+ #print(output, file=open("/tmp/MAINTAINERS.rst", "w"))
+
+ self.state_machine.insert_input(
+ statemachine.string2lines(output), path)
+
+ def run(self):
+ """Include the MAINTAINERS file as part of this reST file."""
+ if not self.state.document.settings.file_insertion_enabled:
+ raise self.warning('"%s" directive disabled.' % self.name)
+
+ # Walk up source path directories to find Documentation/../
+ path = self.state_machine.document.attributes['source']
+ path = os.path.realpath(path)
+ tail = path
+ while tail != "Documentation" and tail != "":
+ (path, tail) = os.path.split(path)
+
+ # Append "MAINTAINERS"
+ path = os.path.join(path, "MAINTAINERS")
+
+ try:
+ self.state.document.settings.record_dependencies.add(path)
+ lines = self.parse_maintainers(path)
+ except IOError as error:
+ raise self.severe('Problems with "%s" directive path:\n%s.' %
+ (self.name, ErrorString(error)))
+
+ return []
diff --git a/Documentation/sphinx/parallel-wrapper.sh b/Documentation/sphinx/parallel-wrapper.sh
new file mode 100644
index 000000000..e54c44ce1
--- /dev/null
+++ b/Documentation/sphinx/parallel-wrapper.sh
@@ -0,0 +1,33 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Figure out if we should follow a specific parallelism from the make
+# environment (as exported by scripts/jobserver-exec), or fall back to
+# the "auto" parallelism when "-jN" is not specified at the top-level
+# "make" invocation.
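+#
+# Illustrative invocation (a sketch; the real caller is the documentation
+# Makefile via scripts/jobserver-exec):
+#
+#   ./parallel-wrapper.sh sphinx-build -b html <srcdir> <outdir>
+#
+# ends up exec'ing "sphinx-build -jauto -b html ..." on Sphinx >= 1.7, or
+# "sphinx-build -j$PARALLELISM ..." when the jobserver exported a count.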
+
+sphinx="$1"
+shift || true
+
+parallel="$PARALLELISM"
+if [ -z "$parallel" ] ; then
+ # If no parallelism is specified at the top-level make, then
+ # fall back to the "-jauto" mode that the "htmldocs" target
+ # has traditionally used.
+ auto=$(perl -e 'open IN,"'"$sphinx"' --version 2>&1 |";
+ while (<IN>) {
+ if (m/([\d\.]+)/) {
+ print "auto" if ($1 >= "1.7")
+ }
+ }
+ close IN')
+ if [ -n "$auto" ] ; then
+ parallel="$auto"
+ fi
+fi
+# Only if some parallelism has been determined do we add the -jN option.
+if [ -n "$parallel" ] ; then
+ parallel="-j$parallel"
+fi
+
+exec "$sphinx" $parallel "$@"
diff --git a/Documentation/sphinx/parse-headers.pl b/Documentation/sphinx/parse-headers.pl
new file mode 100755
index 000000000..b063f2f1c
--- /dev/null
+++ b/Documentation/sphinx/parse-headers.pl
@@ -0,0 +1,401 @@
+#!/usr/bin/env perl
+use strict;
+use Text::Tabs;
+use Getopt::Long;
+use Pod::Usage;
+
+my $debug;
+my $help;
+my $man;
+
+GetOptions(
+ "debug" => \$debug,
+ 'usage|?' => \$help,
+ 'help' => \$man
+) or pod2usage(2);
+
+pod2usage(1) if $help;
+pod2usage(-exitstatus => 0, -verbose => 2) if $man;
+pod2usage(2) if (scalar @ARGV < 2 || scalar @ARGV > 3);
+
+my ($file_in, $file_out, $file_exceptions) = @ARGV;
+
+my $data;
+my %ioctls;
+my %defines;
+my %typedefs;
+my %enums;
+my %enum_symbols;
+my %structs;
+
+require Data::Dumper if ($debug);
+
+#
+# read the file and get identifiers
+#
+
+my $is_enum = 0;
+my $is_comment = 0;
+open IN, $file_in or die "Can't open $file_in";
+while (<IN>) {
+ $data .= $_;
+
+ my $ln = $_;
+ if (!$is_comment) {
+ $ln =~ s,/\*.*(\*/),,g;
+
+ $is_comment = 1 if ($ln =~ s,/\*.*,,);
+ } else {
+ if ($ln =~ s,^(.*\*/),,) {
+ $is_comment = 0;
+ } else {
+ next;
+ }
+ }
+
+ if ($is_enum && $ln =~ m/^\s*([_\w][\w\d_]+)\s*[\,=]?/) {
+ my $s = $1;
+ my $n = $1;
+ $n =~ tr/A-Z/a-z/;
+ $n =~ tr/_/-/;
+
+ $enum_symbols{$s} = "\\ :ref:`$s <$n>`\\ ";
+
+ $is_enum = 0 if ($is_enum && m/\}/);
+ next;
+ }
+ $is_enum = 0 if ($is_enum && m/\}/);
+
+ if ($ln =~ m/^\s*#\s*define\s+([_\w][\w\d_]+)\s+_IO/) {
+ my $s = $1;
+ my $n = $1;
+ $n =~ tr/A-Z/a-z/;
+
+ $ioctls{$s} = "\\ :ref:`$s <$n>`\\ ";
+ next;
+ }
+
+ if ($ln =~ m/^\s*#\s*define\s+([_\w][\w\d_]+)\s+/) {
+ my $s = $1;
+ my $n = $1;
+ $n =~ tr/A-Z/a-z/;
+ $n =~ tr/_/-/;
+
+ $defines{$s} = "\\ :ref:`$s <$n>`\\ ";
+ next;
+ }
+
+ if ($ln =~ m/^\s*typedef\s+([_\w][\w\d_]+)\s+(.*)\s+([_\w][\w\d_]+);/) {
+ my $s = $2;
+ my $n = $3;
+
+ $typedefs{$n} = "\\ :c:type:`$n <$s>`\\ ";
+ next;
+ }
+ if ($ln =~ m/^\s*enum\s+([_\w][\w\d_]+)\s+\{/
+ || $ln =~ m/^\s*enum\s+([_\w][\w\d_]+)$/
+ || $ln =~ m/^\s*typedef\s*enum\s+([_\w][\w\d_]+)\s+\{/
+ || $ln =~ m/^\s*typedef\s*enum\s+([_\w][\w\d_]+)$/) {
+ my $s = $1;
+
+ $enums{$s} = "enum :c:type:`$s`\\ ";
+
+ $is_enum = $1;
+ next;
+ }
+ if ($ln =~ m/^\s*struct\s+([_\w][\w\d_]+)\s+\{/
+ || $ln =~ m/^\s*struct\s+([_\w][\w\d_]+)$/
+ || $ln =~ m/^\s*typedef\s*struct\s+([_\w][\w\d_]+)\s+\{/
+ || $ln =~ m/^\s*typedef\s*struct\s+([_\w][\w\d_]+)$/
+ ) {
+ my $s = $1;
+
+ $structs{$s} = "struct $s\\ ";
+ next;
+ }
+}
+close IN;
+
+#
+# Handle multi-line typedefs
+#
+
+my @matches = ($data =~ m/typedef\s+struct\s+\S+?\s*\{[^\}]+\}\s*(\S+)\s*\;/g,
+ $data =~ m/typedef\s+enum\s+\S+?\s*\{[^\}]+\}\s*(\S+)\s*\;/g,);
+foreach my $m (@matches) {
+ my $s = $m;
+
+ $typedefs{$s} = "\\ :c:type:`$s`\\ ";
+ next;
+}
+
+#
+# Handle exceptions, if any
+#
+
+my %def_reftype = (
+ "ioctl" => ":ref",
+ "define" => ":ref",
+ "symbol" => ":ref",
+ "typedef" => ":c:type",
+ "enum" => ":c:type",
+ "struct" => ":c:type",
+);
+
+if ($file_exceptions) {
+ open IN, $file_exceptions or die "Can't read $file_exceptions";
+ while (<IN>) {
+ next if (m/^\s*$/ || m/^\s*#/);
+
+ # Parsers to ignore a symbol
+
+ if (m/^ignore\s+ioctl\s+(\S+)/) {
+ delete $ioctls{$1} if (exists($ioctls{$1}));
+ next;
+ }
+ if (m/^ignore\s+define\s+(\S+)/) {
+ delete $defines{$1} if (exists($defines{$1}));
+ next;
+ }
+ if (m/^ignore\s+typedef\s+(\S+)/) {
+ delete $typedefs{$1} if (exists($typedefs{$1}));
+ next;
+ }
+ if (m/^ignore\s+enum\s+(\S+)/) {
+ delete $enums{$1} if (exists($enums{$1}));
+ next;
+ }
+ if (m/^ignore\s+struct\s+(\S+)/) {
+ delete $structs{$1} if (exists($structs{$1}));
+ next;
+ }
+ if (m/^ignore\s+symbol\s+(\S+)/) {
+ delete $enum_symbols{$1} if (exists($enum_symbols{$1}));
+ next;
+ }
+
+ # Parsers to replace a symbol
+ my ($type, $old, $new, $reftype);
+
+ if (m/^replace\s+(\S+)\s+(\S+)\s+(\S+)/) {
+ $type = $1;
+ $old = $2;
+ $new = $3;
+ } else {
+ die "Can't parse $file_exceptions: $_";
+ }
+
+ if ($new =~ m/^\:c\:(data|func|macro|type)\:\`(.+)\`/) {
+ $reftype = ":c:$1";
+ $new = $2;
+ } elsif ($new =~ m/\:ref\:\`(.+)\`/) {
+ $reftype = ":ref";
+ $new = $1;
+ } else {
+ $reftype = $def_reftype{$type};
+ }
+ $new = "$reftype:`$old <$new>`";
+
+ if ($type eq "ioctl") {
+ $ioctls{$old} = $new if (exists($ioctls{$old}));
+ next;
+ }
+ if ($type eq "define") {
+ $defines{$old} = $new if (exists($defines{$old}));
+ next;
+ }
+ if ($type eq "symbol") {
+ $enum_symbols{$old} = $new if (exists($enum_symbols{$old}));
+ next;
+ }
+ if ($type eq "typedef") {
+ $typedefs{$old} = $new if (exists($typedefs{$old}));
+ next;
+ }
+ if ($type eq "enum") {
+ $enums{$old} = $new if (exists($enums{$old}));
+ next;
+ }
+ if ($type eq "struct") {
+ $structs{$old} = $new if (exists($structs{$old}));
+ next;
+ }
+
+ die "Can't parse $file_exceptions: $_";
+ }
+}
+
+if ($debug) {
+ print Data::Dumper->Dump([\%ioctls], [qw(*ioctls)]) if (%ioctls);
+ print Data::Dumper->Dump([\%typedefs], [qw(*typedefs)]) if (%typedefs);
+ print Data::Dumper->Dump([\%enums], [qw(*enums)]) if (%enums);
+ print Data::Dumper->Dump([\%structs], [qw(*structs)]) if (%structs);
+ print Data::Dumper->Dump([\%defines], [qw(*defines)]) if (%defines);
+ print Data::Dumper->Dump([\%enum_symbols], [qw(*enum_symbols)]) if (%enum_symbols);
+}
+
+#
+# Align block
+#
+$data = expand($data);
+$data = " " . $data;
+$data =~ s/\n/\n /g;
+$data =~ s/\n\s+$/\n/g;
+$data =~ s/\n\s+\n/\n\n/g;
+
+#
+# Add escape codes for special characters
+#
+$data =~ s,([\_\`\*\<\>\&\\\\:\/\|\%\$\#\{\}\~\^]),\\$1,g;
+
+$data =~ s,DEPRECATED,**DEPRECATED**,g;
+
+#
+# Add references
+#
+
+my $start_delim = "[ \n\t\(\=\*\@]";
+my $end_delim = "(\\s|,|\\\\=|\\\\:|\\;|\\\)|\\}|\\{)";
+
+foreach my $r (keys %ioctls) {
+ my $s = $ioctls{$r};
+
+ $r =~ s,([\_\`\*\<\>\&\\\\:\/]),\\\\$1,g;
+
+ print "$r -> $s\n" if ($debug);
+
+ $data =~ s/($start_delim)($r)$end_delim/$1$s$3/g;
+}
+
+foreach my $r (keys %defines) {
+ my $s = $defines{$r};
+
+ $r =~ s,([\_\`\*\<\>\&\\\\:\/]),\\\\$1,g;
+
+ print "$r -> $s\n" if ($debug);
+
+ $data =~ s/($start_delim)($r)$end_delim/$1$s$3/g;
+}
+
+foreach my $r (keys %enum_symbols) {
+ my $s = $enum_symbols{$r};
+
+ $r =~ s,([\_\`\*\<\>\&\\\\:\/]),\\\\$1,g;
+
+ print "$r -> $s\n" if ($debug);
+
+ $data =~ s/($start_delim)($r)$end_delim/$1$s$3/g;
+}
+
+foreach my $r (keys %enums) {
+ my $s = $enums{$r};
+
+ $r =~ s,([\_\`\*\<\>\&\\\\:\/]),\\\\$1,g;
+
+ print "$r -> $s\n" if ($debug);
+
+ $data =~ s/enum\s+($r)$end_delim/$s$2/g;
+}
+
+foreach my $r (keys %structs) {
+ my $s = $structs{$r};
+
+ $r =~ s,([\_\`\*\<\>\&\\\\:\/]),\\\\$1,g;
+
+ print "$r -> $s\n" if ($debug);
+
+ $data =~ s/struct\s+($r)$end_delim/$s$2/g;
+}
+
+foreach my $r (keys %typedefs) {
+ my $s = $typedefs{$r};
+
+ $r =~ s,([\_\`\*\<\>\&\\\\:\/]),\\\\$1,g;
+
+ print "$r -> $s\n" if ($debug);
+ $data =~ s/($start_delim)($r)$end_delim/$1$s$3/g;
+}
+
+$data =~ s/\\ ([\n\s])/\1/g;
+
+#
+# Generate output file
+#
+
+my $title = $file_in;
+$title =~ s,.*/,,;
+
+open OUT, "> $file_out" or die "Can't open $file_out";
+print OUT ".. -*- coding: utf-8; mode: rst -*-\n\n";
+print OUT "$title\n";
+print OUT "=" x length($title);
+print OUT "\n\n.. parsed-literal::\n\n";
+print OUT $data;
+close OUT;
+
+__END__
+
+=head1 NAME
+
+parse_headers.pl - parse a C file, in order to identify functions, structs,
+enums and defines and create cross-references to a Sphinx book.
+
+=head1 SYNOPSIS
+
+B<parse_headers.pl> [<options>] <C_FILE> <OUT_FILE> [<EXCEPTIONS_FILE>]
+
+Where <options> can be: --debug, --help or --usage.
+
+=head1 OPTIONS
+
+=over 8
+
+=item B<--debug>
+
+Put the script in verbose mode, useful for debugging.
+
+=item B<--usage>
+
+Prints a brief help message and exits.
+
+=item B<--help>
+
+Prints a more detailed help message and exits.
+
+=back
+
+=head1 DESCRIPTION
+
+Convert a C header or source file (C_FILE) into reStructuredText, included
+via a ".. parsed-literal::" block, with cross-references for the
+documentation files that describe the API. It accepts an optional
+EXCEPTIONS_FILE which describes what elements will be either ignored or
+pointed to a non-default reference.
+
+The output is written to OUT_FILE.
+
+It is capable of identifying defines, functions, structs, typedefs,
+enums and enum symbols, and of creating cross-references for all of them.
+It is also capable of distinguishing a #define used for specifying a
+Linux ioctl.
+
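+For instance (an illustrative definition, not from a real header), a line
+like
+
+    #define EXAMPLE_IOCTL _IOR('E', 1, int)
+
+shows up in the parsed-literal output as a cross-reference of the form
+":ref:`EXAMPLE_IOCTL <example_ioctl>`" instead of plain text.
+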
+The EXCEPTIONS_FILE contains two kinds of rules, allowing a symbol to be
+ignored or its default reference to be replaced with a custom one.
+
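+A small sketch of the syntax (the symbol names here are hypothetical):
+
+    ignore define EXAMPLE_INTERNAL_FLAG
+    replace ioctl EXAMPLE_IOCTL :ref:`example-ioctl-reference`
+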
+Please read Documentation/doc-guide/parse-headers.rst in the kernel
+tree for more details.
+
+=head1 BUGS
+
+Report bugs to Mauro Carvalho Chehab <mchehab@kernel.org>
+
+=head1 COPYRIGHT
+
+Copyright (c) 2016 by Mauro Carvalho Chehab <mchehab+samsung@kernel.org>.
+
+License GPLv2: GNU GPL version 2 <https://gnu.org/licenses/gpl.html>.
+
+This is free software: you are free to change and redistribute it.
+There is NO WARRANTY, to the extent permitted by law.
+
+=cut
diff --git a/Documentation/sphinx/requirements.txt b/Documentation/sphinx/requirements.txt
new file mode 100644
index 000000000..489f6626d
--- /dev/null
+++ b/Documentation/sphinx/requirements.txt
@@ -0,0 +1,3 @@
+docutils
+Sphinx==2.4.4
+sphinx_rtd_theme
diff --git a/Documentation/sphinx/rstFlatTable.py b/Documentation/sphinx/rstFlatTable.py
new file mode 100755
index 000000000..2019a55f6
--- /dev/null
+++ b/Documentation/sphinx/rstFlatTable.py
@@ -0,0 +1,374 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8; mode: python -*-
+# pylint: disable=C0330, R0903, R0912
+
+u"""
+ flat-table
+ ~~~~~~~~~~
+
+ Implementation of the ``flat-table`` reST-directive.
+
+ :copyright: Copyright (C) 2016 Markus Heiser
+ :license: GPL Version 2, June 1991 see linux/COPYING for details.
+
+ The ``flat-table`` (:py:class:`FlatTable`) is a double-stage list similar to
+ the ``list-table`` with some additional features:
+
+ * *column-span*: with the role ``cspan`` a cell can be extended through
+ additional columns
+
+ * *row-span*: with the role ``rspan`` a cell can be extended through
+ additional rows
+
+ * *auto span* the rightmost cell of a table row over the missing cells on
+ the right side of that table row. With the option ``:fill-cells:`` this
+ behavior can be changed from *auto span* to *auto fill*, which
+ automatically inserts (empty) cells instead of spanning the last cell.
+
+ Options:
+
+ * header-rows: [int] count of header rows
+ * stub-columns: [int] count of stub columns
+ * widths: [[int] [int] ... ] widths of columns
+ * fill-cells: instead of auto-spanning missing cells, insert (empty) cells
+
+ roles:
+
+ * cspan: [int] additional columns (*morecols*)
+ * rspan: [int] additional rows (*morerows*)
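+
+ A small usage sketch (illustrative only, not taken from an in-tree
+ document)::
+
+     .. flat-table:: example table
+        :header-rows: 1
+
+        * - head col 1
+          - head col 2
+          - head col 3
+
+        * - row 1, col 1
+          - :cspan:`1` row 1, col 2 (spans into col 3)
+
+        * - :rspan:`1` rows 2-3, col 1
+          - row 2, col 2
+          - row 2, col 3
+
+        * - row 3, col 2
+          - row 3, col 3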
+"""
+
+# ==============================================================================
+# imports
+# ==============================================================================
+
+import sys
+
+from docutils import nodes
+from docutils.parsers.rst import directives, roles
+from docutils.parsers.rst.directives.tables import Table
+from docutils.utils import SystemMessagePropagation
+
+# ==============================================================================
+# common globals
+# ==============================================================================
+
+__version__ = '1.0'
+
+PY3 = sys.version_info[0] == 3
+PY2 = sys.version_info[0] == 2
+
+if PY3:
+ # pylint: disable=C0103, W0622
+ unicode = str
+ basestring = str
+
+# ==============================================================================
+def setup(app):
+# ==============================================================================
+
+ app.add_directive("flat-table", FlatTable)
+ roles.register_local_role('cspan', c_span)
+ roles.register_local_role('rspan', r_span)
+
+ return dict(
+ version = __version__,
+ parallel_read_safe = True,
+ parallel_write_safe = True
+ )
+
+# ==============================================================================
+def c_span(name, rawtext, text, lineno, inliner, options=None, content=None):
+# ==============================================================================
+ # pylint: disable=W0613
+
+ options = options if options is not None else {}
+ content = content if content is not None else []
+ nodelist = [colSpan(span=int(text))]
+ msglist = []
+ return nodelist, msglist
+
+# ==============================================================================
+def r_span(name, rawtext, text, lineno, inliner, options=None, content=None):
+# ==============================================================================
+ # pylint: disable=W0613
+
+ options = options if options is not None else {}
+ content = content if content is not None else []
+ nodelist = [rowSpan(span=int(text))]
+ msglist = []
+ return nodelist, msglist
+
+
+# ==============================================================================
+class rowSpan(nodes.General, nodes.Element): pass # pylint: disable=C0103,C0321
+class colSpan(nodes.General, nodes.Element): pass # pylint: disable=C0103,C0321
+# ==============================================================================
+
+# ==============================================================================
+class FlatTable(Table):
+# ==============================================================================
+
+ u"""FlatTable (``flat-table``) directive"""
+
+ option_spec = {
+ 'name': directives.unchanged
+ , 'class': directives.class_option
+ , 'header-rows': directives.nonnegative_int
+ , 'stub-columns': directives.nonnegative_int
+ , 'widths': directives.positive_int_list
+ , 'fill-cells' : directives.flag }
+
+ def run(self):
+
+ if not self.content:
+ error = self.state_machine.reporter.error(
+ 'The "%s" directive is empty; content required.' % self.name,
+ nodes.literal_block(self.block_text, self.block_text),
+ line=self.lineno)
+ return [error]
+
+ title, messages = self.make_title()
+ node = nodes.Element() # anonymous container for parsing
+ self.state.nested_parse(self.content, self.content_offset, node)
+
+ tableBuilder = ListTableBuilder(self)
+ tableBuilder.parseFlatTableNode(node)
+ tableNode = tableBuilder.buildTableNode()
+ # SDK.CONSOLE() # print --> tableNode.asdom().toprettyxml()
+ if title:
+ tableNode.insert(0, title)
+ return [tableNode] + messages
+
+
+# ==============================================================================
+class ListTableBuilder(object):
+# ==============================================================================
+
+ u"""Builds a table from a double-stage list"""
+
+ def __init__(self, directive):
+ self.directive = directive
+ self.rows = []
+ self.max_cols = 0
+
+ def buildTableNode(self):
+
+ colwidths = self.directive.get_column_widths(self.max_cols)
+ if isinstance(colwidths, tuple):
+ # Since docutils 0.13, get_column_widths returns a (widths,
+ # colwidths) tuple, where widths is a string (i.e. 'auto').
+ # See https://sourceforge.net/p/docutils/patches/120/.
+ colwidths = colwidths[1]
+ stub_columns = self.directive.options.get('stub-columns', 0)
+ header_rows = self.directive.options.get('header-rows', 0)
+
+ table = nodes.table()
+ tgroup = nodes.tgroup(cols=len(colwidths))
+ table += tgroup
+
+
+ for colwidth in colwidths:
+ colspec = nodes.colspec(colwidth=colwidth)
+ # FIXME: It seems that the stub method only works well in the
+ # absence of rowspan (observed with the html builder; the docutils-xml
+ # build seems OK). This is not extraordinary, because no table
+ # directive (except *this* flat-table) allows rowspan and stubs to
+ # coexist (there was no use case before flat-table). This should be
+ # reviewed (later).
+ if stub_columns:
+ colspec.attributes['stub'] = 1
+ stub_columns -= 1
+ tgroup += colspec
+ stub_columns = self.directive.options.get('stub-columns', 0)
+
+ if header_rows:
+ thead = nodes.thead()
+ tgroup += thead
+ for row in self.rows[:header_rows]:
+ thead += self.buildTableRowNode(row)
+
+ tbody = nodes.tbody()
+ tgroup += tbody
+
+ for row in self.rows[header_rows:]:
+ tbody += self.buildTableRowNode(row)
+ return table
+
+ def buildTableRowNode(self, row_data, classes=None):
+ classes = [] if classes is None else classes
+ row = nodes.row()
+ for cell in row_data:
+ if cell is None:
+ continue
+ cspan, rspan, cellElements = cell
+
+ attributes = {"classes" : classes}
+ if rspan:
+ attributes['morerows'] = rspan
+ if cspan:
+ attributes['morecols'] = cspan
+ entry = nodes.entry(**attributes)
+ entry.extend(cellElements)
+ row += entry
+ return row
+
+ def raiseError(self, msg):
+ error = self.directive.state_machine.reporter.error(
+ msg
+ , nodes.literal_block(self.directive.block_text
+ , self.directive.block_text)
+ , line = self.directive.lineno )
+ raise SystemMessagePropagation(error)
+
+ def parseFlatTableNode(self, node):
+ u"""parses the node from a :py:class:`FlatTable` directive's body"""
+
+ if len(node) != 1 or not isinstance(node[0], nodes.bullet_list):
+ self.raiseError(
+ 'Error parsing content block for the "%s" directive: '
+ 'exactly one bullet list expected.' % self.directive.name )
+
+ for rowNum, rowItem in enumerate(node[0]):
+ row = self.parseRowItem(rowItem, rowNum)
+ self.rows.append(row)
+ self.roundOffTableDefinition()
+
+ def roundOffTableDefinition(self):
+ u"""Round off the table definition.
+
+ This method rounds off the table definition in :py:member:`rows`.
+
+ * This method inserts the needed ``None`` values for the missing cells
+ arising from spanning cells over rows and/or columns.
+
+ * recount the :py:member:`max_cols`
+
+ * Autospan or fill (option ``fill-cells``) missing cells on the right
+ side of the table-row
+ """
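+ # Sketch of the normalization (cell tuples are (cspan, rspan, elements)):
+ # with two columns and ":rspan:`1`" on the first cell of the first row,
+ # the second row arrives with a single cell,
+ #
+ #   [[(0, 1, [...]), (0, 0, [...])],
+ #    [(0, 0, [...])]]
+ #
+ # and is padded below with None marking the row-spanned position:
+ #
+ #   [[(0, 1, [...]), (0, 0, [...])],
+ #    [None, (0, 0, [...])]]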
+
+ y = 0
+ while y < len(self.rows):
+ x = 0
+
+ while x < len(self.rows[y]):
+ cell = self.rows[y][x]
+ if cell is None:
+ x += 1
+ continue
+ cspan, rspan = cell[:2]
+ # handle colspan in current row
+ for c in range(cspan):
+ try:
+ self.rows[y].insert(x+c+1, None)
+ except: # pylint: disable=W0702
+ # the user sets ambiguous rowspans
+ pass # SDK.CONSOLE()
+ # handle colspan in spanned rows
+ for r in range(rspan):
+ for c in range(cspan + 1):
+ try:
+ self.rows[y+r+1].insert(x+c, None)
+ except: # pylint: disable=W0702
+ # the user sets ambiguous rowspans
+ pass # SDK.CONSOLE()
+ x += 1
+ y += 1
+
+ # Insert the missing cells on the right side. For this, first
+ # re-calculate the max columns.
+
+ for row in self.rows:
+ if self.max_cols < len(row):
+ self.max_cols = len(row)
+
+ # fill with empty cells or cellspan?
+
+ fill_cells = False
+ if 'fill-cells' in self.directive.options:
+ fill_cells = True
+
+ for row in self.rows:
+ x = self.max_cols - len(row)
+ if x and not fill_cells:
+ if row[-1] is None:
+ row.append( ( x - 1, 0, []) )
+ else:
+ cspan, rspan, content = row[-1]
+ row[-1] = (cspan + x, rspan, content)
+ elif x and fill_cells:
+ for i in range(x):
+ row.append( (0, 0, nodes.comment()) )
+
+ def pprint(self):
+ # for debugging
+ retVal = "[ "
+ for row in self.rows:
+ retVal += "[ "
+ for col in row:
+ if col is None:
+ retVal += ('%r' % col)
+ retVal += "\n , "
+ else:
+ content = col[2][0].astext()
+ if len (content) > 30:
+ content = content[:30] + "..."
+ retVal += ('(cspan=%s, rspan=%s, %r)'
+ % (col[0], col[1], content))
+ retVal += "]\n , "
+ retVal = retVal[:-2]
+ retVal += "]\n , "
+ retVal = retVal[:-2]
+ return retVal + "]"
+
+ def parseRowItem(self, rowItem, rowNum):
+ row = []
+ childNo = 0
+ error = False
+ cell = None
+ target = None
+
+ for child in rowItem:
+ if (isinstance(child , nodes.comment)
+ or isinstance(child, nodes.system_message)):
+ pass
+ elif isinstance(child , nodes.target):
+ target = child
+ elif isinstance(child, nodes.bullet_list):
+ childNo += 1
+ cell = child
+ else:
+ error = True
+ break
+
+ if childNo != 1 or error:
+ self.raiseError(
+ 'Error parsing content block for the "%s" directive: '
+ 'two-level bullet list expected, but row %s does not '
+ 'contain a second-level bullet list.'
+ % (self.directive.name, rowNum + 1))
+
+ for cellItem in cell:
+ cspan, rspan, cellElements = self.parseCellItem(cellItem)
+ if target is not None:
+ cellElements.insert(0, target)
+ row.append( (cspan, rspan, cellElements) )
+ return row
+
+ def parseCellItem(self, cellItem):
+ # search and remove cspan, rspan colspec from the first element in
+ # this listItem (field).
+ cspan = rspan = 0
+ if not len(cellItem):
+ return cspan, rspan, []
+ for elem in cellItem[0]:
+ if isinstance(elem, colSpan):
+ cspan = elem.get("span")
+ elem.parent.remove(elem)
+ continue
+ if isinstance(elem, rowSpan):
+ rspan = elem.get("span")
+ elem.parent.remove(elem)
+ continue
+ return cspan, rspan, cellItem[:]