summary refs log tree commit diff stats
path: root/sphinx/ext
diff options
context:
space:
mode:
Diffstat (limited to 'sphinx/ext')
-rw-r--r--sphinx/ext/__init__.py1
-rw-r--r--sphinx/ext/apidoc.py492
-rw-r--r--sphinx/ext/autodoc/__init__.py2850
-rw-r--r--sphinx/ext/autodoc/directive.py151
-rw-r--r--sphinx/ext/autodoc/importer.py342
-rw-r--r--sphinx/ext/autodoc/mock.py198
-rw-r--r--sphinx/ext/autodoc/preserve_defaults.py199
-rw-r--r--sphinx/ext/autodoc/type_comment.py140
-rw-r--r--sphinx/ext/autodoc/typehints.py219
-rw-r--r--sphinx/ext/autosectionlabel.py69
-rw-r--r--sphinx/ext/autosummary/__init__.py847
-rw-r--r--sphinx/ext/autosummary/generate.py754
-rw-r--r--sphinx/ext/autosummary/templates/autosummary/base.rst5
-rw-r--r--sphinx/ext/autosummary/templates/autosummary/class.rst29
-rw-r--r--sphinx/ext/autosummary/templates/autosummary/module.rst60
-rw-r--r--sphinx/ext/coverage.py400
-rw-r--r--sphinx/ext/doctest.py575
-rw-r--r--sphinx/ext/duration.py92
-rw-r--r--sphinx/ext/extlinks.py125
-rw-r--r--sphinx/ext/githubpages.py57
-rw-r--r--sphinx/ext/graphviz.py467
-rw-r--r--sphinx/ext/ifconfig.py81
-rw-r--r--sphinx/ext/imgconverter.py94
-rw-r--r--sphinx/ext/imgmath.py407
-rw-r--r--sphinx/ext/inheritance_diagram.py494
-rw-r--r--sphinx/ext/intersphinx.py742
-rw-r--r--sphinx/ext/linkcode.py77
-rw-r--r--sphinx/ext/mathjax.py126
-rw-r--r--sphinx/ext/napoleon/__init__.py474
-rw-r--r--sphinx/ext/napoleon/docstring.py1363
-rw-r--r--sphinx/ext/todo.py246
-rw-r--r--sphinx/ext/viewcode.py361
32 files changed, 12537 insertions, 0 deletions
diff --git a/sphinx/ext/__init__.py b/sphinx/ext/__init__.py
new file mode 100644
index 0000000..16170d4
--- /dev/null
+++ b/sphinx/ext/__init__.py
@@ -0,0 +1 @@
+"""Contains Sphinx features not activated by default."""
diff --git a/sphinx/ext/apidoc.py b/sphinx/ext/apidoc.py
new file mode 100644
index 0000000..42df848
--- /dev/null
+++ b/sphinx/ext/apidoc.py
@@ -0,0 +1,492 @@
+"""Creates reST files corresponding to Python modules for code documentation.
+
+Parses a directory tree looking for Python modules and packages and creates
+ReST files appropriately to create code documentation with Sphinx. It also
+creates a modules index (named modules.<suffix>).
+
+This is derived from the "sphinx-autopackage" script, which is:
+Copyright 2008 Société des arts technologiques (SAT),
+https://sat.qc.ca/
+"""
+
+from __future__ import annotations
+
+import argparse
+import fnmatch
+import glob
+import locale
+import os
+import re
+import sys
+from copy import copy
+from importlib.machinery import EXTENSION_SUFFIXES
+from os import path
+from typing import TYPE_CHECKING, Any
+
+import sphinx.locale
+from sphinx import __display_version__, package_dir
+from sphinx.cmd.quickstart import EXTENSIONS
+from sphinx.locale import __
+from sphinx.util import logging
+from sphinx.util.osutil import FileAvoidWrite, ensuredir
+from sphinx.util.template import ReSTRenderer
+
+if TYPE_CHECKING:
+ from collections.abc import Generator, Sequence
+
logger = logging.getLogger(__name__)

# automodule options
# The default option set may be overridden via the SPHINX_APIDOC_OPTIONS
# environment variable (a comma-separated list of automodule options).
if 'SPHINX_APIDOC_OPTIONS' in os.environ:
    OPTIONS = os.environ['SPHINX_APIDOC_OPTIONS'].split(',')
else:
    OPTIONS = [
        'members',
        'undoc-members',
        # 'inherited-members', # disabled because there's a bug in sphinx
        'show-inheritance',
    ]

# File suffixes treated as Python modules: plain source, Cython sources,
# and compiled extension-module suffixes for this interpreter.
PY_SUFFIXES = ('.py', '.pyx') + tuple(EXTENSION_SUFFIXES)

# Built-in template directory, used when the user supplies none.
template_dir = path.join(package_dir, 'templates', 'apidoc')
+
+
def is_initpy(filename: str) -> bool:
    """Return True if *filename* names a package ``__init__`` module."""
    base = path.basename(filename)
    # check longer suffixes first so e.g. '.cpython-311.so' wins over '.so'
    for suffix in sorted(PY_SUFFIXES, key=len, reverse=True):
        if base == '__init__' + suffix:
            return True
    return False
+
+
def module_join(*modnames: str | None) -> str:
    """Concatenate the given module names with dots, skipping empty parts."""
    parts = [name for name in modnames if name]
    return '.'.join(parts)
+
+
def is_packagedir(dirname: str | None = None, files: list[str] | None = None) -> bool:
    """Check given *files* contains __init__ file."""
    if dirname is None and files is None:
        return False

    # list the directory ourselves when no file list was supplied
    file_list = os.listdir(dirname) if files is None else files
    return any(is_initpy(f) for f in file_list)
+
+
def write_file(name: str, text: str, opts: Any) -> None:
    """Write the output file for module/package <name>.

    Honours the command line options carried by *opts*: a dry run only
    logs what would happen, and an existing file is skipped unless
    overwriting was forced.
    """
    # *opts* may be an argparse namespace or a plain object; tolerate a
    # missing 'quiet' attribute
    quiet = getattr(opts, 'quiet', None)

    fname = path.join(opts.destdir, f'{name}.{opts.suffix}')
    if opts.dryrun:
        if not quiet:
            logger.info(__('Would create file %s.'), fname)
        return
    if not opts.force and path.isfile(fname):
        if not quiet:
            logger.info(__('File %s already exists, skipping.'), fname)
    else:
        if not quiet:
            logger.info(__('Creating file %s.'), fname)
        # FileAvoidWrite leaves the file untouched when content is unchanged
        with FileAvoidWrite(fname) as f:
            f.write(text)
+
+
def create_module_file(package: str | None, basename: str, opts: Any,
                       user_template_dir: str | None = None) -> None:
    """Build the text of the file and write the file."""
    # copy so the module-level OPTIONS default is never mutated
    options = copy(OPTIONS)
    if opts.includeprivate and 'private-members' not in options:
        options.append('private-members')

    qualname = module_join(package, basename)
    context = {
        'show_headings': not opts.noheadings,
        'basename': basename,
        'qualname': qualname,
        'automodule_options': options,
    }
    # user-supplied templates (if any) take precedence over built-in ones
    if user_template_dir is not None:
        template_path = [user_template_dir, template_dir]
    else:
        template_path = [template_dir]
    text = ReSTRenderer(template_path).render('module.rst_t', context)
    write_file(qualname, text, opts)
+
+
def create_package_file(root: str, master_package: str | None, subroot: str,
                        py_files: list[str],
                        opts: Any, subs: list[str], is_namespace: bool,
                        excludes: Sequence[re.Pattern[str]] = (),
                        user_template_dir: str | None = None,
                        ) -> None:
    """Build the text of the file and write the file."""
    # build a list of sub packages (directories containing an __init__ file)
    subpackages = [module_join(master_package, subroot, pkgname)
                   for pkgname in subs
                   if not is_skipped_package(path.join(root, pkgname), opts, excludes)]
    # build a list of sub modules
    submodules = [sub.split('.')[0] for sub in py_files
                  if not is_skipped_module(path.join(root, sub), opts, excludes) and
                  not is_initpy(sub)]
    # deduplicate: the same module may appear with several suffixes
    submodules = sorted(set(submodules))
    submodules = [module_join(master_package, subroot, modname)
                  for modname in submodules]
    # copy so the module-level OPTIONS default is never mutated
    options = copy(OPTIONS)
    if opts.includeprivate and 'private-members' not in options:
        options.append('private-members')

    pkgname = module_join(master_package, subroot)
    context = {
        'pkgname': pkgname,
        'subpackages': subpackages,
        'submodules': submodules,
        'is_namespace': is_namespace,
        'modulefirst': opts.modulefirst,
        'separatemodules': opts.separatemodules,
        'automodule_options': options,
        'show_headings': not opts.noheadings,
        'maxdepth': opts.maxdepth,
    }
    # user-supplied templates (if any) take precedence over built-in ones
    if user_template_dir is not None:
        template_path = [user_template_dir, template_dir]
    else:
        template_path = [template_dir]
    text = ReSTRenderer(template_path).render('package.rst_t', context)
    write_file(pkgname, text, opts)

    if submodules and opts.separatemodules:
        # with --separate, each submodule gets its own page
        for submodule in submodules:
            create_module_file(None, submodule, opts, user_template_dir)
+
+
def create_modules_toc_file(modules: list[str], opts: Any, name: str = 'modules',
                            user_template_dir: str | None = None) -> None:
    """Create the module's index."""
    modules.sort()
    prev_module = ''
    # iterate over a copy: *modules* is mutated while looping
    for module in modules[:]:
        # look if the module is a subpackage and, if yes, ignore it
        if module.startswith(prev_module + '.'):
            modules.remove(module)
        else:
            prev_module = module

    context = {
        'header': opts.header,
        'maxdepth': opts.maxdepth,
        'docnames': modules,
    }
    # user-supplied templates (if any) take precedence over built-in ones
    if user_template_dir is not None:
        template_path = [user_template_dir, template_dir]
    else:
        template_path = [template_dir]
    text = ReSTRenderer(template_path).render('toc.rst_t', context)
    write_file(name, text, opts)
+
+
def is_skipped_package(dirname: str, opts: Any,
                       excludes: Sequence[re.Pattern[str]] = ()) -> bool:
    """Check if we want to skip this module."""
    if not path.isdir(dirname):
        return False

    files = glob.glob(path.join(dirname, '*.py'))
    regular_package = any(f for f in files if is_initpy(f))
    if not regular_package and not opts.implicit_namespaces:
        # *dirname* is neither a regular package nor treated as an
        # implicit namespace package
        return True

    # Check there is some showable module inside package
    return all(is_excluded(path.join(dirname, f), excludes) for f in files)
+
+
def is_skipped_module(filename: str, opts: Any, _excludes: Sequence[re.Pattern[str]]) -> bool:
    """Check if we want to skip this module."""
    if not path.exists(filename):
        # nonexistent files are always skipped
        return True
    # "private" modules (leading underscore) are skipped unless requested
    is_private = path.basename(filename).startswith('_')
    return is_private and not opts.includeprivate
+
+
def walk(rootpath: str, excludes: Sequence[re.Pattern[str]], opts: Any,
         ) -> Generator[tuple[str, list[str], list[str]], None, None]:
    """Walk through the directory and list files and subdirectories up."""
    followlinks = getattr(opts, 'followlinks', False)
    includeprivate = getattr(opts, 'includeprivate', False)

    for root, subs, files in os.walk(rootpath, followlinks=followlinks):
        # document only Python module files (that aren't excluded)
        files = sorted(f for f in files
                       if f.endswith(PY_SUFFIXES) and
                       not is_excluded(path.join(root, f), excludes))

        # remove hidden ('.') and private ('_') directories, as well as
        # excluded dirs
        if includeprivate:
            exclude_prefixes: tuple[str, ...] = ('.',)
        else:
            exclude_prefixes = ('.', '_')

        # slice-assign so os.walk sees the pruned list and does not
        # descend into the removed directories
        subs[:] = sorted(sub for sub in subs if not sub.startswith(exclude_prefixes) and
                         not is_excluded(path.join(root, sub), excludes))

        yield root, subs, files
+
+
def has_child_module(rootpath: str, excludes: Sequence[re.Pattern[str]], opts: Any) -> bool:
    """Check the given directory contains child module/s (at least one)."""
    for _root, _subs, files in walk(rootpath, excludes, opts):
        if files:
            return True
    return False
+
+
def recurse_tree(rootpath: str, excludes: Sequence[re.Pattern[str]], opts: Any,
                 user_template_dir: str | None = None) -> list[str]:
    """
    Look for every file in the directory tree and create the corresponding
    ReST files.
    """
    implicit_namespaces = getattr(opts, 'implicit_namespaces', False)

    # check if the base directory is a package and get its name
    if is_packagedir(rootpath) or implicit_namespaces:
        root_package = rootpath.split(path.sep)[-1]
    else:
        # otherwise, the base is a directory with packages
        root_package = None

    toplevels = []
    for root, subs, files in walk(rootpath, excludes, opts):
        is_pkg = is_packagedir(None, files)
        is_namespace = not is_pkg and implicit_namespaces
        if is_pkg:
            # move the __init__ file to the front so it is processed first
            for f in files[:]:
                if is_initpy(f):
                    files.remove(f)
                    files.insert(0, f)
        elif root != rootpath:
            # only accept non-package at toplevel unless using implicit namespaces
            if not implicit_namespaces:
                # prune: do not descend into non-package subdirectories
                del subs[:]
                continue

        if is_pkg or is_namespace:
            # we are in a package with something to document
            if subs or len(files) > 1 or not is_skipped_package(root, opts):
                subpackage = root[len(rootpath):].lstrip(path.sep).\
                    replace(path.sep, '.')
                # if this is not a namespace or
                # a namespace and there is something there to document
                if not is_namespace or has_child_module(root, excludes, opts):
                    create_package_file(root, root_package, subpackage,
                                        files, opts, subs, is_namespace, excludes,
                                        user_template_dir)
                    toplevels.append(module_join(root_package, subpackage))
        else:
            # if we are at the root level, we don't require it to be a package
            assert root == rootpath
            assert root_package is None
            for py_file in files:
                if not is_skipped_module(path.join(rootpath, py_file), opts, excludes):
                    module = py_file.split('.')[0]
                    create_module_file(root_package, module, opts, user_template_dir)
                    toplevels.append(module)

    return toplevels
+
+
def is_excluded(root: str, excludes: Sequence[re.Pattern[str]]) -> bool:
    """Check if the directory is in the exclude list.

    Note: by having trailing slashes, we avoid common prefix issues, like
    e.g. an exclude "foo" also accidentally excluding "foobar".
    """
    for pattern in excludes:
        if pattern.match(root):
            return True
    return False
+
+
def get_parser() -> argparse.ArgumentParser:
    """Build and return the sphinx-apidoc command line parser."""
    parser = argparse.ArgumentParser(
        usage='%(prog)s [OPTIONS] -o <OUTPUT_PATH> <MODULE_PATH> '
        '[EXCLUDE_PATTERN, ...]',
        epilog=__('For more information, visit <https://www.sphinx-doc.org/>.'),
        description=__("""
Look recursively in <MODULE_PATH> for Python modules and packages and create
one reST file with automodule directives per package in the <OUTPUT_PATH>.

The <EXCLUDE_PATTERN>s can be file and/or directory patterns that will be
excluded from generation.

Note: By default this script will not overwrite already created files."""))

    parser.add_argument('--version', action='version', dest='show_version',
                        version='%%(prog)s %s' % __display_version__)

    # positional arguments
    parser.add_argument('module_path',
                        help=__('path to module to document'))
    parser.add_argument('exclude_pattern', nargs='*',
                        help=__('fnmatch-style file and/or directory patterns '
                                'to exclude from generation'))

    parser.add_argument('-o', '--output-dir', action='store', dest='destdir',
                        required=True,
                        help=__('directory to place all output'))
    parser.add_argument('-q', action='store_true', dest='quiet',
                        help=__('no output on stdout, just warnings on stderr'))
    parser.add_argument('-d', '--maxdepth', action='store', dest='maxdepth',
                        type=int, default=4,
                        help=__('maximum depth of submodules to show in the TOC '
                                '(default: 4)'))
    parser.add_argument('-f', '--force', action='store_true', dest='force',
                        help=__('overwrite existing files'))
    parser.add_argument('-l', '--follow-links', action='store_true',
                        dest='followlinks', default=False,
                        help=__('follow symbolic links. Powerful when combined '
                                'with collective.recipe.omelette.'))
    parser.add_argument('-n', '--dry-run', action='store_true', dest='dryrun',
                        help=__('run the script without creating files'))
    parser.add_argument('-e', '--separate', action='store_true',
                        dest='separatemodules',
                        help=__('put documentation for each module on its own page'))
    parser.add_argument('-P', '--private', action='store_true',
                        dest='includeprivate',
                        help=__('include "_private" modules'))
    # NOTE: --tocfile and -T/--no-toc intentionally share dest='tocfile';
    # -T stores False there, which disables TOC creation in main()
    parser.add_argument('--tocfile', action='store', dest='tocfile', default='modules',
                        help=__("filename of table of contents (default: modules)"))
    parser.add_argument('-T', '--no-toc', action='store_false', dest='tocfile',
                        help=__("don't create a table of contents file"))
    parser.add_argument('-E', '--no-headings', action='store_true',
                        dest='noheadings',
                        help=__("don't create headings for the module/package "
                                "packages (e.g. when the docstrings already "
                                "contain them)"))
    parser.add_argument('-M', '--module-first', action='store_true',
                        dest='modulefirst',
                        help=__('put module documentation before submodule '
                                'documentation'))
    parser.add_argument('--implicit-namespaces', action='store_true',
                        dest='implicit_namespaces',
                        help=__('interpret module paths according to PEP-0420 '
                                'implicit namespaces specification'))
    parser.add_argument('-s', '--suffix', action='store', dest='suffix',
                        default='rst',
                        help=__('file suffix (default: rst)'))
    parser.add_argument('-F', '--full', action='store_true', dest='full',
                        help=__('generate a full project with sphinx-quickstart'))
    parser.add_argument('-a', '--append-syspath', action='store_true',
                        dest='append_syspath',
                        help=__('append module_path to sys.path, used when --full is given'))
    parser.add_argument('-H', '--doc-project', action='store', dest='header',
                        help=__('project name (default: root module name)'))
    parser.add_argument('-A', '--doc-author', action='store', dest='author',
                        help=__('project author(s), used when --full is given'))
    parser.add_argument('-V', '--doc-version', action='store', dest='version',
                        help=__('project version, used when --full is given'))
    parser.add_argument('-R', '--doc-release', action='store', dest='release',
                        help=__('project release, used when --full is given, '
                                'defaults to --doc-version'))

    group = parser.add_argument_group(__('extension options'))
    group.add_argument('--extensions', metavar='EXTENSIONS', dest='extensions',
                       action='append', help=__('enable arbitrary extensions'))
    # one --ext-<name> convenience flag per quickstart-known extension
    for ext in EXTENSIONS:
        group.add_argument('--ext-%s' % ext, action='append_const',
                           const='sphinx.ext.%s' % ext, dest='extensions',
                           help=__('enable %s extension') % ext)

    group = parser.add_argument_group(__('Project templating'))
    group.add_argument('-t', '--templatedir', metavar='TEMPLATEDIR',
                       dest='templatedir',
                       help=__('template directory for template files'))

    return parser
+
+
def main(argv: Sequence[str] = (), /) -> int:
    """Parse and check the command line arguments."""
    locale.setlocale(locale.LC_ALL, '')
    sphinx.locale.init_console()

    parser = get_parser()
    args = parser.parse_args(argv or sys.argv[1:])

    rootpath = path.abspath(args.module_path)

    # normalize opts

    if args.header is None:
        # default project name: the root directory's basename
        args.header = rootpath.split(path.sep)[-1]
    if args.suffix.startswith('.'):
        args.suffix = args.suffix[1:]
    if not path.isdir(rootpath):
        logger.error(__('%s is not a directory.'), rootpath)
        raise SystemExit(1)
    if not args.dryrun:
        ensuredir(args.destdir)
    # deduplicate patterns (dict.fromkeys preserves order) and translate
    # the fnmatch-style globs into anchored regular expressions
    excludes = tuple(
        re.compile(fnmatch.translate(path.abspath(exclude)))
        for exclude in dict.fromkeys(args.exclude_pattern)
    )
    modules = recurse_tree(rootpath, excludes, args, args.templatedir)

    if args.full:
        # --full: generate a whole Sphinx project via sphinx-quickstart
        from sphinx.cmd import quickstart as qs
        modules.sort()
        prev_module = ''
        text = ''
        for module in modules:
            # skip modules living inside an already-listed package
            if module.startswith(prev_module + '.'):
                continue
            prev_module = module
            text += ' %s\n' % module
        d = {
            'path': args.destdir,
            'sep': False,
            'dot': '_',
            'project': args.header,
            'author': args.author or 'Author',
            'version': args.version or '',
            'release': args.release or args.version or '',
            'suffix': '.' + args.suffix,
            'master': 'index',
            'epub': True,
            'extensions': ['sphinx.ext.autodoc', 'sphinx.ext.viewcode',
                           'sphinx.ext.todo'],
            'makefile': True,
            'batchfile': True,
            'make_mode': True,
            'mastertocmaxdepth': args.maxdepth,
            'mastertoctree': text,
            'language': 'en',
            'module_path': rootpath,
            'append_syspath': args.append_syspath,
        }
        if args.extensions:
            d['extensions'].extend(args.extensions)
        if args.quiet:
            d['quiet'] = True

        # flatten comma-separated extension specs into individual entries
        for ext in d['extensions'][:]:
            if ',' in ext:
                d['extensions'].remove(ext)
                d['extensions'].extend(ext.split(','))

        if not args.dryrun:
            qs.generate(d, silent=True, overwrite=args.force,
                        templatedir=args.templatedir)
    elif args.tocfile:
        # otherwise just write the table-of-contents file (unless -T)
        create_modules_toc_file(modules, args, args.tocfile, args.templatedir)

    return 0
+
+
# So the program can be started with "python -m sphinx.ext.apidoc ..."
if __name__ == "__main__":
    raise SystemExit(main(sys.argv[1:]))
diff --git a/sphinx/ext/autodoc/__init__.py b/sphinx/ext/autodoc/__init__.py
new file mode 100644
index 0000000..8d68f72
--- /dev/null
+++ b/sphinx/ext/autodoc/__init__.py
@@ -0,0 +1,2850 @@
+"""Extension to create automatic documentation from code docstrings.
+
+Automatically insert docstrings for functions, classes or whole modules into
+the doctree, thus avoiding duplication between docstrings and documentation
+for those who like elaborate docstrings.
+"""
+
+from __future__ import annotations
+
+import re
+import sys
+import warnings
+from inspect import Parameter, Signature
+from typing import TYPE_CHECKING, Any, Callable, TypeVar
+
+from docutils.statemachine import StringList
+
+import sphinx
+from sphinx.config import ENUM, Config
+from sphinx.deprecation import RemovedInSphinx80Warning
+from sphinx.ext.autodoc.importer import get_class_members, import_module, import_object
+from sphinx.ext.autodoc.mock import ismock, mock, undecorate
+from sphinx.locale import _, __
+from sphinx.pycode import ModuleAnalyzer, PycodeError
+from sphinx.util import inspect, logging
+from sphinx.util.docstrings import prepare_docstring, separate_metadata
+from sphinx.util.inspect import (
+ evaluate_signature,
+ getdoc,
+ object_description,
+ safe_getattr,
+ stringify_signature,
+)
+from sphinx.util.typing import OptionSpec, get_type_hints, restify, stringify_annotation
+
+if TYPE_CHECKING:
+ from collections.abc import Iterator, Sequence
+ from types import ModuleType
+
+ from sphinx.application import Sphinx
+ from sphinx.environment import BuildEnvironment
+ from sphinx.ext.autodoc.directive import DocumenterBridge
+
logger = logging.getLogger(__name__)


# This type isn't exposed directly in any modules, but can be found
# here in most Python versions
MethodDescriptorType = type(type.__subclasses__)


#: extended signature RE: with explicit module name separated by ::
py_ext_sig_re = re.compile(
    r'''^ ([\w.]+::)? # explicit module name
    ([\w.]+\.)? # module and/or class name(s)
    (\w+) \s* # thing name
    (?: \[\s*(.*)\s*])? # optional: type parameters list
    (?: \((.*)\) # optional: arguments
    (?:\s* -> \s* (.*))? # return annotation
    )? $ # and nothing more
    ''', re.VERBOSE)
# matches dunder names such as __init__ or __call__
special_member_re = re.compile(r'^__\S+__$')
+
+
def identity(x: Any) -> Any:
    """Return *x* unchanged (no-op option converter)."""
    return x
+
+
+class _All:
+ """A special value for :*-members: that matches to any member."""
+
+ def __contains__(self, item: Any) -> bool:
+ return True
+
+ def append(self, item: Any) -> None:
+ pass # nothing
+
+
+class _Empty:
+ """A special value for :exclude-members: that never matches to any member."""
+
+ def __contains__(self, item: Any) -> bool:
+ return False
+
+
# Singleton sentinels shared across autodoc:
ALL = _All()      # ":members:" given without arguments -> everything matches
EMPTY = _Empty()  # ":exclude-members:" without arguments -> nothing matches
# Opaque marker objects; their names indicate the attribute kinds they tag
# (uninitialized, instance-only, and __slots__ attributes respectively).
UNINITIALIZED_ATTR = object()
INSTANCEATTR = object()
SLOTSATTR = object()
+
+
def members_option(arg: Any) -> object | list[str]:
    """Used to convert the :members: option to auto directives."""
    if arg in (None, True):
        # bare ":members:" means "document everything"
        return ALL
    if arg is False:
        return None
    names = (x.strip() for x in arg.split(','))
    return [name for name in names if name]
+
+
def exclude_members_option(arg: Any) -> object | set[str]:
    """Used to convert the :exclude-members: option."""
    if arg in (None, True):
        # bare ":exclude-members:" excludes nothing
        return EMPTY
    stripped = (x.strip() for x in arg.split(','))
    return {name for name in stripped if name}
+
+
def inherited_members_option(arg: Any) -> set[str]:
    """Used to convert the :inherited-members: option to auto directives."""
    if arg in (None, True):
        # bare option: stop at `object` in the MRO
        return {'object'}
    if not arg:
        return set()
    return {x.strip() for x in arg.split(',')}
+
+
def member_order_option(arg: Any) -> str | None:
    """Used to convert the :member-order: option to auto directives."""
    if arg in (None, True):
        return None
    if arg in ('alphabetical', 'bysource', 'groupwise'):
        return arg
    raise ValueError(__('invalid value for member-order option: %s') % arg)
+
+
def class_doc_from_option(arg: Any) -> str | None:
    """Used to convert the :class-doc-from: option to autoclass directives."""
    if arg in ('both', 'class', 'init'):
        return arg
    raise ValueError(__('invalid value for class-doc-from option: %s') % arg)
+
+
# Sentinel returned by annotation_option() to suppress showing the
# representation of an object's value.
SUPPRESS = object()
+
+
def annotation_option(arg: Any) -> Any:
    """Convert the :annotation: option; a bare flag suppresses the value."""
    if arg in (None, True):
        # suppress showing the representation of the object
        return SUPPRESS
    return arg
+
+
def bool_option(arg: Any) -> bool:
    """Convert a flag option to True regardless of *arg*.

    Used instead of ``directives.flag()``, which returns None.
    """
    return True
+
+
def merge_members_option(options: dict) -> None:
    """Fold :private-members: and :special-members: into the :members: option."""
    if options.get('members') is ALL:
        # merging is not needed when members: ALL
        return

    members = options.setdefault('members', [])
    for key in {'private-members', 'special-members'}:
        value = options.get(key)
        if key in options and value not in (ALL, None):
            for member in value:
                if member not in members:
                    members.append(member)
+
+
+# Some useful event listener factories for autodoc-process-docstring.
+
def cut_lines(pre: int, post: int = 0, what: str | None = None) -> Callable:
    """Return a listener that strips *pre* leading and *post* trailing
    lines from every docstring.  If *what* is a sequence of strings,
    only docstrings of a type in *what* will be processed.

    Use like this (e.g. in the ``setup()`` function of :file:`conf.py`)::

        from sphinx.ext.autodoc import cut_lines
        app.connect('autodoc-process-docstring', cut_lines(4, what=['module']))

    This can (and should) be used in place of :confval:`automodule_skip_lines`.
    """
    def process(app: Sphinx, kind: str, name: str, obj: Any, options: Any,
                lines: list[str]) -> None:
        if what and kind not in what:
            return
        del lines[:pre]
        if post:
            # drop a single trailing blank line before cutting the tail
            if lines and not lines[-1]:
                lines.pop(-1)
            del lines[-post:]
        # make sure there is a blank line at the end
        if lines and lines[-1]:
            lines.append('')
    return process
+
+
def between(
    marker: str,
    what: Sequence[str] | None = None,
    keepempty: bool = False,
    exclude: bool = False,
) -> Callable:
    """Return a listener that either keeps, or if *exclude* is True excludes,
    lines between lines that match the *marker* regular expression.  If no line
    matches, the resulting docstring would be empty, so no change will be made
    unless *keepempty* is true.

    If *what* is a sequence of strings, only docstrings of a type in *what* will
    be processed.
    """
    marker_re = re.compile(marker)

    def process(app: Sphinx, what_: str, name: str, obj: Any, options: Any, lines: list[str],
                ) -> None:
        if what and what_ not in what:
            return
        deleted = 0
        delete = not exclude
        orig_lines = lines[:]
        for i, line in enumerate(orig_lines):
            if delete:
                # *deleted* compensates for earlier pops so `i - deleted`
                # still indexes the current line in the shrinking list
                lines.pop(i - deleted)
                deleted += 1
            if marker_re.match(line):
                # a marker line toggles keep/delete mode; the marker line
                # itself is always removed
                delete = not delete
                if delete:
                    lines.pop(i - deleted)
                    deleted += 1
        if not lines and not keepempty:
            # everything was removed: restore the original docstring
            lines[:] = orig_lines
        # make sure there is a blank line at the end
        if lines and lines[-1]:
            lines.append('')
    return process
+
+
+# This class is used only in ``sphinx.ext.autodoc.directive``,
+# But we define this class here to keep compatibility (see #4538)
# This class is used only in ``sphinx.ext.autodoc.directive``,
# But we define this class here to keep compatibility (see #4538)
class Options(dict):
    """A dict/attribute hybrid that returns None on nonexisting keys."""

    def copy(self) -> Options:
        # keep the subclass type instead of returning a plain dict
        return Options(super().copy())

    def __getattr__(self, name: str) -> Any:
        # attribute access maps snake_case to the hyphenated option key
        key = name.replace('_', '-')
        try:
            return self[key]
        except KeyError:
            return None
+
+
class ObjectMember:
    """A member of object.

    This is used for the result of `Documenter.get_module_members()` to
    represent each member of the object.

    .. Note::

       An instance of this class behaves as a tuple of (name, object)
       for compatibility to old Sphinx.  The behavior will be dropped
       in the future.  Therefore extensions should not use the tuple
       interface.
    """

    def __init__(self, name: str, obj: Any, *, docstring: str | None = None,
                 class_: Any = None, skipped: bool = False) -> None:
        self.__name__ = name
        self.object = obj
        self.docstring = docstring
        self.class_ = class_
        self.skipped = skipped

    def __getitem__(self, index):
        # legacy tuple protocol; warn and delegate to (name, object)
        warnings.warn('The tuple interface of ObjectMember is deprecated. '
                      'Use (obj.__name__, obj.object) instead.',
                      RemovedInSphinx80Warning, stacklevel=2)
        return (self.__name__, self.object)[index]
+
+
class Documenter:
    """
    A Documenter knows how to autodocument a single object type.  When
    registered with the AutoDirective, it will be used to document objects
    of that type when needed by autodoc.

    Its *objtype* attribute selects what auto directive it is assigned to
    (the directive name is 'auto' + objtype), and what directive it generates
    by default, though that can be overridden by an attribute called
    *directivetype*.

    A Documenter has an *option_spec* that works like a docutils directive's;
    in fact, it will be used to parse an auto directive's options that matches
    the Documenter.
    """
    #: name by which the directive is called (auto...) and the default
    #: generated directive name
    objtype = 'object'
    #: indentation by which to indent the directive content
    content_indent = ' '
    #: priority if multiple documenters return True from can_document_member
    priority = 0
    #: order if autodoc_member_order is set to 'groupwise'
    member_order = 0
    #: true if the generated content may contain titles
    titles_allowed = True

    # 'noindex' is the historical spelling kept alongside 'no-index'
    option_spec: OptionSpec = {
        'no-index': bool_option,
        'noindex': bool_option,
    }
+
    def get_attr(self, obj: Any, name: str, *defargs: Any) -> Any:
        """getattr() override for types such as Zope interfaces."""
        # delegates to the app-level autodoc-attrgetter machinery
        return autodoc_attrgetter(self.env.app, obj, name, *defargs)
+
    @classmethod
    def can_document_member(cls, member: Any, membername: str, isattr: bool, parent: Any,
                            ) -> bool:
        """Called to see if a member can be documented by this Documenter.

        Abstract hook; concrete Documenter subclasses must override it.
        """
        msg = 'must be implemented in subclasses'
        raise NotImplementedError(msg)
+
    def __init__(self, directive: DocumenterBridge, name: str, indent: str = '') -> None:
        """Store the directive bridge, the raw object *name*, and *indent*."""
        self.directive = directive
        self.config: Config = directive.env.config
        self.env: BuildEnvironment = directive.env
        self.options = directive.genopt
        self.name = name
        self.indent = indent
        # the module and object path within the module, and the fully
        # qualified name (all set after resolve_name succeeds)
        self.modname: str = ''
        self.module: ModuleType | None = None
        self.objpath: list[str] = []
        self.fullname = ''
        # extra signature items (arguments and return annotation,
        # also set after resolve_name succeeds)
        self.args: str | None = None
        self.retann: str = ''
        # the object to document (set after import_object succeeds)
        self.object: Any = None
        self.object_name = ''
        # the parent/owner of the object to document
        self.parent: Any = None
        # the module analyzer to get at attribute docs, or None
        self.analyzer: ModuleAnalyzer | None = None
+
    @property
    def documenters(self) -> dict[str, type[Documenter]]:
        """Returns registered Documenter classes"""
        # the registry lives on the Sphinx application object
        return self.env.app.registry.documenters
+
    def add_line(self, line: str, source: str, *lineno: int) -> None:
        """Append one line of generated reST to the output."""
        if line.strip():  # not a blank line
            self.directive.result.append(self.indent + line, source, *lineno)
        else:
            # emit blank lines unindented to avoid trailing whitespace
            self.directive.result.append('', source, *lineno)
+
    def resolve_name(self, modname: str | None, parents: Any, path: str, base: str,
                     ) -> tuple[str | None, list[str]]:
        """Resolve the module and name of the object to document given by the
        arguments and the current module/class.

        Must return a pair of the module name and a chain of attributes; for
        example, it would return ``('zipfile', ['ZipFile', 'open'])`` for the
        ``zipfile.ZipFile.open`` method.

        Abstract hook; concrete Documenter subclasses must override it.
        """
        msg = 'must be implemented in subclasses'
        raise NotImplementedError(msg)
+
    def parse_name(self) -> bool:
        """Determine what module to import and what attribute to document.

        Returns True and sets *self.modname*, *self.objpath*, *self.fullname*,
        *self.args* and *self.retann* if parsing and resolving was successful.
        """
        # first, parse the definition -- auto directives for classes and
        # functions can contain a signature which is then used instead of
        # an autogenerated one
        matched = py_ext_sig_re.match(self.name)
        if matched is None:
            logger.warning(__('invalid signature for auto%s (%r)') % (self.objtype, self.name),
                           type='autodoc')
            return False
        explicit_modname, path, base, tp_list, args, retann = matched.groups()

        # support explicit module and class name separation via ::
        if explicit_modname is not None:
            modname = explicit_modname[:-2]  # strip the trailing '::'
            parents = path.rstrip('.').split('.') if path else []
        else:
            modname = None
            parents = []

        # resolve_name may import modules; mock configured imports meanwhile
        with mock(self.config.autodoc_mock_imports):
            modname, self.objpath = self.resolve_name(modname, parents, path, base)

        if not modname:
            return False

        self.modname = modname
        self.args = args
        self.retann = retann
        self.fullname = ((self.modname or '') +
                         ('.' + '.'.join(self.objpath) if self.objpath else ''))
        return True
+
    def import_object(self, raiseerror: bool = False) -> bool:
        """Import the object given by *self.modname* and *self.objpath* and set
        it as *self.object*.

        Returns True if successful, False if an error occurred.  With
        *raiseerror* set, the ImportError is re-raised instead of being
        reported as a warning.
        """
        # mock the configured modules so unavailable dependencies do not
        # break the import
        with mock(self.config.autodoc_mock_imports):
            try:
                ret = import_object(self.modname, self.objpath, self.objtype,
                                    attrgetter=self.get_attr,
                                    warningiserror=self.config.autodoc_warningiserror)
                self.module, self.parent, self.object_name, self.object = ret
                if ismock(self.object):
                    # unwrap the mock decoration to work with the real object
                    self.object = undecorate(self.object)
                return True
            except ImportError as exc:
                if raiseerror:
                    raise
                logger.warning(exc.args[0], type='autodoc', subtype='import_object')
                # schedule a re-read: the import may succeed on a later build
                self.env.note_reread()
                return False
+
+ def get_real_modname(self) -> str:
+ """Get the real module name of an object to document.
+
+ It can differ from the name of the module through which the object was
+ imported.
+ """
+ return self.get_attr(self.object, '__module__', None) or self.modname
+
+ def check_module(self) -> bool:
+ """Check if *self.object* is really defined in the module given by
+ *self.modname*.
+ """
+ if self.options.imported_members:
+ return True
+
+ subject = inspect.unpartial(self.object)
+ modname = self.get_attr(subject, '__module__', None)
+ if modname and modname != self.modname:
+ return False
+ return True
+
+ def format_args(self, **kwargs: Any) -> str:
+ """Format the argument signature of *self.object*.
+
+ Should return None if the object does not have a signature.
+ """
+ return ''
+
+ def format_name(self) -> str:
+ """Format the name of *self.object*.
+
+ This normally should be something that can be parsed by the generated
+ directive, but doesn't need to be (Sphinx will display it unparsed
+ then).
+ """
+ # normally the name doesn't contain the module (except for module
+ # directives of course)
+ return '.'.join(self.objpath) or self.modname
+
+ def _call_format_args(self, **kwargs: Any) -> str:
+ if kwargs:
+ try:
+ return self.format_args(**kwargs)
+ except TypeError:
+ # avoid chaining exceptions, by putting nothing here
+ pass
+
+ # retry without arguments for old documenters
+ return self.format_args()
+
    def format_signature(self, **kwargs: Any) -> str:
        """Format the signature (arguments and return annotation) of the object.

        Let the user process it via the ``autodoc-process-signature`` event.
        Returns an empty string when no signature could be determined.
        """
        if self.args is not None:
            # signature given explicitly
            args = "(%s)" % self.args
            retann = self.retann
        else:
            # try to introspect the signature
            try:
                retann = None
                args = self._call_format_args(**kwargs)
                if args:
                    # split a combined "(...) -> ret" string into its parts
                    matched = re.match(r'^(\(.*\))\s+->\s+(.*)$', args)
                    if matched:
                        args = matched.group(1)
                        retann = matched.group(2)
            except Exception as exc:
                logger.warning(__('error while formatting arguments for %s: %s'),
                               self.fullname, exc, type='autodoc')
                args = None

        # event handlers may replace the (args, retann) pair entirely
        result = self.env.events.emit_firstresult('autodoc-process-signature',
                                                  self.objtype, self.fullname,
                                                  self.object, self.options, args, retann)
        if result:
            args, retann = result

        if args is not None:
            return args + ((' -> %s' % retann) if retann else '')
        else:
            return ''
+
    def add_directive_header(self, sig: str) -> None:
        """Add the directive header and options to the generated content.

        *sig* may contain several signatures separated by newlines;
        continuation lines are aligned under the first one.
        """
        domain = getattr(self, 'domain', 'py')
        directive = getattr(self, 'directivetype', self.objtype)
        name = self.format_name()
        sourcename = self.get_sourcename()

        # one signature per line, indented by column
        prefix = f'.. {domain}:{directive}:: '
        for i, sig_line in enumerate(sig.split("\n")):
            self.add_line(f'{prefix}{name}{sig_line}',
                          sourcename)
            if i == 0:
                # subsequent lines are padded to align with the first
                prefix = " " * len(prefix)

        if self.options.no_index or self.options.noindex:
            self.add_line(' :no-index:', sourcename)
        if self.objpath:
            # Be explicit about the module, this is necessary since .. class::
            # etc. don't support a prepended module name
            self.add_line(' :module: %s' % self.modname, sourcename)
+
    def get_doc(self) -> list[list[str]] | None:
        """Decode and return lines of the docstring(s) for the object.

        When it returns None, autodoc-process-docstring will not be called for this
        object.
        """
        docstring = getdoc(self.object, self.get_attr, self.config.autodoc_inherit_docstrings,
                           self.parent, self.object_name)
        if docstring:
            # expand tabs per the current document settings
            tab_width = self.directive.state.document.settings.tab_width
            return [prepare_docstring(docstring, tab_width)]
        # empty list (not None): the autodoc-process-docstring event still
        # fires and may contribute content
        return []
+
+ def process_doc(self, docstrings: list[list[str]]) -> Iterator[str]:
+ """Let the user process the docstrings before adding them."""
+ for docstringlines in docstrings:
+ if self.env.app:
+ # let extensions preprocess docstrings
+ self.env.app.emit('autodoc-process-docstring',
+ self.objtype, self.fullname, self.object,
+ self.options, docstringlines)
+
+ if docstringlines and docstringlines[-1] != '':
+ # append a blank line to the end of the docstring
+ docstringlines.append('')
+
+ yield from docstringlines
+
+ def get_sourcename(self) -> str:
+ if (inspect.safe_getattr(self.object, '__module__', None) and
+ inspect.safe_getattr(self.object, '__qualname__', None)):
+ # Get the correct location of docstring from self.object
+ # to support inherited methods
+ fullname = f'{self.object.__module__}.{self.object.__qualname__}'
+ else:
+ fullname = self.fullname
+
+ if self.analyzer:
+ return f'{self.analyzer.srcname}:docstring of {fullname}'
+ else:
+ return 'docstring of %s' % fullname
+
    def add_content(self, more_content: StringList | None) -> None:
        """Add content from docstrings, attribute documentation and user."""
        # stays True unless comment-style attribute documentation is found,
        # which then takes precedence over the object's own docstring
        docstring = True

        # set sourcename and add content from attribute documentation
        sourcename = self.get_sourcename()
        if self.analyzer:
            attr_docs = self.analyzer.find_attr_docs()
            if self.objpath:
                key = ('.'.join(self.objpath[:-1]), self.objpath[-1])
                if key in attr_docs:
                    docstring = False
                    # make a copy of docstring for attributes to avoid cache
                    # the change of autodoc-process-docstring event.
                    attribute_docstrings = [list(attr_docs[key])]

                    for i, line in enumerate(self.process_doc(attribute_docstrings)):
                        self.add_line(line, sourcename, i)

        # add content from docstrings
        if docstring:
            docstrings = self.get_doc()
            if docstrings is None:
                # Do not call autodoc-process-docstring on get_doc() returns None.
                pass
            else:
                if not docstrings:
                    # append at least a dummy docstring, so that the event
                    # autodoc-process-docstring is fired and can add some
                    # content if desired
                    docstrings.append([])
                for i, line in enumerate(self.process_doc(docstrings)):
                    self.add_line(line, sourcename, i)

        # add additional content (e.g. from document), if present
        if more_content:
            for line, src in zip(more_content.data, more_content.items):
                self.add_line(line, src[0], src[1])
+
+ def get_object_members(self, want_all: bool) -> tuple[bool, list[ObjectMember]]:
+ """Return `(members_check_module, members)` where `members` is a
+ list of `(membername, member)` pairs of the members of *self.object*.
+
+ If *want_all* is True, return all members. Else, only return those
+ members given by *self.options.members* (which may also be None).
+ """
+ msg = 'must be implemented in subclasses'
+ raise NotImplementedError(msg)
+
    def filter_members(self, members: list[ObjectMember], want_all: bool,
                       ) -> list[tuple[str, Any, bool]]:
        """Filter the given member list.

        Members are skipped if

        - they are private (except if given explicitly or the private-members
          option is set)
        - they are special methods (except if given explicitly or the
          special-members option is set)
        - they are undocumented (except if the undoc-members option is set)

        The user can override the skipping decision by connecting to the
        ``autodoc-skip-member`` event.

        Returns a list of ``(name, member, is_attribute)`` tuples.
        """
        def is_filtered_inherited_member(name: str, obj: Any) -> bool:
            # True when *name* comes from a superclass that was named in the
            # inherited-members option (such members are filtered out)
            inherited_members = self.options.inherited_members or set()

            if inspect.isclass(self.object):
                for cls in self.object.__mro__:
                    if cls.__name__ in inherited_members and cls != self.object:
                        # given member is a member of specified *super class*
                        return True
                    if name in cls.__dict__:
                        return False
                    if name in self.get_attr(cls, '__annotations__', {}):
                        return False
                    if isinstance(obj, ObjectMember) and obj.class_ is cls:
                        return False

            return False

        ret = []

        # search for members in source code too
        namespace = '.'.join(self.objpath)  # will be empty for modules

        if self.analyzer:
            attr_docs = self.analyzer.find_attr_docs()
        else:
            attr_docs = {}

        # process members and determine which to skip
        for obj in members:
            try:
                membername = obj.__name__
                member = obj.object
            except AttributeError:
                if isinstance(obj, ObjectMember):
                    raise
                # To be removed, retained for compatibility.
                # See https://github.com/sphinx-doc/sphinx/issues/11631
                membername, member = obj
                warnings.warn(
                    'Returning tuples of (name, object) as '
                    'the second return value from get_object_members() is deprecated. '
                    'Return ObjectMember(name, object) instances instead.',
                    RemovedInSphinx80Warning, stacklevel=2,
                )

            # if isattr is True, the member is documented as an attribute
            isattr = member is INSTANCEATTR or (namespace, membername) in attr_docs

            try:
                doc = getdoc(member, self.get_attr, self.config.autodoc_inherit_docstrings,
                             self.object, membername)
                if not isinstance(doc, str):
                    # Ignore non-string __doc__
                    doc = None

                # if the member __doc__ is the same as self's __doc__, it's just
                # inherited and therefore not the member's doc
                cls = self.get_attr(member, '__class__', None)
                if cls:
                    cls_doc = self.get_attr(cls, '__doc__', None)
                    if cls_doc == doc:
                        doc = None

                if isinstance(obj, ObjectMember) and obj.docstring:
                    # hack for ClassDocumenter to inject docstring via ObjectMember
                    doc = obj.docstring

                doc, metadata = separate_metadata(doc)
                has_doc = bool(doc)

                if 'private' in metadata:
                    # consider a member private if docstring has "private" metadata
                    isprivate = True
                elif 'public' in metadata:
                    # consider a member public if docstring has "public" metadata
                    isprivate = False
                else:
                    isprivate = membername.startswith('_')

                # decision cascade: the first matching category wins
                keep = False
                if ismock(member) and (namespace, membername) not in attr_docs:
                    # mocked module or object
                    pass
                elif (self.options.exclude_members and
                      membername in self.options.exclude_members):
                    # remove members given by exclude-members
                    keep = False
                elif want_all and special_member_re.match(membername):
                    # special __methods__
                    if (self.options.special_members and
                            membername in self.options.special_members):
                        if membername == '__doc__':  # NoQA: SIM114
                            keep = False
                        elif is_filtered_inherited_member(membername, obj):
                            keep = False
                        else:
                            keep = has_doc or self.options.undoc_members
                    else:
                        keep = False
                elif (namespace, membername) in attr_docs:
                    if want_all and isprivate:
                        if self.options.private_members is None:
                            keep = False
                        else:
                            keep = membername in self.options.private_members
                    else:
                        # keep documented attributes
                        keep = True
                elif want_all and isprivate:
                    if has_doc or self.options.undoc_members:
                        if self.options.private_members is None:  # NoQA: SIM114
                            keep = False
                        elif is_filtered_inherited_member(membername, obj):
                            keep = False
                        else:
                            keep = membername in self.options.private_members
                    else:
                        keep = False
                else:
                    if (self.options.members is ALL and
                            is_filtered_inherited_member(membername, obj)):
                        keep = False
                    else:
                        # ignore undocumented members if :undoc-members: is not given
                        keep = has_doc or self.options.undoc_members

                if isinstance(obj, ObjectMember) and obj.skipped:
                    # forcedly skipped member (ex. a module attribute not defined in __all__)
                    keep = False

                # give the user a chance to decide whether this member
                # should be skipped
                if self.env.app:
                    # let extensions preprocess docstrings
                    skip_user = self.env.app.emit_firstresult(
                        'autodoc-skip-member', self.objtype, membername, member,
                        not keep, self.options)
                    if skip_user is not None:
                        keep = not skip_user
            except Exception as exc:
                logger.warning(__('autodoc: failed to determine %s.%s (%r) to be documented, '
                                  'the following exception was raised:\n%s'),
                               self.name, membername, member, exc, type='autodoc')
                keep = False

            if keep:
                ret.append((membername, member, isattr))

        return ret
+
    def document_members(self, all_members: bool = False) -> None:
        """Generate reST for member documentation.

        If *all_members* is True, document all members, else those given by
        *self.options.members*.
        """
        # set current namespace for finding members
        self.env.temp_data['autodoc:module'] = self.modname
        if self.objpath:
            self.env.temp_data['autodoc:class'] = self.objpath[0]

        want_all = (all_members or
                    self.options.inherited_members or
                    self.options.members is ALL)
        # find out which members are documentable
        members_check_module, members = self.get_object_members(want_all)

        # document non-skipped members
        memberdocumenters: list[tuple[Documenter, bool]] = []
        for (mname, member, isattr) in self.filter_members(members, want_all):
            classes = [cls for cls in self.documenters.values()
                       if cls.can_document_member(member, mname, isattr, self)]
            if not classes:
                # don't know how to document this member
                continue
            # prefer the documenter with the highest priority
            classes.sort(key=lambda cls: cls.priority)
            # give explicitly separated module name, so that members
            # of inner classes can be documented
            full_mname = f'{self.modname}::' + '.'.join((*self.objpath, mname))
            documenter = classes[-1](self.directive, full_mname, self.indent)
            memberdocumenters.append((documenter, isattr))

        # member-order option wins over the autodoc_member_order config value
        member_order = self.options.member_order or self.config.autodoc_member_order
        memberdocumenters = self.sort_members(memberdocumenters, member_order)

        for documenter, isattr in memberdocumenters:
            documenter.generate(
                all_members=True, real_modname=self.real_modname,
                check_module=members_check_module and not isattr)

        # reset current objects
        self.env.temp_data['autodoc:module'] = None
        self.env.temp_data['autodoc:class'] = None
+
+ def sort_members(self, documenters: list[tuple[Documenter, bool]],
+ order: str) -> list[tuple[Documenter, bool]]:
+ """Sort the given member list."""
+ if order == 'groupwise':
+ # sort by group; alphabetically within groups
+ documenters.sort(key=lambda e: (e[0].member_order, e[0].name))
+ elif order == 'bysource':
+ # By default, member discovery order matches source order,
+ # as dicts are insertion-ordered from Python 3.7.
+ if self.analyzer:
+ # sort by source order, by virtue of the module analyzer
+ tagorder = self.analyzer.tagorder
+
+ def keyfunc(entry: tuple[Documenter, bool]) -> int:
+ fullname = entry[0].name.split('::')[1]
+ return tagorder.get(fullname, len(tagorder))
+ documenters.sort(key=keyfunc)
+ else: # alphabetical
+ documenters.sort(key=lambda e: e[0].name)
+
+ return documenters
+
    def generate(
        self,
        more_content: StringList | None = None,
        real_modname: str | None = None,
        check_module: bool = False,
        all_members: bool = False,
    ) -> None:
        """Generate reST for the object given by *self.name*, and possibly for
        its members.

        If *more_content* is given, include that content. If *real_modname* is
        given, use that module name to find attribute docs. If *check_module* is
        True, only generate if the object is defined in the module name it is
        imported from. If *all_members* is True, document all members.
        """
        if not self.parse_name():
            # need a module to import
            logger.warning(
                __("don't know which module to import for autodocumenting "
                   '%r (try placing a "module" or "currentmodule" directive '
                   'in the document, or giving an explicit module name)') %
                self.name, type='autodoc')
            return

        # now, import the module and get object to document
        if not self.import_object():
            return

        # If there is no real module defined, figure out which to use.
        # The real module is used in the module analyzer to look up the module
        # where the attribute documentation would actually be found in.
        # This is used for situations where you have a module that collects the
        # functions and classes of internal submodules.
        guess_modname = self.get_real_modname()
        self.real_modname: str = real_modname or guess_modname

        # try to also get a source code analyzer for attribute docs
        try:
            self.analyzer = ModuleAnalyzer.for_module(self.real_modname)
            # parse right now, to get PycodeErrors on parsing (results will
            # be cached anyway)
            self.analyzer.find_attr_docs()
        except PycodeError as exc:
            logger.debug('[autodoc] module analyzer failed: %s', exc)
            # no source file -- e.g. for builtin and C modules
            self.analyzer = None
            # at least add the module.__file__ as a dependency
            if module___file__ := getattr(self.module, '__file__', ''):
                self.directive.record_dependencies.add(module___file__)
        else:
            self.directive.record_dependencies.add(self.analyzer.srcname)

        if self.real_modname != guess_modname:
            # Add module to dependency list if target object is defined in other module.
            try:
                analyzer = ModuleAnalyzer.for_module(guess_modname)
                self.directive.record_dependencies.add(analyzer.srcname)
            except PycodeError:
                pass

        # warn about undocumented mocked objects -- usually unintended
        docstrings: list[str] = sum(self.get_doc() or [], [])
        if ismock(self.object) and not docstrings:
            logger.warning(__('A mocked object is detected: %r'),
                           self.name, type='autodoc')

        # check __module__ of object (for members not given explicitly)
        if check_module:
            if not self.check_module():
                return

        sourcename = self.get_sourcename()

        # make sure that the result starts with an empty line. This is
        # necessary for some situations where another directive preprocesses
        # reST and no starting newline is present
        self.add_line('', sourcename)

        # format the object's signature, if any
        try:
            sig = self.format_signature()
        except Exception as exc:
            logger.warning(__('error while formatting signature for %s: %s'),
                           self.fullname, exc, type='autodoc')
            return

        # generate the directive header and options, if applicable
        self.add_directive_header(sig)
        self.add_line('', sourcename)

        # e.g. the module directive doesn't have content
        self.indent += self.content_indent

        # add all content (from docstrings, attribute docs etc.)
        self.add_content(more_content)

        # document members, if possible
        self.document_members(all_members)
+
+
class ModuleDocumenter(Documenter):
    """
    Specialized Documenter subclass for modules.
    """
    objtype = 'module'
    content_indent = ''
    # extra indentation applied to the module docstring (see add_content())
    _extra_indent = ' '

    option_spec: OptionSpec = {
        'members': members_option, 'undoc-members': bool_option,
        'no-index': bool_option, 'inherited-members': inherited_members_option,
        'show-inheritance': bool_option, 'synopsis': identity,
        'platform': identity, 'deprecated': bool_option,
        'member-order': member_order_option, 'exclude-members': exclude_members_option,
        'private-members': members_option, 'special-members': members_option,
        'imported-members': bool_option, 'ignore-module-all': bool_option,
        'no-value': bool_option,
        'noindex': bool_option,
    }

    def __init__(self, *args: Any) -> None:
        super().__init__(*args)
        merge_members_option(self.options)
        # the module's __all__, filled by import_object() unless absent or ignored
        self.__all__: Sequence[str] | None = None

    def add_content(self, more_content: StringList | None) -> None:
        """Add the module docstring (with extra indentation) and user content."""
        old_indent = self.indent
        self.indent += self._extra_indent
        super().add_content(None)
        self.indent = old_indent
        if more_content:
            for line, src in zip(more_content.data, more_content.items):
                self.add_line(line, src[0], src[1])

    @classmethod
    def can_document_member(cls, member: Any, membername: str, isattr: bool, parent: Any,
                            ) -> bool:
        """Modules are never documented as members of other objects."""
        # don't document submodules automatically
        return False

    def resolve_name(self, modname: str | None, parents: Any, path: str, base: str,
                     ) -> tuple[str | None, list[str]]:
        """Resolve to the module name itself; modules have no object path."""
        if modname is not None:
            logger.warning(__('"::" in automodule name doesn\'t make sense'),
                           type='autodoc')
        return (path or '') + base, []

    def parse_name(self) -> bool:
        """Parse the module name, warning about stray signature parts."""
        ret = super().parse_name()
        if self.args or self.retann:
            logger.warning(__('signature arguments or return annotation '
                              'given for automodule %s') % self.fullname,
                           type='autodoc')
        return ret

    def import_object(self, raiseerror: bool = False) -> bool:
        """Import the module and record its ``__all__`` (unless ignored)."""
        ret = super().import_object(raiseerror)

        try:
            if not self.options.ignore_module_all:
                self.__all__ = inspect.getall(self.object)
        except ValueError as exc:
            # invalid __all__ found.
            logger.warning(__('__all__ should be a list of strings, not %r '
                              '(in module %s) -- ignoring __all__') %
                           (exc.args[0], self.fullname), type='autodoc')

        return ret

    def add_directive_header(self, sig: str) -> None:
        """Add the py:module directive header plus module-specific options."""
        Documenter.add_directive_header(self, sig)

        sourcename = self.get_sourcename()

        # add some module-specific options
        if self.options.synopsis:
            self.add_line(' :synopsis: ' + self.options.synopsis, sourcename)
        if self.options.platform:
            self.add_line(' :platform: ' + self.options.platform, sourcename)
        if self.options.deprecated:
            self.add_line(' :deprecated:', sourcename)

    def get_module_members(self) -> dict[str, ObjectMember]:
        """Get members of target module."""
        if self.analyzer:
            attr_docs = self.analyzer.attr_docs
        else:
            attr_docs = {}

        members: dict[str, ObjectMember] = {}
        for name in dir(self.object):
            try:
                value = safe_getattr(self.object, name, None)
                if ismock(value):
                    value = undecorate(value)
                docstring = attr_docs.get(('', name), [])
                members[name] = ObjectMember(name, value, docstring="\n".join(docstring))
            except AttributeError:
                continue

        # annotation only member (ex. attr: int)
        for name in inspect.getannotations(self.object):
            if name not in members:
                docstring = attr_docs.get(('', name), [])
                members[name] = ObjectMember(name, INSTANCEATTR,
                                             docstring="\n".join(docstring))

        return members

    def get_object_members(self, want_all: bool) -> tuple[bool, list[ObjectMember]]:
        """Select the module members to document, honouring ``__all__``."""
        members = self.get_module_members()
        if want_all:
            if self.__all__ is None:
                # for implicit module members, check __module__ to avoid
                # documenting imported objects
                return True, list(members.values())
            else:
                # members absent from __all__ are marked, not dropped, so the
                # autodoc-skip-member event can still override the decision
                for member in members.values():
                    if member.__name__ not in self.__all__:
                        member.skipped = True

                return False, list(members.values())
        else:
            memberlist = self.options.members or []
            ret = []
            for name in memberlist:
                if name in members:
                    ret.append(members[name])
                else:
                    logger.warning(__('missing attribute mentioned in :members: option: '
                                      'module %s, attribute %s') %
                                   (safe_getattr(self.object, '__name__', '???'), name),
                                   type='autodoc')
            return False, ret

    def sort_members(self, documenters: list[tuple[Documenter, bool]],
                     order: str) -> list[tuple[Documenter, bool]]:
        """Sort members, following ``__all__`` order for ``bysource``."""
        if order == 'bysource' and self.__all__:
            assert self.__all__ is not None
            module_all = self.__all__
            module_all_set = set(module_all)
            module_all_len = len(module_all)

            # Sort alphabetically first (for members not listed on the __all__)
            documenters.sort(key=lambda e: e[0].name)

            # Sort by __all__
            def keyfunc(entry: tuple[Documenter, bool]) -> int:
                name = entry[0].name.split('::')[1]
                if name in module_all_set:
                    return module_all.index(name)
                else:
                    return module_all_len
            documenters.sort(key=keyfunc)

            return documenters
        else:
            return super().sort_members(documenters, order)
+
+
class ModuleLevelDocumenter(Documenter):
    """
    Specialized Documenter subclass for objects on module level (functions,
    classes, data/constants).
    """
    def resolve_name(self, modname: str | None, parents: Any, path: str, base: str,
                     ) -> tuple[str | None, list[str]]:
        """Resolve the containing module of a module-level object."""
        objpath = parents + [base]
        if modname is not None:
            return modname, objpath
        if path:
            return path.rstrip('.'), objpath

        # a toplevel object without an explicit module may be nested in
        # another auto directive ...
        current = self.env.temp_data.get('autodoc:module')
        # ... or fall under the scope of a module directive
        if not current:
            current = self.env.ref_context.get('py:module')
        # ... otherwise it stays None, which means the name is invalid
        return current, objpath
+
+
class ClassLevelDocumenter(Documenter):
    """
    Specialized Documenter subclass for objects on class level (methods,
    attributes).
    """
    def resolve_name(self, modname: str | None, parents: Any, path: str, base: str,
                     ) -> tuple[str | None, list[str]]:
        """Resolve module and class for a class-level member.

        Falls back to the enclosing auto directive's class and module (or
        the active ``py:class``/``py:module`` scope) when no explicit path
        is given; returns ``(None, [])`` when nothing can be determined.
        """
        if modname is not None:
            return modname, parents + [base]

        if path:
            mod_cls = path.rstrip('.')
        else:
            # if documenting a class-level object without path,
            # there must be a current class, either from a parent
            # auto directive ...
            mod_cls_ = self.env.temp_data.get('autodoc:class')
            # ... or from a class directive
            if mod_cls_ is None:
                mod_cls_ = self.env.ref_context.get('py:class')
            # ... if still None, there's no way to know
            if mod_cls_ is None:
                return None, []
            mod_cls = mod_cls_
        # the incoming *parents* list is deliberately replaced with the
        # class part of mod_cls
        modname, sep, cls = mod_cls.rpartition('.')
        parents = [cls]
        # if the module name is still missing, get it like above
        if not modname:
            modname = self.env.temp_data.get('autodoc:module')
        if not modname:
            modname = self.env.ref_context.get('py:module')
        # ... else, it stays None, which means invalid
        return modname, parents + [base]
+
+
class DocstringSignatureMixin:
    """
    Mixin for FunctionDocumenter and MethodDocumenter to provide the
    feature of reading the signature from the docstring.
    """
    # docstrings with the signature line(s) stripped, set by _find_signature()
    _new_docstrings: list[list[str]] | None = None
    # additional signatures found after the first; the class-level default is
    # never mutated (_find_signature() rebinds an instance attribute)
    _signatures: list[str] = []

    def _find_signature(self) -> tuple[str | None, str | None] | None:
        """Scan the head of the docstring(s) for signature lines.

        Returns the ``(args, retann)`` pair of the first matching
        signature, or None when the docstring does not start with one.
        Extra signatures are collected in *self._signatures* and the
        matched lines are removed from *self._new_docstrings*.
        """
        # candidates of the object name
        valid_names = [self.objpath[-1]]  # type: ignore[attr-defined]
        if isinstance(self, ClassDocumenter):
            valid_names.append('__init__')
            if hasattr(self.object, '__mro__'):
                valid_names.extend(cls.__name__ for cls in self.object.__mro__)

        docstrings = self.get_doc()
        if docstrings is None:
            # NOTE: this branch returns a (None, None) pair while the
            # "not found" path below returns a bare None; callers unpack
            # either via an "is not None" check
            return None, None
        self._new_docstrings = docstrings[:]
        self._signatures = []
        result = None
        for i, doclines in enumerate(docstrings):
            for j, line in enumerate(doclines):
                if not line:
                    # no lines in docstring, no match
                    break

                if line.endswith('\\'):
                    # strip a line-continuation backslash
                    line = line.rstrip('\\').rstrip()

                # match first line of docstring against signature RE
                match = py_ext_sig_re.match(line)
                if not match:
                    break
                exmod, path, base, tp_list, args, retann = match.groups()

                # the base name must match ours
                if base not in valid_names:
                    break

                # re-prepare docstring to ignore more leading indentation
                directive = self.directive  # type: ignore[attr-defined]
                tab_width = directive.state.document.settings.tab_width
                self._new_docstrings[i] = prepare_docstring('\n'.join(doclines[j + 1:]),
                                                            tab_width)

                if result is None:
                    # first signature
                    result = args, retann
                else:
                    # subsequent signatures
                    self._signatures.append(f"({args}) -> {retann}")

            if result is not None:
                # finish the loop when signature found
                break

        return result

    def get_doc(self) -> list[list[str]] | None:
        """Return docstrings, preferring the signature-stripped copies."""
        if self._new_docstrings is not None:
            return self._new_docstrings
        return super().get_doc()  # type: ignore[misc]

    def format_signature(self, **kwargs: Any) -> str:
        """Format the signature, preferring one found in the docstring."""
        self.args: str | None
        if (self.args is None
                and self.config.autodoc_docstring_signature):  # type: ignore[attr-defined]
            # only act if a signature is not explicitly given already, and if
            # the feature is enabled
            result = self._find_signature()
            if result is not None:
                self.args, self.retann = result
        sig = super().format_signature(**kwargs)  # type: ignore[misc]
        if self._signatures:
            # overloaded: append every extra docstring signature on its own line
            return "\n".join([sig] + self._signatures)
        else:
            return sig
+
+
class DocstringStripSignatureMixin(DocstringSignatureMixin):
    """
    Mixin for AttributeDocumenter to provide the
    feature of stripping any function signature from the docstring.
    """
    def format_signature(self, **kwargs: Any) -> str:
        """Strip a docstring signature, keeping only its return annotation."""
        if (self.args is None
                and self.config.autodoc_docstring_signature):  # type: ignore[attr-defined]
            # act only when no signature was given explicitly and the
            # feature is enabled
            found = self._find_signature()
            if found is not None:
                # Discarding the argument part is the only difference from
                # DocstringSignatureMixin.format_signature: the base
                # Documenter.format_signature formats from self.args, which
                # deliberately stays None here.
                _discarded_args, self.retann = found
        return super().format_signature(**kwargs)
+
+
class FunctionDocumenter(DocstringSignatureMixin, ModuleLevelDocumenter):  # type: ignore[misc]
    """
    Specialized Documenter subclass for functions.
    """
    objtype = 'function'
    member_order = 30

    @classmethod
    def can_document_member(cls, member: Any, membername: str, isattr: bool, parent: Any,
                            ) -> bool:
        """Document functions, builtins and module-level routines."""
        # supports functions, builtins and bound methods exported at the module level
        return (inspect.isfunction(member) or inspect.isbuiltin(member) or
                (inspect.isroutine(member) and isinstance(parent, ModuleDocumenter)))

    def format_args(self, **kwargs: Any) -> str:
        """Introspect and stringify the function's signature."""
        if self.config.autodoc_typehints in ('none', 'description'):
            # type hints are rendered elsewhere (or suppressed entirely)
            kwargs.setdefault('show_annotation', False)
        if self.config.autodoc_typehints_format == "short":
            kwargs.setdefault('unqualified_typehints', True)

        try:
            self.env.app.emit('autodoc-before-process-signature', self.object, False)
            sig = inspect.signature(self.object, type_aliases=self.config.autodoc_type_aliases)
            args = stringify_signature(sig, **kwargs)
        except TypeError as exc:
            logger.warning(__("Failed to get a function signature for %s: %s"),
                           self.fullname, exc)
            return ''
        except ValueError:
            # no introspectable signature
            args = ''

        if self.config.strip_signature_backslash:
            # escape backslashes for reST
            args = args.replace('\\', '\\\\')
        return args

    def document_members(self, all_members: bool = False) -> None:
        """Functions have no members to document."""
        pass

    def add_directive_header(self, sig: str) -> None:
        """Add the directive header, marking coroutines/async generators."""
        sourcename = self.get_sourcename()
        super().add_directive_header(sig)

        if inspect.iscoroutinefunction(self.object) or inspect.isasyncgenfunction(self.object):
            self.add_line(' :async:', sourcename)

    def format_signature(self, **kwargs: Any) -> str:
        """Format the signature, including overload and singledispatch variants."""
        if self.config.autodoc_typehints_format == "short":
            kwargs.setdefault('unqualified_typehints', True)

        sigs = []
        if (self.analyzer and
                '.'.join(self.objpath) in self.analyzer.overloads and
                self.config.autodoc_typehints != 'none'):
            # Use signatures for overloaded functions instead of the implementation function.
            overloaded = True
        else:
            overloaded = False
            sig = super().format_signature(**kwargs)
            sigs.append(sig)

        if inspect.is_singledispatch_function(self.object):
            # append signature of singledispatch'ed functions
            for typ, func in self.object.registry.items():
                if typ is object:
                    pass  # default implementation. skipped.
                else:
                    dispatchfunc = self.annotate_to_first_argument(func, typ)
                    if dispatchfunc:
                        # format each registered variant through a throwaway
                        # documenter carrying the annotated function
                        documenter = FunctionDocumenter(self.directive, '')
                        documenter.object = dispatchfunc
                        documenter.objpath = ['']
                        sigs.append(documenter.format_signature())
        if overloaded and self.analyzer is not None:
            actual = inspect.signature(self.object,
                                       type_aliases=self.config.autodoc_type_aliases)
            __globals__ = safe_getattr(self.object, '__globals__', {})
            for overload in self.analyzer.overloads['.'.join(self.objpath)]:
                overload = self.merge_default_value(actual, overload)
                overload = evaluate_signature(overload, __globals__,
                                              self.config.autodoc_type_aliases)

                sig = stringify_signature(overload, **kwargs)
                sigs.append(sig)

        return "\n".join(sigs)

    def merge_default_value(self, actual: Signature, overload: Signature) -> Signature:
        """Merge default values of actual implementation to the overload variants."""
        parameters = list(overload.parameters.values())
        for i, param in enumerate(parameters):
            actual_param = actual.parameters.get(param.name)
            if actual_param and param.default == '...':
                # overload stubs use '...' placeholders; substitute the
                # implementation's real default value
                parameters[i] = param.replace(default=actual_param.default)

        return overload.replace(parameters=parameters)

    def annotate_to_first_argument(self, func: Callable, typ: type) -> Callable | None:
        """Annotate type hint to the first argument of function if needed.

        Returns a dummy callable carrying the adjusted signature, the
        original function when its first argument is already annotated,
        or None when the signature cannot be obtained or updated.
        """
        try:
            sig = inspect.signature(func, type_aliases=self.config.autodoc_type_aliases)
        except TypeError as exc:
            logger.warning(__("Failed to get a function signature for %s: %s"),
                           self.fullname, exc)
            return None
        except ValueError:
            return None

        if len(sig.parameters) == 0:
            return None

        def dummy():
            pass

        params = list(sig.parameters.values())
        if params[0].annotation is Parameter.empty:
            params[0] = params[0].replace(annotation=typ)
            try:
                dummy.__signature__ = sig.replace(  # type: ignore[attr-defined]
                    parameters=params)
                return dummy
            except (AttributeError, TypeError):
                # failed to update signature (ex. built-in or extension types)
                return None

        return func
+
+
class DecoratorDocumenter(FunctionDocumenter):
    """
    Specialized Documenter subclass for decorator functions.

    A decorator is shown without an argument list unless it takes arguments
    of its own beyond the decorated callable (i.e. is a decorator factory).
    """
    objtype = 'decorator'

    # must be lower than FunctionDocumenter
    priority = -1

    def format_args(self, **kwargs: Any) -> str:
        """Return the argument list, or '' for argument-less decorators.

        A comma in the rendered signature means the decorator accepts more
        than one parameter, so it is a decorator factory worth showing with
        its arguments.  The base ``Documenter.format_args`` may return
        ``None``; guard against it so the membership test cannot raise
        ``TypeError``.
        """
        args = super().format_args(**kwargs)
        if args is not None and ',' in args:
            return args
        else:
            return ''
+
+
# Types which have confusing metaclass signatures it would be best not to show.
# These are listed by name, rather than storing the objects themselves, to avoid
# needing to import the modules.
# Consulted by ClassDocumenter._get_signature() when choosing which
# constructor-like callable provides the class signature.
_METACLASS_CALL_BLACKLIST = [
    'enum.EnumMeta.__call__',
]


# Types whose __new__ signature is a pass-through.
_CLASS_NEW_BLACKLIST = [
    'typing.Generic.__new__',
]
+
+
class ClassDocumenter(DocstringSignatureMixin, ModuleLevelDocumenter):  # type: ignore[misc]
    """
    Specialized Documenter subclass for classes.

    Also accepts ``NewType`` and ``TypeVar`` attributes, which are rendered
    as attribute-style "alias of ..." entries rather than full classes.
    """
    objtype = 'class'
    member_order = 20
    option_spec: OptionSpec = {
        'members': members_option, 'undoc-members': bool_option,
        'no-index': bool_option, 'inherited-members': inherited_members_option,
        'show-inheritance': bool_option, 'member-order': member_order_option,
        'exclude-members': exclude_members_option,
        'private-members': members_option, 'special-members': members_option,
        'class-doc-from': class_doc_from_option,
        'noindex': bool_option,
    }

    # Must be higher than FunctionDocumenter, ClassDocumenter, and
    # AttributeDocumenter as NewType can be an attribute and is a class
    # after Python 3.10. Before 3.10 it is a kind of function object
    priority = 15

    # Filled in by format_args() from _get_signature(): the class whose
    # constructor-like method supplied the signature, and that method's name.
    _signature_class: Any = None
    _signature_method_name: str = ''

    def __init__(self, *args: Any) -> None:
        """Initialize, expanding options when class/constructor signatures are separated."""
        super().__init__(*args)

        if self.config.autodoc_class_signature == 'separated':
            self.options = self.options.copy()

            # show __init__() method
            if self.options.special_members is None:
                self.options['special-members'] = ['__new__', '__init__']
            else:
                self.options.special_members.append('__new__')
                self.options.special_members.append('__init__')

        merge_members_option(self.options)

    @classmethod
    def can_document_member(cls, member: Any, membername: str, isattr: bool, parent: Any,
                            ) -> bool:
        """Accept classes, plus NewType/TypeVar objects documented as attributes."""
        return isinstance(member, type) or (
            isattr and (inspect.isNewType(member) or isinstance(member, TypeVar)))

    def import_object(self, raiseerror: bool = False) -> bool:
        """Import the class; detect aliases and relocate NewType/TypeVar objects.

        Sets ``self.doc_as_attr`` when the documented name differs from the
        class's own ``__name__`` (i.e. it is an alias).
        """
        ret = super().import_object(raiseerror)
        # if the class is documented under another name, document it
        # as data/attribute
        if ret:
            if hasattr(self.object, '__name__'):
                self.doc_as_attr = (self.objpath[-1] != self.object.__name__)
            else:
                self.doc_as_attr = True
            if inspect.isNewType(self.object) or isinstance(self.object, TypeVar):
                modname = getattr(self.object, '__module__', self.modname)
                if modname != self.modname and self.modname.startswith(modname):
                    # Re-anchor the object path at its defining module.
                    bases = self.modname[len(modname):].strip('.').split('.')
                    self.objpath = bases + self.objpath
                    self.modname = modname
        return ret

    def _get_signature(self) -> tuple[Any | None, str | None, Signature | None]:
        """Locate the constructor signature for the class.

        Tries, in order: ``__signature__``, a user-defined metaclass
        ``__call__``, ``__new__``, ``__init__``, and finally plain
        ``inspect.signature`` on the class itself.  Returns the class that
        provided the signature, the method name used, and the signature —
        any of which may be ``None``.
        """
        if inspect.isNewType(self.object) or isinstance(self.object, TypeVar):
            # Suppress signature
            return None, None, None

        def get_user_defined_function_or_method(obj: Any, attr: str) -> Any:
            """ Get the `attr` function or method from `obj`, if it is user-defined. """
            if inspect.is_builtin_class_method(obj, attr):
                return None
            attr = self.get_attr(obj, attr, None)
            if not (inspect.ismethod(attr) or inspect.isfunction(attr)):
                return None
            return attr

        # This sequence is copied from inspect._signature_from_callable.
        # ValueError means that no signature could be found, so we keep going.

        # First, we check the obj has a __signature__ attribute
        if (hasattr(self.object, '__signature__') and
                isinstance(self.object.__signature__, Signature)):
            return None, None, self.object.__signature__

        # Next, let's see if it has an overloaded __call__ defined
        # in its metaclass
        call = get_user_defined_function_or_method(type(self.object), '__call__')

        if call is not None:
            if f"{call.__module__}.{call.__qualname__}" in _METACLASS_CALL_BLACKLIST:
                call = None

        if call is not None:
            self.env.app.emit('autodoc-before-process-signature', call, True)
            try:
                sig = inspect.signature(call, bound_method=True,
                                        type_aliases=self.config.autodoc_type_aliases)
                return type(self.object), '__call__', sig
            except ValueError:
                pass

        # Now we check if the 'obj' class has a '__new__' method
        new = get_user_defined_function_or_method(self.object, '__new__')

        if new is not None:
            if f"{new.__module__}.{new.__qualname__}" in _CLASS_NEW_BLACKLIST:
                new = None

        if new is not None:
            self.env.app.emit('autodoc-before-process-signature', new, True)
            try:
                sig = inspect.signature(new, bound_method=True,
                                        type_aliases=self.config.autodoc_type_aliases)
                return self.object, '__new__', sig
            except ValueError:
                pass

        # Finally, we should have at least __init__ implemented
        init = get_user_defined_function_or_method(self.object, '__init__')
        if init is not None:
            self.env.app.emit('autodoc-before-process-signature', init, True)
            try:
                sig = inspect.signature(init, bound_method=True,
                                        type_aliases=self.config.autodoc_type_aliases)
                return self.object, '__init__', sig
            except ValueError:
                pass

        # None of the attributes are user-defined, so fall back to let inspect
        # handle it.
        # We don't know the exact method that inspect.signature will read
        # the signature from, so just pass the object itself to our hook.
        self.env.app.emit('autodoc-before-process-signature', self.object, False)
        try:
            sig = inspect.signature(self.object, bound_method=False,
                                    type_aliases=self.config.autodoc_type_aliases)
            return None, None, sig
        except ValueError:
            pass

        # Still no signature: happens e.g. for old-style classes
        # with __init__ in C and no `__text_signature__`.
        return None, None, None

    def format_args(self, **kwargs: Any) -> str:
        """Format the constructor arguments (no return annotation shown)."""
        if self.config.autodoc_typehints in ('none', 'description'):
            kwargs.setdefault('show_annotation', False)
        if self.config.autodoc_typehints_format == "short":
            kwargs.setdefault('unqualified_typehints', True)

        try:
            self._signature_class, _signature_method_name, sig = self._get_signature()
        except TypeError as exc:
            # __signature__ attribute contained junk
            logger.warning(__("Failed to get a constructor signature for %s: %s"),
                           self.fullname, exc)
            return ''
        self._signature_method_name = _signature_method_name or ''

        if sig is None:
            return ''

        return stringify_signature(sig, show_return_annotation=False, **kwargs)

    def _find_signature(self) -> tuple[str | None, str | None] | None:
        """Find a signature in the docstring, stripping constructor return values."""
        result = super()._find_signature()
        if result is not None:
            # Strip a return value from signature of constructor in docstring (first entry)
            result = (result[0], None)

        for i, sig in enumerate(self._signatures):
            if sig.endswith(' -> None'):
                # Strip a return value from signatures of constructor in docstring (subsequent
                # entries)
                self._signatures[i] = sig[:-8]

        return result

    def format_signature(self, **kwargs: Any) -> str:
        """Format the class signature, preferring ``@overload`` constructor stubs."""
        if self.doc_as_attr:
            return ''
        if self.config.autodoc_class_signature == 'separated':
            # do not show signatures
            return ''

        if self.config.autodoc_typehints_format == "short":
            kwargs.setdefault('unqualified_typehints', True)

        sig = super().format_signature()
        sigs = []

        overloads = self.get_overloaded_signatures()
        if overloads and self.config.autodoc_typehints != 'none':
            # Use signatures for overloaded methods instead of the implementation method.
            method = safe_getattr(self._signature_class, self._signature_method_name, None)
            __globals__ = safe_getattr(method, '__globals__', {})
            for overload in overloads:
                overload = evaluate_signature(overload, __globals__,
                                              self.config.autodoc_type_aliases)

                # Drop the implicit first argument (self/cls) and any return
                # annotation; classes are shown without them.
                parameters = list(overload.parameters.values())
                overload = overload.replace(parameters=parameters[1:],
                                            return_annotation=Parameter.empty)
                sig = stringify_signature(overload, **kwargs)
                sigs.append(sig)
        else:
            sigs.append(sig)

        return "\n".join(sigs)

    def get_overloaded_signatures(self) -> list[Signature]:
        """Collect ``@overload`` stubs for the constructor across the MRO."""
        if self._signature_class and self._signature_method_name:
            for cls in self._signature_class.__mro__:
                try:
                    analyzer = ModuleAnalyzer.for_module(cls.__module__)
                    analyzer.analyze()
                    qualname = '.'.join([cls.__qualname__, self._signature_method_name])
                    if qualname in analyzer.overloads:
                        return analyzer.overloads.get(qualname, [])
                    elif qualname in analyzer.tagorder:
                        # the constructor is defined in the class, but not overridden.
                        return []
                except PycodeError:
                    pass

        return []

    def get_canonical_fullname(self) -> str | None:
        """Return the dotted name at the class's definition site, if resolvable."""
        __modname__ = safe_getattr(self.object, '__module__', self.modname)
        __qualname__ = safe_getattr(self.object, '__qualname__', None)
        if __qualname__ is None:
            __qualname__ = safe_getattr(self.object, '__name__', None)
        if __qualname__ and '<locals>' in __qualname__:
            # No valid qualname found if the object is defined as locals
            __qualname__ = None

        if __modname__ and __qualname__:
            return '.'.join([__modname__, __qualname__])
        else:
            return None

    def add_directive_header(self, sig: str) -> None:
        """Add the directive header, plus :final:, :canonical: and base classes."""
        sourcename = self.get_sourcename()

        if self.doc_as_attr:
            self.directivetype = 'attribute'
        super().add_directive_header(sig)

        if inspect.isNewType(self.object) or isinstance(self.object, TypeVar):
            return

        if self.analyzer and '.'.join(self.objpath) in self.analyzer.finals:
            self.add_line(' :final:', sourcename)

        canonical_fullname = self.get_canonical_fullname()
        if (not self.doc_as_attr and not inspect.isNewType(self.object)
                and canonical_fullname and self.fullname != canonical_fullname):
            self.add_line(' :canonical: %s' % canonical_fullname, sourcename)

        # add inheritance info, if wanted
        if not self.doc_as_attr and self.options.show_inheritance:
            if inspect.getorigbases(self.object):
                # A subclass of generic types
                # refs: PEP-560 <https://peps.python.org/pep-0560/>
                bases = list(self.object.__orig_bases__)
            elif hasattr(self.object, '__bases__') and len(self.object.__bases__):
                # A normal class
                bases = list(self.object.__bases__)
            else:
                bases = []

            # Let extensions filter/modify the base list in place.
            self.env.events.emit('autodoc-process-bases',
                                 self.fullname, self.object, self.options, bases)

            if self.config.autodoc_typehints_format == "short":
                base_classes = [restify(cls, "smart") for cls in bases]
            else:
                base_classes = [restify(cls) for cls in bases]

            sourcename = self.get_sourcename()
            self.add_line('', sourcename)
            self.add_line('   ' + _('Bases: %s') % ', '.join(base_classes), sourcename)

    def get_object_members(self, want_all: bool) -> tuple[bool, list[ObjectMember]]:
        """Return the class members to document.

        With ``want_all`` False, only explicitly listed ``:members:`` are
        returned (warning on misses); otherwise inherited members are
        included only when the ``inherited-members`` option is set.
        """
        members = get_class_members(self.object, self.objpath, self.get_attr,
                                    self.config.autodoc_inherit_docstrings)
        if not want_all:
            if not self.options.members:
                return False, []
            # specific members given
            selected = []
            for name in self.options.members:
                if name in members:
                    selected.append(members[name])
                else:
                    logger.warning(__('missing attribute %s in object %s') %
                                   (name, self.fullname), type='autodoc')
            return False, selected
        elif self.options.inherited_members:
            return False, list(members.values())
        else:
            return False, [m for m in members.values() if m.class_ == self.object]

    def get_doc(self) -> list[list[str]] | None:
        """Return the docstring(s) for the class.

        Honors the ``class-doc-from`` option / ``autoclass_content`` config
        (class docstring, ``__init__``/``__new__`` docstring, or both), and
        handles aliases, TypeVar and pre-3.10 NewType specially.
        """
        if isinstance(self.object, TypeVar):
            if self.object.__doc__ == TypeVar.__doc__:
                return []
        if sys.version_info[:2] < (3, 10):
            if inspect.isNewType(self.object) or isinstance(self.object, TypeVar):
                # Before 3.10 these objects carry no useful __module__ info;
                # probe progressively shorter module prefixes for a
                # doc-comment next to the definition.
                parts = self.modname.strip('.').split('.')
                orig_objpath = self.objpath
                for i in range(len(parts)):
                    new_modname = '.'.join(parts[:len(parts) - i])
                    new_objpath = parts[len(parts) - i:] + orig_objpath
                    try:
                        analyzer = ModuleAnalyzer.for_module(new_modname)
                        analyzer.analyze()
                        key = ('', new_objpath[-1])
                        comment = list(analyzer.attr_docs.get(key, []))
                        if comment:
                            self.objpath = new_objpath
                            self.modname = new_modname
                            return [comment]
                    except PycodeError:
                        pass
        if self.doc_as_attr:
            # Don't show the docstring of the class when it is an alias.
            if self.get_variable_comment():
                return []
            else:
                return None

        lines = getattr(self, '_new_docstrings', None)
        if lines is not None:
            return lines

        classdoc_from = self.options.get('class-doc-from', self.config.autoclass_content)

        docstrings = []
        attrdocstring = getdoc(self.object, self.get_attr)
        if attrdocstring:
            docstrings.append(attrdocstring)

        # for classes, what the "docstring" is can be controlled via a
        # config value; the default is only the class docstring
        if classdoc_from in ('both', 'init'):
            __init__ = self.get_attr(self.object, '__init__', None)
            initdocstring = getdoc(__init__, self.get_attr,
                                   self.config.autodoc_inherit_docstrings,
                                   self.object, '__init__')
            # for new-style classes, no __init__ means default __init__
            if (initdocstring is not None and
                (initdocstring == object.__init__.__doc__ or  # for pypy
                 initdocstring.strip() == object.__init__.__doc__)):  # for !pypy
                initdocstring = None
            if not initdocstring:
                # try __new__
                __new__ = self.get_attr(self.object, '__new__', None)
                initdocstring = getdoc(__new__, self.get_attr,
                                       self.config.autodoc_inherit_docstrings,
                                       self.object, '__new__')
                # for new-style classes, no __new__ means default __new__
                if (initdocstring is not None and
                    (initdocstring == object.__new__.__doc__ or  # for pypy
                     initdocstring.strip() == object.__new__.__doc__)):  # for !pypy
                    initdocstring = None
            if initdocstring:
                if classdoc_from == 'init':
                    docstrings = [initdocstring]
                else:
                    docstrings.append(initdocstring)

        tab_width = self.directive.state.document.settings.tab_width
        return [prepare_docstring(docstring, tab_width) for docstring in docstrings]

    def get_variable_comment(self) -> list[str] | None:
        """Return the doc-comment attached to the variable, or None on analyzer failure."""
        try:
            key = ('', '.'.join(self.objpath))
            if self.doc_as_attr:
                analyzer = ModuleAnalyzer.for_module(self.modname)
            else:
                analyzer = ModuleAnalyzer.for_module(self.get_real_modname())
            analyzer.analyze()
            return list(analyzer.attr_docs.get(key, []))
        except PycodeError:
            return None

    def add_content(self, more_content: StringList | None) -> None:
        """Add class content, generating "alias of ..." text for NewType/TypeVar/aliases."""
        if inspect.isNewType(self.object):
            if self.config.autodoc_typehints_format == "short":
                supertype = restify(self.object.__supertype__, "smart")
            else:
                supertype = restify(self.object.__supertype__)

            more_content = StringList([_('alias of %s') % supertype, ''], source='')
        if isinstance(self.object, TypeVar):
            # Reconstruct the TypeVar(...) call from the object's attributes.
            attrs = [repr(self.object.__name__)]
            for constraint in self.object.__constraints__:
                if self.config.autodoc_typehints_format == "short":
                    attrs.append(stringify_annotation(constraint, "smart"))
                else:
                    attrs.append(stringify_annotation(constraint))
            if self.object.__bound__:
                if self.config.autodoc_typehints_format == "short":
                    bound = restify(self.object.__bound__, "smart")
                else:
                    bound = restify(self.object.__bound__)
                attrs.append(r"bound=\ " + bound)
            if self.object.__covariant__:
                attrs.append("covariant=True")
            if self.object.__contravariant__:
                attrs.append("contravariant=True")

            more_content = StringList(
                [_('alias of TypeVar(%s)') % ", ".join(attrs), ''],
                source='',
            )
        if self.doc_as_attr and self.modname != self.get_real_modname():
            try:
                # override analyzer to obtain doccomment around its definition.
                self.analyzer = ModuleAnalyzer.for_module(self.modname)
                self.analyzer.analyze()
            except PycodeError:
                pass

        if self.doc_as_attr and not self.get_variable_comment():
            try:
                if self.config.autodoc_typehints_format == "short":
                    alias = restify(self.object, "smart")
                else:
                    alias = restify(self.object)
                more_content = StringList([_('alias of %s') % alias], source='')
            except AttributeError:
                pass  # Invalid class object is passed.

        super().add_content(more_content)

    def document_members(self, all_members: bool = False) -> None:
        """Document members, unless the class is documented as an alias attribute."""
        if self.doc_as_attr:
            return
        super().document_members(all_members)

    def generate(
        self,
        more_content: StringList | None = None,
        real_modname: str | None = None,
        check_module: bool = False,
        all_members: bool = False,
    ) -> None:
        """Generate the documentation, always using the class's own module name."""
        # Do not pass real_modname and use the name from the __module__
        # attribute of the class.
        # If a class gets imported into the module real_modname
        # the analyzer won't find the source of the class, if
        # it looks in real_modname.
        return super().generate(more_content=more_content,
                                check_module=check_module,
                                all_members=all_members)
+
+
class ExceptionDocumenter(ClassDocumenter):
    """
    Specialized ClassDocumenter subclass for exceptions.
    """
    objtype = 'exception'
    member_order = 10

    # needs a higher priority than ClassDocumenter
    priority = ClassDocumenter.priority + 5

    @classmethod
    def can_document_member(cls, member: Any, membername: str, isattr: bool, parent: Any,
                            ) -> bool:
        """Report whether *member* is an exception class."""
        if not isinstance(member, type):
            return False
        try:
            return issubclass(member, BaseException)
        except TypeError as exc:
            # It's possible for a member to be considered a type, but fail
            # issubclass checks due to not being a class. For example:
            # https://github.com/sphinx-doc/sphinx/issues/11654#issuecomment-1696790436
            msg = (
                f'{cls.__name__} failed to discern if member {member} with'
                f' membername {membername} is a BaseException subclass.'
            )
            raise ValueError(msg) from exc
+
+
class DataDocumenterMixinBase:
    """Common base for the data/attribute documenter mix-ins.

    Declares the instance attributes shared by the mix-ins and provides
    no-op hook points that subclasses override cooperatively (via
    ``super()``) to suppress parts of the directive or append content.
    """

    # define types of instance variables
    config: Config
    env: BuildEnvironment
    modname: str
    parent: Any
    object: Any
    objpath: list[str]

    def should_suppress_directive_header(self) -> bool:
        """Check directive header should be suppressed."""
        return False

    def should_suppress_value_header(self) -> bool:
        """Check :value: header should be suppressed."""
        return False

    def update_content(self, more_content: StringList) -> None:
        """Update docstring, for example with TypeVar variance."""
        return None
+
+
class GenericAliasMixin(DataDocumenterMixinBase):
    """
    Mixin for DataDocumenter and AttributeDocumenter to provide the feature for
    supporting GenericAliases.
    """

    def should_suppress_directive_header(self) -> bool:
        """Suppress the header for generic-alias objects (e.g. ``list[int]``)."""
        if inspect.isgenericalias(self.object):
            return True
        return super().should_suppress_directive_header()

    def update_content(self, more_content: StringList) -> None:
        """Append an "alias of ..." line when the object is a generic alias."""
        if inspect.isgenericalias(self.object):
            if self.config.autodoc_typehints_format == "short":
                rendered = restify(self.object, "smart")
            else:
                rendered = restify(self.object)

            more_content.append(_('alias of %s') % rendered, '')
            more_content.append('', '')

        super().update_content(more_content)
+
+
class UninitializedGlobalVariableMixin(DataDocumenterMixinBase):
    """
    Mixin for DataDocumenter to provide the feature for supporting uninitialized
    (type annotation only) global variables.
    """

    def import_object(self, raiseerror: bool = False) -> bool:
        """Import the variable, falling back to its bare annotation.

        If the normal import fails but the parent module carries a type
        annotation for the name (PEP 526), substitute the UNINITIALIZED_ATTR
        sentinel so the variable can still be documented.
        """
        try:
            return super().import_object(raiseerror=True)  # type: ignore[misc]
        except ImportError as exc:
            # annotation only instance variable (PEP-526)
            try:
                with mock(self.config.autodoc_mock_imports):
                    parent = import_module(self.modname, self.config.autodoc_warningiserror)
                    annotations = get_type_hints(parent, None,
                                                 self.config.autodoc_type_aliases)
                    if self.objpath[-1] in annotations:
                        self.object = UNINITIALIZED_ATTR
                        self.parent = parent
                        return True
            except ImportError:
                pass

            if raiseerror:
                raise
            # Report the original import failure and request a re-read so the
            # document is rebuilt once the module becomes importable.
            logger.warning(exc.args[0], type='autodoc', subtype='import_object')
            self.env.note_reread()
            return False

    def should_suppress_value_header(self) -> bool:
        # An uninitialized variable has no value to show.
        return (self.object is UNINITIALIZED_ATTR or
                super().should_suppress_value_header())

    def get_doc(self) -> list[list[str]] | None:
        # The sentinel carries no docstring of its own.
        if self.object is UNINITIALIZED_ATTR:
            return []
        else:
            return super().get_doc()  # type: ignore[misc]
+
+
class DataDocumenter(GenericAliasMixin,
                     UninitializedGlobalVariableMixin, ModuleLevelDocumenter):
    """
    Specialized Documenter subclass for data items.
    """
    objtype = 'data'
    member_order = 40
    priority = -10
    option_spec: OptionSpec = dict(ModuleLevelDocumenter.option_spec)
    option_spec["annotation"] = annotation_option
    option_spec["no-value"] = bool_option

    @classmethod
    def can_document_member(cls, member: Any, membername: str, isattr: bool, parent: Any,
                            ) -> bool:
        """Accept only module-level attributes."""
        return isinstance(parent, ModuleDocumenter) and isattr

    def update_annotations(self, parent: Any) -> None:
        """Update __annotations__ to support type_comment and so on."""
        annotations = dict(inspect.getannotations(parent))
        parent.__annotations__ = annotations

        try:
            # Merge in annotations that only exist in the source (e.g. type
            # comments) found by the static analyzer.
            analyzer = ModuleAnalyzer.for_module(self.modname)
            analyzer.analyze()
            for (classname, attrname), annotation in analyzer.annotations.items():
                if classname == '' and attrname not in annotations:
                    annotations[attrname] = annotation
        except PycodeError:
            pass

    def import_object(self, raiseerror: bool = False) -> bool:
        """Import the data item and refresh the parent module's annotations."""
        ret = super().import_object(raiseerror)
        if self.parent:
            self.update_annotations(self.parent)

        return ret

    def should_suppress_value_header(self) -> bool:
        """Suppress the :value: header when requested via ``:meta hide-value:``."""
        if super().should_suppress_value_header():
            return True
        else:
            doc = self.get_doc() or []
            docstring, metadata = separate_metadata('\n'.join(sum(doc, [])))
            if 'hide-value' in metadata:
                return True

        return False

    def add_directive_header(self, sig: str) -> None:
        """Add the directive header plus :type: and :value: options."""
        super().add_directive_header(sig)
        sourcename = self.get_sourcename()
        if self.options.annotation is SUPPRESS or self.should_suppress_directive_header():
            pass
        elif self.options.annotation:
            # Explicit :annotation: option overrides the detected type.
            self.add_line('   :annotation: %s' % self.options.annotation,
                          sourcename)
        else:
            if self.config.autodoc_typehints != 'none':
                # obtain annotation for this data
                annotations = get_type_hints(self.parent, None,
                                             self.config.autodoc_type_aliases)
                if self.objpath[-1] in annotations:
                    if self.config.autodoc_typehints_format == "short":
                        objrepr = stringify_annotation(annotations.get(self.objpath[-1]),
                                                       "smart")
                    else:
                        objrepr = stringify_annotation(annotations.get(self.objpath[-1]),
                                                       "fully-qualified-except-typing")
                    self.add_line('   :type: ' + objrepr, sourcename)

            try:
                if (self.options.no_value or self.should_suppress_value_header() or
                        ismock(self.object)):
                    pass
                else:
                    objrepr = object_description(self.object)
                    self.add_line('   :value: ' + objrepr, sourcename)
            except ValueError:
                # The value has no safe repr; omit it.
                pass

    def document_members(self, all_members: bool = False) -> None:
        """Data items have no members; do nothing."""
        pass

    def get_real_modname(self) -> str:
        """Return the module the object actually lives in, defaulting to modname."""
        real_modname = self.get_attr(self.parent or self.object, '__module__', None)
        return real_modname or self.modname

    def get_module_comment(self, attrname: str) -> list[str] | None:
        """Return the source doc-comment for a module attribute, if any."""
        try:
            analyzer = ModuleAnalyzer.for_module(self.modname)
            analyzer.analyze()
            key = ('', attrname)
            if key in analyzer.attr_docs:
                return list(analyzer.attr_docs[key])
        except PycodeError:
            pass

        return None

    def get_doc(self) -> list[list[str]] | None:
        """Prefer the variable's doc-comment over the object's docstring."""
        # Check the variable has a docstring-comment
        comment = self.get_module_comment(self.objpath[-1])
        if comment:
            return [comment]
        else:
            return super().get_doc()

    def add_content(self, more_content: StringList | None) -> None:
        """Add content, letting the mix-ins contribute via update_content()."""
        # Disable analyzing variable comment on Documenter.add_content() to control it on
        # DataDocumenter.add_content()
        self.analyzer = None

        if not more_content:
            more_content = StringList()

        self.update_content(more_content)
        super().add_content(more_content)
+
+
class MethodDocumenter(DocstringSignatureMixin, ClassLevelDocumenter):  # type: ignore[misc]
    """
    Specialized Documenter subclass for methods (normal, static and class).
    """
    objtype = 'method'
    directivetype = 'method'
    member_order = 50
    priority = 1  # must be more than FunctionDocumenter

    @classmethod
    def can_document_member(cls, member: Any, membername: str, isattr: bool, parent: Any,
                            ) -> bool:
        """Accept routines that are class members (not module-level functions)."""
        return inspect.isroutine(member) and not isinstance(parent, ModuleDocumenter)

    def import_object(self, raiseerror: bool = False) -> bool:
        """Import the method; promote class/static methods in the member order."""
        ret = super().import_object(raiseerror)
        if not ret:
            return ret

        # to distinguish classmethod/staticmethod
        obj = self.parent.__dict__.get(self.object_name)
        if obj is None:
            obj = self.object

        if (inspect.isclassmethod(obj) or
                inspect.isstaticmethod(obj, cls=self.parent, name=self.object_name)):
            # document class and static members before ordinary ones
            self.member_order = self.member_order - 1

        return ret

    def format_args(self, **kwargs: Any) -> str:
        """Format the method's arguments, dropping the bound self/cls argument."""
        if self.config.autodoc_typehints in ('none', 'description'):
            kwargs.setdefault('show_annotation', False)
        if self.config.autodoc_typehints_format == "short":
            kwargs.setdefault('unqualified_typehints', True)

        try:
            if self.object == object.__init__ and self.parent != object:
                # Classes not having own __init__() method are shown as no arguments.
                #
                # Note: The signature of object.__init__() is (self, /, *args, **kwargs).
                # But it makes users confused.
                args = '()'
            else:
                if inspect.isstaticmethod(self.object, cls=self.parent, name=self.object_name):
                    self.env.app.emit('autodoc-before-process-signature', self.object, False)
                    sig = inspect.signature(self.object, bound_method=False,
                                            type_aliases=self.config.autodoc_type_aliases)
                else:
                    self.env.app.emit('autodoc-before-process-signature', self.object, True)
                    sig = inspect.signature(self.object, bound_method=True,
                                            type_aliases=self.config.autodoc_type_aliases)
                args = stringify_signature(sig, **kwargs)
        except TypeError as exc:
            logger.warning(__("Failed to get a method signature for %s: %s"),
                           self.fullname, exc)
            return ''
        except ValueError:
            args = ''

        if self.config.strip_signature_backslash:
            # escape backslashes for reST
            args = args.replace('\\', '\\\\')
        return args

    def add_directive_header(self, sig: str) -> None:
        """Add the directive header plus abstract/async/classmethod/static/final flags."""
        super().add_directive_header(sig)

        sourcename = self.get_sourcename()
        obj = self.parent.__dict__.get(self.object_name, self.object)
        if inspect.isabstractmethod(obj):
            self.add_line('   :abstractmethod:', sourcename)
        if inspect.iscoroutinefunction(obj) or inspect.isasyncgenfunction(obj):
            self.add_line('   :async:', sourcename)
        if inspect.isclassmethod(obj):
            self.add_line('   :classmethod:', sourcename)
        if inspect.isstaticmethod(obj, cls=self.parent, name=self.object_name):
            self.add_line('   :staticmethod:', sourcename)
        if self.analyzer and '.'.join(self.objpath) in self.analyzer.finals:
            self.add_line('   :final:', sourcename)

    def document_members(self, all_members: bool = False) -> None:
        """Methods have no members; do nothing."""
        pass

    def format_signature(self, **kwargs: Any) -> str:
        """Format the method signature(s).

        Collects extra signature lines for singledispatchmethod variants and
        uses ``@overload`` stubs from the analyzer when present; multiple
        signatures are joined with newlines.
        """
        if self.config.autodoc_typehints_format == "short":
            kwargs.setdefault('unqualified_typehints', True)

        sigs = []
        if (self.analyzer and
                '.'.join(self.objpath) in self.analyzer.overloads and
                self.config.autodoc_typehints != 'none'):
            # Use signatures for overloaded methods instead of the implementation method.
            overloaded = True
        else:
            overloaded = False
            sig = super().format_signature(**kwargs)
            sigs.append(sig)

        meth = self.parent.__dict__.get(self.objpath[-1])
        if inspect.is_singledispatch_method(meth):
            # append signature of singledispatch'ed functions
            for typ, func in meth.dispatcher.registry.items():
                if typ is object:
                    pass  # default implementation. skipped.
                else:
                    dispatchmeth = self.annotate_to_first_argument(func, typ)
                    if dispatchmeth:
                        documenter = MethodDocumenter(self.directive, '')
                        documenter.parent = self.parent
                        documenter.object = dispatchmeth
                        documenter.objpath = ['']
                        sigs.append(documenter.format_signature())
        if overloaded and self.analyzer is not None:
            if inspect.isstaticmethod(self.object, cls=self.parent, name=self.object_name):
                actual = inspect.signature(self.object, bound_method=False,
                                           type_aliases=self.config.autodoc_type_aliases)
            else:
                actual = inspect.signature(self.object, bound_method=True,
                                           type_aliases=self.config.autodoc_type_aliases)

            __globals__ = safe_getattr(self.object, '__globals__', {})
            for overload in self.analyzer.overloads['.'.join(self.objpath)]:
                overload = self.merge_default_value(actual, overload)
                overload = evaluate_signature(overload, __globals__,
                                              self.config.autodoc_type_aliases)

                if not inspect.isstaticmethod(self.object, cls=self.parent,
                                              name=self.object_name):
                    # Drop the implicit first argument (self/cls).
                    parameters = list(overload.parameters.values())
                    overload = overload.replace(parameters=parameters[1:])
                sig = stringify_signature(overload, **kwargs)
                sigs.append(sig)

        return "\n".join(sigs)

    def merge_default_value(self, actual: Signature, overload: Signature) -> Signature:
        """Merge default values of actual implementation to the overload variants."""
        parameters = list(overload.parameters.values())
        for i, param in enumerate(parameters):
            actual_param = actual.parameters.get(param.name)
            if actual_param and param.default == '...':
                parameters[i] = param.replace(default=actual_param.default)

        return overload.replace(parameters=parameters)

    def annotate_to_first_argument(self, func: Callable, typ: type) -> Callable | None:
        """Annotate type hint to the first argument of function if needed."""
        try:
            sig = inspect.signature(func, type_aliases=self.config.autodoc_type_aliases)
        except TypeError as exc:
            logger.warning(__("Failed to get a method signature for %s: %s"),
                           self.fullname, exc)
            return None
        except ValueError:
            return None

        # Index 1 because parameter 0 is self/cls for methods.
        if len(sig.parameters) == 1:
            return None

        def dummy():
            pass

        params = list(sig.parameters.values())
        if params[1].annotation is Parameter.empty:
            params[1] = params[1].replace(annotation=typ)
            try:
                dummy.__signature__ = sig.replace(  # type: ignore[attr-defined]
                    parameters=params)
                return dummy
            except (AttributeError, TypeError):
                # failed to update signature (ex. built-in or extension types)
                return None

        return func

    def get_doc(self) -> list[list[str]] | None:
        """Return the method docstring.

        For ``__init__``/``__new__`` the inherited default docstring from
        ``object`` is dropped so it is not shown verbatim.
        """
        if self._new_docstrings is not None:
            # docstring already returned previously, then modified by
            # `DocstringSignatureMixin`.  Just return the previously-computed
            # result, so that we don't lose the processing done by
            # `DocstringSignatureMixin`.
            return self._new_docstrings
        if self.objpath[-1] == '__init__':
            docstring = getdoc(self.object, self.get_attr,
                               self.config.autodoc_inherit_docstrings,
                               self.parent, self.object_name)
            if (docstring is not None and
                (docstring == object.__init__.__doc__ or  # for pypy
                 docstring.strip() == object.__init__.__doc__)):  # for !pypy
                docstring = None
            if docstring:
                tab_width = self.directive.state.document.settings.tab_width
                return [prepare_docstring(docstring, tabsize=tab_width)]
            else:
                return []
        elif self.objpath[-1] == '__new__':
            docstring = getdoc(self.object, self.get_attr,
                               self.config.autodoc_inherit_docstrings,
                               self.parent, self.object_name)
            if (docstring is not None and
                (docstring == object.__new__.__doc__ or  # for pypy
                 docstring.strip() == object.__new__.__doc__)):  # for !pypy
                docstring = None
            if docstring:
                tab_width = self.directive.state.document.settings.tab_width
                return [prepare_docstring(docstring, tabsize=tab_width)]
            else:
                return []
        else:
            return super().get_doc()
+
+
class NonDataDescriptorMixin(DataDocumenterMixinBase):
    """
    Mixin for AttributeDocumenter to provide the feature for supporting non
    data-descriptors.

    .. note:: This mix-in must be inherited after other mix-ins.  Otherwise, docstring
              and :value: header will be suppressed unexpectedly.
    """

    def import_object(self, raiseerror: bool = False) -> bool:
        # Record whether the imported object is a non-data descriptor; the
        # flag drives the docstring/value-header suppression below.
        ret = super().import_object(raiseerror)  # type: ignore[misc]
        if ret and not inspect.isattributedescriptor(self.object):
            self.non_data_descriptor = True
        else:
            self.non_data_descriptor = False

        return ret

    def should_suppress_value_header(self) -> bool:
        # NOTE(review): the super() call dispatches to
        # should_suppress_directive_header(), not should_suppress_value_header().
        # This may be intentional, but is worth confirming against the other
        # mixins in this file.
        return (not getattr(self, 'non_data_descriptor', False) or
                super().should_suppress_directive_header())

    def get_doc(self) -> list[list[str]] | None:
        if getattr(self, 'non_data_descriptor', False):
            # the docstring of non datadescriptor is very probably the wrong thing
            # to display
            return None
        else:
            return super().get_doc()  # type: ignore[misc]
+
+
class SlotsMixin(DataDocumenterMixinBase):
    """
    Mixin for AttributeDocumenter to provide the feature for supporting __slots__.
    """

    def isslotsattribute(self) -> bool:
        """Check the subject is an attribute in __slots__."""
        try:
            if parent___slots__ := inspect.getslots(self.parent):
                return self.objpath[-1] in parent___slots__
            else:
                return False
        except (ValueError, TypeError):
            # malformed or unreadable __slots__
            return False

    def import_object(self, raiseerror: bool = False) -> bool:
        ret = super().import_object(raiseerror)  # type: ignore[misc]
        if self.isslotsattribute():
            # Slot attributes have no meaningful value; use the sentinel.
            self.object = SLOTSATTR

        return ret

    def should_suppress_value_header(self) -> bool:
        if self.object is SLOTSATTR:
            return True
        else:
            return super().should_suppress_value_header()

    def get_doc(self) -> list[list[str]] | None:
        if self.object is SLOTSATTR:
            # Use the docstring given as the value in a dict-style __slots__.
            try:
                parent___slots__ = inspect.getslots(self.parent)
                if parent___slots__ and parent___slots__.get(self.objpath[-1]):
                    docstring = prepare_docstring(parent___slots__[self.objpath[-1]])
                    return [docstring]
                else:
                    return []
            except ValueError as exc:
                logger.warning(__('Invalid __slots__ found on %s. Ignored.'),
                               (self.parent.__qualname__, exc), type='autodoc')
                return []
        else:
            return super().get_doc()  # type: ignore[misc]
+
+
class RuntimeInstanceAttributeMixin(DataDocumenterMixinBase):
    """
    Mixin for AttributeDocumenter to provide the feature for supporting runtime
    instance attributes (that are defined in __init__() methods with doc-comments).

    Example:

        class Foo:
            def __init__(self):
                self.attr = None  #: This is a target of this mix-in.
    """

    # Sentinel used as ``self.object`` when the attribute only exists at
    # runtime and could not be imported directly.
    RUNTIME_INSTANCE_ATTRIBUTE = object()

    def is_runtime_instance_attribute(self, parent: Any) -> bool:
        """Check the subject is an attribute defined in __init__()."""
        # An instance variable defined in __init__().
        if self.get_attribute_comment(parent, self.objpath[-1]):  # type: ignore[attr-defined]
            return True
        if self.is_runtime_instance_attribute_not_commented(parent):
            return True
        return False

    def is_runtime_instance_attribute_not_commented(self, parent: Any) -> bool:
        """Check the subject is an attribute defined in __init__() without comment."""
        for cls in inspect.getmro(parent):
            try:
                module = safe_getattr(cls, '__module__')
                qualname = safe_getattr(cls, '__qualname__')

                analyzer = ModuleAnalyzer.for_module(module)
                analyzer.analyze()
                if qualname and self.objpath:
                    key = '.'.join([qualname, self.objpath[-1]])
                    if key in analyzer.tagorder:
                        return True
            except (AttributeError, PycodeError):
                # class without source, or unanalyzable module: try next in MRO
                pass

        return False

    def import_object(self, raiseerror: bool = False) -> bool:
        """Check the existence of runtime instance attribute after failing to import the
        attribute."""
        try:
            return super().import_object(raiseerror=True)  # type: ignore[misc]
        except ImportError as exc:
            try:
                # Fall back: import the parent class and look for evidence of
                # the attribute in its source.
                with mock(self.config.autodoc_mock_imports):
                    ret = import_object(self.modname, self.objpath[:-1], 'class',
                                        attrgetter=self.get_attr,  # type: ignore[attr-defined]
                                        warningiserror=self.config.autodoc_warningiserror)
                parent = ret[3]
                if self.is_runtime_instance_attribute(parent):
                    self.object = self.RUNTIME_INSTANCE_ATTRIBUTE
                    self.parent = parent
                    return True
            except ImportError:
                pass

            if raiseerror:
                raise
            logger.warning(exc.args[0], type='autodoc', subtype='import_object')
            self.env.note_reread()
            return False

    def should_suppress_value_header(self) -> bool:
        return (self.object is self.RUNTIME_INSTANCE_ATTRIBUTE or
                super().should_suppress_value_header())

    def get_doc(self) -> list[list[str]] | None:
        if (self.object is self.RUNTIME_INSTANCE_ATTRIBUTE and
                self.is_runtime_instance_attribute_not_commented(self.parent)):
            # uncommented runtime attribute: nothing sensible to show
            return None
        else:
            return super().get_doc()  # type: ignore[misc]
+
+
class UninitializedInstanceAttributeMixin(DataDocumenterMixinBase):
    """
    Mixin for AttributeDocumenter to provide the feature for supporting uninitialized
    instance attributes (PEP-526 styled, annotation only attributes).

    Example:

        class Foo:
            attr: int  #: This is a target of this mix-in.
    """

    def is_uninitialized_instance_attribute(self, parent: Any) -> bool:
        """Check the subject is an annotation only attribute."""
        annotations = get_type_hints(parent, None, self.config.autodoc_type_aliases)
        return self.objpath[-1] in annotations

    def import_object(self, raiseerror: bool = False) -> bool:
        """Check the existence of an uninitialized instance attribute after failing
        to import the attribute."""
        try:
            return super().import_object(raiseerror=True)  # type: ignore[misc]
        except ImportError as exc:
            try:
                # Fall back: import the parent class and inspect its
                # annotations for the attribute name.
                ret = import_object(self.modname, self.objpath[:-1], 'class',
                                    attrgetter=self.get_attr,  # type: ignore[attr-defined]
                                    warningiserror=self.config.autodoc_warningiserror)
                parent = ret[3]
                if self.is_uninitialized_instance_attribute(parent):
                    self.object = UNINITIALIZED_ATTR
                    self.parent = parent
                    return True
            except ImportError:
                pass

            if raiseerror:
                raise
            logger.warning(exc.args[0], type='autodoc', subtype='import_object')
            self.env.note_reread()
            return False

    def should_suppress_value_header(self) -> bool:
        return (self.object is UNINITIALIZED_ATTR or
                super().should_suppress_value_header())

    def get_doc(self) -> list[list[str]] | None:
        if self.object is UNINITIALIZED_ATTR:
            # annotation-only attribute: no value, hence no value docstring
            return None
        return super().get_doc()  # type: ignore[misc]
+
+
class AttributeDocumenter(GenericAliasMixin, SlotsMixin,  # type: ignore[misc]
                          RuntimeInstanceAttributeMixin,
                          UninitializedInstanceAttributeMixin, NonDataDescriptorMixin,
                          DocstringStripSignatureMixin, ClassLevelDocumenter):
    """
    Specialized Documenter subclass for attributes.

    The mix-in bases each handle one special flavour of attribute (slots,
    runtime-only, annotation-only, non-data descriptors); their order in the
    MRO is significant -- see the note on NonDataDescriptorMixin.
    """
    objtype = 'attribute'
    member_order = 60
    option_spec: OptionSpec = dict(ModuleLevelDocumenter.option_spec)
    option_spec["annotation"] = annotation_option
    option_spec["no-value"] = bool_option

    # must be higher than the MethodDocumenter, else it will recognize
    # some non-data descriptors as methods
    priority = 10

    @staticmethod
    def is_function_or_method(obj: Any) -> bool:
        return inspect.isfunction(obj) or inspect.isbuiltin(obj) or inspect.ismethod(obj)

    @classmethod
    def can_document_member(cls, member: Any, membername: str, isattr: bool, parent: Any,
                            ) -> bool:
        """Accept data descriptors and plain (non-routine, non-class) values;
        module-level members are never handled here."""
        if isinstance(parent, ModuleDocumenter):
            return False
        if inspect.isattributedescriptor(member):
            return True
        if not inspect.isroutine(member) and not isinstance(member, type):
            return True
        return False

    def document_members(self, all_members: bool = False) -> None:
        # attributes have no sub-members
        pass

    def update_annotations(self, parent: Any) -> None:
        """Update __annotations__ to support type_comment and so on."""
        try:
            annotations = dict(inspect.getannotations(parent))
            parent.__annotations__ = annotations

            # Merge annotations discovered by source analysis (e.g. type
            # comments) for every class in the MRO.
            for cls in inspect.getmro(parent):
                try:
                    module = safe_getattr(cls, '__module__')
                    qualname = safe_getattr(cls, '__qualname__')

                    analyzer = ModuleAnalyzer.for_module(module)
                    analyzer.analyze()
                    for (classname, attrname), annotation in analyzer.annotations.items():
                        if classname == qualname and attrname not in annotations:
                            annotations[attrname] = annotation
                except (AttributeError, PycodeError):
                    pass
        except (AttributeError, TypeError):
            # Failed to set __annotations__ (built-in, extensions, etc.)
            pass

    def import_object(self, raiseerror: bool = False) -> bool:
        ret = super().import_object(raiseerror)
        if inspect.isenumattribute(self.object):
            # document the underlying value of an enum member
            self.object = self.object.value
        if self.parent:
            self.update_annotations(self.parent)

        return ret

    def get_real_modname(self) -> str:
        real_modname = self.get_attr(self.parent or self.object, '__module__', None)
        return real_modname or self.modname

    def should_suppress_value_header(self) -> bool:
        if super().should_suppress_value_header():
            return True
        else:
            # a ':meta hide-value:' field in the docstring also suppresses it
            doc = self.get_doc()
            if doc:
                docstring, metadata = separate_metadata('\n'.join(sum(doc, [])))
                if 'hide-value' in metadata:
                    return True

        return False

    def add_directive_header(self, sig: str) -> None:
        super().add_directive_header(sig)
        sourcename = self.get_sourcename()
        if self.options.annotation is SUPPRESS or self.should_suppress_directive_header():
            pass
        elif self.options.annotation:
            # explicit :annotation: option always wins
            self.add_line(' :annotation: %s' % self.options.annotation, sourcename)
        else:
            if self.config.autodoc_typehints != 'none':
                # obtain type annotation for this attribute
                annotations = get_type_hints(self.parent, None,
                                             self.config.autodoc_type_aliases)
                if self.objpath[-1] in annotations:
                    if self.config.autodoc_typehints_format == "short":
                        objrepr = stringify_annotation(annotations.get(self.objpath[-1]),
                                                       "smart")
                    else:
                        objrepr = stringify_annotation(annotations.get(self.objpath[-1]),
                                                       "fully-qualified-except-typing")
                    self.add_line(' :type: ' + objrepr, sourcename)

        try:
            if (self.options.no_value or self.should_suppress_value_header() or
                    ismock(self.object)):
                pass
            else:
                objrepr = object_description(self.object)
                self.add_line(' :value: ' + objrepr, sourcename)
        except ValueError:
            # the value has no printable representation
            pass

    def get_attribute_comment(self, parent: Any, attrname: str) -> list[str] | None:
        """Return the ``#:`` doc-comment for *attrname*, searching the MRO."""
        for cls in inspect.getmro(parent):
            try:
                module = safe_getattr(cls, '__module__')
                qualname = safe_getattr(cls, '__qualname__')

                analyzer = ModuleAnalyzer.for_module(module)
                analyzer.analyze()
                if qualname and self.objpath:
                    key = (qualname, attrname)
                    if key in analyzer.attr_docs:
                        return list(analyzer.attr_docs[key])
            except (AttributeError, PycodeError):
                pass

        return None

    def get_doc(self) -> list[list[str]] | None:
        # Check the attribute has a docstring-comment
        comment = self.get_attribute_comment(self.parent, self.objpath[-1])
        if comment:
            return [comment]

        try:
            # Disable `autodoc_inherit_docstring` temporarily to avoid to obtain
            # a docstring from the value which descriptor returns unexpectedly.
            # ref: https://github.com/sphinx-doc/sphinx/issues/7805
            orig = self.config.autodoc_inherit_docstrings
            self.config.autodoc_inherit_docstrings = False  # type: ignore[attr-defined]
            return super().get_doc()
        finally:
            self.config.autodoc_inherit_docstrings = orig  # type: ignore[attr-defined]

    def add_content(self, more_content: StringList | None) -> None:
        # Disable analyzing attribute comment on Documenter.add_content() to control it on
        # AttributeDocumenter.add_content()
        self.analyzer = None

        if more_content is None:
            more_content = StringList()
        self.update_content(more_content)
        super().add_content(more_content)
+
+
class PropertyDocumenter(DocstringStripSignatureMixin,  # type: ignore[misc]
                         ClassLevelDocumenter):
    """
    Specialized Documenter subclass for properties.

    Handles both plain properties and "class properties" (a ``classmethod``
    stacked on a ``property``).
    """
    objtype = 'property'
    member_order = 60

    # before AttributeDocumenter
    priority = AttributeDocumenter.priority + 1

    @classmethod
    def can_document_member(cls, member: Any, membername: str, isattr: bool, parent: Any,
                            ) -> bool:
        """Accept properties and classmethod-wrapped properties on classes."""
        if isinstance(parent, ClassDocumenter):
            if inspect.isproperty(member):
                return True
            else:
                # Look at the raw class __dict__: normal attribute access
                # already unwraps a classmethod-wrapped property.
                __dict__ = safe_getattr(parent.object, '__dict__', {})
                obj = __dict__.get(membername)
                return isinstance(obj, classmethod) and inspect.isproperty(obj.__func__)
        else:
            return False

    def import_object(self, raiseerror: bool = False) -> bool:
        """Import the property.

        If the imported object is a ``classmethod`` wrapping a property, the
        wrapped property is documented instead and ``self.isclassmethod`` is
        set accordingly.  (The previous docstring, copied from the
        uninitialized-attribute mixin, described the wrong behavior.)
        """
        ret = super().import_object(raiseerror)
        if ret and not inspect.isproperty(self.object):
            __dict__ = safe_getattr(self.parent, '__dict__', {})
            obj = __dict__.get(self.objpath[-1])
            if isinstance(obj, classmethod) and inspect.isproperty(obj.__func__):
                self.object = obj.__func__
                self.isclassmethod = True
                return True
            else:
                return False

        self.isclassmethod = False
        return ret

    def format_args(self, **kwargs: Any) -> str:
        func = self._get_property_getter()
        if func is None:
            return ''

        # update the annotations of the property getter
        self.env.app.emit('autodoc-before-process-signature', func, False)
        # correctly format the arguments for a property
        return super().format_args(**kwargs)

    def document_members(self, all_members: bool = False) -> None:
        # properties have no sub-members
        pass

    def get_real_modname(self) -> str:
        real_modname = self.get_attr(self.parent or self.object, '__module__', None)
        return real_modname or self.modname

    def add_directive_header(self, sig: str) -> None:
        super().add_directive_header(sig)
        sourcename = self.get_sourcename()
        if inspect.isabstractmethod(self.object):
            self.add_line(' :abstractmethod:', sourcename)
        if self.isclassmethod:
            self.add_line(' :classmethod:', sourcename)

        func = self._get_property_getter()
        if func is None or self.config.autodoc_typehints == 'none':
            return

        try:
            # Emit the getter's return annotation as the property type.
            signature = inspect.signature(func,
                                          type_aliases=self.config.autodoc_type_aliases)
            if signature.return_annotation is not Parameter.empty:
                if self.config.autodoc_typehints_format == "short":
                    objrepr = stringify_annotation(signature.return_annotation, "smart")
                else:
                    objrepr = stringify_annotation(signature.return_annotation,
                                                   "fully-qualified-except-typing")
                self.add_line(' :type: ' + objrepr, sourcename)
        except TypeError as exc:
            # redundant trailing `pass` removed; the warning is the handling
            logger.warning(__("Failed to get a function signature for %s: %s"),
                           self.fullname, exc)
        except ValueError:
            # unrepresentable signature: silently omit the :type: line
            pass

    def _get_property_getter(self):
        """Return the getter of a property or cached_property, else None."""
        if safe_getattr(self.object, 'fget', None):  # property
            return self.object.fget
        if safe_getattr(self.object, 'func', None):  # cached_property
            return self.object.func
        return None
+
+
def autodoc_attrgetter(app: Sphinx, obj: Any, name: str, *defargs: Any) -> Any:
    """Alternative getattr() for types.

    Dispatch to the first registered attribute getter whose type matches
    *obj*; otherwise fall back to :func:`safe_getattr`.
    """
    getter = next((func for typ, func in app.registry.autodoc_attrgettrs.items()
                   if isinstance(obj, typ)), None)
    if getter is not None:
        return getter(obj, name, *defargs)
    return safe_getattr(obj, name, *defargs)
+
+
def setup(app: Sphinx) -> dict[str, Any]:
    """Entry point: register autodoc documenters, config values and events."""
    app.add_autodocumenter(ModuleDocumenter)
    app.add_autodocumenter(ClassDocumenter)
    app.add_autodocumenter(ExceptionDocumenter)
    app.add_autodocumenter(DataDocumenter)
    app.add_autodocumenter(FunctionDocumenter)
    app.add_autodocumenter(DecoratorDocumenter)
    app.add_autodocumenter(MethodDocumenter)
    app.add_autodocumenter(AttributeDocumenter)
    app.add_autodocumenter(PropertyDocumenter)

    app.add_config_value('autoclass_content', 'class', True, ENUM('both', 'class', 'init'))
    app.add_config_value('autodoc_member_order', 'alphabetical', True,
                         ENUM('alphabetical', 'bysource', 'groupwise'))
    app.add_config_value('autodoc_class_signature', 'mixed', True, ENUM('mixed', 'separated'))
    app.add_config_value('autodoc_default_options', {}, True)
    app.add_config_value('autodoc_docstring_signature', True, True)
    app.add_config_value('autodoc_mock_imports', [], True)
    app.add_config_value('autodoc_typehints', "signature", True,
                         ENUM("signature", "description", "none", "both"))
    app.add_config_value('autodoc_typehints_description_target', 'all', True,
                         ENUM('all', 'documented', 'documented_params'))
    app.add_config_value('autodoc_type_aliases', {}, True)
    app.add_config_value('autodoc_typehints_format', "short", 'env',
                         ENUM("fully-qualified", "short"))
    app.add_config_value('autodoc_warningiserror', True, True)
    app.add_config_value('autodoc_inherit_docstrings', True, True)
    # events fired while documenting objects
    app.add_event('autodoc-before-process-signature')
    app.add_event('autodoc-process-docstring')
    app.add_event('autodoc-process-signature')
    app.add_event('autodoc-skip-member')
    app.add_event('autodoc-process-bases')

    # sub-extensions always enabled together with autodoc
    app.setup_extension('sphinx.ext.autodoc.preserve_defaults')
    app.setup_extension('sphinx.ext.autodoc.type_comment')
    app.setup_extension('sphinx.ext.autodoc.typehints')

    return {'version': sphinx.__display_version__, 'parallel_read_safe': True}
diff --git a/sphinx/ext/autodoc/directive.py b/sphinx/ext/autodoc/directive.py
new file mode 100644
index 0000000..64cbc9b
--- /dev/null
+++ b/sphinx/ext/autodoc/directive.py
@@ -0,0 +1,151 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, Any, Callable
+
+from docutils import nodes
+from docutils.statemachine import StringList
+from docutils.utils import Reporter, assemble_option_dict
+
+from sphinx.ext.autodoc import Documenter, Options
+from sphinx.util import logging
+from sphinx.util.docutils import SphinxDirective, switch_source_input
+from sphinx.util.nodes import nested_parse_with_titles
+
+if TYPE_CHECKING:
+ from docutils.nodes import Element, Node
+ from docutils.parsers.rst.states import RSTState
+
+ from sphinx.config import Config
+ from sphinx.environment import BuildEnvironment
+
+logger = logging.getLogger(__name__)
+
+
# common option names for autodoc directives
AUTODOC_DEFAULT_OPTIONS = ['members', 'undoc-members', 'inherited-members',
                           'show-inheritance', 'private-members', 'special-members',
                           'ignore-module-all', 'exclude-members', 'member-order',
                           'imported-members', 'class-doc-from', 'no-value']

# options whose per-directive value may start with '+' to extend (rather
# than replace) the value configured in autodoc_default_options
AUTODOC_EXTENDABLE_OPTIONS = ['members', 'private-members', 'special-members',
                              'exclude-members']
+
+
class DummyOptionSpec(dict):
    """An option_spec that accepts any option name.

    Autodoc directives take arbitrary options which are validated later by
    the selected Documenter's real option_spec, so at parse time every
    option must be let through unchanged.
    """

    def __bool__(self) -> bool:
        """Pretend at least one option is defined."""
        return True

    def __getitem__(self, key: str) -> Callable[[str], str]:
        """Return an identity converter for any option name."""
        return lambda value: value
+
+
class DocumenterBridge:
    """A parameters container for Documenters."""

    def __init__(self, env: BuildEnvironment, reporter: Reporter | None, options: Options,
                 lineno: int, state: Any) -> None:
        self.env = env
        self._reporter = reporter
        self.genopt = options  # options passed down to generated sub-documenters
        self.lineno = lineno  # directive line number, used in messages
        self.record_dependencies: set[str] = set()  # source files the output depends on
        self.result = StringList()  # the generated reST lines
        self.state = state
+
+
def process_documenter_options(documenter: type[Documenter], config: Config, options: dict,
                               ) -> Options:
    """Recognize options of Documenter from user input.

    Merges the directive's own options with ``autodoc_default_options``:
    a ``no-<name>`` flag negates the configured default, and a directive
    value starting with ``'+'`` extends the default for the options listed
    in AUTODOC_EXTENDABLE_OPTIONS.
    """
    for name in AUTODOC_DEFAULT_OPTIONS:
        if name not in documenter.option_spec:
            continue
        # a flag option parses to None, so 'no-<name>:' present => negated
        negated = options.pop('no-' + name, True) is None
        if name in config.autodoc_default_options and not negated:
            if name in options and isinstance(config.autodoc_default_options[name], str):
                # take value from options if present or extend it
                # with autodoc_default_options if necessary
                if name in AUTODOC_EXTENDABLE_OPTIONS:
                    if options[name] is not None and options[name].startswith('+'):
                        options[name] = ','.join([config.autodoc_default_options[name],
                                                  options[name][1:]])
            else:
                options[name] = config.autodoc_default_options[name]

        elif options.get(name) is not None:
            # remove '+' from option argument if there's nothing to merge it with
            options[name] = options[name].lstrip('+')

    return Options(assemble_option_dict(options.items(), documenter.option_spec))
+
+
def parse_generated_content(state: RSTState, content: StringList, documenter: Documenter,
                            ) -> list[Node]:
    """Parse an item of content generated by Documenter.

    Parses into a throwaway container node and returns its children, so the
    caller can insert them wherever the directive appeared.
    """
    with switch_source_input(state, content):
        if documenter.titles_allowed:
            node: Element = nodes.section()
            # necessary so that the child nodes get the right source/line set
            node.document = state.document
            nested_parse_with_titles(state, content, node)
        else:
            node = nodes.paragraph()
            node.document = state.document
            state.nested_parse(content, 0, node)

    return node.children
+
+
class AutodocDirective(SphinxDirective):
    """A directive class for all autodoc directives. It works as a dispatcher of Documenters.

    It invokes a Documenter upon running. After the processing, it parses and returns
    the content generated by Documenter.
    """
    option_spec = DummyOptionSpec()
    has_content = True
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True

    def run(self) -> list[Node]:
        reporter = self.state.document.reporter

        try:
            source, lineno = reporter.get_source_and_line(  # type: ignore[attr-defined]
                self.lineno)
        except AttributeError:
            # reporters without source mapping
            source, lineno = (None, None)
        logger.debug('[autodoc] %s:%s: input:\n%s', source, lineno, self.block_text)

        # look up target Documenter
        objtype = self.name[4:]  # strip prefix (auto-).
        doccls = self.env.app.registry.documenters[objtype]

        # process the options with the selected documenter's option_spec
        try:
            documenter_options = process_documenter_options(doccls, self.config, self.options)
        except (KeyError, ValueError, TypeError) as exc:
            # an option is either unknown or has a wrong type
            logger.error('An option to %s is either unknown or has an invalid value: %s' %
                         (self.name, exc), location=(self.env.docname, lineno))
            return []

        # generate the output
        params = DocumenterBridge(self.env, reporter, documenter_options, lineno, self.state)
        documenter = doccls(params, self.arguments[0])
        documenter.generate(more_content=self.content)
        if not params.result:
            return []

        logger.debug('[autodoc] output:\n%s', '\n'.join(params.result))

        # record all filenames as dependencies -- this will at least
        # partially make automatic invalidation possible
        for fn in params.record_dependencies:
            self.state.document.settings.record_dependencies.add(fn)

        result = parse_generated_content(self.state, params.result, documenter)
        return result
diff --git a/sphinx/ext/autodoc/importer.py b/sphinx/ext/autodoc/importer.py
new file mode 100644
index 0000000..84bfee5
--- /dev/null
+++ b/sphinx/ext/autodoc/importer.py
@@ -0,0 +1,342 @@
+"""Importer utilities for autodoc"""
+
+from __future__ import annotations
+
+import contextlib
+import importlib
+import os
+import sys
+import traceback
+import typing
+from typing import TYPE_CHECKING, Any, Callable, NamedTuple
+
+from sphinx.ext.autodoc.mock import ismock, undecorate
+from sphinx.pycode import ModuleAnalyzer, PycodeError
+from sphinx.util import logging
+from sphinx.util.inspect import (
+ getannotations,
+ getmro,
+ getslots,
+ isclass,
+ isenumclass,
+ safe_getattr,
+)
+
+if TYPE_CHECKING:
+ from types import ModuleType
+
+ from sphinx.ext.autodoc import ObjectMember
+
+logger = logging.getLogger(__name__)
+
+
def mangle(subject: Any, name: str) -> str:
    """Apply Python's private-name mangling to *name* for class *subject*.

    ``__attr`` on class ``Foo`` becomes ``_Foo__attr``; dunders and public
    names pass through unchanged, as do names on non-class subjects.
    """
    if not name.startswith('__') or name.endswith('__'):
        # public or dunder name: never mangled
        return name
    try:
        if isclass(subject):
            return '_%s%s' % (subject.__name__, name)
    except AttributeError:
        pass
    return name
+
+
+def unmangle(subject: Any, name: str) -> str | None:
+ """Unmangle the given name."""
+ try:
+ if isclass(subject) and not name.endswith('__'):
+ prefix = "_%s__" % subject.__name__
+ if name.startswith(prefix):
+ return name.replace(prefix, "__", 1)
+ else:
+ for cls in subject.__mro__:
+ prefix = "_%s__" % cls.__name__
+ if name.startswith(prefix):
+ # mangled attribute defined in parent class
+ return None
+ except AttributeError:
+ pass
+
+ return name
+
+
def import_module(modname: str, warningiserror: bool = False) -> Any:
    """
    Call importlib.import_module(modname), convert exceptions to ImportError
    """
    try:
        with logging.skip_warningiserror(not warningiserror):
            return importlib.import_module(modname)
    except BaseException as exc:
        # Importing modules may cause any side effects, including
        # SystemExit, so we need to catch all errors.
        # NOTE: the two-element args (original exception, formatted traceback)
        # are unpacked by import_object() when building its error message.
        raise ImportError(exc, traceback.format_exc()) from exc
+
+
def _reload_module(module: ModuleType, warningiserror: bool = False) -> Any:
    """
    Call importlib.reload(module), convert exceptions to ImportError
    """
    try:
        with logging.skip_warningiserror(not warningiserror):
            return importlib.reload(module)
    except BaseException as exc:
        # Importing modules may cause any side effects, including
        # SystemExit, so we need to catch all errors.
        # NOTE: keeps the same two-element ImportError args convention as
        # import_module() above.
        raise ImportError(exc, traceback.format_exc()) from exc
+
+
def import_object(modname: str, objpath: list[str], objtype: str = '',
                  attrgetter: Callable[[Any, str], Any] = safe_getattr,
                  warningiserror: bool = False) -> Any:
    """Import the object addressed by *modname* and *objpath*.

    Returns a list ``[module, parent, object_name, obj]``.  When *modname*
    itself fails to import, the last dotted component is moved onto
    *objpath* and the import is retried with the parent module.  On failure
    an ImportError with a readable message is raised.
    """
    if objpath:
        logger.debug('[autodoc] from %s import %s', modname, '.'.join(objpath))
    else:
        logger.debug('[autodoc] import %s', modname)

    try:
        module = None
        exc_on_importing = None
        objpath = list(objpath)
        while module is None:
            try:
                original_module_names = frozenset(sys.modules)
                module = import_module(modname, warningiserror=warningiserror)
                if os.environ.get('SPHINX_AUTODOC_RELOAD_MODULES'):
                    new_modules = [m for m in sys.modules if m not in original_module_names]
                    # Try reloading modules with ``typing.TYPE_CHECKING == True``.
                    try:
                        typing.TYPE_CHECKING = True
                        # Ignore failures; we've already successfully loaded these modules
                        with contextlib.suppress(ImportError, KeyError):
                            for m in new_modules:
                                _reload_module(sys.modules[m])
                    finally:
                        typing.TYPE_CHECKING = False
                    module = sys.modules[modname]
                logger.debug('[autodoc] import %s => %r', modname, module)
            except ImportError as exc:
                logger.debug('[autodoc] import %s => failed', modname)
                exc_on_importing = exc
                if '.' in modname:
                    # retry with parent module
                    modname, name = modname.rsplit('.', 1)
                    objpath.insert(0, name)
                else:
                    raise

        # Walk objpath attribute-by-attribute from the imported module.
        obj = module
        parent = None
        object_name = None
        for attrname in objpath:
            parent = obj
            logger.debug('[autodoc] getattr(_, %r)', attrname)
            mangled_name = mangle(obj, attrname)
            obj = attrgetter(obj, mangled_name)

            try:
                logger.debug('[autodoc] => %r', obj)
            except TypeError:
                # fallback of failure on logging for broken object
                # refs: https://github.com/sphinx-doc/sphinx/issues/9095
                logger.debug('[autodoc] => %r', (obj,))

            object_name = attrname
        return [module, parent, object_name, obj]
    except (AttributeError, ImportError) as exc:
        if isinstance(exc, AttributeError) and exc_on_importing:
            # restore ImportError
            exc = exc_on_importing

        if objpath:
            errmsg = ('autodoc: failed to import %s %r from module %r' %
                      (objtype, '.'.join(objpath), modname))
        else:
            errmsg = f'autodoc: failed to import {objtype} {modname!r}'

        if isinstance(exc, ImportError):
            # import_module() raises ImportError having real exception obj and
            # traceback
            real_exc, traceback_msg = exc.args
            if isinstance(real_exc, SystemExit):
                errmsg += ('; the module executes module level statement '
                           'and it might call sys.exit().')
            elif isinstance(real_exc, ImportError) and real_exc.args:
                errmsg += '; the following exception was raised:\n%s' % real_exc.args[0]
            else:
                errmsg += '; the following exception was raised:\n%s' % traceback_msg
        else:
            errmsg += '; the following exception was raised:\n%s' % traceback.format_exc()

        logger.debug(errmsg)
        raise ImportError(errmsg) from exc
+
+
class Attribute(NamedTuple):
    """A member discovered on a target object by get_object_members()."""
    name: str               # attribute name (already unmangled)
    directly_defined: bool  # defined on the subject itself rather than inherited
    value: Any              # attribute value, or a sentinel (INSTANCEATTR/SLOTSATTR)
+
+
def get_object_members(
    subject: Any,
    objpath: list[str],
    attrgetter: Callable,
    analyzer: ModuleAnalyzer | None = None,
) -> dict[str, Attribute]:
    """Get members and attributes of target object.

    Collects enum members, __slots__ entries, regular dir() members,
    annotation-only members across the MRO, and (when *analyzer* is given)
    source-only instance attributes.  Earlier sources win on name clashes.
    """
    from sphinx.ext.autodoc import INSTANCEATTR

    # the members directly defined in the class
    obj_dict = attrgetter(subject, '__dict__', {})

    members: dict[str, Attribute] = {}

    # enum members
    if isenumclass(subject):
        for name, value in subject.__members__.items():
            if name not in members:
                members[name] = Attribute(name, True, value)

        # also pick up non-member attributes defined on the enum class itself
        superclass = subject.__mro__[1]
        for name in obj_dict:
            if name not in superclass.__dict__:
                value = safe_getattr(subject, name)
                members[name] = Attribute(name, True, value)

    # members in __slots__
    try:
        subject___slots__ = getslots(subject)
        if subject___slots__:
            from sphinx.ext.autodoc import SLOTSATTR

            for name in subject___slots__:
                members[name] = Attribute(name, True, SLOTSATTR)
    except (TypeError, ValueError):
        pass

    # other members
    for name in dir(subject):
        try:
            value = attrgetter(subject, name)
            directly_defined = name in obj_dict
            name = unmangle(subject, name)
            if name and name not in members:
                members[name] = Attribute(name, directly_defined, value)
        except AttributeError:
            continue

    # annotation only member (ex. attr: int)
    for i, cls in enumerate(getmro(subject)):
        for name in getannotations(cls):
            name = unmangle(cls, name)
            if name and name not in members:
                # i == 0 means annotated on the subject itself
                members[name] = Attribute(name, i == 0, INSTANCEATTR)

    if analyzer:
        # append instance attributes (cf. self.attr1) if analyzer knows
        namespace = '.'.join(objpath)
        for (ns, name) in analyzer.find_attr_docs():
            if namespace == ns and name not in members:
                members[name] = Attribute(name, True, INSTANCEATTR)

    return members
+
+
def get_class_members(subject: Any, objpath: Any, attrgetter: Callable,
                      inherit_docstrings: bool = True) -> dict[str, ObjectMember]:
    """Get members and attributes of target class.

    Like get_object_members() but returns ObjectMember records and also
    attaches doc-comments found by source analysis across the MRO.
    """
    from sphinx.ext.autodoc import INSTANCEATTR, ObjectMember

    # the members directly defined in the class
    obj_dict = attrgetter(subject, '__dict__', {})

    members: dict[str, ObjectMember] = {}

    # enum members
    if isenumclass(subject):
        for name, value in subject.__members__.items():
            if name not in members:
                members[name] = ObjectMember(name, value, class_=subject)

        # also pick up non-member attributes defined on the enum class itself
        superclass = subject.__mro__[1]
        for name in obj_dict:
            if name not in superclass.__dict__:
                value = safe_getattr(subject, name)
                members[name] = ObjectMember(name, value, class_=subject)

    # members in __slots__
    try:
        subject___slots__ = getslots(subject)
        if subject___slots__:
            from sphinx.ext.autodoc import SLOTSATTR

            for name, docstring in subject___slots__.items():
                members[name] = ObjectMember(name, SLOTSATTR, class_=subject,
                                             docstring=docstring)
    except (TypeError, ValueError):
        pass

    # other members
    for name in dir(subject):
        try:
            value = attrgetter(subject, name)
            if ismock(value):
                value = undecorate(value)

            unmangled = unmangle(subject, name)
            if unmangled and unmangled not in members:
                if name in obj_dict:
                    members[unmangled] = ObjectMember(unmangled, value, class_=subject)
                else:
                    members[unmangled] = ObjectMember(unmangled, value)
        except AttributeError:
            continue

    try:
        # walk the MRO to collect annotation-only members and doc-comments
        for cls in getmro(subject):
            try:
                modname = safe_getattr(cls, '__module__')
                qualname = safe_getattr(cls, '__qualname__')
                analyzer = ModuleAnalyzer.for_module(modname)
                analyzer.analyze()
            except AttributeError:
                qualname = None
                analyzer = None
            except PycodeError:
                analyzer = None

            # annotation only member (ex. attr: int)
            for name in getannotations(cls):
                name = unmangle(cls, name)
                if name and name not in members:
                    if analyzer and (qualname, name) in analyzer.attr_docs:
                        docstring = '\n'.join(analyzer.attr_docs[qualname, name])
                    else:
                        docstring = None

                    members[name] = ObjectMember(name, INSTANCEATTR, class_=cls,
                                                 docstring=docstring)

            # append or complete instance attributes (cf. self.attr1) if analyzer knows
            if analyzer:
                for (ns, name), docstring in analyzer.attr_docs.items():
                    if ns == qualname and name not in members:
                        # otherwise unknown instance attribute
                        members[name] = ObjectMember(name, INSTANCEATTR, class_=cls,
                                                     docstring='\n'.join(docstring))
                    elif (ns == qualname and docstring and
                          isinstance(members[name], ObjectMember) and
                          not members[name].docstring):
                        if cls != subject and not inherit_docstrings:
                            # If we are in the MRO of the class and not the class itself,
                            # and we do not want to inherit docstrings, then skip setting
                            # the docstring below
                            continue
                        # attribute is already known, because dir(subject) enumerates it.
                        # But it has no docstring yet
                        members[name].docstring = '\n'.join(docstring)
    except AttributeError:
        pass

    return members
diff --git a/sphinx/ext/autodoc/mock.py b/sphinx/ext/autodoc/mock.py
new file mode 100644
index 0000000..7034977
--- /dev/null
+++ b/sphinx/ext/autodoc/mock.py
@@ -0,0 +1,198 @@
+"""mock for autodoc"""
+
+from __future__ import annotations
+
+import contextlib
+import os
+import sys
+from importlib.abc import Loader, MetaPathFinder
+from importlib.machinery import ModuleSpec
+from types import MethodType, ModuleType
+from typing import TYPE_CHECKING, Any
+
+from sphinx.util import logging
+from sphinx.util.inspect import isboundmethod, safe_getattr
+
+if TYPE_CHECKING:
+ from collections.abc import Generator, Iterator, Sequence
+
+logger = logging.getLogger(__name__)
+
+
class _MockObject:
    """Used by autodoc_mock_imports.

    A universal stand-in: attribute access, subscripting, calling and
    subclassing all produce further mock objects, so code that merely
    touches a mocked library keeps working while docs are built.
    """

    __display_name__ = '_MockObject'
    __name__ = ''
    __sphinx_mock__ = True  # marker recognized by ismock()
    __sphinx_decorator_args__: tuple[Any, ...] = ()

    def __new__(cls, *args: Any, **kwargs: Any) -> Any:
        # A (name, bases, namespace) argument triple means the class is being
        # called like ``type(...)``, i.e. something is subclassing a mock.
        if len(args) == 3 and isinstance(args[1], tuple):
            superclass = args[1][-1].__class__
            if superclass is cls:
                # subclassing MockObject
                return _make_subclass(args[0], superclass.__display_name__,
                                      superclass=superclass, attributes=args[2])

        return super().__new__(cls)

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        self.__qualname__ = self.__name__

    def __len__(self) -> int:
        return 0

    def __contains__(self, key: str) -> bool:
        return False

    def __iter__(self) -> Iterator:
        return iter([])

    def __mro_entries__(self, bases: tuple) -> tuple:
        # Allow a mock *instance* to appear in a class's bases list.
        return (self.__class__,)

    def __getitem__(self, key: Any) -> _MockObject:
        return _make_subclass(str(key), self.__display_name__, self.__class__)()

    def __getattr__(self, key: str) -> _MockObject:
        return _make_subclass(key, self.__display_name__, self.__class__)()

    def __call__(self, *args: Any, **kwargs: Any) -> Any:
        # Calling a mock (e.g. using it as a decorator) records the positional
        # arguments so undecorate() can later recover the wrapped object.
        call = self.__class__()
        call.__sphinx_decorator_args__ = args
        return call

    def __repr__(self) -> str:
        return self.__display_name__
+
+
def _make_subclass(name: str, module: str, superclass: Any = _MockObject,
                   attributes: Any = None, decorator_args: tuple = ()) -> Any:
    """Create a new mock class named *name*, pretending to live in *module*.

    Extra class attributes may be supplied via *attributes*; *decorator_args*
    records positional arguments captured when a mock is used as a decorator.
    """
    namespace: dict[str, Any] = {
        '__module__': module,
        '__display_name__': f'{module}.{name}',
        '__name__': name,
        '__sphinx_decorator_args__': decorator_args,
    }
    if attributes:
        namespace.update(attributes)
    return type(name, (superclass,), namespace)
+
+
class _MockModule(ModuleType):
    """Used by autodoc_mock_imports.

    A stand-in module whose every attribute access yields a fresh mock
    object, so ``from mocked import anything`` succeeds during doc builds.
    """
    __file__ = os.devnull
    __sphinx_mock__ = True  # marker checked by ismock()/ismockmodule()

    def __init__(self, name: str) -> None:
        super().__init__(name)
        self.__all__: list[str] = []
        # a __path__ makes this module look like a package, so submodule
        # imports under it also resolve (to further mocks)
        self.__path__: list[str] = []

    def __getattr__(self, name: str) -> _MockObject:
        # any missing attribute materializes as an instance of a new mock
        # subclass named after this module
        return _make_subclass(name, self.__name__)()

    def __repr__(self) -> str:
        return self.__name__
+
+
class MockLoader(Loader):
    """A loader for mocking."""
    def __init__(self, finder: MockFinder) -> None:
        super().__init__()
        self.finder = finder  # back-reference used to record mocked names

    def create_module(self, spec: ModuleSpec) -> ModuleType:
        """Create a _MockModule and record its name on the owning finder."""
        logger.debug('[autodoc] adding a mock module as %s!', spec.name)
        self.finder.mocked_modules.append(spec.name)
        return _MockModule(spec.name)

    def exec_module(self, module: ModuleType) -> None:
        pass  # nothing to do
+
+
class MockFinder(MetaPathFinder):
    """A meta path finder that serves mock modules for configured names."""

    def __init__(self, modnames: list[str]) -> None:
        super().__init__()
        self.modnames = modnames
        self.loader = MockLoader(self)
        self.mocked_modules: list[str] = []

    def find_spec(self, fullname: str, path: Sequence[bytes | str] | None,
                  target: ModuleType | None = None) -> ModuleSpec | None:
        """Return a mock-backed spec if *fullname* is (under) a target name."""
        targeted = any(fullname == modname or fullname.startswith(modname + '.')
                       for modname in self.modnames)
        if targeted:
            return ModuleSpec(fullname, self.loader)
        return None

    def invalidate_caches(self) -> None:
        """Invalidate mocked modules on sys.modules."""
        for modname in self.mocked_modules:
            sys.modules.pop(modname, None)
+
+
@contextlib.contextmanager
def mock(modnames: list[str]) -> Generator[None, None, None]:
    """Insert mock modules during context::

        with mock(['target.module.name']):
            # mock modules are enabled here
            ...
    """
    # Create and register the finder *before* entering the try block: in the
    # original code, a failure in MockFinder() or in the insert left the
    # finally clause referencing an unbound (NameError) or unregistered
    # (ValueError from list.remove) finder.
    finder = MockFinder(modnames)
    sys.meta_path.insert(0, finder)
    try:
        yield
    finally:
        sys.meta_path.remove(finder)
        finder.invalidate_caches()
+
+
def ismockmodule(subject: Any) -> bool:
    """Check if the object is a mocked module (an ``_MockModule`` instance)."""
    return isinstance(subject, _MockModule)
+
+
def ismock(subject: Any) -> bool:
    """Check if the object is mocked.

    True for mock modules, objects/classes produced by ``_make_subclass``,
    and bound methods whose underlying function belongs to a mocked class.
    """
    # check the object has '__sphinx_mock__' attribute
    try:
        if safe_getattr(subject, '__sphinx_mock__', None) is None:
            return False
    except AttributeError:
        return False

    # check the object is mocked module
    if isinstance(subject, _MockModule):
        return True

    # check the object is bound method
    if isinstance(subject, MethodType) and isboundmethod(subject):
        # look through the binding at the underlying function
        tmp_subject = subject.__func__
    else:
        tmp_subject = subject

    try:
        # check the object is mocked object
        __mro__ = safe_getattr(type(tmp_subject), '__mro__', [])
        if len(__mro__) > 2 and __mro__[-2] is _MockObject:
            # A mocked object has a MRO that ends with (..., _MockObject, object).
            return True
    except AttributeError:
        pass

    return False
+
+
def undecorate(subject: _MockObject) -> Any:
    """Unwrap mock if *subject* is decorated by mocked object.

    If not decorated, returns given *subject* itself.
    """
    decorated = ismock(subject) and subject.__sphinx_decorator_args__
    return subject.__sphinx_decorator_args__[0] if decorated else subject
diff --git a/sphinx/ext/autodoc/preserve_defaults.py b/sphinx/ext/autodoc/preserve_defaults.py
new file mode 100644
index 0000000..5f957ce
--- /dev/null
+++ b/sphinx/ext/autodoc/preserve_defaults.py
@@ -0,0 +1,199 @@
+"""Preserve function defaults.
+
+Preserve the default argument values of function signatures in source code
+and keep them not evaluated for readability.
+"""
+
+from __future__ import annotations
+
+import ast
+import inspect
+import types
+import warnings
+from typing import TYPE_CHECKING
+
+import sphinx
+from sphinx.deprecation import RemovedInSphinx90Warning
+from sphinx.locale import __
+from sphinx.pycode.ast import unparse as ast_unparse
+from sphinx.util import logging
+
+if TYPE_CHECKING:
+ from typing import Any
+
+ from sphinx.application import Sphinx
+
+logger = logging.getLogger(__name__)
+_LAMBDA_NAME = (lambda: None).__name__
+
+
class DefaultValue:
    """Wrapper whose ``repr()`` is the verbatim source text of a default.

    ``inspect.Signature`` renders parameter defaults with ``repr()``, so
    wrapping the original source snippet in this class makes the unevaluated
    text appear in generated signatures.
    """

    def __init__(self, name: str) -> None:
        self.name = name

    def __repr__(self) -> str:
        return self.name
+
+
def get_function_def(obj: Any) -> ast.FunctionDef | None:
    """Get FunctionDef object from living object.

    This tries to parse original code for living object and returns
    AST node for given *obj*.

    .. deprecated:: kept only for backward compatibility; scheduled for
       removal in Sphinx 9 (see the warning emitted below).
    """
    warnings.warn('sphinx.ext.autodoc.preserve_defaults.get_function_def is'
                  ' deprecated and scheduled for removal in Sphinx 9.'
                  ' Use sphinx.ext.autodoc.preserve_defaults._get_arguments() to'
                  ' extract AST arguments objects from a lambda or regular'
                  ' function.', RemovedInSphinx90Warning, stacklevel=2)

    try:
        source = inspect.getsource(obj)
        if source.startswith((' ', '\t')):
            # subject is placed inside class or block. To read its docstring,
            # this adds if-block before the declaration.
            module = ast.parse('if True:\n' + source)
            return module.body[0].body[0]  # type: ignore[attr-defined]
        else:
            module = ast.parse(source)
            return module.body[0]  # type: ignore[return-value]
    except (OSError, TypeError):  # failed to load source code
        return None
+
+
def _get_arguments(obj: Any, /) -> ast.arguments | None:
    """Parse 'ast.arguments' from an object.

    This tries to parse the original code for an object and returns
    an 'ast.arguments' node.  Returns None when the source is unavailable
    or when a lambda's source cannot be isolated into parsable code.
    """
    try:
        source = inspect.getsource(obj)
        if source.startswith((' ', '\t')):
            # 'obj' is in some indented block.
            module = ast.parse('if True:\n' + source)
            subject = module.body[0].body[0]  # type: ignore[attr-defined]
        else:
            module = ast.parse(source)
            subject = module.body[0]
    except (OSError, TypeError):
        # bail; failed to load source for 'obj'.
        return None
    except SyntaxError:
        if _is_lambda(obj):
            # Most likely a multi-line arising from detecting a lambda, e.g.:
            #
            # class Egg:
            #     x = property(
            #         lambda self: 1, doc="...")
            return None

        # Other syntax errors that are not due to the fact that we are
        # documenting a lambda function are propagated
        # (in particular if a lambda is renamed by the user).
        raise

    return _get_arguments_inner(subject)
+
+
def _is_lambda(x, /) -> bool:
    # NOTE: types.LambdaType is the same object as types.FunctionType, so the
    # __name__ comparison against '<lambda>' is what actually identifies a
    # genuine (unrenamed) lambda.
    return isinstance(x, types.LambdaType) and x.__name__ == _LAMBDA_NAME
+
+
+def _get_arguments_inner(x: Any, /) -> ast.arguments | None:
+ if isinstance(x, (ast.AsyncFunctionDef, ast.FunctionDef, ast.Lambda)):
+ return x.args
+ if isinstance(x, (ast.Assign, ast.AnnAssign)):
+ return _get_arguments_inner(x.value)
+ return None
+
+
def get_default_value(lines: list[str], position: ast.AST) -> str | None:
    """Extract the verbatim source text of a default value at *position*.

    Returns None for multi-line defaults, or when *position* lacks location
    attributes / points outside *lines*.
    """
    try:
        if position.lineno != position.end_lineno:
            # multiline value is not supported now
            return None
        source_line = lines[position.lineno - 1]
        return source_line[position.col_offset:position.end_col_offset]
    except (AttributeError, IndexError):
        return None
+
+
def update_defvalue(app: Sphinx, obj: Any, bound_method: bool) -> None:
    """Update defvalue info of *obj* using type_comments.

    Replaces evaluated default values in *obj*'s signature with
    :class:`DefaultValue` wrappers holding the original source text, so
    autodoc renders defaults exactly as written in the source.
    """
    if not app.config.autodoc_preserve_defaults:
        return

    try:
        lines = inspect.getsource(obj).splitlines()
        if lines[0].startswith((' ', '\t')):
            # insert a dummy line to follow what _get_arguments() does.
            lines.insert(0, '')
    except (OSError, TypeError):
        lines = []

    try:
        args = _get_arguments(obj)
    except SyntaxError:
        return
    if args is None:
        # If the object is a built-in, we won't be always able to recover
        # the function definition and its arguments. This happens if *obj*
        # is the `__init__` method generated automatically for dataclasses.
        return

    if not args.defaults and not args.kw_defaults:
        return

    try:
        if bound_method and inspect.ismethod(obj) and hasattr(obj, '__func__'):
            sig = inspect.signature(obj.__func__)
        else:
            sig = inspect.signature(obj)
        # walk parameters in order, consuming the AST default lists in step
        defaults = list(args.defaults)
        kw_defaults = list(args.kw_defaults)
        parameters = list(sig.parameters.values())
        for i, param in enumerate(parameters):
            if param.default is param.empty:
                if param.kind == param.KEYWORD_ONLY:
                    # Consume kw_defaults for kwonly args
                    kw_defaults.pop(0)
            else:
                if param.kind in (param.POSITIONAL_ONLY, param.POSITIONAL_OR_KEYWORD):
                    default = defaults.pop(0)
                    value = get_default_value(lines, default)
                    if value is None:
                        # fall back to unparsing the AST (loses formatting)
                        value = ast_unparse(default)
                    parameters[i] = param.replace(default=DefaultValue(value))
                else:
                    default = kw_defaults.pop(0)  # type: ignore[assignment]
                    value = get_default_value(lines, default)
                    if value is None:
                        value = ast_unparse(default)
                    parameters[i] = param.replace(default=DefaultValue(value))

        sig = sig.replace(parameters=parameters)
        try:
            obj.__signature__ = sig
        except AttributeError:
            # __signature__ can't be set directly on bound methods.
            obj.__dict__['__signature__'] = sig
    except (AttributeError, TypeError):
        # Failed to update signature (e.g. built-in or extension types).
        # For user-defined functions, "obj" may not have __dict__,
        # e.g. when decorated with a class that defines __slots__.
        # In this case, we can't set __signature__.
        return
    except NotImplementedError as exc:  # failed to ast_unparse()
        logger.warning(__("Failed to parse a default argument value for %r: %s"), obj, exc)
+
+
def setup(app: Sphinx) -> dict[str, Any]:
    """Register the config value and hook into autodoc's signature event."""
    app.add_config_value('autodoc_preserve_defaults', False, True)
    app.connect('autodoc-before-process-signature', update_defvalue)

    return {
        'version': sphinx.__display_version__,
        'parallel_read_safe': True,
    }
diff --git a/sphinx/ext/autodoc/type_comment.py b/sphinx/ext/autodoc/type_comment.py
new file mode 100644
index 0000000..e2c9ae2
--- /dev/null
+++ b/sphinx/ext/autodoc/type_comment.py
@@ -0,0 +1,140 @@
+"""Update annotations info of living objects using type_comments."""
+
+from __future__ import annotations
+
+import ast
+from inspect import Parameter, Signature, getsource
+from typing import TYPE_CHECKING, Any, cast
+
+import sphinx
+from sphinx.locale import __
+from sphinx.pycode.ast import unparse as ast_unparse
+from sphinx.util import inspect, logging
+
+if TYPE_CHECKING:
+ from collections.abc import Sequence
+
+ from sphinx.application import Sphinx
+
+logger = logging.getLogger(__name__)
+
+
def not_suppressed(argtypes: Sequence[ast.expr] = ()) -> bool:
    """Return True unless *argtypes* is empty or a suppressed type comment.

    A type comment of the form ``(...) -> T`` parses to a single Ellipsis
    argument, which means "don't check the argument types".
    """
    if not argtypes:
        # no argtypes at all
        return False
    if len(argtypes) == 1:
        only = argtypes[0]
        if isinstance(only, ast.Constant) and only.value is ...:
            # suppressed form: (...) -> T
            return False
    return True
+
+
def signature_from_ast(node: ast.FunctionDef, bound_method: bool,
                       type_comment: ast.FunctionDef) -> Signature:
    """Return a Signature object for the given *node*.

    :param node: AST node of the function definition
    :param bound_method: Specify *node* is a bound method or not
    :param type_comment: AST parsed from the function's type comment
                         (``ast.parse(..., mode='func_type')``); its argtypes
                         override per-argument type comments when present and
                         not suppressed
    """
    params = []
    for arg in node.args.posonlyargs:
        # Fall back to Parameter.empty (not None) when the argument carries
        # no type comment, consistently with every other branch below.
        param = Parameter(arg.arg, Parameter.POSITIONAL_ONLY,
                          annotation=arg.type_comment or Parameter.empty)
        params.append(param)

    for arg in node.args.args:
        param = Parameter(arg.arg, Parameter.POSITIONAL_OR_KEYWORD,
                          annotation=arg.type_comment or Parameter.empty)
        params.append(param)

    if node.args.vararg:
        param = Parameter(node.args.vararg.arg, Parameter.VAR_POSITIONAL,
                          annotation=node.args.vararg.type_comment or Parameter.empty)
        params.append(param)

    for arg in node.args.kwonlyargs:
        param = Parameter(arg.arg, Parameter.KEYWORD_ONLY,
                          annotation=arg.type_comment or Parameter.empty)
        params.append(param)

    if node.args.kwarg:
        param = Parameter(node.args.kwarg.arg, Parameter.VAR_KEYWORD,
                          annotation=node.args.kwarg.type_comment or Parameter.empty)
        params.append(param)

    # Remove first parameter when *obj* is bound_method
    if bound_method and params:
        params.pop(0)

    # merge type_comment into signature
    if not_suppressed(type_comment.argtypes):  # type: ignore[attr-defined]
        for i, param in enumerate(params):
            params[i] = param.replace(
                annotation=type_comment.argtypes[i])  # type: ignore[attr-defined]

    if node.returns:
        return Signature(params, return_annotation=node.returns)
    elif type_comment.returns:
        return Signature(params, return_annotation=ast_unparse(type_comment.returns))
    else:
        return Signature(params)
+
+
def get_type_comment(obj: Any, bound_method: bool = False) -> Signature | None:
    """Get type_comment'ed FunctionDef object from living object.

    This tries to parse original code for living object and returns
    Signature for given *obj*.  Returns None when no source/type comment is
    available or when parsing fails.
    """
    try:
        source = getsource(obj)
        # Bug fix: test for a real tab character.  The previous ``r'\t'`` was
        # the two-character string backslash+t, so tab-indented sources were
        # parsed without the ``if True:`` wrapper and failed with an
        # IndentationError silently swallowed by the SyntaxError handler.
        if source.startswith((' ', '\t')):
            # subject is placed inside class or block. To read its docstring,
            # this adds if-block before the declaration.
            module = ast.parse('if True:\n' + source, type_comments=True)
            subject = cast(
                ast.FunctionDef, module.body[0].body[0],  # type: ignore[attr-defined]
            )
        else:
            module = ast.parse(source, type_comments=True)
            subject = cast(ast.FunctionDef, module.body[0])

        type_comment = getattr(subject, "type_comment", None)
        if type_comment:
            function = ast.parse(type_comment, mode='func_type', type_comments=True)
            return signature_from_ast(
                subject, bound_method, function,  # type: ignore[arg-type]
            )
        else:
            return None
    except (OSError, TypeError):  # failed to load source code
        return None
    except SyntaxError:  # failed to parse type_comments
        return None
+
+
def update_annotations_using_type_comments(app: Sphinx, obj: Any, bound_method: bool) -> None:
    """Update annotations info of *obj* using type_comments.

    Fills ``obj.__annotations__`` entries that are missing, from the
    signature recovered out of the function's type comment.
    """
    try:
        type_sig = get_type_comment(obj, bound_method)
        if type_sig:
            sig = inspect.signature(obj, bound_method)
            for param in sig.parameters.values():
                # only fill in annotations that are not already present
                if param.name not in obj.__annotations__:
                    annotation = type_sig.parameters[param.name].annotation
                    if annotation is not Parameter.empty:
                        obj.__annotations__[param.name] = ast_unparse(annotation)

            if 'return' not in obj.__annotations__:
                obj.__annotations__['return'] = type_sig.return_annotation
    except KeyError as exc:
        logger.warning(__("Failed to update signature for %r: parameter not found: %s"),
                       obj, exc)
    except NotImplementedError as exc:  # failed to ast.unparse()
        logger.warning(__("Failed to parse type_comment for %r: %s"), obj, exc)
+
+
def setup(app: Sphinx) -> dict[str, Any]:
    """Register the type_comment annotation updater with autodoc."""
    app.connect('autodoc-before-process-signature', update_annotations_using_type_comments)

    return {'version': sphinx.__display_version__, 'parallel_read_safe': True}
diff --git a/sphinx/ext/autodoc/typehints.py b/sphinx/ext/autodoc/typehints.py
new file mode 100644
index 0000000..79906fb
--- /dev/null
+++ b/sphinx/ext/autodoc/typehints.py
@@ -0,0 +1,219 @@
+"""Generating content for autodoc using typehints"""
+
+from __future__ import annotations
+
+import re
+from collections.abc import Iterable
+from typing import TYPE_CHECKING, Any, cast
+
+from docutils import nodes
+
+import sphinx
+from sphinx import addnodes
+from sphinx.util import inspect
+from sphinx.util.typing import stringify_annotation
+
+if TYPE_CHECKING:
+ from docutils.nodes import Element
+
+ from sphinx.application import Sphinx
+
+
def record_typehints(app: Sphinx, objtype: str, name: str, obj: Any,
                     options: dict, args: str, retann: str) -> None:
    """Record type hints to env object."""
    # stringification mode mirrors the autodoc_typehints_format setting
    if app.config.autodoc_typehints_format == 'short':
        mode = 'smart'
    else:
        mode = 'fully-qualified'

    try:
        if callable(obj):
            annotations = app.env.temp_data.setdefault('annotations', {})
            annotation = annotations.setdefault(name, {})
            sig = inspect.signature(obj, type_aliases=app.config.autodoc_type_aliases)
            for param in sig.parameters.values():
                if param.annotation is not param.empty:
                    annotation[param.name] = stringify_annotation(param.annotation, mode)
            if sig.return_annotation is not sig.empty:
                annotation['return'] = stringify_annotation(sig.return_annotation, mode)
    except (TypeError, ValueError):
        # object is not introspectable; skip recording hints for it
        pass
+
+
def merge_typehints(app: Sphinx, domain: str, objtype: str, contentnode: Element) -> None:
    """Merge recorded type hints into the object description's field lists.

    Runs on 'object-description-transform'; only active for the Python
    domain when autodoc_typehints is 'both' or 'description'.
    """
    if domain != 'py':
        return
    if app.config.autodoc_typehints not in ('both', 'description'):
        return

    try:
        signature = cast(addnodes.desc_signature, contentnode.parent[0])
        if signature['module']:
            fullname = '.'.join([signature['module'], signature['fullname']])
        else:
            fullname = signature['fullname']
    except KeyError:
        # signature node does not have valid context info for the target object
        return

    annotations = app.env.temp_data.get('annotations', {})
    if annotations.get(fullname, {}):
        field_lists = [n for n in contentnode if isinstance(n, nodes.field_list)]
        if field_lists == []:
            # no description field list yet: create one to hold the types
            field_list = insert_field_list(contentnode)
            field_lists.append(field_list)

        for field_list in field_lists:
            if app.config.autodoc_typehints_description_target == "all":
                if objtype == 'class':
                    # suppress a noisy "rtype: None" on class descriptions
                    modify_field_list(field_list, annotations[fullname], suppress_rtype=True)
                else:
                    modify_field_list(field_list, annotations[fullname])
            elif app.config.autodoc_typehints_description_target == "documented_params":
                augment_descriptions_with_types(
                    field_list, annotations[fullname], force_rtype=True,
                )
            else:
                augment_descriptions_with_types(
                    field_list, annotations[fullname], force_rtype=False,
                )
+
+
def insert_field_list(node: Element) -> nodes.field_list:
    """Create a new field list and attach it to *node*.

    The list is placed near the first sub-object description if any,
    otherwise appended at the end of *node*.
    """
    field_list = nodes.field_list()
    desc = [n for n in node if isinstance(n, addnodes.desc)]
    if desc:
        # insert just before sub object descriptions (ex. methods, nested classes, etc.)
        # NOTE(review): ``index - 1`` inserts one position *before* the first
        # desc node's predecessor; confirm this offset is intended rather
        # than plain ``index``.
        index = node.index(desc[0])
        node.insert(index - 1, [field_list])
    else:
        node += field_list

    return field_list
+
+
def modify_field_list(node: nodes.field_list, annotations: dict[str, str],
                      suppress_rtype: bool = False) -> None:
    """Add missing ``:type:``/``:param:``/``:rtype:`` fields from *annotations*.

    First pass scans the existing fields to learn which names already have a
    ``:param:`` and/or ``:type:`` entry; second pass appends the missing ones.
    When *suppress_rtype* is True, an ``rtype`` of ``None`` is not emitted.
    """
    arguments: dict[str, dict[str, bool]] = {}
    fields = cast(Iterable[nodes.field], node)
    for field in fields:
        field_name = field[0].astext()
        parts = re.split(' +', field_name)
        if parts[0] == 'param':
            if len(parts) == 2:
                # :param xxx:
                arg = arguments.setdefault(parts[1], {})
                arg['param'] = True
            elif len(parts) > 2:
                # :param xxx yyy:  (type given inline with the param)
                name = ' '.join(parts[2:])
                arg = arguments.setdefault(name, {})
                arg['param'] = True
                arg['type'] = True
        elif parts[0] == 'type':
            name = ' '.join(parts[1:])
            arg = arguments.setdefault(name, {})
            arg['type'] = True
        elif parts[0] == 'rtype':
            arguments['return'] = {'type': True}

    for name, annotation in annotations.items():
        if name == 'return':
            continue

        # Map bare names onto their starred spellings when the docstring used
        # ``*args`` / ``**kwargs``.  Bug fix: the original discarded the
        # result of ``arguments.get(name)`` in these branches, leaving ``arg``
        # unbound (NameError on the first iteration) or stale from the
        # previous iteration.
        if '*' + name in arguments:
            name = '*' + name
        elif '**' + name in arguments:
            name = '**' + name
        arg = arguments.get(name, {})

        if not arg.get('type'):
            field = nodes.field()
            field += nodes.field_name('', 'type ' + name)
            field += nodes.field_body('', nodes.paragraph('', annotation))
            node += field
        if not arg.get('param'):
            field = nodes.field()
            field += nodes.field_name('', 'param ' + name)
            field += nodes.field_body('', nodes.paragraph('', ''))
            node += field

    if 'return' in annotations and 'return' not in arguments:
        annotation = annotations['return']
        if annotation == 'None' and suppress_rtype:
            return

        field = nodes.field()
        field += nodes.field_name('', 'rtype')
        field += nodes.field_body('', nodes.paragraph('', annotation))
        node += field
+
+
def augment_descriptions_with_types(
    node: nodes.field_list,
    annotations: dict[str, str],
    force_rtype: bool,
) -> None:
    """Add ``:type:``/``:rtype:`` fields only for documented parameters.

    First pass collects which names already carry a description and/or an
    explicit type; second pass fills in missing types from *annotations*.
    """
    fields = cast(Iterable[nodes.field], node)
    has_description: set[str] = set()
    has_type: set[str] = set()
    for field in fields:
        field_name = field[0].astext()
        parts = re.split(' +', field_name)
        if parts[0] == 'param':
            if len(parts) == 2:
                # :param xxx:
                has_description.add(parts[1])
            elif len(parts) > 2:
                # :param xxx yyy:  (inline type counts as both)
                name = ' '.join(parts[2:])
                has_description.add(name)
                has_type.add(name)
        elif parts[0] == 'type':
            name = ' '.join(parts[1:])
            has_type.add(name)
        elif parts[0] in ('return', 'returns'):
            has_description.add('return')
        elif parts[0] == 'rtype':
            has_type.add('return')

    # Add 'type' for parameters with a description but no declared type.
    for name, annotation in annotations.items():
        if name in ('return', 'returns'):
            continue

        # match the starred spelling used in the docstring, if any
        if '*' + name in has_description:
            name = '*' + name
        elif '**' + name in has_description:
            name = '**' + name

        if name in has_description and name not in has_type:
            field = nodes.field()
            field += nodes.field_name('', 'type ' + name)
            field += nodes.field_body('', nodes.paragraph('', annotation))
            node += field

    # Add 'rtype' if 'return' is present and 'rtype' isn't.
    if 'return' in annotations:
        rtype = annotations['return']
        if 'return' not in has_type and ('return' in has_description or
                                         (force_rtype and rtype != "None")):
            field = nodes.field()
            field += nodes.field_name('', 'rtype')
            field += nodes.field_body('', nodes.paragraph('', rtype))
            node += field
+
+
def setup(app: Sphinx) -> dict[str, Any]:
    """Register the typehint recording and merging event handlers."""
    app.connect('autodoc-process-signature', record_typehints)
    app.connect('object-description-transform', merge_typehints)

    return {
        'version': sphinx.__display_version__,
        'parallel_read_safe': True,
        'parallel_write_safe': True,
    }
diff --git a/sphinx/ext/autosectionlabel.py b/sphinx/ext/autosectionlabel.py
new file mode 100644
index 0000000..d423fcc
--- /dev/null
+++ b/sphinx/ext/autosectionlabel.py
@@ -0,0 +1,69 @@
+"""Allow reference sections by :ref: role using its title."""
+
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, Any, cast
+
+from docutils import nodes
+
+import sphinx
+from sphinx.domains.std import StandardDomain
+from sphinx.locale import __
+from sphinx.util import logging
+from sphinx.util.nodes import clean_astext
+
+if TYPE_CHECKING:
+ from docutils.nodes import Node
+
+ from sphinx.application import Sphinx
+
+logger = logging.getLogger(__name__)
+
+
def get_node_depth(node: Node) -> int:
    """Return how many ancestors separate *node* from the document root."""
    depth = 0
    ancestor = node
    document = node.document
    while ancestor.parent != document:
        ancestor = ancestor.parent
        depth += 1
    return depth
+
+
def register_sections_as_label(app: Sphinx, document: Node) -> None:
    """Register every section title in *document* as a ``:ref:`` label."""
    domain = cast(StandardDomain, app.env.get_domain('std'))
    for node in document.findall(nodes.section):
        # honour autosectionlabel_maxdepth: skip sections nested too deeply
        if (app.config.autosectionlabel_maxdepth and
                get_node_depth(node) >= app.config.autosectionlabel_maxdepth):
            continue
        labelid = node['ids'][0]
        docname = app.env.docname
        title = cast(nodes.title, node[0])
        ref_name = getattr(title, 'rawsource', title.astext())
        if app.config.autosectionlabel_prefix_document:
            # prefix with the document name to disambiguate across documents
            name = nodes.fully_normalize_name(docname + ':' + ref_name)
        else:
            name = nodes.fully_normalize_name(ref_name)
        sectname = clean_astext(title)

        logger.debug(__('section "%s" gets labeled as "%s"'),
                     ref_name, name,
                     location=node, type='autosectionlabel', subtype=docname)
        if name in domain.labels:
            logger.warning(__('duplicate label %s, other instance in %s'),
                           name, app.env.doc2path(domain.labels[name][0]),
                           location=node, type='autosectionlabel', subtype=docname)

        domain.anonlabels[name] = docname, labelid
        domain.labels[name] = docname, labelid, sectname
+
+
def setup(app: Sphinx) -> dict[str, Any]:
    """Register autosectionlabel config values and the doctree-read hook."""
    app.add_config_value('autosectionlabel_prefix_document', False, 'env')
    app.add_config_value('autosectionlabel_maxdepth', None, 'env')
    app.connect('doctree-read', register_sections_as_label)

    return {
        'version': sphinx.__display_version__,
        'parallel_read_safe': True,
        'parallel_write_safe': True,
    }
diff --git a/sphinx/ext/autosummary/__init__.py b/sphinx/ext/autosummary/__init__.py
new file mode 100644
index 0000000..edb8f0d
--- /dev/null
+++ b/sphinx/ext/autosummary/__init__.py
@@ -0,0 +1,847 @@
+"""Extension that adds an autosummary:: directive.
+
+The directive can be used to generate function/method/attribute/etc. summary
+lists, similar to those output e.g. by Epydoc and other API doc generation tools.
+
+An :autolink: role is also provided.
+
+autosummary directive
+---------------------
+
+The autosummary directive has the form::
+
+ .. autosummary::
+ :nosignatures:
+ :toctree: generated/
+
+ module.function_1
+ module.function_2
+ ...
+
+and it generates an output table (containing signatures, optionally)
+
+ ======================== =============================================
+ module.function_1(args) Summary line from the docstring of function_1
+ module.function_2(args) Summary line from the docstring
+ ...
+ ======================== =============================================
+
+If the :toctree: option is specified, files matching the function names
+are inserted to the toctree with the given prefix:
+
+ generated/module.function_1
+ generated/module.function_2
+ ...
+
+Note: The file names contain the module:: or currentmodule:: prefixes.
+
+.. seealso:: autosummary_generate.py
+
+
+autolink role
+-------------
+
+The autolink role functions as ``:obj:`` when the name referred can be
+resolved to a Python object, and otherwise it becomes simple emphasis.
+This can be used as the default role to make links 'smart'.
+"""
+
+from __future__ import annotations
+
+import inspect
+import os
+import posixpath
+import re
+import sys
+from inspect import Parameter
+from os import path
+from types import ModuleType
+from typing import TYPE_CHECKING, Any, cast
+
+from docutils import nodes
+from docutils.parsers.rst import directives
+from docutils.parsers.rst.states import RSTStateMachine, Struct, state_classes
+from docutils.statemachine import StringList
+
+import sphinx
+from sphinx import addnodes
+from sphinx.config import Config
+from sphinx.environment import BuildEnvironment
+from sphinx.ext.autodoc import INSTANCEATTR, Documenter
+from sphinx.ext.autodoc.directive import DocumenterBridge, Options
+from sphinx.ext.autodoc.importer import import_module
+from sphinx.ext.autodoc.mock import mock
+from sphinx.locale import __
+from sphinx.project import Project
+from sphinx.pycode import ModuleAnalyzer, PycodeError
+from sphinx.registry import SphinxComponentRegistry
+from sphinx.util import logging, rst
+from sphinx.util.docutils import (
+ NullReporter,
+ SphinxDirective,
+ SphinxRole,
+ new_document,
+ switch_source_input,
+)
+from sphinx.util.inspect import getmro, signature_from_str
+from sphinx.util.matching import Matcher
+
+if TYPE_CHECKING:
+ from collections.abc import Sequence
+
+ from docutils.nodes import Node, system_message
+
+ from sphinx.application import Sphinx
+ from sphinx.extension import Extension
+ from sphinx.util.typing import OptionSpec
+ from sphinx.writers.html import HTML5Translator
+
+logger = logging.getLogger(__name__)
+
+
+periods_re = re.compile(r'\.(?:\s+)')
+literal_re = re.compile(r'::\s*$')
+
+WELL_KNOWN_ABBREVIATIONS = ('et al.', 'e.g.', 'i.e.')
+
+
+# -- autosummary_toc node ------------------------------------------------------
+
class autosummary_toc(nodes.comment):
    """Node wrapping the hidden toctree generated by an autosummary directive."""
    pass
+
+
def autosummary_toc_visit_html(self: nodes.NodeVisitor, node: autosummary_toc) -> None:
    """Hide autosummary toctree list in HTML output."""
    # SkipNode suppresses rendering of this node and all of its children
    raise nodes.SkipNode
+
+
def autosummary_noop(self: nodes.NodeVisitor, node: Node) -> None:
    """No-op visitor for writers that should ignore autosummary nodes."""
    pass
+
+
+# -- autosummary_table node ----------------------------------------------------
+
class autosummary_table(nodes.comment):
    """Node wrapping the summary table so writers can post-process it."""
    pass
+
+
def autosummary_table_visit_html(self: HTML5Translator, node: autosummary_table) -> None:
    """Make the first column of the table non-breaking."""
    try:
        table = cast(nodes.table, node[0])
        tgroup = cast(nodes.tgroup, table[0])
        tbody = cast(nodes.tbody, tgroup[-1])
        rows = cast(list[nodes.row], tbody)
        for row in rows:
            col1_entry = cast(nodes.entry, row[0])
            par = cast(nodes.paragraph, col1_entry[0])
            for j, subnode in enumerate(list(par)):
                if isinstance(subnode, nodes.Text):
                    # replace ordinary spaces with NO-BREAK SPACE (U+00A0)
                    new_text = subnode.astext().replace(" ", "\u00a0")
                    par[j] = nodes.Text(new_text)
    except IndexError:
        # malformed or empty table: leave it untouched
        pass
+
+
+# -- autodoc integration -------------------------------------------------------
+
class FakeApplication:
    """Minimal stand-in for ``sphinx.application.Sphinx`` used by FakeDirective."""

    def __init__(self) -> None:
        self.doctreedir = None
        self.events = None
        self.extensions: dict[str, Extension] = {}
        self.srcdir = None
        self.config = Config()
        self.project = Project('', {})
        self.registry = SphinxComponentRegistry()
+
+
class FakeDirective(DocumenterBridge):
    """DocumenterBridge with just enough state to instantiate Documenters."""

    def __init__(self) -> None:
        settings = Struct(tab_width=8)
        document = Struct(settings=settings)
        app = FakeApplication()
        app.config.add('autodoc_class_signature', 'mixed', True, None)
        env = BuildEnvironment(app)  # type: ignore[arg-type]
        state = Struct(document=document)
        super().__init__(env, None, Options(), 0, state)
+
+
def get_documenter(app: Sphinx, obj: Any, parent: Any) -> type[Documenter]:
    """Get an autodoc.Documenter class suitable for documenting the given
    object.

    *obj* is the Python object to be documented, and *parent* is another
    Python object (e.g. a module or a class) to which *obj* belongs.
    """
    from sphinx.ext.autodoc import DataDocumenter, ModuleDocumenter

    if inspect.ismodule(obj):
        # ModuleDocumenter.can_document_member always returns False
        return ModuleDocumenter

    # Construct a fake documenter for *parent*
    if parent is not None:
        parent_doc_cls = get_documenter(app, parent, None)
    else:
        parent_doc_cls = ModuleDocumenter

    if hasattr(parent, '__name__'):
        parent_doc = parent_doc_cls(FakeDirective(), parent.__name__)
    else:
        parent_doc = parent_doc_cls(FakeDirective(), "")

    # Get the correct documenter class for *obj*: among registered documenters
    # that accept *obj*, the one with the highest priority wins.
    classes = [cls for cls in app.registry.documenters.values()
               if cls.can_document_member(obj, '', False, parent_doc)]
    if classes:
        classes.sort(key=lambda cls: cls.priority)
        return classes[-1]
    else:
        return DataDocumenter
+
+
+# -- .. autosummary:: ----------------------------------------------------------
+
class Autosummary(SphinxDirective):
    """
    Pretty table containing short signatures and summaries of functions etc.

    autosummary can also optionally generate a hidden toctree:: node.
    """

    required_arguments = 0
    optional_arguments = 0
    final_argument_whitespace = False
    has_content = True
    option_spec: OptionSpec = {
        'caption': directives.unchanged_required,
        'toctree': directives.unchanged,
        'nosignatures': directives.flag,
        'recursive': directives.flag,
        'template': directives.unchanged,
    }

    def run(self) -> list[Node]:
        """Build the summary table and, with ``:toctree:``, a hidden toctree node."""
        self.bridge = DocumenterBridge(self.env, self.state.document.reporter,
                                       Options(), self.lineno, self.state)

        # Keep only content lines that start like an object name; a '~'
        # prefix shortens the displayed name to the last dotted component.
        names = [x.strip().split()[0] for x in self.content
                 if x.strip() and re.search(r'^[~a-zA-Z_]', x.strip()[0])]
        items = self.get_items(names)
        nodes = self.get_table(items)  # NOTE: local `nodes` shadows the docutils module here

        if 'toctree' in self.options:
            dirname = posixpath.dirname(self.env.docname)

            tree_prefix = self.options['toctree'].strip()
            docnames = []
            excluded = Matcher(self.config.exclude_patterns)
            filename_map = self.config.autosummary_filename_map
            for _name, _sig, _summary, real_name in items:
                real_name = filename_map.get(real_name, real_name)
                docname = posixpath.join(tree_prefix, real_name)
                docname = posixpath.normpath(posixpath.join(dirname, docname))
                if docname not in self.env.found_docs:
                    # Stub page missing: warn (differently for excluded docs)
                    # and leave the entry out of the toctree.
                    if excluded(self.env.doc2path(docname, False)):
                        msg = __('autosummary references excluded document %r. Ignored.')
                    else:
                        msg = __('autosummary: stub file not found %r. '
                                 'Check your autosummary_generate setting.')

                    logger.warning(msg, real_name, location=self.get_location())
                    continue

                docnames.append(docname)

            if docnames:
                tocnode = addnodes.toctree()
                tocnode['includefiles'] = docnames
                tocnode['entries'] = [(None, docn) for docn in docnames]
                tocnode['maxdepth'] = -1
                tocnode['glob'] = None
                tocnode['caption'] = self.options.get('caption')

                nodes.append(autosummary_toc('', '', tocnode))

        if 'toctree' not in self.options and 'caption' in self.options:
            logger.warning(__('A captioned autosummary requires :toctree: option. ignored.'),
                           location=nodes[-1])

        return nodes

    def import_by_name(
        self, name: str, prefixes: list[str | None],
    ) -> tuple[str, Any, Any, str]:
        """Import *name* (trying *prefixes*), with mocked imports applied.

        Falls back to instance-attribute lookup; on total failure raises
        an ImportExceptionGroup aggregating errors from both attempts.
        """
        with mock(self.config.autosummary_mock_imports):
            try:
                return import_by_name(name, prefixes)
            except ImportExceptionGroup as exc:
                # check existence of instance attribute
                try:
                    return import_ivar_by_name(name, prefixes)
                except ImportError as exc2:
                    if exc2.__cause__:
                        errors: list[BaseException] = exc.exceptions + [exc2.__cause__]
                    else:
                        errors = exc.exceptions + [exc2]

                    raise ImportExceptionGroup(exc.args[0], errors) from None

    def create_documenter(self, app: Sphinx, obj: Any,
                          parent: Any, full_name: str) -> Documenter:
        """Get an autodoc.Documenter class suitable for documenting the given
        object.

        Wraps get_documenter and is meant as a hook for extensions.
        """
        doccls = get_documenter(app, obj, parent)
        return doccls(self.bridge, full_name)

    def get_items(self, names: list[str]) -> list[tuple[str, str, str, str]]:
        """Try to import the given names, and return a list of
        ``[(name, signature, summary_string, real_name), ...]``.
        """
        prefixes = get_import_prefixes_from_env(self.env)

        items: list[tuple[str, str, str, str]] = []

        # Budget shared between the displayed name and its signature.
        max_item_chars = 50

        for name in names:
            display_name = name
            if name.startswith('~'):
                name = name[1:]
                display_name = name.split('.')[-1]

            try:
                real_name, obj, parent, modname = self.import_by_name(name, prefixes=prefixes)
            except ImportExceptionGroup as exc:
                errors = list({f"* {type(e).__name__}: {e}" for e in exc.exceptions})
                logger.warning(__('autosummary: failed to import %s.\nPossible hints:\n%s'),
                               name, '\n'.join(errors), location=self.get_location())
                continue

            self.bridge.result = StringList()  # initialize for each documenter
            full_name = real_name
            if not isinstance(obj, ModuleType):
                # give explicitly separated module name, so that members
                # of inner classes can be documented
                full_name = modname + '::' + full_name[len(modname) + 1:]
            # NB. using full_name here is important, since Documenters
            # handle module prefixes slightly differently
            documenter = self.create_documenter(self.env.app, obj, parent, full_name)
            if not documenter.parse_name():
                logger.warning(__('failed to parse name %s'), real_name,
                               location=self.get_location())
                items.append((display_name, '', '', real_name))
                continue
            if not documenter.import_object():
                logger.warning(__('failed to import object %s'), real_name,
                               location=self.get_location())
                items.append((display_name, '', '', real_name))
                continue

            # try to also get a source code analyzer for attribute docs
            try:
                documenter.analyzer = ModuleAnalyzer.for_module(
                    documenter.get_real_modname())
                # parse right now, to get PycodeErrors on parsing (results will
                # be cached anyway)
                documenter.analyzer.find_attr_docs()
            except PycodeError as err:
                logger.debug('[autodoc] module analyzer failed: %s', err)
                # no source file -- e.g. for builtin and C modules
                documenter.analyzer = None

            # -- Grab the signature

            try:
                sig = documenter.format_signature(show_annotation=False)
            except TypeError:
                # the documenter does not support ``show_annotation`` option
                sig = documenter.format_signature()

            if not sig:
                sig = ''
            else:
                max_chars = max(10, max_item_chars - len(display_name))
                sig = mangle_signature(sig, max_chars=max_chars)

            # -- Grab the summary

            # bodge for ModuleDocumenter
            documenter._extra_indent = ''  # type: ignore[attr-defined]

            documenter.add_content(None)
            summary = extract_summary(self.bridge.result.data[:], self.state.document)

            items.append((display_name, sig, summary, real_name))

        return items

    def get_table(self, items: list[tuple[str, str, str, str]]) -> list[Node]:
        """Generate a proper list of table nodes for autosummary:: directive.

        *items* is a list produced by :meth:`get_items`.
        """
        table_spec = addnodes.tabular_col_spec()
        table_spec['spec'] = r'\X{1}{2}\X{1}{2}'

        table = autosummary_table('')
        real_table = nodes.table('', classes=['autosummary longtable'])
        table.append(real_table)
        group = nodes.tgroup('', cols=2)
        real_table.append(group)
        group.append(nodes.colspec('', colwidth=10))
        group.append(nodes.colspec('', colwidth=90))
        body = nodes.tbody('')
        group.append(body)

        def append_row(*column_texts: str) -> None:
            # Parse each cell as reST; unwrap a lone paragraph so the
            # entry is not doubly wrapped.
            row = nodes.row('')
            source, line = self.state_machine.get_source_and_line()
            for text in column_texts:
                node = nodes.paragraph('')
                vl = StringList()
                vl.append(text, '%s:%d:<autosummary>' % (source, line))
                with switch_source_input(self.state, vl):
                    self.state.nested_parse(vl, 0, node)
                    try:
                        if isinstance(node[0], nodes.paragraph):
                            node = node[0]
                    except IndexError:
                        pass
                    row.append(nodes.entry('', node))
            body.append(row)

        for name, sig, summary, real_name in items:
            qualifier = 'obj'
            if 'nosignatures' not in self.options:
                col1 = f':py:{qualifier}:`{name} <{real_name}>`\\ {rst.escape(sig)}'
            else:
                col1 = f':py:{qualifier}:`{name} <{real_name}>`'
            col2 = summary
            append_row(col1, col2)

        return [table_spec, table]
+
+
def strip_arg_typehint(s: str) -> str:
    """Return the argument definition *s* with any ``: type`` hint removed."""
    head, _sep, _annotation = s.partition(':')
    return head.strip()
+
+
+def _cleanup_signature(s: str) -> str:
+ """Clean up signature using inspect.signautre() for mangle_signature()"""
+ try:
+ sig = signature_from_str(s)
+ parameters = list(sig.parameters.values())
+ for i, param in enumerate(parameters):
+ if param.annotation is not Parameter.empty:
+ # Remove typehints
+ param = param.replace(annotation=Parameter.empty)
+ if param.default is not Parameter.empty:
+ # Replace default value by "None"
+ param = param.replace(default=None)
+ parameters[i] = param
+ sig = sig.replace(parameters=parameters, return_annotation=Parameter.empty)
+ return str(sig)
+ except Exception:
+ # Return the original signature string if failed to clean (ex. parsing error)
+ return s
+
+
def mangle_signature(sig: str, max_chars: int = 30) -> str:
    """Reformat a function signature to a more compact form.

    Annotations, defaults, string literals and nested bracketed content
    are stripped; defaulted arguments are grouped into a trailing
    ``[, ...]`` segment and the whole result is truncated to roughly
    *max_chars* characters.
    """
    s = _cleanup_signature(sig)

    # Strip return type annotation
    s = re.sub(r"\)\s*->\s.*$", ")", s)

    # Remove parenthesis
    s = re.sub(r"^\((.*)\)$", r"\1", s).strip()

    # Strip literals (which can contain things that confuse the code below)
    s = re.sub(r"\\\\", "", s)  # escaped backslash (maybe inside string)
    s = re.sub(r"\\'", "", s)  # escaped single quote
    s = re.sub(r'\\"', "", s)  # escaped double quote
    s = re.sub(r"'[^']*'", "", s)  # string literal (w/ single quote)
    s = re.sub(r'"[^"]*"', "", s)  # string literal (w/ double quote)

    # Strip complex objects (maybe default value of arguments)
    while re.search(r'\([^)]*\)', s):   # contents of parenthesis (ex. NamedTuple(attr=...))
        s = re.sub(r'\([^)]*\)', '', s)
    while re.search(r'<[^>]*>', s):     # contents of angle brackets (ex. <object>)
        s = re.sub(r'<[^>]*>', '', s)
    while re.search(r'{[^}]*}', s):     # contents of curly brackets (ex. dict)
        s = re.sub(r'{[^}]*}', '', s)

    # Parse the signature to arguments + options.  The string is consumed
    # from the right: each trailing "name=..." match is a defaulted
    # argument; what remains once no match is found are plain arguments.
    args: list[str] = []
    opts: list[str] = []

    opt_re = re.compile(r"^(.*, |)([a-zA-Z0-9_*]+)\s*=\s*")
    while s:
        m = opt_re.search(s)
        if not m:
            # The rest are arguments
            args = s.split(', ')
            break

        opts.insert(0, m.group(2))
        s = m.group(1)[:-2]

    # Strip typehints
    for i, arg in enumerate(args):
        args[i] = strip_arg_typehint(arg)

    for i, opt in enumerate(opts):
        opts[i] = strip_arg_typehint(opt)

    # Produce a more compact signature
    sig = limited_join(", ", args, max_chars=max_chars - 2)
    if opts:
        if not sig:
            sig = "[%s]" % limited_join(", ", opts, max_chars=max_chars - 4)
        elif len(sig) < max_chars - 4 - 2 - 3:
            sig += "[, %s]" % limited_join(", ", opts,
                                           max_chars=max_chars - len(sig) - 4 - 2)

    return "(%s)" % sig
+
+
def extract_summary(doc: list[str], document: Any) -> str:
    """Extract summary from docstring.

    *doc* is the docstring as a list of lines; the first paragraph (or
    its first sentence) is returned as the summary.
    """
    def parse(doc: list[str], settings: Any) -> nodes.document:
        # Parse *doc* as reST into a fresh document with a silent reporter.
        state_machine = RSTStateMachine(state_classes, 'Body')
        node = new_document('', settings)
        node.reporter = NullReporter()
        state_machine.run(doc, node)

        return node

    # Skip a blank lines at the top
    while doc and not doc[0].strip():
        doc.pop(0)

    # If there's a blank line, then we can assume the first sentence /
    # paragraph has ended, so anything after shouldn't be part of the
    # summary
    for i, piece in enumerate(doc):
        if not piece.strip():
            doc = doc[:i]
            break

    if doc == []:
        return ''

    # parse the docstring
    node = parse(doc, document.settings)
    if isinstance(node[0], nodes.section):
        # document starts with a section heading, so use that.
        summary = node[0].astext().strip()
    elif not isinstance(node[0], nodes.paragraph):
        # document starts with non-paragraph: pick up the first line
        summary = doc[0].strip()
    else:
        # Try to find the "first sentence", which may span multiple lines
        sentences = periods_re.split(" ".join(doc))
        if len(sentences) == 1:
            summary = sentences[0].strip()
        else:
            summary = ''
            for i in range(len(sentences)):
                # Grow the candidate summary one sentence at a time.
                summary = ". ".join(sentences[:i + 1]).rstrip(".") + "."
                node[:] = []
                # NOTE(review): this re-parses the full *doc*, not the
                # candidate summary, so the break condition only depends
                # on the original text -- confirm intent.
                node = parse(doc, document.settings)
                if summary.endswith(WELL_KNOWN_ABBREVIATIONS):
                    pass
                elif not any(node.findall(nodes.system_message)):
                    # considered as that splitting by period does not break inline markups
                    break

    # strip literal notation mark ``::`` from tail of summary
    summary = literal_re.sub('.', summary)

    return summary
+
+
def limited_join(sep: str, items: list[str], max_chars: int = 30,
                 overflow_marker: str = "...") -> str:
    """Join a number of strings into one, limiting the length to *max_chars*.

    If the string overflows this limit, replace the last fitting item by
    *overflow_marker*.

    Returns: joined_string
    """
    joined = sep.join(items)
    if len(joined) < max_chars:
        return joined

    budget = max_chars - len(overflow_marker)
    used = 0
    kept: list[str] = []
    for item in items:
        used += len(item) + len(sep)
        if used >= budget:
            break
        kept.append(item)

    return sep.join([*kept, overflow_marker])
+
+
+# -- Importing items -----------------------------------------------------------
+
+
class ImportExceptionGroup(Exception):
    """Raised when importing the target objects fails.

    Carries the error message plus the list of underlying exceptions
    collected while trying the individual import strategies.
    """

    def __init__(self, message: str | None, exceptions: Sequence[BaseException]):
        super().__init__(message)
        self.exceptions = [*exceptions]
+
+
def get_import_prefixes_from_env(env: BuildEnvironment) -> list[str | None]:
    """
    Obtain current Python import prefixes (for `import_by_name`)
    from ``document.env``.

    Most specific first: ``module.Class`` (if both are set), then the
    module, then ``None`` meaning "no prefix".
    """
    current_module = env.ref_context.get('py:module')
    current_class = env.ref_context.get('py:class')

    prefixes: list[str | None] = []
    if current_class:
        if current_module:
            prefixes.append(current_module + "." + current_class)
        else:
            prefixes.append(current_class)
    if current_module:
        prefixes.append(current_module)
    prefixes.append(None)

    return prefixes
+
+
def import_by_name(
    name: str, prefixes: Sequence[str | None] = (None,),
) -> tuple[str, Any, Any, str]:
    """Import a Python object that has the given *name*, under one of the
    *prefixes*. The first name that succeeds is used.

    Returns ``(prefixed_name, object, parent, module_name)``; raises
    ImportExceptionGroup aggregating every underlying failure otherwise.
    """
    attempted: list[str] = []
    grouped_errors: list[ImportExceptionGroup] = []
    for prefix in prefixes:
        full_name = f'{prefix}.{name}' if prefix else name
        try:
            obj, parent, modname = _import_by_name(full_name, grouped_exception=True)
        except ImportError:
            attempted.append(full_name)
        except ImportExceptionGroup as exc:
            attempted.append(full_name)
            grouped_errors.append(exc)
        else:
            return full_name, obj, parent, modname

    exceptions: list[BaseException] = [e for group in grouped_errors
                                       for e in group.exceptions]
    raise ImportExceptionGroup('no module named %s' % ' or '.join(attempted), exceptions)
+
+
def _import_by_name(name: str, grouped_exception: bool = True) -> tuple[Any, Any, str]:
    """Import a Python object given its full name.

    Returns ``(object, parent, module_name)``.  Two strategies are tried:
    first interpreting *name* directly as ``MODNAME.OBJ``, then walking
    the dotted path from the right to find the longest importable module
    prefix and resolving the remainder via getattr.  On failure, raises
    ImportExceptionGroup with all collected errors (or a plain
    ImportError when *grouped_exception* is false).
    """
    errors: list[BaseException] = []

    try:
        name_parts = name.split('.')

        # try first interpret `name` as MODNAME.OBJ
        modname = '.'.join(name_parts[:-1])
        if modname:
            try:
                mod = import_module(modname)
                return getattr(mod, name_parts[-1]), mod, modname
            except (ImportError, IndexError, AttributeError) as exc:
                # prefer the underlying cause so hints are more useful
                errors.append(exc.__cause__ or exc)

        # ... then as MODNAME, MODNAME.OBJ1, MODNAME.OBJ1.OBJ2, ...
        last_j = 0
        modname = ''
        for j in reversed(range(1, len(name_parts) + 1)):
            last_j = j
            modname = '.'.join(name_parts[:j])
            try:
                import_module(modname)
            except ImportError as exc:
                errors.append(exc.__cause__ or exc)

            if modname in sys.modules:
                break

        if last_j < len(name_parts):
            # Resolve the remaining dotted names as attributes of the module.
            parent = None
            obj = sys.modules[modname]
            for obj_name in name_parts[last_j:]:
                parent = obj
                obj = getattr(obj, obj_name)
            return obj, parent, modname
        else:
            # *name* itself is an importable module.
            return sys.modules[modname], None, modname
    except (ValueError, ImportError, AttributeError, KeyError) as exc:
        errors.append(exc)
        if grouped_exception:
            raise ImportExceptionGroup('', errors) from None  # NoQA: EM101
        else:
            raise ImportError(*exc.args) from exc
+
+
def import_ivar_by_name(name: str, prefixes: Sequence[str | None] = (None,),
                        grouped_exception: bool = True) -> tuple[str, Any, Any, str]:
    """Import an instance variable that has the given *name*, under one of the
    *prefixes*. The first name that succeeds is used.

    An instance attribute itself cannot be imported, so its presence is
    verified through source analysis of the owning class (and its
    ancestors) and reported with the ``INSTANCEATTR`` sentinel as the
    object.
    """
    try:
        name, attr = name.rsplit(".", 1)
        real_name, obj, parent, modname = import_by_name(name, prefixes)

        # Get ancestors of the object (class.__mro__ includes the class itself as
        # the first entry)
        candidate_objects = getmro(obj)
        if len(candidate_objects) == 0:
            candidate_objects = (obj,)

        for candidate_obj in candidate_objects:
            analyzer = ModuleAnalyzer.for_module(getattr(candidate_obj, '__module__', modname))
            analyzer.analyze()
            # check for presence in `annotations` to include dataclass attributes
            found_attrs = set()
            found_attrs |= {attr for (qualname, attr) in analyzer.attr_docs}
            found_attrs |= {attr for (qualname, attr) in analyzer.annotations}
            if attr in found_attrs:
                return real_name + "." + attr, INSTANCEATTR, obj, modname
    except (ImportError, ValueError, PycodeError) as exc:
        raise ImportError from exc
    except ImportExceptionGroup:
        raise  # pass through it as is

    raise ImportError
+
+
+# -- :autolink: (smart default role) -------------------------------------------
+
class AutoLink(SphinxRole):
    """Smart linking role.

    Expands to ':obj:`text`' if `text` is an object that can be imported;
    otherwise expands to '*text*'.
    """

    def run(self) -> tuple[list[Node], list[system_message]]:
        """Resolve the role text as a :py:obj: xref, downgrading to emphasis."""
        # Start from a regular :py:obj: cross-reference node.
        pyobj_role = self.env.get_domain('py').role('obj')
        assert pyobj_role is not None
        objects, errors = pyobj_role('obj', self.rawtext, self.text, self.lineno,
                                     self.inliner, self.options, self.content)
        if errors:
            return objects, errors

        assert len(objects) == 1
        pending_xref = cast(addnodes.pending_xref, objects[0])
        try:
            # try to import object by name
            prefixes = get_import_prefixes_from_env(self.env)
            import_by_name(pending_xref['reftarget'], prefixes)
        except ImportExceptionGroup:
            # Target is not importable: replace the xref with plain emphasis.
            literal = cast(nodes.literal, pending_xref[0])
            objects[0] = nodes.emphasis(self.rawtext, literal.astext(),
                                        classes=literal['classes'])

        return objects, errors
+
+
def get_rst_suffix(app: Sphinx) -> str | None:
    """Return the first configured source suffix parsed as reST, or ``None``.

    Suffixes with no registered parser are assumed to be reST.
    """
    for suffix in app.config.source_suffix:
        parser_class = app.registry.get_source_parsers().get(suffix)
        if parser_class is None:
            supported: tuple[str, ...] = ('restructuredtext',)
        else:
            supported = parser_class.supported
        if 'restructuredtext' in supported:
            return suffix

    return None
+
+
def process_generate_options(app: Sphinx) -> None:
    """Generate autosummary stub pages (``builder-inited`` handler).

    Resolves the ``autosummary_generate`` setting (``True`` means every
    found document), drops entries whose source file is missing, and
    hands the remaining files to ``generate_autosummary_docs``.
    """
    genfiles = app.config.autosummary_generate

    if genfiles is True:
        # Scan every known document that actually exists on disk.
        env = app.builder.env
        genfiles = [env.doc2path(x, base=False) for x in env.found_docs
                    if os.path.isfile(env.doc2path(x))]
    elif genfiles is False:
        pass
    else:
        # Explicit list: append the primary source suffix where missing,
        # then drop entries whose file cannot be found.
        ext = list(app.config.source_suffix)
        genfiles = [genfile + (ext[0] if not genfile.endswith(tuple(ext)) else '')
                    for genfile in genfiles]

        for entry in genfiles[:]:
            if not path.isfile(path.join(app.srcdir, entry)):
                logger.warning(__('autosummary_generate: file not found: %s'), entry)
                genfiles.remove(entry)

    if not genfiles:
        return

    suffix = get_rst_suffix(app)
    if suffix is None:
        # Typo fix: "generats" -> "generates" in the warning message.
        logger.warning(__('autosummary generates .rst files internally. '
                          'But your source_suffix does not contain .rst. Skipped.'))
        return

    from sphinx.ext.autosummary.generate import generate_autosummary_docs

    imported_members = app.config.autosummary_imported_members
    with mock(app.config.autosummary_mock_imports):
        generate_autosummary_docs(genfiles, suffix=suffix, base_path=app.srcdir,
                                  app=app, imported_members=imported_members,
                                  overwrite=app.config.autosummary_generate_overwrite,
                                  encoding=app.config.source_encoding)
+
+
def setup(app: Sphinx) -> dict[str, Any]:
    """Register the autosummary extension with Sphinx."""
    # I need autodoc
    app.setup_extension('sphinx.ext.autodoc')
    # Both custom nodes render specially only in HTML; every other
    # builder gets no-op visitors.
    app.add_node(autosummary_toc,
                 html=(autosummary_toc_visit_html, autosummary_noop),
                 latex=(autosummary_noop, autosummary_noop),
                 text=(autosummary_noop, autosummary_noop),
                 man=(autosummary_noop, autosummary_noop),
                 texinfo=(autosummary_noop, autosummary_noop))
    app.add_node(autosummary_table,
                 html=(autosummary_table_visit_html, autosummary_noop),
                 latex=(autosummary_noop, autosummary_noop),
                 text=(autosummary_noop, autosummary_noop),
                 man=(autosummary_noop, autosummary_noop),
                 texinfo=(autosummary_noop, autosummary_noop))
    app.add_directive('autosummary', Autosummary)
    app.add_role('autolink', AutoLink())
    # Stub generation must run before the sources are read.
    app.connect('builder-inited', process_generate_options)
    app.add_config_value('autosummary_context', {}, True)
    app.add_config_value('autosummary_filename_map', {}, 'html')
    app.add_config_value('autosummary_generate', True, True, [bool, list])
    app.add_config_value('autosummary_generate_overwrite', True, False)
    # Defaults to autodoc's mock-import list unless overridden.
    app.add_config_value('autosummary_mock_imports',
                         lambda config: config.autodoc_mock_imports, 'env')
    app.add_config_value('autosummary_imported_members', [], False, [bool])
    app.add_config_value('autosummary_ignore_module_all', True, 'env', bool)

    return {'version': sphinx.__display_version__, 'parallel_read_safe': True}
diff --git a/sphinx/ext/autosummary/generate.py b/sphinx/ext/autosummary/generate.py
new file mode 100644
index 0000000..06814f9
--- /dev/null
+++ b/sphinx/ext/autosummary/generate.py
@@ -0,0 +1,754 @@
+"""Generates reST source files for autosummary.
+
+Usable as a library or script to generate automatic RST source files for
+items referred to in autosummary:: directives.
+
+Each generated RST file contains a single auto*:: directive which
+extracts the docstring of the referred item.
+
+Example Makefile rule::
+
+ generate:
+ sphinx-autogen -o source/generated source/*.rst
+"""
+
+from __future__ import annotations
+
+import argparse
+import importlib
+import inspect
+import locale
+import os
+import pkgutil
+import pydoc
+import re
+import sys
+from os import path
+from typing import TYPE_CHECKING, Any, NamedTuple
+
+from jinja2 import TemplateNotFound
+from jinja2.sandbox import SandboxedEnvironment
+
+import sphinx.locale
+from sphinx import __display_version__, package_dir
+from sphinx.builders import Builder
+from sphinx.config import Config
+from sphinx.ext.autodoc.importer import import_module
+from sphinx.ext.autosummary import (
+ ImportExceptionGroup,
+ get_documenter,
+ import_by_name,
+ import_ivar_by_name,
+)
+from sphinx.locale import __
+from sphinx.pycode import ModuleAnalyzer, PycodeError
+from sphinx.registry import SphinxComponentRegistry
+from sphinx.util import logging, rst
+from sphinx.util.inspect import getall, safe_getattr
+from sphinx.util.osutil import ensuredir
+from sphinx.util.template import SphinxTemplateLoader
+
+if TYPE_CHECKING:
+ from collections.abc import Sequence, Set
+ from gettext import NullTranslations
+
+ from sphinx.application import Sphinx
+ from sphinx.ext.autodoc import Documenter
+
+logger = logging.getLogger(__name__)
+
+
class DummyApplication:
    """Dummy Application class for sphinx-autogen command.

    Mimics just enough of the Sphinx application interface for the
    autosummary generator to run outside a real build.
    """

    def __init__(self, translator: NullTranslations) -> None:
        self._warncount = 0
        self.config = Config()
        self.messagelog: list[str] = []
        self.registry = SphinxComponentRegistry()
        self.srcdir = "/"
        self.translator = translator
        self.verbosity = 0
        self.warningiserror = False

        # Register the config values the generator reads.
        self.config.add('autosummary_context', {}, True, None)
        self.config.add('autosummary_filename_map', {}, True, None)
        self.config.add('autosummary_ignore_module_all', True, 'env', bool)
        self.config.init_values()

    def emit_firstresult(self, *args: Any) -> None:
        """Event emission is a no-op outside a real Sphinx application."""
        return None
+
+
class AutosummaryEntry(NamedTuple):
    """One stub-generation target collected from an autosummary directive."""

    # NOTE(review): field semantics inferred from names; the producers
    # live outside this chunk -- confirm against the find_autosummary_*
    # helpers in this module.
    name: str  # fully qualified name of the object to document
    path: str | None  # output path (presumably from :toctree:; None if unset)
    template: str  # template name (presumably from :template:)
    recursive: bool  # whether the :recursive: flag was given
+
+
def setup_documenters(app: Any) -> None:
    """Register all built-in autodoc documenters on *app*'s registry.

    Needed because sphinx-autogen runs with a dummy application where
    the autodoc extension's own setup() is never invoked.
    """
    from sphinx.ext.autodoc import (
        AttributeDocumenter,
        ClassDocumenter,
        DataDocumenter,
        DecoratorDocumenter,
        ExceptionDocumenter,
        FunctionDocumenter,
        MethodDocumenter,
        ModuleDocumenter,
        PropertyDocumenter,
    )
    documenters: list[type[Documenter]] = [
        ModuleDocumenter, ClassDocumenter, ExceptionDocumenter, DataDocumenter,
        FunctionDocumenter, MethodDocumenter,
        AttributeDocumenter, DecoratorDocumenter, PropertyDocumenter,
    ]
    for documenter in documenters:
        app.registry.add_documenter(documenter.objtype, documenter)
+
+
+def _underline(title: str, line: str = '=') -> str:
+ if '\n' in title:
+ msg = 'Can only underline single lines'
+ raise ValueError(msg)
+ return title + '\n' + line * len(title)
+
+
class AutosummaryRenderer:
    """A helper class for rendering."""

    def __init__(self, app: Sphinx) -> None:
        if isinstance(app, Builder):
            msg = 'Expected a Sphinx application object!'
            raise ValueError(msg)

        # User templates (srcdir + templates_path) take precedence over
        # the bundled autosummary templates.
        system_templates_path = [os.path.join(package_dir, 'ext', 'autosummary', 'templates')]
        loader = SphinxTemplateLoader(app.srcdir, app.config.templates_path,
                                      system_templates_path)

        self.env = SandboxedEnvironment(loader=loader)
        self.env.filters['escape'] = rst.escape
        self.env.filters['e'] = rst.escape
        self.env.filters['underline'] = _underline

        if app.translator:
            self.env.add_extension("jinja2.ext.i18n")
            self.env.install_gettext_translations(app.translator)

    def render(self, template_name: str, context: dict) -> str:
        """Render a template file.

        Falls back from the literal template name, to
        ``autosummary/<objtype>.rst``, to ``autosummary/base.rst``.
        """
        try:
            template = self.env.get_template(template_name)
        except TemplateNotFound:
            try:
                # objtype is given as template_name
                template = self.env.get_template('autosummary/%s.rst' % template_name)
            except TemplateNotFound:
                # fallback to base.rst
                template = self.env.get_template('autosummary/base.rst')

        return template.render(context)
+
+
+def _split_full_qualified_name(name: str) -> tuple[str | None, str]:
+ """Split full qualified name to a pair of modname and qualname.
+
+ A qualname is an abbreviation for "Qualified name" introduced at PEP-3155
+ (https://peps.python.org/pep-3155/). It is a dotted path name
+ from the module top-level.
+
+ A "full" qualified name means a string containing both module name and
+ qualified name.
+
+ .. note:: This function actually imports the module to check its existence.
+ Therefore you need to mock 3rd party modules if needed before
+ calling this function.
+ """
+ parts = name.split('.')
+ for i, _part in enumerate(parts, 1):
+ try:
+ modname = ".".join(parts[:i])
+ importlib.import_module(modname)
+ except ImportError:
+ if parts[:i - 1]:
+ return ".".join(parts[:i - 1]), ".".join(parts[i - 1:])
+ else:
+ return None, ".".join(parts)
+ except IndexError:
+ pass
+
+ return name, ""
+
+
+# -- Generating output ---------------------------------------------------------
+
+
class ModuleScanner:
    """Scans a module object for the member names to document."""

    def __init__(self, app: Any, obj: Any) -> None:
        self.app = app
        self.object = obj

    def get_object_type(self, name: str, value: Any) -> str:
        """Return the autodoc objtype chosen for member *value*."""
        return get_documenter(self.app, value, self.object).objtype

    def is_skipped(self, name: str, value: Any, objtype: str) -> bool:
        """Ask autodoc-skip-member handlers whether to skip this member.

        Handler exceptions are logged and treated as "do not skip".
        """
        try:
            return self.app.emit_firstresult('autodoc-skip-member', objtype,
                                             name, value, False, {})
        except Exception as exc:
            logger.warning(__('autosummary: failed to determine %r to be documented, '
                              'the following exception was raised:\n%s'),
                           name, exc, type='autosummary')
            return False

    def scan(self, imported_members: bool) -> list[str]:
        """Return the member names to document.

        Honours skip hooks, the imported-member policy and (optionally)
        the module's ``__all__`` list.
        """
        members = []
        try:
            analyzer = ModuleAnalyzer.for_module(self.object.__name__)
            attr_docs = analyzer.find_attr_docs()
        except PycodeError:
            attr_docs = {}

        for name in members_of(self.object, self.app.config):
            try:
                value = safe_getattr(self.object, name)
            except AttributeError:
                value = None

            objtype = self.get_object_type(name, value)
            if self.is_skipped(name, value, objtype):
                continue

            try:
                if ('', name) in attr_docs:
                    # documented module-level attribute: treat as local
                    imported = False
                elif inspect.ismodule(value):  # NoQA: SIM114
                    imported = True
                elif safe_getattr(value, '__module__') != self.object.__name__:
                    imported = True
                else:
                    imported = False
            except AttributeError:
                imported = False

            respect_module_all = not self.app.config.autosummary_ignore_module_all
            if (
                # list all members up
                imported_members
                # list not-imported members
                or imported is False
                # list members that have __all__ set
                or (respect_module_all and '__all__' in dir(self.object))
            ):
                members.append(name)

        return members
+
+
def members_of(obj: Any, conf: Config) -> Sequence[str]:
    """Get the members of ``obj``, possibly ignoring the ``__all__`` module attribute.

    Follows the ``conf.autosummary_ignore_module_all`` setting: when it
    is false and ``obj`` defines ``__all__``, that list wins; otherwise
    fall back to ``dir(obj)``.
    """
    if not conf.autosummary_ignore_module_all:
        return getall(obj) or dir(obj)
    return dir(obj)
+
+
def generate_autosummary_content(name: str, obj: Any, parent: Any,
                                 template: AutosummaryRenderer, template_name: str,
                                 imported_members: bool, app: Any,
                                 recursive: bool, context: dict,
                                 modname: str | None = None,
                                 qualname: str | None = None) -> str:
    """Render the stub-page content for *obj* (named *name*).

    Builds the template namespace appropriate for the object's
    documenter type (members, functions, classes, modules, ...) and
    renders *template_name*, falling back to the default template for
    that objtype.
    """
    doc = get_documenter(app, obj, parent)

    ns: dict[str, Any] = {}
    ns.update(context)

    if doc.objtype == 'module':
        scanner = ModuleScanner(app, obj)
        ns['members'] = scanner.scan(imported_members)

        respect_module_all = not app.config.autosummary_ignore_module_all
        # A module with __all__ lists its imported members too when the
        # __all__ list is respected.
        imported_members = imported_members or ('__all__' in dir(obj) and respect_module_all)

        ns['functions'], ns['all_functions'] = \
            _get_members(doc, app, obj, {'function'}, imported=imported_members)
        ns['classes'], ns['all_classes'] = \
            _get_members(doc, app, obj, {'class'}, imported=imported_members)
        ns['exceptions'], ns['all_exceptions'] = \
            _get_members(doc, app, obj, {'exception'}, imported=imported_members)
        ns['attributes'], ns['all_attributes'] = \
            _get_module_attrs(name, ns['members'])
        ispackage = hasattr(obj, '__path__')
        if ispackage and recursive:
            # Use members that are not modules as skip list, because it would then mean
            # that module was overwritten in the package namespace
            skip = (
                ns["all_functions"]
                + ns["all_classes"]
                + ns["all_exceptions"]
                + ns["all_attributes"]
            )

            # If respect_module_all and module has a __all__ attribute, first get
            # modules that were explicitly imported. Next, find the rest with the
            # get_modules method, but only put in "public" modules that are in the
            # __all__ list
            #
            # Otherwise, use get_modules method normally
            if respect_module_all and '__all__' in dir(obj):
                imported_modules, all_imported_modules = \
                    _get_members(doc, app, obj, {'module'}, imported=True)
                skip += all_imported_modules
                imported_modules = [name + '.' + modname for modname in imported_modules]
                all_imported_modules = \
                    [name + '.' + modname for modname in all_imported_modules]
                public_members = getall(obj)
            else:
                imported_modules, all_imported_modules = [], []
                public_members = None

            modules, all_modules = _get_modules(obj, skip=skip, name=name,
                                                public_members=public_members)
            ns['modules'] = imported_modules + modules
            ns["all_modules"] = all_imported_modules + all_modules
    elif doc.objtype == 'class':
        ns['members'] = dir(obj)
        ns['inherited_members'] = \
            set(dir(obj)) - set(obj.__dict__.keys())
        ns['methods'], ns['all_methods'] = \
            _get_members(doc, app, obj, {'method'}, include_public={'__init__'})
        ns['attributes'], ns['all_attributes'] = \
            _get_members(doc, app, obj, {'attribute', 'property'})

    if modname is None or qualname is None:
        modname, qualname = _split_full_qualified_name(name)

    if doc.objtype in ('method', 'attribute', 'property'):
        # expose the owning class name to the template
        ns['class'] = qualname.rsplit(".", 1)[0]

    if doc.objtype in ('class',):
        shortname = qualname
    else:
        shortname = qualname.rsplit(".", 1)[-1]

    ns['fullname'] = name
    ns['module'] = modname
    ns['objname'] = qualname
    ns['name'] = shortname

    ns['objtype'] = doc.objtype
    ns['underline'] = len(name) * '='

    if template_name:
        return template.render(template_name, ns)
    else:
        return template.render(doc.objtype, ns)
+
+
+def _skip_member(app: Sphinx, obj: Any, name: str, objtype: str) -> bool:
+ try:
+ return app.emit_firstresult('autodoc-skip-member', objtype, name,
+ obj, False, {})
+ except Exception as exc:
+ logger.warning(__('autosummary: failed to determine %r to be documented, '
+ 'the following exception was raised:\n%s'),
+ name, exc, type='autosummary')
+ return False
+
+
def _get_class_members(obj: Any) -> dict[str, Any]:
    """Map member names of class *obj* to values via autodoc's resolver."""
    members = sphinx.ext.autodoc.get_class_members(obj, None, safe_getattr)
    return {name: member.object for name, member in members.items()}
+
+
def _get_module_members(app: Sphinx, obj: Any) -> dict[str, Any]:
    """Map member names of module *obj* to their values.

    Names whose attribute lookup fails are silently skipped.
    """
    collected: dict[str, Any] = {}
    for member_name in members_of(obj, app.config):
        try:
            collected[member_name] = safe_getattr(obj, member_name)
        except AttributeError:
            pass
    return collected
+
+
def _get_all_members(doc: type[Documenter], app: Sphinx, obj: Any) -> dict[str, Any]:
    """Return every member of *obj*, dispatching on the documenter's objtype.

    Only modules and classes have members; anything else yields an empty dict.
    """
    if doc.objtype == 'class':
        return _get_class_members(obj)
    if doc.objtype == 'module':
        return _get_module_members(app, obj)
    return {}
+
+
def _get_members(doc: type[Documenter], app: Sphinx, obj: Any, types: set[str], *,
                 include_public: Set[str] = frozenset(),
                 imported: bool = True) -> tuple[list[str], list[str]]:
    """Return ``(public, all)`` member names of *obj* whose documenter
    objtype is in *types*.

    The ``autodoc-skip-member`` event is consulted for each candidate:
    ``True`` drops the member, ``False`` forces it public, ``None`` falls
    back to the leading-underscore convention (plus *include_public*).
    """
    all_names: list[str] = []
    public_names: list[str] = []

    for member_name, member in _get_all_members(doc, app, obj).items():
        documenter = get_documenter(app, member, obj)
        if documenter.objtype not in types:
            continue
        # skip imported members if expected
        if not imported and getattr(member, '__module__', None) != obj.__name__:
            continue
        skipped = _skip_member(app, member, member_name, documenter.objtype)
        if skipped is True:
            continue
        all_names.append(member_name)
        if skipped is False:
            # show the member forcedly
            public_names.append(member_name)
        elif member_name in include_public or not member_name.startswith('_'):
            # considers member as public
            public_names.append(member_name)
    return public_names, all_names
+
+
def _get_module_attrs(name: str, members: Any) -> tuple[list[str], list[str]]:
    """Find module attributes with docstrings.

    Returns ``(public, all)`` attribute names; only module-level attributes
    that are also present in *members* count.
    """
    public: list[str] = []
    attrs: list[str] = []
    try:
        attr_docs = ModuleAnalyzer.for_module(name).find_attr_docs()
    except PycodeError:
        pass  # give up if ModuleAnalyzer fails to parse code
    else:
        for namespace, attr_name in attr_docs:
            if namespace == '' and attr_name in members:
                attrs.append(attr_name)
                if not attr_name.startswith('_'):
                    public.append(attr_name)
    return public, attrs
+
+
def _get_modules(
    obj: Any,
    *,
    skip: Sequence[str],
    name: str,
    public_members: Sequence[str] | None = None) -> tuple[list[str], list[str]]:
    """List the direct submodules of package *obj* as fully-qualified names.

    Returns ``(public, all)``.  Names in *skip* are omitted entirely, as are
    mocked modules (those carrying a ``__sphinx_mock__`` marker).  If
    *public_members* is given it decides publicness; otherwise a leading
    underscore marks a submodule as private.
    """
    all_names: list[str] = []
    public_names: list[str] = []
    for _, submodule_name, _ispkg in pkgutil.iter_modules(obj.__path__):
        if submodule_name in skip:
            # module was overwritten in __init__.py, so not accessible
            continue
        fullname = name + '.' + submodule_name
        try:
            submodule = import_module(fullname)
        except ImportError:
            pass  # keep the entry even if it cannot be imported
        else:
            if submodule and hasattr(submodule, '__sphinx_mock__'):
                continue

        all_names.append(fullname)
        if public_members is not None:
            is_public = submodule_name in public_members
        else:
            is_public = not submodule_name.startswith('_')
        if is_public:
            public_names.append(fullname)
    return public_names, all_names
+
+
def generate_autosummary_docs(sources: list[str],
                              output_dir: str | os.PathLike[str] | None = None,
                              suffix: str = '.rst',
                              base_path: str | os.PathLike[str] | None = None,
                              imported_members: bool = False, app: Any = None,
                              overwrite: bool = True, encoding: str = 'utf-8') -> None:
    """Generate autosummary stub pages for every entry found in *sources*.

    Each source file is scanned for ``autosummary::`` directives with a
    ``:toctree:`` option; one stub file per entry is rendered and written
    (into *output_dir*, or next to the directive's toctree path).  Newly
    created files are scanned recursively so nested autosummaries are
    generated in one pass.  Existing files are rewritten only when their
    content changed and *overwrite* is true.
    """
    # shorten very long source lists in the log message
    showed_sources = sorted(sources)
    if len(showed_sources) > 20:
        showed_sources = showed_sources[:10] + ['...'] + showed_sources[-10:]
    logger.info(__('[autosummary] generating autosummary for: %s') %
                ', '.join(showed_sources))

    if output_dir:
        logger.info(__('[autosummary] writing to %s') % output_dir)

    if base_path is not None:
        sources = [os.path.join(base_path, filename) for filename in sources]

    template = AutosummaryRenderer(app)

    # read
    items = find_autosummary_in_files(sources)

    # keep track of new files
    new_files = []

    if app:
        filename_map = app.config.autosummary_filename_map
    else:
        filename_map = {}

    # write
    for entry in sorted(set(items), key=str):
        if entry.path is None:
            # The corresponding autosummary:: directive did not have
            # a :toctree: option
            continue

        path = output_dir or os.path.abspath(entry.path)
        ensuredir(path)

        try:
            name, obj, parent, modname = import_by_name(entry.name)
            qualname = name.replace(modname + ".", "")
        except ImportExceptionGroup as exc:
            try:
                # try to import as an instance attribute
                name, obj, parent, modname = import_ivar_by_name(entry.name)
                qualname = name.replace(modname + ".", "")
            except ImportError as exc2:
                # both imports failed: report all collected causes and move on
                if exc2.__cause__:
                    exceptions: list[BaseException] = exc.exceptions + [exc2.__cause__]
                else:
                    exceptions = exc.exceptions + [exc2]

                # deduplicate identical error messages via the set literal
                errors = list({f"* {type(e).__name__}: {e}" for e in exceptions})
                logger.warning(__('[autosummary] failed to import %s.\nPossible hints:\n%s'),
                               entry.name, '\n'.join(errors))
                continue

        context: dict[str, Any] = {}
        if app:
            context.update(app.config.autosummary_context)

        content = generate_autosummary_content(name, obj, parent, template, entry.template,
                                               imported_members, app, entry.recursive, context,
                                               modname, qualname)

        # autosummary_filename_map lets projects avoid case-collisions etc.
        filename = os.path.join(path, filename_map.get(name, name) + suffix)
        if os.path.isfile(filename):
            with open(filename, encoding=encoding) as f:
                old_content = f.read()

            if content == old_content:
                continue
            if overwrite:  # content has changed
                with open(filename, 'w', encoding=encoding) as f:
                    f.write(content)
                new_files.append(filename)
        else:
            with open(filename, 'w', encoding=encoding) as f:
                f.write(content)
            new_files.append(filename)

    # descend recursively to new files
    if new_files:
        generate_autosummary_docs(new_files, output_dir=output_dir,
                                  suffix=suffix, base_path=base_path,
                                  imported_members=imported_members, app=app,
                                  overwrite=overwrite)
+
+
+# -- Finding documented entries in files ---------------------------------------
+
def find_autosummary_in_files(filenames: list[str]) -> list[AutosummaryEntry]:
    """Find out what items are documented in source/*.rst.

    See `find_autosummary_in_lines`.
    """
    found: list[AutosummaryEntry] = []
    for source_file in filenames:
        with open(source_file, encoding='utf-8', errors='ignore') as f:
            content_lines = f.read().splitlines()
        found.extend(find_autosummary_in_lines(content_lines, filename=source_file))
    return found
+
+
def find_autosummary_in_docstring(
    name: str, filename: str | None = None,
) -> list[AutosummaryEntry]:
    """Find out what items are documented in the given object's docstring.

    See `find_autosummary_in_lines`.  Import failures are logged and yield
    an empty result instead of aborting the scan.
    """
    try:
        _real_name, obj, _parent, _modname = import_by_name(name)
        doc_lines = pydoc.getdoc(obj).splitlines()
        return find_autosummary_in_lines(doc_lines, module=name, filename=filename)
    except AttributeError:
        pass
    except SystemExit:
        # guard against modules that call sys.exit() at import time
        logger.warning("Failed to import '%s'; the module executes module level "
                       'statement and it might call sys.exit().', name)
    except ImportExceptionGroup as exc:
        hints = '\n'.join({f"* {type(e).__name__}: {e}" for e in exc.exceptions})
        logger.warning(f'Failed to import {name}.\nPossible hints:\n{hints}')  # NoQA: G004
    return []
+
+
def find_autosummary_in_lines(
    lines: list[str], module: str | None = None, filename: str | None = None,
) -> list[AutosummaryEntry]:
    """Find out what items appear in autosummary:: directives in the
    given lines.

    Returns a list of (name, toctree, template) where *name* is a name
    of an object and *toctree* the :toctree: path of the corresponding
    autosummary directive (relative to the root of the file name), and
    *template* the value of the :template: option. *toctree* and
    *template* ``None`` if the directive does not have the
    corresponding options set.
    """
    autosummary_re = re.compile(r'^(\s*)\.\.\s+autosummary::\s*')
    automodule_re = re.compile(
        r'^\s*\.\.\s+automodule::\s*([A-Za-z0-9_.]+)\s*$')
    module_re = re.compile(
        r'^\s*\.\.\s+(current)?module::\s*([a-zA-Z0-9_.]+)\s*$')
    autosummary_item_re = re.compile(r'^\s+(~?[_a-zA-Z][a-zA-Z0-9_.]*)\s*.*?')
    recursive_arg_re = re.compile(r'^\s+:recursive:\s*$')
    toctree_arg_re = re.compile(r'^\s+:toctree:\s*(.*?)\s*$')
    template_arg_re = re.compile(r'^\s+:template:\s*(.*?)\s*$')

    documented: list[AutosummaryEntry] = []

    # per-directive option state; reset each time a new directive starts
    recursive = False
    toctree: str | None = None
    template = ''
    current_module = module
    in_autosummary = False  # True while scanning a directive's indented body
    base_indent = ""        # indentation of the current ``.. autosummary::`` line

    for line in lines:
        if in_autosummary:
            # inside a directive body: first parse the option lines ...
            m = recursive_arg_re.match(line)
            if m:
                recursive = True
                continue

            m = toctree_arg_re.match(line)
            if m:
                toctree = m.group(1)
                if filename:
                    # make the toctree path relative to the scanned file
                    toctree = os.path.join(os.path.dirname(filename),
                                           toctree)
                continue

            m = template_arg_re.match(line)
            if m:
                template = m.group(1).strip()
                continue

            if line.strip().startswith(':'):
                continue  # skip options

            # ... then the listed entries themselves
            m = autosummary_item_re.match(line)
            if m:
                name = m.group(1).strip()
                if name.startswith('~'):
                    # "~mod.obj" means: document obj but display only its last part
                    name = name[1:]
                if current_module and \
                        not name.startswith(current_module + '.'):
                    name = f"{current_module}.{name}"
                documented.append(AutosummaryEntry(name, toctree, template, recursive))
                continue

            # blank lines and deeper-indented lines stay inside the directive
            if not line.strip() or line.startswith(base_indent + " "):
                continue

            # dedented non-blank line: the directive body has ended
            in_autosummary = False

        m = autosummary_re.match(line)
        if m:
            # a new autosummary directive starts: reset the option state
            in_autosummary = True
            base_indent = m.group(1)
            recursive = False
            toctree = None
            template = ''
            continue

        m = automodule_re.search(line)
        if m:
            current_module = m.group(1).strip()
            # recurse into the automodule docstring
            documented.extend(find_autosummary_in_docstring(
                current_module, filename=filename))
            continue

        m = module_re.match(line)
        if m:
            # ``module::`` / ``currentmodule::`` changes the name prefix
            current_module = m.group(2)
            continue

    return documented
+
+
def get_parser() -> argparse.ArgumentParser:
    """Build the command-line argument parser for ``sphinx-autogen``."""
    p = argparse.ArgumentParser(
        usage='%(prog)s [OPTIONS] <SOURCE_FILE>...',
        epilog=__('For more information, visit <https://www.sphinx-doc.org/>.'),
        description=__("""
Generate ReStructuredText using autosummary directives.

sphinx-autogen is a frontend to sphinx.ext.autosummary.generate. It generates
the reStructuredText files from the autosummary directives contained in the
given input files.

The format of the autosummary directive is documented in the
``sphinx.ext.autosummary`` Python module and can be read using::

  pydoc sphinx.ext.autosummary
"""))

    p.add_argument('--version', action='version', dest='show_version',
                   version='%%(prog)s %s' % __display_version__)
    p.add_argument('source_file', nargs='+',
                   help=__('source files to generate rST files for'))
    p.add_argument('-o', '--output-dir', action='store', dest='output_dir',
                   help=__('directory to place all output in'))
    p.add_argument('-s', '--suffix', action='store', dest='suffix',
                   default='rst',
                   help=__('default suffix for files (default: '
                           '%(default)s)'))
    p.add_argument('-t', '--templates', action='store', dest='templates',
                   default=None,
                   help=__('custom template directory (default: '
                           '%(default)s)'))
    p.add_argument('-i', '--imported-members', action='store_true',
                   dest='imported_members', default=False,
                   help=__('document imported members (default: '
                           '%(default)s)'))
    p.add_argument('-a', '--respect-module-all', action='store_true',
                   dest='respect_module_all', default=False,
                   help=__('document exactly the members in module __all__ attribute. '
                           '(default: %(default)s)'))

    return p
+
+
def main(argv: Sequence[str] = (), /) -> None:
    """Command-line entry point: parse options and generate the stub pages."""
    locale.setlocale(locale.LC_ALL, '')
    sphinx.locale.init_console()

    # a lightweight stand-in for a real Sphinx application object
    app = DummyApplication(sphinx.locale.get_translator())
    logging.setup(app, sys.stdout, sys.stderr)  # type: ignore[arg-type]
    setup_documenters(app)

    opts = get_parser().parse_args(argv or sys.argv[1:])
    if opts.templates:
        app.config.templates_path.append(path.abspath(opts.templates))
    app.config.autosummary_ignore_module_all = (  # type: ignore[attr-defined]
        not opts.respect_module_all
    )

    generate_autosummary_docs(opts.source_file, opts.output_dir,
                              '.' + opts.suffix,
                              imported_members=opts.imported_members,
                              app=app)
+
+
if __name__ == '__main__':
    # script entry point: forward the command-line arguments to main()
    main(sys.argv[1:])
diff --git a/sphinx/ext/autosummary/templates/autosummary/base.rst b/sphinx/ext/autosummary/templates/autosummary/base.rst
new file mode 100644
index 0000000..b7556eb
--- /dev/null
+++ b/sphinx/ext/autosummary/templates/autosummary/base.rst
@@ -0,0 +1,5 @@
+{{ fullname | escape | underline}}
+
+.. currentmodule:: {{ module }}
+
+.. auto{{ objtype }}:: {{ objname }}
diff --git a/sphinx/ext/autosummary/templates/autosummary/class.rst b/sphinx/ext/autosummary/templates/autosummary/class.rst
new file mode 100644
index 0000000..0f7d6f3
--- /dev/null
+++ b/sphinx/ext/autosummary/templates/autosummary/class.rst
@@ -0,0 +1,29 @@
+{{ fullname | escape | underline}}
+
+.. currentmodule:: {{ module }}
+
+.. autoclass:: {{ objname }}
+
+ {% block methods %}
+ .. automethod:: __init__
+
+ {% if methods %}
+ .. rubric:: {{ _('Methods') }}
+
+ .. autosummary::
+ {% for item in methods %}
+ ~{{ name }}.{{ item }}
+ {%- endfor %}
+ {% endif %}
+ {% endblock %}
+
+ {% block attributes %}
+ {% if attributes %}
+ .. rubric:: {{ _('Attributes') }}
+
+ .. autosummary::
+ {% for item in attributes %}
+ ~{{ name }}.{{ item }}
+ {%- endfor %}
+ {% endif %}
+ {% endblock %}
diff --git a/sphinx/ext/autosummary/templates/autosummary/module.rst b/sphinx/ext/autosummary/templates/autosummary/module.rst
new file mode 100644
index 0000000..e74c012
--- /dev/null
+++ b/sphinx/ext/autosummary/templates/autosummary/module.rst
@@ -0,0 +1,60 @@
+{{ fullname | escape | underline}}
+
+.. automodule:: {{ fullname }}
+
+ {% block attributes %}
+ {% if attributes %}
+ .. rubric:: {{ _('Module Attributes') }}
+
+ .. autosummary::
+ {% for item in attributes %}
+ {{ item }}
+ {%- endfor %}
+ {% endif %}
+ {% endblock %}
+
+ {% block functions %}
+ {% if functions %}
+ .. rubric:: {{ _('Functions') }}
+
+ .. autosummary::
+ {% for item in functions %}
+ {{ item }}
+ {%- endfor %}
+ {% endif %}
+ {% endblock %}
+
+ {% block classes %}
+ {% if classes %}
+ .. rubric:: {{ _('Classes') }}
+
+ .. autosummary::
+ {% for item in classes %}
+ {{ item }}
+ {%- endfor %}
+ {% endif %}
+ {% endblock %}
+
+ {% block exceptions %}
+ {% if exceptions %}
+ .. rubric:: {{ _('Exceptions') }}
+
+ .. autosummary::
+ {% for item in exceptions %}
+ {{ item }}
+ {%- endfor %}
+ {% endif %}
+ {% endblock %}
+
+{% block modules %}
+{% if modules %}
+.. rubric:: Modules
+
+.. autosummary::
+ :toctree:
+ :recursive:
+{% for item in modules %}
+ {{ item }}
+{%- endfor %}
+{% endif %}
+{% endblock %}
diff --git a/sphinx/ext/coverage.py b/sphinx/ext/coverage.py
new file mode 100644
index 0000000..e3d9745
--- /dev/null
+++ b/sphinx/ext/coverage.py
@@ -0,0 +1,400 @@
+"""Check Python modules and C API for coverage.
+
+Mostly written by Josip Dzolonga for the Google Highly Open Participation
+contest.
+"""
+
+from __future__ import annotations
+
+import glob
+import inspect
+import pickle
+import re
+import sys
+from importlib import import_module
+from os import path
+from typing import IO, TYPE_CHECKING, Any, TextIO
+
+import sphinx
+from sphinx.builders import Builder
+from sphinx.locale import __
+from sphinx.util import logging
+from sphinx.util.console import red # type: ignore[attr-defined]
+from sphinx.util.inspect import safe_getattr
+
+if TYPE_CHECKING:
+ from collections.abc import Iterator
+
+ from sphinx.application import Sphinx
+
+logger = logging.getLogger(__name__)
+
+
+# utility
def write_header(f: IO[str], text: str, char: str = '-') -> None:
    """Write *text* to *f*, underlined with *char* and followed by a blank line."""
    f.write(f'{text}\n')
    f.write(f'{char * len(text)}\n\n')
+
+
def compile_regex_list(name: str, exps: list[str]) -> list[re.Pattern[str]]:
    """Compile every expression in *exps*, dropping (and warning about) bad ones.

    *name* is the config option the expressions came from; it is used only in
    the warning message.  The original annotation claimed ``exps: str`` even
    though every caller passes a list of pattern strings — fixed here.
    """
    lst = []
    for exp in exps:
        try:
            lst.append(re.compile(exp))
        except Exception:
            # a single invalid pattern should not abort the whole build
            logger.warning(__('invalid regex %r in %s'), exp, name)
    return lst
+
+
def _write_table(table: list[list[str]]) -> Iterator[str]:
    """Yield the lines of a reST grid table for *table* (first row = header)."""
    n_columns = len(table[0])
    # column width = widest cell in that column, plus one padding space
    sizes = [max(len(row[col]) for row in table) + 1 for col in range(n_columns)]

    yield _add_line(sizes, '-')
    header, *body = table
    yield from _add_row(sizes, header, '=')
    for row in body:
        yield from _add_row(sizes, row, '-')
+
+
def _add_line(sizes: list[int], separator: str) -> str:
    """Return a horizontal table rule such as ``+----+---+``."""
    rule = '+'
    for size in sizes:
        rule += separator * (size + 1) + '+'
    return rule
+
+
def _add_row(col_widths: list[int], columns: list[str], separator: str) -> Iterator[str]:
    """Yield one padded table row followed by its separator rule."""
    cells = [f'| {column: <{col_widths[i]}}' for i, column in enumerate(columns)]
    yield ''.join(cells) + '|'
    yield _add_line(col_widths, separator)
+
+
class CoverageBuilder(Builder):
    """
    Evaluates coverage of code in the documentation.

    Compares the Python objects importable from the documented modules (and,
    optionally, C API declarations matched in source files) against the
    objects registered in the build environment, then writes reports to
    ``python.txt`` / ``c.txt`` in the output directory and a machine-readable
    ``undoc.pickle``.
    """
    name = 'coverage'
    epilog = __('Testing of coverage in the sources finished, look at the '
                'results in %(outdir)s' + path.sep + 'python.txt.')

    def init(self) -> None:
        """Resolve the configured C source globs and compile all ignore regexes."""
        self.c_sourcefiles: list[str] = []
        for pattern in self.config.coverage_c_path:
            # globs are interpreted relative to the source directory
            pattern = path.join(self.srcdir, pattern)
            self.c_sourcefiles.extend(glob.glob(pattern))

        self.c_regexes: list[tuple[str, re.Pattern[str]]] = []
        for (name, exp) in self.config.coverage_c_regexes.items():
            try:
                self.c_regexes.append((name, re.compile(exp)))
            except Exception:
                logger.warning(__('invalid regex %r in coverage_c_regexes'), exp)

        self.c_ignorexps: dict[str, list[re.Pattern[str]]] = {}
        for (name, exps) in self.config.coverage_ignore_c_items.items():
            self.c_ignorexps[name] = compile_regex_list('coverage_ignore_c_items',
                                                        exps)
        self.mod_ignorexps = compile_regex_list('coverage_ignore_modules',
                                                self.config.coverage_ignore_modules)
        self.cls_ignorexps = compile_regex_list('coverage_ignore_classes',
                                                self.config.coverage_ignore_classes)
        self.fun_ignorexps = compile_regex_list('coverage_ignore_functions',
                                                self.config.coverage_ignore_functions)
        self.py_ignorexps = compile_regex_list('coverage_ignore_pyobjects',
                                               self.config.coverage_ignore_pyobjects)

    def get_outdated_docs(self) -> str:
        """This builder never writes documents; return a label for the log."""
        return 'coverage overview'

    def write(self, *ignored: Any) -> None:
        """Build and write the Python report, then the C report."""
        self.py_undoc: dict[str, dict[str, Any]] = {}
        self.py_undocumented: dict[str, set[str]] = {}
        self.py_documented: dict[str, set[str]] = {}
        self.build_py_coverage()
        self.write_py_coverage()

        self.c_undoc: dict[str, set[tuple[str, str]]] = {}
        self.build_c_coverage()
        self.write_c_coverage()

    def build_c_coverage(self) -> None:
        """Scan the configured C sources for declarations missing from the docs."""
        c_objects = {}
        for obj in self.env.domains['c'].get_objects():
            c_objects[obj[2]] = obj[1]
        for filename in self.c_sourcefiles:
            undoc: set[tuple[str, str]] = set()
            with open(filename, encoding="utf-8") as f:
                for line in f:
                    # each configured regex extracts one kind of declaration
                    for key, regex in self.c_regexes:
                        match = regex.match(line)
                        if match:
                            name = match.groups()[0]
                            if key not in c_objects:
                                undoc.add((key, name))
                                continue

                            if name not in c_objects[key]:
                                # for-else: only undocumented if no ignore
                                # pattern matched
                                for exp in self.c_ignorexps.get(key, []):
                                    if exp.match(name):
                                        break
                                else:
                                    undoc.add((key, name))
                                continue
            if undoc:
                self.c_undoc[filename] = undoc

    def write_c_coverage(self) -> None:
        """Write the undocumented C API elements to ``c.txt``."""
        output_file = path.join(self.outdir, 'c.txt')
        with open(output_file, 'w', encoding="utf-8") as op:
            if self.config.coverage_write_headline:
                write_header(op, 'Undocumented C API elements', '=')
            op.write('\n')

            for filename, undoc in self.c_undoc.items():
                write_header(op, filename)
                for typ, name in sorted(undoc):
                    op.write(' * %-50s [%9s]\n' % (name, typ))
                    if self.config.coverage_show_missing_items:
                        # warn (build-breaking under -W) or log, depending on mode
                        if self.app.quiet or self.app.warningiserror:
                            logger.warning(__('undocumented c api: %s [%s] in file %s'),
                                           name, typ, filename)
                        else:
                            logger.info(red('undocumented ') + 'c ' + 'api ' +
                                        '%-30s' % (name + " [%9s]" % typ) +
                                        red(' - in file ') + filename)
                op.write('\n')

    def ignore_pyobj(self, full_name: str) -> bool:
        """Return True if *full_name* matches any ``coverage_ignore_pyobjects`` regex."""
        return any(
            exp.search(full_name)
            for exp in self.py_ignorexps
        )

    def build_py_coverage(self) -> None:
        """Import every documented module and record its (un)documented members."""
        objects = self.env.domaindata['py']['objects']
        modules = self.env.domaindata['py']['modules']

        skip_undoc = self.config.coverage_skip_undoc_in_source

        for mod_name in modules:
            ignore = False
            for exp in self.mod_ignorexps:
                if exp.match(mod_name):
                    ignore = True
                    break
            if ignore or self.ignore_pyobj(mod_name):
                continue

            try:
                mod = import_module(mod_name)
            except ImportError as err:
                logger.warning(__('module %s could not be imported: %s'), mod_name, err)
                self.py_undoc[mod_name] = {'error': err}
                continue

            documented_objects: set[str] = set()
            undocumented_objects: set[str] = set()

            funcs = []
            classes: dict[str, list[str]] = {}

            for name, obj in inspect.getmembers(mod):
                # diverse module attributes are ignored:
                if name[0] == '_':
                    # begins in an underscore
                    continue
                if not hasattr(obj, '__module__'):
                    # cannot be attributed to a module
                    continue
                if obj.__module__ != mod_name:
                    # is not defined in this module
                    continue

                full_name = f'{mod_name}.{name}'
                if self.ignore_pyobj(full_name):
                    continue

                if inspect.isfunction(obj):
                    if full_name not in objects:
                        # for-else: count as undocumented only when no
                        # ignore pattern matched
                        for exp in self.fun_ignorexps:
                            if exp.match(name):
                                break
                        else:
                            if skip_undoc and not obj.__doc__:
                                continue
                            funcs.append(name)
                            undocumented_objects.add(full_name)
                    else:
                        documented_objects.add(full_name)
                elif inspect.isclass(obj):
                    for exp in self.cls_ignorexps:
                        if exp.match(name):
                            break
                    else:
                        if full_name not in objects:
                            if skip_undoc and not obj.__doc__:
                                continue
                            # not documented at all
                            classes[name] = []
                            continue

                        attrs: list[str] = []

                        for attr_name in dir(obj):
                            if attr_name not in obj.__dict__:
                                # inherited attribute; only own members count
                                continue
                            try:
                                attr = safe_getattr(obj, attr_name)
                            except AttributeError:
                                continue
                            if not (inspect.ismethod(attr) or
                                    inspect.isfunction(attr)):
                                continue
                            if attr_name[0] == '_':
                                # starts with an underscore, ignore it
                                continue
                            if skip_undoc and not attr.__doc__:
                                # skip methods without docstring if wished
                                continue
                            full_attr_name = f'{full_name}.{attr_name}'
                            if self.ignore_pyobj(full_attr_name):
                                continue
                            if full_attr_name not in objects:
                                attrs.append(attr_name)
                                undocumented_objects.add(full_attr_name)
                            else:
                                documented_objects.add(full_attr_name)

                        if attrs:
                            # some attributes are undocumented
                            classes[name] = attrs

            self.py_undoc[mod_name] = {'funcs': funcs, 'classes': classes}
            self.py_undocumented[mod_name] = undocumented_objects
            self.py_documented[mod_name] = documented_objects

    def _write_py_statistics(self, op: TextIO) -> None:
        """Write the per-module coverage statistics table to *op*."""
        all_modules = set(self.py_documented.keys()).union(
            set(self.py_undocumented.keys()))
        all_objects: set[str] = set()
        all_documented_objects: set[str] = set()
        for module in all_modules:
            all_module_objects = self.py_documented[module].union(self.py_undocumented[module])
            all_objects = all_objects.union(all_module_objects)
            all_documented_objects = all_documented_objects.union(self.py_documented[module])

        # prepare tabular
        table = [['Module', 'Coverage', 'Undocumented']]
        for module in all_modules:
            module_objects = self.py_documented[module].union(self.py_undocumented[module])
            if len(module_objects):
                value = 100.0 * len(self.py_documented[module]) / len(module_objects)
            else:
                # an empty module counts as fully covered
                value = 100.0

            table.append([module, '%.2f%%' % value, '%d' % len(self.py_undocumented[module])])
        table.append([
            'TOTAL',
            f'{100 * len(all_documented_objects) / len(all_objects):.2f}%',
            f'{len(all_objects) - len(all_documented_objects)}',
        ])

        for line in _write_table(table):
            op.write(f'{line}\n')

    def write_py_coverage(self) -> None:
        """Write the undocumented Python objects report to ``python.txt``."""
        output_file = path.join(self.outdir, 'python.txt')
        failed = []
        with open(output_file, 'w', encoding="utf-8") as op:
            if self.config.coverage_write_headline:
                write_header(op, 'Undocumented Python objects', '=')

            if self.config.coverage_statistics_to_stdout:
                self._write_py_statistics(sys.stdout)

            if self.config.coverage_statistics_to_report:
                write_header(op, 'Statistics')
                self._write_py_statistics(op)
                op.write('\n')

            keys = sorted(self.py_undoc.keys())
            for name in keys:
                undoc = self.py_undoc[name]
                if 'error' in undoc:
                    # import failure recorded by build_py_coverage
                    failed.append((name, undoc['error']))
                else:
                    if not undoc['classes'] and not undoc['funcs']:
                        continue

                    write_header(op, name)
                    if undoc['funcs']:
                        op.write('Functions:\n')
                        op.writelines(' * %s\n' % x for x in undoc['funcs'])
                        if self.config.coverage_show_missing_items:
                            if self.app.quiet or self.app.warningiserror:
                                for func in undoc['funcs']:
                                    logger.warning(
                                        __('undocumented python function: %s :: %s'),
                                        name, func)
                            else:
                                for func in undoc['funcs']:
                                    logger.info(red('undocumented ') + 'py ' + 'function ' +
                                                '%-30s' % func + red(' - in module ') + name)
                        op.write('\n')
                    if undoc['classes']:
                        op.write('Classes:\n')
                        for class_name, methods in sorted(
                                undoc['classes'].items()):
                            if not methods:
                                # class itself is undocumented
                                op.write(' * %s\n' % class_name)
                                if self.config.coverage_show_missing_items:
                                    if self.app.quiet or self.app.warningiserror:
                                        logger.warning(
                                            __('undocumented python class: %s :: %s'),
                                            name, class_name)
                                    else:
                                        logger.info(red('undocumented ') + 'py ' +
                                                    'class ' + '%-30s' % class_name +
                                                    red(' - in module ') + name)
                            else:
                                # class is documented but some methods are not
                                op.write(' * %s -- missing methods:\n\n' % class_name)
                                op.writelines('   - %s\n' % x for x in methods)
                                if self.config.coverage_show_missing_items:
                                    if self.app.quiet or self.app.warningiserror:
                                        for meth in methods:
                                            logger.warning(
                                                __('undocumented python method:' +
                                                   ' %s :: %s :: %s'),
                                                name, class_name, meth)
                                    else:
                                        for meth in methods:
                                            logger.info(red('undocumented ') + 'py ' +
                                                        'method ' + '%-30s' %
                                                        (class_name + '.' + meth) +
                                                        red(' - in module ') + name)
                        op.write('\n')

            if failed:
                write_header(op, 'Modules that failed to import')
                op.writelines(' * %s -- %s\n' % x for x in failed)

    def finish(self) -> None:
        # dump the coverage data to a pickle file too
        picklepath = path.join(self.outdir, 'undoc.pickle')
        with open(picklepath, 'wb') as dumpfile:
            pickle.dump((self.py_undoc, self.c_undoc,
                         self.py_undocumented, self.py_documented), dumpfile)
+
+
def setup(app: Sphinx) -> dict[str, Any]:
    """Register the coverage builder and its configuration options."""
    app.add_builder(CoverageBuilder)
    # simple options: (name, default); all use rebuild=False
    simple_options: list[tuple[str, Any]] = [
        ('coverage_ignore_modules', []),
        ('coverage_ignore_functions', []),
        ('coverage_ignore_classes', []),
        ('coverage_ignore_pyobjects', []),
        ('coverage_c_path', []),
        ('coverage_c_regexes', {}),
        ('coverage_ignore_c_items', {}),
        ('coverage_write_headline', True),
    ]
    for option, default in simple_options:
        app.add_config_value(option, default, False)
    app.add_config_value('coverage_statistics_to_report', True, False, (bool,))
    app.add_config_value('coverage_statistics_to_stdout', True, False, (bool,))
    app.add_config_value('coverage_skip_undoc_in_source', False, False)
    app.add_config_value('coverage_show_missing_items', False, False)
    return {'version': sphinx.__display_version__, 'parallel_read_safe': True}
diff --git a/sphinx/ext/doctest.py b/sphinx/ext/doctest.py
new file mode 100644
index 0000000..c55ef2f
--- /dev/null
+++ b/sphinx/ext/doctest.py
@@ -0,0 +1,575 @@
+"""Mimic doctest in Sphinx.
+
+The extension automatically execute code snippets and checks their results.
+"""
+
+from __future__ import annotations
+
+import doctest
+import re
+import sys
+import time
+from io import StringIO
+from os import path
+from typing import TYPE_CHECKING, Any, Callable
+
+from docutils import nodes
+from docutils.parsers.rst import directives
+from packaging.specifiers import InvalidSpecifier, SpecifierSet
+from packaging.version import Version
+
+import sphinx
+from sphinx.builders import Builder
+from sphinx.locale import __
+from sphinx.util import logging
+from sphinx.util.console import bold # type: ignore[attr-defined]
+from sphinx.util.docutils import SphinxDirective
+from sphinx.util.osutil import relpath
+
+if TYPE_CHECKING:
+ from collections.abc import Iterable, Sequence
+
+ from docutils.nodes import Element, Node, TextElement
+
+ from sphinx.application import Sphinx
+ from sphinx.util.typing import OptionSpec
+
+
+logger = logging.getLogger(__name__)
+
# Matches a line starting (after optional whitespace) with the literal
# <BLANKLINE> marker that doctest uses to represent empty output lines.
blankline_re = re.compile(r'^\s*<BLANKLINE>', re.MULTILINE)
# Matches a trailing "# doctest: ..." option comment so it can be stripped
# from the presented code.
doctestopt_re = re.compile(r'#\s*doctest:.+$', re.MULTILINE)
+
+
def is_allowed_version(spec: str, version: str) -> bool:
    """Check whether *version* satisfies the version specifier *spec*.

    This obeys PEP-440 specifiers:
    https://peps.python.org/pep-0440/#version-specifiers

    Some examples:

    >>> is_allowed_version('<=3.5', '3.3')
    True
    >>> is_allowed_version('<=3.2', '3.3')
    False
    >>> is_allowed_version('>3.2, <4.0', '3.3')
    True
    """
    specifiers = SpecifierSet(spec)
    return specifiers.contains(Version(version))
+
+
+# set up the necessary directives
+
class TestDirective(SphinxDirective):
    """
    Base class for doctest-related directives.

    The subclasses (``testsetup``, ``testcleanup``, ``doctest``, ``testcode``
    and ``testoutput``) only differ in their ``option_spec``; this shared
    ``run`` implementation builds a docutils node carrying the snippet plus
    the attributes (``testnodetype``, ``groups``, ``options``, ...) that the
    doctest builder inspects later.
    """

    has_content = True
    required_arguments = 0
    optional_arguments = 1
    final_argument_whitespace = True

    def run(self) -> list[Node]:
        # use ordinary docutils nodes for test code: they get special attributes
        # so that our builder recognizes them, and the other builders are happy.
        code = '\n'.join(self.content)
        test = None
        if self.name == 'doctest':
            if '<BLANKLINE>' in code:
                # convert <BLANKLINE>s to ordinary blank lines for presentation
                test = code
                code = blankline_re.sub('', code)
        if doctestopt_re.search(code) and 'no-trim-doctest-flags' not in self.options:
            if not test:
                test = code
            code = doctestopt_re.sub('', code)
        nodetype: type[TextElement] = nodes.literal_block
        if self.name in ('testsetup', 'testcleanup') or 'hide' in self.options:
            nodetype = nodes.comment
        if self.arguments:
            groups = [x.strip() for x in self.arguments[0].split(',')]
        else:
            groups = ['default']
        node = nodetype(code, code, testnodetype=self.name, groups=groups)
        self.set_source_info(node)
        if test is not None:
            # 'test' keeps the unstripped snippet: the builder runs this one,
            # while the stripped 'code' is what gets rendered
            node['test'] = test
        if self.name == 'doctest':
            node['language'] = 'pycon'
        elif self.name == 'testcode':
            node['language'] = 'python'
        elif self.name == 'testoutput':
            # don't try to highlight output
            node['language'] = 'none'
        node['options'] = {}
        if self.name in ('doctest', 'testoutput') and 'options' in self.options:
            # parse doctest-like output comparison flags
            option_strings = self.options['options'].replace(',', ' ').split()
            for option in option_strings:
                prefix, option_name = option[0], option[1:]
                if prefix not in '+-':
                    self.state.document.reporter.warning(
                        __("missing '+' or '-' in '%s' option.") % option,
                        line=self.lineno)
                    continue
                if option_name not in doctest.OPTIONFLAGS_BY_NAME:
                    self.state.document.reporter.warning(
                        __("'%s' is not a valid option.") % option_name,
                        line=self.lineno)
                    continue
                # reuse the already-split prefix/name instead of re-slicing
                flag = doctest.OPTIONFLAGS_BY_NAME[option_name]
                node['options'][flag] = (prefix == '+')
        if self.name == 'doctest' and 'pyversion' in self.options:
            try:
                spec = self.options['pyversion']
                python_version = '.'.join([str(v) for v in sys.version_info[:3]])
                if not is_allowed_version(spec, python_version):
                    flag = doctest.OPTIONFLAGS_BY_NAME['SKIP']
                    node['options'][flag] = True  # Skip the test
            except InvalidSpecifier:
                self.state.document.reporter.warning(
                    __("'%s' is not a valid pyversion option") % spec,
                    line=self.lineno)
        if 'skipif' in self.options:
            node['skipif'] = self.options['skipif']
        if 'trim-doctest-flags' in self.options:
            node['trim_flags'] = True
        elif 'no-trim-doctest-flags' in self.options:
            node['trim_flags'] = False
        return [node]
+
+
class TestsetupDirective(TestDirective):
    """``testsetup`` directive: code run before the tests of its groups."""

    option_spec: OptionSpec = {
        'skipif': directives.unchanged_required,
    }
+
+
class TestcleanupDirective(TestDirective):
    """``testcleanup`` directive: code run after the tests of its groups."""

    option_spec: OptionSpec = {
        'skipif': directives.unchanged_required,
    }
+
+
class DoctestDirective(TestDirective):
    """``doctest`` directive: an interleaved code/output doctest block."""

    option_spec: OptionSpec = {
        'hide': directives.flag,
        'no-trim-doctest-flags': directives.flag,
        'options': directives.unchanged,
        'pyversion': directives.unchanged_required,
        'skipif': directives.unchanged_required,
        'trim-doctest-flags': directives.flag,
    }
+
+
class TestcodeDirective(TestDirective):
    """``testcode`` directive: code whose output is checked separately."""

    option_spec: OptionSpec = {
        'hide': directives.flag,
        'no-trim-doctest-flags': directives.flag,
        'pyversion': directives.unchanged_required,
        'skipif': directives.unchanged_required,
        'trim-doctest-flags': directives.flag,
    }
+
+
class TestoutputDirective(TestDirective):
    """``testoutput`` directive: expected output of a ``testcode`` block."""

    option_spec: OptionSpec = {
        'hide': directives.flag,
        'no-trim-doctest-flags': directives.flag,
        'options': directives.unchanged,
        'pyversion': directives.unchanged_required,
        'skipif': directives.unchanged_required,
        'trim-doctest-flags': directives.flag,
    }
+
+
# Shared parser used below to split doctest blocks into examples.
parser = doctest.DocTestParser()
+
+
+# helper classes
+
class TestGroup:
    """A named collection of test snippets, split into setup/tests/cleanup."""

    def __init__(self, name: str) -> None:
        self.name = name
        self.setup: list[TestCode] = []
        self.tests: list[list[TestCode] | tuple[TestCode, None]] = []
        self.cleanup: list[TestCode] = []

    def add_code(self, code: TestCode, prepend: bool = False) -> None:
        """File *code* into the right bucket according to its type."""
        kind = code.type
        if kind == 'testsetup':
            # prepended setup (e.g. global setup) must run before the rest
            position = 0 if prepend else len(self.setup)
            self.setup.insert(position, code)
        elif kind == 'testcleanup':
            self.cleanup.append(code)
        elif kind == 'doctest':
            self.tests.append([code])
        elif kind == 'testcode':
            # a following "testoutput" may replace the second element
            self.tests.append((code, None))
        elif kind == 'testoutput':
            # only attaches to a preceding testcode (a length-2 entry);
            # stray testoutput blocks are silently ignored
            if self.tests and len(self.tests[-1]) == 2:
                self.tests[-1] = [self.tests[-1][0], code]
        else:
            raise RuntimeError(__('invalid TestCode type'))

    def __repr__(self) -> str:
        return ('TestGroup(name=%r, setup=%r, cleanup=%r, tests=%r)'
                % (self.name, self.setup, self.cleanup, self.tests))
+
+
class TestCode:
    """A single snippet of test code taken from a document.

    Bundles the source text with its origin (type, filename, line number)
    and any doctest comparison options attached to it.
    """

    def __init__(self, code: str, type: str, filename: str,
                 lineno: int, options: dict | None = None) -> None:
        self.code = code
        self.type = type
        self.filename = filename
        self.lineno = lineno
        # a missing/empty options value becomes a fresh dict
        if options:
            self.options = options
        else:
            self.options = {}

    def __repr__(self) -> str:
        return ('TestCode(%r, %r, filename=%r, lineno=%r, options=%r)'
                % (self.code, self.type, self.filename, self.lineno, self.options))
+
+
class SphinxDocTestRunner(doctest.DocTestRunner):
    """DocTestRunner that redirects its summary and fixes linecache lookups."""

    def summarize(self, out: Callable, verbose: bool | None = None,  # type: ignore[override]
                  ) -> tuple[int, int]:
        """Run the base class summary, but hand the report text to *out*."""
        buffer = StringIO()
        original_stdout = sys.stdout
        sys.stdout = buffer
        try:
            results = super().summarize(verbose)
        finally:
            # always restore stdout, even if summarize() raises
            sys.stdout = original_stdout
        out(buffer.getvalue())
        return results

    def _DocTestRunner__patched_linecache_getlines(self, filename: str,
                                                   module_globals: Any = None) -> Any:
        # overrides the name-mangled helper in DocTestRunner, adding a bounds
        # check: because we compile multiple doctest blocks with the same name
        # (viz. the group name), an outer stack frame in a traceback may refer
        # to an example number this particular test does not have
        match = self._DocTestRunner__LINECACHE_FILENAME_RE.match(  # type: ignore[attr-defined]
            filename)
        if match and match.group('name') == self.test.name:
            example_index = int(match.group('examplenum'))
            if example_index < len(self.test.examples):
                return self.test.examples[example_index].source.splitlines(True)
        return self.save_linecache_getlines(  # type: ignore[attr-defined]
            filename, module_globals)
+
+
+# the new builder -- use sphinx-build.py -b doctest to run
+
class DocTestBuilder(Builder):
    """
    Runs test snippets in the documentation.

    Collects doctest/testcode/testoutput nodes per document, organizes them
    into :class:`TestGroup` objects and executes them with three separate
    :class:`SphinxDocTestRunner` instances (setup, tests, cleanup) so their
    statistics can be reported independently.
    """
    name = 'doctest'
    epilog = __('Testing of doctests in the sources finished, look at the '
                'results in %(outdir)s/output.txt.')

    def init(self) -> None:
        """Set up runner options, counters and the output file."""
        # default options
        self.opt = self.config.doctest_default_flags

        # HACK HACK HACK
        # doctest compiles its snippets with type 'single'. That is nice
        # for doctest examples but unusable for multi-statement code such
        # as setup code -- to be able to use doctest error reporting with
        # that code nevertheless, we monkey-patch the "compile" it uses.
        doctest.compile = self.compile  # type: ignore[attr-defined]

        sys.path[0:0] = self.config.doctest_path

        # compile mode used by self.compile(); toggled between 'single' and
        # 'exec' depending on the kind of snippet being run
        self.type = 'single'

        self.total_failures = 0
        self.total_tries = 0
        self.setup_failures = 0
        self.setup_tries = 0
        self.cleanup_failures = 0
        self.cleanup_tries = 0

        date = time.strftime('%Y-%m-%d %H:%M:%S')

        outpath = self.outdir.joinpath('output.txt')
        self.outfile = outpath.open('w', encoding='utf-8')  # NoQA: SIM115
        self.outfile.write(('Results of doctest builder run on %s\n'
                            '==================================%s\n') %
                           (date, '=' * len(date)))

    def _out(self, text: str) -> None:
        """Write *text* to both the log and the output file."""
        logger.info(text, nonl=True)
        self.outfile.write(text)

    def _warn_out(self, text: str) -> None:
        """Like :meth:`_out`, but log as a warning in quiet/strict mode."""
        if self.app.quiet or self.app.warningiserror:
            logger.warning(text)
        else:
            logger.info(text, nonl=True)
        self.outfile.write(text)

    def get_target_uri(self, docname: str, typ: str | None = None) -> str:
        # this builder produces no documents, so there is nothing to link to
        return ''

    def get_outdated_docs(self) -> set[str]:
        # re-test every document on each run
        return self.env.found_docs

    def finish(self) -> None:
        """Write the executive summary and set the exit status."""
        # write executive summary
        def s(v: int) -> str:
            return 's' if v != 1 else ''
        repl = (self.total_tries, s(self.total_tries),
                self.total_failures, s(self.total_failures),
                self.setup_failures, s(self.setup_failures),
                self.cleanup_failures, s(self.cleanup_failures))
        self._out('''
Doctest summary
===============
%5d test%s
%5d failure%s in tests
%5d failure%s in setup code
%5d failure%s in cleanup code
''' % repl)
        self.outfile.close()

        if self.total_failures or self.setup_failures or self.cleanup_failures:
            self.app.statuscode = 1

    def write(self, build_docnames: Iterable[str] | None, updated_docnames: Sequence[str],
              method: str = 'update') -> None:
        """Run the tests of every (or the given) document."""
        if build_docnames is None:
            build_docnames = sorted(self.env.all_docs)

        logger.info(bold('running tests...'))
        for docname in build_docnames:
            # no need to resolve the doctree
            doctree = self.env.get_doctree(docname)
            self.test_doc(docname, doctree)

    def get_filename_for_node(self, node: Node, docname: str) -> str:
        """Try to get the file which actually contains the doctest, not the
        filename of the document it's included in."""
        try:
            filename = relpath(node.source, self.env.srcdir)\
                .rsplit(':docstring of ', maxsplit=1)[0]
        except Exception:
            filename = self.env.doc2path(docname, False)
        return filename

    @staticmethod
    def get_line_number(node: Node) -> int:
        """Get the real line number or admit we don't know."""
        # TODO:  Work out how to store or calculate real (file-relative)
        #       line numbers for doctest blocks in docstrings.
        if ':docstring of ' in path.basename(node.source or ''):
            # The line number is given relative to the stripped docstring,
            # not the file.  This is correct where it is set, in
            # `docutils.nodes.Node.setup_child`, but Sphinx should report
            # relative to the file, not the docstring.
            return None  # type: ignore[return-value]
        if node.line is not None:
            # TODO: find the root cause of this off by one error.
            return node.line - 1
        return None

    def skipped(self, node: Element) -> bool:
        """Evaluate the node's ``skipif`` expression (if any)."""
        if 'skipif' not in node:
            return False
        else:
            condition = node['skipif']
            context: dict[str, Any] = {}
            # NOTE: these exec/eval calls run strings supplied via conf.py
            # and the document source, which are trusted input
            if self.config.doctest_global_setup:
                exec(self.config.doctest_global_setup, context)  # NoQA: S102
            should_skip = eval(condition, context)  # NoQA: PGH001
            if self.config.doctest_global_cleanup:
                exec(self.config.doctest_global_cleanup, context)  # NoQA: S102
            return should_skip

    def test_doc(self, docname: str, doctree: Node) -> None:
        """Collect the test groups of one document and run them."""
        groups: dict[str, TestGroup] = {}
        add_to_all_groups = []
        self.setup_runner = SphinxDocTestRunner(verbose=False,
                                                optionflags=self.opt)
        self.test_runner = SphinxDocTestRunner(verbose=False,
                                               optionflags=self.opt)
        self.cleanup_runner = SphinxDocTestRunner(verbose=False,
                                                  optionflags=self.opt)

        # share one fake stdout so interleaved output stays in order
        self.test_runner._fakeout = self.setup_runner._fakeout  # type: ignore[attr-defined]
        self.cleanup_runner._fakeout = self.setup_runner._fakeout  # type: ignore[attr-defined]

        if self.config.doctest_test_doctest_blocks:
            def condition(node: Node) -> bool:
                return (isinstance(node, (nodes.literal_block, nodes.comment)) and
                        'testnodetype' in node) or \
                    isinstance(node, nodes.doctest_block)
        else:
            def condition(node: Node) -> bool:
                return isinstance(node, (nodes.literal_block, nodes.comment)) \
                    and 'testnodetype' in node
        for node in doctree.findall(condition):  # type: Element
            if self.skipped(node):
                continue

            source = node['test'] if 'test' in node else node.astext()
            filename = self.get_filename_for_node(node, docname)
            line_number = self.get_line_number(node)
            if not source:
                logger.warning(__('no code/output in %s block at %s:%s'),
                               node.get('testnodetype', 'doctest'),
                               filename, line_number)
            code = TestCode(source, type=node.get('testnodetype', 'doctest'),
                            filename=filename, lineno=line_number,
                            options=node.get('options'))
            node_groups = node.get('groups', ['default'])
            if '*' in node_groups:
                # defer: '*' snippets join every group once all are known
                add_to_all_groups.append(code)
                continue
            for groupname in node_groups:
                if groupname not in groups:
                    groups[groupname] = TestGroup(groupname)
                groups[groupname].add_code(code)
        for code in add_to_all_groups:
            for group in groups.values():
                group.add_code(code)
        if self.config.doctest_global_setup:
            code = TestCode(self.config.doctest_global_setup,
                            'testsetup', filename='<global_setup>', lineno=0)
            for group in groups.values():
                group.add_code(code, prepend=True)
        if self.config.doctest_global_cleanup:
            code = TestCode(self.config.doctest_global_cleanup,
                            'testcleanup', filename='<global_cleanup>', lineno=0)
            for group in groups.values():
                group.add_code(code)
        if not groups:
            return

        show_successes = self.config.doctest_show_successes
        if show_successes:
            self._out('\n'
                      f'Document: {docname}\n'
                      f'----------{"-" * len(docname)}\n')
        for group in groups.values():
            self.test_group(group)
        # Separately count results from setup code
        res_f, res_t = self.setup_runner.summarize(self._out, verbose=False)
        self.setup_failures += res_f
        self.setup_tries += res_t
        if self.test_runner.tries:
            res_f, res_t = self.test_runner.summarize(
                self._out, verbose=show_successes)
            self.total_failures += res_f
            self.total_tries += res_t
        if self.cleanup_runner.tries:
            res_f, res_t = self.cleanup_runner.summarize(
                self._out, verbose=show_successes)
            self.cleanup_failures += res_f
            self.cleanup_tries += res_t

    def compile(self, code: str, name: str, type: str, flags: Any, dont_inherit: bool) -> Any:
        """Replacement for doctest's ``compile`` (see the HACK in init)."""
        # ``self.type`` selects between 'single' (ordinary doctest examples)
        # and 'exec' (multi-statement setup/testcode snippets)
        return compile(code, name, self.type, flags, dont_inherit)

    def test_group(self, group: TestGroup) -> None:
        """Run one test group's setup, tests and cleanup snippets."""
        # shared namespace: state set up in earlier snippets is visible later
        ns: dict = {}

        def run_setup_cleanup(runner: Any, testcodes: list[TestCode], what: Any) -> bool:
            examples = []
            for testcode in testcodes:
                example = doctest.Example(testcode.code, '', lineno=testcode.lineno)
                examples.append(example)
            if not examples:
                return True
            # simulate a doctest with the code
            sim_doctest = doctest.DocTest(examples, {},
                                          f'{group.name} ({what} code)',
                                          testcodes[0].filename, 0, None)
            sim_doctest.globs = ns
            old_f = runner.failures
            self.type = 'exec'  # the snippet may contain multiple statements
            runner.run(sim_doctest, out=self._warn_out, clear_globs=False)
            if runner.failures > old_f:
                return False
            return True

        # run the setup code
        if not run_setup_cleanup(self.setup_runner, group.setup, 'setup'):
            # if setup failed, don't run the group
            return

        # run the tests
        for code in group.tests:
            if len(code) == 1:
                # ordinary doctests (code/output interleaved)
                try:
                    test = parser.get_doctest(code[0].code, {}, group.name,
                                              code[0].filename, code[0].lineno)
                except Exception:
                    logger.warning(__('ignoring invalid doctest code: %r'), code[0].code,
                                   location=(code[0].filename, code[0].lineno))
                    continue
                if not test.examples:
                    continue
                for example in test.examples:
                    # apply directive's comparison options
                    new_opt = code[0].options.copy()
                    new_opt.update(example.options)
                    example.options = new_opt
                self.type = 'single'  # as for ordinary doctests
            else:
                # testcode and output separate
                output = code[1].code if code[1] else ''
                options = code[1].options if code[1] else {}
                # disable <BLANKLINE> processing as it is not needed
                options[doctest.DONT_ACCEPT_BLANKLINE] = True
                # find out if we're testing an exception
                m = parser._EXCEPTION_RE.match(output)  # type: ignore[attr-defined]
                if m:
                    exc_msg = m.group('msg')
                else:
                    exc_msg = None
                example = doctest.Example(code[0].code, output, exc_msg=exc_msg,
                                          lineno=code[0].lineno, options=options)
                test = doctest.DocTest([example], {}, group.name,
                                       code[0].filename, code[0].lineno, None)
                self.type = 'exec'  # multiple statements again
            # DocTest.__init__ copies the globs namespace, which we don't want
            test.globs = ns
            # also don't clear the globs namespace after running the doctest
            self.test_runner.run(test, out=self._warn_out, clear_globs=False)

        # run the cleanup
        run_setup_cleanup(self.cleanup_runner, group.cleanup, 'cleanup')
+
+
def setup(app: Sphinx) -> dict[str, Any]:
    """Register the doctest directives, builder and configuration values."""
    for directive_name, directive_class in (
        ('testsetup', TestsetupDirective),
        ('testcleanup', TestcleanupDirective),
        ('doctest', DoctestDirective),
        ('testcode', TestcodeDirective),
        ('testoutput', TestoutputDirective),
    ):
        app.add_directive(directive_name, directive_class)
    app.add_builder(DocTestBuilder)
    app.add_config_value('doctest_show_successes', True, False, (bool,))
    # this config value adds to sys.path
    app.add_config_value('doctest_path', [], False)
    app.add_config_value('doctest_test_doctest_blocks', 'default', False)
    app.add_config_value('doctest_global_setup', '', False)
    app.add_config_value('doctest_global_cleanup', '', False)
    default_flags = (doctest.DONT_ACCEPT_TRUE_FOR_1
                     | doctest.ELLIPSIS
                     | doctest.IGNORE_EXCEPTION_DETAIL)
    app.add_config_value('doctest_default_flags', default_flags, False)
    return {'version': sphinx.__display_version__, 'parallel_read_safe': True}
diff --git a/sphinx/ext/duration.py b/sphinx/ext/duration.py
new file mode 100644
index 0000000..26e197f
--- /dev/null
+++ b/sphinx/ext/duration.py
@@ -0,0 +1,92 @@
+"""Measure document reading durations."""
+
+from __future__ import annotations
+
+import time
+from itertools import islice
+from operator import itemgetter
+from typing import TYPE_CHECKING, cast
+
+import sphinx
+from sphinx.domains import Domain
+from sphinx.locale import __
+from sphinx.util import logging
+
+if TYPE_CHECKING:
+ from docutils import nodes
+
+ from sphinx.application import Sphinx
+
+logger = logging.getLogger(__name__)
+
+
class DurationDomain(Domain):
    """A domain that stores per-document reading durations."""

    name = 'duration'

    @property
    def reading_durations(self) -> dict[str, float]:
        """Mapping of docname to seconds spent reading it."""
        return self.data.setdefault('reading_durations', {})

    def note_reading_duration(self, duration: float) -> None:
        """Record the reading duration of the current document."""
        self.reading_durations[self.env.docname] = duration

    def clear(self) -> None:
        """Forget all recorded durations."""
        self.reading_durations.clear()

    def clear_doc(self, docname: str) -> None:
        """Forget the duration recorded for *docname*, if any."""
        self.reading_durations.pop(docname, None)

    def merge_domaindata(self, docnames: list[str], otherdata: dict[str, float]) -> None:
        """Take over the durations another process recorded for *docnames*."""
        relevant = set(docnames)
        self.reading_durations.update(
            (docname, duration)
            for docname, duration in otherdata.items()
            if docname in relevant
        )
+
+
def on_builder_inited(app: Sphinx) -> None:
    """Reset the duration domain when a build starts.

    This discards the measurements of the previous build.
    """
    duration_domain = cast(DurationDomain, app.env.get_domain('duration'))
    duration_domain.clear()
+
+
def on_source_read(app: Sphinx, docname: str, content: list[str]) -> None:
    """Remember when reading of the current document started."""
    started_at = time.monotonic()
    app.env.temp_data['started_at'] = started_at
+
+
def on_doctree_read(app: Sphinx, doctree: nodes.document) -> None:
    """Compute and store how long reading the current document took."""
    elapsed = time.monotonic() - app.env.temp_data['started_at']
    duration_domain = cast(DurationDomain, app.env.get_domain('duration'))
    duration_domain.note_reading_duration(elapsed)
+
+
def on_build_finished(app: Sphinx, error: Exception) -> None:
    """Log the five slowest-reading documents once the build has finished."""
    duration_domain = cast(DurationDomain, app.env.get_domain('duration'))
    reading_durations = duration_domain.reading_durations
    if not reading_durations:
        return
    ranking = sorted(reading_durations.items(), key=itemgetter(1), reverse=True)

    logger.info('')
    logger.info(__('====================== slowest reading durations ======================='))
    for docname, d in ranking[:5]:
        logger.info(f'{d:.3f} {docname}')  # NoQA: G004
+
+
def setup(app: Sphinx) -> dict[str, bool | str]:
    """Register the duration domain and its measuring event handlers."""
    app.add_domain(DurationDomain)
    handlers = (
        ('builder-inited', on_builder_inited),
        ('source-read', on_source_read),
        ('doctree-read', on_doctree_read),
        ('build-finished', on_build_finished),
    )
    for event, handler in handlers:
        app.connect(event, handler)

    return {
        'version': sphinx.__display_version__,
        'parallel_read_safe': True,
        'parallel_write_safe': True,
    }
diff --git a/sphinx/ext/extlinks.py b/sphinx/ext/extlinks.py
new file mode 100644
index 0000000..173df4d
--- /dev/null
+++ b/sphinx/ext/extlinks.py
@@ -0,0 +1,125 @@
+"""Extension to save typing and prevent hard-coding of base URLs in reST files.
+
+This adds a new config value called ``extlinks`` that is created like this::
+
+ extlinks = {'exmpl': ('https://example.invalid/%s.html', caption), ...}
+
+Now you can use e.g. :exmpl:`foo` in your documents. This will create a
+link to ``https://example.invalid/foo.html``. The link caption depends on
+the *caption* value given:
+
+- If it is ``None``, the caption will be the full URL.
+- If it is a string, it must contain ``%s`` exactly once. In this case the
+ caption will be *caption* with the role content substituted for ``%s``.
+
+You can also give an explicit caption, e.g. :exmpl:`Foo <foo>`.
+
+Both, the url string and the caption string must escape ``%`` as ``%%``.
+"""
+
+from __future__ import annotations
+
+import re
+from typing import TYPE_CHECKING, Any
+
+from docutils import nodes, utils
+
+import sphinx
+from sphinx.locale import __
+from sphinx.transforms.post_transforms import SphinxPostTransform
+from sphinx.util import logging, rst
+from sphinx.util.nodes import split_explicit_title
+
+if TYPE_CHECKING:
+ from collections.abc import Sequence
+
+ from docutils.nodes import Node, system_message
+ from docutils.parsers.rst.states import Inliner
+
+ from sphinx.application import Sphinx
+ from sphinx.util.typing import RoleFunction
+
+logger = logging.getLogger(__name__)
+
+
class ExternalLinksChecker(SphinxPostTransform):
    """
    For each external link, check if it can be replaced by an extlink.

    We treat each ``reference`` node without ``internal`` attribute as an external link.
    """

    default_priority = 500

    def run(self, **kwargs: Any) -> None:
        """Scan the document for hard-coded external references (if enabled)."""
        if not self.config.extlinks_detect_hardcoded_links:
            return

        for refnode in self.document.findall(nodes.reference):
            self.check_uri(refnode)

    def check_uri(self, refnode: nodes.reference) -> None:
        """
        If the URI in ``refnode`` has a replacement in ``extlinks``,
        emit a warning with a replacement suggestion.
        """
        if 'internal' in refnode or 'refuri' not in refnode:
            return

        uri = refnode['refuri']
        title = refnode.astext()

        for alias, (base_uri, _caption) in self.app.config.extlinks.items():
            # re.escape() leaves '%' untouched, so the '%s' placeholder in the
            # base URI survives and can be turned into a capturing group
            uri_pattern = re.compile(re.escape(base_uri).replace('%s', '(?P<value>.+)'))

            match = uri_pattern.match(uri)
            # use .get(): a base URI without '%s' yields a pattern without a
            # 'value' group, where match.group('value') would raise instead
            value = match.groupdict().get('value') if match else None
            if value and '/' not in value:
                # build a replacement suggestion
                msg = __('hardcoded link %r could be replaced by an extlink '
                         '(try using %r instead)')
                if uri != title:
                    replacement = f":{alias}:`{rst.escape(title)} <{value}>`"
                else:
                    replacement = f":{alias}:`{value}`"
                logger.warning(msg, uri, replacement, location=refnode)
+
+
def make_link_role(name: str, base_url: str, caption: str | None) -> RoleFunction:
    """Return a role function expanding *base_url* with the role's content.

    *base_url* (and *caption*, when given) must contain exactly one ``%s``
    placeholder; literal percent signs are escaped as ``%%``.  If *caption*
    is ``None``, the fully expanded URL is used as the link caption.
    """
    # Remark: It is an implementation detail that we use Pythons %-formatting.
    # So far we only expose ``%s`` and require quoting of ``%`` using ``%%``.
    def role(typ: str, rawtext: str, text: str, lineno: int,
             inliner: Inliner, options: dict | None = None, content: Sequence[str] = (),
             ) -> tuple[list[Node], list[system_message]]:
        text = utils.unescape(text)
        has_explicit_title, title, part = split_explicit_title(text)
        full_url = base_url % part
        if not has_explicit_title:
            if caption is None:
                # no caption configured: show the full URL
                title = full_url
            else:
                title = caption % part
        pnode = nodes.reference(title, title, internal=False, refuri=full_url)
        return [pnode], []
    return role
+
+
def setup_link_roles(app: Sphinx) -> None:
    """Register one role per entry of the ``extlinks`` config value."""
    extlinks = app.config.extlinks
    for name in extlinks:
        base_url, caption = extlinks[name]
        app.add_role(name, make_link_role(name, base_url, caption))
+
+
def setup(app: Sphinx) -> dict[str, Any]:
    """Register the extlinks config values, role factory and link checker."""
    app.add_config_value('extlinks', {}, 'env')
    app.add_config_value('extlinks_detect_hardcoded_links', False, 'env')

    # roles can only be created once the configuration has been initialized
    app.connect('builder-inited', setup_link_roles)
    app.add_post_transform(ExternalLinksChecker)
    metadata = {'version': sphinx.__display_version__, 'parallel_read_safe': True}
    return metadata
diff --git a/sphinx/ext/githubpages.py b/sphinx/ext/githubpages.py
new file mode 100644
index 0000000..c9be928
--- /dev/null
+++ b/sphinx/ext/githubpages.py
@@ -0,0 +1,57 @@
+"""To publish HTML docs at GitHub Pages, create .nojekyll file."""
+
+from __future__ import annotations
+
+import contextlib
+import os
+import urllib.parse
+from typing import TYPE_CHECKING, Any
+
+import sphinx
+
+if TYPE_CHECKING:
+ from sphinx.application import Sphinx
+ from sphinx.environment import BuildEnvironment
+
+
+def _get_domain_from_url(url: str) -> str:
+ """Get the domain from a URL."""
+ return url and urllib.parse.urlparse(url).hostname or ''
+
+
def create_nojekyll_and_cname(app: Sphinx, env: BuildEnvironment) -> None:
    """Manage the ``.nojekyll`` and ``CNAME`` files for GitHub Pages.

    For HTML-format builders (e.g. 'html', 'dirhtml') we unconditionally create
    the ``.nojekyll`` file to signal that GitHub Pages should not run Jekyll
    processing.

    If the :confval:`html_baseurl` option is set, we also create a CNAME file
    with the domain from ``html_baseurl``, so long as it is not a ``github.io``
    domain.

    If this extension is loaded and the domain in ``html_baseurl`` no longer
    requires a CNAME file, we remove any existing ``CNAME`` files from the
    output directory.
    """
    if app.builder.format != 'html':
        return

    outdir = app.builder.outdir
    outdir.joinpath('.nojekyll').touch()
    cname_path = os.path.join(outdir, 'CNAME')

    domain = _get_domain_from_url(app.config.html_baseurl)
    if not domain or domain.endswith(".github.io"):
        # GitHub Pages domains need no CNAME file; remove a stale one, if any
        with contextlib.suppress(FileNotFoundError):
            os.unlink(cname_path)
        return

    with open(cname_path, 'w', encoding="utf-8") as f:
        # NOTE: don't write a trailing newline. The `CNAME` file that's
        # auto-generated by the GitHub UI doesn't have one.
        f.write(domain)
+
+
def setup(app: Sphinx) -> dict[str, Any]:
    """Register the GitHub Pages helper on the ``env-updated`` event."""
    app.connect('env-updated', create_nojekyll_and_cname)
    metadata = {'version': sphinx.__display_version__, 'parallel_read_safe': True}
    return metadata
diff --git a/sphinx/ext/graphviz.py b/sphinx/ext/graphviz.py
new file mode 100644
index 0000000..528bf30
--- /dev/null
+++ b/sphinx/ext/graphviz.py
@@ -0,0 +1,467 @@
+"""Allow graphviz-formatted graphs to be included inline in generated documents.
+"""
+
+from __future__ import annotations
+
+import posixpath
+import re
+import subprocess
+import xml.etree.ElementTree as ET
+from hashlib import sha1
+from itertools import chain
+from os import path
+from subprocess import CalledProcessError
+from typing import TYPE_CHECKING, Any
+from urllib.parse import urlsplit, urlunsplit
+
+from docutils import nodes
+from docutils.parsers.rst import Directive, directives
+
+import sphinx
+from sphinx.errors import SphinxError
+from sphinx.locale import _, __
+from sphinx.util import logging
+from sphinx.util.docutils import SphinxDirective
+from sphinx.util.i18n import search_image_for_language
+from sphinx.util.nodes import set_source_info
+from sphinx.util.osutil import ensuredir
+
+if TYPE_CHECKING:
+ from docutils.nodes import Node
+
+ from sphinx.application import Sphinx
+ from sphinx.config import Config
+ from sphinx.util.typing import OptionSpec
+ from sphinx.writers.html import HTML5Translator
+ from sphinx.writers.latex import LaTeXTranslator
+ from sphinx.writers.manpage import ManualPageTranslator
+ from sphinx.writers.texinfo import TexinfoTranslator
+ from sphinx.writers.text import TextTranslator
+
+logger = logging.getLogger(__name__)
+
+
class GraphvizError(SphinxError):
    """Error raised when graphviz rendering fails or its output is invalid."""

    category = 'Graphviz error'
+
+
class ClickableMapDefinition:
    """A manipulator for clickable map file of graphviz.

    Parses the cmapx output of ``dot`` and exposes the map id plus the
    subset of map lines that actually carry hyperlinks.
    """
    maptag_re = re.compile('<map id="(.*?)"')
    href_re = re.compile('href=".*?"')

    def __init__(self, filename: str, content: str, dot: str = '') -> None:
        self.id: str | None = None       # id of the <map> element
        self.filename = filename         # path of the .map file (for errors)
        self.content = content.splitlines()
        self.clickable: list[str] = []   # lines containing href= attributes

        self.parse(dot=dot)

    def parse(self, dot: str) -> None:
        """Extract the map id and the clickable lines from the cmapx content.

        Raises GraphvizError when the content does not start with a <map> tag.
        """
        matched = self.maptag_re.match(self.content[0])
        if not matched:
            raise GraphvizError('Invalid clickable map file found: %s' % self.filename)

        self.id = matched.group(1)
        if self.id == '%3':
            # graphviz generates wrong ID if graph name not specified
            # https://gitlab.com/graphviz/graphviz/issues/1327
            hashed = sha1(dot.encode(), usedforsecurity=False).hexdigest()
            # fix: fallback id was misspelt 'grapviz'; only internal
            # consistency matters (usemap references this same id)
            self.id = 'graphviz%s' % hashed[-10:]
            self.content[0] = self.content[0].replace('%3', self.id)

        for line in self.content:
            if self.href_re.search(line):
                self.clickable.append(line)

    def generate_clickable_map(self) -> str:
        """Generate clickable map tags if clickable item exists.

        If not exists, this only returns empty string.
        """
        if self.clickable:
            return '\n'.join([self.content[0]] + self.clickable + [self.content[-1]])
        return ''
+
+
class graphviz(nodes.General, nodes.Inline, nodes.Element):
    """Docutils node carrying dot source (``code``) and rendering options."""
    pass
+
+
def figure_wrapper(directive: Directive, node: graphviz, caption: str) -> nodes.figure:
    """Wrap *node* in a figure node whose caption is *caption* parsed inline."""
    figure_node = nodes.figure('', node)
    # Alignment belongs to the figure, not the inner graph node.
    if 'align' in node:
        figure_node['align'] = node.attributes.pop('align')

    inline_nodes, system_messages = directive.state.inline_text(caption,
                                                                directive.lineno)
    caption_node = nodes.caption(caption, '', *inline_nodes)
    caption_node.extend(system_messages)
    set_source_info(directive, caption_node)
    figure_node.append(caption_node)
    return figure_node
+
+
def align_spec(argument: Any) -> str:
    """Validate the ``:align:`` option (one of left, center, right)."""
    valid_choices = ('left', 'center', 'right')
    return directives.choice(argument, valid_choices)
+
+
class Graphviz(SphinxDirective):
    """
    Directive to insert arbitrary dot markup.
    """
    has_content = True
    required_arguments = 0
    optional_arguments = 1
    final_argument_whitespace = False
    option_spec: OptionSpec = {
        'alt': directives.unchanged,
        'align': align_spec,
        'caption': directives.unchanged,
        'layout': directives.unchanged,
        'graphviz_dot': directives.unchanged,  # an old alias of `layout` option
        'name': directives.unchanged,
        'class': directives.class_option,
    }

    def run(self) -> list[Node]:
        if self.arguments:
            # Source comes from an external file given as the argument.
            document = self.state.document
            if self.content:
                return [document.reporter.warning(
                    __('Graphviz directive cannot have both content and '
                       'a filename argument'), line=self.lineno)]
            argument = search_image_for_language(self.arguments[0], self.env)
            rel_filename, filename = self.env.relfn2path(argument)
            self.env.note_dependency(rel_filename)
            try:
                with open(filename, encoding='utf-8') as fp:
                    dotcode = fp.read()
            except OSError:
                return [document.reporter.warning(
                    __('External Graphviz file %r not found or reading '
                       'it failed') % filename, line=self.lineno)]
        else:
            # Source is the directive body itself.
            dotcode = '\n'.join(self.content)
            rel_filename = None
            if not dotcode.strip():
                return [self.state_machine.reporter.warning(
                    __('Ignoring "graphviz" directive without content.'),
                    line=self.lineno)]

        node = graphviz()
        node['code'] = dotcode
        node['options'] = {'docname': self.env.docname}

        # `layout` wins over its deprecated alias `graphviz_dot`.
        for key in ('graphviz_dot', 'layout'):
            if key in self.options:
                node['options']['graphviz_dot'] = self.options[key]
        for option, attr in (('alt', 'alt'), ('align', 'align'),
                             ('class', 'classes')):
            if option in self.options:
                node[attr] = self.options[option]
        if rel_filename:
            node['filename'] = rel_filename

        if 'caption' in self.options:
            figure = figure_wrapper(self, node, self.options['caption'])
            self.add_name(figure)
            return [figure]
        self.add_name(node)
        return [node]
+
+
class GraphvizSimple(SphinxDirective):
    """
    Directive to insert arbitrary dot markup.
    """
    has_content = True
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = False
    option_spec: OptionSpec = {
        'alt': directives.unchanged,
        'align': align_spec,
        'caption': directives.unchanged,
        'layout': directives.unchanged,
        'graphviz_dot': directives.unchanged,  # an old alias of `layout` option
        'name': directives.unchanged,
        'class': directives.class_option,
    }

    def run(self) -> list[Node]:
        node = graphviz()
        # Reassemble full dot source: "<graph|digraph> <name> { <body> }".
        body = '\n'.join(self.content)
        node['code'] = f'{self.name} {self.arguments[0]} {{\n{body}\n}}\n'
        node['options'] = {'docname': self.env.docname}
        # `layout` wins over its deprecated alias `graphviz_dot`.
        for key in ('graphviz_dot', 'layout'):
            if key in self.options:
                node['options']['graphviz_dot'] = self.options[key]
        for option, attr in (('alt', 'alt'), ('align', 'align'),
                             ('class', 'classes')):
            if option in self.options:
                node[attr] = self.options[option]

        if 'caption' in self.options:
            figure = figure_wrapper(self, node, self.options['caption'])
            self.add_name(figure)
            return [figure]
        self.add_name(node)
        return [node]
+
+
def fix_svg_relative_paths(self: HTML5Translator | LaTeXTranslator | TexinfoTranslator,
                           filepath: str) -> None:
    """Change relative links in generated svg files to be relative to imgpath.

    dot writes hrefs relative to the document the graph appears in, but the
    SVG itself is served from the image directory, so each relative target
    must be rebased.  The file is only rewritten when a link was changed.
    """
    tree = ET.parse(filepath)  # NoQA: S314
    root = tree.getroot()
    ns = {'svg': 'http://www.w3.org/2000/svg', 'xlink': 'http://www.w3.org/1999/xlink'}
    href_name = '{http://www.w3.org/1999/xlink}href'
    modified = False

    for element in chain(
        root.findall('.//svg:image[@xlink:href]', ns),
        root.findall('.//svg:a[@xlink:href]', ns),
    ):
        scheme, hostname, rel_uri, query, fragment = urlsplit(element.attrib[href_name])
        if hostname:
            # not a relative link
            continue

        docname = self.builder.env.path2doc(self.document["source"])
        if docname is None:
            # This shouldn't happen!
            continue
        doc_dir = self.builder.app.outdir.joinpath(docname).resolve().parent

        # Rebase the target from the document's directory to the image dir.
        old_path = doc_dir / rel_uri
        img_path = doc_dir / self.builder.imgpath
        new_path = path.relpath(old_path, start=img_path)
        modified_url = urlunsplit((scheme, hostname, new_path, query, fragment))

        element.set(href_name, modified_url)
        modified = True

    if modified:
        tree.write(filepath)
+
+
def render_dot(self: HTML5Translator | LaTeXTranslator | TexinfoTranslator,
               code: str, options: dict, format: str,
               prefix: str = 'graphviz', filename: str | None = None,
               ) -> tuple[str | None, str | None]:
    """Render graphviz code into a PNG or PDF output file.

    Returns ``(relative filename, absolute output filename)``.  Both are
    ``None`` when the ``dot`` executable cannot be started (warned once per
    executable).  Raises :exc:`GraphvizError` when ``dot`` runs but fails or
    produces no output file.
    """
    graphviz_dot = options.get('graphviz_dot', self.builder.config.graphviz_dot)
    if not graphviz_dot:
        raise GraphvizError(
            __('graphviz_dot executable path must be set! %r') % graphviz_dot,
        )
    # Cache key: the output depends on the source, the per-node options,
    # the executable, and its configured arguments.
    hashkey = (code + str(options) + str(graphviz_dot) +
               str(self.builder.config.graphviz_dot_args)).encode()

    fname = f'{prefix}-{sha1(hashkey, usedforsecurity=False).hexdigest()}.{format}'
    relfn = posixpath.join(self.builder.imgpath, fname)
    outfn = path.join(self.builder.outdir, self.builder.imagedir, fname)

    if path.isfile(outfn):
        # Cache hit: already rendered with the same inputs.
        return relfn, outfn

    if (hasattr(self.builder, '_graphviz_warned_dot') and
            self.builder._graphviz_warned_dot.get(graphviz_dot)):
        # A previous run already failed to start this executable; stay quiet.
        return None, None

    ensuredir(path.dirname(outfn))

    dot_args = [graphviz_dot]
    dot_args.extend(self.builder.config.graphviz_dot_args)
    dot_args.extend(['-T' + format, '-o' + outfn])

    # Run dot from the directory of the source document/file so that
    # relative paths inside the graph resolve as authored.
    docname = options.get('docname', 'index')
    if filename:
        cwd = path.dirname(path.join(self.builder.srcdir, filename))
    else:
        cwd = path.dirname(path.join(self.builder.srcdir, docname))

    if format == 'png':
        # Also emit a client-side image map for clickable nodes.
        dot_args.extend(['-Tcmapx', '-o%s.map' % outfn])

    try:
        ret = subprocess.run(dot_args, input=code.encode(), capture_output=True,
                             cwd=cwd, check=True)
    except OSError:
        logger.warning(__('dot command %r cannot be run (needed for graphviz '
                          'output), check the graphviz_dot setting'), graphviz_dot)
        if not hasattr(self.builder, '_graphviz_warned_dot'):
            self.builder._graphviz_warned_dot = {}  # type: ignore[union-attr]
        self.builder._graphviz_warned_dot[graphviz_dot] = True
        return None, None
    except CalledProcessError as exc:
        raise GraphvizError(__('dot exited with error:\n[stderr]\n%r\n'
                               '[stdout]\n%r') % (exc.stderr, exc.stdout)) from exc
    if not path.isfile(outfn):
        raise GraphvizError(__('dot did not produce an output file:\n[stderr]\n%r\n'
                               '[stdout]\n%r') % (ret.stderr, ret.stdout))

    if format == 'svg':
        fix_svg_relative_paths(self, outfn)

    return relfn, outfn
+
+
def render_dot_html(self: HTML5Translator, node: graphviz, code: str, options: dict,
                    prefix: str = 'graphviz', imgcls: str | None = None,
                    alt: str | None = None, filename: str | None = None,
                    ) -> tuple[str, str]:
    """Render *code* and append the HTML markup for it to ``self.body``.

    Always raises :exc:`docutils.nodes.SkipNode` (emitting either the image
    markup or, when rendering failed, the escaped dot source).
    """
    format = self.builder.config.graphviz_output_format
    try:
        if format not in ('png', 'svg'):
            raise GraphvizError(__("graphviz_output_format must be one of 'png', "
                                   "'svg', but is %r") % format)
        fname, outfn = render_dot(self, code, options, format, prefix, filename)
    except GraphvizError as exc:
        logger.warning(__('dot code %r: %s'), code, exc)
        raise nodes.SkipNode from exc

    classes = [imgcls, 'graphviz'] + node.get('classes', [])
    imgcls = ' '.join(filter(None, classes))

    if fname is None:
        # dot could not be run: fall back to showing the escaped source
        self.body.append(self.encode(code))
    else:
        if alt is None:
            alt = node.get('alt', self.encode(code).strip())
        if 'align' in node:
            self.body.append('<div align="%s" class="align-%s">' %
                             (node['align'], node['align']))
        if format == 'svg':
            # <object> keeps embedded links clickable; alt text is the fallback
            self.body.append('<div class="graphviz">')
            self.body.append('<object data="%s" type="image/svg+xml" class="%s">\n' %
                             (fname, imgcls))
            self.body.append('<p class="warning">%s</p>' % alt)
            self.body.append('</object></div>\n')
        else:
            assert outfn is not None
            with open(outfn + '.map', encoding='utf-8') as mapfile:
                imgmap = ClickableMapDefinition(outfn + '.map', mapfile.read(), dot=code)
            if imgmap.clickable:
                # has a map
                self.body.append('<div class="graphviz">')
                self.body.append('<img src="%s" alt="%s" usemap="#%s" class="%s" />' %
                                 (fname, alt, imgmap.id, imgcls))
                self.body.append('</div>\n')
                self.body.append(imgmap.generate_clickable_map())
            else:
                # nothing in image map
                self.body.append('<div class="graphviz">')
                self.body.append('<img src="%s" alt="%s" class="%s" />' %
                                 (fname, alt, imgcls))
                self.body.append('</div>\n')
        if 'align' in node:
            self.body.append('</div>\n')

    raise nodes.SkipNode
+
+
def html_visit_graphviz(self: HTML5Translator, node: graphviz) -> None:
    """HTML visitor: delegate rendering of a graphviz node."""
    render_dot_html(self, node, node['code'], node['options'],
                    filename=node.get('filename'))
+
+
def render_dot_latex(self: LaTeXTranslator, node: graphviz, code: str,
                     options: dict, prefix: str = 'graphviz',
                     filename: str | None = None) -> None:
    """Render *code* as PDF and append LaTeX markup; always raises SkipNode."""
    try:
        fname, outfn = render_dot(self, code, options, 'pdf', prefix, filename)
    except GraphvizError as exc:
        logger.warning(__('dot code %r: %s'), code, exc)
        raise nodes.SkipNode from exc

    is_inline = self.is_inline(node)

    if not is_inline:
        pre = post = ''
        # LaTeX wrappers realizing the requested horizontal alignment.
        wrappers = {
            'left': ('{', r'\hspace*{\fill}}'),
            'right': (r'{\hspace*{\fill}', '}'),
            'center': (r'{\hfill', r'\hspace*{\fill}}'),
        }
        if 'align' in node:
            pre, post = wrappers.get(node['align'], ('', ''))
        self.body.append('\n%s' % pre)

    self.body.append(r'\sphinxincludegraphics[]{%s}' % fname)

    if not is_inline:
        self.body.append('%s\n' % post)

    raise nodes.SkipNode
+
+
def latex_visit_graphviz(self: LaTeXTranslator, node: graphviz) -> None:
    """LaTeX visitor: delegate rendering of a graphviz node."""
    render_dot_latex(self, node, node['code'], node['options'],
                     filename=node.get('filename'))
+
+
def render_dot_texinfo(self: TexinfoTranslator, node: graphviz, code: str,
                       options: dict, prefix: str = 'graphviz') -> None:
    """Render *code* as PNG and append Texinfo markup; always raises SkipNode."""
    try:
        fname, _outfn = render_dot(self, code, options, 'png', prefix)
    except GraphvizError as exc:
        logger.warning(__('dot code %r: %s'), code, exc)
        raise nodes.SkipNode from exc
    if fname is not None:
        # Drop the '.png' suffix; the format is given in the @image spec.
        self.body.append('@image{%s,,,[graphviz],png}\n' % fname[:-4])
    raise nodes.SkipNode
+
+
def texinfo_visit_graphviz(self: TexinfoTranslator, node: graphviz) -> None:
    """Texinfo visitor: delegate rendering of a graphviz node."""
    render_dot_texinfo(self, node, node['code'], node['options'])
+
+
def text_visit_graphviz(self: TextTranslator, node: graphviz) -> None:
    """Text visitor: emit a placeholder instead of the graph image."""
    if 'alt' in node.attributes:
        placeholder = _('[graph: %s]') % node['alt']
    else:
        placeholder = _('[graph]')
    self.add_text(placeholder)
    raise nodes.SkipNode
+
+
def man_visit_graphviz(self: ManualPageTranslator, node: graphviz) -> None:
    """Man-page visitor: emit a placeholder instead of the graph image."""
    if 'alt' in node.attributes:
        placeholder = _('[graph: %s]') % node['alt']
    else:
        placeholder = _('[graph]')
    self.body.append(placeholder)
    raise nodes.SkipNode
+
+
def on_config_inited(_app: Sphinx, config: Config) -> None:
    """Expose the bundled graphviz.css through html_static_path."""
    bundled_css = path.join(sphinx.package_dir, 'templates', 'graphviz', 'graphviz.css')
    config.html_static_path.append(bundled_css)
+
+
def setup(app: Sphinx) -> dict[str, Any]:
    """Register graphviz node, directives, and config values with Sphinx."""
    app.add_node(graphviz,
                 html=(html_visit_graphviz, None),
                 latex=(latex_visit_graphviz, None),
                 texinfo=(texinfo_visit_graphviz, None),
                 text=(text_visit_graphviz, None),
                 man=(man_visit_graphviz, None))
    app.add_directive('graphviz', Graphviz)
    # 'graph' and 'digraph' share the simplified directive form.
    for directive_name in ('graph', 'digraph'):
        app.add_directive(directive_name, GraphvizSimple)
    app.add_config_value('graphviz_dot', 'dot', 'html')
    app.add_config_value('graphviz_dot_args', [], 'html')
    app.add_config_value('graphviz_output_format', 'png', 'html')
    app.add_css_file('graphviz.css')
    app.connect('config-inited', on_config_inited)
    return {'version': sphinx.__display_version__, 'parallel_read_safe': True}
diff --git a/sphinx/ext/ifconfig.py b/sphinx/ext/ifconfig.py
new file mode 100644
index 0000000..1d2b197
--- /dev/null
+++ b/sphinx/ext/ifconfig.py
@@ -0,0 +1,81 @@
+"""Provides the ``ifconfig`` directive.
+
+The ``ifconfig`` directive enables writing documentation
+that is included depending on configuration variables.
+
+Usage::
+
+ .. ifconfig:: releaselevel in ('alpha', 'beta', 'rc')
+
+ This stuff is only included in the built docs for unstable versions.
+
+The argument for ``ifconfig`` is a plain Python expression, evaluated in the
+namespace of the project configuration (that is, all variables from
+``conf.py`` are available.)
+"""
+
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, Any
+
+from docutils import nodes
+
+import sphinx
+from sphinx.util.docutils import SphinxDirective
+from sphinx.util.nodes import nested_parse_with_titles
+
+if TYPE_CHECKING:
+ from docutils.nodes import Node
+
+ from sphinx.application import Sphinx
+ from sphinx.util.typing import OptionSpec
+
+
class ifconfig(nodes.Element):
    """Docutils node holding conditional content and its ``expr`` string."""
    pass
+
+
class IfConfig(SphinxDirective):
    """Directive whose body is kept only if a config expression is truthy."""

    has_content = True
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec: OptionSpec = {}

    def run(self) -> list[Node]:
        node = ifconfig()
        node.document = self.state.document
        self.set_source_info(node)
        # Store the raw expression; it is evaluated at doctree-resolved time.
        node['expr'] = self.arguments[0]
        nested_parse_with_titles(self.state, self.content, node,
                                 self.content_offset)
        return [node]
+
+
def process_ifconfig_nodes(app: Sphinx, doctree: nodes.document, docname: str) -> None:
    """Evaluate each ifconfig expression and keep or drop its content."""
    namespace = {confval.name: confval.value for confval in app.config}
    namespace.update(app.config.__dict__.copy())
    namespace['builder'] = app.builder.name
    for node in list(doctree.findall(ifconfig)):
        try:
            # NOTE: eval of input from conf.py, which is trusted by design.
            result = eval(node['expr'], namespace)  # NoQA: PGH001
        except Exception as err:
            # handle exceptions in a clean fashion
            from traceback import format_exception_only
            msg = ''.join(format_exception_only(err.__class__, err))
            error_node = doctree.reporter.error(
                'Exception occurred in ifconfig expression: \n%s' % msg,
                base_node=node)
            node.replace_self(error_node)
        else:
            node.replace_self(node.children if result else [])
+
+
def setup(app: Sphinx) -> dict[str, Any]:
    """Register the ifconfig node, directive, and resolver with Sphinx."""
    app.add_node(ifconfig)
    app.add_directive('ifconfig', IfConfig)
    app.connect('doctree-resolved', process_ifconfig_nodes)
    return {
        'version': sphinx.__display_version__,
        'parallel_read_safe': True,
    }
diff --git a/sphinx/ext/imgconverter.py b/sphinx/ext/imgconverter.py
new file mode 100644
index 0000000..071a2cf
--- /dev/null
+++ b/sphinx/ext/imgconverter.py
@@ -0,0 +1,94 @@
+"""Image converter extension for Sphinx"""
+
+from __future__ import annotations
+
+import subprocess
+import sys
+from subprocess import CalledProcessError
+from typing import TYPE_CHECKING, Any
+
+import sphinx
+from sphinx.errors import ExtensionError
+from sphinx.locale import __
+from sphinx.transforms.post_transforms.images import ImageConverter
+from sphinx.util import logging
+
+if TYPE_CHECKING:
+ from sphinx.application import Sphinx
+
+logger = logging.getLogger(__name__)
+
+
class ImagemagickConverter(ImageConverter):
    """Image converter backed by ImageMagick (``convert``/``magick``)."""

    conversion_rules = [
        ('image/svg+xml', 'image/png'),
        ('image/gif', 'image/png'),
        ('application/pdf', 'image/png'),
        ('application/illustrator', 'image/png'),
    ]

    def is_available(self) -> bool:
        """Confirms the converter is available or not."""
        args = [self.config.image_converter, '-version']
        logger.debug('Invoking %r ...', args)
        try:
            subprocess.run(args, capture_output=True, check=True)
        except OSError as exc:
            logger.warning(__(
                "Unable to run the image conversion command %r. "
                "'sphinx.ext.imgconverter' requires ImageMagick by default. "
                "Ensure it is installed, or set the 'image_converter' option "
                "to a custom conversion command.\n\n"
                "Traceback: %s",
            ), self.config.image_converter, exc)
            return False
        except CalledProcessError as exc:
            logger.warning(__('convert exited with error:\n'
                              '[stderr]\n%r\n[stdout]\n%r'),
                           exc.stderr, exc.stdout)
            return False
        return True

    def convert(self, _from: str, _to: str) -> bool:
        """Converts the image to expected one."""
        # append an index 0 to source filename to pick up the first frame
        # (or first page) of image (ex. Animation GIF, PDF)
        source = _from + '[0]'
        args = [self.config.image_converter,
                *self.config.image_converter_args,
                source, _to]
        logger.debug('Invoking %r ...', args)
        try:
            subprocess.run(args, capture_output=True, check=True)
        except OSError:
            logger.warning(__('convert command %r cannot be run, '
                              'check the image_converter setting'),
                           self.config.image_converter)
            return False
        except CalledProcessError as exc:
            raise ExtensionError(__('convert exited with error:\n'
                                    '[stderr]\n%r\n[stdout]\n%r') %
                                 (exc.stderr, exc.stdout)) from exc
        return True
+
+
def setup(app: Sphinx) -> dict[str, Any]:
    """Register the ImageMagick post-transform and its config values."""
    app.add_post_transform(ImagemagickConverter)
    if sys.platform == 'win32':
        # On Windows, prefer ImageMagick v7's 'magick' front-end so we don't
        # collide with the convert.exe bundled with Windows.
        converter, converter_args = 'magick', ['convert']
    else:
        # Elsewhere default to ImageMagick v6's 'convert'; notably
        # Debian/Ubuntu still ship v6 without a 'magick' command.
        converter, converter_args = 'convert', []
    app.add_config_value('image_converter', converter, 'env')
    app.add_config_value('image_converter_args', converter_args, 'env')

    return {
        'version': sphinx.__display_version__,
        'parallel_read_safe': True,
        'parallel_write_safe': True,
    }
diff --git a/sphinx/ext/imgmath.py b/sphinx/ext/imgmath.py
new file mode 100644
index 0000000..a5f49d9
--- /dev/null
+++ b/sphinx/ext/imgmath.py
@@ -0,0 +1,407 @@
+"""Render math in HTML via dvipng or dvisvgm."""
+
+from __future__ import annotations
+
+import base64
+import contextlib
+import re
+import shutil
+import subprocess
+import tempfile
+from hashlib import sha1
+from os import path
+from subprocess import CalledProcessError
+from typing import TYPE_CHECKING, Any
+
+from docutils import nodes
+
+import sphinx
+from sphinx import package_dir
+from sphinx.errors import SphinxError
+from sphinx.locale import _, __
+from sphinx.util import logging
+from sphinx.util.math import get_node_equation_number, wrap_displaymath
+from sphinx.util.osutil import ensuredir
+from sphinx.util.png import read_png_depth, write_png_depth
+from sphinx.util.template import LaTeXRenderer
+
+if TYPE_CHECKING:
+ import os
+
+ from docutils.nodes import Element
+
+ from sphinx.application import Sphinx
+ from sphinx.builders import Builder
+ from sphinx.config import Config
+ from sphinx.writers.html import HTML5Translator
+
+logger = logging.getLogger(__name__)
+
# Directory holding the bundled LaTeX templates (template.tex_t / preview.tex_t).
templates_path = path.join(package_dir, 'templates', 'imgmath')

# Nothing is exported for `from sphinx.ext.imgmath import *`.
__all__ = ()
+
+
class MathExtError(SphinxError):
    """Error raised when a math-rendering tool runs but fails.

    The failing tool's stderr/stdout, when given, are appended to the message.
    """
    category = 'Math extension error'

    def __init__(
        self, msg: str, stderr: str | None = None, stdout: str | None = None,
    ) -> None:
        parts = [msg]
        if stderr:
            parts.append('\n[stderr]\n' + stderr)
        if stdout:
            parts.append('\n[stdout]\n' + stdout)
        super().__init__(''.join(parts))
+
+
class InvokeError(SphinxError):
    """Error raised when an external converter could not be started at all."""
+
+
# Image formats this extension can produce.
SUPPORT_FORMAT = ('png', 'svg')

# dvipng --depth stdout line, e.g. "[1 depth=3]".
depth_re = re.compile(r'\[\d+ depth=(-?\d+)\]')
# dvisvgm stderr depth report, e.g. "..., depth=2.5pt".
depthsvg_re = re.compile(r'.*, depth=(.*)pt')
# Depth comment appended to generated SVGs by write_svg_depth().
depthsvgcomment_re = re.compile(r'<!-- DEPTH=(-?\d+) -->')
+
+
def read_svg_depth(filename: str) -> int | None:
    """Read the depth from comment at last line of SVG file.

    Returns the stored baseline depth, or None when no depth comment is
    present (including when the file is empty).
    """
    # fix: initialize so an empty file no longer raises UnboundLocalError
    last_line = ''
    with open(filename, encoding="utf-8") as f:
        for last_line in f:  # noqa: B007
            pass
    # Only last line is checked
    matched = depthsvgcomment_re.match(last_line)
    if matched:
        return int(matched.group(1))
    return None
+
+
def write_svg_depth(filename: str, depth: int) -> None:
    """Append the baseline depth to an SVG file as a trailing comment."""
    with open(filename, 'a', encoding="utf-8") as svg_file:
        svg_file.write(f'\n<!-- DEPTH={depth} -->')
+
+
def generate_latex_macro(image_format: str,
                         math: str,
                         config: Config,
                         confdir: str | os.PathLike[str] = '') -> str:
    """Generate LaTeX macro.

    Renders *math* into a LaTeX document via a template; project-local
    templates in ``config.templates_path`` override the bundled ones.
    """
    variables = {
        'fontsize': config.imgmath_font_size,
        'baselineskip': int(round(config.imgmath_font_size * 1.2)),
        'preamble': config.imgmath_latex_preamble,
        # the dvips option is important when imgmath_latex in ["xelatex", "tectonic"],
        # it has no impact when imgmath_latex="latex"
        'tightpage': '' if image_format == 'png' else ',dvips,tightpage',
        'math': math,
    }

    # preview.tex_t wraps the math with preview-latex to report the depth.
    if config.imgmath_use_preview:
        template_name = 'preview.tex_t'
    else:
        template_name = 'template.tex_t'

    for template_dir in config.templates_path:
        template = path.join(confdir, template_dir, template_name)
        if path.exists(template):
            return LaTeXRenderer().render(template, variables)

    return LaTeXRenderer(templates_path).render(template_name, variables)
+
+
def ensure_tempdir(builder: Builder) -> str:
    """Create temporary directory.

    Use only one tempdir per build -- the use of a directory is cleaner
    than using temporary files, since we can clean up everything at once
    just removing the whole directory (see cleanup_tempdir).
    """
    tempdir = getattr(builder, '_imgmath_tempdir', None)
    if tempdir is None:
        tempdir = tempfile.mkdtemp()
        builder._imgmath_tempdir = tempdir  # type: ignore[attr-defined]
    return tempdir
+
+
def compile_math(latex: str, builder: Builder) -> str:
    """Compile LaTeX macros for math to DVI.

    Returns the path of the produced ``math.dvi`` (``math.xdv`` for
    xelatex/tectonic).  Raises InvokeError when the LaTeX binary cannot be
    started, MathExtError when it runs but exits with an error.
    """
    tempdir = ensure_tempdir(builder)
    filename = path.join(tempdir, 'math.tex')
    with open(filename, 'w', encoding='utf-8') as f:
        f.write(latex)

    imgmath_latex_name = path.basename(builder.config.imgmath_latex)

    # build latex command; old versions of latex don't have the
    # --output-directory option, so we have to manually chdir to the
    # temp dir to run it.
    command = [builder.config.imgmath_latex]
    if imgmath_latex_name not in ['tectonic']:
        command.append('--interaction=nonstopmode')
    # add custom args from the config file
    command.extend(builder.config.imgmath_latex_args)
    command.append('math.tex')

    try:
        subprocess.run(command, capture_output=True, cwd=tempdir, check=True,
                       encoding='ascii')
        if imgmath_latex_name in ['xelatex', 'tectonic']:
            # these engines emit extended DVI (.xdv)
            return path.join(tempdir, 'math.xdv')
        else:
            return path.join(tempdir, 'math.dvi')
    except OSError as exc:
        logger.warning(__('LaTeX command %r cannot be run (needed for math '
                          'display), check the imgmath_latex setting'),
                       builder.config.imgmath_latex)
        raise InvokeError from exc
    except CalledProcessError as exc:
        msg = 'latex exited with error'
        raise MathExtError(msg, exc.stderr, exc.stdout) from exc
+
+
def convert_dvi_to_image(command: list[str], name: str) -> tuple[str, str]:
    """Convert DVI file to specific image format.

    Runs *command* and returns its (stdout, stderr); *name* is only used in
    diagnostics.  Raises InvokeError when the tool cannot be started and
    MathExtError when it exits with an error.
    """
    try:
        result = subprocess.run(command, capture_output=True, check=True,
                                encoding='ascii')
    except OSError as exc:
        logger.warning(__('%s command %r cannot be run (needed for math '
                          'display), check the imgmath_%s setting'),
                       name, command[0], name)
        raise InvokeError from exc
    except CalledProcessError as exc:
        raise MathExtError('%s exited with error' % name, exc.stderr, exc.stdout) from exc
    return result.stdout, result.stderr
+
+
def convert_dvi_to_png(dvipath: str, builder: Builder, out_path: str) -> int | None:
    """Convert DVI file to PNG image.

    Returns the baseline depth reported by dvipng (and persists it into the
    PNG via write_png_depth), or None when preview mode is off or no depth
    line was found.
    """
    name = 'dvipng'
    command = [builder.config.imgmath_dvipng, '-o', out_path, '-T', 'tight', '-z9']
    command.extend(builder.config.imgmath_dvipng_args)
    if builder.config.imgmath_use_preview:
        command.append('--depth')
    command.append(dvipath)

    stdout, stderr = convert_dvi_to_image(command, name)

    depth = None
    if builder.config.imgmath_use_preview:
        for line in stdout.splitlines():
            matched = depth_re.match(line)
            if matched:
                depth = int(matched.group(1))
                # store the depth in the PNG so cache hits can recover it
                write_png_depth(out_path, depth)
                break

    return depth
+
+
def convert_dvi_to_svg(dvipath: str, builder: Builder, out_path: str) -> int | None:
    """Convert DVI file to SVG image.

    Returns the baseline depth (and appends it to the SVG as a trailing
    comment), or None when preview mode is off or no depth was reported.
    """
    name = 'dvisvgm'
    command = [builder.config.imgmath_dvisvgm, '-o', out_path]
    command.extend(builder.config.imgmath_dvisvgm_args)
    command.append(dvipath)

    stdout, stderr = convert_dvi_to_image(command, name)

    depth = None
    if builder.config.imgmath_use_preview:
        for line in stderr.splitlines():  # not stdout !
            matched = depthsvg_re.match(line)
            if matched:
                # dvisvgm reports pt; convert to px assuming 100ppi
                depth = round(float(matched.group(1)) * 100 / 72.27)  # assume 100ppi
                write_svg_depth(out_path, depth)
                break

    return depth
+
+
def render_math(
    self: HTML5Translator,
    math: str,
) -> tuple[str | None, int | None]:
    """Render the LaTeX math expression *math* using latex and dvipng or
    dvisvgm.

    Return the image absolute filename and the "depth",
    that is, the distance of image bottom and baseline in pixels, if the
    option to use preview_latex is switched on.

    Error handling may seem strange, but follows a pattern: if LaTeX or dvipng
    (dvisvgm) aren't available, only a warning is generated (since that enables
    people on machines without these programs to at least build the rest of the
    docs successfully). If the programs are there, however, they may not fail
    since that indicates a problem in the math source.
    """
    image_format = self.builder.config.imgmath_image_format.lower()
    if image_format not in SUPPORT_FORMAT:
        unsupported_format_msg = 'imgmath_image_format must be either "png" or "svg"'
        raise MathExtError(unsupported_format_msg)

    latex = generate_latex_macro(image_format,
                                 math,
                                 self.builder.config,
                                 self.builder.confdir)

    # The image name is a content hash, so identical math is rendered once.
    filename = f"{sha1(latex.encode(), usedforsecurity=False).hexdigest()}.{image_format}"
    generated_path = path.join(self.builder.outdir, self.builder.imagedir, 'math', filename)
    ensuredir(path.dirname(generated_path))
    if path.isfile(generated_path):
        # cache hit: recover the depth stored with the image
        if image_format == 'png':
            depth = read_png_depth(generated_path)
        elif image_format == 'svg':
            depth = read_svg_depth(generated_path)
        return generated_path, depth

    # if latex or dvipng (dvisvgm) has failed once, don't bother to try again
    if hasattr(self.builder, '_imgmath_warned_latex') or \
       hasattr(self.builder, '_imgmath_warned_image_translator'):
        return None, None

    # .tex -> .dvi
    try:
        dvipath = compile_math(latex, self.builder)
    except InvokeError:
        self.builder._imgmath_warned_latex = True  # type: ignore[attr-defined]
        return None, None

    # .dvi -> .png/.svg
    try:
        if image_format == 'png':
            depth = convert_dvi_to_png(dvipath, self.builder, generated_path)
        elif image_format == 'svg':
            depth = convert_dvi_to_svg(dvipath, self.builder, generated_path)
    except InvokeError:
        self.builder._imgmath_warned_image_translator = True  # type: ignore[attr-defined]
        return None, None

    return generated_path, depth
+
+
def render_maths_to_base64(image_format: str, generated_path: str) -> str:
    """Return the image at *generated_path* as a base64 ``data:`` URI."""
    with open(generated_path, "rb") as f:
        encoded = base64.b64encode(f.read()).decode(encoding='utf-8')
    uri_prefix = {'png': 'data:image/png;base64,',
                  'svg': 'data:image/svg+xml;base64,'}.get(image_format)
    if uri_prefix is None:
        unsupported_format_msg = 'imgmath_image_format must be either "png" or "svg"'
        raise MathExtError(unsupported_format_msg)
    return uri_prefix + encoded
+
+
def clean_up_files(app: Sphinx, exc: Exception) -> None:
    """'build-finished' handler removing temporary/embedded-only math images.

    *exc* is the exception the build finished with; cleanup is skipped when
    the build failed.
    """
    if exc:
        return

    if hasattr(app.builder, '_imgmath_tempdir'):
        # best-effort removal of the shared LaTeX scratch directory
        with contextlib.suppress(Exception):
            shutil.rmtree(app.builder._imgmath_tempdir)

    if app.builder.config.imgmath_embed:
        # in embed mode, the images are still generated in the math output dir
        # to be shared across workers, but are not useful to the final document
        with contextlib.suppress(Exception):
            shutil.rmtree(path.join(app.builder.outdir, app.builder.imagedir, 'math'))
+
+
def get_tooltip(self: HTML5Translator, node: Element) -> str:
    """Return an ``alt`` attribute for *node*, or '' when tooltips are off."""
    if not self.builder.config.imgmath_add_tooltips:
        return ''
    return ' alt="%s"' % self.encode(node.astext()).strip()
+
+
def html_visit_math(self: HTML5Translator, node: nodes.math) -> None:
    """Render inline math as an image in HTML output; always raises SkipNode.

    On MathExtError a system message is emitted; when rendering tools are
    missing, the raw LaTeX is shown as text instead.
    """
    try:
        rendered_path, depth = render_math(self, '$' + node.astext() + '$')
    except MathExtError as exc:
        msg = str(exc)
        sm = nodes.system_message(msg, type='WARNING', level=2,
                                  backrefs=[], source=node.astext())
        sm.walkabout(self)
        logger.warning(__('display latex %r: %s'), node.astext(), msg)
        raise nodes.SkipNode from exc

    if rendered_path is None:
        # something failed -- use text-only as a bad substitute
        self.body.append('<span class="math">%s</span>' %
                         self.encode(node.astext()).strip())
    else:
        if self.builder.config.imgmath_embed:
            image_format = self.builder.config.imgmath_image_format.lower()
            img_src = render_maths_to_base64(image_format, rendered_path)
        else:
            bname = path.basename(rendered_path)
            relative_path = path.join(self.builder.imgpath, 'math', bname)
            img_src = relative_path.replace(path.sep, '/')
        c = f'<img class="math" src="{img_src}"' + get_tooltip(self, node)
        if depth is not None:
            # shift the image down so its baseline lines up with the text
            c += f' style="vertical-align: {-depth:d}px"'
        self.body.append(c + '/>')
    raise nodes.SkipNode
+
+
def html_visit_displaymath(self: HTML5Translator, node: nodes.math_block) -> None:
    """Render display math as an image in HTML output; always raises SkipNode.

    Adds the equation number and permalink when the node is numbered; falls
    back to text when rendering tools are missing.
    """
    if node['nowrap']:
        latex = node.astext()
    else:
        # wrap in a displaymath environment before rendering
        latex = wrap_displaymath(node.astext(), None, False)
    try:
        rendered_path, depth = render_math(self, latex)
    except MathExtError as exc:
        msg = str(exc)
        sm = nodes.system_message(msg, type='WARNING', level=2,
                                  backrefs=[], source=node.astext())
        sm.walkabout(self)
        logger.warning(__('inline latex %r: %s'), node.astext(), msg)
        raise nodes.SkipNode from exc
    self.body.append(self.starttag(node, 'div', CLASS='math'))
    self.body.append('<p>')
    if node['number']:
        number = get_node_equation_number(self, node)
        self.body.append('<span class="eqno">(%s)' % number)
        self.add_permalink_ref(node, _('Link to this equation'))
        self.body.append('</span>')

    if rendered_path is None:
        # something failed -- use text-only as a bad substitute
        self.body.append('<span class="math">%s</span></p>\n</div>' %
                         self.encode(node.astext()).strip())
    else:
        if self.builder.config.imgmath_embed:
            image_format = self.builder.config.imgmath_image_format.lower()
            img_src = render_maths_to_base64(image_format, rendered_path)
        else:
            bname = path.basename(rendered_path)
            relative_path = path.join(self.builder.imgpath, 'math', bname)
            img_src = relative_path.replace(path.sep, '/')
        self.body.append(f'<img src="{img_src}"' + get_tooltip(self, node) +
                         '/></p>\n</div>')
    raise nodes.SkipNode
+
+
def setup(app: Sphinx) -> dict[str, Any]:
    """Register the imgmath HTML math renderer and its configuration."""
    app.add_html_math_renderer('imgmath',
                               (html_visit_math, None),
                               (html_visit_displaymath, None))

    defaults = (
        ('imgmath_image_format', 'png'),
        ('imgmath_dvipng', 'dvipng'),
        ('imgmath_dvisvgm', 'dvisvgm'),
        ('imgmath_latex', 'latex'),
        ('imgmath_use_preview', False),
        ('imgmath_dvipng_args', ['-gamma', '1.5', '-D', '110', '-bg', 'Transparent']),
        ('imgmath_dvisvgm_args', ['--no-fonts']),
        ('imgmath_latex_args', []),
        ('imgmath_latex_preamble', ''),
        ('imgmath_add_tooltips', True),
        ('imgmath_font_size', 12),
    )
    for name, default in defaults:
        app.add_config_value(name, default, 'html')
    app.add_config_value('imgmath_embed', False, 'html', [bool])
    app.connect('build-finished', clean_up_files)
    return {'version': sphinx.__display_version__, 'parallel_read_safe': True}
diff --git a/sphinx/ext/inheritance_diagram.py b/sphinx/ext/inheritance_diagram.py
new file mode 100644
index 0000000..3a015a2
--- /dev/null
+++ b/sphinx/ext/inheritance_diagram.py
@@ -0,0 +1,494 @@
+r"""Defines a docutils directive for inserting inheritance diagrams.
+
+Provide the directive with one or more classes or modules (separated
+by whitespace). For modules, all of the classes in that module will
+be used.
+
+Example::
+
+ Given the following classes:
+
+ class A: pass
+ class B(A): pass
+ class C(A): pass
+ class D(B, C): pass
+ class E(B): pass
+
+   .. inheritance-diagram:: D E
+
+ Produces a graph like the following:
+
+ A
+ / \
+ B C
+ / \ /
+ E D
+
+The graph is inserted as a PNG+image map into HTML and a PDF in
+LaTeX.
+"""
+
+from __future__ import annotations
+
+import builtins
+import hashlib
+import inspect
+import re
+from collections.abc import Iterable, Sequence
+from importlib import import_module
+from os import path
+from typing import TYPE_CHECKING, Any, cast
+
+from docutils import nodes
+from docutils.parsers.rst import directives
+
+import sphinx
+from sphinx import addnodes
+from sphinx.ext.graphviz import (
+ figure_wrapper,
+ graphviz,
+ render_dot_html,
+ render_dot_latex,
+ render_dot_texinfo,
+)
+from sphinx.util.docutils import SphinxDirective
+
+if TYPE_CHECKING:
+ from docutils.nodes import Node
+
+ from sphinx.application import Sphinx
+ from sphinx.environment import BuildEnvironment
+ from sphinx.util.typing import OptionSpec
+ from sphinx.writers.html import HTML5Translator
+ from sphinx.writers.latex import LaTeXTranslator
+ from sphinx.writers.texinfo import TexinfoTranslator
+
# Splits "pkg.mod.Name" into an optional dotted module prefix (group 1)
# and the final class/module name (group 2).
module_sig_re = re.compile(r'''^(?:([\w.]*)\.)?  # module names
                           (\w+)  \s* $          # class/final module name
                           ''', re.VERBOSE)


# All classes defined in the ``builtins`` module, used to optionally
# exclude Python builtins from generated diagrams.
py_builtins = [obj for obj in vars(builtins).values()
               if inspect.isclass(obj)]
+
+
def try_import(objname: str) -> Any:
    """Import the module or object named *objname*.

    *objname* may be a module name or a dotted ``module.attribute`` path.
    Returns the imported module or object, or ``None`` on failure.
    """
    try:
        return import_module(objname)
    except TypeError:
        # a relative name without a package cannot be imported
        return None
    except ImportError:
        pass

    # Not importable as a module -- try the "module.attribute" form.
    matched = module_sig_re.match(objname)
    if matched is None:
        return None
    modname, attrname = matched.groups()
    if modname is None:
        return None
    try:
        return getattr(import_module(modname), attrname, None)
    except ImportError:
        return None
+
+
def import_classes(name: str, currmodule: str) -> Any:
    """Import the class named *name* (or all classes of a module).

    *name* is first resolved relative to *currmodule*, then as a
    fully-qualified name.  Raises InheritanceException on failure.
    """
    # prefer a lookup relative to the current module
    target = try_import(currmodule + '.' + name) if currmodule else None
    if target is None:
        # fall back to an absolute lookup
        target = try_import(name)
    if target is None:
        raise InheritanceException(
            'Could not import class or module %r specified for '
            'inheritance diagram' % name)

    if inspect.isclass(target):
        # If imported object is a class, just return it
        return [target]
    if inspect.ismodule(target):
        # If imported object is a module, return classes defined on it
        return [value for value in target.__dict__.values()
                if inspect.isclass(value) and value.__module__ == target.__name__]
    raise InheritanceException('%r specified for inheritance diagram is '
                               'not a class or module' % name)
+
+
class InheritanceException(Exception):
    """Raised when a class/module for the diagram cannot be imported,
    is not a class or module, or yields no classes."""
    pass
+
+
class InheritanceGraph:
    """
    Given a list of classes, determines the set of classes that they inherit
    from all the way to the root "object", and then is able to generate a
    graphviz dot graph from them.
    """
    def __init__(self, class_names: list[str], currmodule: str, show_builtins: bool = False,
                 private_bases: bool = False, parts: int = 0,
                 aliases: dict[str, str] | None = None, top_classes: Sequence[Any] = (),
                 ) -> None:
        """*class_names* is a list of child classes to show bases from.

        If *show_builtins* is True, then Python builtins will be shown
        in the graph.

        :raises InheritanceException: if no classes remain after filtering
        """
        self.class_names = class_names
        classes = self._import_classes(class_names, currmodule)
        self.class_info = self._class_info(classes, show_builtins,
                                           private_bases, parts, aliases, top_classes)
        if not self.class_info:
            msg = 'No classes found for inheritance diagram'
            raise InheritanceException(msg)

    def _import_classes(self, class_names: list[str], currmodule: str) -> list[Any]:
        """Import a list of classes."""
        classes: list[Any] = []
        for name in class_names:
            classes.extend(import_classes(name, currmodule))
        return classes

    def _class_info(self, classes: list[Any], show_builtins: bool, private_bases: bool,
                    parts: int, aliases: dict[str, str] | None, top_classes: Sequence[Any],
                    ) -> list[tuple[str, str, list[str], str]]:
        """Return name and bases for all classes that are ancestors of
        *classes*.

        *parts* gives the number of dotted name parts to include in the
        displayed node names, from right to left. If given as a negative, the
        number of parts to drop from the left. A value of 0 displays the full
        dotted name. E.g. ``sphinx.ext.inheritance_diagram.InheritanceGraph``
        with ``parts=2`` or ``parts=-2`` gets displayed as
        ``inheritance_diagram.InheritanceGraph``, and as
        ``ext.inheritance_diagram.InheritanceGraph`` with ``parts=3`` or
        ``parts=-1``.

        *top_classes* gives the name(s) of the top most ancestor class to
        traverse to. Multiple names can be specified separated by comma.
        """
        all_classes = {}

        def recurse(cls: Any) -> None:
            # walk up the base classes, recording each class exactly once
            if not show_builtins and cls in py_builtins:
                return
            if not private_bases and cls.__name__.startswith('_'):
                return

            nodename = self.class_name(cls, parts, aliases)
            fullname = self.class_name(cls, 0, aliases)

            # Use first line of docstring as tooltip, if available
            tooltip = None
            try:
                if cls.__doc__:
                    doc = cls.__doc__.strip().split("\n")[0]
                    if doc:
                        tooltip = '"%s"' % doc.replace('"', '\\"')
            except Exception:  # might raise AttributeError for strange classes
                pass

            baselist: list[str] = []
            all_classes[cls] = (nodename, fullname, baselist, tooltip)

            # stop traversal when a requested top class is reached
            if fullname in top_classes:
                return

            for base in cls.__bases__:
                if not show_builtins and base in py_builtins:
                    continue
                if not private_bases and base.__name__.startswith('_'):
                    continue
                baselist.append(self.class_name(base, parts, aliases))
                if base not in all_classes:
                    recurse(base)

        for cls in classes:
            recurse(cls)

        return list(all_classes.values())  # type: ignore[arg-type]

    def class_name(
        self, cls: Any, parts: int = 0, aliases: dict[str, str] | None = None,
    ) -> str:
        """Given a class object, return a fully-qualified name.

        This works for things I've tested in matplotlib so far, but may not be
        completely general.
        """
        module = cls.__module__
        if module in ('__builtin__', 'builtins'):
            fullname = cls.__name__
        else:
            fullname = f'{module}.{cls.__qualname__}'
        if parts == 0:
            result = fullname
        else:
            name_parts = fullname.split('.')
            result = '.'.join(name_parts[-parts:])
        if aliases is not None and result in aliases:
            return aliases[result]
        return result

    def get_all_class_names(self) -> list[str]:
        """Get all of the class names involved in the graph."""
        return [fullname for (_, fullname, _, _) in self.class_info]

    # These are the default attrs for graphviz
    default_graph_attrs = {
        'rankdir': 'LR',
        'size': '"8.0, 12.0"',
        'bgcolor': 'transparent',
    }
    default_node_attrs = {
        'shape': 'box',
        'fontsize': 10,
        'height': 0.25,
        'fontname': '"Vera Sans, DejaVu Sans, Liberation Sans, '
                    'Arial, Helvetica, sans"',
        'style': '"setlinewidth(0.5),filled"',
        'fillcolor': 'white',
    }
    default_edge_attrs = {
        'arrowsize': 0.5,
        'style': '"setlinewidth(0.5)"',
    }

    def _format_node_attrs(self, attrs: dict[str, Any]) -> str:
        # "key=value" pairs joined with commas, sorted for stable output
        return ','.join(['%s=%s' % x for x in sorted(attrs.items())])

    def _format_graph_attrs(self, attrs: dict[str, Any]) -> str:
        # one "key=value;" statement per line, sorted for stable output
        return ''.join(['%s=%s;\n' % x for x in sorted(attrs.items())])

    def generate_dot(self, name: str, urls: dict[str, str] | None = None,
                     env: BuildEnvironment | None = None,
                     graph_attrs: dict | None = None,
                     node_attrs: dict | None = None,
                     edge_attrs: dict | None = None,
                     ) -> str:
        """Generate a graphviz dot graph from the classes that were passed in
        to __init__.

        *name* is the name of the graph.

        *urls* is a dictionary mapping class names to HTTP URLs.

        *graph_attrs*, *node_attrs*, *edge_attrs* are dictionaries containing
        key/value pairs to pass on as graphviz properties.
        """
        if urls is None:
            urls = {}
        g_attrs = self.default_graph_attrs.copy()
        n_attrs = self.default_node_attrs.copy()
        e_attrs = self.default_edge_attrs.copy()
        if graph_attrs is not None:
            g_attrs.update(graph_attrs)
        if node_attrs is not None:
            n_attrs.update(node_attrs)
        if edge_attrs is not None:
            e_attrs.update(edge_attrs)
        if env:
            # project-level overrides from conf.py
            g_attrs.update(env.config.inheritance_graph_attrs)
            n_attrs.update(env.config.inheritance_node_attrs)
            e_attrs.update(env.config.inheritance_edge_attrs)

        res: list[str] = []
        res.append('digraph %s {\n' % name)
        res.append(self._format_graph_attrs(g_attrs))

        for name, fullname, bases, tooltip in sorted(self.class_info):
            # Write the node
            this_node_attrs = n_attrs.copy()
            if fullname in urls:
                this_node_attrs['URL'] = '"%s"' % urls[fullname]
                this_node_attrs['target'] = '"_top"'
            if tooltip:
                this_node_attrs['tooltip'] = tooltip
            res.append('  "%s" [%s];\n' %
                       (name, self._format_node_attrs(this_node_attrs)))

            # Write the edges
            for base_name in bases:
                res.append('  "%s" -> "%s" [%s];\n' %
                           (base_name, name,
                            self._format_node_attrs(e_attrs)))
        res.append('}\n')
        return ''.join(res)
+
+
class inheritance_diagram(graphviz):
    """
    A docutils node to use as a placeholder for the inheritance diagram.
    """
    # Subclassing the graphviz node lets graphviz-aware builders render it;
    # the directive stores 'graph', 'content', 'parts' and 'top-classes' on it.
    pass
+
+
class InheritanceDiagram(SphinxDirective):
    """
    Run when the inheritance_diagram directive is first encountered.
    """
    has_content = False
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec: OptionSpec = {
        'parts': int,
        'private-bases': directives.flag,
        'caption': directives.unchanged,
        'top-classes': directives.unchanged_required,
    }

    def run(self) -> list[Node]:
        """Build the placeholder node (optionally wrapped in a figure)."""
        node = inheritance_diagram()
        node.document = self.state.document
        class_names = self.arguments[0].split()
        class_role = self.env.get_domain('py').role('class')
        # Store the original content for use as a hash
        node['parts'] = self.options.get('parts', 0)
        node['content'] = ', '.join(class_names)
        node['top-classes'] = []
        for cls in self.options.get('top-classes', '').split(','):
            cls = cls.strip()
            if cls:
                node['top-classes'].append(cls)

        # Create a graph starting with the list of classes
        try:
            graph = InheritanceGraph(
                class_names, self.env.ref_context.get('py:module'),  # type: ignore[arg-type]
                parts=node['parts'],
                private_bases='private-bases' in self.options,
                aliases=self.config.inheritance_alias,
                top_classes=node['top-classes'])
        except InheritanceException as err:
            return [node.document.reporter.warning(err, line=self.lineno)]

        # Create xref nodes for each target of the graph's image map and
        # add them to the doc tree so that Sphinx can resolve the
        # references to real URLs later. These nodes will eventually be
        # removed from the doctree after we're done with them.
        for name in graph.get_all_class_names():
            refnodes, x = class_role(  # type: ignore[call-arg,misc]
                'class', ':class:`%s`' % name, name, 0, self.state)  # type: ignore[arg-type]
            node.extend(refnodes)
        # Store the graph object so we can use it to generate the
        # dot file later
        node['graph'] = graph

        if 'caption' not in self.options:
            self.add_name(node)
            return [node]
        else:
            figure = figure_wrapper(self, node, self.options['caption'])
            self.add_name(figure)
            return [figure]
+
+
def get_graph_hash(node: inheritance_diagram) -> str:
    """Return a short hash identifying the diagram's classes and options."""
    key = node['content'] + str(node['parts'])
    digest = hashlib.md5(key.encode(), usedforsecurity=False).hexdigest()
    return digest[-10:]
+
+
def html_visit_inheritance_diagram(self: HTML5Translator, node: inheritance_diagram) -> None:
    """
    Output the graph for HTML. This will insert a PNG with clickable
    image map.
    """
    graph = node['graph']

    graph_hash = get_graph_hash(node)
    name = 'inheritance%s' % graph_hash

    # Create a mapping from fully-qualified class names to URLs.
    graphviz_output_format = self.builder.env.config.graphviz_output_format.upper()
    current_filename = path.basename(self.builder.current_docname + self.builder.out_suffix)
    urls = {}
    pending_xrefs = cast(Iterable[addnodes.pending_xref], node)
    for child in pending_xrefs:
        if child.get('refuri') is not None:
            # Construct the name from the URI if the reference is external via intersphinx
            if not child.get('internal', True):
                refname = child['refuri'].rsplit('#', 1)[-1]
            else:
                refname = child['reftitle']

            urls[refname] = child.get('refuri')
        elif child.get('refid') is not None:
            # SVG image maps need document-relative links; other formats
            # can use bare in-page anchors
            if graphviz_output_format == 'SVG':
                urls[child['reftitle']] = current_filename + '#' + child.get('refid')
            else:
                urls[child['reftitle']] = '#' + child.get('refid')

    dotcode = graph.generate_dot(name, urls, env=self.builder.env)
    render_dot_html(self, node, dotcode, {}, 'inheritance', 'inheritance',
                    alt='Inheritance diagram of ' + node['content'])
    raise nodes.SkipNode
+
+
def latex_visit_inheritance_diagram(self: LaTeXTranslator, node: inheritance_diagram) -> None:
    """
    Output the graph for LaTeX. This will insert a PDF.
    """
    name = 'inheritance%s' % get_graph_hash(node)
    dotcode = node['graph'].generate_dot(name, env=self.builder.env,
                                         graph_attrs={'size': '"6.0,6.0"'})
    render_dot_latex(self, node, dotcode, {}, 'inheritance')
    raise nodes.SkipNode
+
+
def texinfo_visit_inheritance_diagram(self: TexinfoTranslator, node: inheritance_diagram,
                                      ) -> None:
    """
    Output the graph for Texinfo. This will insert a PNG.
    """
    name = 'inheritance%s' % get_graph_hash(node)
    dotcode = node['graph'].generate_dot(name, env=self.builder.env,
                                         graph_attrs={'size': '"6.0,6.0"'})
    render_dot_texinfo(self, node, dotcode, {}, 'inheritance')
    raise nodes.SkipNode
+
+
def skip(self: nodes.NodeVisitor, node: inheritance_diagram) -> None:
    """Visitor for output formats without diagram support: emit nothing."""
    raise nodes.SkipNode
+
+
def setup(app: Sphinx) -> dict[str, Any]:
    """Register the inheritance-diagram directive, node and config values."""
    app.setup_extension('sphinx.ext.graphviz')
    app.add_node(
        inheritance_diagram,
        latex=(latex_visit_inheritance_diagram, None),
        html=(html_visit_inheritance_diagram, None),
        text=(skip, None),
        man=(skip, None),
        texinfo=(texinfo_visit_inheritance_diagram, None))
    app.add_directive('inheritance-diagram', InheritanceDiagram)
    for option in ('inheritance_graph_attrs', 'inheritance_node_attrs',
                   'inheritance_edge_attrs', 'inheritance_alias'):
        app.add_config_value(option, {}, False)
    return {'version': sphinx.__display_version__, 'parallel_read_safe': True}
diff --git a/sphinx/ext/intersphinx.py b/sphinx/ext/intersphinx.py
new file mode 100644
index 0000000..453bb6e
--- /dev/null
+++ b/sphinx/ext/intersphinx.py
@@ -0,0 +1,742 @@
+"""Insert links to objects documented in remote Sphinx documentation.
+
+This works as follows:
+
+* Each Sphinx HTML build creates a file named "objects.inv" that contains a
+ mapping from object names to URIs relative to the HTML set's root.
+
+* Projects using the Intersphinx extension can specify links to such mapping
+ files in the `intersphinx_mapping` config value. The mapping will then be
+ used to resolve otherwise missing references to objects into links to the
+ other documentation.
+
+* By default, the mapping file is assumed to be at the same location as the
+ rest of the documentation; however, the location of the mapping file can
+ also be specified individually, e.g. if the docs should be buildable
+ without Internet access.
+"""
+
+from __future__ import annotations
+
+import concurrent.futures
+import functools
+import posixpath
+import re
+import sys
+import time
+from os import path
+from typing import TYPE_CHECKING, cast
+from urllib.parse import urlsplit, urlunsplit
+
+from docutils import nodes
+from docutils.utils import relative_path
+
+import sphinx
+from sphinx.addnodes import pending_xref
+from sphinx.builders.html import INVENTORY_FILENAME
+from sphinx.errors import ExtensionError
+from sphinx.locale import _, __
+from sphinx.transforms.post_transforms import ReferencesResolver
+from sphinx.util import logging, requests
+from sphinx.util.docutils import CustomReSTDispatcher, SphinxRole
+from sphinx.util.inventory import InventoryFile
+
+if TYPE_CHECKING:
+ from collections.abc import Iterable
+ from types import ModuleType
+ from typing import IO, Any, Union
+
+ from docutils.nodes import Node, TextElement, system_message
+ from docutils.utils import Reporter
+
+ from sphinx.application import Sphinx
+ from sphinx.config import Config
+ from sphinx.domains import Domain
+ from sphinx.environment import BuildEnvironment
+ from sphinx.util.typing import Inventory, InventoryItem, RoleFunction
+
+ InventoryCacheEntry = tuple[Union[str, None], int, Inventory]
+
+logger = logging.getLogger(__name__)
+
+
class InventoryAdapter:
    """Inventory adapter for environment.

    Exposes the intersphinx state stored on the build environment
    (fetch cache, merged inventory and per-name inventories) as attributes.
    """

    def __init__(self, env: BuildEnvironment) -> None:
        self.env = env

        if not hasattr(env, 'intersphinx_cache'):
            # initial storage when fetching inventories before processing.
            # All three attributes must be created only once per environment:
            # re-creating an adapter (e.g. while resolving references) must
            # not wipe inventories that load_mappings() already merged.
            self.env.intersphinx_cache = {}  # type: ignore[attr-defined]
            self.env.intersphinx_inventory = {}  # type: ignore[attr-defined]
            self.env.intersphinx_named_inventory = {}  # type: ignore[attr-defined]

    @property
    def cache(self) -> dict[str, InventoryCacheEntry]:
        """Intersphinx cache.

        - Key is the URI of the remote inventory
        - Element one is the key given in the Sphinx intersphinx_mapping
          configuration value
        - Element two is a time value for cache invalidation, a float
        - Element three is the loaded remote inventory, type Inventory
        """
        return self.env.intersphinx_cache  # type: ignore[attr-defined]

    @property
    def main_inventory(self) -> Inventory:
        # merged inventory of all mappings (unnamed lookups)
        return self.env.intersphinx_inventory  # type: ignore[attr-defined]

    @property
    def named_inventory(self) -> dict[str, Inventory]:
        # inventories addressable by their intersphinx_mapping name
        return self.env.intersphinx_named_inventory  # type: ignore[attr-defined]

    def clear(self) -> None:
        """Drop the merged inventories; the fetch cache is kept."""
        self.env.intersphinx_inventory.clear()  # type: ignore[attr-defined]
        self.env.intersphinx_named_inventory.clear()  # type: ignore[attr-defined]
+
+
+def _strip_basic_auth(url: str) -> str:
+ """Returns *url* with basic auth credentials removed. Also returns the
+ basic auth username and password if they're present in *url*.
+
+ E.g.: https://user:pass@example.com => https://example.com
+
+ *url* need not include basic auth credentials.
+
+ :param url: url which may or may not contain basic auth credentials
+ :type url: ``str``
+
+ :return: *url* with any basic auth creds removed
+ :rtype: ``str``
+ """
+ frags = list(urlsplit(url))
+ # swap out "user[:pass]@hostname" for "hostname"
+ if '@' in frags[1]:
+ frags[1] = frags[1].split('@')[1]
+ return urlunsplit(frags)
+
+
def _read_from_url(url: str, *, config: Config) -> IO:
    """Reads data from *url* with an HTTP *GET*.

    This function supports fetching from resources which use basic HTTP auth as
    laid out by RFC1738 § 3.1. See § 5 for grammar definitions for URLs.

    .. seealso::

       https://www.ietf.org/rfc/rfc1738.txt

    :param url: URL of an HTTP resource
    :type url: ``str``
    :param config: Sphinx configuration; supplies the request timeout,
       user agent and TLS settings used for the fetch

    :return: data read from resource described by *url*
    :rtype: ``file``-like object
    """
    r = requests.get(url, stream=True, timeout=config.intersphinx_timeout,
                     _user_agent=config.user_agent,
                     _tls_info=(config.tls_verify, config.tls_cacerts))
    r.raise_for_status()
    # expose the final (possibly redirected) URL so callers can detect moves
    r.raw.url = r.url
    # decode content-body based on the header.
    # ref: https://github.com/psf/requests/issues/2155
    r.raw.read = functools.partial(r.raw.read, decode_content=True)
    return r.raw
+
+
+def _get_safe_url(url: str) -> str:
+ """Gets version of *url* with basic auth passwords obscured. This function
+ returns results suitable for printing and logging.
+
+ E.g.: https://user:12345@example.com => https://user@example.com
+
+ :param url: a url
+ :type url: ``str``
+
+ :return: *url* with password removed
+ :rtype: ``str``
+ """
+ parts = urlsplit(url)
+ if parts.username is None:
+ return url
+ else:
+ frags = list(parts)
+ if parts.port:
+ frags[1] = f'{parts.username}@{parts.hostname}:{parts.port}'
+ else:
+ frags[1] = f'{parts.username}@{parts.hostname}'
+
+ return urlunsplit(frags)
+
+
def fetch_inventory(app: Sphinx, uri: str, inv: str) -> Inventory:
    """Fetch, parse and return an intersphinx inventory file.

    :param uri: base URI that generated links will point into
    :param inv: location of the inventory file (local path or URL)
    :raises Exception: re-raises fetch/parse errors with a printf-style
        message template stored in ``args`` for lazy logging by callers
    """
    # both *uri* (base URI of the links to generate) and *inv* (actual
    # location of the inventory file) can be local or remote URIs
    if '://' in uri:
        # case: inv URI points to remote resource; strip any existing auth
        uri = _strip_basic_auth(uri)
    try:
        if '://' in inv:
            f = _read_from_url(inv, config=app.config)
        else:
            f = open(path.join(app.srcdir, inv), 'rb')  # NoQA: SIM115
    except Exception as err:
        err.args = ('intersphinx inventory %r not fetchable due to %s: %s',
                    inv, err.__class__, str(err))
        raise
    try:
        if hasattr(f, 'url'):
            # the fetch may have been redirected; follow the final URL
            newinv = f.url
            if inv != newinv:
                logger.info(__('intersphinx inventory has moved: %s -> %s'), inv, newinv)

                if uri in (inv, path.dirname(inv), path.dirname(inv) + '/'):
                    uri = path.dirname(newinv)
        with f:
            try:
                invdata = InventoryFile.load(f, uri, posixpath.join)
            except ValueError as exc:
                raise ValueError('unknown or unsupported inventory version: %r' % exc) from exc
    except Exception as err:
        err.args = ('intersphinx inventory %r not readable due to %s: %s',
                    inv, err.__class__.__name__, str(err))
        raise
    else:
        return invdata
+
+
def fetch_inventory_group(
    name: str | None,
    uri: str,
    invs: tuple[str | None, ...],
    cache: dict[str, InventoryCacheEntry],
    app: Sphinx,
    now: int,
) -> bool:
    """Fetch the first loadable inventory for one intersphinx mapping entry.

    Each location in *invs* is tried in order (an empty entry falls back to
    ``<uri>/objects.inv``); the first inventory that loads is stored in
    *cache* under *uri*.  Failures are reported in the ``finally`` block:
    quietly if an alternative worked, as a warning if all locations failed.

    :return: True if the cache entry for *uri* was updated, else False.
    """
    cache_time = now - app.config.intersphinx_cache_limit * 86400
    failures = []
    try:
        for inv in invs:
            if not inv:
                inv = posixpath.join(uri, INVENTORY_FILENAME)
            # decide whether the inventory must be read: always read local
            # files; remote ones only if the cache time is expired
            if '://' not in inv or uri not in cache or cache[uri][1] < cache_time:
                safe_inv_url = _get_safe_url(inv)
                logger.info(__('loading intersphinx inventory from %s...'), safe_inv_url)
                try:
                    invdata = fetch_inventory(app, uri, inv)
                except Exception as err:
                    # err.args carries a printf-style template (see fetch_inventory)
                    failures.append(err.args)
                    continue
                if invdata:
                    cache[uri] = name, now, invdata
                    return True
        return False
    finally:
        if failures == []:
            pass
        elif len(failures) < len(invs):
            logger.info(__("encountered some issues with some of the inventories,"
                           " but they had working alternatives:"))
            for fail in failures:
                logger.info(*fail)
        else:
            issues = '\n'.join([f[0] % f[1:] for f in failures])
            logger.warning(__("failed to reach any of the inventories "
                              "with the following issues:") + "\n" + issues)
+
+
def load_mappings(app: Sphinx) -> None:
    """Load all intersphinx mappings into the environment.

    All configured inventories are fetched concurrently; if any cache entry
    was updated, the merged inventories on the environment are rebuilt.
    """
    now = int(time.time())
    inventories = InventoryAdapter(app.builder.env)
    intersphinx_cache: dict[str, InventoryCacheEntry] = inventories.cache

    with concurrent.futures.ThreadPoolExecutor() as pool:
        futures = []
        name: str | None
        uri: str
        invs: tuple[str | None, ...]
        # intersphinx_mapping maps each inventory name to (uri, invs);
        # iterate items() -- values() alone would unpack the wrong tuple.
        for name, (uri, invs) in app.config.intersphinx_mapping.items():
            futures.append(pool.submit(
                fetch_inventory_group, name, uri, invs, intersphinx_cache, app, now,
            ))
        updated = [f.result() for f in concurrent.futures.as_completed(futures)]

    if any(updated):
        inventories.clear()

        # Duplicate values in different inventories will shadow each
        # other; which one will override which can vary between builds
        # since they are specified using an unordered dict.  To make
        # it more consistent, we sort the named inventories and then
        # add the unnamed inventories last.  This means that the
        # unnamed inventories will shadow the named ones but the named
        # ones can still be accessed when the name is specified.
        named_vals = []
        unnamed_vals = []
        for name, _expiry, invdata in intersphinx_cache.values():
            if name:
                named_vals.append((name, invdata))
            else:
                unnamed_vals.append((name, invdata))
        for name, invdata in sorted(named_vals) + unnamed_vals:
            if name:
                inventories.named_inventory[name] = invdata
            for type, objects in invdata.items():
                inventories.main_inventory.setdefault(type, {}).update(objects)
+
+
def _create_element_from_result(domain: Domain, inv_name: str | None,
                                data: InventoryItem,
                                node: pending_xref, contnode: TextElement) -> nodes.reference:
    """Build the reference node for an inventory hit.

    *data* is an inventory entry ``(project, version, URI, dispname)``.
    """
    proj, version, uri, dispname = data
    if '://' not in uri and node.get('refdoc'):
        # get correct path in case of subdirectories
        uri = posixpath.join(relative_path(node['refdoc'], '.'), uri)
    if version:
        reftitle = _('(in %s v%s)') % (proj, version)
    else:
        reftitle = _('(in %s)') % (proj,)
    newnode = nodes.reference('', '', internal=False, refuri=uri, reftitle=reftitle)
    if node.get('refexplicit'):
        # use whatever title was given
        newnode.append(contnode)
    elif dispname == '-' or \
            (domain.name == 'std' and node['reftype'] == 'keyword'):
        # use whatever title was given, but strip prefix
        title = contnode.astext()
        if inv_name is not None and title.startswith(inv_name + ':'):
            newnode.append(contnode.__class__(title[len(inv_name) + 1:],
                                              title[len(inv_name) + 1:]))
        else:
            newnode.append(contnode)
    else:
        # else use the given display name (used for :ref:)
        newnode.append(contnode.__class__(dispname, dispname))
    return newnode
+
+
def _resolve_reference_in_domain_by_target(
        inv_name: str | None, inventory: Inventory,
        domain: Domain, objtypes: Iterable[str],
        target: str,
        node: pending_xref, contnode: TextElement) -> nodes.reference | None:
    """Look *target* up under each object type in *objtypes*.

    Returns a reference node for the first match, or None if no object
    type contains the target.
    """
    for objtype in objtypes:
        if objtype not in inventory:
            # Continue if there's nothing of this kind in the inventory
            continue

        if target in inventory[objtype]:
            # Case sensitive match, use it
            data = inventory[objtype][target]
        elif objtype == 'std:term':
            # Check for potential case insensitive matches for terms only
            target_lower = target.lower()
            insensitive_matches = list(filter(lambda k: k.lower() == target_lower,
                                              inventory[objtype].keys()))
            if insensitive_matches:
                data = inventory[objtype][insensitive_matches[0]]
            else:
                # No case insensitive match either, continue to the next candidate
                continue
        else:
            # Could reach here if we're not a term but have a case insensitive match.
            # This is a fix for terms specifically, but potentially should apply to
            # other types.
            continue
        return _create_element_from_result(domain, inv_name, data, node, contnode)
    return None
+
+
def _resolve_reference_in_domain(env: BuildEnvironment,
                                 inv_name: str | None, inventory: Inventory,
                                 honor_disabled_refs: bool,
                                 domain: Domain, objtypes: Iterable[str],
                                 node: pending_xref, contnode: TextElement,
                                 ) -> nodes.reference | None:
    """Resolve *node* within one *domain*, trying the given *objtypes*.

    The plain target is tried first, then the fully-qualified name
    derived from the node's current scope.
    """
    obj_types: dict[str, None] = {}.fromkeys(objtypes)

    # we adjust the object types for backwards compatibility
    if domain.name == 'std' and 'cmdoption' in obj_types:
        # cmdoptions were stored as std:option until Sphinx 1.6
        obj_types['option'] = None
    if domain.name == 'py' and 'attribute' in obj_types:
        # properties are stored as py:method since Sphinx 2.1
        obj_types['method'] = None

    # the inventory contains domain:type as objtype
    domain_name = domain.name
    obj_types = {f"{domain_name}:{obj_type}": None for obj_type in obj_types}

    # now that the objtypes list is complete we can remove the disabled ones
    if honor_disabled_refs:
        disabled = set(env.config.intersphinx_disabled_reftypes)
        obj_types = {obj_type: None
                     for obj_type in obj_types
                     if obj_type not in disabled}

    objtypes = [*obj_types.keys()]

    # without qualification
    res = _resolve_reference_in_domain_by_target(inv_name, inventory, domain, objtypes,
                                                 node['reftarget'], node, contnode)
    if res is not None:
        return res

    # try with qualification of the current scope instead
    full_qualified_name = domain.get_full_qualified_name(node)
    if full_qualified_name is None:
        return None
    return _resolve_reference_in_domain_by_target(inv_name, inventory, domain, objtypes,
                                                  full_qualified_name, node, contnode)
+
+
def _resolve_reference(env: BuildEnvironment, inv_name: str | None, inventory: Inventory,
                       honor_disabled_refs: bool,
                       node: pending_xref, contnode: TextElement) -> nodes.reference | None:
    """Resolve *node* against *inventory*, honoring disabled reftypes.

    ``any`` references are tried against every domain; other references
    only against the node's own domain.
    """
    # disabling should only be done if no inventory is given
    honor_disabled_refs = honor_disabled_refs and inv_name is None

    if honor_disabled_refs and '*' in env.config.intersphinx_disabled_reftypes:
        return None

    typ = node['reftype']
    if typ == 'any':
        for domain_name, domain in env.domains.items():
            if (honor_disabled_refs
                    and (domain_name + ":*") in env.config.intersphinx_disabled_reftypes):
                continue
            objtypes: Iterable[str] = domain.object_types.keys()
            res = _resolve_reference_in_domain(env, inv_name, inventory,
                                               honor_disabled_refs,
                                               domain, objtypes,
                                               node, contnode)
            if res is not None:
                return res
        return None
    else:
        domain_name = node.get('refdomain')
        if not domain_name:
            # only objects in domains are in the inventory
            return None
        if honor_disabled_refs \
                and (domain_name + ":*") in env.config.intersphinx_disabled_reftypes:
            return None
        domain = env.get_domain(domain_name)
        objtypes = domain.objtypes_for_role(typ) or ()
        if not objtypes:
            return None
        return _resolve_reference_in_domain(env, inv_name, inventory,
                                            honor_disabled_refs,
                                            domain, objtypes,
                                            node, contnode)
+
+
def inventory_exists(env: BuildEnvironment, inv_name: str) -> bool:
    """Return True if an inventory named *inv_name* has been loaded."""
    named_inventories = InventoryAdapter(env).named_inventory
    return inv_name in named_inventories
+
+
def resolve_reference_in_inventory(env: BuildEnvironment,
                                   inv_name: str,
                                   node: pending_xref, contnode: TextElement,
                                   ) -> nodes.reference | None:
    """Attempt to resolve a missing reference via intersphinx references.

    Resolution is tried in the given inventory with the target as is.

    Requires ``inventory_exists(env, inv_name)``.
    """
    assert inventory_exists(env, inv_name)
    inventory = InventoryAdapter(env).named_inventory[inv_name]
    # disabled reftypes are not honored for an explicitly named inventory
    return _resolve_reference(env, inv_name, inventory, False, node, contnode)
+
+
def resolve_reference_any_inventory(env: BuildEnvironment,
                                    honor_disabled_refs: bool,
                                    node: pending_xref, contnode: TextElement,
                                    ) -> nodes.reference | None:
    """Attempt to resolve a missing reference via intersphinx references.

    Resolution is tried with the target as is in any inventory.
    """
    merged_inventory = InventoryAdapter(env).main_inventory
    return _resolve_reference(env, None, merged_inventory,
                              honor_disabled_refs, node, contnode)
+
+
def resolve_reference_detect_inventory(env: BuildEnvironment,
                                       node: pending_xref, contnode: TextElement,
                                       ) -> nodes.reference | None:
    """Attempt to resolve a missing reference via intersphinx references.

    The target is first looked up as-is in any inventory.  Failing that, a
    target of the form ``inv_name:newtarget`` is interpreted as an explicit
    lookup of ``newtarget`` in the named inventory ``inv_name``.
    """
    # ordinary direct lookup, use data as is
    resolved = resolve_reference_any_inventory(env, True, node, contnode)
    if resolved is not None:
        return resolved

    # try splitting the target into 'inv_name:target'
    full_target = node['reftarget']
    if ':' not in full_target:
        return None
    inv_name, new_target = full_target.split(':', 1)
    if not inventory_exists(env, inv_name):
        return None
    # temporarily rewrite the target for the inventory-scoped lookup
    node['reftarget'] = new_target
    resolved = resolve_reference_in_inventory(env, inv_name, node, contnode)
    node['reftarget'] = full_target
    return resolved
+
+
def missing_reference(app: Sphinx, env: BuildEnvironment, node: pending_xref,
                      contnode: TextElement) -> nodes.reference | None:
    """Attempt to resolve a missing reference via intersphinx references."""
    # 'missing-reference' event handler; ``app`` is required by the event
    # signature but unused here -- all work is delegated.

    return resolve_reference_detect_inventory(env, node, contnode)
+
+
class IntersphinxDispatcher(CustomReSTDispatcher):
    """Custom dispatcher for external role.

    This enables :external:***:/:external+***: roles on parsing reST document.
    """

    def role(
        self, role_name: str, language_module: ModuleType, lineno: int, reporter: Reporter,
    ) -> tuple[RoleFunction, list[system_message]]:
        # ':external' plus separator is 9 characters, so a usable role name
        # must be strictly longer than that.
        is_external = role_name.startswith(('external:', 'external+')) and len(role_name) > 9
        if not is_external:
            return super().role(role_name, language_module, lineno, reporter)
        return IntersphinxRole(role_name), []
+
+
class IntersphinxRole(SphinxRole):
    """Role for :external:***:/:external+***: cross-references.

    ``orig_name`` preserves the role name as written in the document;
    ``self.name`` is its lower-cased form (see the assert in :meth:`run`),
    so the original is needed to recover the inventory name verbatim.
    """

    # group 1: just for the optionality of the inventory name
    # group 2: the inventory name (optional)
    # group 3: the domain:role or role part
    # NOTE(review): _re_inv_ref appears unused within this class -- verify
    # whether external callers rely on it before removing.
    _re_inv_ref = re.compile(r"(\+([^:]+))?:(.*)")

    def __init__(self, orig_name: str) -> None:
        self.orig_name = orig_name

    def run(self) -> tuple[list[Node], list[system_message]]:
        """Parse the role name, delegate to the underlying domain role, and
        mark resulting pending_xref nodes for intersphinx resolution."""
        assert self.name == self.orig_name.lower()
        inventory, name_suffix = self.get_inventory_and_name_suffix(self.orig_name)
        if inventory and not inventory_exists(self.env, inventory):
            # Explicit inventory named, but no such inventory loaded.
            logger.warning(__('inventory for external cross-reference not found: %s'),
                           inventory, location=(self.env.docname, self.lineno))
            return [], []

        role_name = self.get_role_name(name_suffix)
        if role_name is None:
            logger.warning(__('role for external cross-reference not found: %s'), name_suffix,
                           location=(self.env.docname, self.lineno))
            return [], []

        result, messages = self.invoke_role(role_name)
        # Tag generated cross-references so IntersphinxRoleResolver picks
        # them up later; 'inventory' is None for the any-inventory case.
        for node in result:
            if isinstance(node, pending_xref):
                node['intersphinx'] = True
                node['inventory'] = inventory

        return result, messages

    def get_inventory_and_name_suffix(self, name: str) -> tuple[str | None, str]:
        """Split ``name`` into an optional inventory name and the rest.

        Returns ``(inv_name, suffix)``; ``inv_name`` is None when no
        inventory was given (the ':external:' form).
        """
        assert name.startswith('external'), name
        # either we have an explicit inventory name, i.e,
        # :external+inv:role:        or
        # :external+inv:domain:role:
        # or we look in all inventories, i.e.,
        # :external:role:        or
        # :external:domain:role:
        suffix = name[9:]
        if name[8] == '+':
            inv_name, suffix = suffix.split(':', 1)
            return inv_name, suffix
        elif name[8] == ':':
            return None, suffix
        else:
            msg = f'Malformed :external: role name: {name}'
            raise ValueError(msg)

    def get_role_name(self, name: str) -> tuple[str, str] | None:
        """Resolve ``name`` ('role' or 'domain:role') to a (domain, role) pair,
        falling back to the 'std' domain; None if no such role exists."""
        names = name.split(':')
        if len(names) == 1:
            # role
            default_domain = self.env.temp_data.get('default_domain')
            domain = default_domain.name if default_domain else None
            role = names[0]
        elif len(names) == 2:
            # domain:role:
            domain = names[0]
            role = names[1]
        else:
            return None

        if domain and self.is_existent_role(domain, role):
            return (domain, role)
        elif self.is_existent_role('std', role):
            return ('std', role)
        else:
            return None

    def is_existent_role(self, domain_name: str, role_name: str) -> bool:
        """Return True if *domain_name* is a known domain providing *role_name*."""
        try:
            domain = self.env.get_domain(domain_name)
            return role_name in domain.roles
        except ExtensionError:
            # Unknown domain name.
            return False

    def invoke_role(self, role: tuple[str, str]) -> tuple[list[Node], list[system_message]]:
        """Invoke the domain role given as a (domain, role) pair with this
        role's original call arguments."""
        domain = self.env.get_domain(role[0])
        if domain:
            role_func = domain.role(role[1])
            assert role_func is not None

            return role_func(':'.join(role), self.rawtext, self.text, self.lineno,
                             self.inliner, self.options, self.content)
        else:
            return [], []
+
+
class IntersphinxRoleResolver(ReferencesResolver):
    """pending_xref node resolver for intersphinx role.

    This resolves pending_xref nodes generated by :intersphinx:***: role.
    """

    # Run just before the regular ReferencesResolver.
    default_priority = ReferencesResolver.default_priority - 1

    def run(self, **kwargs: Any) -> None:
        for node in self.document.findall(pending_xref):
            if 'intersphinx' not in node:
                # Not one of ours; leave for other resolvers.
                continue
            contnode = cast(nodes.TextElement, node[0].deepcopy())
            inv_name = node['inventory']
            if inv_name is None:
                # No explicit inventory: search all of them.
                newnode = resolve_reference_any_inventory(self.env, False, node, contnode)
            else:
                assert inventory_exists(self.env, inv_name)
                newnode = resolve_reference_in_inventory(self.env, inv_name, node, contnode)
            if newnode is not None:
                node.replace_self(newnode)
                continue
            # Resolution failed: warn and degrade to the plain content node.
            typ = node['reftype']
            logger.warning(__('external %s:%s reference target not found: %s') %
                           (node['refdomain'], typ, node['reftarget']),
                           location=node, type='ref', subtype=typ)
            node.replace_self(contnode)
+
+
def install_dispatcher(app: Sphinx, docname: str, source: list[str]) -> None:
    """Enable IntersphinxDispatcher.

    .. note:: The installed dispatcher will be uninstalled on disabling sphinx_domain
              automatically.
    """
    # 'source-read' handler; the app/docname/source arguments are unused.
    IntersphinxDispatcher().enable()
+
+
def normalize_intersphinx_mapping(app: Sphinx, config: Config) -> None:
    """Normalize every ``intersphinx_mapping`` entry to ``(name, (uri, invs))``.

    Handles both the current ``{'name': (uri, inv)}`` format and the
    deprecated pre-1.0 ``{uri: inv}`` format; malformed entries are
    dropped with a warning.
    """
    for key, value in config.intersphinx_mapping.copy().items():
        try:
            if isinstance(value, (list, tuple)):
                # new format: key is the inventory name
                name = key
                uri, inv = value
                if not isinstance(name, str):
                    logger.warning(__('intersphinx identifier %r is not string. Ignored'),
                                   name)
                    config.intersphinx_mapping.pop(key)
                    continue
            else:
                # old format, no name
                # xref RemovedInSphinx80Warning
                name, uri, inv = None, key, value
                logger.warning(
                    "The pre-Sphinx 1.0 'intersphinx_mapping' format is "
                    "deprecated and will be removed in Sphinx 8. Update to the "
                    "current format as described in the documentation. "
                    f"Hint: \"intersphinx_mapping = {{'<name>': {(uri, inv)!r}}}\"."
                    "https://www.sphinx-doc.org/en/master/usage/extensions/intersphinx.html#confval-intersphinx_mapping"  # NoQA: E501
                )

            # A single inventory location is wrapped into a 1-tuple.
            inv_tuple = inv if isinstance(inv, tuple) else (inv,)
            config.intersphinx_mapping[key] = (name, (uri, inv_tuple))
        except Exception as exc:
            logger.warning(__('Failed to read intersphinx_mapping[%s], ignored: %r'), key, exc)
            config.intersphinx_mapping.pop(key)
+
+
def setup(app: Sphinx) -> dict[str, Any]:
    """Register intersphinx config values, event handlers and transforms."""
    for name, default, rebuild in (
        ('intersphinx_mapping', {}, True),
        ('intersphinx_cache_limit', 5, False),
        ('intersphinx_timeout', None, False),
        ('intersphinx_disabled_reftypes', ['std:doc'], True),
    ):
        app.add_config_value(name, default, rebuild)
    # Normalize the mapping early (priority 800) so later handlers see
    # the canonical form.
    app.connect('config-inited', normalize_intersphinx_mapping, priority=800)
    app.connect('builder-inited', load_mappings)
    app.connect('source-read', install_dispatcher)
    app.connect('missing-reference', missing_reference)
    app.add_post_transform(IntersphinxRoleResolver)
    return {
        'version': sphinx.__display_version__,
        'env_version': 1,
        'parallel_read_safe': True,
    }
+
+
+def inspect_main(argv: list[str], /) -> int:
+ """Debug functionality to print out an inventory"""
+ if len(argv) < 1:
+ print("Print out an inventory file.\n"
+ "Error: must specify local path or URL to an inventory file.",
+ file=sys.stderr)
+ return 1
+
+ class MockConfig:
+ intersphinx_timeout: int | None = None
+ tls_verify = False
+ tls_cacerts: str | dict[str, str] | None = None
+ user_agent: str = ''
+
+ class MockApp:
+ srcdir = ''
+ config = MockConfig()
+
+ try:
+ filename = argv[0]
+ inv_data = fetch_inventory(MockApp(), '', filename) # type: ignore[arg-type]
+ for key in sorted(inv_data or {}):
+ print(key)
+ inv_entries = sorted(inv_data[key].items())
+ for entry, (_proj, _ver, url_path, display_name) in inv_entries:
+ display_name = display_name * (display_name != '-')
+ print(f' {entry:<40} {display_name:<40}: {url_path}')
+ except ValueError as exc:
+ print(exc.args[0] % exc.args[1:], file=sys.stderr)
+ return 1
+ except Exception as exc:
+ print(f'Unknown error: {exc!r}', file=sys.stderr)
+ return 1
+ else:
+ return 0
+
+
if __name__ == '__main__':
    # Allow running this module directly to dump an inventory file for debugging.
    import logging as _logging
    _logging.basicConfig()

    raise SystemExit(inspect_main(sys.argv[1:]))
diff --git a/sphinx/ext/linkcode.py b/sphinx/ext/linkcode.py
new file mode 100644
index 0000000..ee10406
--- /dev/null
+++ b/sphinx/ext/linkcode.py
@@ -0,0 +1,77 @@
+"""Add external links to module code in Python object descriptions."""
+
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, Any
+
+from docutils import nodes
+
+import sphinx
+from sphinx import addnodes
+from sphinx.errors import SphinxError
+from sphinx.locale import _
+
+if TYPE_CHECKING:
+ from docutils.nodes import Node
+
+ from sphinx.application import Sphinx
+
+
class LinkcodeError(SphinxError):
    """Error raised when the linkcode extension is misconfigured
    (e.g. ``linkcode_resolve`` is missing from conf.py)."""

    category = "linkcode error"
+
+
def doctree_read(app: Sphinx, doctree: Node) -> None:
    """Add a '[source]' link to every object description in *doctree*.

    For each signature node, the per-domain identifying fields are collected
    and passed to the user-supplied ``linkcode_resolve`` callable from
    conf.py; a truthy return value becomes the link target (HTML output only,
    one link per URI).

    Raises
    ------
    LinkcodeError
        If ``linkcode_resolve`` is not defined in conf.py or is not callable.
    """
    env = app.builder.env

    resolve_target = getattr(env.config, 'linkcode_resolve', None)
    # Check the value we actually call; re-reading env.config.linkcode_resolve
    # (as before) could raise AttributeError instead of LinkcodeError.
    if not callable(resolve_target):
        msg = 'Function `linkcode_resolve` is not given in conf.py'
        raise LinkcodeError(msg)

    # Signature fields forwarded to linkcode_resolve, per domain.
    domain_keys = {
        'py': ['module', 'fullname'],
        'c': ['names'],
        'cpp': ['names'],
        'js': ['object', 'fullname'],
    }

    for objnode in list(doctree.findall(addnodes.desc)):
        domain = objnode.get('domain')
        uris: set[str] = set()
        for signode in objnode:
            if not isinstance(signode, addnodes.desc_signature):
                continue

            # Convert signode to a specified format (missing fields become '').
            info = {key: signode.get(key) or '' for key in domain_keys.get(domain, [])}
            if not info:
                continue

            # Call user code to resolve the link
            uri = resolve_target(domain, info)
            if not uri or uri in uris:
                # no source, or only one link per name, please
                continue
            uris.add(uri)

            inline = nodes.inline('', _('[source]'), classes=['viewcode-link'])
            onlynode = addnodes.only(expr='html')
            onlynode += nodes.reference('', '', inline, internal=False, refuri=uri)
            signode += onlynode
+
+
def setup(app: Sphinx) -> dict[str, Any]:
    """Register the ``linkcode_resolve`` config value and the doctree hook."""
    app.add_config_value('linkcode_resolve', None, '')
    app.connect('doctree-read', doctree_read)
    return {'version': sphinx.__display_version__, 'parallel_read_safe': True}
diff --git a/sphinx/ext/mathjax.py b/sphinx/ext/mathjax.py
new file mode 100644
index 0000000..41d18b9
--- /dev/null
+++ b/sphinx/ext/mathjax.py
@@ -0,0 +1,126 @@
+"""Allow `MathJax`_ to be used to display math in Sphinx's HTML writer.
+
+This requires the MathJax JavaScript library on your webserver/computer.
+
+.. _MathJax: https://www.mathjax.org/
+"""
+
+from __future__ import annotations
+
+import json
+from typing import TYPE_CHECKING, Any, cast
+
+from docutils import nodes
+
+import sphinx
+from sphinx.builders.html import StandaloneHTMLBuilder
+from sphinx.domains.math import MathDomain
+from sphinx.errors import ExtensionError
+from sphinx.locale import _
+from sphinx.util.math import get_node_equation_number
+
+if TYPE_CHECKING:
+ from sphinx.application import Sphinx
+ from sphinx.writers.html import HTML5Translator
+
+# more information for mathjax secure url is here:
+# https://docs.mathjax.org/en/latest/start.html#secure-access-to-the-cdn
+MATHJAX_URL = 'https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js'
+
+logger = sphinx.util.logging.getLogger(__name__)
+
+
def html_visit_math(self: HTML5Translator, node: nodes.math) -> None:
    """Render an inline math node wrapped in the configured MathJax delimiters."""
    delimiters = self.builder.config.mathjax_inline
    self.body.append(self.starttag(node, 'span', '', CLASS='math notranslate nohighlight'))
    self.body.append(delimiters[0] + self.encode(node.astext()) + delimiters[1] + '</span>')
    # Children are already rendered via astext(); stop traversal here.
    raise nodes.SkipNode
+
+
def html_visit_displaymath(self: HTML5Translator, node: nodes.math_block) -> None:
    """Render a display (block) math node as MathJax-delimited HTML.

    Paragraph breaks (blank lines) in the latex source split it into
    multiple equations, which are then wrapped in align/aligned
    environments.
    """
    self.body.append(self.starttag(node, 'div', CLASS='math notranslate nohighlight'))
    if node['nowrap']:
        # Emit the latex verbatim; the author supplies any environment.
        self.body.append(self.encode(node.astext()))
        self.body.append('</div>')
        raise nodes.SkipNode

    # necessary to e.g. set the id property correctly
    if node['number']:
        number = get_node_equation_number(self, node)
        self.body.append('<span class="eqno">(%s)' % number)
        self.add_permalink_ref(node, _('Link to this equation'))
        self.body.append('</span>')
    self.body.append(self.builder.config.mathjax_display[0])
    parts = [prt for prt in node.astext().split('\n\n') if prt.strip()]
    if len(parts) > 1:  # Add alignment if there are more than 1 equation
        self.body.append(r' \begin{align}\begin{aligned}')
    for i, part in enumerate(parts):
        part = self.encode(part)
        if r'\\' in part:
            # Part itself has line breaks: wrap it in a split environment.
            self.body.append(r'\begin{split}' + part + r'\end{split}')
        else:
            self.body.append(part)
        if i < len(parts) - 1:  # append new line if not the last equation
            self.body.append(r'\\')
    if len(parts) > 1:  # Add alignment if there are more than 1 equation
        self.body.append(r'\end{aligned}\end{align} ')
    self.body.append(self.builder.config.mathjax_display[1])
    self.body.append('</div>\n')
    raise nodes.SkipNode
+
+
def install_mathjax(app: Sphinx, pagename: str, templatename: str, context: dict[str, Any],
                    event_arg: Any) -> None:
    """``html-page-context`` handler adding the MathJax assets to HTML pages.

    Assets are added only when the page has equations (or the asset policy
    is 'always'). Raises ExtensionError if ``mathjax_path`` is unset.
    """
    if (
        app.builder.format != 'html' or
        app.builder.math_renderer_name != 'mathjax'  # type: ignore[attr-defined]
    ):
        return
    if not app.config.mathjax_path:
        msg = 'mathjax_path config value must be set for the mathjax extension to work'
        raise ExtensionError(msg)

    domain = cast(MathDomain, app.env.get_domain('math'))
    builder = cast(StandaloneHTMLBuilder, app.builder)
    if app.registry.html_assets_policy != 'always' and not domain.has_equations(pagename):
        # Enable mathjax only if equations exist.
        return

    if app.config.mathjax2_config:
        if app.config.mathjax_path == MATHJAX_URL:
            logger.warning(
                'mathjax_config/mathjax2_config does not work '
                'for the current MathJax version, use mathjax3_config instead')
        body = 'MathJax.Hub.Config(%s)' % json.dumps(app.config.mathjax2_config)
        builder.add_js_file('', type='text/x-mathjax-config', body=body)
    if app.config.mathjax3_config:
        body = 'window.MathJax = %s' % json.dumps(app.config.mathjax3_config)
        builder.add_js_file('', body=body)

    options = dict(app.config.mathjax_options or {})
    if 'async' not in options and 'defer' not in options:
        # MathJax v3 is loaded via "defer", any other via "async".
        mode = 'defer' if app.config.mathjax3_config else 'async'
        options[mode] = mode
    builder.add_js_file(app.config.mathjax_path, **options)
+
+
def setup(app: Sphinx) -> dict[str, Any]:
    """Register the 'mathjax' math renderer and its config values."""
    app.add_html_math_renderer('mathjax',
                               (html_visit_math, None),
                               (html_visit_displaymath, None))

    for name, default in (
        ('mathjax_path', MATHJAX_URL),
        ('mathjax_options', {}),
        ('mathjax_inline', [r'\(', r'\)']),
        ('mathjax_display', [r'\[', r'\]']),
        ('mathjax_config', None),
        # mathjax2_config falls back to the legacy mathjax_config value.
        ('mathjax2_config', lambda c: c.mathjax_config),
        ('mathjax3_config', None),
    ):
        app.add_config_value(name, default, 'html')
    app.connect('html-page-context', install_mathjax)

    return {'version': sphinx.__display_version__, 'parallel_read_safe': True}
diff --git a/sphinx/ext/napoleon/__init__.py b/sphinx/ext/napoleon/__init__.py
new file mode 100644
index 0000000..61aa3d8
--- /dev/null
+++ b/sphinx/ext/napoleon/__init__.py
@@ -0,0 +1,474 @@
+"""Support for NumPy and Google style docstrings."""
+
+from __future__ import annotations
+
+from typing import Any
+
+import sphinx
+from sphinx.application import Sphinx
+from sphinx.ext.napoleon.docstring import GoogleDocstring, NumpyDocstring
+from sphinx.util import inspect
+
+
class Config:
    """Sphinx napoleon extension settings in `conf.py`.

    Listed below are all the settings used by napoleon and their default
    values. These settings can be changed in the Sphinx `conf.py` file. Make
    sure that "sphinx.ext.napoleon" is enabled in `conf.py`::

        # conf.py

        # Add any Sphinx extension module names here, as strings
        extensions = ['sphinx.ext.napoleon']

        # Napoleon settings
        napoleon_google_docstring = True
        napoleon_numpy_docstring = True
        napoleon_include_init_with_doc = False
        napoleon_include_private_with_doc = False
        napoleon_include_special_with_doc = False
        napoleon_use_admonition_for_examples = False
        napoleon_use_admonition_for_notes = False
        napoleon_use_admonition_for_references = False
        napoleon_use_ivar = False
        napoleon_use_param = True
        napoleon_use_rtype = True
        napoleon_use_keyword = True
        napoleon_preprocess_types = False
        napoleon_type_aliases = None
        napoleon_custom_sections = None
        napoleon_attr_annotations = True

    .. _Google style:
       https://google.github.io/styleguide/pyguide.html
    .. _NumPy style:
       https://numpydoc.readthedocs.io/en/latest/format.html#docstring-standard

    Attributes
    ----------
    napoleon_google_docstring : :obj:`bool` (Defaults to True)
        True to parse `Google style`_ docstrings. False to disable support
        for Google style docstrings.
    napoleon_numpy_docstring : :obj:`bool` (Defaults to True)
        True to parse `NumPy style`_ docstrings. False to disable support
        for NumPy style docstrings.
    napoleon_include_init_with_doc : :obj:`bool` (Defaults to False)
        True to list ``__init__`` docstrings separately from the class
        docstring. False to fall back to Sphinx's default behavior, which
        considers the ``__init__`` docstring as part of the class
        documentation.

        **If True**::

            def __init__(self):
                \"\"\"
                This will be included in the docs because it has a docstring
                \"\"\"

            def __init__(self):
                # This will NOT be included in the docs

    napoleon_include_private_with_doc : :obj:`bool` (Defaults to False)
        True to include private members (like ``_membername``) with docstrings
        in the documentation. False to fall back to Sphinx's default behavior.

        **If True**::

            def _included(self):
                \"\"\"
                This will be included in the docs because it has a docstring
                \"\"\"
                pass

            def _skipped(self):
                # This will NOT be included in the docs
                pass

    napoleon_include_special_with_doc : :obj:`bool` (Defaults to False)
        True to include special members (like ``__membername__``) with
        docstrings in the documentation. False to fall back to Sphinx's
        default behavior.

        **If True**::

            def __str__(self):
                \"\"\"
                This will be included in the docs because it has a docstring
                \"\"\"
                return unicode(self).encode('utf-8')

            def __unicode__(self):
                # This will NOT be included in the docs
                return unicode(self.__class__.__name__)

    napoleon_use_admonition_for_examples : :obj:`bool` (Defaults to False)
        True to use the ``.. admonition::`` directive for the **Example** and
        **Examples** sections. False to use the ``.. rubric::`` directive
        instead. One may look better than the other depending on what HTML
        theme is used.

        This `NumPy style`_ snippet will be converted as follows::

            Example
            -------
            This is just a quick example

        **If True**::

            .. admonition:: Example

               This is just a quick example

        **If False**::

            .. rubric:: Example

            This is just a quick example

    napoleon_use_admonition_for_notes : :obj:`bool` (Defaults to False)
        True to use the ``.. admonition::`` directive for **Notes** sections.
        False to use the ``.. rubric::`` directive instead.

        Note
        ----
        The singular **Note** section will always be converted to a
        ``.. note::`` directive.

        See Also
        --------
        :confval:`napoleon_use_admonition_for_examples`

    napoleon_use_admonition_for_references : :obj:`bool` (Defaults to False)
        True to use the ``.. admonition::`` directive for **References**
        sections. False to use the ``.. rubric::`` directive instead.

        See Also
        --------
        :confval:`napoleon_use_admonition_for_examples`

    napoleon_use_ivar : :obj:`bool` (Defaults to False)
        True to use the ``:ivar:`` role for instance variables. False to use
        the ``.. attribute::`` directive instead.

        This `NumPy style`_ snippet will be converted as follows::

            Attributes
            ----------
            attr1 : int
                Description of `attr1`

        **If True**::

            :ivar attr1: Description of `attr1`
            :vartype attr1: int

        **If False**::

            .. attribute:: attr1

               Description of `attr1`

               :type: int

    napoleon_use_param : :obj:`bool` (Defaults to True)
        True to use a ``:param:`` role for each function parameter. False to
        use a single ``:parameters:`` role for all the parameters.

        This `NumPy style`_ snippet will be converted as follows::

            Parameters
            ----------
            arg1 : str
                Description of `arg1`
            arg2 : int, optional
                Description of `arg2`, defaults to 0

        **If True**::

            :param arg1: Description of `arg1`
            :type arg1: str
            :param arg2: Description of `arg2`, defaults to 0
            :type arg2: int, optional

        **If False**::

            :parameters: * **arg1** (*str*) --
                           Description of `arg1`
                         * **arg2** (*int, optional*) --
                           Description of `arg2`, defaults to 0

    napoleon_use_keyword : :obj:`bool` (Defaults to True)
        True to use a ``:keyword:`` role for each function keyword argument.
        False to use a single ``:keyword arguments:`` role for all the
        keywords.

        This behaves similarly to :confval:`napoleon_use_param`. Note unlike
        docutils, ``:keyword:`` and ``:param:`` will not be treated the same
        way - there will be a separate "Keyword Arguments" section, rendered
        in the same fashion as "Parameters" section (type links created if
        possible)

        See Also
        --------
        :confval:`napoleon_use_param`

    napoleon_use_rtype : :obj:`bool` (Defaults to True)
        True to use the ``:rtype:`` role for the return type. False to output
        the return type inline with the description.

        This `NumPy style`_ snippet will be converted as follows::

            Returns
            -------
            bool
                True if successful, False otherwise

        **If True**::

            :returns: True if successful, False otherwise
            :rtype: bool

        **If False**::

            :returns: *bool* -- True if successful, False otherwise

    napoleon_preprocess_types : :obj:`bool` (Defaults to False)
        Enable the type preprocessor.

    napoleon_type_aliases : :obj:`dict` (Defaults to None)
        Add a mapping of strings to string, translating types in numpy
        style docstrings. Only works if ``napoleon_preprocess_types = True``.

    napoleon_custom_sections : :obj:`list` (Defaults to None)
        Add a list of custom sections to include, expanding the list of parsed sections.

        The entries can either be strings or tuples, depending on the intention:
          * To create a custom "generic" section, just pass a string.
          * To create an alias for an existing section, pass a tuple containing the
            alias name and the original, in that order.
          * To create a custom section that displays like the parameters or returns
            section, pass a tuple containing the custom section name and a string
            value, "params_style" or "returns_style".

        If an entry is just a string, it is interpreted as a header for a generic
        section. If the entry is a tuple/list/indexed container, the first entry
        is the name of the section, the second is the section key to emulate. If the
        second entry value is "params_style" or "returns_style", the custom section
        will be displayed like the parameters section or returns section.

    napoleon_attr_annotations : :obj:`bool` (Defaults to True)
        Use the type annotations of class attributes that are documented in the docstring
        but do not have a type in the docstring.

    """
    # (name -> (default, rebuild)) pairs, registered with Sphinx in setup().
    _config_values = {
        'napoleon_google_docstring': (True, 'env'),
        'napoleon_numpy_docstring': (True, 'env'),
        'napoleon_include_init_with_doc': (False, 'env'),
        'napoleon_include_private_with_doc': (False, 'env'),
        'napoleon_include_special_with_doc': (False, 'env'),
        'napoleon_use_admonition_for_examples': (False, 'env'),
        'napoleon_use_admonition_for_notes': (False, 'env'),
        'napoleon_use_admonition_for_references': (False, 'env'),
        'napoleon_use_ivar': (False, 'env'),
        'napoleon_use_param': (True, 'env'),
        'napoleon_use_rtype': (True, 'env'),
        'napoleon_use_keyword': (True, 'env'),
        'napoleon_preprocess_types': (False, 'env'),
        'napoleon_type_aliases': (None, 'env'),
        'napoleon_custom_sections': (None, 'env'),
        'napoleon_attr_annotations': (True, 'env'),
    }

    def __init__(self, **settings: Any) -> None:
        """Initialize every known setting to its default, then apply
        the overrides given in ``settings`` (unknown keys are set too)."""
        for name, (default, _rebuild) in self._config_values.items():
            setattr(self, name, default)
        for name, value in settings.items():
            setattr(self, name, value)
+
+
def setup(app: Sphinx) -> dict[str, Any]:
    """Sphinx extension setup function.

    Registers all napoleon config values and hooks napoleon's docstring
    processing into autodoc.

    Parameters
    ----------
    app : sphinx.application.Sphinx
        Application object representing the Sphinx process

    See Also
    --------
    `The Sphinx documentation on Extensions
    <https://www.sphinx-doc.org/extensions.html>`_

    """
    metadata = {'version': sphinx.__display_version__, 'parallel_read_safe': True}
    if not isinstance(app, Sphinx):
        # probably called by tests
        return metadata

    _patch_python_domain()

    app.setup_extension('sphinx.ext.autodoc')
    app.connect('autodoc-process-docstring', _process_docstring)
    app.connect('autodoc-skip-member', _skip_member)

    for name, (default, rebuild) in Config._config_values.items():
        app.add_config_value(name, default, rebuild)
    return metadata
+
+
def _patch_python_domain() -> None:
    """Extend the Python domain's field lists for napoleon's output.

    Adds extra aliases to the 'parameter' field and appends a typed
    'Keyword Arguments' field.
    """
    from sphinx.domains.python import PyObject, PyTypedField
    from sphinx.locale import _
    for doc_field in PyObject.doc_field_types:
        if doc_field.name == 'parameter':
            doc_field.names = ('param', 'parameter', 'arg', 'argument')
            break
    keyword_field = PyTypedField(
        'keyword', label=_('Keyword Arguments'),
        names=('keyword', 'kwarg', 'kwparam'),
        typerolename='obj', typenames=('paramtype', 'kwtype'),
        can_collapse=True)
    PyObject.doc_field_types.append(keyword_field)
+
+
def _process_docstring(app: Sphinx, what: str, name: str, obj: Any,
                       options: Any, lines: list[str]) -> None:
    """Convert a Google and/or NumPy style docstring to reST, in place.

    Connected to autodoc's ``autodoc-process-docstring`` event. Depending on
    the ``napoleon_numpy_docstring`` and ``napoleon_google_docstring``
    config values, `lines` is run through :class:`NumpyDocstring` and/or
    :class:`GoogleDocstring` and replaced *in place* with the result.

    Parameters
    ----------
    app : sphinx.application.Sphinx
        Application object representing the Sphinx process.
    what : str
        A string specifying the type of the object to which the docstring
        belongs. Valid values: "module", "class", "exception", "function",
        "method", "attribute".
    name : str
        The fully qualified name of the object.
    obj : module, class, exception, function, method, or attribute
        The object to which the docstring belongs.
    options : sphinx.ext.autodoc.Options
        The options given to the directive.
    lines : list of str
        The lines of the docstring, modified *in place*.

    """
    processed = lines
    if app.config.napoleon_numpy_docstring:
        processed = NumpyDocstring(processed, app.config, app, what, name,
                                   obj, options).lines()
    if app.config.napoleon_google_docstring:
        processed = GoogleDocstring(processed, app.config, app, what, name,
                                    obj, options).lines()
    lines[:] = processed
+
+
def _skip_member(app: Sphinx, what: str, name: str, obj: Any,
                 skip: bool, options: Any) -> bool | None:
    """Determine if private and special class members are included in docs.

    The following settings in conf.py determine if private and special class
    members or init methods are included in the generated documentation:

    * ``napoleon_include_init_with_doc`` --
      include init methods if they have docstrings
    * ``napoleon_include_private_with_doc`` --
      include private members if they have docstrings
    * ``napoleon_include_special_with_doc`` --
      include special members if they have docstrings

    Parameters
    ----------
    app : sphinx.application.Sphinx
        Application object representing the Sphinx process
    what : str
        A string specifying the type of the object to which the member
        belongs. Valid values: "module", "class", "exception", "function",
        "method", "attribute".
    name : str
        The name of the member.
    obj : module, class, exception, function, method, or attribute.
        For example, if the member is the __init__ method of class A, then
        `obj` will be `A.__init__`.
    skip : bool
        A boolean indicating if autodoc will skip this member if `_skip_member`
        does not override the decision
    options : sphinx.ext.autodoc.Options
        The options given to the directive: an object with attributes
        inherited_members, undoc_members, show_inheritance and no_index that
        are True if the flag option of same name was given to the auto
        directive.

    Returns
    -------
    bool
        True if the member should be skipped during creation of the docs,
        False if it should be included in the docs.

    """
    has_doc = getattr(obj, '__doc__', False)
    is_member = what in ('class', 'exception', 'module')
    if name != '__weakref__' and has_doc and is_member:
        # Only override the decision for documented members of classes,
        # exceptions or modules, and only if this class actually owns the
        # member (not inherited from a base class).
        cls_is_owner = False
        if what in ('class', 'exception'):
            qualname = getattr(obj, '__qualname__', '')
            cls_path, _, _ = qualname.rpartition('.')
            if cls_path:
                try:
                    if '.' in cls_path:
                        # Nested class: walk the attribute path from the module.
                        import functools
                        import importlib

                        mod = importlib.import_module(obj.__module__)
                        mod_path = cls_path.split('.')
                        cls = functools.reduce(getattr, mod_path, mod)
                    else:
                        # Top-level class: look it up in the function's globals.
                        cls = inspect.unwrap(obj).__globals__[cls_path]
                except Exception:
                    cls_is_owner = False
                else:
                    cls_is_owner = (cls and hasattr(cls, name) and  # type: ignore[assignment]
                                    name in cls.__dict__)
        else:
            cls_is_owner = False

        if what == 'module' or cls_is_owner:
            is_init = (name == '__init__')
            is_special = (not is_init and name.startswith('__') and
                          name.endswith('__'))
            is_private = (not is_init and not is_special and
                          name.startswith('_'))
            inc_init = app.config.napoleon_include_init_with_doc
            inc_special = app.config.napoleon_include_special_with_doc
            inc_private = app.config.napoleon_include_private_with_doc
            if ((is_special and inc_special) or
                    (is_private and inc_private) or
                    (is_init and inc_init)):
                # Force inclusion; returning False overrides autodoc's skip.
                return False
    # None defers the decision back to autodoc.
    return None
diff --git a/sphinx/ext/napoleon/docstring.py b/sphinx/ext/napoleon/docstring.py
new file mode 100644
index 0000000..2ffde39
--- /dev/null
+++ b/sphinx/ext/napoleon/docstring.py
@@ -0,0 +1,1363 @@
+"""Classes for docstring parsing and formatting."""
+
+from __future__ import annotations
+
+import collections
+import contextlib
+import inspect
+import re
+from functools import partial
+from typing import TYPE_CHECKING, Any, Callable
+
+from sphinx.locale import _, __
+from sphinx.util import logging
+from sphinx.util.typing import get_type_hints, stringify_annotation
+
+if TYPE_CHECKING:
+ from sphinx.application import Sphinx
+ from sphinx.config import Config as SphinxConfig
+
+logger = logging.getLogger(__name__)
+
# Matches any reST directive marker, e.g. ``.. note::``.
_directive_regex = re.compile(r'\.\. \S+::')
# A Google-style section header: a word (possibly indented) ending in a colon.
_google_section_regex = re.compile(r'^(\s|\w)+:\s*$')
# A Google-style typed argument, e.g. ``name (int)`` -> groups (name, type).
_google_typed_arg_regex = re.compile(r'(.+?)\(\s*(.*[^\s]+)\s*\)')
# A numpy-style section underline (two or more punctuation characters).
_numpy_section_regex = re.compile(r'^[=\-`:\'"~^_*+#<>]{2,}\s*$')
# A single ``:`` that is not part of a ``::`` pair.
_single_colon_regex = re.compile(r'(?<!:):(?!:)')
# Spans that must not be split on colons: xrefs, literals, :meta: fields,
# and hyperlink-style references.
_xref_or_code_regex = re.compile(
    r'((?::(?:[a-zA-Z0-9]+[\-_+:.])*[a-zA-Z0-9]+:`.+?`)|'
    r'(?:``.+?``)|'
    r'(?::meta .+:.*)|'
    r'(?:`.+?\s*(?<!\x00)<.*?>`))')
# A reST cross-reference, with or without an explicit role prefix.
_xref_regex = re.compile(
    r'(?:(?::(?:[a-zA-Z0-9]+[\-_+:.])*[a-zA-Z0-9]+:)?`.+?`)',
)
# A reST bullet list item (``*``, ``+`` or ``-``).
_bullet_list_regex = re.compile(r'^(\*|\+|\-)(\s+\S|\s*$)')
# A reST enumerated list item: number/letter/roman numeral/#, with either
# surrounding parentheses or a trailing period.
_enumerated_list_regex = re.compile(
    r'^(?P<paren>\()?'
    r'(\d+|#|[ivxlcdm]+|[IVXLCDM]+|[a-zA-Z])'
    r'(?(paren)\)|\.)(\s+\S|\s*$)')
# Tokenizer for numpy type specs: delimiters (``or``, ``of``, ``to``, ``and``,
# commas, colons), braces, and quoted string literals.
_token_regex = re.compile(
    r"(,\sor\s|\sor\s|\sof\s|:\s|\sto\s|,\sand\s|\sand\s|,\s"
    r"|[{]|[}]"
    r'|"(?:\\"|[^"])*"'
    r"|'(?:\\'|[^'])*')",
)
# The keyword ``default`` followed by a non-word character (e.g. ``default: 0``).
_default_regex = re.compile(
    r"^default[^_0-9A-Za-z].*$",
)
# Built-in singletons referenced with :obj: instead of :class:.
_SINGLETONS = ("None", "True", "False", "Ellipsis")
+
+
class Deque(collections.deque):
    """A deque with non-raising peeking and a ``next()`` method.

    Mimics ``pockets.iterators.modify_iter``: `.Deque.get` peeks at an
    arbitrary position without raising, and `.Deque.next` pops from the
    left, raising :exc:`StopIteration` when the deque is exhausted.
    """

    # Unique marker returned by get() for out-of-range positions.
    sentinel = object()

    def get(self, n: int) -> Any:
        """Return element *n*, or ``self.sentinel`` if *n* is past the end."""
        if n < len(self):
            return self[n]
        return self.sentinel

    def next(self) -> Any:
        """Pop and return the leftmost element; raise StopIteration if empty."""
        if not self:
            raise StopIteration
        return super().popleft()
+
+
+def _convert_type_spec(_type: str, translations: dict[str, str] | None = None) -> str:
+ """Convert type specification to reference in reST."""
+ if translations is not None and _type in translations:
+ return translations[_type]
+ if _type == 'None':
+ return ':py:obj:`None`'
+ return f':py:class:`{_type}`'
+
+
class GoogleDocstring:
    """Convert Google style docstrings to reStructuredText.

    Parameters
    ----------
    docstring : :obj:`str` or :obj:`list` of :obj:`str`
        The docstring to parse, given either as a string or split into
        individual lines.
    config: :obj:`sphinx.ext.napoleon.Config` or :obj:`sphinx.config.Config`
        The configuration settings to use. If not given, defaults to the
        config object on `app`; or if `app` is not given defaults to the
        a new :class:`sphinx.ext.napoleon.Config` object.


    Other Parameters
    ----------------
    app : :class:`sphinx.application.Sphinx`, optional
        Application object representing the Sphinx process.
    what : :obj:`str`, optional
        A string specifying the type of the object to which the docstring
        belongs. Valid values: "module", "class", "exception", "function",
        "method", "attribute".
    name : :obj:`str`, optional
        The fully qualified name of the object.
    obj : module, class, exception, function, method, or attribute
        The object to which the docstring belongs.
    options : :class:`sphinx.ext.autodoc.Options`, optional
        The options given to the directive: an object with attributes
        inherited_members, undoc_members, show_inheritance and no_index that
        are True if the flag option of same name was given to the auto
        directive.


    Example
    -------
    >>> from sphinx.ext.napoleon import Config
    >>> config = Config(napoleon_use_param=True, napoleon_use_rtype=True)
    >>> docstring = '''One line summary.
    ...
    ... Extended description.
    ...
    ... Args:
    ...   arg1(int): Description of `arg1`
    ...   arg2(str): Description of `arg2`
    ... Returns:
    ...   str: Description of return value.
    ... '''
    >>> print(GoogleDocstring(docstring, config))
    One line summary.
    <BLANKLINE>
    Extended description.
    <BLANKLINE>
    :param arg1: Description of `arg1`
    :type arg1: int
    :param arg2: Description of `arg2`
    :type arg2: str
    <BLANKLINE>
    :returns: Description of return value.
    :rtype: str
    <BLANKLINE>

    """

    # Matches an optional ``:role:`name``` cross-reference or a bare
    # (possibly dotted) name; used to extract exception names in Raises.
    _name_rgx = re.compile(r"^\s*((?::(?P<role>\S+):)?`(?P<name>~?[a-zA-Z0-9_.-]+)`|"
                           r" (?P<name2>~?[a-zA-Z0-9_.-]+))\s*", re.X)

    def __init__(
        self,
        docstring: str | list[str],
        config: SphinxConfig | None = None,
        app: Sphinx | None = None,
        what: str = '',
        name: str = '',
        obj: Any = None,
        options: Any = None,
    ) -> None:
        """Store context, build the section dispatch table, and parse."""
        self._app = app
        # Config resolution order: explicit config > app.config > napoleon defaults.
        if config:
            self._config = config
        elif app:
            self._config = app.config
        else:
            from sphinx.ext.napoleon import Config

            self._config = Config()  # type: ignore[assignment]

        # Infer the object kind when the caller did not say what it is.
        if not what:
            if inspect.isclass(obj):
                what = 'class'
            elif inspect.ismodule(obj):
                what = 'module'
            elif callable(obj):
                what = 'function'
            else:
                what = 'object'

        self._what = what
        self._name = name
        self._obj = obj
        self._opt = options
        if isinstance(docstring, str):
            lines = docstring.splitlines()
        else:
            lines = docstring
        self._lines = Deque(map(str.rstrip, lines))
        self._parsed_lines: list[str] = []
        self._is_in_section = False
        self._section_indent = 0
        # Subclasses (e.g. NumpyDocstring) may pre-set these before calling
        # super().__init__(); only install defaults when they have not.
        if not hasattr(self, '_directive_sections'):
            self._directive_sections: list[str] = []
        if not hasattr(self, '_sections'):
            # Maps lower-cased section titles to their parser callables.
            self._sections: dict[str, Callable] = {
                'args': self._parse_parameters_section,
                'arguments': self._parse_parameters_section,
                'attention': partial(self._parse_admonition, 'attention'),
                'attributes': self._parse_attributes_section,
                'caution': partial(self._parse_admonition, 'caution'),
                'danger': partial(self._parse_admonition, 'danger'),
                'error': partial(self._parse_admonition, 'error'),
                'example': self._parse_examples_section,
                'examples': self._parse_examples_section,
                'hint': partial(self._parse_admonition, 'hint'),
                'important': partial(self._parse_admonition, 'important'),
                'keyword args': self._parse_keyword_arguments_section,
                'keyword arguments': self._parse_keyword_arguments_section,
                'methods': self._parse_methods_section,
                'note': partial(self._parse_admonition, 'note'),
                'notes': self._parse_notes_section,
                'other parameters': self._parse_other_parameters_section,
                'parameters': self._parse_parameters_section,
                'receive': self._parse_receives_section,
                'receives': self._parse_receives_section,
                'return': self._parse_returns_section,
                'returns': self._parse_returns_section,
                'raise': self._parse_raises_section,
                'raises': self._parse_raises_section,
                'references': self._parse_references_section,
                'see also': self._parse_see_also_section,
                'tip': partial(self._parse_admonition, 'tip'),
                'todo': partial(self._parse_admonition, 'todo'),
                'warning': partial(self._parse_admonition, 'warning'),
                'warnings': partial(self._parse_admonition, 'warning'),
                'warn': self._parse_warns_section,
                'warns': self._parse_warns_section,
                'yield': self._parse_yields_section,
                'yields': self._parse_yields_section,
            }

        self._load_custom_sections()

        self._parse()

    def __str__(self) -> str:
        """Return the parsed docstring in reStructuredText format.

        Returns
        -------
        unicode
            Unicode version of the docstring.

        """
        return '\n'.join(self.lines())

    def lines(self) -> list[str]:
        """Return the parsed lines of the docstring in reStructuredText format.

        Returns
        -------
        list(str)
            The lines of the docstring in a list.

        """
        return self._parsed_lines

    def _consume_indented_block(self, indent: int = 1) -> list[str]:
        """Consume lines until a section break or a dedent below *indent*."""
        lines = []
        line = self._lines.get(0)
        while (
            not self._is_section_break() and
            (not line or self._is_indented(line, indent))
        ):
            lines.append(self._lines.next())
            line = self._lines.get(0)
        return lines

    def _consume_contiguous(self) -> list[str]:
        """Consume non-empty lines up to the next blank line or section header."""
        lines = []
        while (self._lines and
               self._lines.get(0) and
               not self._is_section_header()):
            lines.append(self._lines.next())
        return lines

    def _consume_empty(self) -> list[str]:
        """Consume and return any leading blank lines."""
        lines = []
        line = self._lines.get(0)
        while self._lines and not line:
            lines.append(self._lines.next())
            line = self._lines.get(0)
        return lines

    def _consume_field(self, parse_type: bool = True, prefer_type: bool = False,
                       ) -> tuple[str, str, list[str]]:
        """Consume one field entry; return ``(name, type, description lines)``."""
        line = self._lines.next()

        before, colon, after = self._partition_field_on_colon(line)
        _name, _type, _desc = before, '', after

        if parse_type:
            # ``name (type)`` form: split the parenthesized type off the name.
            match = _google_typed_arg_regex.match(before)
            if match:
                _name = match.group(1).strip()
                _type = match.group(2)

        _name = self._escape_args_and_kwargs(_name)

        if prefer_type and not _type:
            # e.g. Raises sections list a bare type where a name would be.
            _type, _name = _name, _type

        if _type and self._config.napoleon_preprocess_types:
            _type = _convert_type_spec(_type, self._config.napoleon_type_aliases or {})

        indent = self._get_indent(line) + 1
        # Continuation lines are re-parsed recursively so nested sections
        # inside a description are handled too.
        _descs = [_desc] + self._dedent(self._consume_indented_block(indent))
        _descs = self.__class__(_descs, self._config).lines()
        return _name, _type, _descs

    def _consume_fields(self, parse_type: bool = True, prefer_type: bool = False,
                        multiple: bool = False) -> list[tuple[str, str, list[str]]]:
        """Consume all fields of the current section.

        With ``multiple=True``, ``x, y : int`` expands to one entry per name.
        """
        self._consume_empty()
        fields = []
        while not self._is_section_break():
            _name, _type, _desc = self._consume_field(parse_type, prefer_type)
            if multiple and _name:
                for name in _name.split(","):
                    fields.append((name.strip(), _type, _desc))
            elif _name or _type or _desc:
                fields.append((_name, _type, _desc))
        return fields

    def _consume_inline_attribute(self) -> tuple[str, list[str]]:
        """Consume a one-line attribute docstring; return ``(type, desc)``."""
        line = self._lines.next()
        _type, colon, _desc = self._partition_field_on_colon(line)
        if not colon or not _desc:
            # No ``type: desc`` split - treat the whole line as description.
            _type, _desc = _desc, _type
            _desc += colon
        _descs = [_desc] + self._dedent(self._consume_to_end())
        _descs = self.__class__(_descs, self._config).lines()
        return _type, _descs

    def _consume_returns_section(self, preprocess_types: bool = False,
                                 ) -> list[tuple[str, str, list[str]]]:
        """Consume a Returns/Yields-style section as a single field."""
        lines = self._dedent(self._consume_to_next_section())
        if lines:
            before, colon, after = self._partition_field_on_colon(lines[0])
            _name, _type, _desc = '', '', lines

            if colon:
                if after:
                    _desc = [after] + lines[1:]
                else:
                    _desc = lines[1:]

                _type = before

            if (_type and preprocess_types and
                    self._config.napoleon_preprocess_types):
                _type = _convert_type_spec(_type, self._config.napoleon_type_aliases or {})

            _desc = self.__class__(_desc, self._config).lines()
            return [(_name, _type, _desc)]
        else:
            return []

    def _consume_usage_section(self) -> list[str]:
        """Consume a Usage section verbatim (dedented)."""
        lines = self._dedent(self._consume_to_next_section())
        return lines

    def _consume_section_header(self) -> str:
        """Consume the section header line, stripping colons off known titles."""
        section = self._lines.next()
        stripped_section = section.strip(':')
        if stripped_section.lower() in self._sections:
            section = stripped_section
        return section

    def _consume_to_end(self) -> list[str]:
        """Consume every remaining line."""
        lines = []
        while self._lines:
            lines.append(self._lines.next())
        return lines

    def _consume_to_next_section(self) -> list[str]:
        """Consume lines up to (but not including) the next section header."""
        self._consume_empty()
        lines = []
        while not self._is_section_break():
            lines.append(self._lines.next())
        return lines + self._consume_empty()

    def _dedent(self, lines: list[str], full: bool = False) -> list[str]:
        """Remove common leading whitespace (or all of it when *full*)."""
        if full:
            return [line.lstrip() for line in lines]
        else:
            min_indent = self._get_min_indent(lines)
            return [line[min_indent:] for line in lines]

    def _escape_args_and_kwargs(self, name: str) -> str:
        """Escape ``*args``/``**kwargs`` stars (and trailing ``_`` if configured)."""
        if name.endswith('_') and getattr(self._config, 'strip_signature_backslash', False):
            name = name[:-1] + r'\_'

        if name[:2] == '**':
            return r'\*\*' + name[2:]
        elif name[:1] == '*':
            return r'\*' + name[1:]
        else:
            return name

    def _fix_field_desc(self, desc: list[str]) -> list[str]:
        """Adjust a field description so lists and literal blocks render."""
        if self._is_list(desc):
            desc = [''] + desc
        elif desc[0].endswith('::'):
            desc_block = desc[1:]
            indent = self._get_indent(desc[0])
            block_indent = self._get_initial_indent(desc_block)
            if block_indent > indent:
                # Already indented relative to the ``::`` line; keep as-is.
                desc = [''] + desc
            else:
                # Indent the literal block under the ``::`` introducer.
                desc = ['', desc[0]] + self._indent(desc_block, 4)
        return desc

    def _format_admonition(self, admonition: str, lines: list[str]) -> list[str]:
        """Render *lines* as a reST admonition directive."""
        lines = self._strip_empty(lines)
        if len(lines) == 1:
            # Single line fits on the directive line itself.
            return [f'.. {admonition}:: {lines[0].strip()}', '']
        elif lines:
            lines = self._indent(self._dedent(lines), 3)
            return ['.. %s::' % admonition, ''] + lines + ['']
        else:
            return ['.. %s::' % admonition, '']

    def _format_block(
        self, prefix: str, lines: list[str], padding: str | None = None,
    ) -> list[str]:
        """Prefix the first line with *prefix*, continuation lines with padding."""
        if lines:
            if padding is None:
                padding = ' ' * len(prefix)
            result_lines = []
            for i, line in enumerate(lines):
                if i == 0:
                    result_lines.append((prefix + line).rstrip())
                elif line:
                    result_lines.append(padding + line)
                else:
                    result_lines.append('')
            return result_lines
        else:
            return [prefix]

    def _format_docutils_params(self, fields: list[tuple[str, str, list[str]]],
                                field_role: str = 'param', type_role: str = 'type',
                                ) -> list[str]:
        """Render fields as ``:param name:`` / ``:type name:`` field lists."""
        lines = []
        for _name, _type, _desc in fields:
            _desc = self._strip_empty(_desc)
            if any(_desc):
                _desc = self._fix_field_desc(_desc)
                field = f':{field_role} {_name}: '
                lines.extend(self._format_block(field, _desc))
            else:
                lines.append(f':{field_role} {_name}:')

            if _type:
                lines.append(f':{type_role} {_name}: {_type}')
        return lines + ['']

    def _format_field(self, _name: str, _type: str, _desc: list[str]) -> list[str]:
        """Render one field as ``**name** (*type*) -- description`` lines."""
        _desc = self._strip_empty(_desc)
        has_desc = any(_desc)
        separator = ' -- ' if has_desc else ''
        if _name:
            if _type:
                # Types containing backticks are already markup; don't re-emphasize.
                if '`' in _type:
                    field = f'**{_name}** ({_type}){separator}'
                else:
                    field = f'**{_name}** (*{_type}*){separator}'
            else:
                field = f'**{_name}**{separator}'
        elif _type:
            if '`' in _type:
                field = f'{_type}{separator}'
            else:
                field = f'*{_type}*{separator}'
        else:
            field = ''

        if has_desc:
            _desc = self._fix_field_desc(_desc)
            if _desc[0]:
                return [field + _desc[0]] + _desc[1:]
            else:
                return [field] + _desc
        else:
            return [field]

    def _format_fields(self, field_type: str, fields: list[tuple[str, str, list[str]]],
                       ) -> list[str]:
        """Render fields under one ``:field_type:`` label, bulleted if several."""
        field_type = ':%s:' % field_type.strip()
        padding = ' ' * len(field_type)
        multi = len(fields) > 1
        lines: list[str] = []
        for _name, _type, _desc in fields:
            field = self._format_field(_name, _type, _desc)
            if multi:
                if lines:
                    lines.extend(self._format_block(padding + ' * ', field))
                else:
                    lines.extend(self._format_block(field_type + ' * ', field))
            else:
                lines.extend(self._format_block(field_type + ' ', field))
        if lines and lines[-1]:
            lines.append('')
        return lines

    def _get_current_indent(self, peek_ahead: int = 0) -> int:
        """Return the indent of the next non-empty line, or 0 at end of input."""
        line = self._lines.get(peek_ahead)
        while line is not self._lines.sentinel:
            if line:
                return self._get_indent(line)
            peek_ahead += 1
            line = self._lines.get(peek_ahead)
        return 0

    def _get_indent(self, line: str) -> int:
        """Return the number of leading whitespace characters of *line*."""
        for i, s in enumerate(line):
            if not s.isspace():
                return i
        return len(line)

    def _get_initial_indent(self, lines: list[str]) -> int:
        """Return the indent of the first non-empty line in *lines*."""
        for line in lines:
            if line:
                return self._get_indent(line)
        return 0

    def _get_min_indent(self, lines: list[str]) -> int:
        """Return the smallest indent over non-empty lines (0 if none)."""
        min_indent = None
        for line in lines:
            if line:
                indent = self._get_indent(line)
                if min_indent is None or indent < min_indent:
                    min_indent = indent
        return min_indent or 0

    def _indent(self, lines: list[str], n: int = 4) -> list[str]:
        """Indent every line by *n* spaces."""
        return [(' ' * n) + line for line in lines]

    def _is_indented(self, line: str, indent: int = 1) -> bool:
        """Return True if *line* starts with at least *indent* whitespace chars."""
        for i, s in enumerate(line):  # noqa: SIM110
            if i >= indent:
                return True
            elif not s.isspace():
                return False
        return False

    def _is_list(self, lines: list[str]) -> bool:
        """Return True if *lines* look like a bullet/enumerated/definition list."""
        if not lines:
            return False
        if _bullet_list_regex.match(lines[0]):
            return True
        if _enumerated_list_regex.match(lines[0]):
            return True
        if len(lines) < 2 or lines[0].endswith('::'):
            return False
        # Definition-list heuristic: the continuation is indented deeper.
        indent = self._get_indent(lines[0])
        next_indent = indent
        for line in lines[1:]:
            if line:
                next_indent = self._get_indent(line)
                break
        return next_indent > indent

    def _is_section_header(self) -> bool:
        """Return True if the next line starts a known section or directive."""
        section = self._lines.get(0).lower()
        match = _google_section_regex.match(section)
        if match and section.strip(':') in self._sections:
            # A header only counts if the following content is indented deeper.
            header_indent = self._get_indent(section)
            section_indent = self._get_current_indent(peek_ahead=1)
            return section_indent > header_indent
        elif self._directive_sections:
            if _directive_regex.match(section):
                for directive_section in self._directive_sections:
                    if section.startswith(directive_section):
                        return True
        return False

    def _is_section_break(self) -> bool:
        """Return True at end of input, a new header, or a dedent out of section."""
        line = self._lines.get(0)
        return (not self._lines or
                self._is_section_header() or
                (self._is_in_section and
                 line and
                 not self._is_indented(line, self._section_indent)))

    def _load_custom_sections(self) -> None:
        """Register user-defined sections from ``napoleon_custom_sections``."""
        if self._config.napoleon_custom_sections is not None:
            for entry in self._config.napoleon_custom_sections:
                if isinstance(entry, str):
                    # if entry is just a label, add to sections list,
                    # using generic section logic.
                    self._sections[entry.lower()] = self._parse_custom_generic_section
                else:
                    # otherwise, assume entry is container;
                    if entry[1] == "params_style":
                        self._sections[entry[0].lower()] = \
                            self._parse_custom_params_style_section
                    elif entry[1] == "returns_style":
                        self._sections[entry[0].lower()] = \
                            self._parse_custom_returns_style_section
                    else:
                        # [0] is new section, [1] is the section to alias.
                        # in the case of key mismatch, just handle as generic section.
                        self._sections[entry[0].lower()] = \
                            self._sections.get(entry[1].lower(),
                                               self._parse_custom_generic_section)

    def _parse(self) -> None:
        """Drive the whole parse, filling ``self._parsed_lines``."""
        self._parsed_lines = self._consume_empty()

        # Attribute-like objects use the compact inline form instead of sections.
        if self._name and self._what in ('attribute', 'data', 'property'):
            res: list[str] = []
            with contextlib.suppress(StopIteration):
                res = self._parse_attribute_docstring()

            self._parsed_lines.extend(res)
            return

        while self._lines:
            if self._is_section_header():
                try:
                    section = self._consume_section_header()
                    self._is_in_section = True
                    self._section_indent = self._get_current_indent()
                    if _directive_regex.match(section):
                        lines = [section] + self._consume_to_next_section()
                    else:
                        lines = self._sections[section.lower()](section)
                finally:
                    # Always leave section state clean, even if a parser raises.
                    self._is_in_section = False
                    self._section_indent = 0
            else:
                if not self._parsed_lines:
                    lines = self._consume_contiguous() + self._consume_empty()
                else:
                    lines = self._consume_to_next_section()
            self._parsed_lines.extend(lines)

    def _parse_admonition(self, admonition: str, section: str) -> list[str]:
        """Parse a section body into a ``.. admonition::`` directive."""
        lines = self._consume_to_next_section()
        return self._format_admonition(admonition, lines)

    def _parse_attribute_docstring(self) -> list[str]:
        """Parse the inline ``type: description`` attribute docstring form."""
        _type, _desc = self._consume_inline_attribute()
        lines = self._format_field('', '', _desc)
        if _type:
            lines.extend(['', ':type: %s' % _type])
        return lines

    def _parse_attributes_section(self, section: str) -> list[str]:
        """Parse an Attributes section into ``:ivar:`` or ``.. attribute::``."""
        lines = []
        for _name, _type, _desc in self._consume_fields():
            if not _type:
                _type = self._lookup_annotation(_name)
            if self._config.napoleon_use_ivar:
                field = ':ivar %s: ' % _name
                lines.extend(self._format_block(field, _desc))
                if _type:
                    lines.append(f':vartype {_name}: {_type}')
            else:
                lines.append('.. attribute:: ' + _name)
                if self._opt:
                    if 'no-index' in self._opt or 'noindex' in self._opt:
                        lines.append('   :no-index:')
                lines.append('')

                fields = self._format_field('', '', _desc)
                lines.extend(self._indent(fields, 3))
                if _type:
                    lines.append('')
                    lines.extend(self._indent([':type: %s' % _type], 3))
                lines.append('')
        if self._config.napoleon_use_ivar:
            lines.append('')
        return lines

    def _parse_examples_section(self, section: str) -> list[str]:
        """Parse an Example/Examples section with a localized label."""
        labels = {
            'example': _('Example'),
            'examples': _('Examples'),
        }
        use_admonition = self._config.napoleon_use_admonition_for_examples
        label = labels.get(section.lower(), section)
        return self._parse_generic_section(label, use_admonition)

    def _parse_custom_generic_section(self, section: str) -> list[str]:
        """Parse a user-defined section with generic section logic."""
        # for now, no admonition for simple custom sections
        return self._parse_generic_section(section, False)

    def _parse_custom_params_style_section(self, section: str) -> list[str]:
        """Parse a user-defined section with Parameters-style fields."""
        return self._format_fields(section, self._consume_fields())

    def _parse_custom_returns_style_section(self, section: str) -> list[str]:
        """Parse a user-defined section with Returns-style fields."""
        fields = self._consume_returns_section(preprocess_types=True)
        return self._format_fields(section, fields)

    def _parse_usage_section(self, section: str) -> list[str]:
        """Parse a Usage section into a python code block."""
        header = ['.. rubric:: Usage:', '']
        block = ['.. code-block:: python', '']
        lines = self._consume_usage_section()
        lines = self._indent(lines, 3)
        return header + block + lines + ['']

    def _parse_generic_section(self, section: str, use_admonition: bool) -> list[str]:
        """Parse any section into a rubric or admonition with its body."""
        lines = self._strip_empty(self._consume_to_next_section())
        lines = self._dedent(lines)
        if use_admonition:
            header = '.. admonition:: %s' % section
            lines = self._indent(lines, 3)
        else:
            header = '.. rubric:: %s' % section
        if lines:
            return [header, ''] + lines + ['']
        else:
            return [header, '']

    def _parse_keyword_arguments_section(self, section: str) -> list[str]:
        """Parse Keyword Arguments into ``:keyword:`` fields or a field block."""
        fields = self._consume_fields()
        if self._config.napoleon_use_keyword:
            return self._format_docutils_params(
                fields,
                field_role="keyword",
                type_role="kwtype")
        else:
            return self._format_fields(_('Keyword Arguments'), fields)

    def _parse_methods_section(self, section: str) -> list[str]:
        """Parse a Methods section into ``.. method::`` directives."""
        lines: list[str] = []
        for _name, _type, _desc in self._consume_fields(parse_type=False):
            lines.append('.. method:: %s' % _name)
            if self._opt:
                if 'no-index' in self._opt or 'noindex' in self._opt:
                    lines.append('   :no-index:')
            if _desc:
                lines.extend([''] + self._indent(_desc, 3))
            lines.append('')
        return lines

    def _parse_notes_section(self, section: str) -> list[str]:
        """Parse a Notes section, honoring the admonition config flag."""
        use_admonition = self._config.napoleon_use_admonition_for_notes
        return self._parse_generic_section(_('Notes'), use_admonition)

    def _parse_other_parameters_section(self, section: str) -> list[str]:
        """Parse Other Parameters like Parameters, per config."""
        if self._config.napoleon_use_param:
            # Allow to declare multiple parameters at once (ex: x, y: int)
            fields = self._consume_fields(multiple=True)
            return self._format_docutils_params(fields)
        else:
            fields = self._consume_fields()
            return self._format_fields(_('Other Parameters'), fields)

    def _parse_parameters_section(self, section: str) -> list[str]:
        """Parse Parameters into ``:param:`` fields or a ``:Parameters:`` block."""
        if self._config.napoleon_use_param:
            # Allow to declare multiple parameters at once (ex: x, y: int)
            fields = self._consume_fields(multiple=True)
            return self._format_docutils_params(fields)
        else:
            fields = self._consume_fields()
            return self._format_fields(_('Parameters'), fields)

    def _parse_raises_section(self, section: str) -> list[str]:
        """Parse a Raises section into ``:raises Type:`` fields."""
        fields = self._consume_fields(parse_type=False, prefer_type=True)
        lines: list[str] = []
        for _name, _type, _desc in fields:
            m = self._name_rgx.match(_type)
            if m and m.group('name'):
                _type = m.group('name')
            elif _xref_regex.match(_type):
                # Strip the backticks of a bare `Type` reference.
                pos = _type.find('`')
                _type = _type[pos + 1:-1]
            _type = ' ' + _type if _type else ''
            _desc = self._strip_empty(_desc)
            _descs = ' ' + '\n    '.join(_desc) if any(_desc) else ''
            lines.append(f':raises{_type}:{_descs}')
        if lines:
            lines.append('')
        return lines

    def _parse_receives_section(self, section: str) -> list[str]:
        """Parse a Receives section (generator ``.send()`` values)."""
        if self._config.napoleon_use_param:
            # Allow to declare multiple parameters at once (ex: x, y: int)
            fields = self._consume_fields(multiple=True)
            return self._format_docutils_params(fields)
        else:
            fields = self._consume_fields()
            return self._format_fields(_('Receives'), fields)

    def _parse_references_section(self, section: str) -> list[str]:
        """Parse a References section, honoring the admonition config flag."""
        use_admonition = self._config.napoleon_use_admonition_for_references
        return self._parse_generic_section(_('References'), use_admonition)

    def _parse_returns_section(self, section: str) -> list[str]:
        """Parse Returns into ``:returns:`` (+ ``:rtype:``) fields."""
        fields = self._consume_returns_section()
        multi = len(fields) > 1
        # With several return entries a single :rtype: would be ambiguous.
        use_rtype = False if multi else self._config.napoleon_use_rtype
        lines: list[str] = []

        for _name, _type, _desc in fields:
            if use_rtype:
                field = self._format_field(_name, '', _desc)
            else:
                field = self._format_field(_name, _type, _desc)

            if multi:
                if lines:
                    lines.extend(self._format_block('          * ', field))
                else:
                    lines.extend(self._format_block(':returns: * ', field))
            else:
                if any(field):  # only add :returns: if there's something to say
                    lines.extend(self._format_block(':returns: ', field))
                if _type and use_rtype:
                    lines.extend([':rtype: %s' % _type, ''])
        if lines and lines[-1]:
            lines.append('')
        return lines

    def _parse_see_also_section(self, section: str) -> list[str]:
        """Parse See Also into a ``.. seealso::`` admonition."""
        return self._parse_admonition('seealso', section)

    def _parse_warns_section(self, section: str) -> list[str]:
        """Parse a Warns section into a ``:Warns:`` field block."""
        return self._format_fields(_('Warns'), self._consume_fields())

    def _parse_yields_section(self, section: str) -> list[str]:
        """Parse Yields into a ``:Yields:`` field block."""
        fields = self._consume_returns_section(preprocess_types=True)
        return self._format_fields(_('Yields'), fields)

    def _partition_field_on_colon(self, line: str) -> tuple[str, str, str]:
        """Split *line* on the first single colon outside xrefs/literals."""
        before_colon = []
        after_colon = []
        colon = ''
        found_colon = False
        # _xref_or_code_regex.split alternates plain text (even indices) with
        # protected spans (odd indices); only plain text may contain the colon.
        for i, source in enumerate(_xref_or_code_regex.split(line)):
            if found_colon:
                after_colon.append(source)
            else:
                m = _single_colon_regex.search(source)
                if (i % 2) == 0 and m:
                    found_colon = True
                    colon = source[m.start(): m.end()]
                    before_colon.append(source[:m.start()])
                    after_colon.append(source[m.end():])
                else:
                    before_colon.append(source)

        return ("".join(before_colon).strip(),
                colon,
                "".join(after_colon).strip())

    def _strip_empty(self, lines: list[str]) -> list[str]:
        """Trim leading and trailing blank lines from *lines*."""
        if lines:
            start = -1
            for i, line in enumerate(lines):
                if line:
                    start = i
                    break
            if start == -1:
                lines = []
            end = -1
            for i in reversed(range(len(lines))):
                line = lines[i]
                if line:
                    end = i
                    break
            if start > 0 or end + 1 < len(lines):
                lines = lines[start:end + 1]
        return lines

    def _lookup_annotation(self, _name: str) -> str:
        """Return the stringified type annotation for attribute *_name*, or ''."""
        if self._config.napoleon_attr_annotations:
            if self._what in ("module", "class", "exception") and self._obj:
                # cache the class annotations
                if not hasattr(self, "_annotations"):
                    # NOTE(review): when the config defines autodoc_type_aliases,
                    # getattr returns that very dict and update() mutates shared
                    # config state — consider copying first. TODO confirm.
                    localns = getattr(self._config, "autodoc_type_aliases", {})
                    localns.update(getattr(
                        self._config, "napoleon_type_aliases", {},
                    ) or {})
                    self._annotations = get_type_hints(self._obj, None, localns)
                if _name in self._annotations:
                    return stringify_annotation(self._annotations[_name],
                                                'fully-qualified-except-typing')
        # No annotation found
        return ""
+
+
def _recombine_set_tokens(tokens: list[str]) -> list[str]:
    """Merge tokens belonging to a ``{...}`` value set back into one token.

    The tokenizer splits on braces and commas, so a literal set such as
    ``{1, 2, 3}`` arrives as several tokens; this re-joins them, stopping
    early at the ``optional``/``default`` keywords.
    """
    token_queue = collections.deque(tokens)
    keywords = ("optional", "default")

    def takewhile_set(tokens):
        # Yield tokens until the braces balance again, holding each comma
        # back one step so a trailing comma before a keyword is not consumed.
        open_braces = 0
        previous_token = None
        while True:
            try:
                token = tokens.popleft()
            except IndexError:
                break

            if token == ", ":
                previous_token = token
                continue

            if not token.strip():
                continue

            if token in keywords:
                # Keyword ends the set: push it (and any held comma) back.
                tokens.appendleft(token)
                if previous_token is not None:
                    tokens.appendleft(previous_token)
                break

            if previous_token is not None:
                yield previous_token
                previous_token = None

            if token == "{":
                open_braces += 1
            elif token == "}":
                open_braces -= 1

            yield token

            if open_braces == 0:
                break

    def combine_set(tokens):
        # Pass tokens through unchanged, but on ``{`` re-join the whole set.
        while True:
            try:
                token = tokens.popleft()
            except IndexError:
                break

            if token == "{":
                tokens.appendleft("{")
                yield "".join(takewhile_set(tokens))
            else:
                yield token

    return list(combine_set(token_queue))
+
+
def _tokenize_type_spec(spec: str) -> list[str]:
    """Split a numpy type specification into lexical tokens.

    Empty fragments are dropped; a ``default <value>`` chunk is split into
    the keyword, a space, and the value.
    """
    def postprocess(item):
        if not _default_regex.match(item):
            return [item]
        # can't be separated by anything other than a single space for now:
        # item[:7] is the literal "default", item[8:] is the value after it.
        return [item[:7], " ", item[8:]]

    tokens: list[str] = []
    for raw_token in _token_regex.split(spec):
        tokens.extend(piece for piece in postprocess(raw_token) if piece)
    return tokens
+
+
def _token_type(token: str, location: str | None = None) -> str:
    """Classify one type-spec token as delimiter/literal/control/reference/obj.

    Malformed literals (unbalanced braces or quotes) are still classified as
    ``literal`` but emit a warning at *location*.
    """
    def is_numeric(tok):
        try:
            # use complex to make sure every numeric value is detected as literal
            complex(tok)
        except ValueError:
            return False
        return True

    if token.startswith(" ") or token.endswith(" "):
        return "delimiter"

    if (is_numeric(token) or
            (token.startswith("{") and token.endswith("}")) or
            (token.startswith('"') and token.endswith('"')) or
            (token.startswith("'") and token.endswith("'"))):
        return "literal"

    # Unbalanced set/string literals: warn, but still treat as a literal.
    malformed_msg = None
    if token.startswith("{"):
        malformed_msg = __("invalid value set (missing closing brace): %s")
    elif token.endswith("}"):
        malformed_msg = __("invalid value set (missing opening brace): %s")
    elif token.startswith(("'", '"')):
        malformed_msg = __("malformed string literal (missing closing quote): %s")
    elif token.endswith(("'", '"')):
        malformed_msg = __("malformed string literal (missing opening quote): %s")
    if malformed_msg is not None:
        logger.warning(malformed_msg, token, location=location)
        return "literal"

    if token in ("optional", "default"):
        # default is not a official keyword (yet) but supported by the
        # reference implementation (numpydoc) and widely used
        return "control"
    if _xref_regex.match(token):
        return "reference"
    return "obj"
+
+
def _convert_numpy_type_spec(
    _type: str, location: str | None = None, translations: dict | None = None,
) -> str:
    """Convert a numpy-style type specification into reST markup.

    Tokenizes *_type*, re-joins ``{...}`` value sets, classifies each token,
    and renders literals as ``code``, control words in *emphasis*, and object
    names as cross-references (via *translations* when given).
    """
    if translations is None:
        translations = {}

    def convert_obj(obj, translations, default_translation):
        translation = translations.get(obj, obj)

        # use :class: (the default) only if obj is not a standard singleton
        if translation in _SINGLETONS and default_translation == ":class:`%s`":
            default_translation = ":obj:`%s`"
        elif translation == "..." and default_translation == ":class:`%s`":
            # allow referencing the builtin ...
            default_translation = ":obj:`%s <Ellipsis>`"

        # A translation that is already an xref is emitted unchanged.
        if _xref_regex.match(translation) is None:
            translation = default_translation % translation

        return translation

    tokens = _tokenize_type_spec(_type)
    combined_tokens = _recombine_set_tokens(tokens)
    types = [
        (token, _token_type(token, location))
        for token in combined_tokens
    ]

    # One renderer per token class; delimiters and references pass through.
    converters = {
        "literal": lambda x: "``%s``" % x,
        "obj": lambda x: convert_obj(x, translations, ":class:`%s`"),
        "control": lambda x: "*%s*" % x,
        "delimiter": lambda x: x,
        "reference": lambda x: x,
    }

    converted = "".join(converters.get(type_)(token)  # type: ignore[misc]
                        for token, type_ in types)

    return converted
+
+
class NumpyDocstring(GoogleDocstring):
    """Convert NumPy style docstrings to reStructuredText.

    Parameters
    ----------
    docstring : :obj:`str` or :obj:`list` of :obj:`str`
        The docstring to parse, given either as a string or split into
        individual lines.
    config: :obj:`sphinx.ext.napoleon.Config` or :obj:`sphinx.config.Config`
        The configuration settings to use. If not given, defaults to the
        config object on `app`; or if `app` is not given defaults to
        a new :class:`sphinx.ext.napoleon.Config` object.


    Other Parameters
    ----------------
    app : :class:`sphinx.application.Sphinx`, optional
        Application object representing the Sphinx process.
    what : :obj:`str`, optional
        A string specifying the type of the object to which the docstring
        belongs. Valid values: "module", "class", "exception", "function",
        "method", "attribute".
    name : :obj:`str`, optional
        The fully qualified name of the object.
    obj : module, class, exception, function, method, or attribute
        The object to which the docstring belongs.
    options : :class:`sphinx.ext.autodoc.Options`, optional
        The options given to the directive: an object with attributes
        inherited_members, undoc_members, show_inheritance and no_index that
        are True if the flag option of same name was given to the auto
        directive.


    Example
    -------
    >>> from sphinx.ext.napoleon import Config
    >>> config = Config(napoleon_use_param=True, napoleon_use_rtype=True)
    >>> docstring = '''One line summary.
    ...
    ... Extended description.
    ...
    ... Parameters
    ... ----------
    ... arg1 : int
    ...     Description of `arg1`
    ... arg2 : str
    ...     Description of `arg2`
    ... Returns
    ... -------
    ... str
    ...     Description of return value.
    ... '''
    >>> print(NumpyDocstring(docstring, config))
    One line summary.
    <BLANKLINE>
    Extended description.
    <BLANKLINE>
    :param arg1: Description of `arg1`
    :type arg1: int
    :param arg2: Description of `arg2`
    :type arg2: str
    <BLANKLINE>
    :returns: Description of return value.
    :rtype: str
    <BLANKLINE>

    Methods
    -------
    __str__()
        Return the parsed docstring in reStructuredText format.

        Returns
        -------
        str
            UTF-8 encoded version of the docstring.

    __unicode__()
        Return the parsed docstring in reStructuredText format.

        Returns
        -------
        unicode
            Unicode version of the docstring.

    lines()
        Return the parsed lines of the docstring in reStructuredText format.

        Returns
        -------
        list(str)
            The lines of the docstring in a list.

    """
    def __init__(
        self,
        docstring: str | list[str],
        config: SphinxConfig | None = None,
        app: Sphinx | None = None,
        what: str = '',
        name: str = '',
        obj: Any = None,
        options: Any = None,
    ) -> None:
        # NumPy style treats ``.. index::`` directives as their own sections.
        self._directive_sections = ['.. index::']
        super().__init__(docstring, config, app, what, name, obj, options)

    def _get_location(self) -> str | None:
        """Return a "<filepath>:docstring of <name>" location for warnings."""
        try:
            filepath = inspect.getfile(self._obj) if self._obj is not None else None
        except TypeError:
            # inspect.getfile() raises TypeError for built-in objects.
            filepath = None
        name = self._name

        if filepath is None and name is None:
            return None
        elif filepath is None:
            filepath = ""

        return ":".join([filepath, "docstring of %s" % name])

    def _escape_args_and_kwargs(self, name: str) -> str:
        """Escape star-arg markers in a name or comma-separated name list."""
        func = super()._escape_args_and_kwargs

        if ", " in name:
            # NumPy style allows several parameter names on one line.
            return ", ".join(func(param) for param in name.split(", "))
        else:
            return func(name)

    def _consume_field(self, parse_type: bool = True, prefer_type: bool = False,
                       ) -> tuple[str, str, list[str]]:
        """Consume one "name : type" field and its indented description."""
        line = self._lines.next()
        if parse_type:
            _name, _, _type = self._partition_field_on_colon(line)
        else:
            _name, _type = line, ''
        _name, _type = _name.strip(), _type.strip()
        _name = self._escape_args_and_kwargs(_name)

        if parse_type and not _type:
            # Fall back to the type annotation on the documented object.
            _type = self._lookup_annotation(_name)

        if prefer_type and not _type:
            # Sections such as Returns may give only a type, no name.
            _type, _name = _name, _type

        if self._config.napoleon_preprocess_types:
            _type = _convert_numpy_type_spec(
                _type,
                location=self._get_location(),
                translations=self._config.napoleon_type_aliases or {},
            )

        indent = self._get_indent(line) + 1
        _desc = self._dedent(self._consume_indented_block(indent))
        # Recursively parse the description so nested markup is converted.
        _desc = self.__class__(_desc, self._config).lines()
        return _name, _type, _desc

    def _consume_returns_section(self, preprocess_types: bool = False,
                                 ) -> list[tuple[str, str, list[str]]]:
        """Consume a Returns section; bare types are allowed (no name)."""
        return self._consume_fields(prefer_type=True)

    def _consume_section_header(self) -> str:
        """Consume and return the section title, dropping its underline."""
        section = self._lines.next()
        if not _directive_regex.match(section):
            # Consume the header underline
            self._lines.next()
        return section

    def _is_section_break(self) -> bool:
        # A break is: end of input, a new section header, two blank lines,
        # or a dedent out of the current section's indentation.
        line1, line2 = self._lines.get(0), self._lines.get(1)
        return (not self._lines or
                self._is_section_header() or
                ['', ''] == [line1, line2] or
                (self._is_in_section and
                 line1 and
                 not self._is_indented(line1, self._section_indent)))

    def _is_section_header(self) -> bool:
        section, underline = self._lines.get(0), self._lines.get(1)
        section = section.lower()
        if section in self._sections and isinstance(underline, str):
            # A known section name followed by a dashed underline.
            return bool(_numpy_section_regex.match(underline))
        elif self._directive_sections:
            if _directive_regex.match(section):
                for directive_section in self._directive_sections:
                    if section.startswith(directive_section):
                        return True
            return False
        # NOTE(review): implicitly returns None (falsy) when neither branch
        # is taken and _directive_sections is empty; __init__ always sets
        # _directive_sections non-empty, so this is unreachable in practice.

    def _parse_see_also_section(self, section: str) -> list[str]:
        """Parse See Also; fall back to a plain admonition on parse failure."""
        lines = self._consume_to_next_section()
        try:
            return self._parse_numpydoc_see_also_section(lines)
        except ValueError:
            return self._format_admonition('seealso', lines)

    def _parse_numpydoc_see_also_section(self, content: list[str]) -> list[str]:
        """
        Derived from the NumpyDoc implementation of _parse_see_also.

        See Also
        --------
        func_name : Descriptive text
            continued text
        another_func_name : Descriptive text
        func_name1, func_name2, :meth:`func_name`, func_name3

        """
        items = []

        def parse_item_name(text: str) -> tuple[str, str | None]:
            """Match ':role:`name`' or 'name'"""
            m = self._name_rgx.match(text)
            if m:
                g = m.groups()
                if g[1] is None:
                    return g[3], None
                else:
                    return g[2], g[1]
            raise ValueError("%s is not a item name" % text)

        def push_item(name: str | None, rest: list[str]) -> None:
            # Flush the pending item (name + accumulated description lines).
            if not name:
                return
            name, role = parse_item_name(name)
            items.append((name, list(rest), role))
            del rest[:]

        def translate(func, description, role):
            # Apply napoleon_type_aliases to bare (role-less) names only.
            translations = self._config.napoleon_type_aliases
            if role is not None or not translations:
                return func, description, role

            translated = translations.get(func, func)
            match = self._name_rgx.match(translated)
            if not match:
                return translated, description, role

            groups = match.groupdict()
            role = groups["role"]
            new_func = groups["name"] or groups["name2"]

            return new_func, description, role

        current_func = None
        rest: list[str] = []

        for line in content:
            if not line.strip():
                continue

            m = self._name_rgx.match(line)
            if m and line[m.end():].strip().startswith(':'):
                # "name : description" -- start a new item.
                push_item(current_func, rest)
                current_func, line = line[:m.end()], line[m.end():]
                rest = [line.split(':', 1)[1].strip()]
                if not rest[0]:
                    rest = []
            elif not line.startswith(' '):
                # Unindented line: a (possibly comma-separated) list of names.
                push_item(current_func, rest)
                current_func = None
                if ',' in line:
                    for func in line.split(','):
                        if func.strip():
                            push_item(func, [])
                elif line.strip():
                    current_func = line
            elif current_func is not None:
                # Indented continuation of the current item's description.
                rest.append(line.strip())
        push_item(current_func, rest)

        if not items:
            return []

        # apply type aliases
        items = [
            translate(func, description, role)
            for func, description, role in items
        ]

        lines: list[str] = []
        last_had_desc = True
        for name, desc, role in items:
            if role:
                link = f':{role}:`{name}`'
            else:
                link = ':obj:`%s`' % name
            if desc or last_had_desc:
                lines += ['']
                lines += [link]
            else:
                # Chain consecutive description-less names on one line.
                lines[-1] += ", %s" % link
            if desc:
                lines += self._indent([' '.join(desc)])
                last_had_desc = True
            else:
                last_had_desc = False
        lines += ['']

        return self._format_admonition('seealso', lines)
diff --git a/sphinx/ext/todo.py b/sphinx/ext/todo.py
new file mode 100644
index 0000000..e540e7e
--- /dev/null
+++ b/sphinx/ext/todo.py
@@ -0,0 +1,246 @@
+"""Allow todos to be inserted into your documentation.
+
+Inclusion of todos can be switched off by a configuration variable.
+The todolist directive collects all todos of your project and lists them along
+with a backlink to the original location.
+"""
+
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, Any, cast
+
+from docutils import nodes
+from docutils.parsers.rst import directives
+from docutils.parsers.rst.directives.admonitions import BaseAdmonition
+
+import sphinx
+from sphinx import addnodes
+from sphinx.domains import Domain
+from sphinx.errors import NoUri
+from sphinx.locale import _, __
+from sphinx.util import logging, texescape
+from sphinx.util.docutils import SphinxDirective, new_document
+
+if TYPE_CHECKING:
+ from docutils.nodes import Element, Node
+
+ from sphinx.application import Sphinx
+ from sphinx.environment import BuildEnvironment
+ from sphinx.util.typing import OptionSpec
+ from sphinx.writers.html import HTML5Translator
+ from sphinx.writers.latex import LaTeXTranslator
+
+logger = logging.getLogger(__name__)
+
+
class todo_node(nodes.Admonition, nodes.Element):
    """Doctree node for a single todo entry, rendered as an admonition."""
    pass
+
+
class todolist(nodes.General, nodes.Element):
    """Placeholder node later replaced by the collected todo entries."""
    pass
+
+
class Todo(BaseAdmonition, SphinxDirective):
    """A todo entry, displayed (if configured) in the form of an admonition."""

    node_class = todo_node
    has_content = True
    required_arguments = 0
    optional_arguments = 0
    final_argument_whitespace = False
    option_spec: OptionSpec = {
        'class': directives.class_option,
        'name': directives.unchanged,
    }

    def run(self) -> list[Node]:
        """Build the todo admonition node for this directive."""
        # Fall back to a default CSS class when none was given.
        if not self.options.get('class'):
            self.options['class'] = ['admonition-todo']

        (node,) = super().run()
        if isinstance(node, nodes.system_message):
            # The base admonition failed to parse; propagate the message.
            return [node]
        if not isinstance(node, todo_node):
            raise RuntimeError  # never reached here

        # Prefix the admonition with a localised "Todo" title and register
        # the node as an explicit target so todo lists can link back to it.
        node.insert(0, nodes.title(text=_('Todo')))
        node['docname'] = self.env.docname
        self.add_name(node)
        self.set_source_info(node)
        self.state.document.note_explicit_target(node)
        return [node]
+
+
class TodoDomain(Domain):
    """Domain that collects every todo entry found in the project."""

    name = 'todo'
    label = 'todo'

    @property
    def todos(self) -> dict[str, list[todo_node]]:
        """Mapping of docname to the todo nodes found in that document."""
        return self.data.setdefault('todos', {})

    def clear_doc(self, docname: str) -> None:
        """Drop all todos recorded for *docname*."""
        self.todos.pop(docname, None)

    def merge_domaindata(self, docnames: list[str], otherdata: dict) -> None:
        """Merge todos read by a parallel worker into this environment."""
        for docname in docnames:
            self.todos[docname] = otherdata['todos'][docname]

    def process_doc(self, env: BuildEnvironment, docname: str,
                    document: nodes.document) -> None:
        """Collect todo nodes from *document*, firing todo-defined for each."""
        collected = self.todos.setdefault(docname, [])
        for node in document.findall(todo_node):
            env.app.emit('todo-defined', node)
            collected.append(node)

            if env.config.todo_emit_warnings:
                logger.warning(__("TODO entry found: %s"), node[1].astext(),
                               location=node)
+
+
class TodoList(SphinxDirective):
    """A list of all todo entries."""

    has_content = False
    required_arguments = 0
    optional_arguments = 0
    final_argument_whitespace = False
    option_spec: OptionSpec = {}

    def run(self) -> list[Node]:
        """Emit an empty placeholder; TodoListProcessor expands it later."""
        placeholder = todolist('')
        return [placeholder]
+
+
class TodoListProcessor:
    """Replace each ``todolist`` node with copies of all collected todos.

    Instantiated once per resolved document (connected to the
    ``doctree-resolved`` event); all work happens in ``__init__``.
    """

    def __init__(self, app: Sphinx, doctree: nodes.document, docname: str) -> None:
        self.builder = app.builder
        self.config = app.config
        self.env = app.env
        self.domain = cast(TodoDomain, app.env.get_domain('todo'))
        # Scratch document used to resolve references inside copied todos.
        self.document = new_document('')

        self.process(doctree, docname)

    def process(self, doctree: nodes.document, docname: str) -> None:
        """Expand every todolist node in *doctree*, or drop it if disabled."""
        # Flatten the per-document todo lists into one list.
        todos: list[todo_node] = sum(self.domain.todos.values(), [])
        for node in list(doctree.findall(todolist)):
            if not self.config.todo_include_todos:
                node.parent.remove(node)
                continue

            if node.get('ids'):
                # Keep an anchor so links to the list itself still work.
                content: list[Element] = [nodes.target()]
            else:
                content = []

            for todo in todos:
                # Create a copy of the todo node
                new_todo = todo.deepcopy()
                # Clear ids to avoid duplicate anchors in the output page.
                new_todo['ids'].clear()

                self.resolve_reference(new_todo, docname)
                content.append(new_todo)

                # Follow each copy with a back-link to the original entry.
                todo_ref = self.create_todo_reference(todo, docname)
                content.append(todo_ref)

            node.replace_self(content)

    def create_todo_reference(self, todo: todo_node, docname: str) -> nodes.paragraph:
        """Build the "original entry" back-link paragraph for *todo*."""
        if self.config.todo_link_only:
            description = _('<<original entry>>')
        else:
            description = (_('(The <<original entry>> is located in %s, line %d.)') %
                           (todo.source, todo.line))

        # Split the translated text around the "<<...>>" link placeholder.
        prefix = description[:description.find('<<')]
        suffix = description[description.find('>>') + 2:]

        para = nodes.paragraph(classes=['todo-source'])
        para += nodes.Text(prefix)

        # Create a reference
        linktext = nodes.emphasis(_('original entry'), _('original entry'))
        reference = nodes.reference('', '', linktext, internal=True)
        try:
            reference['refuri'] = self.builder.get_relative_uri(docname, todo['docname'])
            reference['refuri'] += '#' + todo['ids'][0]
        except NoUri:
            # ignore if no URI can be determined, e.g. for LaTeX output
            pass

        para += reference
        para += nodes.Text(suffix)

        return para

    def resolve_reference(self, todo: todo_node, docname: str) -> None:
        """Resolve references in the todo content."""
        for node in todo.findall(addnodes.pending_xref):
            if 'refdoc' in node:
                # Pending xrefs must resolve relative to the list's document.
                node['refdoc'] = docname

        # Note: To resolve references, it is needed to wrap it with document node
        self.document += todo
        self.env.resolve_references(self.document, docname, self.builder)
        self.document.remove(todo)
+
+
def visit_todo_node(self: HTML5Translator, node: todo_node) -> None:
    """Render a todo as an admonition, or skip it when todos are disabled."""
    if not self.config.todo_include_todos:
        raise nodes.SkipNode
    self.visit_admonition(node)
+
+
def depart_todo_node(self: HTML5Translator, node: todo_node) -> None:
    """Close the admonition opened by visit_todo_node()."""
    self.depart_admonition(node)
+
+
def latex_visit_todo_node(self: LaTeXTranslator, node: todo_node) -> None:
    """Open a LaTeX ``sphinxadmonition`` for a todo, or skip it entirely."""
    if not self.config.todo_include_todos:
        raise nodes.SkipNode

    self.body.append('\n\\begin{sphinxadmonition}{note}{')
    self.body.append(self.hypertarget_to(node))

    # The first child is the title inserted by the Todo directive; emit it
    # escaped and drop it so it is not rendered a second time.
    title_node = cast(nodes.title, node[0])
    escaped_title = texescape.escape(title_node.astext(), self.config.latex_engine)
    self.body.append('%s:}' % escaped_title)
    node.pop(0)
+
+
def latex_depart_todo_node(self: LaTeXTranslator, node: todo_node) -> None:
    """Close the LaTeX sphinxadmonition environment opened for a todo."""
    self.body.append('\\end{sphinxadmonition}\n')
+
+
def setup(app: Sphinx) -> dict[str, Any]:
    """Register the todo extension: nodes, directives, domain and events."""
    app.add_event('todo-defined')
    # All three options are off by default and trigger an HTML rebuild.
    for option in ('todo_include_todos', 'todo_link_only', 'todo_emit_warnings'):
        app.add_config_value(option, False, 'html')

    visitors = (visit_todo_node, depart_todo_node)
    app.add_node(todolist)
    app.add_node(todo_node,
                 html=visitors,
                 latex=(latex_visit_todo_node, latex_depart_todo_node),
                 text=visitors,
                 man=visitors,
                 texinfo=visitors)

    app.add_directive('todo', Todo)
    app.add_directive('todolist', TodoList)
    app.add_domain(TodoDomain)
    app.connect('doctree-resolved', TodoListProcessor)
    return {
        'version': sphinx.__display_version__,
        'env_version': 2,
        'parallel_read_safe': True,
    }
diff --git a/sphinx/ext/viewcode.py b/sphinx/ext/viewcode.py
new file mode 100644
index 0000000..c5fcda5
--- /dev/null
+++ b/sphinx/ext/viewcode.py
@@ -0,0 +1,361 @@
+"""Add links to module code in Python object descriptions."""
+
+from __future__ import annotations
+
+import posixpath
+import traceback
+from importlib import import_module
+from os import path
+from typing import TYPE_CHECKING, Any, cast
+
+from docutils import nodes
+from docutils.nodes import Element, Node
+
+import sphinx
+from sphinx import addnodes
+from sphinx.builders.html import StandaloneHTMLBuilder
+from sphinx.locale import _, __
+from sphinx.pycode import ModuleAnalyzer
+from sphinx.transforms.post_transforms import SphinxPostTransform
+from sphinx.util import logging
+from sphinx.util.display import status_iterator
+from sphinx.util.nodes import make_refnode
+
+if TYPE_CHECKING:
+ from collections.abc import Generator, Iterable
+
+ from sphinx.application import Sphinx
+ from sphinx.builders import Builder
+ from sphinx.environment import BuildEnvironment
+
+logger = logging.getLogger(__name__)
+
+
# Subdirectory of the HTML output where highlighted module pages are written.
OUTPUT_DIRNAME = '_modules'
+
+
class viewcode_anchor(Element):
    """Placeholder node for viewcode anchors.

    These nodes are processed in the resolving phase: for builders that
    support viewcode they are all converted to reference anchors, and for
    unsupported builders they are removed.
    """
+
+
+def _get_full_modname(modname: str, attribute: str) -> str | None:
+ try:
+ if modname is None:
+ # Prevents a TypeError: if the last getattr() call will return None
+ # then it's better to return it directly
+ return None
+ module = import_module(modname)
+
+ # Allow an attribute to have multiple parts and incidentally allow
+ # repeated .s in the attribute.
+ value = module
+ for attr in attribute.split('.'):
+ if attr:
+ value = getattr(value, attr)
+
+ return getattr(value, '__module__', None)
+ except AttributeError:
+ # sphinx.ext.viewcode can't follow class instance attribute
+ # then AttributeError logging output only verbose mode.
+ logger.verbose("Didn't find %s in %s", attribute, modname)
+ return None
+ except Exception as e:
+ # sphinx.ext.viewcode follow python domain directives.
+ # because of that, if there are no real modules exists that specified
+ # by py:function or other directives, viewcode emits a lot of warnings.
+ # It should be displayed only verbose mode.
+ logger.verbose(traceback.format_exc().rstrip())
+ logger.verbose('viewcode can\'t import %s, failed with error "%s"', modname, e)
+ return None
+
+
def is_supported_builder(builder: Builder) -> bool:
    """Return True if *builder* can carry viewcode's generated module pages."""
    # Only multi-page HTML output is supported; epub additionally requires
    # the viewcode_enable_epub option to be switched on.
    unsupported = (
        builder.format != 'html'
        or builder.name == 'singlehtml'
        or (builder.name.startswith('epub')
            and not builder.config.viewcode_enable_epub)
    )
    return not unsupported
+
+
def doctree_read(app: Sphinx, doctree: Node) -> None:
    """Record which Python objects in *doctree* have viewable source.

    Attaches a ``viewcode_anchor`` placeholder to each Python signature
    whose source can be located, and records usage information in
    ``env._viewcode_modules``.
    """
    env = app.builder.env
    if not hasattr(env, '_viewcode_modules'):
        env._viewcode_modules = {}  # type: ignore[attr-defined]

    def has_tag(modname: str, fullname: str, docname: str, refname: str) -> bool:
        # Return True if *fullname* is a tagged object in *modname*'s source,
        # recording the use in env._viewcode_modules as a side effect.
        entry = env._viewcode_modules.get(modname, None)  # type: ignore[attr-defined]
        if entry is False:
            # Module analysis failed previously; don't retry.
            return False

        # Extensions may supply the source via the viewcode-find-source event.
        code_tags = app.emit_firstresult('viewcode-find-source', modname)
        if code_tags is None:
            try:
                analyzer = ModuleAnalyzer.for_module(modname)
                analyzer.find_tags()
            except Exception:
                # Mark the module as failed so later lookups short-circuit.
                env._viewcode_modules[modname] = False  # type: ignore[attr-defined]
                return False

            code = analyzer.code
            tags = analyzer.tags
        else:
            code, tags = code_tags

        if entry is None or entry[0] != code:
            # First sighting, or the source changed: start a fresh entry.
            entry = code, tags, {}, refname
            env._viewcode_modules[modname] = entry  # type: ignore[attr-defined]
        _, tags, used, _ = entry
        if fullname in tags:
            # Remember which document references this object.
            used[fullname] = docname
            return True

        return False

    for objnode in list(doctree.findall(addnodes.desc)):
        if objnode.get('domain') != 'py':
            continue
        names: set[str] = set()
        for signode in objnode:
            if not isinstance(signode, addnodes.desc_signature):
                continue
            modname = signode.get('module')
            fullname = signode.get('fullname')
            # Keep the documented module name for the backlink anchor, even
            # if the object turns out to be defined in another module.
            refname = modname
            if env.config.viewcode_follow_imported_members:
                # Resolve re-exported objects to their defining module.
                new_modname = app.emit_firstresult(
                    'viewcode-follow-imported', modname, fullname,
                )
                if not new_modname:
                    new_modname = _get_full_modname(modname, fullname)
                modname = new_modname
            if not modname:
                continue
            fullname = signode.get('fullname')
            if not has_tag(modname, fullname, env.docname, refname):
                continue
            if fullname in names:
                # only one link per name, please
                continue
            names.add(fullname)
            pagename = posixpath.join(OUTPUT_DIRNAME, modname.replace('.', '/'))
            signode += viewcode_anchor(reftarget=pagename, refid=fullname, refdoc=env.docname)
+
+
def env_merge_info(app: Sphinx, env: BuildEnvironment, docnames: Iterable[str],
                   other: BuildEnvironment) -> None:
    """Merge viewcode data collected by a parallel worker into *env*."""
    if not hasattr(other, '_viewcode_modules'):
        return
    # Make sure the main environment has a _viewcode_modules dict.
    if not hasattr(env, '_viewcode_modules'):
        env._viewcode_modules = {}  # type: ignore[attr-defined]
    merged = env._viewcode_modules  # type: ignore[attr-defined]
    # Fold in the information collected by the subprocess.
    for modname, entry in other._viewcode_modules.items():
        if modname not in merged:
            merged[modname] = entry
        elif merged[modname]:
            # Both sides analysed the module; union the usage mappings,
            # keeping the main environment's entries on conflict.
            used = merged[modname][2]
            for fullname, docname in entry[2].items():
                used.setdefault(fullname, docname)
+
+
def env_purge_doc(app: Sphinx, env: BuildEnvironment, docname: str) -> None:
    """Forget viewcode back-references that originate from *docname*."""
    modules = getattr(env, '_viewcode_modules', {})

    for modname, entry in list(modules.items()):
        if entry is False:
            # Analysis of this module failed earlier; nothing to purge.
            continue

        _code, _tags, used, _refname = entry
        stale = [fullname for fullname, doc in used.items() if doc == docname]
        for fullname in stale:
            del used[fullname]

        if not used:
            # No document references this module any more; drop its entry.
            del modules[modname]
+
+
class ViewcodeAnchorTransform(SphinxPostTransform):
    """Convert or remove viewcode_anchor nodes depends on builder."""
    default_priority = 100

    def run(self, **kwargs: Any) -> None:
        """Resolve every viewcode_anchor placeholder in the document."""
        if not is_supported_builder(self.app.builder):
            self.remove_viewcode_anchors()
            return
        self.convert_viewcode_anchors()

    def convert_viewcode_anchors(self) -> None:
        """Replace each placeholder with a real ``[source]`` reference."""
        builder = self.app.builder
        for anchor in self.document.findall(viewcode_anchor):
            label = nodes.inline('', _('[source]'), classes=['viewcode-link'])
            refnode = make_refnode(builder, anchor['refdoc'],
                                   anchor['reftarget'], anchor['refid'], label)
            anchor.replace_self(refnode)

    def remove_viewcode_anchors(self) -> None:
        """Drop all placeholders; this builder cannot link to source pages."""
        for anchor in list(self.document.findall(viewcode_anchor)):
            anchor.parent.remove(anchor)
+
+
def get_module_filename(app: Sphinx, modname: str) -> str | None:
    """Get module filename for *modname*."""
    if app.emit_firstresult('viewcode-find-source', modname):
        # A custom source provider answered; there is no file on disk.
        return None
    try:
        filename, _source = ModuleAnalyzer.get_module_source(modname)
    except Exception:
        return None
    return filename
+
+
def should_generate_module_page(app: Sphinx, modname: str) -> bool:
    """Check generation of module page is needed."""
    module_filename = get_module_filename(app, modname)
    if module_filename is None:
        # Always (re-)generate module page when module filename is not found.
        return True

    builder = cast(StandaloneHTMLBuilder, app.builder)
    basename = modname.replace('.', '/') + builder.out_suffix
    page_filename = path.join(app.outdir, '_modules/', basename)

    try:
        module_mtime = path.getmtime(module_filename)
        page_mtime = path.getmtime(page_filename)
    except OSError:
        # Either file is missing or unreadable; regenerate to be safe.
        return True

    # Regenerate only when the module file is newer than the rendered page.
    return module_mtime > page_mtime
+
+
def collect_pages(app: Sphinx) -> Generator[tuple[str, dict[str, Any], str], None, None]:
    """Generate highlighted source pages and the module index page.

    Yields ``(pagename, context, template)`` tuples for the HTML builder's
    ``html-collect-pages`` event.
    """
    env = app.builder.env
    if not hasattr(env, '_viewcode_modules'):
        return
    if not is_supported_builder(app.builder):
        return
    highlighter = app.builder.highlighter  # type: ignore[attr-defined]
    urito = app.builder.get_relative_uri

    modnames = set(env._viewcode_modules)

    for modname, entry in status_iterator(
            sorted(env._viewcode_modules.items()),
            __('highlighting module code... '), "blue",
            len(env._viewcode_modules),
            app.verbosity, lambda x: x[0]):
        if not entry:
            # Analysis failed for this module (entry is False); skip it.
            continue
        if not should_generate_module_page(app, modname):
            # An up-to-date page already exists on disk.
            continue

        code, tags, used, refname = entry
        # construct a page name for the highlighted source
        pagename = posixpath.join(OUTPUT_DIRNAME, modname.replace('.', '/'))
        # highlight the source using the builder's highlighter
        if env.config.highlight_language in {'default', 'none'}:
            lexer = env.config.highlight_language
        else:
            lexer = 'python'
        linenos = 'inline' * env.config.viewcode_line_numbers
        highlighted = highlighter.highlight_block(code, lexer, linenos=linenos)
        # split the code into lines
        lines = highlighted.splitlines()
        # split off wrap markup from the first line of the actual code
        before, after = lines[0].split('<pre>')
        lines[0:1] = [before + '<pre>', after]
        # nothing to do for the last line; it always starts with </pre> anyway
        # now that we have code lines (starting at index 1), insert anchors for
        # the collected tags (HACK: this only works if the tag boundaries are
        # properly nested!)
        max_index = len(lines) - 1
        link_text = _('[docs]')
        for name, docname in used.items():
            type, start, end = tags[name]
            # Back-link from the source page to the documentation entry.
            backlink = urito(pagename, docname) + '#' + refname + '.' + name
            lines[start] = (f'<div class="viewcode-block" id="{name}">\n'
                            f'<a class="viewcode-back" href="{backlink}">{link_text}</a>\n'
                            + lines[start])
            lines[min(end, max_index)] += '</div>\n'

        # try to find parents (for submodules)
        parents = []
        parent = modname
        while '.' in parent:
            parent = parent.rsplit('.', 1)[0]
            if parent in modnames:
                parents.append({
                    'link': urito(pagename,
                                  posixpath.join(OUTPUT_DIRNAME, parent.replace('.', '/'))),
                    'title': parent})
        parents.append({'link': urito(pagename, posixpath.join(OUTPUT_DIRNAME, 'index')),
                        'title': _('Module code')})
        parents.reverse()
        # putting it all together
        context = {
            'parents': parents,
            'title': modname,
            'body': (_('<h1>Source code for %s</h1>') % modname +
                     '\n'.join(lines)),
        }
        yield (pagename, context, 'page.html')

    if not modnames:
        return

    html = ['\n']
    # the stack logic is needed for using nested lists for submodules
    stack = ['']
    for modname in sorted(modnames):
        if modname.startswith(stack[-1]):
            # Going one level deeper: open a nested list.
            stack.append(modname + '.')
            html.append('<ul>')
        else:
            # Pop back up to the common ancestor, closing lists on the way.
            stack.pop()
            while not modname.startswith(stack[-1]):
                stack.pop()
                html.append('</ul>')
            stack.append(modname + '.')
        relative_uri = urito(posixpath.join(OUTPUT_DIRNAME, 'index'),
                             posixpath.join(OUTPUT_DIRNAME, modname.replace('.', '/')))
        html.append(f'<li><a href="{relative_uri}">{modname}</a></li>\n')
    html.append('</ul>' * (len(stack) - 1))
    context = {
        'title': _('Overview: module code'),
        'body': (_('<h1>All modules for which code is available</h1>') +
                 ''.join(html)),
    }

    yield (posixpath.join(OUTPUT_DIRNAME, 'index'), context, 'page.html')
+
+
def setup(app: Sphinx) -> dict[str, Any]:
    """Register the viewcode extension: config values, events, transforms."""
    app.add_config_value('viewcode_import', None, False)
    app.add_config_value('viewcode_enable_epub', False, False)
    app.add_config_value('viewcode_follow_imported_members', True, False)
    app.add_config_value('viewcode_line_numbers', False, 'env', (bool,))
    for event, handler in (
        ('doctree-read', doctree_read),
        ('env-merge-info', env_merge_info),
        ('env-purge-doc', env_purge_doc),
        ('html-collect-pages', collect_pages),
    ):
        app.connect(event, handler)
    # app.add_config_value('viewcode_include_modules', [], 'env')
    # app.add_config_value('viewcode_exclude_modules', [], 'env')
    app.add_event('viewcode-find-source')
    app.add_event('viewcode-follow-imported')
    app.add_post_transform(ViewcodeAnchorTransform)
    return {
        'version': sphinx.__display_version__,
        'env_version': 1,
        'parallel_read_safe': True,
    }