Diffstat (limited to 'tests/test_util')
-rw-r--r--  tests/test_util/__init__.py              |   0
-rw-r--r--  tests/test_util/intersphinx_data.py      |  52
-rw-r--r--  tests/test_util/test_util.py             |  73
-rw-r--r--  tests/test_util/test_util_console.py     |  90
-rw-r--r--  tests/test_util/test_util_display.py     | 105
-rw-r--r--  tests/test_util/test_util_docstrings.py  |  88
-rw-r--r--  tests/test_util/test_util_docutils.py    |  92
-rw-r--r--  tests/test_util/test_util_fileutil.py    | 103
-rw-r--r--  tests/test_util/test_util_i18n.py        | 192
-rw-r--r--  tests/test_util/test_util_images.py      |  74
-rw-r--r--  tests/test_util/test_util_inspect.py     | 853
-rw-r--r--  tests/test_util/test_util_inventory.py   |  78
-rw-r--r--  tests/test_util/test_util_logging.py     | 414
-rw-r--r--  tests/test_util/test_util_matching.py    | 174
-rw-r--r--  tests/test_util/test_util_nodes.py       | 254
-rw-r--r--  tests/test_util/test_util_rst.py         | 164
-rw-r--r--  tests/test_util/test_util_template.py    |  29
-rw-r--r--  tests/test_util/test_util_typing.py      | 633
-rw-r--r--  tests/test_util/typing_test_data.py      | 129
19 files changed, 3597 insertions, 0 deletions
diff --git a/tests/test_util/__init__.py b/tests/test_util/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/test_util/__init__.py
diff --git a/tests/test_util/intersphinx_data.py b/tests/test_util/intersphinx_data.py
new file mode 100644
index 0000000..042ee76
--- /dev/null
+++ b/tests/test_util/intersphinx_data.py
@@ -0,0 +1,52 @@
+from __future__ import annotations
+
+import zlib
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+ from typing import Final
+
+INVENTORY_V1: Final[bytes] = b'''\
+# Sphinx inventory version 1
+# Project: foo
+# Version: 1.0
+module mod foo.html
+module.cls class foo.html
+'''
+
+INVENTORY_V2: Final[bytes] = b'''\
+# Sphinx inventory version 2
+# Project: foo
+# Version: 2.0
+# The remainder of this file is compressed with zlib.
+''' + zlib.compress(b'''\
+module1 py:module 0 foo.html#module-module1 Long Module desc
+module2 py:module 0 foo.html#module-$ -
+module1.func py:function 1 sub/foo.html#$ -
+module1.Foo.bar py:method 1 index.html#foo.Bar.baz -
+CFunc c:function 2 cfunc.html#CFunc -
+std cpp:type 1 index.html#std -
+std::uint8_t cpp:type 1 index.html#std_uint8_t -
+foo::Bar cpp:class 1 index.html#cpp_foo_bar -
+foo::Bar::baz cpp:function 1 index.html#cpp_foo_bar_baz -
+foons cpp:type 1 index.html#foons -
+foons::bartype cpp:type 1 index.html#foons_bartype -
+a term std:term -1 glossary.html#term-a-term -
+ls.-l std:cmdoption 1 index.html#cmdoption-ls-l -
+docname std:doc -1 docname.html -
+foo js:module 1 index.html#foo -
+foo.bar js:class 1 index.html#foo.bar -
+foo.bar.baz js:method 1 index.html#foo.bar.baz -
+foo.bar.qux js:data 1 index.html#foo.bar.qux -
+a term including:colon std:term -1 glossary.html#term-a-term-including-colon -
+The-Julia-Domain std:label -1 write_inventory/#$ The Julia Domain
+''')
+
+INVENTORY_V2_NO_VERSION: Final[bytes] = b'''\
+# Sphinx inventory version 2
+# Project: foo
+# Version:
+# The remainder of this file is compressed with zlib.
+''' + zlib.compress(b'''\
+module1 py:module 0 foo.html#module-module1 Long Module desc
+''')
diff --git a/tests/test_util/test_util.py b/tests/test_util/test_util.py
new file mode 100644
index 0000000..4389894
--- /dev/null
+++ b/tests/test_util/test_util.py
@@ -0,0 +1,73 @@
+"""Tests util functions."""
+
+import os
+import tempfile
+
+import pytest
+
+from sphinx.errors import ExtensionError
+from sphinx.util import encode_uri, ensuredir, import_object, parselinenos
+
+
+def test_encode_uri():
+ expected = ('https://ru.wikipedia.org/wiki/%D0%A1%D0%B8%D1%81%D1%82%D0%B5%D0%BC%D0%B0_'
+ '%D1%83%D0%BF%D1%80%D0%B0%D0%B2%D0%BB%D0%B5%D0%BD%D0%B8%D1%8F_'
+ '%D0%B1%D0%B0%D0%B7%D0%B0%D0%BC%D0%B8_%D0%B4%D0%B0%D0%BD%D0%BD%D1%8B%D1%85')
+ uri = ('https://ru.wikipedia.org/wiki'
+ '/Система_управления_базами_данных')
+ assert expected == encode_uri(uri)
+
+ expected = ('https://github.com/search?utf8=%E2%9C%93&q=is%3Aissue+is%3Aopen+is%3A'
+ 'sprint-friendly+user%3Ajupyter&type=Issues&ref=searchresults')
+ uri = ('https://github.com/search?utf8=✓&q=is%3Aissue+is%3Aopen+is%3A'
+ 'sprint-friendly+user%3Ajupyter&type=Issues&ref=searchresults')
+ assert expected == encode_uri(uri)
+
+
+def test_ensuredir():
+ with tempfile.TemporaryDirectory() as tmp_path:
+ # Does not raise an exception for an existing directory.
+ ensuredir(tmp_path)
+
+ path = os.path.join(tmp_path, 'a', 'b', 'c')
+ ensuredir(path)
+ assert os.path.isdir(path)
+
+
+def test_import_object():
+ module = import_object('sphinx')
+ assert module.__name__ == 'sphinx'
+
+ module = import_object('sphinx.application')
+ assert module.__name__ == 'sphinx.application'
+
+ obj = import_object('sphinx.application.Sphinx')
+ assert obj.__name__ == 'Sphinx'
+
+ with pytest.raises(ExtensionError) as exc:
+ import_object('sphinx.unknown_module')
+ assert exc.value.args[0] == 'Could not import sphinx.unknown_module'
+
+ with pytest.raises(ExtensionError) as exc:
+ import_object('sphinx.unknown_module', 'my extension')
+ assert exc.value.args[0] == ('Could not import sphinx.unknown_module '
+ '(needed for my extension)')
+
+
+def test_parselinenos():
+ assert parselinenos('1,2,3', 10) == [0, 1, 2]
+ assert parselinenos('4, 5, 6', 10) == [3, 4, 5]
+ assert parselinenos('-4', 10) == [0, 1, 2, 3]
+ assert parselinenos('7-9', 10) == [6, 7, 8]
+ assert parselinenos('7-', 10) == [6, 7, 8, 9]
+ assert parselinenos('1,7-', 10) == [0, 6, 7, 8, 9]
+ assert parselinenos('7-7', 10) == [6]
+ assert parselinenos('11-', 10) == [10]
+ with pytest.raises(ValueError, match="invalid line number spec: '1-2-3'"):
+ parselinenos('1-2-3', 10)
+ with pytest.raises(ValueError, match="invalid line number spec: 'abc-def'"):
+ parselinenos('abc-def', 10)
+ with pytest.raises(ValueError, match="invalid line number spec: '-'"):
+ parselinenos('-', 10)
+ with pytest.raises(ValueError, match="invalid line number spec: '3-1'"):
+ parselinenos('3-1', 10)
diff --git a/tests/test_util/test_util_console.py b/tests/test_util/test_util_console.py
new file mode 100644
index 0000000..b617a33
--- /dev/null
+++ b/tests/test_util/test_util_console.py
@@ -0,0 +1,90 @@
+from __future__ import annotations
+
+import itertools
+import operator
+from typing import TYPE_CHECKING
+
+import pytest
+
+from sphinx.util.console import blue, reset, strip_colors, strip_escape_sequences
+
+if TYPE_CHECKING:
+ from collections.abc import Callable, Sequence
+ from typing import Final, TypeVar
+
+ _T = TypeVar('_T')
+
+CURSOR_UP: Final[str] = '\x1b[2A' # ignored ANSI code
+ERASE_LINE: Final[str] = '\x1b[2K' # supported ANSI code
+TEXT: Final[str] = '\x07 Hello world!'
+
+
+@pytest.mark.parametrize(
+ ('strip_function', 'ansi_base_blocks', 'text_base_blocks'),
+ [
+ (
+ strip_colors,
+ # double ERASE_LINE so that the tested strings may have 2 of them
+ [TEXT, blue(TEXT), reset(TEXT), ERASE_LINE, ERASE_LINE, CURSOR_UP],
+ # :func:`strip_colors` removes color codes but keeps ERASE_LINE and CURSOR_UP
+ [TEXT, TEXT, TEXT, ERASE_LINE, ERASE_LINE, CURSOR_UP],
+ ),
+ (
+ strip_escape_sequences,
+ # double ERASE_LINE so that the tested strings may have 2 of them
+ [TEXT, blue(TEXT), reset(TEXT), ERASE_LINE, ERASE_LINE, CURSOR_UP],
+ # :func:`strip_escape_sequences` strips ANSI codes known by Sphinx
+ [TEXT, TEXT, TEXT, '', '', CURSOR_UP],
+ ),
+ ],
+ ids=[strip_colors.__name__, strip_escape_sequences.__name__],
+)
+def test_strip_ansi(
+ strip_function: Callable[[str], str],
+ ansi_base_blocks: Sequence[str],
+ text_base_blocks: Sequence[str],
+) -> None:
+ assert callable(strip_function)
+ assert len(text_base_blocks) == len(ansi_base_blocks)
+ N = len(ansi_base_blocks)
+
+ def next_ansi_blocks(choices: Sequence[str], n: int) -> Sequence[str]:
+ # Get a list of *n* words from a cyclic sequence of *choices*.
+ #
+ # For instance ``next_ansi_blocks(['a', 'b'], 3) == ['a', 'b', 'a']``.
+ stream = itertools.cycle(choices)
+ return list(map(operator.itemgetter(0), zip(stream, range(n))))
+
+ # generate all permutations of length N
+ for sigma in itertools.permutations(range(N), N):
+ # apply the permutation on the blocks with ANSI codes
+ ansi_blocks = list(map(ansi_base_blocks.__getitem__, sigma))
+ # apply the permutation on the blocks with stripped codes
+ text_blocks = list(map(text_base_blocks.__getitem__, sigma))
+
+ for glue, n in itertools.product(['.', '\n', '\r\n'], range(4 * N)):
+ ansi_strings = next_ansi_blocks(ansi_blocks, n)
+ text_strings = next_ansi_blocks(text_blocks, n)
+ assert len(ansi_strings) == len(text_strings) == n
+
+ ansi_string = glue.join(ansi_strings)
+ text_string = glue.join(text_strings)
+ assert strip_function(ansi_string) == text_string
+
+
+def test_strip_ansi_short_forms():
+ # Sphinx normally emits "normalized" color codes that match
+ # "\x1b\[(\d\d;){0,2}(\d\d)m", but some messages may use short
+ # forms such as '\x1b[0m' instead of ``reset(s)``, so check
+ # that these alternative forms are also stripped.
+
+ for strip_function in [strip_colors, strip_escape_sequences]:
+ # \x1b[m and \x1b[0m are equivalent to \x1b[00m
+ assert strip_function('\x1b[m') == ''
+ assert strip_function('\x1b[0m') == ''
+
+ # \x1b[1m is equivalent to \x1b[01m
+ assert strip_function('\x1b[1mbold\x1b[0m') == 'bold'
+
+ # \x1b[K is equivalent to \x1b[0K
+ assert strip_escape_sequences('\x1b[K') == ''
diff --git a/tests/test_util/test_util_display.py b/tests/test_util/test_util_display.py
new file mode 100644
index 0000000..a18fa1e
--- /dev/null
+++ b/tests/test_util/test_util_display.py
@@ -0,0 +1,105 @@
+"""Tests util functions."""
+
+import pytest
+
+from sphinx.util import logging
+from sphinx.util.console import strip_colors
+from sphinx.util.display import (
+ SkipProgressMessage,
+ display_chunk,
+ progress_message,
+ status_iterator,
+)
+
+
+def test_display_chunk():
+ assert display_chunk('hello') == 'hello'
+ assert display_chunk(['hello']) == 'hello'
+ assert display_chunk(['hello', 'sphinx', 'world']) == 'hello .. world'
+ assert display_chunk(('hello',)) == 'hello'
+ assert display_chunk(('hello', 'sphinx', 'world')) == 'hello .. world'
+
+
+@pytest.mark.sphinx('dummy')
+def test_status_iterator_length_0(app, status, warning):
+ logging.setup(app, status, warning)
+
+ # test for status_iterator (length=0)
+ status.seek(0)
+ status.truncate(0)
+ yields = list(status_iterator(['hello', 'sphinx', 'world'], 'testing ... '))
+ output = strip_colors(status.getvalue())
+ assert 'testing ... hello sphinx world \n' in output
+ assert yields == ['hello', 'sphinx', 'world']
+
+
+@pytest.mark.sphinx('dummy')
+def test_status_iterator_verbosity_0(app, status, warning, monkeypatch):
+ monkeypatch.setenv("FORCE_COLOR", "1")
+ logging.setup(app, status, warning)
+
+ # test for status_iterator (verbosity=0)
+ status.seek(0)
+ status.truncate(0)
+ yields = list(status_iterator(['hello', 'sphinx', 'world'], 'testing ... ',
+ length=3, verbosity=0))
+ output = strip_colors(status.getvalue())
+ assert 'testing ... [ 33%] hello\r' in output
+ assert 'testing ... [ 67%] sphinx\r' in output
+ assert 'testing ... [100%] world\r\n' in output
+ assert yields == ['hello', 'sphinx', 'world']
+
+
+@pytest.mark.sphinx('dummy')
+def test_status_iterator_verbosity_1(app, status, warning, monkeypatch):
+ monkeypatch.setenv("FORCE_COLOR", "1")
+ logging.setup(app, status, warning)
+
+ # test for status_iterator (verbosity=1)
+ status.seek(0)
+ status.truncate(0)
+ yields = list(status_iterator(['hello', 'sphinx', 'world'], 'testing ... ',
+ length=3, verbosity=1))
+ output = strip_colors(status.getvalue())
+ assert 'testing ... [ 33%] hello\n' in output
+ assert 'testing ... [ 67%] sphinx\n' in output
+ assert 'testing ... [100%] world\n\n' in output
+ assert yields == ['hello', 'sphinx', 'world']
+
+
+def test_progress_message(app, status, warning):
+ logging.setup(app, status, warning)
+ logger = logging.getLogger(__name__)
+
+ # standard case
+ with progress_message('testing'):
+ logger.info('blah ', nonl=True)
+
+ output = strip_colors(status.getvalue())
+ assert 'testing... blah done\n' in output
+
+ # skipping case
+ with progress_message('testing'):
+ raise SkipProgressMessage('Reason: %s', 'error') # NoQA: EM101
+
+ output = strip_colors(status.getvalue())
+ assert 'testing... skipped\nReason: error\n' in output
+
+ # error case
+ try:
+ with progress_message('testing'):
+ raise
+ except Exception:
+ pass
+
+ output = strip_colors(status.getvalue())
+ assert 'testing... failed\n' in output
+
+ # decorator
+ @progress_message('testing')
+ def func():
+ logger.info('in func ', nonl=True)
+
+ func()
+ output = strip_colors(status.getvalue())
+ assert 'testing... in func done\n' in output
diff --git a/tests/test_util/test_util_docstrings.py b/tests/test_util/test_util_docstrings.py
new file mode 100644
index 0000000..813e84e
--- /dev/null
+++ b/tests/test_util/test_util_docstrings.py
@@ -0,0 +1,88 @@
+"""Test sphinx.util.docstrings."""
+
+from sphinx.util.docstrings import prepare_commentdoc, prepare_docstring, separate_metadata
+
+
+def test_separate_metadata():
+ # metadata only
+ text = (":meta foo: bar\n"
+ ":meta baz:\n")
+ docstring, metadata = separate_metadata(text)
+ assert docstring == ''
+ assert metadata == {'foo': 'bar', 'baz': ''}
+
+ # non metadata field list item
+ text = (":meta foo: bar\n"
+ ":param baz:\n")
+ docstring, metadata = separate_metadata(text)
+ assert docstring == ':param baz:\n'
+ assert metadata == {'foo': 'bar'}
+
+ # field_list-like text immediately following a paragraph is not a field_list
+ text = ("blah blah blah\n"
+ ":meta foo: bar\n"
+ ":meta baz:\n")
+ docstring, metadata = separate_metadata(text)
+ assert docstring == text
+ assert metadata == {}
+
+ # field_list-like text following a blank line is a field_list
+ text = ("blah blah blah\n"
+ "\n"
+ ":meta foo: bar\n"
+ ":meta baz:\n")
+ docstring, metadata = separate_metadata(text)
+ assert docstring == "blah blah blah\n\n"
+ assert metadata == {'foo': 'bar', 'baz': ''}
+
+ # non field_list item breaks field_list
+ text = (":meta foo: bar\n"
+ "blah blah blah\n"
+ ":meta baz:\n")
+ docstring, metadata = separate_metadata(text)
+ assert docstring == ("blah blah blah\n"
+ ":meta baz:\n")
+ assert metadata == {'foo': 'bar'}
+
+
+def test_prepare_docstring():
+ docstring = """multiline docstring
+
+ Lorem ipsum dolor sit amet, consectetur adipiscing elit,
+ sed do eiusmod tempor incididunt ut labore et dolore magna
+ aliqua::
+
+ Ut enim ad minim veniam, quis nostrud exercitation
+ ullamco laboris nisi ut aliquip ex ea commodo consequat.
+ """
+
+ assert (prepare_docstring(docstring) ==
+ ["multiline docstring",
+ "",
+ "Lorem ipsum dolor sit amet, consectetur adipiscing elit,",
+ "sed do eiusmod tempor incididunt ut labore et dolore magna",
+ "aliqua::",
+ "",
+ " Ut enim ad minim veniam, quis nostrud exercitation",
+ " ullamco laboris nisi ut aliquip ex ea commodo consequat.",
+ ""])
+
+ docstring = """
+
+ multiline docstring with leading empty lines
+ """
+ assert (prepare_docstring(docstring) ==
+ ["multiline docstring with leading empty lines",
+ ""])
+
+ docstring = "single line docstring"
+ assert (prepare_docstring(docstring) ==
+ ["single line docstring",
+ ""])
+
+
+def test_prepare_commentdoc():
+ assert prepare_commentdoc("hello world") == []
+ assert prepare_commentdoc("#: hello world") == ["hello world", ""]
+ assert prepare_commentdoc("#:  hello world") == [" hello world", ""] # two spaces after '#:' keep one leading space
+ assert prepare_commentdoc("#: hello\n#: world\n") == ["hello", "world", ""]
diff --git a/tests/test_util/test_util_docutils.py b/tests/test_util/test_util_docutils.py
new file mode 100644
index 0000000..69999eb
--- /dev/null
+++ b/tests/test_util/test_util_docutils.py
@@ -0,0 +1,92 @@
+"""Tests util.utils functions."""
+
+import os
+
+from docutils import nodes
+
+from sphinx.util.docutils import (
+ SphinxFileOutput,
+ SphinxTranslator,
+ docutils_namespace,
+ new_document,
+ register_node,
+)
+
+
+def test_register_node():
+ class custom_node(nodes.Element):
+ pass
+
+ with docutils_namespace():
+ register_node(custom_node)
+
+ # check registered
+ assert hasattr(nodes.GenericNodeVisitor, 'visit_custom_node')
+ assert hasattr(nodes.GenericNodeVisitor, 'depart_custom_node')
+ assert hasattr(nodes.SparseNodeVisitor, 'visit_custom_node')
+ assert hasattr(nodes.SparseNodeVisitor, 'depart_custom_node')
+
+ # check unregistered outside namespace
+ assert not hasattr(nodes.GenericNodeVisitor, 'visit_custom_node')
+ assert not hasattr(nodes.GenericNodeVisitor, 'depart_custom_node')
+ assert not hasattr(nodes.SparseNodeVisitor, 'visit_custom_node')
+ assert not hasattr(nodes.SparseNodeVisitor, 'depart_custom_node')
+
+
+def test_SphinxFileOutput(tmpdir):
+ content = 'Hello Sphinx World'
+
+ # write test.txt at first
+ filename = str(tmpdir / 'test.txt')
+ output = SphinxFileOutput(destination_path=filename)
+ output.write(content)
+ os.utime(filename, (0, 0))
+
+ # overwrite it again
+ output.write(content)
+ assert os.stat(filename).st_mtime != 0 # updated
+
+ # write test2.txt at first
+ filename = str(tmpdir / 'test2.txt')
+ output = SphinxFileOutput(destination_path=filename, overwrite_if_changed=True)
+ output.write(content)
+ os.utime(filename, (0, 0))
+
+ # overwrite it again
+ output.write(content)
+ assert os.stat(filename).st_mtime == 0 # not updated
+
+ # overwrite it again (content changed)
+ output.write(content + "; content change")
+ assert os.stat(filename).st_mtime != 0 # updated
+
+
+def test_SphinxTranslator(app):
+ class CustomNode(nodes.inline):
+ pass
+
+ class MyTranslator(SphinxTranslator):
+ def __init__(self, *args):
+ self.called = []
+ super().__init__(*args)
+
+ def visit_document(self, node):
+ pass
+
+ def depart_document(self, node):
+ pass
+
+ def visit_inline(self, node):
+ self.called.append('visit_inline')
+
+ def depart_inline(self, node):
+ self.called.append('depart_inline')
+
+ document = new_document('')
+ document += CustomNode()
+
+ translator = MyTranslator(document, app.builder)
+ document.walkabout(translator)
+
+ # MyTranslator does not define visit_CustomNode, so the visitor falls back to visit_inline (the closest superclass node type).
+ assert translator.called == ['visit_inline', 'depart_inline']
diff --git a/tests/test_util/test_util_fileutil.py b/tests/test_util/test_util_fileutil.py
new file mode 100644
index 0000000..9c23821
--- /dev/null
+++ b/tests/test_util/test_util_fileutil.py
@@ -0,0 +1,103 @@
+"""Tests sphinx.util.fileutil functions."""
+
+from unittest import mock
+
+from sphinx.jinja2glue import BuiltinTemplateLoader
+from sphinx.util.fileutil import copy_asset, copy_asset_file
+
+
+class DummyTemplateLoader(BuiltinTemplateLoader):
+ def __init__(self):
+ super().__init__()
+ builder = mock.Mock()
+ builder.config.templates_path = []
+ builder.app.translator = None
+ self.init(builder)
+
+
+def test_copy_asset_file(tmp_path):
+ renderer = DummyTemplateLoader()
+
+ # copy normal file
+ src = (tmp_path / 'asset.txt')
+ src.write_text('# test data', encoding='utf8')
+ dest = (tmp_path / 'output.txt')
+
+ copy_asset_file(src, dest)
+ assert dest.exists()
+ assert src.read_text(encoding='utf8') == dest.read_text(encoding='utf8')
+
+ # copy template file
+ src = (tmp_path / 'asset.txt_t')
+ src.write_text('# {{var1}} data', encoding='utf8')
+ dest = (tmp_path / 'output.txt_t')
+
+ copy_asset_file(str(src), str(dest), {'var1': 'template'}, renderer)
+ assert not dest.exists()
+ assert (tmp_path / 'output.txt').exists()
+ assert (tmp_path / 'output.txt').read_text(encoding='utf8') == '# template data'
+
+ # copy template file to subdir
+ src = (tmp_path / 'asset.txt_t')
+ src.write_text('# {{var1}} data', encoding='utf8')
+ subdir1 = (tmp_path / 'subdir')
+ subdir1.mkdir(parents=True, exist_ok=True)
+
+ copy_asset_file(src, subdir1, {'var1': 'template'}, renderer)
+ assert (subdir1 / 'asset.txt').exists()
+ assert (subdir1 / 'asset.txt').read_text(encoding='utf8') == '# template data'
+
+ # copy template file without context
+ src = (tmp_path / 'asset.txt_t')
+ subdir2 = (tmp_path / 'subdir2')
+ subdir2.mkdir(parents=True, exist_ok=True)
+
+ copy_asset_file(src, subdir2)
+ assert not (subdir2 / 'asset.txt').exists()
+ assert (subdir2 / 'asset.txt_t').exists()
+ assert (subdir2 / 'asset.txt_t').read_text(encoding='utf8') == '# {{var1}} data'
+
+
+def test_copy_asset(tmp_path):
+ renderer = DummyTemplateLoader()
+
+ # prepare source files
+ source = (tmp_path / 'source')
+ source.mkdir(parents=True, exist_ok=True)
+ (source / 'index.rst').write_text('index.rst', encoding='utf8')
+ (source / 'foo.rst_t').write_text('{{var1}}.rst', encoding='utf8')
+ (source / '_static').mkdir(parents=True, exist_ok=True)
+ (source / '_static' / 'basic.css').write_text('basic.css', encoding='utf8')
+ (source / '_templates').mkdir(parents=True, exist_ok=True)
+ (source / '_templates' / 'layout.html').write_text('layout.html', encoding='utf8')
+ (source / '_templates' / 'sidebar.html_t').write_text('sidebar: {{var2}}', encoding='utf8')
+
+ # copy a single file
+ assert not (tmp_path / 'test1').exists()
+ copy_asset(source / 'index.rst', tmp_path / 'test1')
+ assert (tmp_path / 'test1').exists()
+ assert (tmp_path / 'test1/index.rst').exists()
+
+ # copy directories
+ destdir = tmp_path / 'test2'
+ copy_asset(source, destdir, context={'var1': 'bar', 'var2': 'baz'}, renderer=renderer)
+ assert (destdir / 'index.rst').exists()
+ assert (destdir / 'foo.rst').exists()
+ assert (destdir / 'foo.rst').read_text(encoding='utf8') == 'bar.rst'
+ assert (destdir / '_static' / 'basic.css').exists()
+ assert (destdir / '_templates' / 'layout.html').exists()
+ assert (destdir / '_templates' / 'sidebar.html').exists()
+ assert (destdir / '_templates' / 'sidebar.html').read_text(encoding='utf8') == 'sidebar: baz'
+
+ # copy with exclusion
+ def excluded(path):
+ return ('sidebar.html' in path or 'basic.css' in path)
+
+ destdir = tmp_path / 'test3'
+ copy_asset(source, destdir, excluded,
+ context={'var1': 'bar', 'var2': 'baz'}, renderer=renderer)
+ assert (destdir / 'index.rst').exists()
+ assert (destdir / 'foo.rst').exists()
+ assert not (destdir / '_static' / 'basic.css').exists()
+ assert (destdir / '_templates' / 'layout.html').exists()
+ assert not (destdir / '_templates' / 'sidebar.html').exists()
diff --git a/tests/test_util/test_util_i18n.py b/tests/test_util/test_util_i18n.py
new file mode 100644
index 0000000..f6baa04
--- /dev/null
+++ b/tests/test_util/test_util_i18n.py
@@ -0,0 +1,192 @@
+"""Test i18n util."""
+
+import datetime
+import os
+
+import babel
+import pytest
+from babel.messages.mofile import read_mo
+
+from sphinx.errors import SphinxError
+from sphinx.util import i18n
+
+BABEL_VERSION = tuple(map(int, babel.__version__.split('.')))
+
+
+def test_catalog_info_for_file_and_path():
+ cat = i18n.CatalogInfo('path', 'domain', 'utf-8')
+ assert cat.po_file == 'domain.po'
+ assert cat.mo_file == 'domain.mo'
+ assert cat.po_path == os.path.join('path', 'domain.po')
+ assert cat.mo_path == os.path.join('path', 'domain.mo')
+
+
+def test_catalog_info_for_sub_domain_file_and_path():
+ cat = i18n.CatalogInfo('path', 'sub/domain', 'utf-8')
+ assert cat.po_file == 'sub/domain.po'
+ assert cat.mo_file == 'sub/domain.mo'
+ assert cat.po_path == os.path.join('path', 'sub/domain.po')
+ assert cat.mo_path == os.path.join('path', 'sub/domain.mo')
+
+
+def test_catalog_outdated(tmp_path):
+ (tmp_path / 'test.po').write_text('#', encoding='utf8')
+ cat = i18n.CatalogInfo(tmp_path, 'test', 'utf-8')
+ assert cat.is_outdated() # if the mo file does not exist
+
+ mo_file = (tmp_path / 'test.mo')
+ mo_file.write_text('#', encoding='utf8')
+ assert not cat.is_outdated() # if the mo file exists and is newer than the po file
+
+ os.utime(mo_file, (os.stat(mo_file).st_mtime - 10,) * 2) # make the mo file older than the po file
+ assert cat.is_outdated() # if the mo file exists but is older than the po file
+
+
+def test_catalog_write_mo(tmp_path):
+ (tmp_path / 'test.po').write_text('#', encoding='utf8')
+ cat = i18n.CatalogInfo(tmp_path, 'test', 'utf-8')
+ cat.write_mo('en')
+ assert os.path.exists(cat.mo_path)
+ with open(cat.mo_path, 'rb') as f:
+ assert read_mo(f) is not None
+
+
+def test_format_date():
+ date = datetime.date(2016, 2, 7)
+
+ # strftime format
+ format = '%B %d, %Y'
+ assert i18n.format_date(format, date=date, language='') == 'February 07, 2016'
+ assert i18n.format_date(format, date=date, language='unknown') == 'February 07, 2016'
+ assert i18n.format_date(format, date=date, language='en') == 'February 07, 2016'
+ assert i18n.format_date(format, date=date, language='ja') == '2月 07, 2016'
+ assert i18n.format_date(format, date=date, language='de') == 'Februar 07, 2016'
+
+ # raw string
+ format = 'Mon Mar 28 12:37:08 2016, commit 4367aef'
+ assert i18n.format_date(format, date=date, language='en') == format
+
+ format = '%B %d, %Y, %H:%M:%S %I %p'
+ datet = datetime.datetime(2016, 2, 7, 5, 11, 17, 0) # NoQA: DTZ001
+ assert i18n.format_date(format, date=datet, language='en') == 'February 07, 2016, 05:11:17 05 AM'
+
+ format = '%B %-d, %Y, %-H:%-M:%-S %-I %p'
+ assert i18n.format_date(format, date=datet, language='en') == 'February 7, 2016, 5:11:17 5 AM'
+ format = '%x'
+ assert i18n.format_date(format, date=datet, language='en') == 'Feb 7, 2016'
+ format = '%X'
+ if BABEL_VERSION >= (2, 12):
+ assert i18n.format_date(format, date=datet, language='en') == '5:11:17\u202fAM'
+ else:
+ assert i18n.format_date(format, date=datet, language='en') == '5:11:17 AM'
+ assert i18n.format_date(format, date=date, language='en') == 'Feb 7, 2016'
+ format = '%c'
+ if BABEL_VERSION >= (2, 12):
+ assert i18n.format_date(format, date=datet, language='en') == 'Feb 7, 2016, 5:11:17\u202fAM'
+ else:
+ assert i18n.format_date(format, date=datet, language='en') == 'Feb 7, 2016, 5:11:17 AM'
+ assert i18n.format_date(format, date=date, language='en') == 'Feb 7, 2016'
+
+ # timezone
+ format = '%Z'
+ assert i18n.format_date(format, date=datet, language='en') == 'UTC'
+ format = '%z'
+ assert i18n.format_date(format, date=datet, language='en') == '+0000'
+
+
+def test_get_filename_for_language(app):
+ app.env.temp_data['docname'] = 'index'
+
+ # language is en
+ app.env.config.language = 'en'
+ assert i18n.get_image_filename_for_language('foo.png', app.env) == 'foo.en.png'
+ assert i18n.get_image_filename_for_language('foo.bar.png', app.env) == 'foo.bar.en.png'
+ assert i18n.get_image_filename_for_language('dir/foo.png', app.env) == 'dir/foo.en.png'
+ assert i18n.get_image_filename_for_language('../foo.png', app.env) == '../foo.en.png'
+ assert i18n.get_image_filename_for_language('foo', app.env) == 'foo.en'
+
+ # modify figure_language_filename and language is 'en'
+ app.env.config.language = 'en'
+ app.env.config.figure_language_filename = 'images/{language}/{root}{ext}'
+ assert i18n.get_image_filename_for_language('foo.png', app.env) == 'images/en/foo.png'
+ assert i18n.get_image_filename_for_language(
+ 'foo.bar.png', app.env) == 'images/en/foo.bar.png'
+ assert i18n.get_image_filename_for_language(
+ 'subdir/foo.png', app.env) == 'images/en/subdir/foo.png'
+ assert i18n.get_image_filename_for_language(
+ '../foo.png', app.env) == 'images/en/../foo.png'
+ assert i18n.get_image_filename_for_language('foo', app.env) == 'images/en/foo'
+
+ # new path and basename tokens
+ app.env.config.language = 'en'
+ app.env.config.figure_language_filename = '{path}{language}/{basename}{ext}'
+ assert i18n.get_image_filename_for_language('foo.png', app.env) == 'en/foo.png'
+ assert i18n.get_image_filename_for_language(
+ 'foo.bar.png', app.env) == 'en/foo.bar.png'
+ assert i18n.get_image_filename_for_language(
+ 'subdir/foo.png', app.env) == 'subdir/en/foo.png'
+ assert i18n.get_image_filename_for_language(
+ '../foo.png', app.env) == '../en/foo.png'
+ assert i18n.get_image_filename_for_language('foo', app.env) == 'en/foo'
+
+ # invalid figure_language_filename
+ app.env.config.figure_language_filename = '{root}.{invalid}{ext}'
+ with pytest.raises(SphinxError):
+ i18n.get_image_filename_for_language('foo.png', app.env)
+
+ # docpath (for a document in the top of source directory)
+ app.env.config.language = 'en'
+ app.env.config.figure_language_filename = '/{docpath}{language}/{basename}{ext}'
+ assert (i18n.get_image_filename_for_language('foo.png', app.env) ==
+ '/en/foo.png')
+
+ # docpath (for a document in the sub directory)
+ app.env.temp_data['docname'] = 'subdir/index'
+ assert (i18n.get_image_filename_for_language('foo.png', app.env) ==
+ '/subdir/en/foo.png')
+
+
+def test_CatalogRepository(tmp_path):
+ (tmp_path / 'loc1' / 'xx' / 'LC_MESSAGES').mkdir(parents=True, exist_ok=True)
+ (tmp_path / 'loc1' / 'xx' / 'LC_MESSAGES' / 'test1.po').write_text('#', encoding='utf8')
+ (tmp_path / 'loc1' / 'xx' / 'LC_MESSAGES' / 'test2.po').write_text('#', encoding='utf8')
+ (tmp_path / 'loc1' / 'xx' / 'LC_MESSAGES' / 'sub').mkdir(parents=True, exist_ok=True)
+ (tmp_path / 'loc1' / 'xx' / 'LC_MESSAGES' / 'sub' / 'test3.po').write_text('#', encoding='utf8')
+ (tmp_path / 'loc1' / 'xx' / 'LC_MESSAGES' / 'sub' / 'test4.po').write_text('#', encoding='utf8')
+ (tmp_path / 'loc1' / 'xx' / 'LC_MESSAGES' / '.dotdir').mkdir(parents=True, exist_ok=True)
+ (tmp_path / 'loc1' / 'xx' / 'LC_MESSAGES' / '.dotdir' / 'test5.po').write_text('#', encoding='utf8')
+ (tmp_path / 'loc1' / 'yy' / 'LC_MESSAGES').mkdir(parents=True, exist_ok=True)
+ (tmp_path / 'loc1' / 'yy' / 'LC_MESSAGES' / 'test6.po').write_text('#', encoding='utf8')
+ (tmp_path / 'loc2' / 'xx' / 'LC_MESSAGES').mkdir(parents=True, exist_ok=True)
+ (tmp_path / 'loc2' / 'xx' / 'LC_MESSAGES' / 'test1.po').write_text('#', encoding='utf8')
+ (tmp_path / 'loc2' / 'xx' / 'LC_MESSAGES' / 'test7.po').write_text('#', encoding='utf8')
+ (tmp_path / 'loc1' / 'xx' / 'LC_MESSAGES' / '.dotdir2').mkdir(parents=True, exist_ok=True)
+ (tmp_path / 'loc1' / 'xx' / 'LC_MESSAGES' / '.dotdir2' / 'test8.po').write_text('#', encoding='utf8')
+
+ # for language xx
+ repo = i18n.CatalogRepository(tmp_path, ['loc1', 'loc2'], 'xx', 'utf-8')
+ assert list(repo.locale_dirs) == [str(tmp_path / 'loc1'),
+ str(tmp_path / 'loc2')]
+ assert all(isinstance(c, i18n.CatalogInfo) for c in repo.catalogs)
+ assert sorted(c.domain for c in repo.catalogs) == ['sub/test3', 'sub/test4',
+ 'test1', 'test1', 'test2', 'test7']
+
+ # for language yy
+ repo = i18n.CatalogRepository(tmp_path, ['loc1', 'loc2'], 'yy', 'utf-8')
+ assert sorted(c.domain for c in repo.catalogs) == ['test6']
+
+ # unknown languages
+ repo = i18n.CatalogRepository(tmp_path, ['loc1', 'loc2'], 'zz', 'utf-8')
+ assert sorted(c.domain for c in repo.catalogs) == []
+
+ # no languages
+ repo = i18n.CatalogRepository(tmp_path, ['loc1', 'loc2'], None, 'utf-8')
+ assert sorted(c.domain for c in repo.catalogs) == []
+
+ # unknown locale_dirs
+ repo = i18n.CatalogRepository(tmp_path, ['loc3'], None, 'utf-8')
+ assert sorted(c.domain for c in repo.catalogs) == []
+
+ # no locale_dirs
+ repo = i18n.CatalogRepository(tmp_path, [], None, 'utf-8')
+ assert sorted(c.domain for c in repo.catalogs) == []
diff --git a/tests/test_util/test_util_images.py b/tests/test_util/test_util_images.py
new file mode 100644
index 0000000..15853c7
--- /dev/null
+++ b/tests/test_util/test_util_images.py
@@ -0,0 +1,74 @@
+"""Test images util."""
+
+import pytest
+
+from sphinx.util.images import (
+ get_image_extension,
+ get_image_size,
+ guess_mimetype,
+ parse_data_uri,
+)
+
+GIF_FILENAME = 'img.gif'
+PNG_FILENAME = 'img.png'
+PDF_FILENAME = 'img.pdf'
+TXT_FILENAME = 'index.txt'
+
+
+def test_get_image_size(rootdir):
+ assert get_image_size(rootdir / 'test-root' / GIF_FILENAME) == (200, 181)
+ assert get_image_size(rootdir / 'test-root' / PNG_FILENAME) == (200, 181)
+ assert get_image_size(rootdir / 'test-root' / PDF_FILENAME) is None
+ assert get_image_size(rootdir / 'test-root' / TXT_FILENAME) is None
+
+
+@pytest.mark.filterwarnings('ignore:The content argument')
+def test_guess_mimetype():
+ # guess by filename
+ assert guess_mimetype('img.png') == 'image/png'
+ assert guess_mimetype('img.jpg') == 'image/jpeg'
+ assert guess_mimetype('img.txt') is None
+ assert guess_mimetype('img.txt', default='text/plain') == 'text/plain'
+ assert guess_mimetype('no_extension') is None
+ assert guess_mimetype('IMG.PNG') == 'image/png'
+
+ # the default parameter is only used when the mimetype cannot be guessed from the extension
+ assert guess_mimetype('img.png', 'text/plain') == 'image/png'
+ assert guess_mimetype('no_extension', 'text/plain') == 'text/plain'
+
+
+def test_get_image_extension():
+ assert get_image_extension('image/png') == '.png'
+ assert get_image_extension('image/jpeg') == '.jpg'
+ assert get_image_extension('image/svg+xml') == '.svg'
+ assert get_image_extension('text/plain') is None
+
+
+def test_parse_data_uri():
+ # standard case
+ uri = ("data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAUAAAAFCAYAAACNbyblAAAAHElEQVQI12P4"
+ "//8/w38GIAXDIBKE0DHxgljNBAAO9TXL0Y4OHwAAAABJRU5ErkJggg==")
+ image = parse_data_uri(uri)
+ assert image is not None
+ assert image.mimetype == 'image/png'
+ assert image.charset == 'US-ASCII'
+
+ # no mimetype
+ uri = ("data:charset=utf-8,iVBORw0KGgoAAAANSUhEUgAAAAUAAAAFCAYAAACNbyblAAAAHElE"
+ "QVQI12P4//8/w38GIAXDIBKE0DHxgljNBAAO9TXL0Y4OHwAAAABJRU5ErkJggg==")
+ image = parse_data_uri(uri)
+ assert image is not None
+ assert image.mimetype == 'text/plain'
+ assert image.charset == 'utf-8'
+
+ # non data URI
+ uri = ("image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAUAAAAFCAYAAACNbyblAAAAHElEQVQI12P4"
+ "//8/w38GIAXDIBKE0DHxgljNBAAO9TXL0Y4OHwAAAABJRU5ErkJggg==")
+ image = parse_data_uri(uri)
+ assert image is None
+
+ # invalid data URI (no properties)
+ uri = ("data:iVBORw0KGgoAAAANSUhEUgAAAAUAAAAFCAYAAACNbyblAAAAHElEQVQI12P4"
+ "//8/w38GIAXDIBKE0DHxgljNBAAO9TXL0Y4OHwAAAABJRU5ErkJggg==")
+ with pytest.raises(ValueError, match=r'not enough values to unpack \(expected 2, got 1\)'):
+ parse_data_uri(uri)
diff --git a/tests/test_util/test_util_inspect.py b/tests/test_util/test_util_inspect.py
new file mode 100644
index 0000000..32840b8
--- /dev/null
+++ b/tests/test_util/test_util_inspect.py
@@ -0,0 +1,853 @@
+"""Tests util.inspect functions."""
+
+from __future__ import annotations
+
+import ast
+import datetime
+import enum
+import functools
+import sys
+import types
+from inspect import Parameter
+from typing import Callable, List, Optional, Union # NoQA: UP035
+
+import pytest
+
+from sphinx.util import inspect
+from sphinx.util.inspect import TypeAliasForwardRef, TypeAliasNamespace, stringify_signature
+from sphinx.util.typing import stringify_annotation
+
+
+class Base:
+ def meth(self):
+ pass
+
+ @staticmethod
+ def staticmeth():
+ pass
+
+ @classmethod
+ def classmeth(cls):
+ pass
+
+ @property
+ def prop(self):
+ pass
+
+ partialmeth = functools.partialmethod(meth)
+
+ async def coroutinemeth(self):
+ pass
+
+ partial_coroutinemeth = functools.partialmethod(coroutinemeth)
+
+ @classmethod
+ async def coroutineclassmeth(cls):
+ """A documented coroutine classmethod"""
+ pass
+
+
+class Inherited(Base):
+ pass
+
+
+def func():
+ pass
+
+
+async def coroutinefunc():
+ pass
+
+
+async def asyncgenerator():
+ yield
+
+
+partial_func = functools.partial(func)
+partial_coroutinefunc = functools.partial(coroutinefunc)
+
+builtin_func = print
+partial_builtin_func = functools.partial(print)
+
+
+class Descriptor:
+ def __get__(self, obj, typ=None):
+ pass
+
+
+class _Callable:
+ def __call__(self):
+ pass
+
+
+def _decorator(f):
+ @functools.wraps(f)
+ def wrapper():
+ return f()
+ return wrapper
+
+
+def test_TypeAliasForwardRef():
+ alias = TypeAliasForwardRef('example')
+ assert stringify_annotation(alias, 'fully-qualified-except-typing') == 'example'
+
+ alias = Optional[alias]
+ assert stringify_annotation(alias, 'fully-qualified-except-typing') == 'example | None'
+
+
+def test_TypeAliasNamespace():
+ import logging.config
+ type_alias = TypeAliasNamespace({'logging.Filter': 'MyFilter',
+ 'logging.Handler': 'MyHandler',
+ 'logging.handlers.SyslogHandler': 'MySyslogHandler'})
+
+ assert type_alias['logging'].Filter == 'MyFilter'
+ assert type_alias['logging'].Handler == 'MyHandler'
+ assert type_alias['logging'].handlers.SyslogHandler == 'MySyslogHandler'
+ assert type_alias['logging'].Logger == logging.Logger
+ assert type_alias['logging'].config == logging.config
+
+ with pytest.raises(KeyError):
+ assert type_alias['log']
+
+ with pytest.raises(KeyError):
+ assert type_alias['unknown']
+
+
+def test_signature():
+ # literals
+ with pytest.raises(TypeError):
+ inspect.signature(1)
+
+ with pytest.raises(TypeError):
+ inspect.signature('')
+
+ # builtins are supported on a case-by-case basis, depending on whether
+ # they define __text_signature__
+ if getattr(list, '__text_signature__', None):
+ sig = inspect.stringify_signature(inspect.signature(list))
+ assert sig == '(iterable=(), /)'
+ else:
+ with pytest.raises(ValueError, match='no signature found for builtin type'):
+ inspect.signature(list)
+ with pytest.raises(ValueError, match='no signature found for builtin type'):
+ inspect.signature(range)
+
+ # normal function
+ def func(a, b, c=1, d=2, *e, **f):
+ pass
+
+ sig = inspect.stringify_signature(inspect.signature(func))
+ assert sig == '(a, b, c=1, d=2, *e, **f)'
+
+
+def test_signature_partial():
+ def fun(a, b, c=1, d=2):
+ pass
+ p = functools.partial(fun, 10, c=11)
+
+ sig = inspect.signature(p)
+ assert stringify_signature(sig) == '(b, *, c=11, d=2)'
+
+
+def test_signature_methods():
+ class Foo:
+ def meth1(self, arg1, **kwargs):
+ pass
+
+ @classmethod
+ def meth2(cls, arg1, *args, **kwargs):
+ pass
+
+ @staticmethod
+ def meth3(arg1, *args, **kwargs):
+ pass
+
+ @functools.wraps(Foo().meth1)
+ def wrapped_bound_method(*args, **kwargs):
+ pass
+
+ # unbound method
+ sig = inspect.signature(Foo.meth1)
+ assert stringify_signature(sig) == '(self, arg1, **kwargs)'
+
+ sig = inspect.signature(Foo.meth1, bound_method=True)
+ assert stringify_signature(sig) == '(arg1, **kwargs)'
+
+ # bound method
+ sig = inspect.signature(Foo().meth1)
+ assert stringify_signature(sig) == '(arg1, **kwargs)'
+
+ # class method
+ sig = inspect.signature(Foo.meth2)
+ assert stringify_signature(sig) == '(arg1, *args, **kwargs)'
+
+ sig = inspect.signature(Foo().meth2)
+ assert stringify_signature(sig) == '(arg1, *args, **kwargs)'
+
+ # static method
+ sig = inspect.signature(Foo.meth3)
+ assert stringify_signature(sig) == '(arg1, *args, **kwargs)'
+
+ sig = inspect.signature(Foo().meth3)
+ assert stringify_signature(sig) == '(arg1, *args, **kwargs)'
+
+ # wrapped bound method
+ sig = inspect.signature(wrapped_bound_method)
+ assert stringify_signature(sig) == '(arg1, **kwargs)'
+
+
+def test_signature_partialmethod():
+ from functools import partialmethod
+
+ class Foo:
+ def meth1(self, arg1, arg2, arg3=None, arg4=None):
+ pass
+
+ def meth2(self, arg1, arg2):
+ pass
+
+ foo = partialmethod(meth1, 1, 2)
+ bar = partialmethod(meth1, 1, arg3=3)
+ baz = partialmethod(meth2, 1, 2)
+
+ subject = Foo()
+ sig = inspect.signature(subject.foo)
+ assert stringify_signature(sig) == '(arg3=None, arg4=None)'
+
+ sig = inspect.signature(subject.bar)
+ assert stringify_signature(sig) == '(arg2, *, arg3=3, arg4=None)'
+
+ sig = inspect.signature(subject.baz)
+ assert stringify_signature(sig) == '()'
+
+
+def test_signature_annotations():
+ import tests.test_util.typing_test_data as mod
+
+ # Class annotations
+ sig = inspect.signature(mod.f0)
+ assert stringify_signature(sig) == '(x: int, y: numbers.Integral) -> None'
+
+ # Generic types with concrete parameters
+ sig = inspect.signature(mod.f1)
+ assert stringify_signature(sig) == '(x: list[int]) -> typing.List[int]'
+
+ # TypeVars and generic types with TypeVars
+ sig = inspect.signature(mod.f2)
+ assert stringify_signature(sig) == ('(x: typing.List[tests.test_util.typing_test_data.T],'
+ ' y: typing.List[tests.test_util.typing_test_data.T_co],'
+ ' z: tests.test_util.typing_test_data.T'
+ ') -> typing.List[tests.test_util.typing_test_data.T_contra]')
+
+ # Union types
+ sig = inspect.signature(mod.f3)
+ assert stringify_signature(sig) == '(x: str | numbers.Integral) -> None'
+
+ # Quoted annotations
+ sig = inspect.signature(mod.f4)
+ assert stringify_signature(sig) == '(x: str, y: str) -> None'
+
+ # Keyword-only arguments
+ sig = inspect.signature(mod.f5)
+ assert stringify_signature(sig) == '(x: int, *, y: str, z: str) -> None'
+
+ # Keyword-only arguments with varargs
+ sig = inspect.signature(mod.f6)
+ assert stringify_signature(sig) == '(x: int, *args, y: str, z: str) -> None'
+
+ # Space around '=' for defaults
+ sig = inspect.signature(mod.f7)
+ if sys.version_info[:2] <= (3, 10):
+ assert stringify_signature(sig) == '(x: int | None = None, y: dict = {}) -> None'
+ else:
+ assert stringify_signature(sig) == '(x: int = None, y: dict = {}) -> None'
+
+ # Callable types
+ sig = inspect.signature(mod.f8)
+ assert stringify_signature(sig) == '(x: typing.Callable[[int, str], int]) -> None'
+
+ sig = inspect.signature(mod.f9)
+ assert stringify_signature(sig) == '(x: typing.Callable) -> None'
+
+ # Tuple types
+ sig = inspect.signature(mod.f10)
+ assert stringify_signature(sig) == '(x: typing.Tuple[int, str], y: typing.Tuple[int, ...]) -> None'
+
+ # Instance annotations
+ sig = inspect.signature(mod.f11)
+ assert stringify_signature(sig) == '(x: CustomAnnotation, y: 123) -> None'
+
+ # tuple with more than two items
+ sig = inspect.signature(mod.f12)
+ assert stringify_signature(sig) == '() -> typing.Tuple[int, str, int]'
+
+ # optional
+ sig = inspect.signature(mod.f13)
+ assert stringify_signature(sig) == '() -> str | None'
+
+ # optional union
+ sig = inspect.signature(mod.f20)
+ assert stringify_signature(sig) in ('() -> int | str | None',
+ '() -> str | int | None')
+
+ # Any
+ sig = inspect.signature(mod.f14)
+ assert stringify_signature(sig) == '() -> typing.Any'
+
+ # ForwardRef
+ sig = inspect.signature(mod.f15)
+ assert stringify_signature(sig) == '(x: Unknown, y: int) -> typing.Any'
+
+ # keyword only arguments (1)
+ sig = inspect.signature(mod.f16)
+ assert stringify_signature(sig) == '(arg1, arg2, *, arg3=None, arg4=None)'
+
+ # keyword only arguments (2)
+ sig = inspect.signature(mod.f17)
+ assert stringify_signature(sig) == '(*, arg3, arg4)'
+
+ sig = inspect.signature(mod.f18)
+ assert stringify_signature(sig) == ('(self, arg1: int | typing.Tuple = 10) -> '
+ 'typing.List[typing.Dict]')
+
+ # annotations for variadic and keyword parameters
+ sig = inspect.signature(mod.f19)
+ assert stringify_signature(sig) == '(*args: int, **kwargs: str)'
+
+ # default value is inspect.Signature.empty
+ sig = inspect.signature(mod.f21)
+ assert stringify_signature(sig) == "(arg1='whatever', arg2)"
+
+ # type hints by string
+ sig = inspect.signature(mod.Node.children)
+ assert stringify_signature(sig) == '(self) -> typing.List[tests.test_util.typing_test_data.Node]'
+
+ sig = inspect.signature(mod.Node.__init__)
+ assert stringify_signature(sig) == '(self, parent: tests.test_util.typing_test_data.Node | None) -> None'
+
+ # show_annotation is False
+ sig = inspect.signature(mod.f7)
+ assert stringify_signature(sig, show_annotation=False) == '(x=None, y={})'
+
+ # show_return_annotation is False
+ sig = inspect.signature(mod.f7)
+ if sys.version_info[:2] <= (3, 10):
+ assert stringify_signature(sig, show_return_annotation=False) == '(x: int | None = None, y: dict = {})'
+ else:
+ assert stringify_signature(sig, show_return_annotation=False) == '(x: int = None, y: dict = {})'
+
+ # unqualified_typehints is True
+ sig = inspect.signature(mod.f7)
+ if sys.version_info[:2] <= (3, 10):
+ assert stringify_signature(sig, unqualified_typehints=True) == '(x: int | None = None, y: dict = {}) -> None'
+ else:
+ assert stringify_signature(sig, unqualified_typehints=True) == '(x: int = None, y: dict = {}) -> None'
+
+ # case: separator at head
+ sig = inspect.signature(mod.f22)
+ assert stringify_signature(sig) == '(*, a, b)'
+
+ # case: separator in the middle
+ sig = inspect.signature(mod.f23)
+ assert stringify_signature(sig) == '(a, b, /, c, d)'
+
+ sig = inspect.signature(mod.f24)
+ assert stringify_signature(sig) == '(a, /, *, b)'
+
+ # case: separator at tail
+ sig = inspect.signature(mod.f25)
+ assert stringify_signature(sig) == '(a, b, /)'
+
+
+def test_signature_from_str_basic():
+ signature = '(a, b, *args, c=0, d="blah", **kwargs)'
+ sig = inspect.signature_from_str(signature)
+ assert list(sig.parameters.keys()) == ['a', 'b', 'args', 'c', 'd', 'kwargs']
+ assert sig.parameters['a'].name == 'a'
+ assert sig.parameters['a'].kind == Parameter.POSITIONAL_OR_KEYWORD
+ assert sig.parameters['a'].default == Parameter.empty
+ assert sig.parameters['a'].annotation == Parameter.empty
+ assert sig.parameters['b'].name == 'b'
+ assert sig.parameters['b'].kind == Parameter.POSITIONAL_OR_KEYWORD
+ assert sig.parameters['b'].default == Parameter.empty
+ assert sig.parameters['b'].annotation == Parameter.empty
+ assert sig.parameters['args'].name == 'args'
+ assert sig.parameters['args'].kind == Parameter.VAR_POSITIONAL
+ assert sig.parameters['args'].default == Parameter.empty
+ assert sig.parameters['args'].annotation == Parameter.empty
+ assert sig.parameters['c'].name == 'c'
+ assert sig.parameters['c'].kind == Parameter.KEYWORD_ONLY
+ assert sig.parameters['c'].default == '0'
+ assert sig.parameters['c'].annotation == Parameter.empty
+ assert sig.parameters['d'].name == 'd'
+ assert sig.parameters['d'].kind == Parameter.KEYWORD_ONLY
+ assert sig.parameters['d'].default == "'blah'"
+ assert sig.parameters['d'].annotation == Parameter.empty
+ assert sig.parameters['kwargs'].name == 'kwargs'
+ assert sig.parameters['kwargs'].kind == Parameter.VAR_KEYWORD
+ assert sig.parameters['kwargs'].default == Parameter.empty
+ assert sig.parameters['kwargs'].annotation == Parameter.empty
+ assert sig.return_annotation == Parameter.empty
+
+
+def test_signature_from_str_default_values():
+ signature = ('(a=0, b=0.0, c="str", d=b"bytes", e=..., f=True, '
+ 'g=[1, 2, 3], h={"a": 1}, i={1, 2, 3}, '
+ 'j=lambda x, y: None, k=None, l=object(), m=foo.bar.CONSTANT)')
+ sig = inspect.signature_from_str(signature)
+ assert sig.parameters['a'].default == '0'
+ assert sig.parameters['b'].default == '0.0'
+ assert sig.parameters['c'].default == "'str'"
+ assert sig.parameters['d'].default == "b'bytes'"
+ assert sig.parameters['e'].default == '...'
+ assert sig.parameters['f'].default == 'True'
+ assert sig.parameters['g'].default == '[1, 2, 3]'
+ assert sig.parameters['h'].default == "{'a': 1}"
+ assert sig.parameters['i'].default == '{1, 2, 3}'
+ assert sig.parameters['j'].default == 'lambda x, y: ...'
+ assert sig.parameters['k'].default == 'None'
+ assert sig.parameters['l'].default == 'object()'
+ assert sig.parameters['m'].default == 'foo.bar.CONSTANT'
+
+
+def test_signature_from_str_annotations():
+ signature = '(a: int, *args: bytes, b: str = "blah", **kwargs: float) -> None'
+ sig = inspect.signature_from_str(signature)
+ assert list(sig.parameters.keys()) == ['a', 'args', 'b', 'kwargs']
+ assert sig.parameters['a'].annotation == "int"
+ assert sig.parameters['args'].annotation == "bytes"
+ assert sig.parameters['b'].annotation == "str"
+ assert sig.parameters['kwargs'].annotation == "float"
+ assert sig.return_annotation == 'None'
+
+
+def test_signature_from_str_complex_annotations():
+ sig = inspect.signature_from_str('() -> Tuple[str, int, ...]')
+ assert sig.return_annotation == 'Tuple[str, int, ...]'
+
+ sig = inspect.signature_from_str('() -> Callable[[int, int], int]')
+ assert sig.return_annotation == 'Callable[[int, int], int]'
+
+
+def test_signature_from_str_kwonly_args():
+ sig = inspect.signature_from_str('(a, *, b)')
+ assert list(sig.parameters.keys()) == ['a', 'b']
+ assert sig.parameters['a'].kind == Parameter.POSITIONAL_OR_KEYWORD
+ assert sig.parameters['a'].default == Parameter.empty
+ assert sig.parameters['b'].kind == Parameter.KEYWORD_ONLY
+ assert sig.parameters['b'].default == Parameter.empty
+
+
+def test_signature_from_str_positional_only_args():
+ sig = inspect.signature_from_str('(a, b=0, /, c=1)')
+ assert list(sig.parameters.keys()) == ['a', 'b', 'c']
+ assert sig.parameters['a'].kind == Parameter.POSITIONAL_ONLY
+ assert sig.parameters['a'].default == Parameter.empty
+ assert sig.parameters['b'].kind == Parameter.POSITIONAL_ONLY
+ assert sig.parameters['b'].default == '0'
+ assert sig.parameters['c'].kind == Parameter.POSITIONAL_OR_KEYWORD
+ assert sig.parameters['c'].default == '1'
+
+
+def test_signature_from_str_invalid():
+ with pytest.raises(SyntaxError):
+ inspect.signature_from_str('')
+
+
+def test_signature_from_ast():
+ signature = 'def func(a, b, *args, c=0, d="blah", **kwargs): pass'
+ tree = ast.parse(signature)
+ sig = inspect.signature_from_ast(tree.body[0])
+ assert list(sig.parameters.keys()) == ['a', 'b', 'args', 'c', 'd', 'kwargs']
+ assert sig.parameters['a'].name == 'a'
+ assert sig.parameters['a'].kind == Parameter.POSITIONAL_OR_KEYWORD
+ assert sig.parameters['a'].default == Parameter.empty
+ assert sig.parameters['a'].annotation == Parameter.empty
+ assert sig.parameters['b'].name == 'b'
+ assert sig.parameters['b'].kind == Parameter.POSITIONAL_OR_KEYWORD
+ assert sig.parameters['b'].default == Parameter.empty
+ assert sig.parameters['b'].annotation == Parameter.empty
+ assert sig.parameters['args'].name == 'args'
+ assert sig.parameters['args'].kind == Parameter.VAR_POSITIONAL
+ assert sig.parameters['args'].default == Parameter.empty
+ assert sig.parameters['args'].annotation == Parameter.empty
+ assert sig.parameters['c'].name == 'c'
+ assert sig.parameters['c'].kind == Parameter.KEYWORD_ONLY
+ assert sig.parameters['c'].default == '0'
+ assert sig.parameters['c'].annotation == Parameter.empty
+ assert sig.parameters['d'].name == 'd'
+ assert sig.parameters['d'].kind == Parameter.KEYWORD_ONLY
+ assert sig.parameters['d'].default == "'blah'"
+ assert sig.parameters['d'].annotation == Parameter.empty
+ assert sig.parameters['kwargs'].name == 'kwargs'
+ assert sig.parameters['kwargs'].kind == Parameter.VAR_KEYWORD
+ assert sig.parameters['kwargs'].default == Parameter.empty
+ assert sig.parameters['kwargs'].annotation == Parameter.empty
+ assert sig.return_annotation == Parameter.empty
+
+
+def test_safe_getattr_with_default():
+ class Foo:
+ def __getattr__(self, item):
+ raise Exception
+
+ obj = Foo()
+
+ result = inspect.safe_getattr(obj, 'bar', 'baz')
+
+ assert result == 'baz'
+
+
+def test_safe_getattr_with_exception():
+ class Foo:
+ def __getattr__(self, item):
+ raise Exception
+
+ obj = Foo()
+
+ with pytest.raises(AttributeError, match='bar'):
+ inspect.safe_getattr(obj, 'bar')
+
+
+def test_safe_getattr_with_property_exception():
+ class Foo:
+ @property
+ def bar(self):
+ raise Exception
+
+ obj = Foo()
+
+ with pytest.raises(AttributeError, match='bar'):
+ inspect.safe_getattr(obj, 'bar')
+
+
+def test_safe_getattr_with___dict___override():
+ class Foo:
+ @property
+ def __dict__(self):
+ raise Exception
+
+ obj = Foo()
+
+ with pytest.raises(AttributeError, match='bar'):
+ inspect.safe_getattr(obj, 'bar')
+
+
+def test_dictionary_sorting():
+ dictionary = {"c": 3, "a": 1, "d": 2, "b": 4}
+ description = inspect.object_description(dictionary)
+ assert description == "{'a': 1, 'b': 4, 'c': 3, 'd': 2}"
+
+
+def test_set_sorting():
+ set_ = set("gfedcba")
+ description = inspect.object_description(set_)
+ assert description == "{'a', 'b', 'c', 'd', 'e', 'f', 'g'}"
+
+
+def test_set_sorting_enum():
+ class MyEnum(enum.Enum):
+ a = 1
+ b = 2
+ c = 3
+
+ set_ = set(MyEnum)
+ description = inspect.object_description(set_)
+ assert description == "{MyEnum.a, MyEnum.b, MyEnum.c}"
+
+
+def test_set_sorting_fallback():
+ set_ = {None, 1}
+ description = inspect.object_description(set_)
+ assert description == "{1, None}"
+
+
+def test_deterministic_nested_collection_descriptions():
+ # sortable
+ assert inspect.object_description([{1, 2, 3, 10}]) == "[{1, 2, 3, 10}]"
+ assert inspect.object_description(({1, 2, 3, 10},)) == "({1, 2, 3, 10},)"
+ # non-sortable (elements of varying datatype)
+ assert inspect.object_description([{None, 1}]) == "[{1, None}]"
+ assert inspect.object_description(({None, 1},)) == "({1, None},)"
+ assert inspect.object_description([{None, 1, 'A'}]) == "[{'A', 1, None}]"
+ assert inspect.object_description(({None, 1, 'A'},)) == "({'A', 1, None},)"
+
+
+def test_frozenset_sorting():
+ frozenset_ = frozenset("gfedcba")
+ description = inspect.object_description(frozenset_)
+ assert description == "frozenset({'a', 'b', 'c', 'd', 'e', 'f', 'g'})"
+
+
+def test_frozenset_sorting_fallback():
+ frozenset_ = frozenset((None, 1))
+ description = inspect.object_description(frozenset_)
+ assert description == "frozenset({1, None})"
+
+
+def test_nested_tuple_sorting():
+ tuple_ = ({"c", "b", "a"},) # nb. trailing comma
+ description = inspect.object_description(tuple_)
+ assert description == "({'a', 'b', 'c'},)"
+
+ tuple_ = ({"c", "b", "a"}, {"f", "e", "d"})
+ description = inspect.object_description(tuple_)
+ assert description == "({'a', 'b', 'c'}, {'d', 'e', 'f'})"
+
+
+def test_recursive_collection_description():
+ dict_a_, dict_b_ = {"a": 1}, {"b": 2}
+ dict_a_["link"], dict_b_["link"] = dict_b_, dict_a_
+ description_a, description_b = (
+ inspect.object_description(dict_a_),
+ inspect.object_description(dict_b_),
+ )
+ assert description_a == "{'a': 1, 'link': {'b': 2, 'link': dict(...)}}"
+ assert description_b == "{'b': 2, 'link': {'a': 1, 'link': dict(...)}}"
+
+ list_c_, list_d_ = [1, 2, 3, 4], [5, 6, 7, 8]
+ list_c_.append(list_d_)
+ list_d_.append(list_c_)
+ description_c, description_d = (
+ inspect.object_description(list_c_),
+ inspect.object_description(list_d_),
+ )
+
+ assert description_c == "[1, 2, 3, 4, [5, 6, 7, 8, list(...)]]"
+ assert description_d == "[5, 6, 7, 8, [1, 2, 3, 4, list(...)]]"
+
+
+def test_dict_customtype():
+ class CustomType:
+ def __init__(self, value):
+ self._value = value
+
+ def __repr__(self):
+ return "<CustomType(%r)>" % self._value
+
+ dictionary = {CustomType(2): 2, CustomType(1): 1}
+ description = inspect.object_description(dictionary)
+ # Type is unsortable, just check that it does not crash
+ assert "<CustomType(2)>: 2" in description
+
+
+def test_object_description_enum():
+ class MyEnum(enum.Enum):
+ FOO = 1
+ BAR = 2
+
+ assert inspect.object_description(MyEnum.FOO) == "MyEnum.FOO"
+
+
+def test_object_description_enum_custom_repr():
+ class MyEnum(enum.Enum):
+ FOO = 1
+ BAR = 2
+
+ def __repr__(self):
+ return self.name
+
+ assert inspect.object_description(MyEnum.FOO) == "FOO"
+
+
+def test_getslots():
+ class Foo:
+ pass
+
+ class Bar:
+ __slots__ = ['attr']
+
+ class Baz:
+ __slots__ = {'attr': 'docstring'}
+
+ class Qux:
+ __slots__ = 'attr'
+
+ assert inspect.getslots(Foo) is None
+ assert inspect.getslots(Bar) == {'attr': None}
+ assert inspect.getslots(Baz) == {'attr': 'docstring'}
+ assert inspect.getslots(Qux) == {'attr': None}
+
+ with pytest.raises(TypeError):
+ inspect.getslots(Bar())
+
+
+def test_isclassmethod():
+ assert inspect.isclassmethod(Base.classmeth) is True
+ assert inspect.isclassmethod(Base.meth) is False
+ assert inspect.isclassmethod(Inherited.classmeth) is True
+ assert inspect.isclassmethod(Inherited.meth) is False
+
+
+def test_isstaticmethod():
+ assert inspect.isstaticmethod(Base.staticmeth, Base, 'staticmeth') is True
+ assert inspect.isstaticmethod(Base.meth, Base, 'meth') is False
+ assert inspect.isstaticmethod(Inherited.staticmeth, Inherited, 'staticmeth') is True
+ assert inspect.isstaticmethod(Inherited.meth, Inherited, 'meth') is False
+
+
+def test_iscoroutinefunction():
+ assert inspect.iscoroutinefunction(func) is False # function
+ assert inspect.iscoroutinefunction(coroutinefunc) is True # coroutine
+ assert inspect.iscoroutinefunction(partial_coroutinefunc) is True # partial-ed coroutine
+ assert inspect.iscoroutinefunction(Base.meth) is False # method
+ assert inspect.iscoroutinefunction(Base.coroutinemeth) is True # coroutine-method
+ assert inspect.iscoroutinefunction(Base.__dict__["coroutineclassmeth"]) is True # coroutine classmethod
+
+ # partial-ed coroutine-method
+ partial_coroutinemeth = Base.__dict__['partial_coroutinemeth']
+ assert inspect.iscoroutinefunction(partial_coroutinemeth) is True
+
+
+def test_iscoroutinefunction_wrapped():
+ # decorator-wrapped coroutine function
+ assert inspect.iscoroutinefunction(_decorator(coroutinefunc)) is True
+
+
+def test_isfunction():
+ assert inspect.isfunction(func) is True # function
+ assert inspect.isfunction(partial_func) is True # partial-ed function
+ assert inspect.isfunction(Base.meth) is True # method of class
+ assert inspect.isfunction(Base.partialmeth) is True # partial-ed method of class
+ assert inspect.isfunction(Base().meth) is False # method of instance
+ assert inspect.isfunction(builtin_func) is False # builtin function
+ assert inspect.isfunction(partial_builtin_func) is False # partial-ed builtin function
+
+
+def test_isfunction_wrapped():
+ # function wrapping a callable obj
+ assert inspect.isfunction(_decorator(_Callable())) is True
+
+
+def test_isbuiltin():
+ assert inspect.isbuiltin(builtin_func) is True # builtin function
+ assert inspect.isbuiltin(partial_builtin_func) is True # partial-ed builtin function
+ assert inspect.isbuiltin(func) is False # function
+ assert inspect.isbuiltin(partial_func) is False # partial-ed function
+ assert inspect.isbuiltin(Base.meth) is False # method of class
+ assert inspect.isbuiltin(Base().meth) is False # method of instance
+
+
+def test_isdescriptor():
+ assert inspect.isdescriptor(Base.prop) is True # property of class
+ assert inspect.isdescriptor(Base().prop) is False # property of instance
+ assert inspect.isdescriptor(Base.meth) is True # method of class
+ assert inspect.isdescriptor(Base().meth) is True # method of instance
+ assert inspect.isdescriptor(func) is True # function
+
+
+def test_isattributedescriptor():
+ assert inspect.isattributedescriptor(Base.prop) is True # property
+ assert inspect.isattributedescriptor(Base.meth) is False # method
+ assert inspect.isattributedescriptor(Base.staticmeth) is False # staticmethod
+ assert inspect.isattributedescriptor(Base.classmeth) is False # classmethod
+ assert inspect.isattributedescriptor(Descriptor) is False # custom descriptor class
+ assert inspect.isattributedescriptor(str.join) is False # MethodDescriptorType
+ assert inspect.isattributedescriptor(object.__init__) is False # WrapperDescriptorType
+ assert inspect.isattributedescriptor(dict.__dict__['fromkeys']) is False # ClassMethodDescriptorType
+ assert inspect.isattributedescriptor(types.FrameType.f_locals) is True # GetSetDescriptorType
+ assert inspect.isattributedescriptor(datetime.timedelta.days) is True # MemberDescriptorType
+
+ try:
+ # the _testcapi module may not be importable on some distributions
+ # refs: https://github.com/sphinx-doc/sphinx/issues/9868
+ import _testcapi
+
+ testinstancemethod = _testcapi.instancemethod(str.__repr__)
+ assert inspect.isattributedescriptor(testinstancemethod) is False # instancemethod (C-API)
+ except ImportError:
+ pass
+
+
+def test_isproperty():
+ assert inspect.isproperty(Base.prop) is True # property of class
+ assert inspect.isproperty(Base().prop) is False # property of instance
+ assert inspect.isproperty(Base.meth) is False # method of class
+ assert inspect.isproperty(Base().meth) is False # method of instance
+ assert inspect.isproperty(func) is False # function
+
+
+def test_isgenericalias():
+ #: A list of int
+ T = List[int] # NoQA: UP006
+ S = list[Union[str, None]]
+
+ C = Callable[[int], None] # a generic alias not having a doccomment
+
+ assert inspect.isgenericalias(C) is True
+ assert inspect.isgenericalias(Callable) is True
+ assert inspect.isgenericalias(T) is True
+ assert inspect.isgenericalias(List) is True # NoQA: UP006
+ assert inspect.isgenericalias(S) is True
+ assert inspect.isgenericalias(list) is False
+ assert inspect.isgenericalias([]) is False
+ assert inspect.isgenericalias(object()) is False
+ assert inspect.isgenericalias(Base) is False
+
+
+def test_unpartial():
+ def func1(a, b, c):
+ pass
+
+ func2 = functools.partial(func1, 1)
+ func2.__doc__ = "func2"
+ func3 = functools.partial(func2, 2) # nested partial object
+
+ assert inspect.unpartial(func2) is func1
+ assert inspect.unpartial(func3) is func1
+
+
+def test_getdoc_inherited_classmethod():
+ class Foo:
+ @classmethod
+ def meth(cls):
+ """
+ docstring
+ indented text
+ """
+
+ class Bar(Foo):
+ @classmethod
+ def meth(cls):
+ # inherited classmethod
+ pass
+
+ assert inspect.getdoc(Bar.meth, getattr, False, Bar, "meth") is None
+ assert inspect.getdoc(Bar.meth, getattr, True, Bar, "meth") == Foo.meth.__doc__
+
+
+def test_getdoc_inherited_decorated_method():
+ class Foo:
+ def meth(self):
+ """
+ docstring
+ indented text
+ """
+
+ class Bar(Foo):
+ @functools.lru_cache # NoQA: B019
+ def meth(self):
+ # inherited and decorated method
+ pass
+
+ assert inspect.getdoc(Bar.meth, getattr, False, Bar, "meth") is None
+ assert inspect.getdoc(Bar.meth, getattr, True, Bar, "meth") == Foo.meth.__doc__
+
+
+def test_is_builtin_class_method():
+ class MyInt(int):
+ def my_method(self):
+ pass
+
+ assert inspect.is_builtin_class_method(MyInt, 'to_bytes')
+ assert inspect.is_builtin_class_method(MyInt, '__init__')
+ assert not inspect.is_builtin_class_method(MyInt, 'my_method')
+ assert not inspect.is_builtin_class_method(MyInt, 'does_not_exist')
+ assert not inspect.is_builtin_class_method(4, 'still does not crash')
+
+ class ObjectWithMroAttr:
+ def __init__(self, mro_attr):
+ self.__mro__ = mro_attr
+
+ assert not inspect.is_builtin_class_method(ObjectWithMroAttr([1, 2, 3]), 'still does not crash')
diff --git a/tests/test_util/test_util_inventory.py b/tests/test_util/test_util_inventory.py
new file mode 100644
index 0000000..81d31b0
--- /dev/null
+++ b/tests/test_util/test_util_inventory.py
@@ -0,0 +1,78 @@
+"""Test inventory util functions."""
+import os
+import posixpath
+from io import BytesIO
+
+import sphinx.locale
+from sphinx.testing.util import SphinxTestApp
+from sphinx.util.inventory import InventoryFile
+
+from tests.test_util.intersphinx_data import (
+ INVENTORY_V1,
+ INVENTORY_V2,
+ INVENTORY_V2_NO_VERSION,
+)
+
+
+def test_read_inventory_v1():
+ f = BytesIO(INVENTORY_V1)
+ invdata = InventoryFile.load(f, '/util', posixpath.join)
+ assert invdata['py:module']['module'] == \
+ ('foo', '1.0', '/util/foo.html#module-module', '-')
+ assert invdata['py:class']['module.cls'] == \
+ ('foo', '1.0', '/util/foo.html#module.cls', '-')
+
+
+def test_read_inventory_v2():
+ f = BytesIO(INVENTORY_V2)
+ invdata = InventoryFile.load(f, '/util', posixpath.join)
+
+ assert len(invdata['py:module']) == 2
+ assert invdata['py:module']['module1'] == \
+ ('foo', '2.0', '/util/foo.html#module-module1', 'Long Module desc')
+ assert invdata['py:module']['module2'] == \
+ ('foo', '2.0', '/util/foo.html#module-module2', '-')
+ assert invdata['py:function']['module1.func'][2] == \
+ '/util/sub/foo.html#module1.func'
+ assert invdata['c:function']['CFunc'][2] == '/util/cfunc.html#CFunc'
+ assert invdata['std:term']['a term'][2] == \
+ '/util/glossary.html#term-a-term'
+ assert invdata['std:term']['a term including:colon'][2] == \
+ '/util/glossary.html#term-a-term-including-colon'
+
+
+def test_read_inventory_v2_not_having_version():
+ f = BytesIO(INVENTORY_V2_NO_VERSION)
+ invdata = InventoryFile.load(f, '/util', posixpath.join)
+ assert invdata['py:module']['module1'] == \
+ ('foo', '', '/util/foo.html#module-module1', 'Long Module desc')
+
+
+def _write_appconfig(dir, language, prefix=None):
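+ # helper: create a minimal Sphinx project (conf.py + index.rst) under dir/prefix whose only setting is 'language'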
+ prefix = prefix or language
+ os.makedirs(dir / prefix, exist_ok=True)
+ (dir / prefix / 'conf.py').write_text(f'language = "{language}"', encoding='utf8')
+ (dir / prefix / 'index.rst').write_text('index.rst', encoding='utf8')
+ assert sorted(os.listdir(dir / prefix)) == ['conf.py', 'index.rst']
+ assert (dir / prefix / 'index.rst').exists()
+ return dir / prefix
+
+
+def _build_inventory(srcdir):
+ app = SphinxTestApp(srcdir=srcdir)
+ app.build()
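+ # clear the cached translators so a later build with a different locale does not reuse them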
+ sphinx.locale.translators.clear()
+ return app.outdir / 'objects.inv'
+
+
+def test_inventory_localization(tmp_path):
+ # Build an app using Estonian (EE) locale
+ srcdir_et = _write_appconfig(tmp_path, "et")
+ inventory_et = _build_inventory(srcdir_et)
+
+ # Build the same app using English (US) locale
+ srcdir_en = _write_appconfig(tmp_path, "en")
+ inventory_en = _build_inventory(srcdir_en)
+
+ # Ensure that the inventory contents differ
+ assert inventory_et.read_bytes() != inventory_en.read_bytes()
diff --git a/tests/test_util/test_util_logging.py b/tests/test_util/test_util_logging.py
new file mode 100644
index 0000000..4ee548a
--- /dev/null
+++ b/tests/test_util/test_util_logging.py
@@ -0,0 +1,414 @@
+"""Test logging util."""
+
+import codecs
+import os
+import os.path
+
+import pytest
+from docutils import nodes
+
+from sphinx.errors import SphinxWarning
+from sphinx.util import logging, osutil
+from sphinx.util.console import colorize, strip_colors
+from sphinx.util.logging import is_suppressed_warning, prefixed_warnings
+from sphinx.util.parallel import ParallelTasks
+
+
+def test_info_and_warning(app, status, warning):
+ app.verbosity = 2
+ logging.setup(app, status, warning)
+ logger = logging.getLogger(__name__)
+
+ logger.debug('message1')
+ logger.info('message2')
+ logger.warning('message3')
+ logger.critical('message4')
+ logger.error('message5')
+
+ assert 'message1' in status.getvalue()
+ assert 'message2' in status.getvalue()
+ assert 'message3' not in status.getvalue()
+ assert 'message4' not in status.getvalue()
+ assert 'message5' not in status.getvalue()
+
+ assert 'message1' not in warning.getvalue()
+ assert 'message2' not in warning.getvalue()
+ assert 'WARNING: message3' in warning.getvalue()
+ assert 'CRITICAL: message4' in warning.getvalue()
+ assert 'ERROR: message5' in warning.getvalue()
+
+
+def test_Exception(app, status, warning):
+ logging.setup(app, status, warning)
+ logger = logging.getLogger(__name__)
+
+ logger.info(Exception)
+ assert "<class 'Exception'>" in status.getvalue()
+
+
+def test_verbosity_filter(app, status, warning):
+ # verbosity = 0: INFO
+ app.verbosity = 0
+ logging.setup(app, status, warning)
+ logger = logging.getLogger(__name__)
+
+ logger.info('message1')
+ logger.verbose('message2')
+ logger.debug('message3')
+
+ assert 'message1' in status.getvalue()
+ assert 'message2' not in status.getvalue()
+ assert 'message3' not in status.getvalue()
+ assert 'message4' not in status.getvalue()
+
+ # verbosity = 1: VERBOSE
+ app.verbosity = 1
+ logging.setup(app, status, warning)
+ logger = logging.getLogger(__name__)
+
+ logger.info('message1')
+ logger.verbose('message2')
+ logger.debug('message3')
+
+ assert 'message1' in status.getvalue()
+ assert 'message2' in status.getvalue()
+ assert 'message3' not in status.getvalue()
+ assert 'message4' not in status.getvalue()
+
+ # verbosity = 2: DEBUG
+ app.verbosity = 2
+ logging.setup(app, status, warning)
+ logger = logging.getLogger(__name__)
+
+ logger.info('message1')
+ logger.verbose('message2')
+ logger.debug('message3')
+
+ assert 'message1' in status.getvalue()
+ assert 'message2' in status.getvalue()
+ assert 'message3' in status.getvalue()
+ assert 'message4' not in status.getvalue()
+
+
+def test_nonl_info_log(app, status, warning):
+ logging.setup(app, status, warning)
+ logger = logging.getLogger(__name__)
+
+ logger.info('message1', nonl=True)
+ logger.info('message2')
+ logger.info('message3')
+
+ assert 'message1message2\nmessage3' in status.getvalue()
+
+
+def test_once_warning_log(app, status, warning):
+ logging.setup(app, status, warning)
+ logger = logging.getLogger(__name__)
+
+ logger.warning('message: %d', 1, once=True)
+ logger.warning('message: %d', 1, once=True)
+ logger.warning('message: %d', 2, once=True)
+
+ assert 'WARNING: message: 1\nWARNING: message: 2\n' in strip_colors(warning.getvalue())
+
+
+def test_is_suppressed_warning():
+ suppress_warnings = ["ref", "files.*", "rest.duplicated_labels"]
+
+ assert is_suppressed_warning(None, None, suppress_warnings) is False
+ assert is_suppressed_warning("ref", None, suppress_warnings) is True
+ assert is_suppressed_warning("ref", "numref", suppress_warnings) is True
+ assert is_suppressed_warning("ref", "option", suppress_warnings) is True
+ assert is_suppressed_warning("files", "image", suppress_warnings) is True
+ assert is_suppressed_warning("files", "stylesheet", suppress_warnings) is True
+ assert is_suppressed_warning("rest", None, suppress_warnings) is False
+ assert is_suppressed_warning("rest", "syntax", suppress_warnings) is False
+ assert is_suppressed_warning("rest", "duplicated_labels", suppress_warnings) is True
+
+
+def test_suppress_warnings(app, status, warning):
+ logging.setup(app, status, warning)
+ logger = logging.getLogger(__name__)
+
+ app._warncount = 0 # force reset
+
+ app.config.suppress_warnings = []
+ warning.truncate(0)
+ logger.warning('message0', type='test')
+ logger.warning('message1', type='test', subtype='logging')
+ logger.warning('message2', type='test', subtype='crash')
+ logger.warning('message3', type='actual', subtype='logging')
+ assert 'message0' in warning.getvalue()
+ assert 'message1' in warning.getvalue()
+ assert 'message2' in warning.getvalue()
+ assert 'message3' in warning.getvalue()
+ assert app._warncount == 4
+
+ app.config.suppress_warnings = ['test']
+ warning.truncate(0)
+ logger.warning('message0', type='test')
+ logger.warning('message1', type='test', subtype='logging')
+ logger.warning('message2', type='test', subtype='crash')
+ logger.warning('message3', type='actual', subtype='logging')
+ assert 'message0' not in warning.getvalue()
+ assert 'message1' not in warning.getvalue()
+ assert 'message2' not in warning.getvalue()
+ assert 'message3' in warning.getvalue()
+ assert app._warncount == 5
+
+ app.config.suppress_warnings = ['test.logging']
+ warning.truncate(0)
+ logger.warning('message0', type='test')
+ logger.warning('message1', type='test', subtype='logging')
+ logger.warning('message2', type='test', subtype='crash')
+ logger.warning('message3', type='actual', subtype='logging')
+ assert 'message0' in warning.getvalue()
+ assert 'message1' not in warning.getvalue()
+ assert 'message2' in warning.getvalue()
+ assert 'message3' in warning.getvalue()
+ assert app._warncount == 8
+
+
+def test_warningiserror(app, status, warning):
+ logging.setup(app, status, warning)
+ logger = logging.getLogger(__name__)
+
+ # if False, warning is not error
+ app.warningiserror = False
+ logger.warning('message')
+
+ # if True, warning raises SphinxWarning exception
+ app.warningiserror = True
+ with pytest.raises(SphinxWarning):
+ logger.warning('message: %s', 'arg')
+
+ # message contains format string (refs: #4070)
+ with pytest.raises(SphinxWarning):
+ logger.warning('%s')
+
+
+def test_info_location(app, status, warning):
+ logging.setup(app, status, warning)
+ logger = logging.getLogger(__name__)
+
+ logger.info('message1', location='index')
+ assert 'index.txt: message1' in status.getvalue()
+
+ logger.info('message2', location=('index', 10))
+ assert 'index.txt:10: message2' in status.getvalue()
+
+ logger.info('message3', location=None)
+ assert '\nmessage3' in status.getvalue()
+
+ node = nodes.Node()
+ node.source, node.line = ('index.txt', 10)
+ logger.info('message4', location=node)
+ assert 'index.txt:10: message4' in status.getvalue()
+
+ node.source, node.line = ('index.txt', None)
+ logger.info('message5', location=node)
+ assert 'index.txt:: message5' in status.getvalue()
+
+ node.source, node.line = (None, 10)
+ logger.info('message6', location=node)
+ assert '<unknown>:10: message6' in status.getvalue()
+
+ node.source, node.line = (None, None)
+ logger.info('message7', location=node)
+ assert '\nmessage7' in status.getvalue()
+
+
+def test_warning_location(app, status, warning):
+ logging.setup(app, status, warning)
+ logger = logging.getLogger(__name__)
+
+ logger.warning('message1', location='index')
+ assert 'index.txt: WARNING: message1' in warning.getvalue()
+
+ logger.warning('message2', location=('index', 10))
+ assert 'index.txt:10: WARNING: message2' in warning.getvalue()
+
+ logger.warning('message3', location=None)
+ assert colorize('red', 'WARNING: message3') in warning.getvalue()
+
+ node = nodes.Node()
+ node.source, node.line = ('index.txt', 10)
+ logger.warning('message4', location=node)
+ assert 'index.txt:10: WARNING: message4' in warning.getvalue()
+
+ node.source, node.line = ('index.txt', None)
+ logger.warning('message5', location=node)
+ assert 'index.txt:: WARNING: message5' in warning.getvalue()
+
+ node.source, node.line = (None, 10)
+ logger.warning('message6', location=node)
+ assert '<unknown>:10: WARNING: message6' in warning.getvalue()
+
+ node.source, node.line = (None, None)
+ logger.warning('message7', location=node)
+ assert colorize('red', 'WARNING: message7') in warning.getvalue()
+
+
+def test_suppress_logging(app, status, warning):
+ logging.setup(app, status, warning)
+ logger = logging.getLogger(__name__)
+
+ logger.warning('message1')
+ with logging.suppress_logging():
+ logger.warning('message2')
+ assert 'WARNING: message1' in warning.getvalue()
+ assert 'WARNING: message2' not in warning.getvalue()
+
+ assert 'WARNING: message1' in warning.getvalue()
+ assert 'WARNING: message2' not in warning.getvalue()
+
+
+def test_pending_warnings(app, status, warning):
+ logging.setup(app, status, warning)
+ logger = logging.getLogger(__name__)
+
+ logger.warning('message1')
+ with logging.pending_warnings():
+ # not logged yet (buffered) in here
+ logger.warning('message2')
+ logger.warning('message3')
+ assert 'WARNING: message1' in warning.getvalue()
+ assert 'WARNING: message2' not in warning.getvalue()
+ assert 'WARNING: message3' not in warning.getvalue()
+
+ # actually logged as ordered
+ assert 'WARNING: message2\nWARNING: message3' in strip_colors(warning.getvalue())
+
+
+def test_colored_logs(app, status, warning):
+ app.verbosity = 2
+ logging.setup(app, status, warning)
+ logger = logging.getLogger(__name__)
+
+ # default colors
+ logger.debug('message1')
+ logger.verbose('message2')
+ logger.info('message3')
+ logger.warning('message4')
+ logger.critical('message5')
+ logger.error('message6')
+
+ assert colorize('darkgray', 'message1') in status.getvalue()
+ assert 'message2\n' in status.getvalue() # not colored
+ assert 'message3\n' in status.getvalue() # not colored
+ assert colorize('red', 'WARNING: message4') in warning.getvalue()
+ assert 'CRITICAL: message5\n' in warning.getvalue() # not colored
+ assert colorize('darkred', 'ERROR: message6') in warning.getvalue()
+
+ # color specification
+ logger.debug('message7', color='white')
+ logger.info('message8', color='red')
+ assert colorize('white', 'message7') in status.getvalue()
+ assert colorize('red', 'message8') in status.getvalue()
+
+
+@pytest.mark.xfail(os.name != 'posix',
+ reason="Parallel mode does not work on Windows")
+def test_logging_in_ParallelTasks(app, status, warning):
+ logging.setup(app, status, warning)
+ logger = logging.getLogger(__name__)
+
+ def child_process():
+ logger.info('message1')
+ logger.warning('message2', location='index')
+
+ tasks = ParallelTasks(1)
+ tasks.add_task(child_process)
+ tasks.join()
+ assert 'message1' in status.getvalue()
+ assert 'index.txt: WARNING: message2' in warning.getvalue()
+
+
+def test_output_with_unencodable_char(app, status, warning):
+ class StreamWriter(codecs.StreamWriter):
+ def write(self, object):
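+ # cp1252 cannot encode every character; the resulting UnicodeEncodeError is expected to be replaced with "?" by the logging stream wrapper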
+ self.stream.write(object.encode('cp1252').decode('cp1252'))
+
+ logging.setup(app, StreamWriter(status), warning)
+ logger = logging.getLogger(__name__)
+
+ # info with UnicodeEncodeError
+ status.truncate(0)
+ status.seek(0)
+ logger.info("unicode \u206d...")
+ assert status.getvalue() == "unicode ?...\n"
+
+
+def test_skip_warningiserror(app, status, warning):
+ logging.setup(app, status, warning)
+ logger = logging.getLogger(__name__)
+
+ app.warningiserror = True
+ with logging.skip_warningiserror():
+ logger.warning('message')
+
+ # if False, warning raises SphinxWarning exception
+ with logging.skip_warningiserror(False): # NoQA: SIM117
+ with pytest.raises(SphinxWarning):
+ logger.warning('message')
+
+ # It also works during pending_warnings.
+ with logging.pending_warnings(): # NoQA: SIM117
+ with logging.skip_warningiserror():
+ logger.warning('message')
+
+ with pytest.raises(SphinxWarning): # NoQA: PT012,SIM117
+ with logging.pending_warnings():
+ with logging.skip_warningiserror(False):
+ logger.warning('message')
+
+
+def test_prefixed_warnings(app, status, warning):
+ logging.setup(app, status, warning)
+ logger = logging.getLogger(__name__)
+
+ logger.warning('message1')
+ with prefixed_warnings('PREFIX:'):
+ logger.warning('message2')
+ with prefixed_warnings('Another PREFIX:'):
+ logger.warning('message3')
+ logger.warning('message4')
+ logger.warning('message5')
+
+ assert 'WARNING: message1' in warning.getvalue()
+ assert 'WARNING: PREFIX: message2' in warning.getvalue()
+ assert 'WARNING: Another PREFIX: message3' in warning.getvalue()
+ assert 'WARNING: PREFIX: message4' in warning.getvalue()
+ assert 'WARNING: message5' in warning.getvalue()
+
+
+def test_get_node_location_abspath():
+ # Ensure that node locations are reported as an absolute path,
+ # even if the source attribute is a relative path.
+
+ relative_filename = os.path.join('relative', 'path.txt')
+ absolute_filename = osutil.abspath(relative_filename)
+
+ n = nodes.Node()
+ n.source = relative_filename
+
+ location = logging.get_node_location(n)
+
+ assert location == absolute_filename + ':'
+
+
+@pytest.mark.sphinx(confoverrides={'show_warning_types': True})
+def test_show_warning_types(app, status, warning):
+ logging.setup(app, status, warning)
+ logger = logging.getLogger(__name__)
+ logger.warning('message2')
+ logger.warning('message3', type='test')
+ logger.warning('message4', type='test', subtype='logging')
+
+ warnings = strip_colors(warning.getvalue()).splitlines()
+
+ assert warnings == [
+ 'WARNING: message2',
+ 'WARNING: message3 [test]',
+ 'WARNING: message4 [test.logging]',
+ ]
diff --git a/tests/test_util/test_util_matching.py b/tests/test_util/test_util_matching.py
new file mode 100644
index 0000000..7d865ba
--- /dev/null
+++ b/tests/test_util/test_util_matching.py
@@ -0,0 +1,174 @@
+"""Tests sphinx.util.matching functions."""
+from sphinx.util.matching import Matcher, compile_matchers, get_matching_files
+
+
+def test_compile_matchers():
+ # exact matching
+ pat = compile_matchers(['hello.py']).pop()
+ assert pat('hello.py')
+ assert not pat('hello-py')
+ assert not pat('subdir/hello.py')
+
+ # wild card (*)
+ pat = compile_matchers(['hello.*']).pop()
+ assert pat('hello.py')
+ assert pat('hello.rst')
+
+ pat = compile_matchers(['*.py']).pop()
+ assert pat('hello.py')
+ assert pat('world.py')
+ assert not pat('subdir/hello.py')
+
+ # wild card (**)
+ pat = compile_matchers(['hello.**']).pop()
+ assert pat('hello.py')
+ assert pat('hello.rst')
+ assert pat('hello.py/world.py')
+
+ pat = compile_matchers(['**.py']).pop()
+ assert pat('hello.py')
+ assert pat('world.py')
+ assert pat('subdir/hello.py')
+
+ pat = compile_matchers(['**/hello.py']).pop()
+ assert not pat('hello.py')
+ assert pat('subdir/hello.py')
+ assert pat('subdir/subdir/hello.py')
+
+ # wild card (?)
+ pat = compile_matchers(['hello.?']).pop()
+ assert pat('hello.c')
+ assert not pat('hello.py')
+
+ # pattern ([...])
+ pat = compile_matchers(['hello[12\\].py']).pop()
+ assert pat('hello1.py')
+ assert pat('hello2.py')
+ assert pat('hello\\.py')
+ assert not pat('hello3.py')
+
+ pat = compile_matchers(['hello[^12].py']).pop() # "^" has no negation meaning here (unlike in regular expressions)
+ assert pat('hello1.py')
+ assert pat('hello2.py')
+ assert pat('hello^.py')
+ assert not pat('hello3.py')
+
+ # negative pattern ([!...])
+ pat = compile_matchers(['hello[!12].py']).pop()
+ assert not pat('hello1.py')
+ assert not pat('hello2.py')
+ assert not pat('hello/.py') # a negative pattern does not match "/"
+ assert pat('hello3.py')
+
+ # non patterns
+ pat = compile_matchers(['hello[.py']).pop()
+ assert pat('hello[.py')
+ assert not pat('hello.py')
+
+ pat = compile_matchers(['hello[].py']).pop()
+ assert pat('hello[].py')
+ assert not pat('hello.py')
+
+ pat = compile_matchers(['hello[!].py']).pop()
+ assert pat('hello[!].py')
+ assert not pat('hello.py')
+
+
+def test_Matcher():
+ matcher = Matcher(['hello.py', '**/world.py'])
+ assert matcher('hello.py')
+ assert not matcher('subdir/hello.py')
+ assert matcher('world.py')
+ assert matcher('subdir/world.py')
+
+
+def test_get_matching_files_all(rootdir):
+ files = get_matching_files(rootdir / "test-root")
+ assert sorted(files) == [
+ 'Makefile', '_templates/contentssb.html', '_templates/customsb.html',
+ '_templates/layout.html', 'autodoc.txt', 'autodoc_target.py', 'bom.txt', 'conf.py',
+ 'extapi.txt', 'extensions.txt', 'file_with_special_#_chars.xyz', 'footnote.txt',
+ 'images.txt', 'img.foo.png', 'img.gif', 'img.pdf', 'img.png', 'includes.txt',
+ 'index.txt', 'lists.txt', 'literal.inc', 'literal_orig.inc', 'markup.txt', 'math.txt',
+ 'objects.txt', 'otherext.foo', 'parsermod.py', 'quotes.inc', 'rimg.png',
+ 'special/api.h', 'special/code.py', 'subdir/excluded.txt', 'subdir/images.txt',
+ 'subdir/img.png', 'subdir/include.inc', 'subdir/includes.txt', 'subdir/simg.png',
+ 'svgimg.pdf', 'svgimg.svg', 'tabs.inc', 'test.inc', 'wrongenc.inc',
+ ]
+
+
+def test_get_matching_files_all_exclude_single(rootdir):
+ files = get_matching_files(rootdir / "test-root", exclude_patterns=["**.html"])
+ assert sorted(files) == [
+ 'Makefile', 'autodoc.txt', 'autodoc_target.py', 'bom.txt', 'conf.py',
+ 'extapi.txt', 'extensions.txt', 'file_with_special_#_chars.xyz', 'footnote.txt',
+ 'images.txt', 'img.foo.png', 'img.gif', 'img.pdf', 'img.png', 'includes.txt',
+ 'index.txt', 'lists.txt', 'literal.inc', 'literal_orig.inc', 'markup.txt', 'math.txt',
+ 'objects.txt', 'otherext.foo', 'parsermod.py', 'quotes.inc', 'rimg.png',
+ 'special/api.h', 'special/code.py', 'subdir/excluded.txt', 'subdir/images.txt',
+ 'subdir/img.png', 'subdir/include.inc', 'subdir/includes.txt', 'subdir/simg.png',
+ 'svgimg.pdf', 'svgimg.svg', 'tabs.inc', 'test.inc', 'wrongenc.inc',
+ ]
+
+
+def test_get_matching_files_all_exclude_multiple(rootdir):
+ files = get_matching_files(rootdir / "test-root", exclude_patterns=["**.html", "**.inc"])
+ assert sorted(files) == [
+ 'Makefile', 'autodoc.txt', 'autodoc_target.py', 'bom.txt', 'conf.py',
+ 'extapi.txt', 'extensions.txt', 'file_with_special_#_chars.xyz', 'footnote.txt',
+ 'images.txt', 'img.foo.png', 'img.gif', 'img.pdf', 'img.png', 'includes.txt',
+ 'index.txt', 'lists.txt', 'markup.txt', 'math.txt', 'objects.txt', 'otherext.foo',
+ 'parsermod.py', 'rimg.png', 'special/api.h', 'special/code.py', 'subdir/excluded.txt',
+ 'subdir/images.txt', 'subdir/img.png', 'subdir/includes.txt', 'subdir/simg.png',
+ 'svgimg.pdf', 'svgimg.svg',
+ ]
+
+
+def test_get_matching_files_all_exclude_nonexistent(rootdir):
+ files = get_matching_files(rootdir / "test-root", exclude_patterns=["halibut/**"])
+ assert sorted(files) == [
+ 'Makefile', '_templates/contentssb.html', '_templates/customsb.html',
+ '_templates/layout.html', 'autodoc.txt', 'autodoc_target.py', 'bom.txt', 'conf.py',
+ 'extapi.txt', 'extensions.txt', 'file_with_special_#_chars.xyz', 'footnote.txt',
+ 'images.txt', 'img.foo.png', 'img.gif', 'img.pdf', 'img.png', 'includes.txt',
+ 'index.txt', 'lists.txt', 'literal.inc', 'literal_orig.inc', 'markup.txt', 'math.txt',
+ 'objects.txt', 'otherext.foo', 'parsermod.py', 'quotes.inc', 'rimg.png',
+ 'special/api.h', 'special/code.py', 'subdir/excluded.txt', 'subdir/images.txt',
+ 'subdir/img.png', 'subdir/include.inc', 'subdir/includes.txt', 'subdir/simg.png',
+ 'svgimg.pdf', 'svgimg.svg', 'tabs.inc', 'test.inc', 'wrongenc.inc',
+ ]
+
+
+def test_get_matching_files_all_include_single(rootdir):
+ files = get_matching_files(rootdir / "test-root", include_patterns=["subdir/**"])
+ assert sorted(files) == [
+ 'subdir/excluded.txt', 'subdir/images.txt', 'subdir/img.png', 'subdir/include.inc',
+ 'subdir/includes.txt', 'subdir/simg.png',
+ ]
+
+
+def test_get_matching_files_all_include_multiple(rootdir):
+ files = get_matching_files(rootdir / "test-root", include_patterns=["special/**", "subdir/**"])
+ assert sorted(files) == [
+ 'special/api.h', 'special/code.py', 'subdir/excluded.txt', 'subdir/images.txt',
+ 'subdir/img.png', 'subdir/include.inc', 'subdir/includes.txt', 'subdir/simg.png',
+ ]
+
+
+def test_get_matching_files_all_include_nonexistent(rootdir):
+ files = get_matching_files(rootdir / "test-root", include_patterns=["halibut/**"])
+ assert sorted(files) == []
+
+
+def test_get_matching_files_all_include_prefix(rootdir):
+ files = get_matching_files(rootdir / "test-root", include_patterns=["autodoc*"])
+ assert sorted(files) == [
+ 'autodoc.txt', 'autodoc_target.py',
+ ]
+
+
+def test_get_matching_files_all_include_question_mark(rootdir):
+ files = get_matching_files(rootdir / "test-root", include_patterns=["img.???"])
+ assert sorted(files) == [
+ 'img.gif', 'img.pdf', 'img.png',
+ ]
diff --git a/tests/test_util/test_util_nodes.py b/tests/test_util/test_util_nodes.py
new file mode 100644
index 0000000..ddd5974
--- /dev/null
+++ b/tests/test_util/test_util_nodes.py
@@ -0,0 +1,254 @@
+"""Tests uti.nodes functions."""
+from __future__ import annotations
+
+import warnings
+from textwrap import dedent
+from typing import Any
+
+import pytest
+from docutils import frontend, nodes
+from docutils.parsers import rst
+from docutils.utils import new_document
+
+from sphinx.transforms import ApplySourceWorkaround
+from sphinx.util.nodes import (
+ NodeMatcher,
+ apply_source_workaround,
+ clean_astext,
+ extract_messages,
+ make_id,
+ split_explicit_title,
+)
+
+
+def _transform(doctree):
+ ApplySourceWorkaround(doctree).apply()
+
+
+def create_new_document():
+ with warnings.catch_warnings():
+ warnings.filterwarnings('ignore', category=DeprecationWarning)
+ # DeprecationWarning: The frontend.OptionParser class will be replaced
+ # by a subclass of argparse.ArgumentParser in Docutils 0.21 or later.
+ settings = frontend.OptionParser(
+ components=(rst.Parser,)).get_default_values()
+ settings.id_prefix = 'id'
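+ # ids that cannot be derived from the term fall back to serial ids such as 'id0' (see the expected values in test_make_id below)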
+ document = new_document('dummy.txt', settings)
+ return document
+
+
+def _get_doctree(text):
+ document = create_new_document()
+ rst.Parser().parse(text, document)
+ _transform(document)
+ return document
+
+
+def assert_node_count(messages, node_type, expect_count):
+ count = 0
+ node_list = [node for node, msg in messages]
+ for node in node_list:
+ if isinstance(node, node_type):
+ count += 1
+
+ assert count == expect_count, (
+ "Count of %r in the %r is %d instead of %d"
+ % (node_type, node_list, count, expect_count))
+
+
+def test_NodeMatcher():
+ doctree = nodes.document(None, None)
+ doctree += nodes.paragraph('', 'Hello')
+ doctree += nodes.paragraph('', 'Sphinx', block=1)
+ doctree += nodes.paragraph('', 'World', block=2)
+ doctree += nodes.literal_block('', 'blah blah blah', block=3)
+
+ # search by node class
+ matcher = NodeMatcher(nodes.paragraph)
+ assert len(list(doctree.findall(matcher))) == 3
+
+ # search by multiple node classes
+ matcher = NodeMatcher(nodes.paragraph, nodes.literal_block)
+ assert len(list(doctree.findall(matcher))) == 4
+
+ # search by node attribute
+ matcher = NodeMatcher(block=1)
+ assert len(list(doctree.findall(matcher))) == 1
+
+ # search by node attribute (Any)
+ matcher = NodeMatcher(block=Any)
+ assert len(list(doctree.findall(matcher))) == 3
+
+ # search by both class and attribute
+ matcher = NodeMatcher(nodes.paragraph, block=Any)
+ assert len(list(doctree.findall(matcher))) == 2
+
+ # mismatched
+ matcher = NodeMatcher(nodes.title)
+ assert len(list(doctree.findall(matcher))) == 0
+
+ # search with Any does not match to Text node
+ matcher = NodeMatcher(blah=Any)
+ assert len(list(doctree.findall(matcher))) == 0
+
+
+@pytest.mark.parametrize(
+ ('rst', 'node_cls', 'count'),
+ [
+ (
+ """
+ .. admonition:: admonition title
+
+ admonition body
+ """,
+ nodes.title, 1,
+ ),
+ (
+ """
+ .. figure:: foo.jpg
+
+ this is title
+ """,
+ nodes.caption, 1,
+ ),
+ (
+ """
+ .. rubric:: spam
+ """,
+ nodes.rubric, 1,
+ ),
+ (
+ """
+ | spam
+ | egg
+ """,
+ nodes.line, 2,
+ ),
+ (
+ """
+ section
+ =======
+
+ +----------------+
+ | | **Title 1** |
+ | | Message 1 |
+ +----------------+
+ """,
+ nodes.line, 2,
+ ),
+ (
+ """
+ * | **Title 1**
+ | Message 1
+ """,
+ nodes.line, 2,
+
+ ),
+ ],
+)
+def test_extract_messages(rst, node_cls, count):
+ msg = extract_messages(_get_doctree(dedent(rst)))
+ assert_node_count(msg, node_cls, count)
+
+
+def test_extract_messages_without_rawsource():
+ """
+ Check that node.astext() is used as a fallback when node.rawsource is empty.
+
+ ``extract_messages``, which the Sphinx i18n feature relies on, drops nodes for
+ which ``not node.rawsource`` holds, so every node to be translated needs a
+ ``rawsource`` value. However, node.rawsource is sometimes not set.
+
+ For example, recommonmark-0.2.0 does not set rawsource on `paragraph` nodes.
+
+ refs #1994: Fall back to node's astext() during i18n message extraction.
+ """
+ p = nodes.paragraph()
+ p.append(nodes.Text('test'))
+ p.append(nodes.Text('sentence'))
+ assert not p.rawsource # target node must not have rawsource value
+ document = create_new_document()
+ document.append(p)
+ _transform(document)
+ assert_node_count(extract_messages(document), nodes.TextElement, 1)
+ assert [m for n, m in extract_messages(document)][0], 'text sentence'
+
+
+def test_clean_astext():
+ node = nodes.paragraph(text='hello world')
+ assert clean_astext(node) == 'hello world'
+
+ node = nodes.image(alt='hello world')
+ assert clean_astext(node) == ''
+
+ node = nodes.paragraph(text='hello world')
+ node += nodes.raw('', 'raw text', format='html')
+ assert clean_astext(node) == 'hello world'
+
+
+@pytest.mark.parametrize(
+ ('prefix', 'term', 'expected'),
+ [
+ ('', '', 'id0'),
+ ('term', '', 'term-0'),
+ ('term', 'Sphinx', 'term-Sphinx'),
+ ('', 'io.StringIO', 'io.StringIO'), # contains a dot
+ ('', 'sphinx.setup_command', 'sphinx.setup_command'), # contains a dot & underscore
+ ('', '_io.StringIO', 'io.StringIO'), # starts with underscore
+ ('', 'ｓｐｈｉｎｘ', 'sphinx'), # ASCII letters as Unicode fullwidth characters
+ ('', '悠好', 'id0'), # multibyte text (Chinese)
+ ('', 'Hello=悠好=こんにちは', 'Hello'), # ASCII letters mixed with multibyte text
+ ('', 'fünf', 'funf'), # latin1 (umlaut)
+ ('', '0sphinx', 'sphinx'), # starts with number
+ ('', 'sphinx-', 'sphinx'), # ends with hyphen
+ ])
+def test_make_id(app, prefix, term, expected):
+ document = create_new_document()
+ assert make_id(app.env, document, prefix, term) == expected
+
+
+def test_make_id_already_registered(app):
+ document = create_new_document()
+ document.ids['term-Sphinx'] = True # register "term-Sphinx" manually
+ assert make_id(app.env, document, 'term', 'Sphinx') == 'term-0'
+
+
+def test_make_id_sequential(app):
+ document = create_new_document()
+ document.ids['term-0'] = True
+ assert make_id(app.env, document, 'term') == 'term-1'
+
+
+@pytest.mark.parametrize(
+ ('title', 'expected'),
+ [
+ # implicit
+ ('hello', (False, 'hello', 'hello')),
+ # explicit
+ ('hello <world>', (True, 'hello', 'world')),
+ # explicit (title having angle brackets)
+ ('hello <world> <sphinx>', (True, 'hello <world>', 'sphinx')),
+ ],
+)
+def test_split_explicit_target(title, expected):
+ assert expected == split_explicit_title(title)
+
+
+def test_apply_source_workaround_literal_block_no_source():
+ """Regression test for #11091.
+
+ Test that apply_source_workaround doesn't raise.
+ """
+ literal_block = nodes.literal_block('', '')
+ list_item = nodes.list_item('', literal_block)
+ bullet_list = nodes.bullet_list('', list_item)
+
+ assert literal_block.source is None
+ assert list_item.source is None
+ assert bullet_list.source is None
+
+ apply_source_workaround(literal_block)
+
+ assert literal_block.source is None
+ assert list_item.source is None
+ assert bullet_list.source is None
diff --git a/tests/test_util/test_util_rst.py b/tests/test_util/test_util_rst.py
new file mode 100644
index 0000000..d50c90c
--- /dev/null
+++ b/tests/test_util/test_util_rst.py
@@ -0,0 +1,164 @@
+"""Tests sphinx.util.rst functions."""
+
+from docutils.statemachine import StringList
+from jinja2 import Environment
+
+from sphinx.util.rst import append_epilog, escape, heading, prepend_prolog, textwidth
+
+
+def test_escape():
+ assert escape(':ref:`id`') == r'\:ref\:\`id\`'
+ assert escape('footnote [#]_') == r'footnote \[\#\]\_'
+ assert escape('sphinx.application') == r'sphinx.application'
+ assert escape('.. toctree::') == r'\.. toctree\:\:'
+
+
+def test_append_epilog(app):
+ epilog = 'this is rst_epilog\ngood-bye reST!'
+ content = StringList(['hello Sphinx world',
+ 'Sphinx is a document generator'],
+ 'dummy.rst')
+ append_epilog(content, epilog)
+
+ assert list(content.xitems()) == [('dummy.rst', 0, 'hello Sphinx world'),
+ ('dummy.rst', 1, 'Sphinx is a document generator'),
+ ('dummy.rst', 2, ''),
+ ('<rst_epilog>', 0, 'this is rst_epilog'),
+ ('<rst_epilog>', 1, 'good-bye reST!')]
+
+
+def test_prepend_prolog(app):
+ prolog = 'this is rst_prolog\nhello reST!'
+ content = StringList([':title: test of SphinxFileInput',
+ ':author: Sphinx team',
+ '',
+ 'hello Sphinx world',
+ 'Sphinx is a document generator'],
+ 'dummy.rst')
+ prepend_prolog(content, prolog)
+
+ assert list(content.xitems()) == [('dummy.rst', 0, ':title: test of SphinxFileInput'),
+ ('dummy.rst', 1, ':author: Sphinx team'),
+ ('<generated>', 0, ''),
+ ('<rst_prolog>', 0, 'this is rst_prolog'),
+ ('<rst_prolog>', 1, 'hello reST!'),
+ ('<generated>', 0, ''),
+ ('dummy.rst', 2, ''),
+ ('dummy.rst', 3, 'hello Sphinx world'),
+ ('dummy.rst', 4, 'Sphinx is a document generator')]
+
+
+def test_prepend_prolog_with_CR(app):
+ # prolog with a trailing line break
+ prolog = 'this is rst_prolog\nhello reST!\n'
+ content = StringList(['hello Sphinx world',
+ 'Sphinx is a document generator'],
+ 'dummy.rst')
+ prepend_prolog(content, prolog)
+
+ assert list(content.xitems()) == [('<rst_prolog>', 0, 'this is rst_prolog'),
+ ('<rst_prolog>', 1, 'hello reST!'),
+ ('<generated>', 0, ''),
+ ('dummy.rst', 0, 'hello Sphinx world'),
+ ('dummy.rst', 1, 'Sphinx is a document generator')]
+
+
+def test_prepend_prolog_without_CR(app):
+ # prolog without a trailing line break
+ prolog = 'this is rst_prolog\nhello reST!'
+ content = StringList(['hello Sphinx world',
+ 'Sphinx is a document generator'],
+ 'dummy.rst')
+ prepend_prolog(content, prolog)
+
+ assert list(content.xitems()) == [('<rst_prolog>', 0, 'this is rst_prolog'),
+ ('<rst_prolog>', 1, 'hello reST!'),
+ ('<generated>', 0, ''),
+ ('dummy.rst', 0, 'hello Sphinx world'),
+ ('dummy.rst', 1, 'Sphinx is a document generator')]
+
+
+def test_prepend_prolog_with_roles_in_sections(app):
+ prolog = 'this is rst_prolog\nhello reST!'
+ content = StringList([':title: test of SphinxFileInput',
+ ':author: Sphinx team',
+ '', # this newline is required
+ ':mod:`foo`',
+ '----------',
+ '',
+ 'hello'],
+ 'dummy.rst')
+ prepend_prolog(content, prolog)
+
+ assert list(content.xitems()) == [('dummy.rst', 0, ':title: test of SphinxFileInput'),
+ ('dummy.rst', 1, ':author: Sphinx team'),
+ ('<generated>', 0, ''),
+ ('<rst_prolog>', 0, 'this is rst_prolog'),
+ ('<rst_prolog>', 1, 'hello reST!'),
+ ('<generated>', 0, ''),
+ ('dummy.rst', 2, ''),
+ ('dummy.rst', 3, ':mod:`foo`'),
+ ('dummy.rst', 4, '----------'),
+ ('dummy.rst', 5, ''),
+ ('dummy.rst', 6, 'hello')]
+
+
+def test_prepend_prolog_with_roles_in_sections_with_newline(app):
+ # prologue with trailing line break
+ prolog = 'this is rst_prolog\nhello reST!\n'
+ content = StringList([':mod:`foo`', '-' * 10, '', 'hello'], 'dummy.rst')
+ prepend_prolog(content, prolog)
+
+ assert list(content.xitems()) == [('<rst_prolog>', 0, 'this is rst_prolog'),
+ ('<rst_prolog>', 1, 'hello reST!'),
+ ('<generated>', 0, ''),
+ ('dummy.rst', 0, ':mod:`foo`'),
+ ('dummy.rst', 1, '----------'),
+ ('dummy.rst', 2, ''),
+ ('dummy.rst', 3, 'hello')]
+
+
+def test_prepend_prolog_with_roles_in_sections_without_newline(app):
+ # prologue with no trailing line break
+ prolog = 'this is rst_prolog\nhello reST!'
+ content = StringList([':mod:`foo`', '-' * 10, '', 'hello'], 'dummy.rst')
+ prepend_prolog(content, prolog)
+
+ assert list(content.xitems()) == [('<rst_prolog>', 0, 'this is rst_prolog'),
+ ('<rst_prolog>', 1, 'hello reST!'),
+ ('<generated>', 0, ''),
+ ('dummy.rst', 0, ':mod:`foo`'),
+ ('dummy.rst', 1, '----------'),
+ ('dummy.rst', 2, ''),
+ ('dummy.rst', 3, 'hello')]
+
+
+def test_textwidth():
+ assert textwidth('Hello') == 5
+ assert textwidth('русский язык') == 12
+ assert textwidth('русский язык', 'WFA') == 23 # Cyrillic characters have ambiguous width
+
+
+def test_heading():
+ env = Environment()
+ env.extend(language=None)
+
+ assert heading(env, 'Hello') == ('Hello\n'
+ '=====')
+ assert heading(env, 'Hello', 1) == ('Hello\n'
+ '=====')
+ assert heading(env, 'Hello', 2) == ('Hello\n'
+ '-----')
+ assert heading(env, 'Hello', 3) == ('Hello\n'
+ '~~~~~')
+ assert heading(env, 'русский язык', 1) == (
+ 'русский язык\n'
+ '============'
+ )
+
+ # language='ja': ambiguous-width characters (such as Cyrillic) count as wide
+ env.language = 'ja'
+ assert heading(env, 'русский язык', 1) == (
+ 'русский язык\n'
+ '======================='
+ )
diff --git a/tests/test_util/test_util_template.py b/tests/test_util/test_util_template.py
new file mode 100644
index 0000000..4601179
--- /dev/null
+++ b/tests/test_util/test_util_template.py
@@ -0,0 +1,29 @@
+"""Tests sphinx.util.template functions."""
+
+from sphinx.util.template import ReSTRenderer
+
+
+def test_ReSTRenderer_escape():
+ r = ReSTRenderer()
+ template = '{{ "*hello*" | e }}'
+ assert r.render_string(template, {}) == r'\*hello\*'
+
+
+def test_ReSTRenderer_heading():
+ r = ReSTRenderer()
+
+ template = '{{ "hello" | heading }}'
+ assert r.render_string(template, {}) == 'hello\n====='
+
+ template = '{{ "hello" | heading(1) }}'
+ assert r.render_string(template, {}) == 'hello\n====='
+
+ template = '{{ "русский язык" | heading(2) }}'
+ assert r.render_string(template, {}) == ('русский язык\n'
+ '------------')
+
+ # language: ja
+ r.env.language = 'ja'
+ template = '{{ "русский язык" | heading }}'
+ assert r.render_string(template, {}) == ('русский язык\n'
+ '=======================')
diff --git a/tests/test_util/test_util_typing.py b/tests/test_util/test_util_typing.py
new file mode 100644
index 0000000..9c28029
--- /dev/null
+++ b/tests/test_util/test_util_typing.py
@@ -0,0 +1,633 @@
+"""Tests util.typing functions."""
+
+import sys
+from contextvars import Context, ContextVar, Token
+from enum import Enum
+from numbers import Integral
+from struct import Struct
+from types import (
+ AsyncGeneratorType,
+ BuiltinFunctionType,
+ BuiltinMethodType,
+ CellType,
+ ClassMethodDescriptorType,
+ CodeType,
+ CoroutineType,
+ FrameType,
+ FunctionType,
+ GeneratorType,
+ GetSetDescriptorType,
+ LambdaType,
+ MappingProxyType,
+ MemberDescriptorType,
+ MethodDescriptorType,
+ MethodType,
+ MethodWrapperType,
+ ModuleType,
+ TracebackType,
+ WrapperDescriptorType,
+)
+from typing import (
+ Any,
+ Callable,
+ Dict,
+ Generator,
+ Iterator,
+ List,
+ NewType,
+ Optional,
+ Tuple,
+ TypeVar,
+ Union,
+)
+
+import pytest
+
+from sphinx.ext.autodoc import mock
+from sphinx.util.typing import _INVALID_BUILTIN_CLASSES, restify, stringify_annotation
+
+
+class MyClass1:
+ pass
+
+
+class MyClass2(MyClass1):
+ __qualname__ = '<MyClass2>'
+
+
+class MyEnum(Enum):
+ a = 1
+
+
+T = TypeVar('T')
+MyInt = NewType('MyInt', int)
+
+
+class MyList(List[T]):
+ pass
+
+
+class BrokenType:
+ __args__ = int
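+ # __args__ is normally a tuple of type arguments; a bare int exercises the fallback handling in restify() (see test_restify_broken_type_hints)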
+
+
+def test_restify():
+ assert restify(int) == ":py:class:`int`"
+ assert restify(int, "smart") == ":py:class:`int`"
+
+ assert restify(str) == ":py:class:`str`"
+ assert restify(str, "smart") == ":py:class:`str`"
+
+ assert restify(None) == ":py:obj:`None`"
+ assert restify(None, "smart") == ":py:obj:`None`"
+
+ assert restify(Integral) == ":py:class:`numbers.Integral`"
+ assert restify(Integral, "smart") == ":py:class:`~numbers.Integral`"
+
+ assert restify(Struct) == ":py:class:`struct.Struct`"
+ assert restify(Struct, "smart") == ":py:class:`~struct.Struct`"
+
+ assert restify(TracebackType) == ":py:class:`types.TracebackType`"
+ assert restify(TracebackType, "smart") == ":py:class:`~types.TracebackType`"
+
+ assert restify(Any) == ":py:obj:`~typing.Any`"
+ assert restify(Any, "smart") == ":py:obj:`~typing.Any`"
+
+ assert restify('str') == "str"
+ assert restify('str', "smart") == "str"
+
+
+def test_is_invalid_builtin_class():
+ # if these tests start failing, it means that the __module__
+ # of one of these classes has changed, and _INVALID_BUILTIN_CLASSES
+ # in sphinx.util.typing needs to be updated.
+ assert _INVALID_BUILTIN_CLASSES.keys() == {
+ Context,
+ ContextVar,
+ Token,
+ Struct,
+ AsyncGeneratorType,
+ BuiltinFunctionType,
+ BuiltinMethodType,
+ CellType,
+ ClassMethodDescriptorType,
+ CodeType,
+ CoroutineType,
+ FrameType,
+ FunctionType,
+ GeneratorType,
+ GetSetDescriptorType,
+ LambdaType,
+ MappingProxyType,
+ MemberDescriptorType,
+ MethodDescriptorType,
+ MethodType,
+ MethodWrapperType,
+ ModuleType,
+ TracebackType,
+ WrapperDescriptorType,
+ }
+ assert Struct.__module__ == '_struct'
+ assert AsyncGeneratorType.__module__ == 'builtins'
+ assert BuiltinFunctionType.__module__ == 'builtins'
+ assert BuiltinMethodType.__module__ == 'builtins'
+ assert CellType.__module__ == 'builtins'
+ assert ClassMethodDescriptorType.__module__ == 'builtins'
+ assert CodeType.__module__ == 'builtins'
+ assert CoroutineType.__module__ == 'builtins'
+ assert FrameType.__module__ == 'builtins'
+ assert FunctionType.__module__ == 'builtins'
+ assert GeneratorType.__module__ == 'builtins'
+ assert GetSetDescriptorType.__module__ == 'builtins'
+ assert LambdaType.__module__ == 'builtins'
+ assert MappingProxyType.__module__ == 'builtins'
+ assert MemberDescriptorType.__module__ == 'builtins'
+ assert MethodDescriptorType.__module__ == 'builtins'
+ assert MethodType.__module__ == 'builtins'
+ assert MethodWrapperType.__module__ == 'builtins'
+ assert ModuleType.__module__ == 'builtins'
+ assert TracebackType.__module__ == 'builtins'
+ assert WrapperDescriptorType.__module__ == 'builtins'
+
+
+def test_restify_type_hints_containers():
+ assert restify(List) == ":py:class:`~typing.List`"
+ assert restify(Dict) == ":py:class:`~typing.Dict`"
+ assert restify(List[int]) == ":py:class:`~typing.List`\\ [:py:class:`int`]"
+ assert restify(List[str]) == ":py:class:`~typing.List`\\ [:py:class:`str`]"
+ assert restify(Dict[str, float]) == (":py:class:`~typing.Dict`\\ "
+ "[:py:class:`str`, :py:class:`float`]")
+ assert restify(Tuple[str, str, str]) == (":py:class:`~typing.Tuple`\\ "
+ "[:py:class:`str`, :py:class:`str`, "
+ ":py:class:`str`]")
+ assert restify(Tuple[str, ...]) == ":py:class:`~typing.Tuple`\\ [:py:class:`str`, ...]"
+
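+ # Python 3.11 changed Tuple[()] (its __args__ became () rather than ((),)), hence the version split below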
+ if sys.version_info[:2] <= (3, 10):
+ assert restify(Tuple[()]) == ":py:class:`~typing.Tuple`\\ [()]"
+ else:
+ assert restify(Tuple[()]) == ":py:class:`~typing.Tuple`"
+
+ assert restify(List[Dict[str, Tuple]]) == (":py:class:`~typing.List`\\ "
+ "[:py:class:`~typing.Dict`\\ "
+ "[:py:class:`str`, :py:class:`~typing.Tuple`]]")
+ assert restify(MyList[Tuple[int, int]]) == (":py:class:`tests.test_util.test_util_typing.MyList`\\ "
+ "[:py:class:`~typing.Tuple`\\ "
+ "[:py:class:`int`, :py:class:`int`]]")
+ assert restify(Generator[None, None, None]) == (":py:class:`~typing.Generator`\\ "
+ "[:py:obj:`None`, :py:obj:`None`, "
+ ":py:obj:`None`]")
+ assert restify(Iterator[None]) == (":py:class:`~typing.Iterator`\\ "
+ "[:py:obj:`None`]")
+
+
+def test_restify_type_hints_Callable():
+ assert restify(Callable) == ":py:class:`~typing.Callable`"
+
+ assert restify(Callable[[str], int]) == (":py:class:`~typing.Callable`\\ "
+ "[[:py:class:`str`], :py:class:`int`]")
+ assert restify(Callable[..., int]) == (":py:class:`~typing.Callable`\\ "
+ "[[...], :py:class:`int`]")
+
+
+def test_restify_type_hints_Union():
+ assert restify(Union[int]) == ":py:class:`int`"
+ assert restify(Union[int, str]) == ":py:class:`int` | :py:class:`str`"
+ assert restify(Optional[int]) == ":py:class:`int` | :py:obj:`None`"
+
+ assert restify(Union[str, None]) == ":py:class:`str` | :py:obj:`None`"
+ assert restify(Union[None, str]) == ":py:obj:`None` | :py:class:`str`"
+ assert restify(Optional[str]) == ":py:class:`str` | :py:obj:`None`"
+
+ assert restify(Union[int, str, None]) == (
+ ":py:class:`int` | :py:class:`str` | :py:obj:`None`"
+ )
+ assert restify(Optional[Union[int, str]]) in {
+ ":py:class:`str` | :py:class:`int` | :py:obj:`None`",
+ ":py:class:`int` | :py:class:`str` | :py:obj:`None`",
+ }
+
+ assert restify(Union[int, Integral]) == (
+ ":py:class:`int` | :py:class:`numbers.Integral`"
+ )
+ assert restify(Union[int, Integral], "smart") == (
+ ":py:class:`int` | :py:class:`~numbers.Integral`"
+ )
+
+ assert (restify(Union[MyClass1, MyClass2]) ==
+ (":py:class:`tests.test_util.test_util_typing.MyClass1`"
+ " | :py:class:`tests.test_util.test_util_typing.<MyClass2>`"))
+ assert (restify(Union[MyClass1, MyClass2], "smart") ==
+ (":py:class:`~tests.test_util.test_util_typing.MyClass1`"
+ " | :py:class:`~tests.test_util.test_util_typing.<MyClass2>`"))
+
+ assert (restify(Optional[Union[MyClass1, MyClass2]]) ==
+ (":py:class:`tests.test_util.test_util_typing.MyClass1`"
+ " | :py:class:`tests.test_util.test_util_typing.<MyClass2>`"
+ " | :py:obj:`None`"))
+ assert (restify(Optional[Union[MyClass1, MyClass2]], "smart") ==
+ (":py:class:`~tests.test_util.test_util_typing.MyClass1`"
+ " | :py:class:`~tests.test_util.test_util_typing.<MyClass2>`"
+ " | :py:obj:`None`"))
+
+
+def test_restify_type_hints_typevars():
+ T = TypeVar('T')
+ T_co = TypeVar('T_co', covariant=True)
+ T_contra = TypeVar('T_contra', contravariant=True)
+
+ assert restify(T) == ":py:obj:`tests.test_util.test_util_typing.T`"
+ assert restify(T, "smart") == ":py:obj:`~tests.test_util.test_util_typing.T`"
+
+ assert restify(T_co) == ":py:obj:`tests.test_util.test_util_typing.T_co`"
+ assert restify(T_co, "smart") == ":py:obj:`~tests.test_util.test_util_typing.T_co`"
+
+ assert restify(T_contra) == ":py:obj:`tests.test_util.test_util_typing.T_contra`"
+ assert restify(T_contra, "smart") == ":py:obj:`~tests.test_util.test_util_typing.T_contra`"
+
+ assert restify(List[T]) == ":py:class:`~typing.List`\\ [:py:obj:`tests.test_util.test_util_typing.T`]"
+ assert restify(List[T], "smart") == ":py:class:`~typing.List`\\ [:py:obj:`~tests.test_util.test_util_typing.T`]"
+
+ assert restify(list[T]) == ":py:class:`list`\\ [:py:obj:`tests.test_util.test_util_typing.T`]"
+ assert restify(list[T], "smart") == ":py:class:`list`\\ [:py:obj:`~tests.test_util.test_util_typing.T`]"
+
+ if sys.version_info[:2] >= (3, 10):
+ assert restify(MyInt) == ":py:class:`tests.test_util.test_util_typing.MyInt`"
+ assert restify(MyInt, "smart") == ":py:class:`~tests.test_util.test_util_typing.MyInt`"
+ else:
+ assert restify(MyInt) == ":py:class:`MyInt`"
+ assert restify(MyInt, "smart") == ":py:class:`MyInt`"
+
+
+def test_restify_type_hints_custom_class():
+ assert restify(MyClass1) == ":py:class:`tests.test_util.test_util_typing.MyClass1`"
+ assert restify(MyClass1, "smart") == ":py:class:`~tests.test_util.test_util_typing.MyClass1`"
+
+ assert restify(MyClass2) == ":py:class:`tests.test_util.test_util_typing.<MyClass2>`"
+ assert restify(MyClass2, "smart") == ":py:class:`~tests.test_util.test_util_typing.<MyClass2>`"
+
+
+def test_restify_type_hints_alias():
+ MyStr = str
+ MyTypingTuple = Tuple[str, str]
+ MyTuple = tuple[str, str]
+ assert restify(MyStr) == ":py:class:`str`"
+ assert restify(MyTypingTuple) == ":py:class:`~typing.Tuple`\\ [:py:class:`str`, :py:class:`str`]"
+ assert restify(MyTuple) == ":py:class:`tuple`\\ [:py:class:`str`, :py:class:`str`]"
+
+
+def test_restify_type_ForwardRef():
+ from typing import ForwardRef # type: ignore[attr-defined]
+ assert restify(ForwardRef("MyInt")) == ":py:class:`MyInt`"
+
+ assert restify(list[ForwardRef("MyInt")]) == ":py:class:`list`\\ [:py:class:`MyInt`]"
+
+ assert restify(Tuple[dict[ForwardRef("MyInt"), str], list[List[int]]]) == ":py:class:`~typing.Tuple`\\ [:py:class:`dict`\\ [:py:class:`MyInt`, :py:class:`str`], :py:class:`list`\\ [:py:class:`~typing.List`\\ [:py:class:`int`]]]" # type: ignore[attr-defined]
+
+
+def test_restify_type_Literal():
+ from typing import Literal # type: ignore[attr-defined]
+ assert restify(Literal[1, "2", "\r"]) == ":py:obj:`~typing.Literal`\\ [1, '2', '\\r']"
+
+ assert restify(Literal[MyEnum.a], 'fully-qualified-except-typing') == ':py:obj:`~typing.Literal`\\ [:py:attr:`tests.test_util.test_util_typing.MyEnum.a`]'
+ assert restify(Literal[MyEnum.a], 'smart') == ':py:obj:`~typing.Literal`\\ [:py:attr:`~tests.test_util.test_util_typing.MyEnum.a`]'
+
+
+def test_restify_pep_585():
+ assert restify(list[str]) == ":py:class:`list`\\ [:py:class:`str`]" # type: ignore[attr-defined]
+ assert restify(dict[str, str]) == (":py:class:`dict`\\ " # type: ignore[attr-defined]
+ "[:py:class:`str`, :py:class:`str`]")
+ assert restify(tuple[str, ...]) == ":py:class:`tuple`\\ [:py:class:`str`, ...]"
+ assert restify(tuple[str, str, str]) == (":py:class:`tuple`\\ "
+ "[:py:class:`str`, :py:class:`str`, "
+ ":py:class:`str`]")
+ assert restify(dict[str, tuple[int, ...]]) == (":py:class:`dict`\\ " # type: ignore[attr-defined]
+ "[:py:class:`str`, :py:class:`tuple`\\ "
+ "[:py:class:`int`, ...]]")
+
+ assert restify(tuple[()]) == ":py:class:`tuple`\\ [()]"
+
+ # Mix old typing with PEP 585
+ assert restify(List[dict[str, Tuple[str, ...]]]) == (":py:class:`~typing.List`\\ "
+ "[:py:class:`dict`\\ "
+ "[:py:class:`str`, :py:class:`~typing.Tuple`\\ "
+ "[:py:class:`str`, ...]]]")
+ assert restify(tuple[MyList[list[int]], int]) == (":py:class:`tuple`\\ ["
+ ":py:class:`tests.test_util.test_util_typing.MyList`\\ "
+ "[:py:class:`list`\\ [:py:class:`int`]], "
+ ":py:class:`int`]")
+
+
+@pytest.mark.skipif(sys.version_info[:2] <= (3, 9), reason='python 3.10+ is required.')
+def test_restify_type_union_operator():
+ assert restify(int | None) == ":py:class:`int` | :py:obj:`None`" # type: ignore[attr-defined]
+ assert restify(None | int) == ":py:obj:`None` | :py:class:`int`" # type: ignore[attr-defined]
+ assert restify(int | str) == ":py:class:`int` | :py:class:`str`" # type: ignore[attr-defined]
+ assert restify(int | str | None) == (":py:class:`int` | :py:class:`str` | " # type: ignore[attr-defined]
+ ":py:obj:`None`")
+
+
+def test_restify_broken_type_hints():
+ assert restify(BrokenType) == ':py:class:`tests.test_util.test_util_typing.BrokenType`'
+ assert restify(BrokenType, "smart") == ':py:class:`~tests.test_util.test_util_typing.BrokenType`'
+
+
+def test_restify_mock():
+ with mock(['unknown']):
+ import unknown
+ assert restify(unknown) == ':py:class:`unknown`'
+ assert restify(unknown.secret.Class) == ':py:class:`unknown.secret.Class`'
+ assert restify(unknown.secret.Class, "smart") == ':py:class:`~unknown.secret.Class`'
+
+
+def test_stringify_annotation():
+ assert stringify_annotation(int, 'fully-qualified-except-typing') == "int"
+ assert stringify_annotation(int, "smart") == "int"
+
+ assert stringify_annotation(str, 'fully-qualified-except-typing') == "str"
+ assert stringify_annotation(str, "smart") == "str"
+
+ assert stringify_annotation(None, 'fully-qualified-except-typing') == "None"
+ assert stringify_annotation(None, "smart") == "None"
+
+ assert stringify_annotation(Integral, 'fully-qualified-except-typing') == "numbers.Integral"
+ assert stringify_annotation(Integral, "smart") == "~numbers.Integral"
+
+ assert stringify_annotation(Struct, 'fully-qualified-except-typing') == "struct.Struct"
+ assert stringify_annotation(Struct, "smart") == "~struct.Struct"
+
+ assert stringify_annotation(TracebackType, 'fully-qualified-except-typing') == "types.TracebackType"
+ assert stringify_annotation(TracebackType, "smart") == "~types.TracebackType"
+
+ assert stringify_annotation(Any, 'fully-qualified-except-typing') == "Any"
+ assert stringify_annotation(Any, "fully-qualified") == "typing.Any"
+ assert stringify_annotation(Any, "smart") == "~typing.Any"
+
+
+def test_stringify_type_hints_containers():
+ assert stringify_annotation(List, 'fully-qualified-except-typing') == "List"
+ assert stringify_annotation(List, "fully-qualified") == "typing.List"
+ assert stringify_annotation(List, "smart") == "~typing.List"
+
+ assert stringify_annotation(Dict, 'fully-qualified-except-typing') == "Dict"
+ assert stringify_annotation(Dict, "fully-qualified") == "typing.Dict"
+ assert stringify_annotation(Dict, "smart") == "~typing.Dict"
+
+ assert stringify_annotation(List[int], 'fully-qualified-except-typing') == "List[int]"
+ assert stringify_annotation(List[int], "fully-qualified") == "typing.List[int]"
+ assert stringify_annotation(List[int], "smart") == "~typing.List[int]"
+
+ assert stringify_annotation(List[str], 'fully-qualified-except-typing') == "List[str]"
+ assert stringify_annotation(List[str], "fully-qualified") == "typing.List[str]"
+ assert stringify_annotation(List[str], "smart") == "~typing.List[str]"
+
+ assert stringify_annotation(Dict[str, float], 'fully-qualified-except-typing') == "Dict[str, float]"
+ assert stringify_annotation(Dict[str, float], "fully-qualified") == "typing.Dict[str, float]"
+ assert stringify_annotation(Dict[str, float], "smart") == "~typing.Dict[str, float]"
+
+ assert stringify_annotation(Tuple[str, str, str], 'fully-qualified-except-typing') == "Tuple[str, str, str]"
+ assert stringify_annotation(Tuple[str, str, str], "fully-qualified") == "typing.Tuple[str, str, str]"
+ assert stringify_annotation(Tuple[str, str, str], "smart") == "~typing.Tuple[str, str, str]"
+
+ assert stringify_annotation(Tuple[str, ...], 'fully-qualified-except-typing') == "Tuple[str, ...]"
+ assert stringify_annotation(Tuple[str, ...], "fully-qualified") == "typing.Tuple[str, ...]"
+ assert stringify_annotation(Tuple[str, ...], "smart") == "~typing.Tuple[str, ...]"
+
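+ # Python 3.11 dropped the explicit () argument from Tuple[()], so the rendered
+ # form depends on the interpreter version.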
+ if sys.version_info[:2] <= (3, 10):
+ assert stringify_annotation(Tuple[()], 'fully-qualified-except-typing') == "Tuple[()]"
+ assert stringify_annotation(Tuple[()], "fully-qualified") == "typing.Tuple[()]"
+ assert stringify_annotation(Tuple[()], "smart") == "~typing.Tuple[()]"
+ else:
+ assert stringify_annotation(Tuple[()], 'fully-qualified-except-typing') == "Tuple"
+ assert stringify_annotation(Tuple[()], "fully-qualified") == "typing.Tuple"
+ assert stringify_annotation(Tuple[()], "smart") == "~typing.Tuple"
+
+ assert stringify_annotation(List[Dict[str, Tuple]], 'fully-qualified-except-typing') == "List[Dict[str, Tuple]]"
+ assert stringify_annotation(List[Dict[str, Tuple]], "fully-qualified") == "typing.List[typing.Dict[str, typing.Tuple]]"
+ assert stringify_annotation(List[Dict[str, Tuple]], "smart") == "~typing.List[~typing.Dict[str, ~typing.Tuple]]"
+
+ assert stringify_annotation(MyList[Tuple[int, int]], 'fully-qualified-except-typing') == "tests.test_util.test_util_typing.MyList[Tuple[int, int]]"
+ assert stringify_annotation(MyList[Tuple[int, int]], "fully-qualified") == "tests.test_util.test_util_typing.MyList[typing.Tuple[int, int]]"
+ assert stringify_annotation(MyList[Tuple[int, int]], "smart") == "~tests.test_util.test_util_typing.MyList[~typing.Tuple[int, int]]"
+
+ assert stringify_annotation(Generator[None, None, None], 'fully-qualified-except-typing') == "Generator[None, None, None]"
+ assert stringify_annotation(Generator[None, None, None], "fully-qualified") == "typing.Generator[None, None, None]"
+ assert stringify_annotation(Generator[None, None, None], "smart") == "~typing.Generator[None, None, None]"
+
+ assert stringify_annotation(Iterator[None], 'fully-qualified-except-typing') == "Iterator[None]"
+ assert stringify_annotation(Iterator[None], "fully-qualified") == "typing.Iterator[None]"
+ assert stringify_annotation(Iterator[None], "smart") == "~typing.Iterator[None]"
+
+
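+# PEP 585 builtin generics carry no typing. prefix, so 'smart' output only
+# differs for user-defined generics such as MyList.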
+def test_stringify_type_hints_pep_585():
+ assert stringify_annotation(list[int], 'fully-qualified-except-typing') == "list[int]"
+ assert stringify_annotation(list[int], "smart") == "list[int]"
+
+ assert stringify_annotation(list[str], 'fully-qualified-except-typing') == "list[str]"
+ assert stringify_annotation(list[str], "smart") == "list[str]"
+
+ assert stringify_annotation(dict[str, float], 'fully-qualified-except-typing') == "dict[str, float]"
+ assert stringify_annotation(dict[str, float], "smart") == "dict[str, float]"
+
+ assert stringify_annotation(tuple[str, str, str], 'fully-qualified-except-typing') == "tuple[str, str, str]"
+ assert stringify_annotation(tuple[str, str, str], "smart") == "tuple[str, str, str]"
+
+ assert stringify_annotation(tuple[str, ...], 'fully-qualified-except-typing') == "tuple[str, ...]"
+ assert stringify_annotation(tuple[str, ...], "smart") == "tuple[str, ...]"
+
+ assert stringify_annotation(tuple[()], 'fully-qualified-except-typing') == "tuple[()]"
+ assert stringify_annotation(tuple[()], "smart") == "tuple[()]"
+
+ assert stringify_annotation(list[dict[str, tuple]], 'fully-qualified-except-typing') == "list[dict[str, tuple]]"
+ assert stringify_annotation(list[dict[str, tuple]], "smart") == "list[dict[str, tuple]]"
+
+ assert stringify_annotation(MyList[tuple[int, int]], 'fully-qualified-except-typing') == "tests.test_util.test_util_typing.MyList[tuple[int, int]]"
+ assert stringify_annotation(MyList[tuple[int, int]], "fully-qualified") == "tests.test_util.test_util_typing.MyList[tuple[int, int]]"
+ assert stringify_annotation(MyList[tuple[int, int]], "smart") == "~tests.test_util.test_util_typing.MyList[tuple[int, int]]"
+
+ assert stringify_annotation(type[int], 'fully-qualified-except-typing') == "type[int]"
+ assert stringify_annotation(type[int], "smart") == "type[int]"
+
+ # Mix typing generics and PEP 585 builtin generics
+ assert stringify_annotation(tuple[List[dict[int, str]], str, ...], 'fully-qualified-except-typing') == "tuple[List[dict[int, str]], str, ...]"
+ assert stringify_annotation(tuple[List[dict[int, str]], str, ...], "smart") == "tuple[~typing.List[dict[int, str]], str, ...]"
+
+
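+# Annotated metadata is discarded here: only the wrapped type is rendered.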
+def test_stringify_Annotated():
+ from typing import Annotated # type: ignore[attr-defined]
+ assert stringify_annotation(Annotated[str, "foo", "bar"], 'fully-qualified-except-typing') == "str"
+ assert stringify_annotation(Annotated[str, "foo", "bar"], "smart") == "str"
+
+
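+# Annotations supplied as bare strings pass through verbatim; string arguments
+# inside real generics are rendered by name.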
+def test_stringify_type_hints_string():
+ assert stringify_annotation("int", 'fully-qualified-except-typing') == "int"
+ assert stringify_annotation("int", 'fully-qualified') == "int"
+ assert stringify_annotation("int", "smart") == "int"
+
+ assert stringify_annotation("str", 'fully-qualified-except-typing') == "str"
+ assert stringify_annotation("str", 'fully-qualified') == "str"
+ assert stringify_annotation("str", "smart") == "str"
+
+ assert stringify_annotation(List["int"], 'fully-qualified-except-typing') == "List[int]"
+ assert stringify_annotation(List["int"], 'fully-qualified') == "typing.List[int]"
+ assert stringify_annotation(List["int"], "smart") == "~typing.List[int]"
+
+ assert stringify_annotation(list["int"], 'fully-qualified-except-typing') == "list[int]"
+ assert stringify_annotation(list["int"], 'fully-qualified') == "list[int]"
+ assert stringify_annotation(list["int"], "smart") == "list[int]"
+
+ assert stringify_annotation("Tuple[str]", 'fully-qualified-except-typing') == "Tuple[str]"
+ assert stringify_annotation("Tuple[str]", 'fully-qualified') == "Tuple[str]"
+ assert stringify_annotation("Tuple[str]", "smart") == "Tuple[str]"
+
+ assert stringify_annotation("tuple[str]", 'fully-qualified-except-typing') == "tuple[str]"
+ assert stringify_annotation("tuple[str]", 'fully-qualified') == "tuple[str]"
+ assert stringify_annotation("tuple[str]", "smart") == "tuple[str]"
+
+ assert stringify_annotation("unknown", 'fully-qualified-except-typing') == "unknown"
+ assert stringify_annotation("unknown", 'fully-qualified') == "unknown"
+ assert stringify_annotation("unknown", "smart") == "unknown"
+
+
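+# Callable keeps its bracketed argument list; an Ellipsis argument spec is
+# rendered as [...].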
+def test_stringify_type_hints_Callable():
+ assert stringify_annotation(Callable, 'fully-qualified-except-typing') == "Callable"
+ assert stringify_annotation(Callable, "fully-qualified") == "typing.Callable"
+ assert stringify_annotation(Callable, "smart") == "~typing.Callable"
+
+ assert stringify_annotation(Callable[[str], int], 'fully-qualified-except-typing') == "Callable[[str], int]"
+ assert stringify_annotation(Callable[[str], int], "fully-qualified") == "typing.Callable[[str], int]"
+ assert stringify_annotation(Callable[[str], int], "smart") == "~typing.Callable[[str], int]"
+
+ assert stringify_annotation(Callable[..., int], 'fully-qualified-except-typing') == "Callable[[...], int]"
+ assert stringify_annotation(Callable[..., int], "fully-qualified") == "typing.Callable[[...], int]"
+ assert stringify_annotation(Callable[..., int], "smart") == "~typing.Callable[[...], int]"
+
+
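+# Union and Optional are rendered with PEP 604 `|` syntax in every mode; member
+# classes such as Integral still pick up the ~ prefix in 'smart' mode.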
+def test_stringify_type_hints_Union():
+ assert stringify_annotation(Optional[int], 'fully-qualified-except-typing') == "int | None"
+ assert stringify_annotation(Optional[int], "fully-qualified") == "int | None"
+ assert stringify_annotation(Optional[int], "smart") == "int | None"
+
+ assert stringify_annotation(Union[int, None], 'fully-qualified-except-typing') == "int | None"
+ assert stringify_annotation(Union[None, int], 'fully-qualified-except-typing') == "None | int"
+ assert stringify_annotation(Union[int, None], "fully-qualified") == "int | None"
+ assert stringify_annotation(Union[None, int], "fully-qualified") == "None | int"
+ assert stringify_annotation(Union[int, None], "smart") == "int | None"
+ assert stringify_annotation(Union[None, int], "smart") == "None | int"
+
+ assert stringify_annotation(Union[int, str], 'fully-qualified-except-typing') == "int | str"
+ assert stringify_annotation(Union[int, str], "fully-qualified") == "int | str"
+ assert stringify_annotation(Union[int, str], "smart") == "int | str"
+
+ assert stringify_annotation(Union[int, Integral], 'fully-qualified-except-typing') == "int | numbers.Integral"
+ assert stringify_annotation(Union[int, Integral], "fully-qualified") == "int | numbers.Integral"
+ assert stringify_annotation(Union[int, Integral], "smart") == "int | ~numbers.Integral"
+
+ assert (stringify_annotation(Union[MyClass1, MyClass2], 'fully-qualified-except-typing') ==
+ "tests.test_util.test_util_typing.MyClass1 | tests.test_util.test_util_typing.<MyClass2>")
+ assert (stringify_annotation(Union[MyClass1, MyClass2], "fully-qualified") ==
+ "tests.test_util.test_util_typing.MyClass1 | tests.test_util.test_util_typing.<MyClass2>")
+ assert (stringify_annotation(Union[MyClass1, MyClass2], "smart") ==
+ "~tests.test_util.test_util_typing.MyClass1 | ~tests.test_util.test_util_typing.<MyClass2>")
+
+
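+# TypeVars are rendered with the path of the module that defines them;
+# MyInt only gains full qualification on Python 3.10+.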
+def test_stringify_type_hints_typevars():
+ T = TypeVar('T')
+ T_co = TypeVar('T_co', covariant=True)
+ T_contra = TypeVar('T_contra', contravariant=True)
+
+ assert stringify_annotation(T, 'fully-qualified-except-typing') == "tests.test_util.test_util_typing.T"
+ assert stringify_annotation(T, "smart") == "~tests.test_util.test_util_typing.T"
+
+ assert stringify_annotation(T_co, 'fully-qualified-except-typing') == "tests.test_util.test_util_typing.T_co"
+ assert stringify_annotation(T_co, "smart") == "~tests.test_util.test_util_typing.T_co"
+
+ assert stringify_annotation(T_contra, 'fully-qualified-except-typing') == "tests.test_util.test_util_typing.T_contra"
+ assert stringify_annotation(T_contra, "smart") == "~tests.test_util.test_util_typing.T_contra"
+
+ assert stringify_annotation(List[T], 'fully-qualified-except-typing') == "List[tests.test_util.test_util_typing.T]"
+ assert stringify_annotation(List[T], "smart") == "~typing.List[~tests.test_util.test_util_typing.T]"
+
+ assert stringify_annotation(list[T], 'fully-qualified-except-typing') == "list[tests.test_util.test_util_typing.T]"
+ assert stringify_annotation(list[T], "smart") == "list[~tests.test_util.test_util_typing.T]"
+
+ if sys.version_info[:2] >= (3, 10):
+ assert stringify_annotation(MyInt, 'fully-qualified-except-typing') == "tests.test_util.test_util_typing.MyInt"
+ assert stringify_annotation(MyInt, "smart") == "~tests.test_util.test_util_typing.MyInt"
+ else:
+ assert stringify_annotation(MyInt, 'fully-qualified-except-typing') == "MyInt"
+ assert stringify_annotation(MyInt, "smart") == "MyInt"
+
+
+def test_stringify_type_hints_custom_class():
+ assert stringify_annotation(MyClass1, 'fully-qualified-except-typing') == "tests.test_util.test_util_typing.MyClass1"
+ assert stringify_annotation(MyClass1, "smart") == "~tests.test_util.test_util_typing.MyClass1"
+
+ assert stringify_annotation(MyClass2, 'fully-qualified-except-typing') == "tests.test_util.test_util_typing.<MyClass2>"
+ assert stringify_annotation(MyClass2, "smart") == "~tests.test_util.test_util_typing.<MyClass2>"
+
+
+def test_stringify_type_hints_alias():
+ MyStr = str
+ MyTuple = Tuple[str, str]
+
+ assert stringify_annotation(MyStr, 'fully-qualified-except-typing') == "str"
+ assert stringify_annotation(MyStr, "smart") == "str"
+
+ assert stringify_annotation(MyTuple) == "Tuple[str, str]" # type: ignore[attr-defined]
+ assert stringify_annotation(MyTuple, "smart") == "~typing.Tuple[str, str]" # type: ignore[attr-defined]
+
+
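+# Literal values are shown as source-like literals; enum members keep the full
+# dotted path except in 'smart' mode, which shortens them to MyEnum.a.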
+def test_stringify_type_Literal():
+ from typing import Literal # type: ignore[attr-defined]
+ assert stringify_annotation(Literal[1, "2", "\r"], 'fully-qualified-except-typing') == "Literal[1, '2', '\\r']"
+ assert stringify_annotation(Literal[1, "2", "\r"], "fully-qualified") == "typing.Literal[1, '2', '\\r']"
+ assert stringify_annotation(Literal[1, "2", "\r"], "smart") == "~typing.Literal[1, '2', '\\r']"
+
+ assert stringify_annotation(Literal[MyEnum.a], 'fully-qualified-except-typing') == 'Literal[tests.test_util.test_util_typing.MyEnum.a]'
+ assert stringify_annotation(Literal[MyEnum.a], 'fully-qualified') == 'typing.Literal[tests.test_util.test_util_typing.MyEnum.a]'
+ assert stringify_annotation(Literal[MyEnum.a], 'smart') == '~typing.Literal[MyEnum.a]'
+
+
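+# PEP 604 unions (types.UnionType) can only be constructed at runtime on 3.10+.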
+@pytest.mark.skipif(sys.version_info[:2] <= (3, 9), reason='python 3.10+ is required.')
+def test_stringify_type_union_operator():
+ assert stringify_annotation(int | None) == "int | None" # type: ignore[attr-defined]
+ assert stringify_annotation(int | None, "smart") == "int | None" # type: ignore[attr-defined]
+
+ assert stringify_annotation(int | str) == "int | str" # type: ignore[attr-defined]
+ assert stringify_annotation(int | str, "smart") == "int | str" # type: ignore[attr-defined]
+
+ assert stringify_annotation(int | str | None) == "int | str | None" # type: ignore[attr-defined]
+ assert stringify_annotation(int | str | None, "smart") == "int | str | None" # type: ignore[attr-defined]
+
+ assert stringify_annotation(int | tuple[dict[str, int | None], list[int | str]] | None) == "int | tuple[dict[str, int | None], list[int | str]] | None" # type: ignore[attr-defined]
+ assert stringify_annotation(int | tuple[dict[str, int | None], list[int | str]] | None, "smart") == "int | tuple[dict[str, int | None], list[int | str]] | None" # type: ignore[attr-defined]
+
+ assert stringify_annotation(int | Struct) == "int | struct.Struct" # type: ignore[attr-defined]
+ assert stringify_annotation(int | Struct, "smart") == "int | ~struct.Struct" # type: ignore[attr-defined]
+
+
+def test_stringify_broken_type_hints():
+ assert stringify_annotation(BrokenType, 'fully-qualified-except-typing') == 'tests.test_util.test_util_typing.BrokenType'
+ assert stringify_annotation(BrokenType, "smart") == '~tests.test_util.test_util_typing.BrokenType'
+
+
+def test_stringify_mock():
+ with mock(['unknown']):
+ import unknown
+ assert stringify_annotation(unknown, 'fully-qualified-except-typing') == 'unknown'
+ assert stringify_annotation(unknown.secret.Class, 'fully-qualified-except-typing') == 'unknown.secret.Class'
+ assert stringify_annotation(unknown.secret.Class, "smart") == 'unknown.secret.Class'
+
+
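+# Unresolved forward references are rendered by their bare target name in
+# every mode.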
+def test_stringify_type_ForwardRef():
+ from typing import ForwardRef # type: ignore[attr-defined]
+
+ assert stringify_annotation(ForwardRef("MyInt")) == "MyInt"
+ assert stringify_annotation(ForwardRef("MyInt"), 'smart') == "MyInt"
+
+ assert stringify_annotation(list[ForwardRef("MyInt")]) == "list[MyInt]"
+ assert stringify_annotation(list[ForwardRef("MyInt")], 'smart') == "list[MyInt]"
+
+ assert stringify_annotation(Tuple[dict[ForwardRef("MyInt"), str], list[List[int]]]) == "Tuple[dict[MyInt, str], list[List[int]]]" # type: ignore[attr-defined]
+ assert stringify_annotation(Tuple[dict[ForwardRef("MyInt"), str], list[List[int]]], 'fully-qualified-except-typing') == "Tuple[dict[MyInt, str], list[List[int]]]" # type: ignore[attr-defined]
+ assert stringify_annotation(Tuple[dict[ForwardRef("MyInt"), str], list[List[int]]], 'smart') == "~typing.Tuple[dict[MyInt, str], list[~typing.List[int]]]" # type: ignore[attr-defined]
diff --git a/tests/test_util/typing_test_data.py b/tests/test_util/typing_test_data.py
new file mode 100644
index 0000000..e29b600
--- /dev/null
+++ b/tests/test_util/typing_test_data.py
@@ -0,0 +1,129 @@
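+# Fixture functions with deliberately varied signatures (defaults, *args/**kwargs,
+# keyword-only and positional-only parameters, string annotations) for the
+# signature-related util tests.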
+from inspect import Signature
+from numbers import Integral
+from typing import Any, Callable, Dict, List, Optional, Tuple, TypeVar, Union
+
+
+def f0(x: int, y: Integral) -> None:
+ pass
+
+
+def f1(x: list[int]) -> List[int]:
+ pass
+
+
+T = TypeVar('T')
+T_co = TypeVar('T_co', covariant=True)
+T_contra = TypeVar('T_contra', contravariant=True)
+
+
+def f2(x: List[T], y: List[T_co], z: T) -> List[T_contra]:
+ pass
+
+
+def f3(x: Union[str, Integral]) -> None:
+ pass
+
+
+MyStr = str
+
+
+def f4(x: 'MyStr', y: MyStr) -> None:
+ pass
+
+
+def f5(x: int, *, y: str, z: str) -> None:
+ pass
+
+
+def f6(x: int, *args, y: str, z: str) -> None:
+ pass
+
+
+def f7(x: int = None, y: dict = {}) -> None: # NoQA: B006,RUF013
+ pass
+
+
+def f8(x: Callable[[int, str], int]) -> None:
+ # See https://github.com/ambv/typehinting/issues/149 for Callable[..., int]
+ pass
+
+
+def f9(x: Callable) -> None:
+ pass
+
+
+def f10(x: Tuple[int, str], y: Tuple[int, ...]) -> None:
+ pass
+
+
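+# CustomAnnotation() and the literal 123 below are intentionally not types; they
+# exercise how arbitrary annotation objects are handled.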
+class CustomAnnotation:
+ def __repr__(self):
+ return 'CustomAnnotation'
+
+
+def f11(x: CustomAnnotation(), y: 123) -> None:
+ pass
+
+
+def f12() -> Tuple[int, str, int]:
+ pass
+
+
+def f13() -> Optional[str]:
+ pass
+
+
+def f14() -> Any:
+ pass
+
+
+def f15(x: "Unknown", y: "int") -> Any: # NoQA: F821 # type: ignore[attr-defined]
+ pass
+
+
+def f16(arg1, arg2, *, arg3=None, arg4=None):
+ pass
+
+
+def f17(*, arg3, arg4):
+ pass
+
+
+def f18(self, arg1: Union[int, Tuple] = 10) -> List[Dict]:
+ pass
+
+
+def f19(*args: int, **kwargs: str):
+ pass
+
+
+def f20() -> Optional[Union[int, str]]:
+ pass
+
+
+def f21(arg1='whatever', arg2=Signature.empty):
+ pass
+
+
+def f22(*, a, b):
+ pass
+
+
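+# f23-f25 use positional-only parameters (PEP 570).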
+def f23(a, b, /, c, d):
+ pass
+
+
+def f24(a, /, *, b):
+ pass
+
+
+def f25(a, b, /):
+ pass
+
+
+class Node:
+ def __init__(self, parent: Optional['Node']) -> None:
+ pass
+
+ def children(self) -> List['Node']:
+ pass