1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
|
"""Tests uti.nodes functions."""
from __future__ import annotations
import warnings
from textwrap import dedent
from typing import Any
import pytest
from docutils import frontend, nodes
from docutils.parsers import rst
from docutils.utils import new_document
from sphinx.transforms import ApplySourceWorkaround
from sphinx.util.nodes import (
NodeMatcher,
apply_source_workaround,
clean_astext,
extract_messages,
make_id,
split_explicit_title,
)
def _transform(doctree):
    """Run Sphinx's source-workaround transform over *doctree* in place."""
    transform = ApplySourceWorkaround(doctree)
    transform.apply()
def create_new_document():
    """Return an empty docutils document configured for RST parsing.

    Uses an ``id_prefix`` of ``'id'`` so generated ids look like ``id0``.
    """
    with warnings.catch_warnings():
        # DeprecationWarning: The frontend.OptionParser class will be replaced
        # by a subclass of argparse.ArgumentParser in Docutils 0.21 or later.
        warnings.filterwarnings('ignore', category=DeprecationWarning)
        parser_settings = frontend.OptionParser(
            components=(rst.Parser,)).get_default_values()
    parser_settings.id_prefix = 'id'
    return new_document('dummy.txt', parser_settings)
def _get_doctree(text):
    """Parse *text* as RST and return the transformed document tree."""
    doc = create_new_document()
    rst.Parser().parse(text, doc)
    _transform(doc)
    return doc
def assert_node_count(messages, node_type, expect_count):
    """Assert that *messages* holds exactly *expect_count* nodes of *node_type*.

    *messages* is an iterable of ``(node, message)`` pairs as produced by
    ``extract_messages()``.
    """
    node_list = [node for node, msg in messages]
    count = sum(1 for node in node_list if isinstance(node, node_type))
    assert count == expect_count, (
        "Count of %r in the %r is %d instead of %d"
        % (node_type, node_list, count, expect_count))
def test_NodeMatcher():
    """NodeMatcher matches by node class, by attribute, and by combinations."""
    doctree = nodes.document(None, None)
    doctree += nodes.paragraph('', 'Hello')
    doctree += nodes.paragraph('', 'Sphinx', block=1)
    doctree += nodes.paragraph('', 'World', block=2)
    doctree += nodes.literal_block('', 'blah blah blah', block=3)

    def found(matcher):
        # number of nodes in the tree accepted by *matcher*
        return len(list(doctree.findall(matcher)))

    # search by node class
    assert found(NodeMatcher(nodes.paragraph)) == 3
    # search by multiple node classes
    assert found(NodeMatcher(nodes.paragraph, nodes.literal_block)) == 4
    # search by node attribute
    assert found(NodeMatcher(block=1)) == 1
    # search by node attribute (Any)
    assert found(NodeMatcher(block=Any)) == 3
    # search by both class and attribute
    assert found(NodeMatcher(nodes.paragraph, block=Any)) == 2
    # mismatched
    assert found(NodeMatcher(nodes.title)) == 0
    # search with Any does not match to Text node
    assert found(NodeMatcher(blah=Any)) == 0
@pytest.mark.parametrize(
    ('rst', 'node_cls', 'count'),
    [
        (
            """
            .. admonition:: admonition title

               admonition body
            """,
            nodes.title, 1,
        ),
        (
            """
            .. figure:: foo.jpg

               this is title
            """,
            nodes.caption, 1,
        ),
        (
            """
            .. rubric:: spam
            """,
            nodes.rubric, 1,
        ),
        (
            """
            | spam
            | egg
            """,
            nodes.line, 2,
        ),
        (
            """
            section
            =======

            +----------------+
            | | **Title 1**  |
            | | Message 1    |
            +----------------+
            """,
            nodes.line, 2,
        ),
        (
            """
            * | **Title 1**
              | Message 1
            """,
            nodes.line, 2,
        ),
    ],
)
def test_extract_messages(rst, node_cls, count):
    """extract_messages() yields *count* translatable *node_cls* nodes per snippet.

    NOTE(review): the RST literals were reconstructed — directive bodies need a
    blank separator line and indentation, and the grid table needs aligned
    borders, for the snippets to parse as intended.
    """
    msg = extract_messages(_get_doctree(dedent(rst)))
    assert_node_count(msg, node_cls, count)
def test_extract_messages_without_rawsource():
    """
    Check node.rawsource is fall-backed by using node.astext() value.

    `extract_message` which is used from Sphinx i18n feature drop ``not node.rawsource``
    nodes. So, all nodes which want to translate must have ``rawsource`` value.
    However, sometimes node.rawsource is not set.

    For example: recommonmark-0.2.0 doesn't set rawsource to `paragraph` node.

    refs #1994: Fall back to node's astext() during i18n message extraction.
    """
    p = nodes.paragraph()
    p.append(nodes.Text('test'))
    p.append(nodes.Text('sentence'))
    assert not p.rawsource  # target node must not have rawsource value
    document = create_new_document()
    document.append(p)
    _transform(document)
    assert_node_count(extract_messages(document), nodes.TextElement, 1)
    # Bug fix: the original wrote ``assert messages[0], 'text sentence'`` —
    # the comma makes the string a failure *message*, so only truthiness was
    # checked.  Text children of a TextElement join with no separator, so the
    # astext() fallback value is 'testsentence'.
    messages = [message for _node, message in extract_messages(document)]
    assert messages[0] == 'testsentence'
def test_clean_astext():
    """clean_astext() keeps plain text but drops image alt text and raw nodes."""
    # plain paragraph: text passes through unchanged
    para = nodes.paragraph(text='hello world')
    assert clean_astext(para) == 'hello world'

    # image alt text is not part of the cleaned text
    image = nodes.image(alt='hello world')
    assert clean_astext(image) == ''

    # raw nodes are dropped entirely
    para = nodes.paragraph(text='hello world')
    para += nodes.raw('', 'raw text', format='html')
    assert clean_astext(para) == 'hello world'
@pytest.mark.parametrize(
    ('prefix', 'term', 'expected'),
    [
        ('', '', 'id0'),
        ('term', '', 'term-0'),
        ('term', 'Sphinx', 'term-Sphinx'),
        ('', 'io.StringIO', 'io.StringIO'),  # contains a dot
        ('', 'sphinx.setup_command', 'sphinx.setup_command'),  # contains a dot & underscore
        ('', '_io.StringIO', 'io.StringIO'),  # starts with underscore
        # Bug fix: this case had been ASCII-normalized to the tautology
        # ('sphinx', 'sphinx'), contradicting its own comment; restore the
        # fullwidth input so the conversion is actually exercised.
        ('', 'ｓｐｈｉｎｘ', 'sphinx'),  # alphabets in unicode fullwidth characters
        ('', '悠好', 'id0'),  # multibytes text (in Chinese)
        ('', 'Hello=悠好=こんにちは', 'Hello'),  # alphabets and multibytes text
        ('', 'fünf', 'funf'),  # latin1 (umlaut)
        ('', '0sphinx', 'sphinx'),  # starts with number
        ('', 'sphinx-', 'sphinx'),  # ends with hyphen
    ])
def test_make_id(app, prefix, term, expected):
    """make_id() builds a sane identifier from *prefix* and *term*."""
    document = create_new_document()
    assert make_id(app.env, document, prefix, term) == expected
def test_make_id_already_registered(app):
    """An id that is already taken falls back to the sequential form."""
    document = create_new_document()
    document.ids['term-Sphinx'] = True  # register "term-Sphinx" manually
    assert make_id(app.env, document, 'term', 'Sphinx') == 'term-0'
def test_make_id_sequential(app):
    """Sequential ids skip numbers that are already registered."""
    document = create_new_document()
    document.ids['term-0'] = True
    assert make_id(app.env, document, 'term') == 'term-1'
@pytest.mark.parametrize(
    ('title', 'expected'),
    [
        # implicit
        ('hello', (False, 'hello', 'hello')),
        # explicit
        ('hello <world>', (True, 'hello', 'world')),
        # explicit (title having angle brackets)
        ('hello <world> <sphinx>', (True, 'hello <world>', 'sphinx')),
    ],
)
def test_split_explicit_target(title, expected):
    """split_explicit_title() separates ``title <target>`` style references."""
    assert split_explicit_title(title) == expected
def test_apply_source_workaround_literal_block_no_source():
    """Regression test for #11091.

    Test that apply_source_workaround doesn't raise.
    """
    literal_block = nodes.literal_block('', '')
    list_item = nodes.list_item('', literal_block)
    bullet_list = nodes.bullet_list('', list_item)

    # precondition: none of the nodes carries a source
    for node in (literal_block, list_item, bullet_list):
        assert node.source is None

    apply_source_workaround(literal_block)

    # the workaround must not have invented a source for any of them
    for node in (literal_block, list_item, bullet_list):
        assert node.source is None
|