"""
Basic CrystalLexer Test
~~~~~~~~~~~~~~~~~~~~~~~
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import pytest
from pygments.token import Text, String, Number, Punctuation, Error, Whitespace
from pygments.lexers import CrystalLexer
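

# Module-scoped fixture: a single CrystalLexer instance is built once and
# shared by every test in this module.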
@pytest.fixture(scope='module')
def lexer():
    yield CrystalLexer()
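

# Numeric literals: each fragment must lex to a single token of the expected
# Number subtype (plus the trailing newline), while malformed literals such
# as '0b2' must start with an Error token.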
def test_numbers(lexer):
    for kind, testset in [
        (Number.Integer, '0 1 1_000_000 1u8 11231231231121312i64'),
        (Number.Float, '0.0 1.0_f32 1_f32 0f64 1e+4 1e111 1_234.567_890'),
        (Number.Bin, '0b1001_0110 0b0u8'),
        (Number.Oct, '0o17 0o7_i32'),
        (Number.Hex, '0xdeadBEEF'),
    ]:
        for fragment in testset.split():
            assert list(lexer.get_tokens(fragment + '\n')) == \
                [(kind, fragment), (Whitespace, '\n')]

    for fragment in '01 0b2 0x129g2 0o12358'.split():
        assert next(lexer.get_tokens(fragment + '\n'))[0] == Error
def test_symbols(lexer):
    for fragment in [':sym_bol', ':\u3042', ':question?']:
        assert list(lexer.get_tokens(fragment + '\n')) == \
            [(String.Symbol, fragment), (Whitespace, '\n')]

    fragment = ':"sym bol"\n'
    tokens = [
        (String.Symbol, ':"'),
        (String.Symbol, 'sym bol'),
        (String.Symbol, '"'),
        (Whitespace, '\n'),
    ]
    assert list(lexer.get_tokens(fragment)) == tokens
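

# Character literals: single characters (ASCII, Cyrillic, a \u{...} escape,
# and a literal newline) lex as String.Char; the multi-character 'abc' must
# be rejected as an Error.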
def test_chars(lexer):
    for fragment in ["'a'", "'я'", "'\\u{1234}'", "'\n'"]:
        assert list(lexer.get_tokens(fragment + '\n')) == \
            [(String.Char, fragment), (Whitespace, '\n')]
    assert next(lexer.get_tokens("'abc'"))[0] == Error
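

# Escape sequences inside double-quoted strings must come out as a distinct
# String.Escape token between the surrounding String.Double parts.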
def test_string_escapes(lexer):
    for body in ['\\n', '\\a', '\\xff', '\\u1234', '\\000', '\\u{0}', '\\u{10AfF9}']:
        fragment = '"a' + body + 'z"\n'
        assert list(lexer.get_tokens(fragment)) == [
            (String.Double, '"'),
            (String.Double, 'a'),
            (String.Escape, body),
            (String.Double, 'z'),
            (String.Double, '"'),
            (Whitespace, '\n'),
        ]
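

# Empty percent-string literals (%(), %[], %{}, %<>, %||): the opener
# (e.g. '%(') and the closing delimiter are emitted as two separate
# String.Other tokens.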
def test_empty_percent_strings(lexer):
    for body in ['%()', '%[]', '%{}', '%<>', '%||']:
        fragment = '(' + body + ')\n'
        assert list(lexer.get_tokens(fragment)) == [
            (Punctuation, '('),
            (String.Other, body[:-1]),
            (String.Other, body[-1]),
            (Punctuation, ')'),
            (Whitespace, '\n'),
        ]