"""
    Javascript tests
    ~~~~~~~~~~~~~~~~

    :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

import pytest

from pygments.lexers.javascript import JavascriptLexer
from pygments.token import Number


@pytest.fixture(scope='module')
def lexer():
    """Provide a single JavascriptLexer shared by every test in this module."""
    js_lexer = JavascriptLexer()
    yield js_lexer


@pytest.mark.parametrize(
    'text',
    (
        '1', '1.', '.1', '1.1', '1e1', '1E1', '1e+1', '1E-1', '1.e1', '.1e1',
        '0888',  # octal prefix with non-octal numbers
    )
)
def test_float_literal_positive_matches(lexer, text):
    """Each input must come out of the lexer as a single float literal token."""
    first_token = next(iter(lexer.get_tokens(text)))
    assert first_token == (Number.Float, text)


@pytest.mark.parametrize('text', ('.\u0b6a', '.', '1..', '1n', '1ee', '1e',
                                  '1e-', '1e--1', '1e++1', '1e1.0'))
def test_float_literals_negative_matches(lexer, text):
    """None of these inputs may be lexed as a single float literal token."""
    first_token = next(iter(lexer.get_tokens(text)))
    assert first_token != (Number.Float, text)


@pytest.mark.parametrize('text', ('0n', '123n'))
def test_integer_literal_positive_matches(lexer, text):
    """Each input must come out of the lexer as a single integer literal token."""
    tokens = list(lexer.get_tokens(text))
    assert tokens[0] == (Number.Integer, text)


@pytest.mark.parametrize('text', ('1N', '1', '1.0'))
def test_integer_literals_negative_matches(lexer, text):
    """None of these inputs may be lexed as a single integer literal token."""
    tokens = list(lexer.get_tokens(text))
    assert tokens[0] != (Number.Integer, text)


@pytest.mark.parametrize('text', ('0b01', '0B10n'))
def test_binary_literal_positive_matches(lexer, text):
    """Each input must come out of the lexer as a single binary literal token."""
    token_type, token_value = next(iter(lexer.get_tokens(text)))
    assert (token_type, token_value) == (Number.Bin, text)


@pytest.mark.parametrize('text', ('0b0N', '0b', '0bb', '0b2'))
def test_binary_literals_negative_matches(lexer, text):
    """None of these inputs may be lexed as a single binary literal token."""
    token_type, token_value = next(iter(lexer.get_tokens(text)))
    assert (token_type, token_value) != (Number.Bin, text)


@pytest.mark.parametrize('text', ('017', '071n', '0o11', '0O77n'))
def test_octal_literal_positive_matches(lexer, text):
    """Each input must come out of the lexer as a single octal literal token."""
    first_token = list(lexer.get_tokens(text))[0]
    assert first_token == (Number.Oct, text)


@pytest.mark.parametrize('text', ('01N', '089', '098n', '0o', '0OO', '0o88', '0O88n'))
def test_octal_literals_negative_matches(lexer, text):
    """None of these inputs may be lexed as a single octal literal token."""
    first_token = list(lexer.get_tokens(text))[0]
    assert first_token != (Number.Oct, text)


@pytest.mark.parametrize('text', ('0x01', '0Xefn', '0x0EF'))
def test_hexadecimal_literal_positive_matches(lexer, text):
    """Each input must come out of the lexer as a single hex literal token."""
    tokens = list(lexer.get_tokens(text))
    assert tokens[0] == (Number.Hex, text)


@pytest.mark.parametrize('text', ('0x0N', '0x', '0Xx', '0xg', '0xhn'))
def test_hexadecimal_literals_negative_matches(lexer, text):
    """None of these inputs may be lexed as a single hex literal token."""
    tokens = list(lexer.get_tokens(text))
    assert tokens[0] != (Number.Hex, text)