"""
    Typographic Number Theory tests
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

import pytest

from pygments.lexers.tnt import TNTLexer
from pygments.token import Text, Operator, Keyword, Name, Number, \
    Punctuation, Error


@pytest.fixture(autouse=True)
def lexer():
    yield TNTLexer()

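# Each TNTLexer helper method exercised below consumes ``text`` starting at
# the given position, appends (index, tokentype, value) tuples to
# ``lexer.cur``, and returns the index of the first unconsumed character.
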
# whitespace
@pytest.mark.parametrize('text', (' a', ' \t0', '\n\n 3'))
def test_whitespace_positive_matches(lexer, text):
    """Test fragments that should be tokenized as whitespace text."""
    assert lexer.whitespace(0, text) == len(text) - 1
    assert lexer.whitespace(0, text, True) == len(text) - 1
    assert lexer.cur[-1] == (0, Text, text[:-1])

@pytest.mark.parametrize('text', ('0 a=b premise', 'b=a symmetry'))
def test_whitespace_negative_matches(lexer, text):
    """Test statements that do not start with whitespace text."""
    assert lexer.whitespace(0, text) == 0
    with pytest.raises(AssertionError):
        lexer.whitespace(0, text, True)
    assert not lexer.cur

# terms that can go on either side of an = sign
@pytest.mark.parametrize('text', ('a ', "a' ", 'b ', "c' "))
def test_variable_positive_matches(lexer, text):
    """Test fragments that should be tokenized as variables."""
    assert lexer.variable(0, text) == len(text) - 1
    assert lexer.cur[-1] == (0, Name.Variable, text[:-1])

@pytest.mark.parametrize('text', ("' ", 'f ', "f' "))
def test_variable_negative_matches(lexer, text):
"""Test fragments that should **not** be tokenized as variables."""
with pytest.raises(AssertionError):
lexer.variable(0, text)
assert not lexer.cur
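# numerals are unary: zero or more S's ('successor') applied to a final 0
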
@pytest.mark.parametrize('text', ('0', 'S0', 'SSSSS0'))
def test_numeral_positive_matches(lexer, text):
    """Test fragments that should be tokenized as (unary) numerals."""
    assert lexer.term(0, text) == len(text)
    assert lexer.cur[-1] == (len(text) - 1, Number.Integer, text[-1])
    if text != '0':
        assert lexer.cur[-2] == (0, Number.Integer, text[:-1])

@pytest.mark.parametrize('text', (
    '(a+b)', '(b.a)', '(c+d)'
))
def test_multiterm_positive_matches(lexer, text):
    """Test fragments that should be tokenized as a compound term."""
    assert lexer.term(0, text) == len(text)
    assert [t[1] for t in lexer.cur] == [
        Punctuation, Name.Variable, Operator,
        Name.Variable, Punctuation
    ]

@pytest.mark.parametrize('text', ('1', '=', 'A'))
def test_term_negative_matches(lexer, text):
    """Test fragments that should not be tokenized as terms at all."""
    with pytest.raises(AssertionError):
        lexer.term(0, text)
    assert not lexer.cur

# full statements, minus rule
@pytest.mark.parametrize('text', ('~a=b ', '~~~~a=b '))
def test_negator_positive_matches(lexer, text):
    """Test statements that start with a negation."""
    assert lexer.formula(0, text) == len(text) - 1
    assert lexer.cur[0] == (0, Operator, text[:-4])

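# quantifiers: 'A' is universal ('for all') and 'E' is existential
# ('there exists'), each followed by a variable and a colon
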
@pytest.mark.parametrize('text', ('Aa:a=b ', 'Eb:a=b '))
def test_quantifier_positive_matches(lexer, text):
    """Test statements that start with a quantifier."""
    assert lexer.formula(0, text) == len(text) - 1
    assert lexer.cur[0][1] == Keyword.Declaration
    assert lexer.cur[1][1] == Name.Variable
    assert lexer.cur[2] == (2, Punctuation, ':')

@pytest.mark.parametrize('text', ('Aaa=b', 'Eba=b'))
def test_quantifier_negative_matches(lexer, text):
    """Test quantifiers that are only partially valid."""
    with pytest.raises(AssertionError):
        lexer.formula(0, text)
    # leftovers should still be valid
    assert lexer.cur[0][1] == Keyword.Declaration
    assert lexer.cur[1][1] == Name.Variable

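# compound formulas: two formulas wrapped in angle brackets and joined by a
# junction operator: & (and), | (or), ] (implies)
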
@pytest.mark.parametrize('text', ('<a=b&b=a>', '<a=b|b=a>', '<a=b]b=a>'))
def test_compound_positive_matches(lexer, text):
    """Test statements that consist of multiple formulas compounded."""
    assert lexer.formula(0, text) == len(text)
    assert lexer.cur[0] == (0, Punctuation, '<')
    assert lexer.cur[4][1] == Operator
    assert lexer.cur[-1] == (len(text)-1, Punctuation, '>')

@pytest.mark.parametrize('text', ('<a=b/b=a>', '<a=b&b=a '))
def test_compound_negative_matches(lexer, text):
    """Test statements with junctions that are not valid."""
    with pytest.raises(AssertionError):
        lexer.formula(0, text)