diff options
author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-04 11:33:32 +0000 |
---|---|---|
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-04 11:33:32 +0000 |
commit | 1f403ad2197fc7442409f434ee574f3e6b46fb73 (patch) | |
tree | 0299c6dd11d5edfa918a29b6456bc1875f1d288c /tests/test_func.py | |
parent | Initial commit. (diff) | |
download | pygments-1f403ad2197fc7442409f434ee574f3e6b46fb73.tar.xz pygments-1f403ad2197fc7442409f434ee574f3e6b46fb73.zip |
Adding upstream version 2.14.0+dfsg (tag: upstream/2.14.0+dfsg).
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'tests/test_func.py')
-rw-r--r-- | tests/test_func.py | 44 |
1 file changed, 44 insertions, 0 deletions
"""Tests for the FunC lexer (``pygments.lexers.func.FuncLexer``).

Each test feeds a small snippet to the lexer and checks only the FIRST
emitted token, which is enough to classify the leading word as an
identifier, a function name, or a number.
"""

import pytest

from pygments.lexers.func import FuncLexer
from pygments.token import Token, Name


@pytest.fixture(scope='module')
def lexer_func():
    # One shared lexer instance is enough: get_tokens() is stateless
    # between calls, so module scope avoids repeated construction.
    yield FuncLexer()


@pytest.mark.parametrize('text', (
    'take(first)Entry', '"not_a_string', 'msg.sender',
    'send_message,then_terminate', '_'))
def test_func_not_identifier(lexer_func, text):
    """Test text that should **not** be tokenized as identifier."""
    assert list(lexer_func.get_tokens(text))[0] != (Name.Variable, text)


@pytest.mark.parametrize('text', (
    '`test identifier`', 'simple_identifier', 'query\'\'',
    '_internal_value', 'get_pubkeys&signatures',
    'dict::udict_set_builder', '2+2=2*2', '-alsovalidname', '{hehehe}'))
def test_func_identifier(lexer_func, text):
    """Test text that should be tokenized as identifier."""
    assert list(lexer_func.get_tokens(text))[0] == (Name.Variable, text)


@pytest.mark.parametrize('text', (
    '`test identifier`(', 'simple_identifier(', 'query\'\'(',
    '_internal_value(', 'get_pubkeys&signatures(',
    'dict::udict_set_builder(', '2+2=2*2(', '-alsovalidname(', '{hehehe}('))
def test_func_function(lexer_func, text):
    """Test text that should be tokenized as a function name.

    The trailing ``(`` marks a call site, so the token before it must be
    ``Name.Function`` covering everything except that parenthesis.
    """
    assert list(lexer_func.get_tokens(text))[0] == (Name.Function, text[:-1])


@pytest.mark.parametrize('text', ('0x0f', '0x1_2', '123', '0b10', '0xffff_fff', '1'))
def test_func_number(lexer_func, text):
    """Test text that should be tokenized as number."""
    assert list(lexer_func.get_tokens(text))[0] == (Token.Literal.Number, text)


@pytest.mark.parametrize('text', ('0x0f_m', '0X1_2', '12d3', '0b1_0f', '0bff_fff', '0b'))
def test_func_not_number(lexer_func, text):
    """Test text that should *not* be tokenized as number."""
    assert list(lexer_func.get_tokens(text))[0] != (Token.Literal.Number, text)