path: root/third_party/python/ply/example/hedit/hedit.py
# -----------------------------------------------------------------------------
# hedit.py
#
# Parsing of Fortran H Edit descriptors (Contributed by Pearu Peterson)
#
# These tokens can't easily be tokenized with a single pattern because they
# have the following form:
#
#   nHc1...cn
#
# where n is a positive integer and c1 ... cn are characters.
#
# This example shows how to modify the state of the lexer to parse
# such tokens.
# -----------------------------------------------------------------------------
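
# For example, the hypothetical descriptor 3Habc has n = 3 and denotes the
# three-character string 'abc'; the lexer can't tell where the token ends
# until it has read the leading count n.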

import sys
sys.path.insert(0, "../..")


tokens = (
    'H_EDIT_DESCRIPTOR',
)

# Completely ignored characters
t_ignore = " \t\n"


def t_H_EDIT_DESCRIPTOR(t):
    r"\d+H.*"                     # This grabs all of the remaining text
    i = t.value.index('H')
    n = int(t.value[:i])          # the leading digits give the character count

    # Adjust the tokenizing position: rewind lexpos so that only the n
    # characters following 'H' are consumed by this token
    t.lexer.lexpos -= len(t.value) - (i + 1 + n)

    t.value = t.value[i + 1:i + 1 + n]
    return t
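
# Worked example (a sketch, assuming the hypothetical input
# "3Habc 10Habcdefghij"): the rule above first matches the entire remaining
# line, computes i = 1 and n = 3, rewinds lexpos by
# len(t.value) - (1 + 1 + 3) characters, and returns the token value "abc".
# Lexing then resumes at the following space, and the second match yields
# the token value "abcdefghij".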


def t_error(t):
    print("Illegal character '%s'" % t.value[0])
    t.lexer.skip(1)

# Build the lexer
import ply.lex as lex
lex.lex()
lex.runmain()
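
# A programmatic usage sketch (an alternative to lex.runmain(); the input
# string below is purely illustrative):
#
#   lexer = lex.lex()
#   lexer.input("3Habc 10Habcdefghij")
#   for tok in lexer:
#       print(tok.type, repr(tok.value))
#
# which should print:
#
#   H_EDIT_DESCRIPTOR 'abc'
#   H_EDIT_DESCRIPTOR 'abcdefghij'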