// Copyright (c) 2001-2010 Hartmut Kaiser
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)

// This example shows how to create a simple lexer recognizing a couple of
// different tokens and how to use it together with a grammar. The grammar
// backtracks heavily, which makes it a good candidate for lexer-based
// parsing: all tokens are scanned and generated only once, even if
// backtracking is required, which speeds up the overall parsing process
// considerably, outweighing the overhead of setting up the lexer.
// Additionally, it demonstrates how to use one of the defined tokens as a
// parser component in the grammar.
//
// The grammar recognizes a simple input structure: any number of English
// simple sentences (statements, questions, and commands) are recognized
// and counted separately.
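//
// For illustration only (the actual contents of example2.input are not
// reproduced here), a paragraph such as the following would be accepted,
// counting one statement, one question, and one command:
//
//     Our hiking boots are ready. Do you have the map? Let's go!
//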
// #define BOOST_SPIRIT_DEBUG
// #define BOOST_SPIRIT_LEXERTL_DEBUG
#include <boost/config/warning_disable.hpp>
#include <boost/spirit/include/qi.hpp>
#include <boost/spirit/include/lex_lexertl.hpp>
#include <boost/spirit/include/phoenix_operator.hpp>
#include <iostream>
#include <fstream>
#include <string>
#include "example.hpp"
using namespace boost::spirit;
using namespace boost::spirit::ascii;
using boost::phoenix::ref;
///////////////////////////////////////////////////////////////////////////////
// Token definition
///////////////////////////////////////////////////////////////////////////////
template <typename Lexer>
struct example2_tokens : lex::lexer<Lexer>
{
    example2_tokens()
    {
        // A 'word' is comprised of one or more letters and an optional
        // apostrophe. If it contains an apostrophe, there may only be one,
        // and the apostrophe must be preceded and followed by at least one
        // letter. For example, "I'm" and "doesn't" meet the definition of
        // 'word' given below.
        word = "[a-zA-Z]+('[a-zA-Z]+)?";

        // Associate the tokens and the token set with the lexer. Note that
        // single-character token definitions as used below are always
        // interpreted literally and never as special regex characters. This
        // is done so that single characters can be assigned the id of their
        // character code value, making it possible to reference them as
        // literals in Qi grammars.
        this->self = lex::token_def<>(',') | '!' | '.' | '?' | ' ' | '\n' | word;
    }

    lex::token_def<> word;
};
///////////////////////////////////////////////////////////////////////////////
// Grammar definition
///////////////////////////////////////////////////////////////////////////////
template <typename Iterator>
struct example2_grammar : qi::grammar<Iterator>
{
    template <typename TokenDef>
    example2_grammar(TokenDef const& tok)
      : example2_grammar::base_type(story)
      , paragraphs(0), commands(0), questions(0), statements(0)
    {
        story
            =   +paragraph
            ;
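
        // A paragraph is one or more sentences followed by optional
        // trailing spaces and at least one newline.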
        paragraph
            =   (   +(   command [ ++ref(commands) ]
                     |   question [ ++ref(questions) ]
                     |   statement [ ++ref(statements) ]
                     )
                    >> *char_(' ') >> +char_('\n')
                )
                [ ++ref(paragraphs) ]
            ;
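
        // The three sentence types share an identical body (words, spaces,
        // and commas) and differ only in their terminating punctuation;
        // distinguishing them is what forces the parser to backtrack.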
        command
            =   +(tok.word | ' ' | ',') >> '!'
            ;

        question
            =   +(tok.word | ' ' | ',') >> '?'
            ;

        statement
            =   +(tok.word | ' ' | ',') >> '.'
            ;
        BOOST_SPIRIT_DEBUG_NODE(story);
        BOOST_SPIRIT_DEBUG_NODE(paragraph);
        BOOST_SPIRIT_DEBUG_NODE(command);
        BOOST_SPIRIT_DEBUG_NODE(question);
        BOOST_SPIRIT_DEBUG_NODE(statement);
    }

    qi::rule<Iterator> story, paragraph, command, question, statement;
    int paragraphs, commands, questions, statements;
};
///////////////////////////////////////////////////////////////////////////////
int main()
{
    // iterator type used to expose the underlying input stream
    typedef std::string::iterator base_iterator_type;

    // This is the token type to return from the lexer iterator
    typedef lex::lexertl::token<base_iterator_type> token_type;

    // This is the lexer type to use to tokenize the input.
    // Here we use the lexertl based lexer engine.
    typedef lex::lexertl::lexer<token_type> lexer_type;

    // This is the token definition type (derived from the given lexer type).
    typedef example2_tokens<lexer_type> example2_tokens;

    // this is the iterator type exposed by the lexer
    typedef example2_tokens::iterator_type iterator_type;

    // this is the type of the grammar to parse
    typedef example2_grammar<iterator_type> example2_grammar;

    // now we use the types defined above to create the lexer and grammar
    // object instances needed to invoke the parsing process
    example2_tokens tokens;                         // Our lexer
    example2_grammar calc(tokens);                  // Our parser
    std::string str (read_from_file("example2.input"));

    // At this point we generate the iterator pair used to expose the
    // tokenized input stream.
    std::string::iterator it = str.begin();
    iterator_type iter = tokens.begin(it, str.end());
    iterator_type end = tokens.end();

    // Parsing is done based on the token stream, not the character
    // stream read from the input.
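    // (As a side note, Spirit.Lex also provides lex::tokenize_and_parse(),
    // which wraps the iterator setup above and the parse call below into a
    // single invocation; the explicit form is kept here to make the token
    // stream visible.)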
    bool r = qi::parse(iter, end, calc);

    if (r && iter == end)
    {
        std::cout << "-------------------------\n";
        std::cout << "Parsing succeeded\n";
        std::cout << "There were "
                  << calc.commands << " commands, "
                  << calc.questions << " questions, and "
                  << calc.statements << " statements.\n";
        std::cout << "-------------------------\n";
    }
    else
    {
        std::cout << "-------------------------\n";
        std::cout << "Parsing failed\n";
        std::cout << "-------------------------\n";
    }

    std::cout << "Bye... :-) \n\n";
    return 0;
}