path: root/src/boost/libs/tokenizer
author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 18:24:20 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 18:24:20 +0000
commit     483eb2f56657e8e7f419ab1a4fab8dce9ade8609 (patch)
tree       e5d88d25d870d5dedacb6bbdbe2a966086a0a5cf /src/boost/libs/tokenizer
parent     Initial commit. (diff)
download   ceph-483eb2f56657e8e7f419ab1a4fab8dce9ade8609.tar.xz
           ceph-483eb2f56657e8e7f419ab1a4fab8dce9ade8609.zip

Adding upstream version 14.2.21. (upstream/14.2.21, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/boost/libs/tokenizer')
-rw-r--r--  src/boost/libs/tokenizer/Jamfile                           11
-rw-r--r--  src/boost/libs/tokenizer/LICENSE                           23
-rw-r--r--  src/boost/libs/tokenizer/README.md                         94
-rw-r--r--  src/boost/libs/tokenizer/example/Jamfile.v2                15
-rw-r--r--  src/boost/libs/tokenizer/example/char_sep_example_1.cpp    28
-rw-r--r--  src/boost/libs/tokenizer/example/char_sep_example_2.cpp    29
-rw-r--r--  src/boost/libs/tokenizer/example/char_sep_example_3.cpp    26
-rw-r--r--  src/boost/libs/tokenizer/index.html                        13
-rw-r--r--  src/boost/libs/tokenizer/meta/libraries.json               15
-rw-r--r--  src/boost/libs/tokenizer/test/Jamfile.v2                   15
-rw-r--r--  src/boost/libs/tokenizer/test/examples.cpp                150
-rw-r--r--  src/boost/libs/tokenizer/test/simple_example_1.cpp         25
-rw-r--r--  src/boost/libs/tokenizer/test/simple_example_2.cpp         24
-rw-r--r--  src/boost/libs/tokenizer/test/simple_example_3.cpp         25
-rw-r--r--  src/boost/libs/tokenizer/test/simple_example_4.cpp         24
-rw-r--r--  src/boost/libs/tokenizer/test/simple_example_5.cpp         34
16 files changed, 551 insertions, 0 deletions
diff --git a/src/boost/libs/tokenizer/Jamfile b/src/boost/libs/tokenizer/Jamfile
new file mode 100644
index 00000000..2a827cf2
--- /dev/null
+++ b/src/boost/libs/tokenizer/Jamfile
@@ -0,0 +1,11 @@
+# Boost.Tokenizer Library Jamfile
+#
+# Copyright (c) 2018 James E. King III
+#
+# Use, modification, and distribution are subject to the
+# Boost Software License, Version 1.0. (See accompanying file
+# LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+
+# please order by name to ease maintenance
+build-project example ;
+build-project test ;
diff --git a/src/boost/libs/tokenizer/LICENSE b/src/boost/libs/tokenizer/LICENSE
new file mode 100644
index 00000000..36b7cd93
--- /dev/null
+++ b/src/boost/libs/tokenizer/LICENSE
@@ -0,0 +1,23 @@
+Boost Software License - Version 1.0 - August 17th, 2003
+
+Permission is hereby granted, free of charge, to any person or organization
+obtaining a copy of the software and accompanying documentation covered by
+this license (the "Software") to use, reproduce, display, distribute,
+execute, and transmit the Software, and to prepare derivative works of the
+Software, and to permit third-parties to whom the Software is furnished to
+do so, all subject to the following:
+
+The copyright notices in the Software and this entire statement, including
+the above license grant, this restriction and the following disclaimer,
+must be included in all copies of the Software, in whole or in part, and
+all derivative works of the Software, unless such copies or derivative
+works are solely in the form of machine-executable object code generated by
+a source language processor.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
+SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
+FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
+ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/src/boost/libs/tokenizer/README.md b/src/boost/libs/tokenizer/README.md
new file mode 100644
index 00000000..54cc8c45
--- /dev/null
+++ b/src/boost/libs/tokenizer/README.md
@@ -0,0 +1,94 @@
+
+
+
+# [Boost.Tokenizer](http://boost.org/libs/tokenizer)
+
+
+
+Boost.Tokenizer is a part of [Boost C++ Libraries](http://github.com/boostorg). The Boost.Tokenizer package provides a flexible and easy-to-use way to break a string or other character sequence into a series of tokens.
+
+## License
+
+Distributed under the [Boost Software License, Version 1.0](http://www.boost.org/LICENSE_1_0.txt).
+
+## Properties
+
+* C++03
+* Header-Only
+
+## Build Status
+
+Branch | Travis | Appveyor | Coverity Scan | codecov.io | Deps | Docs | Tests |
+:-------------: | ------ | -------- | ------------- | ---------- | ---- | ---- | ----- |
+[`master`](https://github.com/boostorg/tokenizer/tree/master) | [![Build Status](https://travis-ci.org/boostorg/tokenizer.svg?branch=master)](https://travis-ci.org/boostorg/tokenizer) | [![Build status](https://ci.appveyor.com/api/projects/status/FIXME-vc81nhd5i2f6hi8y/branch/master?svg=true)](https://ci.appveyor.com/project/jeking3/tokenizer-c6pnd/branch/master) | [![Coverity Scan Build Status](https://scan.coverity.com/projects/15854/badge.svg)](https://scan.coverity.com/projects/boostorg-tokenizer) | [![codecov](https://codecov.io/gh/boostorg/tokenizer/branch/master/graph/badge.svg)](https://codecov.io/gh/boostorg/tokenizer/branch/master)| [![Deps](https://img.shields.io/badge/deps-master-brightgreen.svg)](https://pdimov.github.io/boostdep-report/master/tokenizer.html) | [![Documentation](https://img.shields.io/badge/docs-master-brightgreen.svg)](http://www.boost.org/doc/libs/master/doc/html/tokenizer.html) | [![Enter the Matrix](https://img.shields.io/badge/matrix-master-brightgreen.svg)](http://www.boost.org/development/tests/master/developer/tokenizer.html)
+[`develop`](https://github.com/boostorg/tokenizer/tree/develop) | [![Build Status](https://travis-ci.org/boostorg/tokenizer.svg?branch=develop)](https://travis-ci.org/boostorg/tokenizer) | [![Build status](https://ci.appveyor.com/api/projects/status/FIXME-vc81nhd5i2f6hi8y/branch/develop?svg=true)](https://ci.appveyor.com/project/jeking3/tokenizer-c6pnd/branch/develop) | [![Coverity Scan Build Status](https://scan.coverity.com/projects/15854/badge.svg)](https://scan.coverity.com/projects/boostorg-tokenizer) | [![codecov](https://codecov.io/gh/boostorg/tokenizer/branch/develop/graph/badge.svg)](https://codecov.io/gh/boostorg/tokenizer/branch/develop) | [![Deps](https://img.shields.io/badge/deps-develop-brightgreen.svg)](https://pdimov.github.io/boostdep-report/develop/tokenizer.html) | [![Documentation](https://img.shields.io/badge/docs-develop-brightgreen.svg)](http://www.boost.org/doc/libs/develop/doc/html/tokenizer.html) | [![Enter the Matrix](https://img.shields.io/badge/matrix-develop-brightgreen.svg)](http://www.boost.org/development/tests/develop/developer/tokenizer.html)
+
+
+## Overview
+
+
+> Break up a phrase into words.
+
+ <a target="_blank" href="http://melpon.org/wandbox/permlink/kZeKmQAtqDlpn8if">![Try it online][badge.wandbox]</a>
+
+```c++
+#include <iostream>
+#include <boost/tokenizer.hpp>
+#include <string>
+
+int main(){
+ std::string s = "This is, a test";
+ typedef boost::tokenizer<> Tok;
+ Tok tok(s);
+ for (Tok::iterator beg = tok.begin(); beg != tok.end(); ++beg){
+ std::cout << *beg << "\n";
+ }
+}
+
+```
+
+> Using a range-based for loop (C++11 and later)
+
+ <a target="_blank" href="http://melpon.org/wandbox/permlink/z94YLs8PdYSh7rXz">![Try it online][badge.wandbox]</a>
+```c++
+#include <iostream>
+#include <boost/tokenizer.hpp>
+#include <string>
+
+int main(){
+ std::string s = "This is, a test";
+ boost::tokenizer<> tok(s);
+ for (auto token: tok) {
+ std::cout << token << "\n";
+ }
+}
+```
+
+## Documentation
+
+Documentation can be found at [Boost.Tokenizer](http://boost.org/libs/tokenizer)
+
+## Related Material
+[Boost.Tokenizer](http://theboostcpplibraries.com/boost.tokenizer), Chapter 10 at theboostcpplibraries.com, contains several examples, including **escaped_list_separator**.
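+
+A minimal sketch of **escaped_list_separator** with its default characters (backslash escape, comma separator, double-quote quoting); the input string here is illustrative only:
+
+```c++
+#include <iostream>
+#include <boost/tokenizer.hpp>
+#include <string>
+
+int main(){
+    // The quoted field keeps its embedded comma, so three tokens are printed.
+    std::string s = "Field 1,\"embedded,comma\",Field 3";
+    typedef boost::tokenizer<boost::escaped_list_separator<char> > Tok;
+    Tok tok(s);
+    for (Tok::iterator beg = tok.begin(); beg != tok.end(); ++beg) {
+        std::cout << *beg << "\n";
+    }
+}
+```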
+
+## Contributing
+
+>This library is being maintained as a part of the [Boost Library Official Maintainer Program](http://beta.boost.org/community/official_library_maintainer_program.html)
+
+
+Open Issues on <a target="_blank" href="https://svn.boost.org/trac/boost/query?status=assigned&status=new&status=reopened&component=tokenizer&col=id&col=summary&col=status&col=owner&col=type&col=milestone&order=priority">![][badge.trac]</a>
+
+
+
+## Acknowledgements
+>From the author:
+>
+I wish to thank the members of the boost mailing list, whose comments, compliments, and criticisms during both the development and formal review helped make the Tokenizer library what it is. I especially wish to thank Aleksey Gurtovoy for the idea of using a pair of iterators to specify the input, instead of a string. I also wish to thank Jeremy Siek for his idea of providing a container interface for the token iterators and for simplifying the template parameters for the TokenizerFunctions. He and Daryle Walker also emphasized the need to separate interface and implementation. Gary Powell sparked the idea of using isspace and ispunct as the defaults for char_delimiters_separator. Jeff Garland provided ideas on how to change the order of the template parameters in order to make tokenizer easier to declare. Thanks to Douglas Gregor who served as review manager and provided many insights both on the boost list and in e-mail on how to polish up the implementation and presentation of Tokenizer. Finally, thanks to Beman Dawes who integrated the final version into the boost distribution.
+
+## License
+Distributed under the Boost Software License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+
+
+[badge.Wandbox]: https://img.shields.io/badge/try%20it-online-blue.svg
+[badge.Trac]:https://svn.boost.org/htdocs/common/trac_logo_mini.png
+
diff --git a/src/boost/libs/tokenizer/example/Jamfile.v2 b/src/boost/libs/tokenizer/example/Jamfile.v2
new file mode 100644
index 00000000..97314128
--- /dev/null
+++ b/src/boost/libs/tokenizer/example/Jamfile.v2
@@ -0,0 +1,15 @@
+# Boost.Tokenizer Library example Jamfile
+#
+# Copyright (c) 2008 James E. King III
+#
+# Distributed under the Boost Software License, Version 1.0. (See accompany-
+# ing file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+
+import testing ;
+
+test-suite "tokenizer-examples"
+ : [ run char_sep_example_1.cpp ]
+ [ run char_sep_example_2.cpp ]
+ [ run char_sep_example_3.cpp ]
+ ;
+
diff --git a/src/boost/libs/tokenizer/example/char_sep_example_1.cpp b/src/boost/libs/tokenizer/example/char_sep_example_1.cpp
new file mode 100644
index 00000000..4022c715
--- /dev/null
+++ b/src/boost/libs/tokenizer/example/char_sep_example_1.cpp
@@ -0,0 +1,28 @@
+// (c) Copyright Jeremy Siek 2002.
+
+// Distributed under the Boost Software License, Version 1.0. (See
+// accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+// Sample output:
+//
+// <Hello> <world> <foo> <bar> <yow> <baz>
+
+// char_sep_example_1.cpp
+#include <iostream>
+#include <boost/tokenizer.hpp>
+#include <string>
+#include <cstdlib> // for EXIT_SUCCESS
+
+int main()
+{
+ std::string str = ";;Hello|world||-foo--bar;yow;baz|";
+ typedef boost::tokenizer<boost::char_separator<char> >
+ tokenizer;
+ boost::char_separator<char> sep("-;|");
+ tokenizer tokens(str, sep);
+ for (tokenizer::iterator tok_iter = tokens.begin();
+ tok_iter != tokens.end(); ++tok_iter)
+ std::cout << "<" << *tok_iter << "> ";
+ std::cout << "\n";
+ return EXIT_SUCCESS;
+}
diff --git a/src/boost/libs/tokenizer/example/char_sep_example_2.cpp b/src/boost/libs/tokenizer/example/char_sep_example_2.cpp
new file mode 100644
index 00000000..7db0b5e2
--- /dev/null
+++ b/src/boost/libs/tokenizer/example/char_sep_example_2.cpp
@@ -0,0 +1,29 @@
+// (c) Copyright Jeremy Siek 2002.
+
+// Distributed under the Boost Software License, Version 1.0. (See
+// accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+// Sample output:
+//
+// <> <> <Hello> <|> <world> <|> <> <|> <> <foo> <> <bar> <yow> <baz> <|> <>
+
+// char_sep_example_2.cpp
+#include <iostream>
+#include <boost/tokenizer.hpp>
+#include <string>
+#include <cstdlib> // for EXIT_SUCCESS
+
+int main()
+{
+ std::string str = ";;Hello|world||-foo--bar;yow;baz|";
+
+ typedef boost::tokenizer<boost::char_separator<char> >
+ tokenizer;
+ boost::char_separator<char> sep("-;", "|", boost::keep_empty_tokens);
+ tokenizer tokens(str, sep);
+ for (tokenizer::iterator tok_iter = tokens.begin();
+ tok_iter != tokens.end(); ++tok_iter)
+ std::cout << "<" << *tok_iter << "> ";
+ std::cout << "\n";
+ return EXIT_SUCCESS;
+}
diff --git a/src/boost/libs/tokenizer/example/char_sep_example_3.cpp b/src/boost/libs/tokenizer/example/char_sep_example_3.cpp
new file mode 100644
index 00000000..51b4906e
--- /dev/null
+++ b/src/boost/libs/tokenizer/example/char_sep_example_3.cpp
@@ -0,0 +1,26 @@
+// (c) Copyright Jeremy Siek 2002.
+
+// Distributed under the Boost Software License, Version 1.0. (See
+// accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+// Sample output:
+// <This> <is> <,> <a> <test>
+
+// char_sep_example_3.cpp
+#include <iostream>
+#include <boost/tokenizer.hpp>
+#include <string>
+#include <cstdlib> // for EXIT_SUCCESS
+
+int main()
+{
+ std::string str = "This is, a test";
+ typedef boost::tokenizer<boost::char_separator<char> > Tok;
+ boost::char_separator<char> sep; // default constructed
+ Tok tok(str, sep);
+ for(Tok::iterator tok_iter = tok.begin(); tok_iter != tok.end(); ++tok_iter)
+ std::cout << "<" << *tok_iter << "> ";
+ std::cout << "\n";
+ return EXIT_SUCCESS;
+}
+
diff --git a/src/boost/libs/tokenizer/index.html b/src/boost/libs/tokenizer/index.html
new file mode 100644
index 00000000..ab9d6b03
--- /dev/null
+++ b/src/boost/libs/tokenizer/index.html
@@ -0,0 +1,13 @@
+<html>
+<head>
+<meta http-equiv="refresh" content="0; URL=doc/index.html">
+</head>
+<body>
+Automatic redirection failed, please go to
+<a href="doc/index.html">doc/index.html</a>.&nbsp;<hr>
+<p>© Copyright Beman Dawes, 2001</p>
+<p>Distributed under the Boost Software License, Version 1.0. (See accompanying
+file <a href="../../LICENSE_1_0.txt">LICENSE_1_0.txt</a> or copy
+at <a href="http://www.boost.org/LICENSE_1_0.txt">www.boost.org/LICENSE_1_0.txt</a>)</p>
+</body>
+</html>
diff --git a/src/boost/libs/tokenizer/meta/libraries.json b/src/boost/libs/tokenizer/meta/libraries.json
new file mode 100644
index 00000000..1f488253
--- /dev/null
+++ b/src/boost/libs/tokenizer/meta/libraries.json
@@ -0,0 +1,15 @@
+{
+ "key": "tokenizer",
+ "name": "Tokenizer",
+ "authors": [
+ "John Bandela"
+ ],
+ "description": "Break of a string or other character sequence into a series of tokens.",
+ "category": [
+ "Iterators",
+ "String"
+ ],
+ "maintainers": [
+ "John R. Bandela <jbandela -at- ufl.edu>"
+ ]
+}
diff --git a/src/boost/libs/tokenizer/test/Jamfile.v2 b/src/boost/libs/tokenizer/test/Jamfile.v2
new file mode 100644
index 00000000..58bc06fb
--- /dev/null
+++ b/src/boost/libs/tokenizer/test/Jamfile.v2
@@ -0,0 +1,15 @@
+#~ Copyright Rene Rivera 2008
+#~ Distributed under the Boost Software License, Version 1.0.
+#~ (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+
+import testing ;
+
+test-suite tokenizer
+: [ run examples.cpp
+ /boost/test//boost_test_exec_monitor/<link>static ]
+ [ run simple_example_1.cpp ]
+ [ run simple_example_2.cpp ]
+ [ run simple_example_3.cpp ]
+ [ run simple_example_4.cpp ]
+ [ run simple_example_5.cpp ]
+ ;
diff --git a/src/boost/libs/tokenizer/test/examples.cpp b/src/boost/libs/tokenizer/test/examples.cpp
new file mode 100644
index 00000000..1e0b69b8
--- /dev/null
+++ b/src/boost/libs/tokenizer/test/examples.cpp
@@ -0,0 +1,150 @@
+// Boost tokenizer examples -------------------------------------------------//
+
+// (c) Copyright John R. Bandela 2001.
+
+// Distributed under the Boost Software License, Version 1.0. (See
+// accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+// See http://www.boost.org for updates, documentation, and revision history.
+
+#include <iostream>
+#include <iterator>
+#include <string>
+#include <algorithm>
+#include <boost/tokenizer.hpp>
+#include <boost/array.hpp>
+
+#include <boost/test/minimal.hpp>
+
+int test_main( int /*argc*/, char* /*argv*/[] )
+{
+ using namespace boost;
+
+ // Use tokenizer
+ {
+ const std::string test_string = ";;Hello|world||-foo--bar;yow;baz|";
+ std::string answer[] = { "Hello", "world", "foo", "bar", "yow", "baz" };
+ typedef tokenizer<char_separator<char> > Tok;
+ char_separator<char> sep("-;|");
+ Tok t(test_string, sep);
+ BOOST_REQUIRE(std::equal(t.begin(),t.end(),answer));
+ }
+ {
+ const std::string test_string = ";;Hello|world||-foo--bar;yow;baz|";
+ std::string answer[] = { "", "", "Hello", "|", "world", "|", "", "|", "",
+ "foo", "", "bar", "yow", "baz", "|", "" };
+ typedef tokenizer<char_separator<char> > Tok;
+ char_separator<char> sep("-;", "|", boost::keep_empty_tokens);
+ Tok t(test_string, sep);
+ BOOST_REQUIRE(std::equal(t.begin(), t.end(), answer));
+ }
+ {
+ const std::string test_string = "This,,is, a.test..";
+ std::string answer[] = {"This","is","a","test"};
+ typedef tokenizer<> Tok;
+ Tok t(test_string);
+ BOOST_REQUIRE(std::equal(t.begin(),t.end(),answer));
+ }
+
+ {
+ const std::string test_string = "Field 1,\"embedded,comma\",quote \\\", escape \\\\";
+ std::string answer[] = {"Field 1","embedded,comma","quote \""," escape \\"};
+ typedef tokenizer<escaped_list_separator<char> > Tok;
+ Tok t(test_string);
+ BOOST_REQUIRE(std::equal(t.begin(),t.end(),answer));
+ }
+
+ {
+ const std::string test_string = ",1,;2\\\";3\\;,4,5^\\,\'6,7\';";
+ std::string answer[] = {"","1","","2\"","3;","4","5\\","6,7",""};
+ typedef tokenizer<escaped_list_separator<char> > Tok;
+ escaped_list_separator<char> sep("\\^",",;","\"\'");
+ Tok t(test_string,sep);
+ BOOST_REQUIRE(std::equal(t.begin(),t.end(),answer));
+ }
+
+ {
+ const std::string test_string = "12252001";
+ std::string answer[] = {"12","25","2001"};
+ typedef tokenizer<offset_separator > Tok;
+ boost::array<int,3> offsets = {{2,2,4}};
+ offset_separator func(offsets.begin(),offsets.end());
+ Tok t(test_string,func);
+ BOOST_REQUIRE(std::equal(t.begin(),t.end(),answer));
+ }
+
+ // Use token_iterator_generator
+ {
+ const std::string test_string = "This,,is, a.test..";
+ std::string answer[] = {"This","is","a","test"};
+ typedef token_iterator_generator<char_delimiters_separator<char> >::type Iter;
+ Iter begin = make_token_iterator<std::string>(test_string.begin(),
+ test_string.end(),char_delimiters_separator<char>());
+ Iter end;
+ BOOST_REQUIRE(std::equal(begin,end,answer));
+ }
+
+ {
+ const std::string test_string = "Field 1,\"embedded,comma\",quote \\\", escape \\\\";
+ std::string answer[] = {"Field 1","embedded,comma","quote \""," escape \\"};
+ typedef token_iterator_generator<escaped_list_separator<char> >::type Iter;
+ Iter begin = make_token_iterator<std::string>(test_string.begin(),
+ test_string.end(),escaped_list_separator<char>());
+ Iter begin_c(begin);
+ Iter end;
+ BOOST_REQUIRE(std::equal(begin,end,answer));
+
+ while(begin_c != end)
+ {
+ BOOST_REQUIRE(begin_c.at_end() == 0);
+ ++begin_c;
+ }
+ BOOST_REQUIRE(begin_c.at_end());
+ }
+
+ {
+ const std::string test_string = "12252001";
+ std::string answer[] = {"12","25","2001"};
+ typedef token_iterator_generator<offset_separator>::type Iter;
+ boost::array<int,3> offsets = {{2,2,4}};
+ offset_separator func(offsets.begin(),offsets.end());
+ Iter begin = make_token_iterator<std::string>(test_string.begin(),
+ test_string.end(),func);
+ Iter end= make_token_iterator<std::string>(test_string.end(),
+ test_string.end(),func);
+ BOOST_REQUIRE(std::equal(begin,end,answer));
+ }
+
+ // Test copying
+ {
+ const std::string test_string = "abcdef";
+ token_iterator_generator<offset_separator>::type beg, end, other;
+ boost::array<int,3> ar = {{1,2,3}};
+ offset_separator f(ar.begin(),ar.end());
+ beg = make_token_iterator<std::string>(test_string.begin(),test_string.end(),f);
+
+ ++beg;
+ other = beg;
+ ++other;
+
+ BOOST_REQUIRE(*beg=="bc");
+ BOOST_REQUIRE(*other=="def");
+
+ other = make_token_iterator<std::string>(test_string.begin(),
+ test_string.end(),f);
+
+ BOOST_REQUIRE(*other=="a");
+ }
+
+ // Test non-default constructed char_delimiters_separator
+ {
+ const std::string test_string = "how,are you, doing";
+ std::string answer[] = {"how",",","are you",","," doing"};
+ tokenizer<> t(test_string,char_delimiters_separator<char>(true,",",""));
+ BOOST_REQUIRE(std::equal(t.begin(),t.end(),answer));
+ }
+
+ return 0;
+}
+
diff --git a/src/boost/libs/tokenizer/test/simple_example_1.cpp b/src/boost/libs/tokenizer/test/simple_example_1.cpp
new file mode 100644
index 00000000..950f7328
--- /dev/null
+++ b/src/boost/libs/tokenizer/test/simple_example_1.cpp
@@ -0,0 +1,25 @@
+// (c) Copyright John R. Bandela 2001.
+
+// Distributed under the Boost Software License, Version 1.0. (See
+// accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+// See http://www.boost.org/libs/tokenizer for documentation
+
+
+// simple_example_1.cpp
+#include<iostream>
+#include<boost/tokenizer.hpp>
+#include<string>
+
+int main(){
+ using namespace std;
+ using namespace boost;
+ string s = "This is, a test";
+ tokenizer<> tok(s);
+ for(tokenizer<>::iterator beg=tok.begin(); beg!=tok.end();++beg){
+ cout << *beg << "\n";
+ }
+ return 0;
+}
+
diff --git a/src/boost/libs/tokenizer/test/simple_example_2.cpp b/src/boost/libs/tokenizer/test/simple_example_2.cpp
new file mode 100644
index 00000000..1bac2d9b
--- /dev/null
+++ b/src/boost/libs/tokenizer/test/simple_example_2.cpp
@@ -0,0 +1,24 @@
+// (c) Copyright John R. Bandela 2001.
+
+// Distributed under the Boost Software License, Version 1.0. (See
+// accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+// See http://www.boost.org/libs/tokenizer for documentation
+
+// simple_example_2.cpp
+#include<iostream>
+#include<boost/tokenizer.hpp>
+#include<string>
+
+int main(){
+ using namespace std;
+ using namespace boost;
+ string s = "Field 1,\"putting quotes around fields, allows commas\",Field 3";
+ tokenizer<escaped_list_separator<char> > tok(s);
+ for(tokenizer<escaped_list_separator<char> >::iterator beg=tok.begin(); beg!=tok.end();++beg){
+ cout << *beg << "\n";
+ }
+ return 0;
+}
+
diff --git a/src/boost/libs/tokenizer/test/simple_example_3.cpp b/src/boost/libs/tokenizer/test/simple_example_3.cpp
new file mode 100644
index 00000000..6f47d353
--- /dev/null
+++ b/src/boost/libs/tokenizer/test/simple_example_3.cpp
@@ -0,0 +1,25 @@
+// (c) Copyright John R. Bandela 2001.
+
+// Distributed under the Boost Software License, Version 1.0. (See
+// accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+// See http://www.boost.org/libs/tokenizer for documentation
+
+// simple_example_3.cpp
+#include<iostream>
+#include<boost/tokenizer.hpp>
+#include<string>
+
+int main(){
+ using namespace std;
+ using namespace boost;
+ string s = "12252001";
+ int offsets[] = {2,2,4};
+ offset_separator f(offsets, offsets+3);
+ tokenizer<offset_separator> tok(s,f);
+ for(tokenizer<offset_separator>::iterator beg=tok.begin(); beg!=tok.end();++beg){
+ cout << *beg << "\n";
+ }
+ return 0;
+}
diff --git a/src/boost/libs/tokenizer/test/simple_example_4.cpp b/src/boost/libs/tokenizer/test/simple_example_4.cpp
new file mode 100644
index 00000000..cf7ead39
--- /dev/null
+++ b/src/boost/libs/tokenizer/test/simple_example_4.cpp
@@ -0,0 +1,24 @@
+// (c) Copyright John R. Bandela 2001.
+
+// Distributed under the Boost Software License, Version 1.0. (See
+// accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+// See http://www.boost.org/libs/tokenizer for documentation
+
+// simple_example_4.cpp
+#include<iostream>
+#include<boost/tokenizer.hpp>
+#include<string>
+
+int main(){
+ using namespace std;
+ using namespace boost;
+ string s = "This is, a test";
+ tokenizer<char_delimiters_separator<char> > tok(s);
+ for(tokenizer<char_delimiters_separator<char> >::iterator beg=tok.begin(); beg!=tok.end();++beg){
+ cout << *beg << "\n";
+ }
+ return 0;
+}
+
diff --git a/src/boost/libs/tokenizer/test/simple_example_5.cpp b/src/boost/libs/tokenizer/test/simple_example_5.cpp
new file mode 100644
index 00000000..8259533c
--- /dev/null
+++ b/src/boost/libs/tokenizer/test/simple_example_5.cpp
@@ -0,0 +1,34 @@
+// (c) Copyright John R. Bandela 2001.
+
+// Distributed under the Boost Software License, Version 1.0. (See
+// accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+// See http://www.boost.org/libs/tokenizer for documentation
+
+/// simple_example_5.cpp
+#include<iostream>
+#include<boost/token_iterator.hpp>
+#include<string>
+
+#ifdef __BORLANDC__
+// compiler bug fix:
+template class boost::token_iterator_generator<boost::offset_separator>::type;
+#endif
+
+int main(){
+ using namespace std;
+ using namespace boost;
+ string s = "12252001";
+ int offsets[] = {2,2,4};
+ offset_separator f(offsets, offsets+3);
+ typedef token_iterator_generator<offset_separator>::type Iter;
+ Iter beg = make_token_iterator<string>(s.begin(),s.end(),f);
+ Iter end = make_token_iterator<string>(s.end(),s.end(),f);
+ // The above statement could also have been what is below
+ // Iter end;
+ for(;beg!=end;++beg){
+ cout << *beg << "\n";
+ }
+ return 0;
+}