Diffstat
114 files changed, 9463 insertions, 0 deletions
diff --git a/doc/_static/images/overview/doc-content.png b/doc/_static/images/overview/doc-content.png Binary files differ new file mode 100644 index 0000000..cb87909 --- /dev/null +++ b/doc/_static/images/overview/doc-content.png diff --git a/doc/_static/images/overview/multi-sheets-sheet1.png b/doc/_static/images/overview/multi-sheets-sheet1.png Binary files differ new file mode 100644 index 0000000..e21c3b6 --- /dev/null +++ b/doc/_static/images/overview/multi-sheets-sheet1.png diff --git a/doc/_static/images/overview/multi-sheets-sheet2.png b/doc/_static/images/overview/multi-sheets-sheet2.png Binary files differ new file mode 100644 index 0000000..f2fe458 --- /dev/null +++ b/doc/_static/images/overview/multi-sheets-sheet2.png diff --git a/doc/cli/index.rst b/doc/cli/index.rst new file mode 100644 index 0000000..ed0726d --- /dev/null +++ b/doc/cli/index.rst @@ -0,0 +1,16 @@ + +CLI +=== + +.. toctree:: + :maxdepth: 1 + + orcus_csv.rst + orcus_gnumeric.rst + orcus_json.rst + orcus_ods.rst + orcus_parquet.rst + orcus_xls_xml.rst + orcus_xlsx.rst + orcus_xml.rst + orcus_yaml.rst diff --git a/doc/cli/orcus_csv.rst b/doc/cli/orcus_csv.rst new file mode 100644 index 0000000..7809d78 --- /dev/null +++ b/doc/cli/orcus_csv.rst @@ -0,0 +1,65 @@ +orcus-csv +========= + +Usage +----- + +.. code-block:: + + orcus-csv [options] FILE + +The FILE must specify a path to an existing file. + +Options +------- + +- ``-h [ --help ]`` + + Print this help. + +- ``-d [ --debug ]`` + + Turn on a debug mode and optionally specify a debug level in order to generate run-time debug outputs. + +- ``-r [ --recalc ]`` + + Re-calculate all formula cells after the document is loaded. + +- ``-e [ --error-policy ] arg (=fail)`` + + Specify whether to abort immediately when the loader fails to parse the first formula cell ('fail'), or skip the offending cells and continue ('skip'). + +- ``--dump-check`` + + Dump the content to stdout in a special format used for content verification in automated tests. + +- ``-o [ --output ] arg`` + + Output directory path, or output file when --dump-check option is used. + +- ``-f [ --output-format ] arg`` + + Specify the output format. Supported format types are: + + - check - Flat format that fully encodes document content. Suitable for automated testing. + - csv - CSV format. + - debug-state - This format dumps the internal state of the document in detail, useful for debugging. + - flat - Flat text format that displays document content in a grid. + - html - HTML format. + - json - JSON format. + - none - No output to be generated. May be useful during development. + - xml - This format is currently unsupported. + - yaml - This format is currently unsupported. + +- ``--row-size arg`` + + Specify the maximum number of rows in each sheet. + +- ``--row-header arg`` + + Specify the number of header rows to repeat if the source content gets split into multiple sheets. + +- ``--split`` + + Specify whether or not to split the data into multiple sheets in case it won't fit in a single sheet. +
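As a purely illustrative example of how these options combine (the file and directory names below are invented), the following invocation splits a large CSV stream into multiple sheets of at most 1048576 rows each, repeats one header row per sheet, and writes the result in the flat format:

.. code-block::

   orcus-csv --split --row-size 1048576 --row-header 1 -f flat -o ./out large-data.csv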
diff --git a/doc/cli/orcus_gnumeric.rst b/doc/cli/orcus_gnumeric.rst new file mode 100644 index 0000000..6b6ae23 --- /dev/null +++ b/doc/cli/orcus_gnumeric.rst @@ -0,0 +1,57 @@ +orcus-gnumeric +============== + +Usage +----- + +.. code-block:: + + orcus-gnumeric [options] FILE + +The FILE must specify a path to an existing file. + +Options +------- + +- ``-h [ --help ]`` + + Print this help. + +- ``-d [ --debug ]`` + + Turn on a debug mode and optionally specify a debug level in order to generate run-time debug outputs. + +- ``-r [ --recalc ]`` + + Re-calculate all formula cells after the document is loaded. + +- ``-e [ --error-policy ] arg (=fail)`` + + Specify whether to abort immediately when the loader fails to parse the first formula cell ('fail'), or skip the offending cells and continue ('skip'). + +- ``--dump-check`` + + Dump the content to stdout in a special format used for content verification in automated tests. + +- ``-o [ --output ] arg`` + + Output directory path, or output file when --dump-check option is used. + +- ``-f [ --output-format ] arg`` + + Specify the output format. Supported format types are: + + - check - Flat format that fully encodes document content. Suitable for automated testing. + - csv - CSV format. + - debug-state - This format dumps the internal state of the document in detail, useful for debugging. + - flat - Flat text format that displays document content in a grid. + - html - HTML format. + - json - JSON format. + - none - No output to be generated. May be useful during development. + - xml - This format is currently unsupported. + - yaml - This format is currently unsupported. + +- ``--row-size arg`` + + Specify the maximum number of rows in each sheet. + diff --git a/doc/cli/orcus_json.rst b/doc/cli/orcus_json.rst new file mode 100644 index 0000000..be0208b --- /dev/null +++ b/doc/cli/orcus_json.rst @@ -0,0 +1,45 @@ +orcus-json +========== + +Usage +----- + +.. code-block:: + + orcus-json [options] FILE + +The FILE must specify the path to an existing file. + +Options +------- + +- ``-h [ --help ]`` + + Print this help. + +- ``--mode arg`` + + Mode of operation. Select one of the following options: convert, map, map-gen, or structure. + +- ``--resolve-refs`` + + Resolve JSON references to external files. + +- ``-o [ --output ] arg`` + + Output file path. + +- ``-f [ --output-format ] arg`` + + Specify the format of output file. Supported format types are: + + - XML (xml) + - JSON (json) + - YAML (yaml) + - flat tree dump (check) + - no output (none) + +- ``-m [ --map ] arg`` + + Path to a map file. This parameter is only used for map mode, and it is required for map mode. +
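For illustration, a minimal conversion run (file names invented) that converts a JSON document into YAML might look like this:

.. code-block::

   orcus-json --mode convert -f yaml -o output.yaml input.json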
diff --git a/doc/cli/orcus_ods.rst b/doc/cli/orcus_ods.rst new file mode 100644 index 0000000..fdeb2fa --- /dev/null +++ b/doc/cli/orcus_ods.rst @@ -0,0 +1,57 @@ +orcus-ods +========= + +Usage +----- + +.. code-block:: + + orcus-ods [options] FILE + +The FILE must specify a path to an existing file. + +Options +------- + +- ``-h [ --help ]`` + + Print this help. + +- ``-d [ --debug ]`` + + Turn on a debug mode and optionally specify a debug level in order to generate run-time debug outputs. + +- ``-r [ --recalc ]`` + + Re-calculate all formula cells after the document is loaded. + +- ``-e [ --error-policy ] arg (=fail)`` + + Specify whether to abort immediately when the loader fails to parse the first formula cell ('fail'), or skip the offending cells and continue ('skip'). + +- ``--dump-check`` + + Dump the content to stdout in a special format used for content verification in automated tests. + +- ``-o [ --output ] arg`` + + Output directory path, or output file when --dump-check option is used. + +- ``-f [ --output-format ] arg`` + + Specify the output format. Supported format types are: + + - check - Flat format that fully encodes document content. Suitable for automated testing. + - csv - CSV format. + - debug-state - This format dumps the internal state of the document in detail, useful for debugging. + - flat - Flat text format that displays document content in a grid. + - html - HTML format. + - json - JSON format. + - none - No output to be generated. May be useful during development. + - xml - This format is currently unsupported. + - yaml - This format is currently unsupported. + +- ``--row-size arg`` + + Specify the maximum number of rows in each sheet. + diff --git a/doc/cli/orcus_parquet.rst b/doc/cli/orcus_parquet.rst new file mode 100644 index 0000000..129919e --- /dev/null +++ b/doc/cli/orcus_parquet.rst @@ -0,0 +1,57 @@ +orcus-parquet +============= + +Usage +----- + +.. code-block:: + + orcus-parquet [options] FILE + +The FILE must specify a path to an existing file. + +Options +------- + +- ``-h [ --help ]`` + + Print this help. + +- ``-d [ --debug ]`` + + Turn on a debug mode and optionally specify a debug level in order to generate run-time debug outputs. + +- ``-r [ --recalc ]`` + + Re-calculate all formula cells after the document is loaded. + +- ``-e [ --error-policy ] arg (=fail)`` + + Specify whether to abort immediately when the loader fails to parse the first formula cell ('fail'), or skip the offending cells and continue ('skip'). + +- ``--dump-check`` + + Dump the content to stdout in a special format used for content verification in automated tests. + +- ``-o [ --output ] arg`` + + Output directory path, or output file when --dump-check option is used. + +- ``-f [ --output-format ] arg`` + + Specify the output format. Supported format types are: + + - check - Flat format that fully encodes document content. Suitable for automated testing. + - csv - CSV format. + - debug-state - This format dumps the internal state of the document in detail, useful for debugging. + - flat - Flat text format that displays document content in a grid. + - html - HTML format. + - json - JSON format. + - none - No output to be generated. May be useful during development. + - xml - This format is currently unsupported. + - yaml - This format is currently unsupported. + +- ``--row-size arg`` + + Specify the maximum number of rows in each sheet. +
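Note that the role of ``-o`` documented above changes with ``--dump-check``: without it, ``-o`` names an output directory, while with it, ``-o`` names a single output file. A hypothetical verification run (file names invented) would therefore look like this:

.. code-block::

   orcus-parquet --dump-check -o data.check data.parquet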
diff --git a/doc/cli/orcus_xls_xml.rst b/doc/cli/orcus_xls_xml.rst new file mode 100644 index 0000000..0081693 --- /dev/null +++ b/doc/cli/orcus_xls_xml.rst @@ -0,0 +1,57 @@ +orcus-xls-xml +============= + +Usage +----- + +.. code-block:: + + orcus-xls-xml [options] FILE + +The FILE must specify a path to an existing file. + +Options +------- + +- ``-h [ --help ]`` + + Print this help. + +- ``-d [ --debug ]`` + + Turn on a debug mode and optionally specify a debug level in order to generate run-time debug outputs. + +- ``-r [ --recalc ]`` + + Re-calculate all formula cells after the document is loaded. + +- ``-e [ --error-policy ] arg (=fail)`` + + Specify whether to abort immediately when the loader fails to parse the first formula cell ('fail'), or skip the offending cells and continue ('skip'). + +- ``--dump-check`` + + Dump the content to stdout in a special format used for content verification in automated tests. + +- ``-o [ --output ] arg`` + + Output directory path, or output file when --dump-check option is used. + +- ``-f [ --output-format ] arg`` + + Specify the output format. Supported format types are: + + - check - Flat format that fully encodes document content. Suitable for automated testing. + - csv - CSV format. + - debug-state - This format dumps the internal state of the document in detail, useful for debugging. + - flat - Flat text format that displays document content in a grid. + - html - HTML format. + - json - JSON format. + - none - No output to be generated. May be useful during development. + - xml - This format is currently unsupported. + - yaml - This format is currently unsupported. + +- ``--row-size arg`` + + Specify the maximum number of rows in each sheet. + diff --git a/doc/cli/orcus_xlsx.rst b/doc/cli/orcus_xlsx.rst new file mode 100644 index 0000000..6da6d4c --- /dev/null +++ b/doc/cli/orcus_xlsx.rst @@ -0,0 +1,57 @@ +orcus-xlsx +========== + +Usage +----- + +.. code-block:: + + orcus-xlsx [options] FILE + +The FILE must specify a path to an existing file. + +Options +------- + +- ``-h [ --help ]`` + + Print this help. + +- ``-d [ --debug ]`` + + Turn on a debug mode and optionally specify a debug level in order to generate run-time debug outputs. + +- ``-r [ --recalc ]`` + + Re-calculate all formula cells after the document is loaded. + +- ``-e [ --error-policy ] arg (=fail)`` + + Specify whether to abort immediately when the loader fails to parse the first formula cell ('fail'), or skip the offending cells and continue ('skip'). + +- ``--dump-check`` + + Dump the content to stdout in a special format used for content verification in automated tests. + +- ``-o [ --output ] arg`` + + Output directory path, or output file when --dump-check option is used. + +- ``-f [ --output-format ] arg`` + + Specify the output format. Supported format types are: + + - check - Flat format that fully encodes document content. Suitable for automated testing. + - csv - CSV format. + - debug-state - This format dumps the internal state of the document in detail, useful for debugging. + - flat - Flat text format that displays document content in a grid. + - html - HTML format. + - json - JSON format. + - none - No output to be generated. May be useful during development. + - xml - This format is currently unsupported. + - yaml - This format is currently unsupported. + +- ``--row-size arg`` + + Specify the maximum number of rows in each sheet. + diff --git a/doc/cli/orcus_xml.rst b/doc/cli/orcus_xml.rst new file mode 100644 index 0000000..d41865e --- /dev/null +++ b/doc/cli/orcus_xml.rst @@ -0,0 +1,43 @@ +orcus-xml +========= + +Usage +----- + +.. code-block:: + + orcus-xml [OPTIONS] FILE + +Options +------- + +- ``-h [ --help ]`` + + Print this help. + +- ``--mode arg`` + + Mode of operation. Select one of the following options: dump, map, map-gen, structure, or transform. + +- ``-m [ --map ] arg`` + + Path to the map file. A map file is required for all modes except for the structure mode. + +- ``-o [ --output ] arg`` + + Path to either an output directory, or an output file. + +- ``-f [ --output-format ] arg`` + + Specify the output format. Supported format types are: + + - check - Flat format that fully encodes document content. Suitable for automated testing. + - csv - CSV format. + - debug-state - This format dumps the internal state of the document in detail, useful for debugging. + - flat - Flat text format that displays document content in a grid. + - html - HTML format. + - json - JSON format. + - none - No output to be generated. May be useful during development. + - xml - This format is currently unsupported. + - yaml - This format is currently unsupported. + diff --git a/doc/cli/orcus_yaml.rst b/doc/cli/orcus_yaml.rst new file mode 100644 index 0000000..e29a8ed --- /dev/null +++ b/doc/cli/orcus_yaml.rst @@ -0,0 +1,27 @@ +orcus-yaml +========== + +Usage +----- + +.. 
code-block:: + + orcus-yaml [options] FILE + +The FILE must specify a path to an existing file. + +Options +------- + +- ``-h [ --help ]`` + + Print this help. + +- ``-o [ --output ] arg`` + + Output file path. + +- ``-f [ --output-format ] arg`` + + Specify the format of output file. Supported format types are: 1) yaml 2) json + diff --git a/doc/conf.py b/doc/conf.py new file mode 100644 index 0000000..7dffca4 --- /dev/null +++ b/doc/conf.py @@ -0,0 +1,288 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +# +# orcus documentation build configuration file, created by +# sphinx-quickstart on Tue Sep 22 20:54:14 2015. +# +# This file is execfile()d with the current directory set to its +# containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +import sys +import os +from pathlib import Path +import subprocess + +rtd_build = os.environ.get('READTHEDOCS', None) == 'True' + +if rtd_build: + subprocess.call("doxygen --version; doxygen doxygen.conf", shell=True) + +# Set paths for python modules (for autodoc). The paths must be absolute. +py_root_path = Path(".") / ".." / "src" / "python" +py_root_path = py_root_path.absolute() +sys.path.insert(0, str(py_root_path)) + +# -- General configuration ------------------------------------------------ + +# If your documentation needs a minimal Sphinx version, state it here. +#needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + "breathe", + "sphinxarg.ext", + "sphinx.ext.napoleon", + "sphinx.ext.autodoc", + "sphinx.ext.intersphinx" +] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix of source filenames. +source_suffix = '.rst' + +# The encoding of source files. +#source_encoding = 'utf-8-sig' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +project = 'Orcus' +copyright = '2023, Kohei Yoshida' + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The short X.Y version. +version = '0.19' +# The full version, including alpha/beta/rc tags. +release = '0.19.2' + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +#language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +#today = '' +# Else, today_fmt is used as the format for a strftime call. +#today_fmt = '%B %d, %Y' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +exclude_patterns = ['_build'] + +# The reST default role (used for this markup: `text`) to use for all +# documents. +#default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +#add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +#add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. 
+#show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# A list of ignored prefixes for module index sorting. +#modindex_common_prefix = [] + +# If true, keep warnings as "system message" paragraphs in the built documents. +#keep_warnings = False + + +# -- Options for HTML output ---------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +html_theme = 'sphinx_rtd_theme' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +html_theme_options = { + "navigation_depth": 5, +} + +# Add any paths that contain custom themes here, relative to this directory. +#html_theme_path = [] + +# The name for this set of Sphinx documents. If None, it defaults to +# "<project> v<release> documentation". +#html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +#html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +#html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +#html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] + +# Add any extra paths that contain custom files (such as robots.txt or +# .htaccess) here, relative to this directory. These files are copied +# directly to the root of the documentation. +#html_extra_path = [] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +#html_last_updated_fmt = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +#html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +#html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +#html_additional_pages = {} + +# If false, no module index is generated. +#html_domain_indices = True + +# If false, no index is generated. +#html_use_index = True + +# If true, the index is split into individual pages for each letter. +#html_split_index = False + +# If true, links to the reST sources are added to the pages. +#html_show_sourcelink = True + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. +#html_show_sphinx = True + +# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. +#html_show_copyright = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a <link> tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +#html_use_opensearch = '' + +# This is the file name suffix for HTML files (e.g. ".xhtml"). +#html_file_suffix = None + +# Output file base name for HTML help builder. +htmlhelp_basename = 'orcusdoc' + + +# -- Options for LaTeX output --------------------------------------------- + +latex_elements = { +# The paper size ('letterpaper' or 'a4paper'). 
+#'papersize': 'letterpaper', + +# The font size ('10pt', '11pt' or '12pt'). +#'pointsize': '10pt', + +# Additional stuff for the LaTeX preamble. +#'preamble': '', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + ('index', 'orcus.tex', 'Orcus Documentation', + 'Kohei Yoshida', 'manual'), +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +#latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +#latex_use_parts = False + +# If true, show page references after internal links. +#latex_show_pagerefs = False + +# If true, show URL addresses after external links. +#latex_show_urls = False + +# Documents to append as an appendix to all manuals. +#latex_appendices = [] + +# If false, no module index is generated. +#latex_domain_indices = True + + +# -- Options for manual page output --------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + ('index', 'orcus', 'Orcus Documentation', + ['Kohei Yoshida'], 1) +] + +# If true, show URL addresses after external links. +#man_show_urls = False + + +# -- Options for Texinfo output ------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + ('index', 'orcus', 'Orcus Documentation', + 'Kohei Yoshida', 'Orcus', 'One line description of project.', + 'Miscellaneous'), +] + +# Documents to append as an appendix to all manuals. +#texinfo_appendices = [] + +# If false, no module index is generated. +#texinfo_domain_indices = True + +# How to display URL addresses: 'footnote', 'no', or 'inline'. +#texinfo_show_urls = 'footnote' + +# If true, do not generate a @detailmenu in the "Top" node's menu. +#texinfo_no_detailmenu = False + +breathe_projects = {"orcus": "./_doxygen/xml"} + +breathe_default_project = "orcus" + +breathe_default_members = ('members', 'undoc-members') + +intersphinx_mapping = { + 'python': ('https://docs.python.org/3', None), + 'ixion': ('https://ixion.readthedocs.io/en/latest', None), +} + +autodoc_member_order = "bysource" + diff --git a/doc/cpp/core/config.rst b/doc/cpp/core/config.rst new file mode 100644 index 0000000..a5a1f50 --- /dev/null +++ b/doc/cpp/core/config.rst @@ -0,0 +1,13 @@ + +Configurations +============== + +.. doxygenstruct:: orcus::config + :members: + +.. doxygenstruct:: orcus::json_config + :members: + +.. doxygenstruct:: orcus::yaml_config + :members: + diff --git a/doc/cpp/core/import-filter/csv.rst b/doc/cpp/core/import-filter/csv.rst new file mode 100644 index 0000000..bee2671 --- /dev/null +++ b/doc/cpp/core/import-filter/csv.rst @@ -0,0 +1,6 @@ + +Comma-separated values (CSV) +============================ + +.. doxygenclass:: orcus::orcus_csv + :members: diff --git a/doc/cpp/core/import-filter/detect.rst b/doc/cpp/core/import-filter/detect.rst new file mode 100644 index 0000000..09d6041 --- /dev/null +++ b/doc/cpp/core/import-filter/detect.rst @@ -0,0 +1,5 @@ + +Format detection +================ + +.. 
doxygenfunction:: orcus::detect diff --git a/doc/cpp/core/import-filter/gnumeric.rst b/doc/cpp/core/import-filter/gnumeric.rst new file mode 100644 index 0000000..23a2bf9 --- /dev/null +++ b/doc/cpp/core/import-filter/gnumeric.rst @@ -0,0 +1,6 @@ + +Gnumeric XML +============ + +.. doxygenclass:: orcus::orcus_gnumeric + :members: diff --git a/doc/cpp/core/import-filter/index.rst b/doc/cpp/core/import-filter/index.rst new file mode 100644 index 0000000..0b6befa --- /dev/null +++ b/doc/cpp/core/import-filter/index.rst @@ -0,0 +1,20 @@ + +Import filters +============== + +.. toctree:: + :maxdepth: 1 + + csv.rst + ods.rst + xls_xml.rst + xlsx.rst + gnumeric.rst + xml.rst + json.rst + detect.rst + + + + + diff --git a/doc/cpp/core/import-filter/json.rst b/doc/cpp/core/import-filter/json.rst new file mode 100644 index 0000000..eb55810 --- /dev/null +++ b/doc/cpp/core/import-filter/json.rst @@ -0,0 +1,6 @@ + +Generic JSON +============ + +.. doxygenclass:: orcus::orcus_json + :members: diff --git a/doc/cpp/core/import-filter/ods.rst b/doc/cpp/core/import-filter/ods.rst new file mode 100644 index 0000000..527b064 --- /dev/null +++ b/doc/cpp/core/import-filter/ods.rst @@ -0,0 +1,9 @@ + +Open document spreadsheet +========================= + +.. doxygenclass:: orcus::orcus_ods + :members: + +.. doxygenclass:: orcus::import_ods + :members: diff --git a/doc/cpp/core/import-filter/xls_xml.rst b/doc/cpp/core/import-filter/xls_xml.rst new file mode 100644 index 0000000..48a5371 --- /dev/null +++ b/doc/cpp/core/import-filter/xls_xml.rst @@ -0,0 +1,6 @@ + +Microsoft Excel 2003 XML +======================== + +.. doxygenclass:: orcus::orcus_xls_xml + :members: diff --git a/doc/cpp/core/import-filter/xlsx.rst b/doc/cpp/core/import-filter/xlsx.rst new file mode 100644 index 0000000..4c468ca --- /dev/null +++ b/doc/cpp/core/import-filter/xlsx.rst @@ -0,0 +1,9 @@ + +Microsoft Excel 2007 XML +======================== + +.. doxygenclass:: orcus::orcus_xlsx + :members: + +.. doxygenclass:: orcus::import_xlsx + :members: diff --git a/doc/cpp/core/import-filter/xml.rst b/doc/cpp/core/import-filter/xml.rst new file mode 100644 index 0000000..67d9fd9 --- /dev/null +++ b/doc/cpp/core/import-filter/xml.rst @@ -0,0 +1,6 @@ + +Generic XML +=========== + +.. doxygenclass:: orcus::orcus_xml + :members: diff --git a/doc/cpp/core/index.rst b/doc/cpp/core/index.rst new file mode 100644 index 0000000..3bb0623 --- /dev/null +++ b/doc/cpp/core/index.rst @@ -0,0 +1,25 @@ + +Core +==== + +This section presents the API's from the ``liborcus`` part of this library, +which contains the import filters that process various file formats containing +spreadsheet document contents or contents that can be loaded into spreadsheet +documents. It consists of the filter classes that parse the file streams and +put their contents into the document store via a set of pre-defined interfaces, +and these interfaces themselves. + +This module does not contain the document store itself, which is provided by +the ``liborcus-spreadsheet-model`` module. Alternatively, the user can +provide their own document store implementation wrapped inside a factory that +provides all required interfaces. + +.. 
toctree:: + :maxdepth: 1 + + import-filter/index.rst + tree-doc/index.rst + interface/index.rst + types/index.rst + utils.rst + config.rst diff --git a/doc/cpp/core/interface/global.rst b/doc/cpp/core/interface/global.rst new file mode 100644 index 0000000..0e73e47 --- /dev/null +++ b/doc/cpp/core/interface/global.rst @@ -0,0 +1,12 @@ + +Global interfaces +================= + +The following global interfaces are used to abstract the concrete filter and +document classes from orcus's CLI framework. + +.. doxygenclass:: orcus::iface::import_filter + :members: + +.. doxygenclass:: orcus::iface::document_dumper + :members: diff --git a/doc/cpp/core/interface/index.rst b/doc/cpp/core/interface/index.rst new file mode 100644 index 0000000..92c22d0 --- /dev/null +++ b/doc/cpp/core/interface/index.rst @@ -0,0 +1,9 @@ + +Interfaces +========== + +.. toctree:: + :maxdepth: 1 + + global.rst + spreadsheet/index.rst diff --git a/doc/cpp/core/interface/spreadsheet/document.rst b/doc/cpp/core/interface/spreadsheet/document.rst new file mode 100644 index 0000000..95eed76 --- /dev/null +++ b/doc/cpp/core/interface/spreadsheet/document.rst @@ -0,0 +1,21 @@ + +Document import +=============== + +The following interfaces handle importing of contents and properties related to +the entire document store. + +.. doxygenclass:: orcus::spreadsheet::iface::import_factory + :members: + +.. doxygenclass:: orcus::spreadsheet::iface::import_global_settings + :members: + +.. doxygenclass:: orcus::spreadsheet::iface::import_shared_strings + :members: + +.. doxygenclass:: orcus::spreadsheet::iface::import_named_expression + :members: + +.. doxygenclass:: orcus::spreadsheet::iface::import_reference_resolver + :members:
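To make the role of these interfaces more concrete, here is a minimal sketch of how an import filter is wired to the bundled document store through ``import_factory``. It follows the general pattern used by the library; the exact constructor signatures may vary between versions, and the input path is invented.

.. code-block:: cpp

    #include <orcus/spreadsheet/types.hpp>
    #include <orcus/spreadsheet/document.hpp>
    #include <orcus/spreadsheet/factory.hpp>
    #include <orcus/orcus_csv.hpp>

    int main()
    {
        // Document store with a fixed sheet size; the factory implements the
        // import interfaces documented above on top of this document.
        orcus::spreadsheet::range_size_t sheet_size{1048576, 16384};
        orcus::spreadsheet::document doc{sheet_size};
        orcus::spreadsheet::import_factory factory{doc};

        // Any of the import filters can drive the factory; orcus_csv is used
        // here purely as an example, with an invented input path.
        orcus::orcus_csv loader{&factory};
        loader.read_file("input.csv");

        return 0;
    }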
diff --git a/doc/cpp/core/interface/spreadsheet/document_export.rst b/doc/cpp/core/interface/spreadsheet/document_export.rst new file mode 100644 index 0000000..984c059 --- /dev/null +++ b/doc/cpp/core/interface/spreadsheet/document_export.rst @@ -0,0 +1,13 @@ + +Document export +=============== + +The following interfaces handle exporting of document content. Support for exporting +is still very limited in orcus. It is currently used only by :cpp:class:`~orcus::orcus_xml` +to re-export the content of an XML-mapped cell range as an XML output. + +.. doxygenclass:: orcus::spreadsheet::iface::export_sheet + :members: + +.. doxygenclass:: orcus::spreadsheet::iface::export_factory + :members: diff --git a/doc/cpp/core/interface/spreadsheet/index.rst b/doc/cpp/core/interface/spreadsheet/index.rst new file mode 100644 index 0000000..4254c22 --- /dev/null +++ b/doc/cpp/core/interface/spreadsheet/index.rst @@ -0,0 +1,15 @@ + +.. _spreadsheet-interfaces: + +Spreadsheet interfaces +====================== + +.. toctree:: + :maxdepth: 1 + + document.rst + sheet.rst + pivot.rst + styles.rst + view.rst + document_export.rst diff --git a/doc/cpp/core/interface/spreadsheet/pivot.rst b/doc/cpp/core/interface/spreadsheet/pivot.rst new file mode 100644 index 0000000..28d8df7 --- /dev/null +++ b/doc/cpp/core/interface/spreadsheet/pivot.rst @@ -0,0 +1,14 @@ + +Pivot table import +================== + +The following interfaces handle importing of contents related to pivot tables. + +.. doxygenclass:: orcus::spreadsheet::iface::import_pivot_cache_definition + :members: + +.. doxygenclass:: orcus::spreadsheet::iface::import_pivot_cache_field_group + :members: + +.. doxygenclass:: orcus::spreadsheet::iface::import_pivot_cache_records + :members: diff --git a/doc/cpp/core/interface/spreadsheet/sheet.rst b/doc/cpp/core/interface/spreadsheet/sheet.rst new file mode 100644 index 0000000..6482ac6 --- /dev/null +++ b/doc/cpp/core/interface/spreadsheet/sheet.rst @@ -0,0 +1,30 @@ + +Sheet import +============ + +The following interfaces handle importing of contents and properties related to +individual sheets. + +.. doxygenclass:: orcus::spreadsheet::iface::import_sheet + :members: + +.. doxygenclass:: orcus::spreadsheet::iface::import_sheet_properties + :members: + +.. doxygenclass:: orcus::spreadsheet::iface::import_data_table + :members: + +.. doxygenclass:: orcus::spreadsheet::iface::import_auto_filter + :members: + +.. doxygenclass:: orcus::spreadsheet::iface::import_conditional_format + :members: + +.. doxygenclass:: orcus::spreadsheet::iface::import_table + :members: + +.. doxygenclass:: orcus::spreadsheet::iface::import_formula + :members: + +.. doxygenclass:: orcus::spreadsheet::iface::import_array_formula + :members: diff --git a/doc/cpp/core/interface/spreadsheet/styles.rst b/doc/cpp/core/interface/spreadsheet/styles.rst new file mode 100644 index 0000000..1c4f9a8 --- /dev/null +++ b/doc/cpp/core/interface/spreadsheet/styles.rst @@ -0,0 +1,30 @@ + +Styles import +============= + +The following interfaces handle importing of properties related to various +formatting styles. + +.. doxygenclass:: orcus::spreadsheet::iface::import_styles + :members: + +.. doxygenclass:: orcus::spreadsheet::iface::import_font_style + :members: + +.. doxygenclass:: orcus::spreadsheet::iface::import_fill_style + :members: + +.. doxygenclass:: orcus::spreadsheet::iface::import_border_style + :members: + +.. doxygenclass:: orcus::spreadsheet::iface::import_cell_protection + :members: + +.. doxygenclass:: orcus::spreadsheet::iface::import_number_format + :members: + +.. doxygenclass:: orcus::spreadsheet::iface::import_xf + :members: + +.. doxygenclass:: orcus::spreadsheet::iface::import_cell_style + :members: diff --git a/doc/cpp/core/interface/spreadsheet/view.rst b/doc/cpp/core/interface/spreadsheet/view.rst new file mode 100644 index 0000000..21f393e --- /dev/null +++ b/doc/cpp/core/interface/spreadsheet/view.rst @@ -0,0 +1,9 @@ + +View properties import +====================== + +The following interface handles importing of view properties. View properties +affect how the content of a document may get displayed. + +.. doxygenclass:: orcus::spreadsheet::iface::import_sheet_view + :members: diff --git a/doc/cpp/core/tree-doc/index.rst b/doc/cpp/core/tree-doc/index.rst new file mode 100644 index 0000000..54b7a68 --- /dev/null +++ b/doc/cpp/core/tree-doc/index.rst @@ -0,0 +1,13 @@ + +Document tree +============= + +This section presents the API's for tree-structured documents, namely JSON and YAML +documents. + + +.. toctree:: + :maxdepth: 1 + + json.rst + yaml.rst diff --git a/doc/cpp/core/tree-doc/json.rst b/doc/cpp/core/tree-doc/json.rst new file mode 100644 index 0000000..390ac37 --- /dev/null +++ b/doc/cpp/core/tree-doc/json.rst @@ -0,0 +1,36 @@ + +JSON document tree +================== + +Document tree +------------- + +.. doxygenclass:: orcus::json::document_tree + :members: + +.. doxygenclass:: orcus::json::const_node + :members: + +.. doxygenclass:: orcus::json::node + :members: + +.. doxygenclass:: orcus::json::array + :members: + +.. doxygenclass:: orcus::json::object + :members: + +.. doxygenclass:: orcus::json::detail::init::node + :members: + +.. 
doxygenenum:: orcus::json::node_t + :project: orcus + +Exceptions +---------- + +.. doxygenclass:: orcus::json::document_error + :members: + +.. doxygenclass:: orcus::json::key_value_error + :members: diff --git a/doc/cpp/core/tree-doc/yaml.rst b/doc/cpp/core/tree-doc/yaml.rst new file mode 100644 index 0000000..547440b --- /dev/null +++ b/doc/cpp/core/tree-doc/yaml.rst @@ -0,0 +1,19 @@ + +YAML document tree +================== + +Document tree +------------- + +.. doxygenclass:: orcus::yaml::document_tree + :members: + +.. doxygenclass:: orcus::yaml::const_node + :members: + +Exceptions +---------- + +.. doxygenclass:: orcus::yaml::document_error + :members: + diff --git a/doc/cpp/core/types/core.rst b/doc/cpp/core/types/core.rst new file mode 100644 index 0000000..1decff2 --- /dev/null +++ b/doc/cpp/core/types/core.rst @@ -0,0 +1,68 @@ + +Core types +========== + +Integral types +-------------- + +.. doxygentypedef:: orcus::spreadsheet::row_t +.. doxygentypedef:: orcus::spreadsheet::col_t +.. doxygentypedef:: orcus::spreadsheet::sheet_t +.. doxygentypedef:: orcus::spreadsheet::color_elem_t +.. doxygentypedef:: orcus::spreadsheet::col_width_t +.. doxygentypedef:: orcus::spreadsheet::row_height_t +.. doxygentypedef:: orcus::spreadsheet::pivot_cache_id_t + + +Structs +------- + +.. doxygenstruct:: orcus::spreadsheet::address_t + :members: + +.. doxygenstruct:: orcus::spreadsheet::range_size_t + :members: + +.. doxygenstruct:: orcus::spreadsheet::range_t + :members: + +.. doxygenstruct:: orcus::spreadsheet::src_address_t + :members: + +.. doxygenstruct:: orcus::spreadsheet::src_range_t + :members: + +.. doxygenstruct:: orcus::spreadsheet::color_rgb_t + :members: + + +Enums +----- + +.. doxygenenum:: orcus::spreadsheet::error_value_t +.. doxygenenum:: orcus::spreadsheet::border_direction_t +.. doxygenenum:: orcus::spreadsheet::border_style_t +.. doxygenenum:: orcus::spreadsheet::fill_pattern_t +.. doxygenenum:: orcus::spreadsheet::strikethrough_style_t +.. doxygenenum:: orcus::spreadsheet::strikethrough_type_t +.. doxygenenum:: orcus::spreadsheet::strikethrough_width_t +.. doxygenenum:: orcus::spreadsheet::strikethrough_text_t +.. doxygenenum:: orcus::spreadsheet::formula_grammar_t +.. doxygenenum:: orcus::spreadsheet::formula_t +.. doxygenenum:: orcus::spreadsheet::formula_ref_context_t +.. doxygenenum:: orcus::spreadsheet::formula_error_policy_t +.. doxygenenum:: orcus::spreadsheet::underline_t +.. doxygenenum:: orcus::spreadsheet::underline_width_t +.. doxygenenum:: orcus::spreadsheet::underline_mode_t +.. doxygenenum:: orcus::spreadsheet::underline_type_t +.. doxygenenum:: orcus::spreadsheet::hor_alignment_t +.. doxygenenum:: orcus::spreadsheet::ver_alignment_t +.. doxygenenum:: orcus::spreadsheet::xf_category_t +.. doxygenenum:: orcus::spreadsheet::data_table_type_t +.. doxygenenum:: orcus::spreadsheet::totals_row_function_t +.. doxygenenum:: orcus::spreadsheet::conditional_format_t +.. doxygenenum:: orcus::spreadsheet::condition_operator_t +.. doxygenenum:: orcus::spreadsheet::condition_type_t +.. doxygenenum:: orcus::spreadsheet::condition_date_t +.. doxygenenum:: orcus::spreadsheet::databar_axis_t +.. doxygenenum:: orcus::spreadsheet::pivot_cache_group_by_t diff --git a/doc/cpp/core/types/index.rst b/doc/cpp/core/types/index.rst new file mode 100644 index 0000000..a79f3f8 --- /dev/null +++ b/doc/cpp/core/types/index.rst @@ -0,0 +1,12 @@ + +Types +===== + +These types are used throughout the spreadsheet import and export interfaces. + +.. 
toctree:: + :maxdepth: 1 + + core.rst + view.rst + diff --git a/doc/cpp/core/types/view.rst b/doc/cpp/core/types/view.rst new file mode 100644 index 0000000..a542a62 --- /dev/null +++ b/doc/cpp/core/types/view.rst @@ -0,0 +1,20 @@ + +View types +========== + +Structs +------- + +.. doxygenstruct:: orcus::spreadsheet::split_pane_t + :members: + +.. doxygenstruct:: orcus::spreadsheet::frozen_pane_t + :members: + + +Enums +----- + +.. doxygenenum:: orcus::spreadsheet::sheet_pane_t +.. doxygenenum:: orcus::spreadsheet::pane_state_t + diff --git a/doc/cpp/core/utils.rst b/doc/cpp/core/utils.rst new file mode 100644 index 0000000..480ae87 --- /dev/null +++ b/doc/cpp/core/utils.rst @@ -0,0 +1,19 @@ + +Utilities +========= + +Special values +-------------- + +.. doxygenfunction:: orcus::spreadsheet::get_default_column_width +.. doxygenfunction:: orcus::spreadsheet::get_default_row_height + +Type conversion +--------------- + +.. doxygenfunction:: orcus::spreadsheet::to_totals_row_function_enum +.. doxygenfunction:: orcus::spreadsheet::to_pivot_cache_group_by_enum +.. doxygenfunction:: orcus::spreadsheet::to_error_value_enum +.. doxygenfunction:: orcus::spreadsheet::to_color_rgb +.. doxygenfunction:: orcus::spreadsheet::to_rc_address +.. doxygenfunction:: orcus::spreadsheet::to_rc_range diff --git a/doc/cpp/index.rst b/doc/cpp/index.rst new file mode 100644 index 0000000..afb4909 --- /dev/null +++ b/doc/cpp/index.rst @@ -0,0 +1,10 @@ + +C++ API reference +================= + +.. toctree:: + :maxdepth: 2 + + parser/index.rst + core/index.rst + spreadsheet-doc/index.rst diff --git a/doc/cpp/parser/archive.rst b/doc/cpp/parser/archive.rst new file mode 100644 index 0000000..e5f7a25 --- /dev/null +++ b/doc/cpp/parser/archive.rst @@ -0,0 +1,21 @@ + +Archive +======= + +Zip archive +----------- + +.. doxygenstruct:: orcus::zip_file_entry_header + :members: + +.. doxygenclass:: orcus::zip_archive + :members: + +.. doxygenclass:: orcus::zip_archive_stream + :members: + +.. doxygenclass:: orcus::zip_archive_stream_fd + :members: + +.. doxygenclass:: orcus::zip_archive_stream_blob + :members: diff --git a/doc/cpp/parser/base64.rst b/doc/cpp/parser/base64.rst new file mode 100644 index 0000000..617db37 --- /dev/null +++ b/doc/cpp/parser/base64.rst @@ -0,0 +1,5 @@ + +Base64 +====== + +.. doxygenfile:: base64.hpp diff --git a/doc/cpp/parser/css.rst b/doc/cpp/parser/css.rst new file mode 100644 index 0000000..beb6cb2 --- /dev/null +++ b/doc/cpp/parser/css.rst @@ -0,0 +1,84 @@ +.. highlight:: cpp + +CSS parser +========== + +.. doxygenclass:: orcus::css_parser + :members: + +Parser handler +-------------- + +.. doxygenclass:: orcus::css_handler + :members: + +CSS types +--------- + +.. doxygenenum:: orcus::css::combinator_t +.. doxygenenum:: orcus::css::property_function_t +.. doxygenenum:: orcus::css::property_value_t + +.. doxygentypedef:: orcus::css::pseudo_element_t +.. doxygentypedef:: orcus::css::pseudo_class_t + +.. doxygenstruct:: orcus::css::rgba_color_t +.. doxygenstruct:: orcus::css::hsla_color_t + + +Constants +--------- + +Pseudo elements +^^^^^^^^^^^^^^^ + +.. doxygenvariable:: orcus::css::pseudo_element_after +.. doxygenvariable:: orcus::css::pseudo_element_before +.. doxygenvariable:: orcus::css::pseudo_element_first_letter +.. doxygenvariable:: orcus::css::pseudo_element_first_line +.. doxygenvariable:: orcus::css::pseudo_element_selection +.. doxygenvariable:: orcus::css::pseudo_element_backdrop + +Pseudo classes +^^^^^^^^^^^^^^ + +.. doxygenvariable:: orcus::css::pseudo_class_active +.. 
doxygenvariable:: orcus::css::pseudo_class_checked +.. doxygenvariable:: orcus::css::pseudo_class_default +.. doxygenvariable:: orcus::css::pseudo_class_dir +.. doxygenvariable:: orcus::css::pseudo_class_disabled +.. doxygenvariable:: orcus::css::pseudo_class_empty +.. doxygenvariable:: orcus::css::pseudo_class_enabled +.. doxygenvariable:: orcus::css::pseudo_class_first +.. doxygenvariable:: orcus::css::pseudo_class_first_child +.. doxygenvariable:: orcus::css::pseudo_class_first_of_type +.. doxygenvariable:: orcus::css::pseudo_class_fullscreen +.. doxygenvariable:: orcus::css::pseudo_class_focus +.. doxygenvariable:: orcus::css::pseudo_class_hover +.. doxygenvariable:: orcus::css::pseudo_class_indeterminate +.. doxygenvariable:: orcus::css::pseudo_class_in_range +.. doxygenvariable:: orcus::css::pseudo_class_invalid +.. doxygenvariable:: orcus::css::pseudo_class_lang +.. doxygenvariable:: orcus::css::pseudo_class_last_child +.. doxygenvariable:: orcus::css::pseudo_class_last_of_type +.. doxygenvariable:: orcus::css::pseudo_class_left +.. doxygenvariable:: orcus::css::pseudo_class_link +.. doxygenvariable:: orcus::css::pseudo_class_not +.. doxygenvariable:: orcus::css::pseudo_class_nth_child +.. doxygenvariable:: orcus::css::pseudo_class_nth_last_child +.. doxygenvariable:: orcus::css::pseudo_class_nth_last_of_type +.. doxygenvariable:: orcus::css::pseudo_class_nth_of_type +.. doxygenvariable:: orcus::css::pseudo_class_only_child +.. doxygenvariable:: orcus::css::pseudo_class_only_of_type +.. doxygenvariable:: orcus::css::pseudo_class_optional +.. doxygenvariable:: orcus::css::pseudo_class_out_of_range +.. doxygenvariable:: orcus::css::pseudo_class_read_only +.. doxygenvariable:: orcus::css::pseudo_class_read_write +.. doxygenvariable:: orcus::css::pseudo_class_required +.. doxygenvariable:: orcus::css::pseudo_class_right +.. doxygenvariable:: orcus::css::pseudo_class_root +.. doxygenvariable:: orcus::css::pseudo_class_scope +.. doxygenvariable:: orcus::css::pseudo_class_target +.. doxygenvariable:: orcus::css::pseudo_class_valid +.. doxygenvariable:: orcus::css::pseudo_class_visited + diff --git a/doc/cpp/parser/csv.rst b/doc/cpp/parser/csv.rst new file mode 100644 index 0000000..67e708a --- /dev/null +++ b/doc/cpp/parser/csv.rst @@ -0,0 +1,17 @@ +.. highlight:: cpp + +CSV parser +========== + +.. doxygenclass:: orcus::csv_parser + :members: + +.. doxygenstruct:: orcus::csv::parser_config + :members: + +Parser handler +-------------- + +.. doxygenclass:: orcus::csv_handler + :members: + diff --git a/doc/cpp/parser/exception.rst b/doc/cpp/parser/exception.rst new file mode 100644 index 0000000..e6b14cc --- /dev/null +++ b/doc/cpp/parser/exception.rst @@ -0,0 +1,37 @@ +.. highlight:: cpp + +Exceptions +========== + +.. doxygenclass:: orcus::general_error + :members: + +.. doxygenclass:: orcus::invalid_arg_error + :members: + +.. doxygenclass:: orcus::xml_structure_error + :members: + +.. doxygenclass:: orcus::json_structure_error + :members: + +.. doxygenclass:: orcus::invalid_map_error + :members: + +.. doxygenclass:: orcus::value_error + :members: + +.. doxygenclass:: orcus::xpath_error + :members: + +.. doxygenclass:: orcus::interface_error + :members: + +.. doxygenclass:: orcus::parse_error + :members: + +.. doxygenclass:: orcus::malformed_xml_error + :members: + +.. 
doxygenclass:: orcus::zip_error + :members: diff --git a/doc/cpp/parser/index.rst b/doc/cpp/parser/index.rst new file mode 100644 index 0000000..5a8141b --- /dev/null +++ b/doc/cpp/parser/index.rst @@ -0,0 +1,24 @@ + +Low-level parsers +================= + +This section presents the API's from the ``liborcus-parser`` part of this library, +which contains low-level parsers and utilities either used by or used in conjunction +with the parsers. The higher level import filters document models internally use +these parsers and utilities. + +.. toctree:: + :maxdepth: 1 + + xml.rst + xml_writer.rst + json.rst + css.rst + csv.rst + yaml.rst + types.rst + util.rst + stream.rst + base64.rst + archive.rst + exception.rst diff --git a/doc/cpp/parser/json.rst b/doc/cpp/parser/json.rst new file mode 100644 index 0000000..8aa402b --- /dev/null +++ b/doc/cpp/parser/json.rst @@ -0,0 +1,14 @@ +.. highlight:: cpp + +JSON parser +=========== + +.. doxygenclass:: orcus::json_parser + :members: + +Parser handler +-------------- + +.. doxygenclass:: orcus::json_handler + :members: + diff --git a/doc/cpp/parser/stream.rst b/doc/cpp/parser/stream.rst new file mode 100644 index 0000000..6f8ecde --- /dev/null +++ b/doc/cpp/parser/stream.rst @@ -0,0 +1,24 @@ +.. highlight:: cpp + +Stream +====== + +Stream buffers +-------------- + +.. doxygenclass:: orcus::file_content + :members: + +.. doxygenclass:: orcus::memory_content + :members: + +Utility functions +----------------- + +.. doxygenstruct:: orcus::line_with_offset + :members: + +.. doxygenfunction:: orcus::create_parse_error_output +.. doxygenfunction:: orcus::locate_line_with_offset +.. doxygenfunction:: orcus::locate_first_different_char +.. doxygenfunction:: orcus::calc_logical_string_length diff --git a/doc/cpp/parser/types.rst b/doc/cpp/parser/types.rst new file mode 100644 index 0000000..6c53267 --- /dev/null +++ b/doc/cpp/parser/types.rst @@ -0,0 +1,43 @@ + +Basic types +=========== + +Constants +--------- + +.. doxygenvariable:: orcus::INDEX_NOT_FOUND +.. doxygenvariable:: orcus::XMLNS_UNKNOWN_ID +.. doxygenvariable:: orcus::XML_UNKNOWN_TOKEN + +Type aliases +------------ + +.. doxygentypedef:: orcus::xml_token_attrs_t +.. doxygentypedef:: orcus::xml_token_t +.. doxygentypedef:: orcus::xmlns_id_t + +Structs +------- + +.. doxygenstruct:: orcus::date_time_t +.. doxygenstruct:: orcus::length_t +.. doxygenstruct:: orcus::parse_error_value_t +.. doxygenstruct:: orcus::xml_declaration_t +.. doxygenstruct:: orcus::xml_name_t +.. doxygenstruct:: orcus::xml_token_attr_t +.. doxygenstruct:: orcus::xml_token_element_t + +Enums +----- + +.. doxygenenum:: orcus::character_set_t +.. doxygenenum:: orcus::dump_format_t +.. doxygenenum:: orcus::format_t +.. doxygenenum:: orcus::length_unit_t + +Utility functions +----------------- + +.. doxygenfunction:: orcus::get_dump_format_entries +.. doxygenfunction:: orcus::to_character_set +.. doxygenfunction:: orcus::to_dump_format_enum diff --git a/doc/cpp/parser/util.rst b/doc/cpp/parser/util.rst new file mode 100644 index 0000000..2d3ec0d --- /dev/null +++ b/doc/cpp/parser/util.rst @@ -0,0 +1,13 @@ +.. highlight:: cpp + +Utilities +========= + +.. doxygenclass:: orcus::string_pool + :members: + +.. doxygenclass:: orcus::tokens + :members: + +.. doxygenclass:: orcus::cell_buffer + :members: diff --git a/doc/cpp/parser/xml.rst b/doc/cpp/parser/xml.rst new file mode 100644 index 0000000..462c466 --- /dev/null +++ b/doc/cpp/parser/xml.rst @@ -0,0 +1,65 @@ +.. 
highlight:: cpp + +XML parsers +=========== + +SAX base parser +--------------- + +.. doxygenclass:: orcus::sax_parser + :members: + +.. doxygenstruct:: orcus::sax_parser_default_config + :members: + +.. doxygenclass:: orcus::sax_handler + :members: + +.. doxygenstruct:: orcus::sax::parser_element + :members: + +.. doxygenstruct:: orcus::sax::parser_attribute + :members: + +SAX namespace parser +-------------------- + +.. doxygenclass:: orcus::sax_ns_parser + :members: + +.. doxygenclass:: orcus::sax_ns_handler + :members: + +.. doxygenstruct:: orcus::sax_ns_parser_element + :members: + +.. doxygenstruct:: orcus::sax_ns_parser_attribute + :members: + +SAX token parser +---------------- + +.. doxygenclass:: orcus::sax_token_parser + :members: + +.. doxygenclass:: orcus::sax_token_handler + :members: + +Namespace +--------- + +.. doxygenclass:: orcus::xmlns_repository + :members: + +.. doxygenclass:: orcus::xmlns_context + :members: + +Common +------ + +.. doxygenstruct:: orcus::sax::doctype_declaration + :members: + +.. doxygenfunction:: orcus::sax::decode_xml_encoded_char + +.. doxygenfunction:: orcus::sax::decode_xml_unicode_char diff --git a/doc/cpp/parser/xml_writer.rst b/doc/cpp/parser/xml_writer.rst new file mode 100644 index 0000000..1092ac4 --- /dev/null +++ b/doc/cpp/parser/xml_writer.rst @@ -0,0 +1,7 @@ +.. highlight:: cpp + +XML writer +========== + +.. doxygenclass:: orcus::xml_writer + :members: diff --git a/doc/cpp/parser/yaml.rst b/doc/cpp/parser/yaml.rst new file mode 100644 index 0000000..0a1107a --- /dev/null +++ b/doc/cpp/parser/yaml.rst @@ -0,0 +1,14 @@ +.. highlight:: cpp + +YAML parser +=========== + +.. doxygenclass:: orcus::yaml_parser + :members: + +Parser Handler +-------------- + +.. doxygenclass:: orcus::yaml_handler + :members: + diff --git a/doc/cpp/spreadsheet-doc/document.rst b/doc/cpp/spreadsheet-doc/document.rst new file mode 100644 index 0000000..0b03610 --- /dev/null +++ b/doc/cpp/spreadsheet-doc/document.rst @@ -0,0 +1,9 @@ + +Document +======== + +.. doxygenclass:: orcus::spreadsheet::document + :members: + +.. doxygenstruct:: orcus::spreadsheet::document_config + :members: diff --git a/doc/cpp/spreadsheet-doc/import-export.rst b/doc/cpp/spreadsheet-doc/import-export.rst new file mode 100644 index 0000000..0c5b801 --- /dev/null +++ b/doc/cpp/spreadsheet-doc/import-export.rst @@ -0,0 +1,50 @@ + +Import and export +================= + +The classes in this section can be viewed as the points of entry for initiating +import or export processes. + +The :cpp:class:`~orcus::spreadsheet::import_factory` class wraps +:cpp:class:`~orcus::spreadsheet::document` as its destination storage then +gets passed to an import filter class that parses the content of an input file +and populates the destination document store. + +The :cpp:class:`~orcus::spreadsheet::import_styles` class works similarly to +:cpp:class:`~orcus::spreadsheet::import_factory` in that it wraps +:cpp:class:`~orcus::spreadsheet::styles` as its destination storage then gets +passed to a styles import parser in order to get the destination store +populated. Although this class is used by +:cpp:class:`~orcus::spreadsheet::import_factory` internally, it can also be +instantiated independently to allow loading of just the styles data. + +The :cpp:class:`~orcus::spreadsheet::export_factory` also works in a similar +fashion, however; the export functionality of the orcus library is currently +very limited and should be considered experimental. 
It is currently only used +by :cpp:class:`~orcus::orcus_xml` to export the content of a document which +was originally imported from an XML document. + +.. warning:: + + The export functionality of the orcus library is highly experimental. + + +Import factory +-------------- + +.. doxygenclass:: orcus::spreadsheet::import_factory + :members: + + +Import styles +------------- + +.. doxygenclass:: orcus::spreadsheet::import_styles + :members: + + +Export factory +-------------- + +.. doxygenclass:: orcus::spreadsheet::export_factory + :members: diff --git a/doc/cpp/spreadsheet-doc/index.rst b/doc/cpp/spreadsheet-doc/index.rst new file mode 100644 index 0000000..b0c2c94 --- /dev/null +++ b/doc/cpp/spreadsheet-doc/index.rst @@ -0,0 +1,21 @@ + +Spreadsheet document +==================== + +This section contains the API's related to the spreadsheet document storage, which +is provided by the ``liborcus-spreadsheet`` part of this library. This ``liborcus-spreadsheet`` +module has dependency on the :ref:`ixion <ixion:index>` library in order to perform +computation of formula cells. + +.. toctree:: + :maxdepth: 1 + + types.rst + document.rst + sheet.rst + table.rst + pivot.rst + styles.rst + view.rst + shared-strings.rst + import-export.rst diff --git a/doc/cpp/spreadsheet-doc/pivot.rst b/doc/cpp/spreadsheet-doc/pivot.rst new file mode 100644 index 0000000..00c1a16 --- /dev/null +++ b/doc/cpp/spreadsheet-doc/pivot.rst @@ -0,0 +1,21 @@ + +Pivot table +=========== + +.. doxygenstruct:: orcus::spreadsheet::pivot_cache_record_value_t + :members: + +.. doxygenstruct:: orcus::spreadsheet::pivot_cache_item_t + :members: + +.. doxygenstruct:: orcus::spreadsheet::pivot_cache_group_data_t + :members: + +.. doxygenstruct:: orcus::spreadsheet::pivot_cache_field_t + :members: + +.. doxygenclass:: orcus::spreadsheet::pivot_cache + :members: + +.. doxygenclass:: orcus::spreadsheet::pivot_collection + :members: diff --git a/doc/cpp/spreadsheet-doc/shared-strings.rst b/doc/cpp/spreadsheet-doc/shared-strings.rst new file mode 100644 index 0000000..799e57b --- /dev/null +++ b/doc/cpp/spreadsheet-doc/shared-strings.rst @@ -0,0 +1,10 @@ + +Shared strings +============== + +.. doxygenclass:: orcus::spreadsheet::shared_strings + :members: + +.. doxygenstruct:: orcus::spreadsheet::format_run +.. doxygentypedef:: orcus::spreadsheet::format_runs_t + diff --git a/doc/cpp/spreadsheet-doc/sheet.rst b/doc/cpp/spreadsheet-doc/sheet.rst new file mode 100644 index 0000000..e1cf87e --- /dev/null +++ b/doc/cpp/spreadsheet-doc/sheet.rst @@ -0,0 +1,6 @@ + +Sheet +===== + +.. doxygenclass:: orcus::spreadsheet::sheet + :members: diff --git a/doc/cpp/spreadsheet-doc/styles.rst b/doc/cpp/spreadsheet-doc/styles.rst new file mode 100644 index 0000000..59d7f77 --- /dev/null +++ b/doc/cpp/spreadsheet-doc/styles.rst @@ -0,0 +1,30 @@ + +Styles +====== + +.. doxygenclass:: orcus::spreadsheet::styles + :members: + +.. doxygenstruct:: orcus::spreadsheet::font_t + :members: + +.. doxygenstruct:: orcus::spreadsheet::fill_t + :members: + +.. doxygenstruct:: orcus::spreadsheet::border_attrs_t + :members: + +.. doxygenstruct:: orcus::spreadsheet::border_t + :members: + +.. doxygenstruct:: orcus::spreadsheet::protection_t + :members: + +.. doxygenstruct:: orcus::spreadsheet::number_format_t + :members: + +.. doxygenstruct:: orcus::spreadsheet::cell_format_t + :members: + +.. 
doxygenstruct:: orcus::spreadsheet::cell_style_t + :members: diff --git a/doc/cpp/spreadsheet-doc/table.rst b/doc/cpp/spreadsheet-doc/table.rst new file mode 100644 index 0000000..4b44e42 --- /dev/null +++ b/doc/cpp/spreadsheet-doc/table.rst @@ -0,0 +1,9 @@ + +Table and autofilter +==================== + +.. doxygenstruct:: orcus::spreadsheet::auto_filter_column_t +.. doxygenstruct:: orcus::spreadsheet::auto_filter_t +.. doxygenstruct:: orcus::spreadsheet::table_column_t +.. doxygenstruct:: orcus::spreadsheet::table_style_t +.. doxygenstruct:: orcus::spreadsheet::table_t diff --git a/doc/cpp/spreadsheet-doc/types.rst b/doc/cpp/spreadsheet-doc/types.rst new file mode 100644 index 0000000..51a8489 --- /dev/null +++ b/doc/cpp/spreadsheet-doc/types.rst @@ -0,0 +1,5 @@ + +Types +===== + +.. doxygenstruct:: orcus::spreadsheet::color_t diff --git a/doc/cpp/spreadsheet-doc/view.rst b/doc/cpp/spreadsheet-doc/view.rst new file mode 100644 index 0000000..79c07dd --- /dev/null +++ b/doc/cpp/spreadsheet-doc/view.rst @@ -0,0 +1,9 @@ + +View +==== + +.. doxygenclass:: orcus::spreadsheet::view + :members: + +.. doxygenclass:: orcus::spreadsheet::sheet_view + :members: diff --git a/doc/doxygen.conf b/doc/doxygen.conf new file mode 100644 index 0000000..375f5d2 --- /dev/null +++ b/doc/doxygen.conf @@ -0,0 +1,2276 @@ +# Doxyfile 1.8.6 + +# This file describes the settings to be used by the documentation system +# doxygen (www.doxygen.org) for a project. +# +# All text after a double hash (##) is considered a comment and is placed in +# front of the TAG it is preceding. +# +# All text after a single hash (#) is considered a comment and will be ignored. +# The format is: +# TAG = value [value, ...] +# For lists, items can also be appended using: +# TAG += value [value, ...] +# Values that contain spaces should be placed between quotes (\" \"). + +#--------------------------------------------------------------------------- +# Project related configuration options +#--------------------------------------------------------------------------- + +# This tag specifies the encoding used for all characters in the config file +# that follow. The default is UTF-8 which is also the encoding used for all text +# before the first occurrence of this tag. Doxygen uses libiconv (or the iconv +# built into libc) for the transcoding. See http://www.gnu.org/software/libiconv +# for the list of possible encodings. +# The default value is: UTF-8. + +DOXYFILE_ENCODING = UTF-8 + +# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by +# double-quotes, unless you are using Doxywizard) that should identify the +# project for which the documentation is generated. This name is used in the +# title of most generated pages and in a few other places. +# The default value is: My Project. + +PROJECT_NAME = "Orcus" + +# The PROJECT_NUMBER tag can be used to enter a project or revision number. This +# could be handy for archiving the generated documentation or if some version +# control system is used. + +PROJECT_NUMBER = + +# Using the PROJECT_BRIEF tag one can provide an optional one line description +# for a project that appears at the top of each page and should give viewer a +# quick idea about the purpose of the project. Keep the description short. + +PROJECT_BRIEF = + +# With the PROJECT_LOGO tag one can specify an logo or icon that is included in +# the documentation. The maximum height of the logo should not exceed 55 pixels +# and the maximum width should not exceed 200 pixels. 
Doxygen will copy the logo +# to the output directory. + +PROJECT_LOGO = + +# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path +# into which the generated documentation will be written. If a relative path is +# entered, it will be relative to the location where doxygen was started. If +# left blank the current directory will be used. + +OUTPUT_DIRECTORY = _doxygen + +# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create 4096 sub- +# directories (in 2 levels) under the output directory of each output format and +# will distribute the generated files over these directories. Enabling this +# option can be useful when feeding doxygen a huge amount of source files, where +# putting all generated files in the same directory would otherwise causes +# performance problems for the file system. +# The default value is: NO. + +CREATE_SUBDIRS = NO + +# The OUTPUT_LANGUAGE tag is used to specify the language in which all +# documentation generated by doxygen is written. Doxygen will use this +# information to generate all constant output in the proper language. +# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese, +# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States), +# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian, +# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages), +# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian, +# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian, +# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish, +# Ukrainian and Vietnamese. +# The default value is: English. + +OUTPUT_LANGUAGE = English + +# If the BRIEF_MEMBER_DESC tag is set to YES doxygen will include brief member +# descriptions after the members that are listed in the file and class +# documentation (similar to Javadoc). Set to NO to disable this. +# The default value is: YES. + +BRIEF_MEMBER_DESC = YES + +# If the REPEAT_BRIEF tag is set to YES doxygen will prepend the brief +# description of a member or function before the detailed description +# +# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the +# brief descriptions will be completely suppressed. +# The default value is: YES. + +REPEAT_BRIEF = YES + +# This tag implements a quasi-intelligent brief description abbreviator that is +# used to form the text in various listings. Each string in this list, if found +# as the leading text of the brief description, will be stripped from the text +# and the result, after processing the whole list, is used as the annotated +# text. Otherwise, the brief description is used as-is. If left blank, the +# following values are used ($name is automatically replaced with the name of +# the entity):The $name class, The $name widget, The $name file, is, provides, +# specifies, contains, represents, a, an and the. + +ABBREVIATE_BRIEF = + +# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then +# doxygen will generate a detailed section even if there is only a brief +# description. +# The default value is: NO. + +ALWAYS_DETAILED_SEC = NO + +# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all +# inherited members of a class in the documentation of that class as if those +# members were ordinary class members. Constructors, destructors and assignment +# operators of the base classes will not be shown. +# The default value is: NO. 
+ +INLINE_INHERITED_MEMB = NO + +# If the FULL_PATH_NAMES tag is set to YES doxygen will prepend the full path +# before files name in the file list and in the header files. If set to NO the +# shortest path that makes the file name unique will be used +# The default value is: YES. + +FULL_PATH_NAMES = YES + +# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path. +# Stripping is only done if one of the specified strings matches the left-hand +# part of the path. The tag can be used to show relative paths in the file list. +# If left blank the directory from which doxygen is run is used as the path to +# strip. +# +# Note that you can specify absolute paths here, but also relative paths, which +# will be relative from the directory where doxygen is started. +# This tag requires that the tag FULL_PATH_NAMES is set to YES. + +STRIP_FROM_PATH = + +# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the +# path mentioned in the documentation of a class, which tells the reader which +# header file to include in order to use a class. If left blank only the name of +# the header file containing the class definition is used. Otherwise one should +# specify the list of include paths that are normally passed to the compiler +# using the -I flag. + +STRIP_FROM_INC_PATH = + +# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but +# less readable) file names. This can be useful is your file systems doesn't +# support long names like on DOS, Mac, or CD-ROM. +# The default value is: NO. + +SHORT_NAMES = NO + +# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the +# first line (until the first dot) of a Javadoc-style comment as the brief +# description. If set to NO, the Javadoc-style will behave just like regular Qt- +# style comments (thus requiring an explicit @brief command for a brief +# description.) +# The default value is: NO. + +JAVADOC_AUTOBRIEF = NO + +# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first +# line (until the first dot) of a Qt-style comment as the brief description. If +# set to NO, the Qt-style will behave just like regular Qt-style comments (thus +# requiring an explicit \brief command for a brief description.) +# The default value is: NO. + +QT_AUTOBRIEF = NO + +# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a +# multi-line C++ special comment block (i.e. a block of //! or /// comments) as +# a brief description. This used to be the default behavior. The new default is +# to treat a multi-line C++ comment block as a detailed description. Set this +# tag to YES if you prefer the old behavior instead. +# +# Note that setting this tag to YES also means that rational rose comments are +# not recognized any more. +# The default value is: NO. + +MULTILINE_CPP_IS_BRIEF = NO + +# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the +# documentation from any documented member that it re-implements. +# The default value is: YES. + +INHERIT_DOCS = YES + +# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce a +# new page for each member. If set to NO, the documentation of a member will be +# part of the file/class/namespace that contains it. +# The default value is: NO. + +SEPARATE_MEMBER_PAGES = NO + +# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen +# uses this value to replace tabs by spaces in code fragments. +# Minimum value: 1, maximum value: 16, default value: 4. 
+ +TAB_SIZE = 4 + +# This tag can be used to specify a number of aliases that act as commands in +# the documentation. An alias has the form: +# name=value +# For example adding +# "sideeffect=@par Side Effects:\n" +# will allow you to put the command \sideeffect (or @sideeffect) in the +# documentation, which will result in a user-defined paragraph with heading +# "Side Effects:". You can put \n's in the value part of an alias to insert +# newlines. + +ALIASES = + +# This tag can be used to specify a number of word-keyword mappings (TCL only). +# A mapping has the form "name=value". For example adding "class=itcl::class" +# will allow you to use the command class in the itcl::class meaning. + +TCL_SUBST = + +# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources +# only. Doxygen will then generate output that is more tailored for C. For +# instance, some of the names that are used will be different. The list of all +# members will be omitted, etc. +# The default value is: NO. + +OPTIMIZE_OUTPUT_FOR_C = NO + +# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or +# Python sources only. Doxygen will then generate output that is more tailored +# for that language. For instance, namespaces will be presented as packages, +# qualified scopes will look different, etc. +# The default value is: NO. + +OPTIMIZE_OUTPUT_JAVA = NO + +# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran +# sources. Doxygen will then generate output that is tailored for Fortran. +# The default value is: NO. + +OPTIMIZE_FOR_FORTRAN = NO + +# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL +# sources. Doxygen will then generate output that is tailored for VHDL. +# The default value is: NO. + +OPTIMIZE_OUTPUT_VHDL = NO + +# Doxygen selects the parser to use depending on the extension of the files it +# parses. With this tag you can assign which parser to use for a given +# extension. Doxygen has a built-in mapping, but you can override or extend it +# using this tag. The format is ext=language, where ext is a file extension, and +# language is one of the parsers supported by doxygen: IDL, Java, Javascript, +# C#, C, C++, D, PHP, Objective-C, Python, Fortran, VHDL. For instance to make +# doxygen treat .inc files as Fortran files (default is PHP), and .f files as C +# (default is Fortran), use: inc=Fortran f=C. +# +# Note For files without extension you can use no_extension as a placeholder. +# +# Note that for custom extensions you also need to set FILE_PATTERNS otherwise +# the files are not read by doxygen. + +EXTENSION_MAPPING = + +# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments +# according to the Markdown format, which allows for more readable +# documentation. See http://daringfireball.net/projects/markdown/ for details. +# The output of markdown processing is further processed by doxygen, so you can +# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in +# case of backward compatibilities issues. +# The default value is: YES. + +MARKDOWN_SUPPORT = YES + +# When enabled doxygen tries to link words that correspond to documented +# classes, or namespaces to their corresponding documentation. Such a link can +# be prevented in individual cases by by putting a % sign in front of the word +# or globally by setting AUTOLINK_SUPPORT to NO. +# The default value is: YES. + +AUTOLINK_SUPPORT = YES + +# If you use STL classes (i.e. std::string, std::vector, etc.) 
but do not want +# to include (a tag file for) the STL sources as input, then you should set this +# tag to YES in order to let doxygen match functions declarations and +# definitions whose arguments contain STL classes (e.g. func(std::string); +# versus func(std::string) {}). This also make the inheritance and collaboration +# diagrams that involve STL classes more complete and accurate. +# The default value is: NO. + +BUILTIN_STL_SUPPORT = NO + +# If you use Microsoft's C++/CLI language, you should set this option to YES to +# enable parsing support. +# The default value is: NO. + +CPP_CLI_SUPPORT = NO + +# Set the SIP_SUPPORT tag to YES if your project consists of sip (see: +# http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. Doxygen +# will parse them like normal C++ but will assume all classes use public instead +# of private inheritance when no explicit protection keyword is present. +# The default value is: NO. + +SIP_SUPPORT = NO + +# For Microsoft's IDL there are propget and propput attributes to indicate +# getter and setter methods for a property. Setting this option to YES will make +# doxygen to replace the get and set methods by a property in the documentation. +# This will only work if the methods are indeed getting or setting a simple +# type. If this is not the case, or you want to show the methods anyway, you +# should set this option to NO. +# The default value is: YES. + +IDL_PROPERTY_SUPPORT = YES + +# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC +# tag is set to YES, then doxygen will reuse the documentation of the first +# member in the group (if any) for the other members of the group. By default +# all members of a group must be documented explicitly. +# The default value is: NO. + +DISTRIBUTE_GROUP_DOC = NO + +# Set the SUBGROUPING tag to YES to allow class member groups of the same type +# (for instance a group of public functions) to be put as a subgroup of that +# type (e.g. under the Public Functions section). Set it to NO to prevent +# subgrouping. Alternatively, this can be done per class using the +# \nosubgrouping command. +# The default value is: YES. + +SUBGROUPING = YES + +# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions +# are shown inside the group in which they are included (e.g. using \ingroup) +# instead of on a separate page (for HTML and Man pages) or section (for LaTeX +# and RTF). +# +# Note that this feature does not work in combination with +# SEPARATE_MEMBER_PAGES. +# The default value is: NO. + +INLINE_GROUPED_CLASSES = NO + +# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions +# with only public data fields or simple typedef fields will be shown inline in +# the documentation of the scope in which they are defined (i.e. file, +# namespace, or group documentation), provided this scope is documented. If set +# to NO, structs, classes, and unions are shown on a separate page (for HTML and +# Man pages) or section (for LaTeX and RTF). +# The default value is: NO. + +INLINE_SIMPLE_STRUCTS = NO + +# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or +# enum is documented as struct, union, or enum with the name of the typedef. So +# typedef struct TypeS {} TypeT, will appear in the documentation as a struct +# with name TypeT. When disabled the typedef will appear as a member of a file, +# namespace, or class. And the struct will be named TypeS. 
This can typically be +# useful for C code in case the coding convention dictates that all compound +# types are typedef'ed and only the typedef is referenced, never the tag name. +# The default value is: NO. + +TYPEDEF_HIDES_STRUCT = NO + +# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This +# cache is used to resolve symbols given their name and scope. Since this can be +# an expensive process and often the same symbol appears multiple times in the +# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small +# doxygen will become slower. If the cache is too large, memory is wasted. The +# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range +# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536 +# symbols. At the end of a run doxygen will report the cache usage and suggest +# the optimal cache size from a speed point of view. +# Minimum value: 0, maximum value: 9, default value: 0. + +LOOKUP_CACHE_SIZE = 0 + +#--------------------------------------------------------------------------- +# Build related configuration options +#--------------------------------------------------------------------------- + +# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in +# documentation are documented, even if no documentation was available. Private +# class members and static file members will be hidden unless the +# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES. +# Note: This will also disable the warnings about undocumented members that are +# normally produced when WARNINGS is set to YES. +# The default value is: NO. + +EXTRACT_ALL = NO + +# If the EXTRACT_PRIVATE tag is set to YES all private members of a class will +# be included in the documentation. +# The default value is: NO. + +EXTRACT_PRIVATE = NO + +# If the EXTRACT_PACKAGE tag is set to YES all members with package or internal +# scope will be included in the documentation. +# The default value is: NO. + +EXTRACT_PACKAGE = NO + +# If the EXTRACT_STATIC tag is set to YES all static members of a file will be +# included in the documentation. +# The default value is: NO. + +EXTRACT_STATIC = NO + +# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) defined +# locally in source files will be included in the documentation. If set to NO +# only classes defined in header files are included. Does not have any effect +# for Java sources. +# The default value is: YES. + +EXTRACT_LOCAL_CLASSES = YES + +# This flag is only useful for Objective-C code. When set to YES local methods, +# which are defined in the implementation section but not in the interface are +# included in the documentation. If set to NO only methods in the interface are +# included. +# The default value is: NO. + +EXTRACT_LOCAL_METHODS = NO + +# If this flag is set to YES, the members of anonymous namespaces will be +# extracted and appear in the documentation as a namespace called +# 'anonymous_namespace{file}', where file will be replaced with the base name of +# the file that contains the anonymous namespace. By default anonymous namespace +# are hidden. +# The default value is: NO. + +EXTRACT_ANON_NSPACES = NO + +# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all +# undocumented members inside documented classes or files. If set to NO these +# members will be included in the various overviews, but no documentation +# section is generated. This option has no effect if EXTRACT_ALL is enabled. 
+# The default value is: NO. + +HIDE_UNDOC_MEMBERS = NO + +# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all +# undocumented classes that are normally visible in the class hierarchy. If set +# to NO these classes will be included in the various overviews. This option has +# no effect if EXTRACT_ALL is enabled. +# The default value is: NO. + +HIDE_UNDOC_CLASSES = NO + +# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend +# (class|struct|union) declarations. If set to NO these declarations will be +# included in the documentation. +# The default value is: NO. + +HIDE_FRIEND_COMPOUNDS = NO + +# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any +# documentation blocks found inside the body of a function. If set to NO these +# blocks will be appended to the function's detailed documentation block. +# The default value is: NO. + +HIDE_IN_BODY_DOCS = NO + +# The INTERNAL_DOCS tag determines if documentation that is typed after a +# \internal command is included. If the tag is set to NO then the documentation +# will be excluded. Set it to YES to include the internal documentation. +# The default value is: NO. + +INTERNAL_DOCS = NO + +# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file +# names in lower-case letters. If set to YES upper-case letters are also +# allowed. This is useful if you have classes or files whose names only differ +# in case and if your file system supports case sensitive file names. Windows +# and Mac users are advised to set this option to NO. +# The default value is: system dependent. + +CASE_SENSE_NAMES = YES + +# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with +# their full class and namespace scopes in the documentation. If set to YES the +# scope will be hidden. +# The default value is: NO. + +HIDE_SCOPE_NAMES = NO + +# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of +# the files that are included by a file in the documentation of that file. +# The default value is: YES. + +SHOW_INCLUDE_FILES = YES + +# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each +# grouped member an include statement to the documentation, telling the reader +# which file to include in order to use the member. +# The default value is: NO. + +SHOW_GROUPED_MEMB_INC = NO + +# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include +# files with double quotes in the documentation rather than with sharp brackets. +# The default value is: NO. + +FORCE_LOCAL_INCLUDES = NO + +# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the +# documentation for inline members. +# The default value is: YES. + +INLINE_INFO = YES + +# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the +# (detailed) documentation of file and class members alphabetically by member +# name. If set to NO the members will appear in declaration order. +# The default value is: YES. + +SORT_MEMBER_DOCS = YES + +# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief +# descriptions of file, namespace and class members alphabetically by member +# name. If set to NO the members will appear in declaration order. Note that +# this will also influence the order of the classes in the class list. +# The default value is: NO. 
+ +SORT_BRIEF_DOCS = NO + +# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the +# (brief and detailed) documentation of class members so that constructors and +# destructors are listed first. If set to NO the constructors will appear in the +# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS. +# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief +# member documentation. +# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting +# detailed member documentation. +# The default value is: NO. + +SORT_MEMBERS_CTORS_1ST = NO + +# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy +# of group names into alphabetical order. If set to NO the group names will +# appear in their defined order. +# The default value is: NO. + +SORT_GROUP_NAMES = NO + +# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by +# fully-qualified names, including namespaces. If set to NO, the class list will +# be sorted only by class name, not including the namespace part. +# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. +# Note: This option applies only to the class list, not to the alphabetical +# list. +# The default value is: NO. + +SORT_BY_SCOPE_NAME = NO + +# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper +# type resolution of all parameters of a function it will reject a match between +# the prototype and the implementation of a member function even if there is +# only one candidate or it is obvious which candidate to choose by doing a +# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still +# accept a match between prototype and implementation in such cases. +# The default value is: NO. + +STRICT_PROTO_MATCHING = NO + +# The GENERATE_TODOLIST tag can be used to enable ( YES) or disable ( NO) the +# todo list. This list is created by putting \todo commands in the +# documentation. +# The default value is: YES. + +GENERATE_TODOLIST = YES + +# The GENERATE_TESTLIST tag can be used to enable ( YES) or disable ( NO) the +# test list. This list is created by putting \test commands in the +# documentation. +# The default value is: YES. + +GENERATE_TESTLIST = YES + +# The GENERATE_BUGLIST tag can be used to enable ( YES) or disable ( NO) the bug +# list. This list is created by putting \bug commands in the documentation. +# The default value is: YES. + +GENERATE_BUGLIST = YES + +# The GENERATE_DEPRECATEDLIST tag can be used to enable ( YES) or disable ( NO) +# the deprecated list. This list is created by putting \deprecated commands in +# the documentation. +# The default value is: YES. + +GENERATE_DEPRECATEDLIST= YES + +# The ENABLED_SECTIONS tag can be used to enable conditional documentation +# sections, marked by \if <section_label> ... \endif and \cond <section_label> +# ... \endcond blocks. + +ENABLED_SECTIONS = + +# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the +# initial value of a variable or macro / define can have for it to appear in the +# documentation. If the initializer consists of more lines than specified here +# it will be hidden. Use a value of 0 to hide initializers completely. The +# appearance of the value of individual variables and macros / defines can be +# controlled using \showinitializer or \hideinitializer command in the +# documentation regardless of this setting. +# Minimum value: 0, maximum value: 10000, default value: 30. 
+ +MAX_INITIALIZER_LINES = 30 + +# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at +# the bottom of the documentation of classes and structs. If set to YES the list +# will mention the files that were used to generate the documentation. +# The default value is: YES. + +SHOW_USED_FILES = NO + +# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This +# will remove the Files entry from the Quick Index and from the Folder Tree View +# (if specified). +# The default value is: YES. + +SHOW_FILES = YES + +# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces +# page. This will remove the Namespaces entry from the Quick Index and from the +# Folder Tree View (if specified). +# The default value is: YES. + +SHOW_NAMESPACES = YES + +# The FILE_VERSION_FILTER tag can be used to specify a program or script that +# doxygen should invoke to get the current version for each file (typically from +# the version control system). Doxygen will invoke the program by executing (via +# popen()) the command command input-file, where command is the value of the +# FILE_VERSION_FILTER tag, and input-file is the name of an input file provided +# by doxygen. Whatever the program writes to standard output is used as the file +# version. For an example see the documentation. + +FILE_VERSION_FILTER = + +# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed +# by doxygen. The layout file controls the global structure of the generated +# output files in an output format independent way. To create the layout file +# that represents doxygen's defaults, run doxygen with the -l option. You can +# optionally specify a file name after the option, if omitted DoxygenLayout.xml +# will be used as the name of the layout file. +# +# Note that if you run doxygen from a directory containing a file called +# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE +# tag is left empty. + +LAYOUT_FILE = + +# The CITE_BIB_FILES tag can be used to specify one or more bib files containing +# the reference definitions. This must be a list of .bib files. The .bib +# extension is automatically appended if omitted. This requires the bibtex tool +# to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info. +# For LaTeX the style of the bibliography can be controlled using +# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the +# search path. Do not use file names with spaces, bibtex cannot handle them. See +# also \cite for info how to create references. + +CITE_BIB_FILES = + +#--------------------------------------------------------------------------- +# Configuration options related to warning and progress messages +#--------------------------------------------------------------------------- + +# The QUIET tag can be used to turn on/off the messages that are generated to +# standard output by doxygen. If QUIET is set to YES this implies that the +# messages are off. +# The default value is: NO. + +QUIET = NO + +# The WARNINGS tag can be used to turn on/off the warning messages that are +# generated to standard error ( stderr) by doxygen. If WARNINGS is set to YES +# this implies that the warnings are on. +# +# Tip: Turn warnings on while writing the documentation. +# The default value is: YES. + +WARNINGS = YES + +# If the WARN_IF_UNDOCUMENTED tag is set to YES, then doxygen will generate +# warnings for undocumented members. 
If EXTRACT_ALL is set to YES then this flag +# will automatically be disabled. +# The default value is: YES. + +WARN_IF_UNDOCUMENTED = NO + +# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for +# potential errors in the documentation, such as not documenting some parameters +# in a documented function, or documenting parameters that don't exist or using +# markup commands wrongly. +# The default value is: YES. + +WARN_IF_DOC_ERROR = YES + +# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that +# are documented, but have no documentation for their parameters or return +# value. If set to NO doxygen will only warn about wrong or incomplete parameter +# documentation, but not about the absence of documentation. +# The default value is: NO. + +WARN_NO_PARAMDOC = NO + +# The WARN_FORMAT tag determines the format of the warning messages that doxygen +# can produce. The string should contain the $file, $line, and $text tags, which +# will be replaced by the file and line number from which the warning originated +# and the warning text. Optionally the format may contain $version, which will +# be replaced by the version of the file (if it could be obtained via +# FILE_VERSION_FILTER) +# The default value is: $file:$line: $text. + +WARN_FORMAT = "$file:$line: $text" + +# The WARN_LOGFILE tag can be used to specify a file to which warning and error +# messages should be written. If left blank the output is written to standard +# error (stderr). + +WARN_LOGFILE = + +#--------------------------------------------------------------------------- +# Configuration options related to the input files +#--------------------------------------------------------------------------- + +# The INPUT tag is used to specify the files and/or directories that contain +# documented source files. You may enter file names like myfile.cpp or +# directories like /usr/src/myproject. Separate the files or directories with +# spaces. +# Note: If this tag is empty the current directory is searched. + +INPUT = ../include + +# This tag can be used to specify the character encoding of the source files +# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses +# libiconv (or the iconv built into libc) for the transcoding. See the libiconv +# documentation (see: http://www.gnu.org/software/libiconv) for the list of +# possible encodings. +# The default value is: UTF-8. + +INPUT_ENCODING = UTF-8 + +# If the value of the INPUT tag contains directories, you can use the +# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and +# *.h) to filter out the source-files in the directories. If left blank the +# following patterns are tested:*.c, *.cc, *.cxx, *.cpp, *.c++, *.java, *.ii, +# *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, *.hh, *.hxx, *.hpp, +# *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, *.m, *.markdown, +# *.md, *.mm, *.dox, *.py, *.f90, *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf, +# *.qsf, *.as and *.js. + +FILE_PATTERNS = *.hpp + +# The RECURSIVE tag can be used to specify whether or not subdirectories should +# be searched for input files as well. +# The default value is: NO. + +RECURSIVE = YES + +# The EXCLUDE tag can be used to specify files and/or directories that should be +# excluded from the INPUT source files. This way you can easily exclude a +# subdirectory from a directory tree whose root is specified with the INPUT tag. 
+# +# Note that relative paths are relative to the directory from which doxygen is +# run. + +EXCLUDE = + +# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or +# directories that are symbolic links (a Unix file system feature) are excluded +# from the input. +# The default value is: NO. + +EXCLUDE_SYMLINKS = NO + +# If the value of the INPUT tag contains directories, you can use the +# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude +# certain files from those directories. +# +# Note that the wildcards are matched against the file with absolute path, so to +# exclude all test directories for example use the pattern */test/* + +EXCLUDE_PATTERNS = + +# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names +# (namespaces, classes, functions, etc.) that should be excluded from the +# output. The symbol name can be a fully qualified name, a word, or if the +# wildcard * is used, a substring. Examples: ANamespace, AClass, +# AClass::ANamespace, ANamespace::*Test +# +# Note that the wildcards are matched against the file with absolute path, so to +# exclude all test directories use the pattern */test/* + +EXCLUDE_SYMBOLS = + +# The EXAMPLE_PATH tag can be used to specify one or more files or directories +# that contain example code fragments that are included (see the \include +# command). + +EXAMPLE_PATH = + +# If the value of the EXAMPLE_PATH tag contains directories, you can use the +# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and +# *.h) to filter out the source-files in the directories. If left blank all +# files are included. + +EXAMPLE_PATTERNS = + +# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be +# searched for input files to be used with the \include or \dontinclude commands +# irrespective of the value of the RECURSIVE tag. +# The default value is: NO. + +EXAMPLE_RECURSIVE = NO + +# The IMAGE_PATH tag can be used to specify one or more files or directories +# that contain images that are to be included in the documentation (see the +# \image command). + +IMAGE_PATH = + +# The INPUT_FILTER tag can be used to specify a program that doxygen should +# invoke to filter for each input file. Doxygen will invoke the filter program +# by executing (via popen()) the command: +# +# <filter> <input-file> +# +# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the +# name of an input file. Doxygen will then use the output that the filter +# program writes to standard output. If FILTER_PATTERNS is specified, this tag +# will be ignored. +# +# Note that the filter must not add or remove lines; it is applied before the +# code is scanned, but not when the output code is generated. If lines are added +# or removed, the anchors will not be placed correctly. + +INPUT_FILTER = + +# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern +# basis. Doxygen will compare the file name with each pattern and apply the +# filter if there is a match. The filters are a list of the form: pattern=filter +# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how +# filters are used. If the FILTER_PATTERNS tag is empty or if none of the +# patterns match the file name, INPUT_FILTER is applied. + +FILTER_PATTERNS = + +# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using +# INPUT_FILTER ) will also be used to filter the input files that are used for +# producing the source files to browse (i.e. 
when SOURCE_BROWSER is set to YES). +# The default value is: NO. + +FILTER_SOURCE_FILES = NO + +# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file +# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and +# it is also possible to disable source filtering for a specific pattern using +# *.ext= (so without naming a filter). +# This tag requires that the tag FILTER_SOURCE_FILES is set to YES. + +FILTER_SOURCE_PATTERNS = + +# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that +# is part of the input, its contents will be placed on the main page +# (index.html). This can be useful if you have a project on for instance GitHub +# and want to reuse the introduction page also for the doxygen output. + +USE_MDFILE_AS_MAINPAGE = + +#--------------------------------------------------------------------------- +# Configuration options related to source browsing +#--------------------------------------------------------------------------- + +# If the SOURCE_BROWSER tag is set to YES then a list of source files will be +# generated. Documented entities will be cross-referenced with these sources. +# +# Note: To get rid of all source code in the generated output, make sure that +# also VERBATIM_HEADERS is set to NO. +# The default value is: NO. + +SOURCE_BROWSER = NO + +# Setting the INLINE_SOURCES tag to YES will include the body of functions, +# classes and enums directly into the documentation. +# The default value is: NO. + +INLINE_SOURCES = NO + +# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any +# special comment blocks from generated source code fragments. Normal C, C++ and +# Fortran comments will always remain visible. +# The default value is: YES. + +STRIP_CODE_COMMENTS = YES + +# If the REFERENCED_BY_RELATION tag is set to YES then for each documented +# function all documented functions referencing it will be listed. +# The default value is: NO. + +REFERENCED_BY_RELATION = NO + +# If the REFERENCES_RELATION tag is set to YES then for each documented function +# all documented entities called/used by that function will be listed. +# The default value is: NO. + +REFERENCES_RELATION = NO + +# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set +# to YES, then the hyperlinks from functions in REFERENCES_RELATION and +# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will +# link to the documentation. +# The default value is: YES. + +REFERENCES_LINK_SOURCE = YES + +# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the +# source code will show a tooltip with additional information such as prototype, +# brief description and links to the definition and documentation. Since this +# will make the HTML file larger and loading of large files a bit slower, you +# can opt to disable this feature. +# The default value is: YES. +# This tag requires that the tag SOURCE_BROWSER is set to YES. + +SOURCE_TOOLTIPS = YES + +# If the USE_HTAGS tag is set to YES then the references to source code will +# point to the HTML generated by the htags(1) tool instead of doxygen built-in +# source browser. The htags tool is part of GNU's global source tagging system +# (see http://www.gnu.org/software/global/global.html). You will need version +# 4.8.6 or higher. 
+# +# To use it do the following: +# - Install the latest version of global +# - Enable SOURCE_BROWSER and USE_HTAGS in the config file +# - Make sure the INPUT points to the root of the source tree +# - Run doxygen as normal +# +# Doxygen will invoke htags (and that will in turn invoke gtags), so these +# tools must be available from the command line (i.e. in the search path). +# +# The result: instead of the source browser generated by doxygen, the links to +# source code will now point to the output of htags. +# The default value is: NO. +# This tag requires that the tag SOURCE_BROWSER is set to YES. + +USE_HTAGS = NO + +# If the VERBATIM_HEADERS tag is set the YES then doxygen will generate a +# verbatim copy of the header file for each class for which an include is +# specified. Set to NO to disable this. +# See also: Section \class. +# The default value is: YES. + +VERBATIM_HEADERS = YES + +#--------------------------------------------------------------------------- +# Configuration options related to the alphabetical class index +#--------------------------------------------------------------------------- + +# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all +# compounds will be generated. Enable this if the project contains a lot of +# classes, structs, unions or interfaces. +# The default value is: YES. + +ALPHABETICAL_INDEX = YES + +# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in +# which the alphabetical index list will be split. +# Minimum value: 1, maximum value: 20, default value: 5. +# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. + +COLS_IN_ALPHA_INDEX = 5 + +# In case all classes in a project start with a common prefix, all classes will +# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag +# can be used to specify a prefix (or a list of prefixes) that should be ignored +# while generating the index headers. +# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. + +IGNORE_PREFIX = + +#--------------------------------------------------------------------------- +# Configuration options related to the HTML output +#--------------------------------------------------------------------------- + +# If the GENERATE_HTML tag is set to YES doxygen will generate HTML output +# The default value is: YES. + +GENERATE_HTML = YES + +# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a +# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of +# it. +# The default directory is: html. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_OUTPUT = html + +# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each +# generated HTML page (for example: .htm, .php, .asp). +# The default value is: .html. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_FILE_EXTENSION = .html + +# The HTML_HEADER tag can be used to specify a user-defined HTML header file for +# each generated HTML page. If the tag is left blank doxygen will generate a +# standard header. +# +# To get valid HTML the header file that includes any scripts and style sheets +# that doxygen needs, which is dependent on the configuration options used (e.g. +# the setting GENERATE_TREEVIEW). It is highly recommended to start with a +# default header using +# doxygen -w html new_header.html new_footer.html new_stylesheet.css +# YourConfigFile +# and then modify the file new_header.html. 
See also section "Doxygen usage" +# for information on how to generate the default header that doxygen normally +# uses. +# Note: The header is subject to change so you typically have to regenerate the +# default header when upgrading to a newer version of doxygen. For a description +# of the possible markers and block names see the documentation. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_HEADER = + +# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each +# generated HTML page. If the tag is left blank doxygen will generate a standard +# footer. See HTML_HEADER for more information on how to generate a default +# footer and what special commands can be used inside the footer. See also +# section "Doxygen usage" for information on how to generate the default footer +# that doxygen normally uses. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_FOOTER = + +# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style +# sheet that is used by each HTML page. It can be used to fine-tune the look of +# the HTML output. If left blank doxygen will generate a default style sheet. +# See also section "Doxygen usage" for information on how to generate the style +# sheet that doxygen normally uses. +# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as +# it is more robust and this tag (HTML_STYLESHEET) will in the future become +# obsolete. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_STYLESHEET = + +# The HTML_EXTRA_STYLESHEET tag can be used to specify an additional user- +# defined cascading style sheet that is included after the standard style sheets +# created by doxygen. Using this option one can overrule certain style aspects. +# This is preferred over using HTML_STYLESHEET since it does not replace the +# standard style sheet and is therefor more robust against future updates. +# Doxygen will copy the style sheet file to the output directory. For an example +# see the documentation. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_EXTRA_STYLESHEET = + +# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or +# other source files which should be copied to the HTML output directory. Note +# that these files will be copied to the base HTML output directory. Use the +# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these +# files. In the HTML_STYLESHEET file, use the file name only. Also note that the +# files will be copied as-is; there are no commands or markers available. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_EXTRA_FILES = + +# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen +# will adjust the colors in the stylesheet and background images according to +# this color. Hue is specified as an angle on a colorwheel, see +# http://en.wikipedia.org/wiki/Hue for more information. For instance the value +# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300 +# purple, and 360 is red again. +# Minimum value: 0, maximum value: 359, default value: 220. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_HUE = 220 + +# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors +# in the HTML output. For a value of 0 the output will use grayscales only. A +# value of 255 will produce the most vivid colors. +# Minimum value: 0, maximum value: 255, default value: 100. 
+# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_SAT = 100 + +# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the +# luminance component of the colors in the HTML output. Values below 100 +# gradually make the output lighter, whereas values above 100 make the output +# darker. The value divided by 100 is the actual gamma applied, so 80 represents +# a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not +# change the gamma. +# Minimum value: 40, maximum value: 240, default value: 80. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_GAMMA = 80 + +# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML +# page will contain the date and time when the page was generated. Setting this +# to NO can help when comparing the output of multiple runs. +# The default value is: YES. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_TIMESTAMP = YES + +# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML +# documentation will contain sections that can be hidden and shown after the +# page has loaded. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_DYNAMIC_SECTIONS = NO + +# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries +# shown in the various tree structured indices initially; the user can expand +# and collapse entries dynamically later on. Doxygen will expand the tree to +# such a level that at most the specified number of entries are visible (unless +# a fully collapsed tree already exceeds this amount). So setting the number of +# entries 1 will produce a full collapsed tree by default. 0 is a special value +# representing an infinite number of entries and will result in a full expanded +# tree by default. +# Minimum value: 0, maximum value: 9999, default value: 100. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_INDEX_NUM_ENTRIES = 100 + +# If the GENERATE_DOCSET tag is set to YES, additional index files will be +# generated that can be used as input for Apple's Xcode 3 integrated development +# environment (see: http://developer.apple.com/tools/xcode/), introduced with +# OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a +# Makefile in the HTML output directory. Running make will produce the docset in +# that directory and running make install will install the docset in +# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at +# startup. See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html +# for more information. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_DOCSET = NO + +# This tag determines the name of the docset feed. A documentation feed provides +# an umbrella under which multiple documentation sets from a single provider +# (such as a company or product suite) can be grouped. +# The default value is: Doxygen generated docs. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_FEEDNAME = "Doxygen generated docs" + +# This tag specifies a string that should uniquely identify the documentation +# set bundle. This should be a reverse domain-name style string, e.g. +# com.mycompany.MyDocSet. Doxygen will append .docset to the name. +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_DOCSET is set to YES. 
+ +DOCSET_BUNDLE_ID = org.doxygen.Project + +# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify +# the documentation publisher. This should be a reverse domain-name style +# string, e.g. com.mycompany.MyDocSet.documentation. +# The default value is: org.doxygen.Publisher. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_PUBLISHER_ID = org.doxygen.Publisher + +# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher. +# The default value is: Publisher. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_PUBLISHER_NAME = Publisher + +# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three +# additional HTML index files: index.hhp, index.hhc, and index.hhk. The +# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop +# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on +# Windows. +# +# The HTML Help Workshop contains a compiler that can convert all HTML output +# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML +# files are now used as the Windows 98 help format, and will replace the old +# Windows help format (.hlp) on all Windows platforms in the future. Compressed +# HTML files also contain an index, a table of contents, and you can search for +# words in the documentation. The HTML workshop also contains a viewer for +# compressed HTML files. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_HTMLHELP = NO + +# The CHM_FILE tag can be used to specify the file name of the resulting .chm +# file. You can add a path in front of the file if the result should not be +# written to the html output directory. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +CHM_FILE = + +# The HHC_LOCATION tag can be used to specify the location (absolute path +# including file name) of the HTML help compiler ( hhc.exe). If non-empty +# doxygen will try to run the HTML help compiler on the generated index.hhp. +# The file has to be specified with full path. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +HHC_LOCATION = + +# The GENERATE_CHI flag controls if a separate .chi index file is generated ( +# YES) or that it should be included in the master .chm file ( NO). +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +GENERATE_CHI = NO + +# The CHM_INDEX_ENCODING is used to encode HtmlHelp index ( hhk), content ( hhc) +# and project file content. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +CHM_INDEX_ENCODING = + +# The BINARY_TOC flag controls whether a binary table of contents is generated ( +# YES) or a normal table of contents ( NO) in the .chm file. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +BINARY_TOC = NO + +# The TOC_EXPAND flag can be set to YES to add extra items for group members to +# the table of contents of the HTML help documentation and to the tree view. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +TOC_EXPAND = NO + +# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and +# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that +# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help +# (.qch) of the generated HTML documentation. +# The default value is: NO. 
+# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_QHP = NO + +# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify +# the file name of the resulting .qch file. The path specified is relative to +# the HTML output folder. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QCH_FILE = + +# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help +# Project output. For more information please see Qt Help Project / Namespace +# (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace). +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_NAMESPACE = org.doxygen.Project + +# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt +# Help Project output. For more information please see Qt Help Project / Virtual +# Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual- +# folders). +# The default value is: doc. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_VIRTUAL_FOLDER = doc + +# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom +# filter to add. For more information please see Qt Help Project / Custom +# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom- +# filters). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_CUST_FILTER_NAME = + +# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the +# custom filter to add. For more information please see Qt Help Project / Custom +# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom- +# filters). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_CUST_FILTER_ATTRS = + +# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this +# project's filter section matches. Qt Help Project / Filter Attributes (see: +# http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_SECT_FILTER_ATTRS = + +# The QHG_LOCATION tag can be used to specify the location of Qt's +# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the +# generated .qhp file. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHG_LOCATION = + +# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be +# generated, together with the HTML files, they form an Eclipse help plugin. To +# install this plugin and make it available under the help contents menu in +# Eclipse, the contents of the directory containing the HTML and XML files needs +# to be copied into the plugins directory of eclipse. The name of the directory +# within the plugins directory should be the same as the ECLIPSE_DOC_ID value. +# After copying Eclipse needs to be restarted before the help appears. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_ECLIPSEHELP = NO + +# A unique identifier for the Eclipse help plugin. When installing the plugin +# the directory name containing the HTML and XML files should also have this +# name. Each documentation set should have its own identifier. +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES. + +ECLIPSE_DOC_ID = org.doxygen.Project + +# If you want full control over the layout of the generated HTML pages it might +# be necessary to disable the index and replace it with your own. 
The +# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top +# of each HTML page. A value of NO enables the index and the value YES disables +# it. Since the tabs in the index contain the same information as the navigation +# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +DISABLE_INDEX = NO + +# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index +# structure should be generated to display hierarchical information. If the tag +# value is set to YES, a side panel will be generated containing a tree-like +# index structure (just like the one that is generated for HTML Help). For this +# to work a browser that supports JavaScript, DHTML, CSS and frames is required +# (i.e. any modern browser). Windows users are probably better off using the +# HTML help feature. Via custom stylesheets (see HTML_EXTRA_STYLESHEET) one can +# further fine-tune the look of the index. As an example, the default style +# sheet generated by doxygen has an example that shows how to put an image at +# the root of the tree instead of the PROJECT_NAME. Since the tree basically has +# the same information as the tab index, you could consider setting +# DISABLE_INDEX to YES when enabling this option. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_TREEVIEW = NO + +# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that +# doxygen will group on one line in the generated HTML documentation. +# +# Note that a value of 0 will completely suppress the enum values from appearing +# in the overview section. +# Minimum value: 0, maximum value: 20, default value: 4. +# This tag requires that the tag GENERATE_HTML is set to YES. + +ENUM_VALUES_PER_LINE = 4 + +# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used +# to set the initial width (in pixels) of the frame in which the tree is shown. +# Minimum value: 0, maximum value: 1500, default value: 250. +# This tag requires that the tag GENERATE_HTML is set to YES. + +TREEVIEW_WIDTH = 250 + +# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open links to +# external symbols imported via tag files in a separate window. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +EXT_LINKS_IN_WINDOW = NO + +# Use this tag to change the font size of LaTeX formulas included as images in +# the HTML documentation. When you change the font size after a successful +# doxygen run you need to manually remove any form_*.png images from the HTML +# output directory to force them to be regenerated. +# Minimum value: 8, maximum value: 50, default value: 10. +# This tag requires that the tag GENERATE_HTML is set to YES. + +FORMULA_FONTSIZE = 10 + +# Use the FORMULA_TRANPARENT tag to determine whether or not the images +# generated for formulas are transparent PNGs. Transparent PNGs are not +# supported properly for IE 6.0, but are supported on all modern browsers. +# +# Note that when changing this option you need to delete any form_*.png files in +# the HTML output directory before the changes have effect. +# The default value is: YES. +# This tag requires that the tag GENERATE_HTML is set to YES. 
+ +FORMULA_TRANSPARENT = YES + +# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see +# http://www.mathjax.org) which uses client side Javascript for the rendering +# instead of using prerendered bitmaps. Use this if you do not have LaTeX +# installed or if you want to formulas look prettier in the HTML output. When +# enabled you may also need to install MathJax separately and configure the path +# to it using the MATHJAX_RELPATH option. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +USE_MATHJAX = NO + +# When MathJax is enabled you can set the default output format to be used for +# the MathJax output. See the MathJax site (see: +# http://docs.mathjax.org/en/latest/output.html) for more details. +# Possible values are: HTML-CSS (which is slower, but has the best +# compatibility), NativeMML (i.e. MathML) and SVG. +# The default value is: HTML-CSS. +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_FORMAT = HTML-CSS + +# When MathJax is enabled you need to specify the location relative to the HTML +# output directory using the MATHJAX_RELPATH option. The destination directory +# should contain the MathJax.js script. For instance, if the mathjax directory +# is located at the same level as the HTML output directory, then +# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax +# Content Delivery Network so you can quickly see the result without installing +# MathJax. However, it is strongly recommended to install a local copy of +# MathJax from http://www.mathjax.org before deployment. +# The default value is: http://cdn.mathjax.org/mathjax/latest. +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest + +# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax +# extension names that should be enabled during MathJax rendering. For example +# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_EXTENSIONS = + +# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces +# of code that will be used on startup of the MathJax code. See the MathJax site +# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an +# example see the documentation. +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_CODEFILE = + +# When the SEARCHENGINE tag is enabled doxygen will generate a search box for +# the HTML output. The underlying search engine uses javascript and DHTML and +# should work on any modern browser. Note that when using HTML help +# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET) +# there is already a search function so this one should typically be disabled. +# For large projects the javascript based search engine can be slow, then +# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to +# search using the keyboard; to jump to the search box use <access key> + S +# (what the <access key> is depends on the OS and browser, but it is typically +# <CTRL>, <ALT>/<option>, or both). Inside the search box use the <cursor down +# key> to jump into the search results window, the results can be navigated +# using the <cursor keys>. Press <Enter> to select an item or <escape> to cancel +# the search. The filter options can be selected when the cursor is inside the +# search box by pressing <Shift>+<cursor down>. 
Also here use the <cursor keys> +# to select a filter and <Enter> or <escape> to activate or cancel the filter +# option. +# The default value is: YES. +# This tag requires that the tag GENERATE_HTML is set to YES. + +SEARCHENGINE = YES + +# When the SERVER_BASED_SEARCH tag is enabled the search engine will be +# implemented using a web server instead of a web client using Javascript. There +# are two flavours of web server based searching depending on the +# EXTERNAL_SEARCH setting. When disabled, doxygen will generate a PHP script for +# searching and an index file used by the script. When EXTERNAL_SEARCH is +# enabled the indexing and searching needs to be provided by external tools. See +# the section "External Indexing and Searching" for details. +# The default value is: NO. +# This tag requires that the tag SEARCHENGINE is set to YES. + +SERVER_BASED_SEARCH = NO + +# When EXTERNAL_SEARCH tag is enabled doxygen will no longer generate the PHP +# script for searching. Instead the search results are written to an XML file +# which needs to be processed by an external indexer. Doxygen will invoke an +# external search engine pointed to by the SEARCHENGINE_URL option to obtain the +# search results. +# +# Doxygen ships with an example indexer ( doxyindexer) and search engine +# (doxysearch.cgi) which are based on the open source search engine library +# Xapian (see: http://xapian.org/). +# +# See the section "External Indexing and Searching" for details. +# The default value is: NO. +# This tag requires that the tag SEARCHENGINE is set to YES. + +EXTERNAL_SEARCH = NO + +# The SEARCHENGINE_URL should point to a search engine hosted by a web server +# which will return the search results when EXTERNAL_SEARCH is enabled. +# +# Doxygen ships with an example indexer ( doxyindexer) and search engine +# (doxysearch.cgi) which are based on the open source search engine library +# Xapian (see: http://xapian.org/). See the section "External Indexing and +# Searching" for details. +# This tag requires that the tag SEARCHENGINE is set to YES. + +SEARCHENGINE_URL = + +# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the unindexed +# search data is written to a file for indexing by an external tool. With the +# SEARCHDATA_FILE tag the name of this file can be specified. +# The default file is: searchdata.xml. +# This tag requires that the tag SEARCHENGINE is set to YES. + +SEARCHDATA_FILE = searchdata.xml + +# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the +# EXTERNAL_SEARCH_ID tag can be used as an identifier for the project. This is +# useful in combination with EXTRA_SEARCH_MAPPINGS to search through multiple +# projects and redirect the results back to the right project. +# This tag requires that the tag SEARCHENGINE is set to YES. + +EXTERNAL_SEARCH_ID = + +# The EXTRA_SEARCH_MAPPINGS tag can be used to enable searching through doxygen +# projects other than the one defined by this configuration file, but that are +# all added to the same external search index. Each project needs to have a +# unique id set via EXTERNAL_SEARCH_ID. The search mapping then maps the id of +# to a relative location where the documentation can be found. The format is: +# EXTRA_SEARCH_MAPPINGS = tagname1=loc1 tagname2=loc2 ... +# This tag requires that the tag SEARCHENGINE is set to YES. 
+ +EXTRA_SEARCH_MAPPINGS = + +#--------------------------------------------------------------------------- +# Configuration options related to the LaTeX output +#--------------------------------------------------------------------------- + +# If the GENERATE_LATEX tag is set to YES doxygen will generate LaTeX output. +# The default value is: YES. + +GENERATE_LATEX = NO + +# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. If a +# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of +# it. +# The default directory is: latex. +# This tag requires that the tag GENERATE_LATEX is set to YES. + +LATEX_OUTPUT = latex + +# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be +# invoked. +# +# Note that when enabling USE_PDFLATEX this option is only used for generating +# bitmaps for formulas in the HTML output, but not in the Makefile that is +# written to the output directory. +# The default file is: latex. +# This tag requires that the tag GENERATE_LATEX is set to YES. + +LATEX_CMD_NAME = latex + +# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to generate +# index for LaTeX. +# The default file is: makeindex. +# This tag requires that the tag GENERATE_LATEX is set to YES. + +MAKEINDEX_CMD_NAME = makeindex + +# If the COMPACT_LATEX tag is set to YES doxygen generates more compact LaTeX +# documents. This may be useful for small projects and may help to save some +# trees in general. +# The default value is: NO. +# This tag requires that the tag GENERATE_LATEX is set to YES. + +COMPACT_LATEX = NO + +# The PAPER_TYPE tag can be used to set the paper type that is used by the +# printer. +# Possible values are: a4 (210 x 297 mm), letter (8.5 x 11 inches), legal (8.5 x +# 14 inches) and executive (7.25 x 10.5 inches). +# The default value is: a4. +# This tag requires that the tag GENERATE_LATEX is set to YES. + +PAPER_TYPE = a4 + +# The EXTRA_PACKAGES tag can be used to specify one or more LaTeX package names +# that should be included in the LaTeX output. To get the times font for +# instance you can specify +# EXTRA_PACKAGES=times +# If left blank no extra packages will be included. +# This tag requires that the tag GENERATE_LATEX is set to YES. + +EXTRA_PACKAGES = + +# The LATEX_HEADER tag can be used to specify a personal LaTeX header for the +# generated LaTeX document. The header should contain everything until the first +# chapter. If it is left blank doxygen will generate a standard header. See +# section "Doxygen usage" for information on how to let doxygen write the +# default header to a separate file. +# +# Note: Only use a user-defined header if you know what you are doing! The +# following commands have a special meaning inside the header: $title, +# $datetime, $date, $doxygenversion, $projectname, $projectnumber. Doxygen will +# replace them by respectively the title of the page, the current date and time, +# only the current date, the version number of doxygen, the project name (see +# PROJECT_NAME), or the project number (see PROJECT_NUMBER). +# This tag requires that the tag GENERATE_LATEX is set to YES. + +LATEX_HEADER = + +# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for the +# generated LaTeX document. The footer should contain everything after the last +# chapter. If it is left blank doxygen will generate a standard footer. +# +# Note: Only use a user-defined footer if you know what you are doing! +# This tag requires that the tag GENERATE_LATEX is set to YES. 
+ +LATEX_FOOTER = + +# The LATEX_EXTRA_FILES tag can be used to specify one or more extra images or +# other source files which should be copied to the LATEX_OUTPUT output +# directory. Note that the files will be copied as-is; there are no commands or +# markers available. +# This tag requires that the tag GENERATE_LATEX is set to YES. + +LATEX_EXTRA_FILES = + +# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated is +# prepared for conversion to PDF (using ps2pdf or pdflatex). The PDF file will +# contain links (just like the HTML output) instead of page references. This +# makes the output suitable for online browsing using a PDF viewer. +# The default value is: YES. +# This tag requires that the tag GENERATE_LATEX is set to YES. + +PDF_HYPERLINKS = YES + +# If the LATEX_PDFLATEX tag is set to YES, doxygen will use pdflatex to generate +# the PDF file directly from the LaTeX files. Set this option to YES to get a +# higher quality PDF documentation. +# The default value is: YES. +# This tag requires that the tag GENERATE_LATEX is set to YES. + +USE_PDFLATEX = YES + +# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \batchmode +# command to the generated LaTeX files. This will instruct LaTeX to keep running +# if errors occur, instead of asking the user for help. This option is also used +# when generating formulas in HTML. +# The default value is: NO. +# This tag requires that the tag GENERATE_LATEX is set to YES. + +LATEX_BATCHMODE = NO + +# If the LATEX_HIDE_INDICES tag is set to YES then doxygen will not include the +# index chapters (such as File Index, Compound Index, etc.) in the output. +# The default value is: NO. +# This tag requires that the tag GENERATE_LATEX is set to YES. + +LATEX_HIDE_INDICES = NO + +# If the LATEX_SOURCE_CODE tag is set to YES then doxygen will include source +# code with syntax highlighting in the LaTeX output. +# +# Note that which sources are shown also depends on other settings such as +# SOURCE_BROWSER. +# The default value is: NO. +# This tag requires that the tag GENERATE_LATEX is set to YES. + +LATEX_SOURCE_CODE = NO + +# The LATEX_BIB_STYLE tag can be used to specify the style to use for the +# bibliography, e.g. plainnat, or ieeetr. See +# http://en.wikipedia.org/wiki/BibTeX and \cite for more info. +# The default value is: plain. +# This tag requires that the tag GENERATE_LATEX is set to YES. + +LATEX_BIB_STYLE = plain + +#--------------------------------------------------------------------------- +# Configuration options related to the RTF output +#--------------------------------------------------------------------------- + +# If the GENERATE_RTF tag is set to YES doxygen will generate RTF output. The +# RTF output is optimized for Word 97 and may not look too pretty with other RTF +# readers/editors. +# The default value is: NO. + +GENERATE_RTF = NO + +# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. If a +# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of +# it. +# The default directory is: rtf. +# This tag requires that the tag GENERATE_RTF is set to YES. + +RTF_OUTPUT = rtf + +# If the COMPACT_RTF tag is set to YES doxygen generates more compact RTF +# documents. This may be useful for small projects and may help to save some +# trees in general. +# The default value is: NO. +# This tag requires that the tag GENERATE_RTF is set to YES. 
+ +COMPACT_RTF = NO + +# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated will +# contain hyperlink fields. The RTF file will contain links (just like the HTML +# output) instead of page references. This makes the output suitable for online +# browsing using Word or some other Word compatible readers that support those +# fields. +# +# Note: WordPad (write) and others do not support links. +# The default value is: NO. +# This tag requires that the tag GENERATE_RTF is set to YES. + +RTF_HYPERLINKS = NO + +# Load stylesheet definitions from file. Syntax is similar to doxygen's config +# file, i.e. a series of assignments. You only have to provide replacements, +# missing definitions are set to their default value. +# +# See also section "Doxygen usage" for information on how to generate the +# default style sheet that doxygen normally uses. +# This tag requires that the tag GENERATE_RTF is set to YES. + +RTF_STYLESHEET_FILE = + +# Set optional variables used in the generation of an RTF document. Syntax is +# similar to doxygen's config file. A template extensions file can be generated +# using doxygen -e rtf extensionFile. +# This tag requires that the tag GENERATE_RTF is set to YES. + +RTF_EXTENSIONS_FILE = + +#--------------------------------------------------------------------------- +# Configuration options related to the man page output +#--------------------------------------------------------------------------- + +# If the GENERATE_MAN tag is set to YES doxygen will generate man pages for +# classes and files. +# The default value is: NO. + +GENERATE_MAN = NO + +# The MAN_OUTPUT tag is used to specify where the man pages will be put. If a +# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of +# it. A directory man3 will be created inside the directory specified by +# MAN_OUTPUT. +# The default directory is: man. +# This tag requires that the tag GENERATE_MAN is set to YES. + +MAN_OUTPUT = man + +# The MAN_EXTENSION tag determines the extension that is added to the generated +# man pages. In case the manual section does not start with a number, the number +# 3 is prepended. The dot (.) at the beginning of the MAN_EXTENSION tag is +# optional. +# The default value is: .3. +# This tag requires that the tag GENERATE_MAN is set to YES. + +MAN_EXTENSION = .3 + +# If the MAN_LINKS tag is set to YES and doxygen generates man output, then it +# will generate one additional man file for each entity documented in the real +# man page(s). These additional files only source the real man page, but without +# them the man command would be unable to find the correct page. +# The default value is: NO. +# This tag requires that the tag GENERATE_MAN is set to YES. + +MAN_LINKS = NO + +#--------------------------------------------------------------------------- +# Configuration options related to the XML output +#--------------------------------------------------------------------------- + +# If the GENERATE_XML tag is set to YES doxygen will generate an XML file that +# captures the structure of the code including all documentation. +# The default value is: NO. + +GENERATE_XML = YES + +# The XML_OUTPUT tag is used to specify where the XML pages will be put. If a +# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of +# it. +# The default directory is: xml. +# This tag requires that the tag GENERATE_XML is set to YES. 
+ +XML_OUTPUT = xml + +# If the XML_PROGRAMLISTING tag is set to YES doxygen will dump the program +# listings (including syntax highlighting and cross-referencing information) to +# the XML output. Note that enabling this will significantly increase the size +# of the XML output. +# The default value is: YES. +# This tag requires that the tag GENERATE_XML is set to YES. + +XML_PROGRAMLISTING = YES + +#--------------------------------------------------------------------------- +# Configuration options related to the DOCBOOK output +#--------------------------------------------------------------------------- + +# If the GENERATE_DOCBOOK tag is set to YES doxygen will generate Docbook files +# that can be used to generate PDF. +# The default value is: NO. + +GENERATE_DOCBOOK = NO + +# The DOCBOOK_OUTPUT tag is used to specify where the Docbook pages will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be put in +# front of it. +# The default directory is: docbook. +# This tag requires that the tag GENERATE_DOCBOOK is set to YES. + +DOCBOOK_OUTPUT = docbook + +#--------------------------------------------------------------------------- +# Configuration options for the AutoGen Definitions output +#--------------------------------------------------------------------------- + +# If the GENERATE_AUTOGEN_DEF tag is set to YES doxygen will generate an AutoGen +# Definitions (see http://autogen.sf.net) file that captures the structure of +# the code including all documentation. Note that this feature is still +# experimental and incomplete at the moment. +# The default value is: NO. + +GENERATE_AUTOGEN_DEF = NO + +#--------------------------------------------------------------------------- +# Configuration options related to the Perl module output +#--------------------------------------------------------------------------- + +# If the GENERATE_PERLMOD tag is set to YES doxygen will generate a Perl module +# file that captures the structure of the code including all documentation. +# +# Note that this feature is still experimental and incomplete at the moment. +# The default value is: NO. + +GENERATE_PERLMOD = NO + +# If the PERLMOD_LATEX tag is set to YES doxygen will generate the necessary +# Makefile rules, Perl scripts and LaTeX code to be able to generate PDF and DVI +# output from the Perl module output. +# The default value is: NO. +# This tag requires that the tag GENERATE_PERLMOD is set to YES. + +PERLMOD_LATEX = NO + +# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be nicely +# formatted so it can be parsed by a human reader. This is useful if you want to +# understand what is going on. On the other hand, if this tag is set to NO the +# size of the Perl module output will be much smaller and Perl will parse it +# just the same. +# The default value is: YES. +# This tag requires that the tag GENERATE_PERLMOD is set to YES. + +PERLMOD_PRETTY = YES + +# The names of the make variables in the generated doxyrules.make file are +# prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. This is useful +# so different doxyrules.make files included by the same Makefile don't +# overwrite each other's variables. +# This tag requires that the tag GENERATE_PERLMOD is set to YES. 
+ +PERLMOD_MAKEVAR_PREFIX = + +#--------------------------------------------------------------------------- +# Configuration options related to the preprocessor +#--------------------------------------------------------------------------- + +# If the ENABLE_PREPROCESSING tag is set to YES doxygen will evaluate all +# C-preprocessor directives found in the sources and include files. +# The default value is: YES. + +ENABLE_PREPROCESSING = YES + +# If the MACRO_EXPANSION tag is set to YES doxygen will expand all macro names +# in the source code. If set to NO only conditional compilation will be +# performed. Macro expansion can be done in a controlled way by setting +# EXPAND_ONLY_PREDEF to YES. +# The default value is: NO. +# This tag requires that the tag ENABLE_PREPROCESSING is set to YES. + +MACRO_EXPANSION = YES + +# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES then +# the macro expansion is limited to the macros specified with the PREDEFINED and +# EXPAND_AS_DEFINED tags. +# The default value is: NO. +# This tag requires that the tag ENABLE_PREPROCESSING is set to YES. + +EXPAND_ONLY_PREDEF = NO + +# If the SEARCH_INCLUDES tag is set to YES the includes files in the +# INCLUDE_PATH will be searched if a #include is found. +# The default value is: YES. +# This tag requires that the tag ENABLE_PREPROCESSING is set to YES. + +SEARCH_INCLUDES = YES + +# The INCLUDE_PATH tag can be used to specify one or more directories that +# contain include files that are not input files but should be processed by the +# preprocessor. +# This tag requires that the tag SEARCH_INCLUDES is set to YES. + +INCLUDE_PATH = + +# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard +# patterns (like *.h and *.hpp) to filter out the header-files in the +# directories. If left blank, the patterns specified with FILE_PATTERNS will be +# used. +# This tag requires that the tag ENABLE_PREPROCESSING is set to YES. + +INCLUDE_FILE_PATTERNS = + +# The PREDEFINED tag can be used to specify one or more macro names that are +# defined before the preprocessor is started (similar to the -D option of e.g. +# gcc). The argument of the tag is a list of macros of the form: name or +# name=definition (no spaces). If the definition and the "=" are omitted, "=1" +# is assumed. To prevent a macro definition from being undefined via #undef or +# recursively expanded use the := operator instead of the = operator. +# This tag requires that the tag ENABLE_PREPROCESSING is set to YES. + +PREDEFINED = ORCUS_DLLPUBLIC ORCUS_SPM_DLLPUBLIC ORCUS_PSR_DLLPUBLIC + +# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this +# tag can be used to specify a list of macro names that should be expanded. The +# macro definition that is found in the sources will be used. Use the PREDEFINED +# tag if you want to use a different macro definition that overrules the +# definition found in the source code. +# This tag requires that the tag ENABLE_PREPROCESSING is set to YES. + +EXPAND_AS_DEFINED = + +# If the SKIP_FUNCTION_MACROS tag is set to YES then doxygen's preprocessor will +# remove all refrences to function-like macros that are alone on a line, have an +# all uppercase name, and do not end with a semicolon. Such function macros are +# typically used for boiler-plate code, and will confuse the parser if not +# removed. +# The default value is: YES. +# This tag requires that the tag ENABLE_PREPROCESSING is set to YES. 
+ +SKIP_FUNCTION_MACROS = YES + +#--------------------------------------------------------------------------- +# Configuration options related to external references +#--------------------------------------------------------------------------- + +# The TAGFILES tag can be used to specify one or more tag files. For each tag +# file the location of the external documentation should be added. The format of +# a tag file without this location is as follows: +# TAGFILES = file1 file2 ... +# Adding location for the tag files is done as follows: +# TAGFILES = file1=loc1 "file2 = loc2" ... +# where loc1 and loc2 can be relative or absolute paths or URLs. See the +# section "Linking to external documentation" for more information about the use +# of tag files. +# Note: Each tag file must have an unique name (where the name does NOT include +# the path). If a tag file is not located in the directory in which doxygen is +# run, you must also specify the path to the tagfile here. + +TAGFILES = + +# When a file name is specified after GENERATE_TAGFILE, doxygen will create a +# tag file that is based on the input files it reads. See section "Linking to +# external documentation" for more information about the usage of tag files. + +GENERATE_TAGFILE = + +# If the ALLEXTERNALS tag is set to YES all external class will be listed in the +# class index. If set to NO only the inherited external classes will be listed. +# The default value is: NO. + +ALLEXTERNALS = NO + +# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed in +# the modules index. If set to NO, only the current project's groups will be +# listed. +# The default value is: YES. + +EXTERNAL_GROUPS = YES + +# If the EXTERNAL_PAGES tag is set to YES all external pages will be listed in +# the related pages index. If set to NO, only the current project's pages will +# be listed. +# The default value is: YES. + +EXTERNAL_PAGES = YES + +#--------------------------------------------------------------------------- +# Configuration options related to the dot tool +#--------------------------------------------------------------------------- + +# If the CLASS_DIAGRAMS tag is set to YES doxygen will generate a class diagram +# (in HTML and LaTeX) for classes with base or super classes. Setting the tag to +# NO turns the diagrams off. Note that this option also works with HAVE_DOT +# disabled, but it is recommended to install and use dot, since it yields more +# powerful graphs. +# The default value is: YES. + +CLASS_DIAGRAMS = YES + +# You can include diagrams made with dia in doxygen documentation. Doxygen will +# then run dia to produce the diagram and insert it in the documentation. The +# DIA_PATH tag allows you to specify the directory where the dia binary resides. +# If left empty dia is assumed to be found in the default search path. + +DIA_PATH = + +# If set to YES, the inheritance and collaboration graphs will hide inheritance +# and usage relations if the target is undocumented or is not a class. +# The default value is: YES. + +HIDE_UNDOC_RELATIONS = YES + +# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is +# available from the path. This tool is part of Graphviz (see: +# http://www.graphviz.org/), a graph visualization toolkit from AT&T and Lucent +# Bell Labs. The other options in this section have no effect if this option is +# set to NO +# The default value is: NO. + +HAVE_DOT = NO + +# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is allowed +# to run in parallel. 
When set to 0 doxygen will base this on the number of +# processors available in the system. You can set it explicitly to a value +# larger than 0 to get control over the balance between CPU load and processing +# speed. +# Minimum value: 0, maximum value: 32, default value: 0. +# This tag requires that the tag HAVE_DOT is set to YES. + +DOT_NUM_THREADS = 0 + +# When you want a differently looking font n the dot files that doxygen +# generates you can specify the font name using DOT_FONTNAME. You need to make +# sure dot is able to find the font, which can be done by putting it in a +# standard location or by setting the DOTFONTPATH environment variable or by +# setting DOT_FONTPATH to the directory containing the font. +# The default value is: Helvetica. +# This tag requires that the tag HAVE_DOT is set to YES. + +DOT_FONTNAME = Helvetica + +# The DOT_FONTSIZE tag can be used to set the size (in points) of the font of +# dot graphs. +# Minimum value: 4, maximum value: 24, default value: 10. +# This tag requires that the tag HAVE_DOT is set to YES. + +DOT_FONTSIZE = 10 + +# By default doxygen will tell dot to use the default font as specified with +# DOT_FONTNAME. If you specify a different font using DOT_FONTNAME you can set +# the path where dot can find it using this tag. +# This tag requires that the tag HAVE_DOT is set to YES. + +DOT_FONTPATH = + +# If the CLASS_GRAPH tag is set to YES then doxygen will generate a graph for +# each documented class showing the direct and indirect inheritance relations. +# Setting this tag to YES will force the CLASS_DIAGRAMS tag to NO. +# The default value is: YES. +# This tag requires that the tag HAVE_DOT is set to YES. + +CLASS_GRAPH = YES + +# If the COLLABORATION_GRAPH tag is set to YES then doxygen will generate a +# graph for each documented class showing the direct and indirect implementation +# dependencies (inheritance, containment, and class references variables) of the +# class with other documented classes. +# The default value is: YES. +# This tag requires that the tag HAVE_DOT is set to YES. + +COLLABORATION_GRAPH = YES + +# If the GROUP_GRAPHS tag is set to YES then doxygen will generate a graph for +# groups, showing the direct groups dependencies. +# The default value is: YES. +# This tag requires that the tag HAVE_DOT is set to YES. + +GROUP_GRAPHS = YES + +# If the UML_LOOK tag is set to YES doxygen will generate inheritance and +# collaboration diagrams in a style similar to the OMG's Unified Modeling +# Language. +# The default value is: NO. +# This tag requires that the tag HAVE_DOT is set to YES. + +UML_LOOK = NO + +# If the UML_LOOK tag is enabled, the fields and methods are shown inside the +# class node. If there are many fields or methods and many nodes the graph may +# become too big to be useful. The UML_LIMIT_NUM_FIELDS threshold limits the +# number of items for each type to make the size more manageable. Set this to 0 +# for no limit. Note that the threshold may be exceeded by 50% before the limit +# is enforced. So when you set the threshold to 10, up to 15 fields may appear, +# but if the number exceeds 15, the total amount of fields shown is limited to +# 10. +# Minimum value: 0, maximum value: 100, default value: 10. +# This tag requires that the tag HAVE_DOT is set to YES. + +UML_LIMIT_NUM_FIELDS = 10 + +# If the TEMPLATE_RELATIONS tag is set to YES then the inheritance and +# collaboration graphs will show the relations between templates and their +# instances. +# The default value is: NO. 
+# This tag requires that the tag HAVE_DOT is set to YES. + +TEMPLATE_RELATIONS = NO + +# If the INCLUDE_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are set to +# YES then doxygen will generate a graph for each documented file showing the +# direct and indirect include dependencies of the file with other documented +# files. +# The default value is: YES. +# This tag requires that the tag HAVE_DOT is set to YES. + +INCLUDE_GRAPH = YES + +# If the INCLUDED_BY_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are +# set to YES then doxygen will generate a graph for each documented file showing +# the direct and indirect include dependencies of the file with other documented +# files. +# The default value is: YES. +# This tag requires that the tag HAVE_DOT is set to YES. + +INCLUDED_BY_GRAPH = YES + +# If the CALL_GRAPH tag is set to YES then doxygen will generate a call +# dependency graph for every global function or class method. +# +# Note that enabling this option will significantly increase the time of a run. +# So in most cases it will be better to enable call graphs for selected +# functions only using the \callgraph command. +# The default value is: NO. +# This tag requires that the tag HAVE_DOT is set to YES. + +CALL_GRAPH = NO + +# If the CALLER_GRAPH tag is set to YES then doxygen will generate a caller +# dependency graph for every global function or class method. +# +# Note that enabling this option will significantly increase the time of a run. +# So in most cases it will be better to enable caller graphs for selected +# functions only using the \callergraph command. +# The default value is: NO. +# This tag requires that the tag HAVE_DOT is set to YES. + +CALLER_GRAPH = NO + +# If the GRAPHICAL_HIERARCHY tag is set to YES then doxygen will graphical +# hierarchy of all classes instead of a textual one. +# The default value is: YES. +# This tag requires that the tag HAVE_DOT is set to YES. + +GRAPHICAL_HIERARCHY = YES + +# If the DIRECTORY_GRAPH tag is set to YES then doxygen will show the +# dependencies a directory has on other directories in a graphical way. The +# dependency relations are determined by the #include relations between the +# files in the directories. +# The default value is: YES. +# This tag requires that the tag HAVE_DOT is set to YES. + +DIRECTORY_GRAPH = YES + +# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images +# generated by dot. +# Note: If you choose svg you need to set HTML_FILE_EXTENSION to xhtml in order +# to make the SVG files visible in IE 9+ (other browsers do not have this +# requirement). +# Possible values are: png, jpg, gif and svg. +# The default value is: png. +# This tag requires that the tag HAVE_DOT is set to YES. + +DOT_IMAGE_FORMAT = png + +# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to +# enable generation of interactive SVG images that allow zooming and panning. +# +# Note that this requires a modern browser other than Internet Explorer. Tested +# and working are Firefox, Chrome, Safari, and Opera. +# Note: For IE 9+ you need to set HTML_FILE_EXTENSION to xhtml in order to make +# the SVG files visible. Older versions of IE do not have SVG support. +# The default value is: NO. +# This tag requires that the tag HAVE_DOT is set to YES. + +INTERACTIVE_SVG = NO + +# The DOT_PATH tag can be used to specify the path where the dot tool can be +# found. If left blank, it is assumed the dot tool can be found in the path. +# This tag requires that the tag HAVE_DOT is set to YES. 
+ +DOT_PATH = + +# The DOTFILE_DIRS tag can be used to specify one or more directories that +# contain dot files that are included in the documentation (see the \dotfile +# command). +# This tag requires that the tag HAVE_DOT is set to YES. + +DOTFILE_DIRS = + +# The MSCFILE_DIRS tag can be used to specify one or more directories that +# contain msc files that are included in the documentation (see the \mscfile +# command). + +MSCFILE_DIRS = + +# The DIAFILE_DIRS tag can be used to specify one or more directories that +# contain dia files that are included in the documentation (see the \diafile +# command). + +DIAFILE_DIRS = + +# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of nodes +# that will be shown in the graph. If the number of nodes in a graph becomes +# larger than this value, doxygen will truncate the graph, which is visualized +# by representing a node as a red box. Note that doxygen if the number of direct +# children of the root node in a graph is already larger than +# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note that +# the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH. +# Minimum value: 0, maximum value: 10000, default value: 50. +# This tag requires that the tag HAVE_DOT is set to YES. + +DOT_GRAPH_MAX_NODES = 50 + +# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the graphs +# generated by dot. A depth value of 3 means that only nodes reachable from the +# root by following a path via at most 3 edges will be shown. Nodes that lay +# further from the root node will be omitted. Note that setting this option to 1 +# or 2 may greatly reduce the computation time needed for large code bases. Also +# note that the size of a graph can be further restricted by +# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction. +# Minimum value: 0, maximum value: 1000, default value: 0. +# This tag requires that the tag HAVE_DOT is set to YES. + +MAX_DOT_GRAPH_DEPTH = 0 + +# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent +# background. This is disabled by default, because dot on Windows does not seem +# to support this out of the box. +# +# Warning: Depending on the platform used, enabling this option may lead to +# badly anti-aliased labels on the edges of a graph (i.e. they become hard to +# read). +# The default value is: NO. +# This tag requires that the tag HAVE_DOT is set to YES. + +DOT_TRANSPARENT = NO + +# Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output +# files in one run (i.e. multiple -o and -T options on the command line). This +# makes dot run faster, but since only newer versions of dot (>1.8.10) support +# this, this feature is disabled by default. +# The default value is: NO. +# This tag requires that the tag HAVE_DOT is set to YES. + +DOT_MULTI_TARGETS = YES + +# If the GENERATE_LEGEND tag is set to YES doxygen will generate a legend page +# explaining the meaning of the various boxes and arrows in the dot generated +# graphs. +# The default value is: YES. +# This tag requires that the tag HAVE_DOT is set to YES. + +GENERATE_LEGEND = YES + +# If the DOT_CLEANUP tag is set to YES doxygen will remove the intermediate dot +# files that are used to generate the various graphs. +# The default value is: YES. +# This tag requires that the tag HAVE_DOT is set to YES. 
+ +DOT_CLEANUP = YES diff --git a/doc/environment.yml b/doc/environment.yml new file mode 100644 index 0000000..048c8ae --- /dev/null +++ b/doc/environment.yml @@ -0,0 +1,9 @@ +name: RTD +channels: + - conda-forge + - defaults +dependencies: + - python=3.8 + - doxygen>=1.9.1 + - breathe + - sphinx-argparse diff --git a/doc/index.rst b/doc/index.rst new file mode 100644 index 0000000..2e72a50 --- /dev/null +++ b/doc/index.rst @@ -0,0 +1,33 @@ +.. orcus documentation master file, created by + sphinx-quickstart on Tue Sep 22 20:54:14 2015. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +.. _index: + +Orcus documentation +=================== + +Orcus is a library that provides a collection of standalone file processing +filters and utilities. It was originally focused on providing filters for +spreadsheet documents, but filters for other types of documents have been +added to the mix. + +Contents: + +.. toctree:: + :maxdepth: 1 + + overview/index.rst + cpp/index.rst + python/index.rst + cli/index.rst + notes/index.rst + + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`search` + diff --git a/doc/notes/index.rst b/doc/notes/index.rst new file mode 100644 index 0000000..a474bab --- /dev/null +++ b/doc/notes/index.rst @@ -0,0 +1,9 @@ + +Notes +===== + +.. toctree:: + :maxdepth: 1 + + xml_map/index.rst + json_map/index.rst diff --git a/doc/notes/json_map/example.json b/doc/notes/json_map/example.json new file mode 100644 index 0000000..e7a2110 --- /dev/null +++ b/doc/notes/json_map/example.json @@ -0,0 +1,203 @@ +[ + { + "id": 1, + "name": [ + "Tab", + "Limpenny" + ], + "active": true, + "gender": "Male", + "language": "Kazakh" + }, + { + "id": 2, + "name": [ + "Manda", + "Hadgraft" + ], + "active": false, + "gender": "Female", + "language": "Bislama" + }, + { + "id": 3, + "name": [ + "Mickie", + "Boreham" + ], + "active": false, + "gender": "Male", + "language": "Swahili" + }, + { + "id": 4, + "name": [ + "Celinka", + "Brookfield" + ], + "active": false, + "gender": "Female", + "language": "Gagauz" + }, + { + "id": 5, + "name": [ + "Muffin", + "Bleas" + ], + "active": false, + "gender": "Female", + "language": "Hiri Motu" + }, + { + "id": 6, + "name": [ + "Jackelyn", + "Crumb" + ], + "active": false, + "gender": "Female", + "language": "Northern Sotho" + }, + { + "id": 7, + "name": [ + "Tessie", + "Hollingsbee" + ], + "active": true, + "gender": "Female", + "language": "Fijian" + }, + { + "id": 8, + "name": [ + "Yank", + "Wernham" + ], + "active": false, + "gender": "Male", + "language": "Tok Pisin" + }, + { + "id": 9, + "name": [ + "Brendan", + "Lello" + ], + "active": true, + "gender": "Male", + "language": "Fijian" + }, + { + "id": 10, + "name": [ + "Arabel", + "Rigg" + ], + "active": false, + "gender": "Female", + "language": "Kyrgyz" + }, + { + "id": 11, + "name": [ + "Carolann", + "McElory" + ], + "active": false, + "gender": "Female", + "language": "Pashto" + }, + { + "id": 12, + "name": [ + "Gasparo", + "Flack" + ], + "active": false, + "gender": "Male", + "language": "Telugu" + }, + { + "id": 13, + "name": [ + "Eolanda", + "Polendine" + ], + "active": false, + "gender": "Female", + "language": "Kashmiri" + }, + { + "id": 14, + "name": [ + "Brock", + "McCaw" + ], + "active": false, + "gender": "Male", + "language": "Tsonga" + }, + { + "id": 15, + "name": [ + "Wenda", + "Espinas" + ], + "active": false, + "gender": "Female", + "language": "Bulgarian" + }, + { + "id": 16, + "name": [ + "Zachary", + 
"Banane" + ], + "active": true, + "gender": "Male", + "language": "Persian" + }, + { + "id": 17, + "name": [ + "Sallyanne", + "Mengue" + ], + "active": false, + "gender": "Female", + "language": "Latvian" + }, + { + "id": 18, + "name": [ + "Elizabet", + "Hoofe" + ], + "active": true, + "gender": "Female", + "language": "Tswana" + }, + { + "id": 19, + "name": [ + "Alastair", + "Hutchence" + ], + "active": true, + "gender": "Male", + "language": "Ndebele" + }, + { + "id": 20, + "name": [ + "Minor", + "Worland" + ], + "active": true, + "gender": "Male", + "language": "Dutch" + } +] + diff --git a/doc/notes/json_map/index.rst b/doc/notes/json_map/index.rst new file mode 100644 index 0000000..00d6626 --- /dev/null +++ b/doc/notes/json_map/index.rst @@ -0,0 +1,326 @@ + +Mapping JSON to spreadsheet +=========================== + +This tutorial covers how to map JSON document to a spreadsheet document, very +similar to what we covered in :ref:`this tutorial <mapping-xml-to-ss>` +where we illustrated how to map XML document to a spreadsheet document. + +Throughout this tutorial, we will be using :download:`this sample JSON document <example.json>` +to illustrate how to achieve it using the ``orcus-json`` command. The structure +of this tutorial will be similar to the structure of the XML mapping counterpart, +since the steps are very similar. + +Examining the structure of the input JSON document +-------------------------------------------------- + +Let's first take a look at the sample JSON document: + +.. code-block:: javascript + + [ + { + "id": 1, + "name": [ + "Tab", + "Limpenny" + ], + "active": true, + "gender": "Male", + "language": "Kazakh" + }, + { + "id": 2, + "name": [ + "Manda", + "Hadgraft" + ], + "active": false, + "gender": "Female", + "language": "Bislama" + }, + { + "id": 3, + "name": [ + "Mickie", + "Boreham" + ], + "active": false, + "gender": "Male", + "language": "Swahili" + }, + + ... + +This is essentially the same content as the XML sample document we used in the +:ref:`last tutorial <mapping-xml-to-ss>` but re-formatted in JSON. + +Let run the following command: + +.. code-block:: + + orcus-json --mode structure example.json + +to analyze the structure of this JSON document. The command will generate the +following output: + +.. code-block:: + + $array[20].object(*)['active'].value + $array[20].object(*)['gender'].value + $array[20].object(*)['id'].value + $array[20].object(*)['language'].value + $array[20].object(*)['name'].array[2].value[0,1] + +This structure output resembles a variant of JSONPath but some modifications +are applied. It has the following characteristics: + +* The ``$`` symbol represents the root of the structure. +* Array node takes the form of either ``array[N]``, where the value of ``N`` + represents the number of elements. +* Object node takes the form of ``object['key']``. +* Value node, which is always a leaf node, is represented by ``value`` except + when the leaf node is an array containing values, it takes the form of ``value[0,1,2,...]``. +* The ``.`` symbols represent the node boundaries. +* The ``(*)`` symbols represent recurring nodes, which can be either array or + object. + +Auto-mapping the JSON document +------------------------------ + +Let's map this JSON document to a spreadsheet document by running: + +.. code-block:: + + orcus-json --mode map -o out -f flat example.json + +This is very similar to what we did in the XML mapping tutorial, except that +the command used is ``orcus-json`` and the input file is ``example.json``. 
+This will create file named ``out/range-0.txt`` which contains the following: + +.. code-block:: + + --- + Sheet name: range-0 + rows: 21 cols: 6 + +--------+-----------+-------------+-----------+--------+----------------+ + | id | field 0 | field 1 | active | gender | language | + +--------+-----------+-------------+-----------+--------+----------------+ + | 1 [v] | Tab | Limpenny | true [b] | Male | Kazakh | + +--------+-----------+-------------+-----------+--------+----------------+ + | 2 [v] | Manda | Hadgraft | false [b] | Female | Bislama | + +--------+-----------+-------------+-----------+--------+----------------+ + | 3 [v] | Mickie | Boreham | false [b] | Male | Swahili | + +--------+-----------+-------------+-----------+--------+----------------+ + | 4 [v] | Celinka | Brookfield | false [b] | Female | Gagauz | + +--------+-----------+-------------+-----------+--------+----------------+ + | 5 [v] | Muffin | Bleas | false [b] | Female | Hiri Motu | + +--------+-----------+-------------+-----------+--------+----------------+ + | 6 [v] | Jackelyn | Crumb | false [b] | Female | Northern Sotho | + +--------+-----------+-------------+-----------+--------+----------------+ + | 7 [v] | Tessie | Hollingsbee | true [b] | Female | Fijian | + +--------+-----------+-------------+-----------+--------+----------------+ + | 8 [v] | Yank | Wernham | false [b] | Male | Tok Pisin | + +--------+-----------+-------------+-----------+--------+----------------+ + | 9 [v] | Brendan | Lello | true [b] | Male | Fijian | + +--------+-----------+-------------+-----------+--------+----------------+ + | 10 [v] | Arabel | Rigg | false [b] | Female | Kyrgyz | + +--------+-----------+-------------+-----------+--------+----------------+ + | 11 [v] | Carolann | McElory | false [b] | Female | Pashto | + +--------+-----------+-------------+-----------+--------+----------------+ + | 12 [v] | Gasparo | Flack | false [b] | Male | Telugu | + +--------+-----------+-------------+-----------+--------+----------------+ + | 13 [v] | Eolanda | Polendine | false [b] | Female | Kashmiri | + +--------+-----------+-------------+-----------+--------+----------------+ + | 14 [v] | Brock | McCaw | false [b] | Male | Tsonga | + +--------+-----------+-------------+-----------+--------+----------------+ + | 15 [v] | Wenda | Espinas | false [b] | Female | Bulgarian | + +--------+-----------+-------------+-----------+--------+----------------+ + | 16 [v] | Zachary | Banane | true [b] | Male | Persian | + +--------+-----------+-------------+-----------+--------+----------------+ + | 17 [v] | Sallyanne | Mengue | false [b] | Female | Latvian | + +--------+-----------+-------------+-----------+--------+----------------+ + | 18 [v] | Elizabet | Hoofe | true [b] | Female | Tswana | + +--------+-----------+-------------+-----------+--------+----------------+ + | 19 [v] | Alastair | Hutchence | true [b] | Male | Ndebele | + +--------+-----------+-------------+-----------+--------+----------------+ + | 20 [v] | Minor | Worland | true [b] | Male | Dutch | + +--------+-----------+-------------+-----------+--------+----------------+ + +Again, this is very similar to what we saw in the XML-mapping example. Note +that cell values with ``[v]`` and ``[b]`` indicate numeric and boolean values, +respectively. Cells with no suffixes are string cells. + +Custom-mapping using map file +----------------------------- + +This process is also very similar to the process we followed for XML mapping. 
+We first auto-generate a map file, modify it, and use it to do the mapping again. +Since there isn't much difference between XML mapping and JSON mapping, let's +just go through this very quick. + +First step is to generate a map file for the auto-detected range by running: + +.. code-block:: + + orcus-json --mode map-gen -o map.json example.json + +which will write the mapping rules to ``map.json`` file. When you open the generated +map file, you will see something like the following: + +.. code-block:: javascript + + { + "sheets": [ + "range-0" + ], + "ranges": [ + { + "sheet": "range-0", + "row": 0, + "column": 0, + "row-header": true, + "fields": [ + { + "path": "$[]['id']" + }, + { + "path": "$[]['name'][0]" + }, + { + "path": "$[]['name'][1]" + }, + { + "path": "$[]['active']" + }, + { + "path": "$[]['gender']" + }, + { + "path": "$[]['language']" + } + ], + "row-groups": [ + { + "path": "$" + } + ] + } + ] + } + +The structure and content of the map file should look similar to the XML counterpart, +except that it is now in JSON format, and the paths are expressed in slightly +modified JSONPath bracket notation, where ``[]`` represents an array node with +no position specified. + +Now that we have a map file, let's modify this and use it to do the mapping once +again. Just like the XML mapping example, we are going to: + +* insert two blank rows above, +* drop the ``id`` and ``active`` fields, +* specify labels for the fields, and +* change the sheet name from ``range-0`` to ``My Data``. + +This is what we've come up with: + +.. code-block:: javascript + + { + "sheets": [ + "My Data" + ], + "ranges": [ + { + "sheet": "My Data", + "row": 2, + "column": 0, + "row-header": true, + "fields": [ + { + "path": "$[]['name'][0]", "label": "First Name" + }, + { + "path": "$[]['name'][1]", "label": "Last Name" + }, + { + "path": "$[]['gender']", "label": "Gender" + }, + { + "path": "$[]['language']", "label": "Language" + } + ], + "row-groups": [ + { + "path": "$" + } + ] + } + ] + } + +We'll save this file as ``map-modified.json``, and pass it to the ``orcus-json`` +command via ``--map`` or ``-m`` option: + +.. code-block:: + + orcus-json --mode map -o out -f flat -m map-modified.json example.json + +Let's check the output in ``out/My Data.txt`` and see what it contains: + +.. 
code-block:: + + --- + Sheet name: My Data + rows: 23 cols: 4 + +------------+-------------+--------+----------------+ + | | | | | + +------------+-------------+--------+----------------+ + | | | | | + +------------+-------------+--------+----------------+ + | First Name | Last Name | Gender | Language | + +------------+-------------+--------+----------------+ + | Tab | Limpenny | Male | Kazakh | + +------------+-------------+--------+----------------+ + | Manda | Hadgraft | Female | Bislama | + +------------+-------------+--------+----------------+ + | Mickie | Boreham | Male | Swahili | + +------------+-------------+--------+----------------+ + | Celinka | Brookfield | Female | Gagauz | + +------------+-------------+--------+----------------+ + | Muffin | Bleas | Female | Hiri Motu | + +------------+-------------+--------+----------------+ + | Jackelyn | Crumb | Female | Northern Sotho | + +------------+-------------+--------+----------------+ + | Tessie | Hollingsbee | Female | Fijian | + +------------+-------------+--------+----------------+ + | Yank | Wernham | Male | Tok Pisin | + +------------+-------------+--------+----------------+ + | Brendan | Lello | Male | Fijian | + +------------+-------------+--------+----------------+ + | Arabel | Rigg | Female | Kyrgyz | + +------------+-------------+--------+----------------+ + | Carolann | McElory | Female | Pashto | + +------------+-------------+--------+----------------+ + | Gasparo | Flack | Male | Telugu | + +------------+-------------+--------+----------------+ + | Eolanda | Polendine | Female | Kashmiri | + +------------+-------------+--------+----------------+ + | Brock | McCaw | Male | Tsonga | + +------------+-------------+--------+----------------+ + | Wenda | Espinas | Female | Bulgarian | + +------------+-------------+--------+----------------+ + | Zachary | Banane | Male | Persian | + +------------+-------------+--------+----------------+ + | Sallyanne | Mengue | Female | Latvian | + +------------+-------------+--------+----------------+ + | Elizabet | Hoofe | Female | Tswana | + +------------+-------------+--------+----------------+ + | Alastair | Hutchence | Male | Ndebele | + +------------+-------------+--------+----------------+ + | Minor | Worland | Male | Dutch | + +------------+-------------+--------+----------------+ + +The ``id`` and ``active`` fields are gone, the remaining fields have custom +labels we specified, and there are two blank rows above. It appears that all +the changes we have intended have been properly applied. 
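+
+To recap, here is the complete sequence of ``orcus-json`` commands used in this
+tutorial, from the initial structure analysis to the final mapping with the
+modified map file (``map.json`` was edited and saved as ``map-modified.json``
+between the last two steps):
+
+.. code-block::
+
+ orcus-json --mode structure example.json
+ orcus-json --mode map -o out -f flat example.json
+ orcus-json --mode map-gen -o map.json example.json
+ orcus-json --mode map -o out -f flat -m map-modified.json example.json
+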
diff --git a/doc/notes/xml_map/example.xml b/doc/notes/xml_map/example.xml new file mode 100644 index 0000000..0dbb6d4 --- /dev/null +++ b/doc/notes/xml_map/example.xml @@ -0,0 +1,183 @@ +<?xml version="1.0" encoding="UTF-8"?> +<dataset> + <record id="1"> + <name> + <first>Tab</first> + <last>Limpenny</last> + </name> + <active>true</active> + <gender>Male</gender> + <language>Kazakh</language> + </record> + <record id="2"> + <name> + <first>Manda</first> + <last>Hadgraft</last> + </name> + <active>false</active> + <gender>Female</gender> + <language>Bislama</language> + </record> + <record id="3"> + <name> + <first>Mickie</first> + <last>Boreham</last> + </name> + <active>false</active> + <gender>Male</gender> + <language>Swahili</language> + </record> + <record id="4"> + <name> + <first>Celinka</first> + <last>Brookfield</last> + </name> + <active>false</active> + <gender>Female</gender> + <language>Gagauz</language> + </record> + <record id="5"> + <name> + <first>Muffin</first> + <last>Bleas</last> + </name> + <active>false</active> + <gender>Female</gender> + <language>Hiri Motu</language> + </record> + <record id="6"> + <name> + <first>Jackelyn</first> + <last>Crumb</last> + </name> + <active>false</active> + <gender>Female</gender> + <language>Northern Sotho</language> + </record> + <record id="7"> + <name> + <first>Tessie</first> + <last>Hollingsbee</last> + </name> + <active>true</active> + <gender>Female</gender> + <language>Fijian</language> + </record> + <record id="8"> + <name> + <first>Yank</first> + <last>Wernham</last> + </name> + <active>false</active> + <gender>Male</gender> + <language>Tok Pisin</language> + </record> + <record id="9"> + <name> + <first>Brendan</first> + <last>Lello</last> + </name> + <active>true</active> + <gender>Male</gender> + <language>Fijian</language> + </record> + <record id="10"> + <name> + <first>Arabel</first> + <last>Rigg</last> + </name> + <active>false</active> + <gender>Female</gender> + <language>Kyrgyz</language> + </record> + <record id="11"> + <name> + <first>Carolann</first> + <last>McElory</last> + </name> + <active>false</active> + <gender>Female</gender> + <language>Pashto</language> + </record> + <record id="12"> + <name> + <first>Gasparo</first> + <last>Flack</last> + </name> + <active>false</active> + <gender>Male</gender> + <language>Telugu</language> + </record> + <record id="13"> + <name> + <first>Eolanda</first> + <last>Polendine</last> + </name> + <active>false</active> + <gender>Female</gender> + <language>Kashmiri</language> + </record> + <record id="14"> + <name> + <first>Brock</first> + <last>McCaw</last> + </name> + <active>false</active> + <gender>Male</gender> + <language>Tsonga</language> + </record> + <record id="15"> + <name> + <first>Wenda</first> + <last>Espinas</last> + </name> + <active>false</active> + <gender>Female</gender> + <language>Bulgarian</language> + </record> + <record id="16"> + <name> + <first>Zachary</first> + <last>Banane</last> + </name> + <active>true</active> + <gender>Male</gender> + <language>Persian</language> + </record> + <record id="17"> + <name> + <first>Sallyanne</first> + <last>Mengue</last> + </name> + <active>false</active> + <gender>Female</gender> + <language>Latvian</language> + </record> + <record id="18"> + <name> + <first>Elizabet</first> + <last>Hoofe</last> + </name> + <active>true</active> + <gender>Female</gender> + <language>Tswana</language> + </record> + <record id="19"> + <name> + <first>Alastair</first> + <last>Hutchence</last> + </name> + <active>true</active> + 
<gender>Male</gender> + <language>Ndebele</language> + </record> + <record id="20"> + <name> + <first>Minor</first> + <last>Worland</last> + </name> + <active>true</active> + <gender>Male</gender> + <language>Dutch</language> + </record> +</dataset> diff --git a/doc/notes/xml_map/index.rst b/doc/notes/xml_map/index.rst new file mode 100644 index 0000000..258920a --- /dev/null +++ b/doc/notes/xml_map/index.rst @@ -0,0 +1,314 @@ + +.. _mapping-xml-to-ss: + +Mapping XML to spreadsheet +========================== + +In this tutorial, we will go over how to use the ``orcus-xml`` command to map an +XML content into a spreadsheet document. We will be using :download:`this sample XML +document <example.xml>` throughout this tutorial. + +Examining the structure of input XML document +--------------------------------------------- + +First, let's examine the general structure of this XML document: + +.. code-block:: XML + + <?xml version="1.0" encoding="UTF-8"?> + <dataset> + <record id="1"> + <name> + <first>Tab</first> + <last>Limpenny</last> + </name> + <active>true</active> + <gender>Male</gender> + <language>Kazakh</language> + </record> + <record id="2"> + <name> + <first>Manda</first> + <last>Hadgraft</last> + </name> + <active>false</active> + <gender>Female</gender> + <language>Bislama</language> + </record> + <record id="3"> + + ... + + +It starts with the ``<dataset>`` element as its root element, which contains +recurring ``<record>`` elements each of which contains multiple fields. By +looking at each ``<record>`` element structure, you can easily infer how the +record content is structured. You can also run ``orcus-xml`` in structure +mode in order to detect the structure of its content. + +Running the following command + +.. code-block:: + + orcus-xml --mode structure example.xml + +should generate the following output: + +.. code-block:: + + /dataset + /dataset/record[*] + /dataset/record[*]/@id + /dataset/record[*]/name + /dataset/record[*]/name/first + /dataset/record[*]/name/last + /dataset/record[*]/active + /dataset/record[*]/gender + /dataset/record[*]/language + +This output lists the paths of all encountered "leaf node" items one item per +line, in order of occurrence. Each path is expressed in a XPath-like format, +except for recurring "anchor" elements which are suffixed with the ``[*]`` +symbols. An anchor element in this context is defined as a recurring non-leaf +element that contains either an attribute or a leaf element. You can think of +anchor elements as elements that define the individual record boundaries. + +Auto-mapping the XML document +----------------------------- + +Mapping this XML document to a spreadsheet document can be done by simply running +``orcus-xml`` in map mode. You also need to specify the output format type and +the output directory in order to see the content of the mapped spreadsheet +document. Running the command: + +.. code-block:: + + orcus-xml --mode map -f flat -o out example.xml + +will create an output file named ``out/range-0.txt`` which contains the following: + +.. 
code-block:: + + --- + Sheet name: range-0 + rows: 21 cols: 6 + +--------+-----------+-------------+--------+--------+----------------+ + | id | first | last | active | gender | language | + +--------+-----------+-------------+--------+--------+----------------+ + | 1 [v] | Tab | Limpenny | true | Male | Kazakh | + +--------+-----------+-------------+--------+--------+----------------+ + | 2 [v] | Manda | Hadgraft | false | Female | Bislama | + +--------+-----------+-------------+--------+--------+----------------+ + | 3 [v] | Mickie | Boreham | false | Male | Swahili | + +--------+-----------+-------------+--------+--------+----------------+ + | 4 [v] | Celinka | Brookfield | false | Female | Gagauz | + +--------+-----------+-------------+--------+--------+----------------+ + | 5 [v] | Muffin | Bleas | false | Female | Hiri Motu | + +--------+-----------+-------------+--------+--------+----------------+ + | 6 [v] | Jackelyn | Crumb | false | Female | Northern Sotho | + +--------+-----------+-------------+--------+--------+----------------+ + | 7 [v] | Tessie | Hollingsbee | true | Female | Fijian | + +--------+-----------+-------------+--------+--------+----------------+ + | 8 [v] | Yank | Wernham | false | Male | Tok Pisin | + +--------+-----------+-------------+--------+--------+----------------+ + | 9 [v] | Brendan | Lello | true | Male | Fijian | + +--------+-----------+-------------+--------+--------+----------------+ + | 10 [v] | Arabel | Rigg | false | Female | Kyrgyz | + +--------+-----------+-------------+--------+--------+----------------+ + | 11 [v] | Carolann | McElory | false | Female | Pashto | + +--------+-----------+-------------+--------+--------+----------------+ + | 12 [v] | Gasparo | Flack | false | Male | Telugu | + +--------+-----------+-------------+--------+--------+----------------+ + | 13 [v] | Eolanda | Polendine | false | Female | Kashmiri | + +--------+-----------+-------------+--------+--------+----------------+ + | 14 [v] | Brock | McCaw | false | Male | Tsonga | + +--------+-----------+-------------+--------+--------+----------------+ + | 15 [v] | Wenda | Espinas | false | Female | Bulgarian | + +--------+-----------+-------------+--------+--------+----------------+ + | 16 [v] | Zachary | Banane | true | Male | Persian | + +--------+-----------+-------------+--------+--------+----------------+ + | 17 [v] | Sallyanne | Mengue | false | Female | Latvian | + +--------+-----------+-------------+--------+--------+----------------+ + | 18 [v] | Elizabet | Hoofe | true | Female | Tswana | + +--------+-----------+-------------+--------+--------+----------------+ + | 19 [v] | Alastair | Hutchence | true | Male | Ndebele | + +--------+-----------+-------------+--------+--------+----------------+ + | 20 [v] | Minor | Worland | true | Male | Dutch | + +--------+-----------+-------------+--------+--------+----------------+ + +We are using the ``flat`` format type which writes the data range of a sheet +in a human-readable grid output. + +The mapped sheet content is the result of the automatic mapping of the original +XML document. In automatic mapping, all attributes and element contents that +can be mapped as field values will be mapped, and the sheet name will be automatically +generated. + +Although not applicable to this particular example, if the source XML document +contains multiple mappable ranges, they will get mapped to multiple sheets, one +sheet per range. 
+ +Custom-mapping using map file +----------------------------- + +Generating map file +^^^^^^^^^^^^^^^^^^^ + +Automatic-mapping should work reasonably well in many cases, but sometime you +may need to customize how you map your data, and this section will go over how +you could do just that. + +The short answer is that you will need to create a map definition file and pass +it to the ``orcus-xml`` command via ``-m`` or ``--map`` option. The easiest +way to go about it is to have one generated for you. + +Running the following command: + +.. code-block:: + + orcus-xml --mode map-gen -o map.xml example.xml + +will generate a map file ``map.xml`` which contains the mapping definition based +on the auto-detected structure. The content of ``map.xml`` generated from the +example XML document should look like this: + +.. code-block:: XML + + <?xml version="1.0"?> + <map xmlns="https://gitlab.com/orcus/orcus/xml-map-definition"> + <sheet name="range-0"/> + <range sheet="range-0" row="0" column="0"> + <field path="/dataset/record/@id"/> + <field path="/dataset/record/name/first"/> + <field path="/dataset/record/name/last"/> + <field path="/dataset/record/active"/> + <field path="/dataset/record/gender"/> + <field path="/dataset/record/language"/> + <row-group path="/dataset/record"/> + </range> + </map> + +Note that since the original map file content does not include any line breaks, +you may want to run it through an XML reformatting tool such as +`xmllint <http://xmlsoft.org/xmllint.html>`_ to "prettify" its content before +viewing. + +Map file structure +^^^^^^^^^^^^^^^^^^ + +Hopefully the structure of the map file is self-explanatory, but let us go over +it a little. The ``map`` element is the root element which contains one or +more ``sheet`` elements and one or more ``range`` elements. The ``sheet`` +elements specify how many sheets should be created in the spreadsheet model, +and what their names should be via their ``name`` attributes. The ordering of +the ``sheet`` elements will reflect the ordering of the sheets in the final +spreadsheet document. + +Each ``range`` element defines one mapped range of the source XML document, and +this element itself stores the top-left position of the range in the final +spreadsheet document via ``sheet``, ``row`` and ``column`` attributes. The ``range`` +element then contains one or more ``field`` elements, and one or more ``row-group`` +elements. + +Each ``field`` element defines one field within the mapped range and the path of +the value in the source XML document. The path is expressed in XPath format. +The ordering of the ``field`` elements reflects the ordering of the field columns +in the final spreadsheet document. + +Each ``row-group`` element defines the path of an anchor element. For a simple +XML document such as our current example, you only need one ``row-group`` +element. But an XML document with more complex structure may need more than one +``row-group`` element to properly map nested recurring elements. + +Modifying map file +^^^^^^^^^^^^^^^^^^ + +Let's make some changes to this map file. First, the default sheet name ``range-0`` +doesn't look very good, so we'll change it to ``My Data``. Also, let's assume +we aren't really interested in the ID values or the "active" values (whatever +they may mean), so we'll drop those two fields. Additionally, since we don't like +the default field labels, which are taken literally from the names of the corresponding +attributes or elements, we'll define custom field labels. 
And finally, we'll add +two empty rows above the data range so that we can edit in some nice title afterward. + +The modified map file will look like this: + +.. code-block:: XML + + <?xml version="1.0"?> + <map xmlns="https://gitlab.com/orcus/orcus/xml-map-definition"> + <sheet name="My Data"/> + <range sheet="My Data" row="2" column="0"> + <field path="/dataset/record/name/first" label="First Name"/> + <field path="/dataset/record/name/last" label="Last Name"/> + <field path="/dataset/record/gender" label="Gender"/> + <field path="/dataset/record/language" label="Language"/> + <row-group path="/dataset/record"/> + </range> + </map> + +We'll save this as ``map-modified.xml``, and pass it to the ``orcus-xml`` command +this time around like so: + +.. code-block:: + + ./src/orcus-xml --mode map -m map-modified.xml -o out -f flat example.xml + +This will output the content of the sheet to ``out/My Data.txt``, which will +look like this: + +.. code-block:: + + --- + Sheet name: My Data + rows: 23 cols: 4 + +------------+-------------+--------+----------------+ + | | | | | + +------------+-------------+--------+----------------+ + | | | | | + +------------+-------------+--------+----------------+ + | First Name | Last Name | Gender | Language | + +------------+-------------+--------+----------------+ + | Tab | Limpenny | Male | Kazakh | + +------------+-------------+--------+----------------+ + | Manda | Hadgraft | Female | Bislama | + +------------+-------------+--------+----------------+ + | Mickie | Boreham | Male | Swahili | + +------------+-------------+--------+----------------+ + | Celinka | Brookfield | Female | Gagauz | + +------------+-------------+--------+----------------+ + | Muffin | Bleas | Female | Hiri Motu | + +------------+-------------+--------+----------------+ + | Jackelyn | Crumb | Female | Northern Sotho | + +------------+-------------+--------+----------------+ + | Tessie | Hollingsbee | Female | Fijian | + +------------+-------------+--------+----------------+ + | Yank | Wernham | Male | Tok Pisin | + +------------+-------------+--------+----------------+ + | Brendan | Lello | Male | Fijian | + +------------+-------------+--------+----------------+ + | Arabel | Rigg | Female | Kyrgyz | + +------------+-------------+--------+----------------+ + | Carolann | McElory | Female | Pashto | + +------------+-------------+--------+----------------+ + | Gasparo | Flack | Male | Telugu | + +------------+-------------+--------+----------------+ + | Eolanda | Polendine | Female | Kashmiri | + +------------+-------------+--------+----------------+ + | Brock | McCaw | Male | Tsonga | + +------------+-------------+--------+----------------+ + | Wenda | Espinas | Female | Bulgarian | + +------------+-------------+--------+----------------+ + | Zachary | Banane | Male | Persian | + +------------+-------------+--------+----------------+ + | Sallyanne | Mengue | Female | Latvian | + +------------+-------------+--------+----------------+ + | Elizabet | Hoofe | Female | Tswana | + +------------+-------------+--------+----------------+ + | Alastair | Hutchence | Male | Ndebele | + +------------+-------------+--------+----------------+ + | Minor | Worland | Male | Dutch | + +------------+-------------+--------+----------------+ + +The new output now only contains four fields, with custom labels at the top, and +now we have two empty rows above just like we intended. 
diff --git a/doc/overview/doc-orcus.rst b/doc/overview/doc-orcus.rst new file mode 100644 index 0000000..0577d4e --- /dev/null +++ b/doc/overview/doc-orcus.rst @@ -0,0 +1,176 @@ + +.. highlight:: cpp + +Use orcus's spreadsheet document class +====================================== + +If you want to use orcus' :cpp:class:`~orcus::spreadsheet::document` as your +document store, you can use the :cpp:class:`~orcus::spreadsheet::import_factory` +class that orcus provides, which already implements all necessary interfaces. +The example code shown below illustrates how to do this: + +.. literalinclude:: ../../doc_example/spreadsheet_doc_1.cpp + :language: C++ + +This example code loads a file saved in the Open Document Spreadsheet format +stored in a directory whose path is to be defined in the environment variable +named ``INPUTDIR``. In this example, we don't check for the validity of ``INPUTDIR`` +for brevity's sake. + +The input file consists of the following content on its first sheet. + +.. figure:: /_static/images/overview/doc-content.png + +While it is not clear from this screenshot, cell C2 contains the formula +**CONCATENATE(A2, " ", B2)** to concatenate the content of A2 and B2 with a +space between them. Cells C3 through C7 also contain similar formula +expressions. + +Let's walk through this code step by step. First, we need to instantiate the +document store. Here we are using the concrete :cpp:class:`~orcus::spreadsheet::document` +class available in orcus. Then we immediately pass this document to the +:cpp:class:`~orcus::spreadsheet::import_factory` instance, also from orcus: + +.. literalinclude:: ../../doc_example/spreadsheet_doc_1.cpp + :language: C++ + :start-after: //!code-start: instantiate + :end-before: //!code-end: instantiate + :dedent: 4 + +The next step is to create the loader instance and pass the factory to it: + +.. literalinclude:: ../../doc_example/spreadsheet_doc_1.cpp + :language: C++ + :start-after: //!code-start: loader + :end-before: //!code-end: loader + :dedent: 4 + +In this example we are using the :cpp:class:`~orcus::orcus_ods` filter class +because the document we are loading is of Open Document Spreadsheet type, but +the process is the same for other document types, the only difference being +the name of the class. Once the filter object is constructed, we'll simply +load the file by calling its :cpp:func:`~orcus::orcus_ods::read_file` method +and passing the path to the file as its argument: + +.. literalinclude:: ../../doc_example/spreadsheet_doc_1.cpp + :language: C++ + :start-after: //!code-start: read-file + :end-before: //!code-end: read-file + :dedent: 4 + +Once this call returns, the document has been fully populated. What the rest +of the code does is to access the content of the first row of the first sheet of +the document. First, you need to get a reference to the internal cell value +store that we call *model context*: + +.. literalinclude:: ../../doc_example/spreadsheet_doc_1.cpp + :language: C++ + :start-after: //!code-start: model-context + :end-before: //!code-end: model-context + :dedent: 4 + +Since the content of cell A1 is a string, to get the value you need to first +get the ID of the string: + +.. literalinclude:: ../../doc_example/spreadsheet_doc_1.cpp + :language: C++ + :start-after: //!code-start: string-id + :end-before: //!code-end: string-id + :dedent: 4 + +Once you have the ID of the string, you can pass that to the model to get the +actual string value and print it to the standard output: + +..
literalinclude:: ../../doc_example/spreadsheet_doc_1.cpp + :language: C++ + :start-after: //!code-start: print-string + :end-before: //!code-end: print-string + :dedent: 4 + +Here we assume that the string value exists for the given ID. In case you +pass a string ID value to the :cpp:func:`get_string` method and there isn't a string +value associated with it, you'll get a null pointer returned from the call. + +The reason you need to take this two-step process to get a string value is +because all the string values stored in the cells are pooled at the document +model level, and the cells themselves only store the ID values as integers. + +You may also have noticed that the types surrounding the :cpp:class:`ixion::model_context` +class are all in the :cpp:any:`ixion` namespace. This is because orcus' own +:cpp:class:`~orcus::spreadsheet::document` class uses the formula engine and the +document model from the `ixion library <https://gitlab.com/ixion/ixion>`_ to handle +calculation of the formula cells stored in the document, and the formula engine +requires all cell values to be stored in the :cpp:class:`ixion::model_context` +instance. + +.. note:: The :cpp:class:`~orcus::spreadsheet::document` class in orcus uses + the formula engine from the `ixion library <https://gitlab.com/ixion/ixion>`_ + to calculate the results of the formula cells stored in the document. + +The rest of the code basically repeats the same process for cells B1 and C1: + +.. literalinclude:: ../../doc_example/spreadsheet_doc_1.cpp + :language: C++ + :start-after: //!code-start: rest + :end-before: //!code-end: rest + :dedent: 4 + +and generates the following output: + +.. code-block:: text + + A1: Number + B1: String + C1: Formula + +Accessing the numeric cell values is a bit simpler since the values are +stored directly with the cells. Using the document from the code example +above, the following code block: + +.. literalinclude:: ../../doc_example/spreadsheet_doc_1_num_and_formula.cpp + :language: C++ + :start-after: //!code-start: print-numeric-cells + :end-before: //!code-end: print-numeric-cells + :dedent: 4 + +will access the cells from A2 through A7 and print out their numeric values. +You should see the following output generated from this code block: + +.. code-block:: text + + A2: 1 + A3: 2 + A4: 3 + A5: 4 + A6: 5 + A7: 6 + +It's a bit more complex to handle formula cells, since each formula cell +contains two things: 1) the formula expression, which is stored as tokens +internally, and 2) the cached result of the formula. The following code +illustrates how to retrieve the cached formula results of cells C2 through +C7: + +.. literalinclude:: ../../doc_example/spreadsheet_doc_1_num_and_formula.cpp + :language: C++ + :start-after: //!code-start: print-formula-cells + :end-before: //!code-end: print-formula-cells + :dedent: 4 + +For each cell, this code first accesses the stored formula cell instance, gets +a reference to its cached result, then obtains its string result value to print +it out to the standard output. Running this block of code will produce the +following output: + +.. code-block:: text + + C2: 1 Andy + C3: 2 Bruce + C4: 3 Charlie + C5: 4 David + C6: 5 Edward + C7: 6 Frank + +.. warning:: In production code, you should probably check the formula cell + pointer, which may be null in case the cell at the specified + position is not a formula cell.
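+For example, a guard along the following lines avoids dereferencing a null
+pointer. This is a hedged sketch only: the ``get_formula_cell`` accessor, the
+address type, and the function name ``inspect_cell`` are assumptions based on
+the ixion model context described above, so check them against the version of
+ixion you are using.
+
+.. code-block:: cpp
+
+    #include <ixion/model_context.hpp>
+    #include <ixion/address.hpp>
+    #include <ixion/cell.hpp>
+
+    #include <iostream>
+
+    // Bail out gracefully when the cell at the given position is not a
+    // formula cell, instead of dereferencing a null pointer.
+    void inspect_cell(const ixion::model_context& cxt)
+    {
+        ixion::abs_address_t pos(0, 1, 2); // sheet 0, row 1, column 2 i.e. cell C2
+        const ixion::formula_cell* fc = cxt.get_formula_cell(pos);
+
+        if (!fc)
+        {
+            std::cout << "C2 is not a formula cell" << std::endl;
+            return;
+        }
+
+        // Safe to access the cached result here, as shown in the example above.
+    }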
diff --git a/doc/overview/doc-user.rst b/doc/overview/doc-user.rst new file mode 100644 index 0000000..a1292e5 --- /dev/null +++ b/doc/overview/doc-user.rst @@ -0,0 +1,574 @@ + +.. highlight:: cpp + +Use a user-defined custom document class +======================================== + +In this section we will demonstrate how you can use orcus to populate your own +custom document model by implementing your own set of interface classes and +passing it to the orcus import filter. The first example code shown below is +the *absolute* minimum that you need to implement in order for the orcus +filter to function properly: + +.. literalinclude:: ../../doc_example/spreadsheet_doc_2.cpp + :language: C++ + +Just like the example we used in the previous section, we are also loading a +document saved in the Open Document Spreadsheet format via +:cpp:class:`~orcus::orcus_ods`. The document being loaded is named +multi-sheets.ods, and contains three sheets, which are named **'1st +Sheet'**, **'2nd Sheet'**, and **'3rd Sheet'** in this exact order. When you +compile and execute the above code, you should get the following output: + +.. code-block:: text + + append_sheet: sheet index: 0; sheet name: 1st Sheet + append_sheet: sheet index: 1; sheet name: 2nd Sheet + append_sheet: sheet index: 2; sheet name: 3rd Sheet + +One primary role the import factory plays is to provide the orcus import +filter with the ability to create and insert a new sheet into the document. As +illustrated in the above code, it also provides access to existing sheets by +name or by position. Every import factory implementation must be a +derived class of the :cpp:class:`orcus::spreadsheet::iface::import_factory` +interface base class. At a minimum, it must implement + +* the :cpp:func:`~orcus::spreadsheet::iface::import_factory::append_sheet` + method, which inserts a new sheet and returns access to it, + +* two variants of the :cpp:func:`~orcus::spreadsheet::iface::import_factory::get_sheet` + method, which return access to an existing sheet, and + +* the :cpp:func:`~orcus::spreadsheet::iface::import_factory::finalize` method, + which gets called exactly once at the very end of the import, to give the + implementation a chance to perform post-import tasks. + +in order for the code to be buildable. Now, since all of the sheet accessor +methods return null pointers in this code, the import filter has no way of +populating the sheet data. To actually receive the sheet data from the import +filter, you must have these methods return valid pointers to sheet accessors. +The next example shows how that can be done. + + +Implement sheet interface +------------------------- + +In this section we will expand on the code in the previous section to +implement the sheet accessor interface, in order to receive cell values +in each individual sheet. In this example, we will define a structure +to hold a cell value, and store the cell values in a 2-dimensional array for each +sheet. First, let's define the cell value structure: + +.. literalinclude:: ../../doc_example/spreadsheet_doc_2_sheets_no_string_pool.cpp + :language: C++ + :start-after: //!code-start: cell_value + :end-before: //!code-end: cell_value + +As we will be handling only three cell types, i.e. empty, numeric, or string, +this structure will work just fine. We will also define a namespace +alias called ``ss`` for convenience. This will be used in later code.
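+For illustration, here is a minimal sketch of what such a ``cell_value``
+structure and the ``ss`` alias might look like. The enum and member names,
+and the target of the alias, are assumptions made for this sketch; the actual
+definitions live in ``spreadsheet_doc_2_sheets_no_string_pool.cpp`` and may
+differ in detail.
+
+.. code-block:: cpp
+
+    #include <orcus/spreadsheet/types.hpp>
+    #include <cstddef>
+
+    // Namespace alias used throughout the rest of the example code.
+    namespace ss = orcus::spreadsheet;
+
+    // The three cell types this example handles.
+    enum class cell_value_type { empty, numeric, string };
+
+    struct cell_value
+    {
+        cell_value_type type = cell_value_type::empty;
+        double value = 0.0;        // used when type == numeric
+        std::size_t str_index = 0; // string index, used when type == string
+    };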
+ +Next, we'll define a sheet class called ``my_sheet`` that stores the cell values +in a 2-dimensional array, and implements all required interfaces as a child class +of :cpp:class:`~orcus::spreadsheet::iface::import_sheet`. + +At a minimum, the sheet accessor class must implement the following virtual +methods to satisfy the interface requirements of +:cpp:class:`~orcus::spreadsheet::iface::import_sheet`. + +* :cpp:func:`~orcus::spreadsheet::iface::import_sheet::set_auto` - This is a + setter method for a cell whose type is undetermined. The implementor must + determine the value type of this cell, from the raw string value of the + cell. This method is used when loading a CSV document, for instance. + +* :cpp:func:`~orcus::spreadsheet::iface::import_sheet::set_string` - This is a + setter method for a cell that stores a string value. All cell string values + are expectd to be pooled for the entire document, and this method only + receives a string index into a centrally-managed string table. The document + model is expected to implement a central string table that can translate an + index into its actual string value. + +* :cpp:func:`~orcus::spreadsheet::iface::import_sheet::set_value` - This is a + setter method for a cell that stores a numeric value. + +* :cpp:func:`~orcus::spreadsheet::iface::import_sheet::set_bool` - This is a + setter method for a cell that stores a boolean value. Note that not all + format types use this method, as some formats store boolean values as + numeric values. + +* :cpp:func:`~orcus::spreadsheet::iface::import_sheet::set_date_time` - This + is a setter method for a cell that stores a date time value. As with + boolean value type, some format types may not use this method as they store + date time values as numeric values, typically as days since epoch. + +* :cpp:func:`~orcus::spreadsheet::iface::import_sheet::set_format` - This is a + setter method for applying cell formats. Just like the string values, cell + format properties are expected to be stored in a document-wide cell format + properties table, and this method only receives an index into the table. + +* :cpp:func:`~orcus::spreadsheet::iface::import_sheet::get_sheet_size` - This + method is expected to return the dimension of the sheet which the loader may + need in some operations. + +For now, we'll only implement +:cpp:func:`~orcus::spreadsheet::iface::import_sheet::set_string`, +:cpp:func:`~orcus::spreadsheet::iface::import_sheet::set_value`, and +:cpp:func:`~orcus::spreadsheet::iface::import_sheet::get_sheet_size`, and +leave the rest empty. + +Here is the actual code for class ``my_sheet``: + +.. literalinclude:: ../../doc_example/spreadsheet_doc_2_sheets_no_string_pool.cpp + :language: C++ + :start-after: //!code-start: my_sheet + :end-before: //!code-end: my_sheet + +Note that this class receives its sheet index value from the caller upon +instantiation. A sheet index is a 0-based value and represents its position +within the sheet collection. + +Finally, we will modify the ``my_import_factory`` class to store and manage a +collection of ``my_sheet`` instances and to return the pointer value to a +correct sheet accessor instance as needed. + +.. literalinclude:: ../../doc_example/spreadsheet_doc_2_sheets_no_string_pool.cpp + :language: C++ + :start-after: //!code-start: my_import_factory + :end-before: //!code-end: my_import_factory + +Let's put it all together and run this code: + +.. 
literalinclude:: ../../doc_example/spreadsheet_doc_2_sheets_no_string_pool.cpp + :language: C++ + +We'll be loading the same document we loaded in the previous example, but this +time we will receive its cell values. Let's go through each sheet one at a +time. + +Data on the first sheet looks like this: + +.. figure:: /_static/images/overview/multi-sheets-sheet1.png + +It consists of 4 columns, with each column having a header row followed by +exactly ten rows of data. The first and fourth columns contain numeric data, +while the second and third columns contain string data. + +When you run the above code to load this sheet, you'll get the following output: + +.. code-block:: text + + (sheet: 0; row: 0; col: 0): string index = 0 + (sheet: 0; row: 0; col: 1): string index = 0 + (sheet: 0; row: 0; col: 2): string index = 0 + (sheet: 0; row: 0; col: 3): string index = 0 + (sheet: 0; row: 1; col: 0): value = 1 + (sheet: 0; row: 1; col: 1): string index = 0 + (sheet: 0; row: 1; col: 2): string index = 0 + (sheet: 0; row: 1; col: 3): value = 35 + (sheet: 0; row: 2; col: 0): value = 2 + (sheet: 0; row: 2; col: 1): string index = 0 + (sheet: 0; row: 2; col: 2): string index = 0 + (sheet: 0; row: 2; col: 3): value = 56 + (sheet: 0; row: 3; col: 0): value = 3 + (sheet: 0; row: 3; col: 1): string index = 0 + (sheet: 0; row: 3; col: 2): string index = 0 + (sheet: 0; row: 3; col: 3): value = 6 + (sheet: 0; row: 4; col: 0): value = 4 + (sheet: 0; row: 4; col: 1): string index = 0 + (sheet: 0; row: 4; col: 2): string index = 0 + (sheet: 0; row: 4; col: 3): value = 65 + (sheet: 0; row: 5; col: 0): value = 5 + (sheet: 0; row: 5; col: 1): string index = 0 + (sheet: 0; row: 5; col: 2): string index = 0 + (sheet: 0; row: 5; col: 3): value = 88 + (sheet: 0; row: 6; col: 0): value = 6 + (sheet: 0; row: 6; col: 1): string index = 0 + (sheet: 0; row: 6; col: 2): string index = 0 + (sheet: 0; row: 6; col: 3): value = 90 + (sheet: 0; row: 7; col: 0): value = 7 + (sheet: 0; row: 7; col: 1): string index = 0 + (sheet: 0; row: 7; col: 2): string index = 0 + (sheet: 0; row: 7; col: 3): value = 80 + (sheet: 0; row: 8; col: 0): value = 8 + (sheet: 0; row: 8; col: 1): string index = 0 + (sheet: 0; row: 8; col: 2): string index = 0 + (sheet: 0; row: 8; col: 3): value = 66 + (sheet: 0; row: 9; col: 0): value = 9 + (sheet: 0; row: 9; col: 1): string index = 0 + (sheet: 0; row: 9; col: 2): string index = 0 + (sheet: 0; row: 9; col: 3): value = 14 + (sheet: 0; row: 10; col: 0): value = 10 + (sheet: 0; row: 10; col: 1): string index = 0 + (sheet: 0; row: 10; col: 2): string index = 0 + (sheet: 0; row: 10; col: 3): value = 23 + +There are a couple of things worth pointing out. First, the cell data +flows left to right first, then top to bottom. Second, for this +particular sheet and for this particular format, implementing just the +two setter methods, namely +:cpp:func:`~orcus::spreadsheet::iface::import_sheet::set_string` and +:cpp:func:`~orcus::spreadsheet::iface::import_sheet::set_value`, is +enough to receive all cell values. However, we are getting a string +index value of 0 for all string cells. This is because orcus expects +the backend document model to implement the shared strings interface, +which is responsible for providing correct string indices to the import +filter, and we have not yet implemented one. Let's fix that. + + +Implement shared strings interface +---------------------------------- + +The first thing to do is define some types: + +..
literalinclude:: ../../doc_example/spreadsheet_doc_2_sheets_with_string_pool.cpp + :language: C++ + :start-after: //!code-start: types + :end-before: //!code-end: types + +Here, we define ``ss_type`` to be the authoritative store for the shared +string values. The string values will be stored as std::string type, and we +use std::deque here to avoid re-allocation of internal buffers as the size +of the container grows. + +Another type we define is ``ss_hash_type``, which will be the hash map type +for storing string-to-index mapping entries. Here, we are using std::string_view +instead of std::string so that we can simply reference the string values stored in +the first container. + +The shared string interface is designed to handle both unformatted and +formatted string values. The following two methods: + +* :cpp:func:`~orcus::spreadsheet::iface::import_shared_strings::add` +* :cpp:func:`~orcus::spreadsheet::iface::import_shared_strings::append` + +are for unformatted string values. The +:cpp:func:`~orcus::spreadsheet::iface::import_shared_strings::add` method is +used when passing a string value that may or may not already exist in the +shared string pool. The +:cpp:func:`~orcus::spreadsheet::iface::import_shared_strings::append` method, +on the other hand, is used only when the string value being passed is a +brand-new string not yet stored in the string pool. When implementing the +:cpp:func:`~orcus::spreadsheet::iface::import_shared_strings::append` method, +you may skip checking for the existence of the string value in the pool before +inserting it. Both of these methods are expected to return a non-negative integer +value as the index of the string being passed. + +The following eight methods: + +* :cpp:func:`~orcus::spreadsheet::iface::import_shared_strings::set_segment_bold` +* :cpp:func:`~orcus::spreadsheet::iface::import_shared_strings::set_segment_font` +* :cpp:func:`~orcus::spreadsheet::iface::import_shared_strings::set_segment_font_color` +* :cpp:func:`~orcus::spreadsheet::iface::import_shared_strings::set_segment_font_name` +* :cpp:func:`~orcus::spreadsheet::iface::import_shared_strings::set_segment_font_size` +* :cpp:func:`~orcus::spreadsheet::iface::import_shared_strings::set_segment_italic` +* :cpp:func:`~orcus::spreadsheet::iface::import_shared_strings::append_segment` +* :cpp:func:`~orcus::spreadsheet::iface::import_shared_strings::commit_segments` + +are for receiving formatted string values. Conceptually, a formatted string +consists of a series of string segments, where each segment may have +different formatting attributes applied to it. These ``set_segment_*`` +methods are used to set the individual formatting attributes for the current +string segment, and the string value for the current segment is passed through +the +:cpp:func:`~orcus::spreadsheet::iface::import_shared_strings::append_segment` +call. The order in which the ``set_segment_*`` methods are called is not +specified, and not all of them may be called, but they are guaranteed to be +called before the +:cpp:func:`~orcus::spreadsheet::iface::import_shared_strings::append_segment` +method gets called. The implementation should keep a buffer to store the +formatting attributes for the current segment and apply each attribute to the +buffer as one of the ``set_segment_*`` methods gets called.
When the +:cpp:func:`~orcus::spreadsheet::iface::import_shared_strings::append_segment` +method gets called, the implementation should apply the formatting attribute set +currently in the buffer to the current segment, and reset the buffer for the +next segment. When all of the string segments and their formatting attributes +are passed, +:cpp:func:`~orcus::spreadsheet::iface::import_shared_strings::commit_segments` +gets called, signaling the implementation that now it's time to commit the +string to the document model. + +As we are going to ignore the formatting attributes in our current example, +the following code will do: + +.. literalinclude:: ../../doc_example/spreadsheet_doc_2_sheets_with_string_pool.cpp + :language: C++ + :start-after: //!code-start: my_shared_strings + :end-before: //!code-end: my_shared_strings + +Note that some import filters may use the +:cpp:func:`~orcus::spreadsheet::iface::import_shared_strings::append_segment` +and +:cpp:func:`~orcus::spreadsheet::iface::import_shared_strings::commit_segments` +combination even for unformatted strings. Because of this, you still need to +implement these two methods even if raw string values are all you care about. + +Note also that the container storing the string values is a reference. The +source container will be owned by ``my_import_factory``, which will also be the +owner of the ``my_shared_strings`` instance. Shown below is the modified +version of ``my_import_factory`` that provides the shared string interface: + +.. literalinclude:: ../../doc_example/spreadsheet_doc_2_sheets_with_string_pool.cpp + :language: C++ + :start-after: //!code-start: my_import_factory + :end-before: //!code-end: my_import_factory + +The shared string store is also passed to each sheet instance, and we'll use +that to fetch the string values from their respective string indices. + +Let's put this all together: + +.. literalinclude:: ../../doc_example/spreadsheet_doc_2_sheets_with_string_pool.cpp + :language: C++ + +The sheet class is largely unchanged except for one thing; it now takes a +reference to the string pool and prints the actual string value alongside the +string index associated with it. When you execute this code, you'll see the +following output when loading the same sheet: + +..
code-block:: text + + (sheet: 0; row: 0; col: 0): string index = 0 (ID) + (sheet: 0; row: 0; col: 1): string index = 1 (First Name) + (sheet: 0; row: 0; col: 2): string index = 2 (Last Name) + (sheet: 0; row: 0; col: 3): string index = 3 (Age) + (sheet: 0; row: 1; col: 0): value = 1 + (sheet: 0; row: 1; col: 1): string index = 5 (Thia) + (sheet: 0; row: 1; col: 2): string index = 6 (Beauly) + (sheet: 0; row: 1; col: 3): value = 35 + (sheet: 0; row: 2; col: 0): value = 2 + (sheet: 0; row: 2; col: 1): string index = 9 (Pepito) + (sheet: 0; row: 2; col: 2): string index = 10 (Resun) + (sheet: 0; row: 2; col: 3): value = 56 + (sheet: 0; row: 3; col: 0): value = 3 + (sheet: 0; row: 3; col: 1): string index = 13 (Emera) + (sheet: 0; row: 3; col: 2): string index = 14 (Gravey) + (sheet: 0; row: 3; col: 3): value = 6 + (sheet: 0; row: 4; col: 0): value = 4 + (sheet: 0; row: 4; col: 1): string index = 17 (Erinn) + (sheet: 0; row: 4; col: 2): string index = 18 (Flucks) + (sheet: 0; row: 4; col: 3): value = 65 + (sheet: 0; row: 5; col: 0): value = 5 + (sheet: 0; row: 5; col: 1): string index = 21 (Giusto) + (sheet: 0; row: 5; col: 2): string index = 22 (Bambury) + (sheet: 0; row: 5; col: 3): value = 88 + (sheet: 0; row: 6; col: 0): value = 6 + (sheet: 0; row: 6; col: 1): string index = 25 (Neall) + (sheet: 0; row: 6; col: 2): string index = 26 (Scorton) + (sheet: 0; row: 6; col: 3): value = 90 + (sheet: 0; row: 7; col: 0): value = 7 + (sheet: 0; row: 7; col: 1): string index = 29 (Ervin) + (sheet: 0; row: 7; col: 2): string index = 30 (Foreman) + (sheet: 0; row: 7; col: 3): value = 80 + (sheet: 0; row: 8; col: 0): value = 8 + (sheet: 0; row: 8; col: 1): string index = 33 (Shoshana) + (sheet: 0; row: 8; col: 2): string index = 34 (Bohea) + (sheet: 0; row: 8; col: 3): value = 66 + (sheet: 0; row: 9; col: 0): value = 9 + (sheet: 0; row: 9; col: 1): string index = 37 (Gladys) + (sheet: 0; row: 9; col: 2): string index = 38 (Somner) + (sheet: 0; row: 9; col: 3): value = 14 + (sheet: 0; row: 10; col: 0): value = 10 + (sheet: 0; row: 10; col: 1): string index = 41 (Ephraim) + (sheet: 0; row: 10; col: 2): string index = 42 (Russell) + (sheet: 0; row: 10; col: 3): value = 23 + +The string indices now increment nicely, and their respective string values +look correct. + +Now, let's turn our attention to the second sheet, which contains formulas. +First, here is what the second sheet looks like: + +.. figure:: /_static/images/overview/multi-sheets-sheet2.png + +It contains a simple table extending from A1 to C9. It consists of three +columns and the first row is a header row. Cells in the the first and second +columns contain simple numbers and the third column contains formulas that +simply add the two numbers to the left of the same row. When loading this +sheet using the last code we used above, you'll see the following output: + +.. 
code-block:: text + + (sheet: 1; row: 0; col: 0): string index = 44 (X) + (sheet: 1; row: 0; col: 1): string index = 45 (Y) + (sheet: 1; row: 0; col: 2): string index = 46 (X + Y) + (sheet: 1; row: 1; col: 0): value = 18 + (sheet: 1; row: 1; col: 1): value = 79 + (sheet: 1; row: 2; col: 0): value = 48 + (sheet: 1; row: 2; col: 1): value = 55 + (sheet: 1; row: 3; col: 0): value = 99 + (sheet: 1; row: 3; col: 1): value = 35 + (sheet: 1; row: 4; col: 0): value = 41 + (sheet: 1; row: 4; col: 1): value = 69 + (sheet: 1; row: 5; col: 0): value = 5 + (sheet: 1; row: 5; col: 1): value = 18 + (sheet: 1; row: 6; col: 0): value = 46 + (sheet: 1; row: 6; col: 1): value = 69 + (sheet: 1; row: 7; col: 0): value = 36 + (sheet: 1; row: 7; col: 1): value = 67 + (sheet: 1; row: 8; col: 0): value = 78 + (sheet: 1; row: 8; col: 1): value = 2 + +Everything looks fine except that the formula cells in C2:C9 are not loaded at +all. This is because, in order to receive formula cell data, you must +implement the required :cpp:class:`~orcus::spreadsheet::iface::import_formula` +interface, which we will cover in the next section. + + +Implement formula interface +--------------------------- + +In this section we will extend the code from the previous section in order to +receive and process formula cell values from the sheet. We will need to make +quite a few changes. Let's go over this one thing at a time. First, we are +adding a new cell value type ``formula``: + +.. literalinclude:: ../../doc_example/spreadsheet_doc_2_sheets_with_formula.cpp + :language: C++ + :start-after: //!code-start: cell_value_type + :end-before: //!code-end: cell_value_type + +which should not come as a surprise. + +We are not making any change to the ``cell_value`` struct itself, but we are +re-using its ``index`` member for a formula cell value such that, if the cell +stores a formula, the index will refer to its actual formula data which will +be stored in a separate data store, much like how strings are stored +externally and referenced by their indices in the ``cell_value`` instances. + +We are also adding a brand-new class called ``cell_grid``, to add an extra +layer over the raw cell value array: + +.. literalinclude:: ../../doc_example/spreadsheet_doc_2_sheets_with_formula.cpp + :language: C++ + :start-after: //!code-start: cell_grid + :end-before: //!code-end: cell_grid + +Each sheet instance will own one instance of ``cell_grid``, and the formula +interface class instance will hold a reference to it and use it to insert +formula cell values into it. The same sheet instance will also hold a formula +value store, and pass its reference to the formula interface class. + +The formula interface class must implement the following methods: + +* :cpp:func:`~orcus::spreadsheet::iface::import_formula::set_position` +* :cpp:func:`~orcus::spreadsheet::iface::import_formula::set_formula` +* :cpp:func:`~orcus::spreadsheet::iface::import_formula::set_shared_formula_index` +* :cpp:func:`~orcus::spreadsheet::iface::import_formula::set_result_string` +* :cpp:func:`~orcus::spreadsheet::iface::import_formula::set_result_value` +* :cpp:func:`~orcus::spreadsheet::iface::import_formula::set_result_empty` +* :cpp:func:`~orcus::spreadsheet::iface::import_formula::set_result_bool` +* :cpp:func:`~orcus::spreadsheet::iface::import_formula::commit` + +Depending on the type of a formula cell, and depending on the format of the +document, some methods may not be called. 
The +:cpp:func:`~orcus::spreadsheet::iface::import_formula::set_position` method +always gets called regardless of the formula cell type, to specify the +position of the formula cell. The +:cpp:func:`~orcus::spreadsheet::iface::import_formula::set_formula` method gets +called for a formula cell that does not share its formula expression with any +other formula cells, or a formula cell that shares its formula expression with +a group of other formula cells and is the primary cell of that group. If it's +the primary cell of a group of formula cells, the +:cpp:func:`~orcus::spreadsheet::iface::import_formula::set_shared_formula_index` +method also gets called to receive the identifier value of that group. All +formula cells belonging to the same group receive the same identifier value +via +:cpp:func:`~orcus::spreadsheet::iface::import_formula::set_shared_formula_index`, +but only the primary cell of a group receives the formula expression string +via :cpp:func:`~orcus::spreadsheet::iface::import_formula::set_formula`. The +rest of the methods - +:cpp:func:`~orcus::spreadsheet::iface::import_formula::set_result_string`, +:cpp:func:`~orcus::spreadsheet::iface::import_formula::set_result_value`, +:cpp:func:`~orcus::spreadsheet::iface::import_formula::set_result_empty` and +:cpp:func:`~orcus::spreadsheet::iface::import_formula::set_result_bool` - are +called to deliver the cached formula cell value when applicable. + +The :cpp:func:`~orcus::spreadsheet::iface::import_formula::commit` method gets +called at the very end to let the implementation commit the formula cell data +to the backend document store. + +Without further ado, here is the formula interface implementation that we will +use: + +.. literalinclude:: ../../doc_example/spreadsheet_doc_2_sheets_with_formula.cpp + :language: C++ + :start-after: //!code-start: my_formula + :end-before: //!code-end: my_formula + +and here is the definition of the ``formula`` struct that stores a formula expression +string as well as its grammar type: + +.. literalinclude:: ../../doc_example/spreadsheet_doc_2_sheets_with_formula.cpp + :language: C++ + :start-after: //!code-start: formula + :end-before: //!code-end: formula + +Note that since we are loading an OpenDocument Spreadsheet file (.ods), which +does not support shared formulas, we do not need to handle the +:cpp:func:`~orcus::spreadsheet::iface::import_formula::set_shared_formula_index` +method. Likewise, we are leaving the ``set_result_*`` methods unhandled for +now. + +This interface class also stores references to ``cell_grid`` and +``std::vector<formula>`` instances, both of which are passed from the parent +sheet instance. + +We also need to make a few changes to the sheet interface class to provide a formula interface +and add a formula value store: + +.. literalinclude:: ../../doc_example/spreadsheet_doc_2_sheets_with_formula.cpp + :language: C++ + :start-after: //!code-start: my_sheet + :end-before: //!code-end: my_sheet + +We've added the +:cpp:func:`~orcus::spreadsheet::iface::import_sheet::get_formula` method, which +returns a pointer to the ``my_formula`` class instance defined above. The +rest of the code is unchanged. + +Now let's see what happens when loading the same sheet from the previous +section: + +..
code-block:: text + + (sheet: 1; row: 0; col: 0): string index = 44 (X) + (sheet: 1; row: 0; col: 1): string index = 45 (Y) + (sheet: 1; row: 0; col: 2): string index = 46 (X + Y) + (sheet: 1; row: 1; col: 0): value = 18 + (sheet: 1; row: 1; col: 1): value = 79 + (sheet: 1; row: 2; col: 0): value = 48 + (sheet: 1; row: 2; col: 1): value = 55 + (sheet: 1; row: 3; col: 0): value = 99 + (sheet: 1; row: 3; col: 1): value = 35 + (sheet: 1; row: 4; col: 0): value = 41 + (sheet: 1; row: 4; col: 1): value = 69 + (sheet: 1; row: 5; col: 0): value = 5 + (sheet: 1; row: 5; col: 1): value = 18 + (sheet: 1; row: 6; col: 0): value = 46 + (sheet: 1; row: 6; col: 1): value = 69 + (sheet: 1; row: 7; col: 0): value = 36 + (sheet: 1; row: 7; col: 1): value = 67 + (sheet: 1; row: 8; col: 0): value = 78 + (sheet: 1; row: 8; col: 1): value = 2 + (sheet: 1; row: 1; col: 2): formula = [.A2]+[.B2] (ods) + (sheet: 1; row: 2; col: 2): formula = [.A3]+[.B3] (ods) + (sheet: 1; row: 3; col: 2): formula = [.A4]+[.B4] (ods) + (sheet: 1; row: 4; col: 2): formula = [.A5]+[.B5] (ods) + (sheet: 1; row: 5; col: 2): formula = [.A6]+[.B6] (ods) + (sheet: 1; row: 6; col: 2): formula = [.A7]+[.B7] (ods) + (sheet: 1; row: 7; col: 2): formula = [.A8]+[.B8] (ods) + (sheet: 1; row: 8; col: 2): formula = [.A9]+[.B9] (ods) + +Looks like we are getting the formula cell values this time around. + +One thing to note is that the formula expression strings you see here follow +the syntax defined in the OpenFormula specification, which is the formula syntax +used in the OpenDocument Spreadsheet format. + + +Implement more interfaces +------------------------- + +This section has covered only a part of the available spreadsheet interfaces +you can implement in your code. Refer to the :ref:`spreadsheet-interfaces` +section to see the complete list of interfaces. diff --git a/doc/overview/index.rst b/doc/overview/index.rst new file mode 100644 index 0000000..0b95f8b --- /dev/null +++ b/doc/overview/index.rst @@ -0,0 +1,95 @@ + +.. highlight:: cpp + +Overview +======== + +Composition of the library +-------------------------- + +The primary goal of the orcus library is to provide a framework to import the +contents of documents stored in various spreadsheet or spreadsheet-like +formats. The library also provides several low-level parsers that can be used +independently of the spreadsheet-related features if so desired. In addition, +the library provides support for some hierarchical documents, such as JSON +and YAML, which were a later addition to the library. + +You can use this library through its C++ API, Python API, or CLI. However, +not all three methods equally expose all features of the library, and the C++ API +is more complete than the other two. + +The library is physically split into four parts: + + 1. the parser part that provides the aforementioned low-level parsers, + 2. the filter part that provides higher-level import filters for spreadsheet + and hierarchical documents that internally use the low-level parsers, + 3. the spreadsheet document model part that includes the document model suitable + for storing spreadsheet document contents, and + 4. the CLI for loading and converting spreadsheet and hierarchical documents. + +If you just need to use the parser part of the library, you only need to link +against the ``liborcus-parser`` library file. If you need to use the import +filter part, link against both the ``liborcus-parser`` and the ``liborcus`` +libraries.
Likewise, if you need to use the spreadsheet document model part, +link against the aforementioned two plus the ``liborcus-spreadsheet-model`` +library. + +Also note that the spreadsheet document model part has an additional dependency on +the `ixion library <https://gitlab.com/ixion/ixion>`_ for handling formula +re-calculations on document load. + + +Loading spreadsheet documents +----------------------------- + +The orcus library's primary aim is to provide a framework to import the contents +of documents stored in various spreadsheet or spreadsheet-like formats. It +supports two primary use cases. The first use case is where the client +program does not have its own document model, but needs to import data from a +spreadsheet-like document file and access its content without implementing its +own document store from scratch. In this particular use case, you can simply +use the :cpp:class:`~orcus::spreadsheet::document` class to get it populated, +and access its content through its API afterward. + +The second use case, which is a bit more advanced, is where the client program +already has its own internal document model, and needs to use orcus +to populate its document model. In this particular use case, you can +implement your own set of classes that support the necessary interfaces, and pass +that to the orcus import filter. + +For each document type that orcus supports, there is a top-level import filter +class that serves as an entry point for loading the content of a document you +wish to load. You don't pass your document to this filter directly; instead, +you wrap your document with what we call an **import factory**, then pass this +factory instance to the loader. This import factory is then required to +implement the necessary interfaces that the filter class uses in order +to pass data to the document as the file is being parsed. + +When using orcus's own document model, you can simply use orcus's own import +factory implementation to wrap its document. When using your own document +model, on the other hand, you'll need to implement your own set of interface +classes to wrap your document with. + +The following sections describe how to load a spreadsheet document by using 1) +orcus's own spreadsheet document class, and 2) a user-defined custom document +class. + +.. toctree:: + :maxdepth: 1 + + doc-orcus.rst + doc-user.rst + + +Loading hierarchical documents +------------------------------ + +The orcus library also includes support for hierarchical document types such +as JSON and YAML. The following sections delve more into the support for +these types of documents. + +.. toctree:: + :maxdepth: 1 + + json.rst + yaml.rst diff --git a/doc/overview/json.rst b/doc/overview/json.rst new file mode 100644 index 0000000..0e252f9 --- /dev/null +++ b/doc/overview/json.rst @@ -0,0 +1,353 @@ + +.. highlight:: cpp + +JSON +==== + +The JSON part of orcus consists of a low-level parser class that handles +parsing of JSON strings, and a high-level document class that stores parsed +JSON structures as a node tree. + +There are two approaches to processing JSON strings using the orcus library. +One approach is to utilize the :cpp:class:`~orcus::json::document_tree` class +to load and populate the JSON structure tree via its +:cpp:func:`~orcus::json::document_tree::load()` method and traverse the tree +through its :cpp:func:`~orcus::json::document_tree::get_document_root()` method.
This approach is ideal if you want a quick way to parse and access the content +of a JSON document with minimal effort. + +Another approach is to use the low-level :cpp:class:`~orcus::json_parser` +class directly by providing your own handler class to receive callbacks from +the parser. This method requires a bit more effort on your part to provide +and populate your own data structure, but if you already have a data structure +to store the content of JSON, then this approach is ideal. The +:cpp:class:`~orcus::json::document_tree` class internally uses +:cpp:class:`~orcus::json_parser` to parse JSON contents. + + +Populating a document tree from JSON string +------------------------------------------- + +The following code snippet shows an example of how to populate an instance of +:cpp:class:`~orcus::json::document_tree` from a JSON string, and navigate its +content tree afterward. + +.. literalinclude:: ../../doc_example/json_doc_1.cpp + :language: C++ + +You'll see the following output when executing this code: + +.. code-block:: text + + name: John Doe + occupation: Software Engineer + score: + - 89 + - 67 + - 90 + + +Using the low-level parser +-------------------------- + +The following code snippet shows how to use the low-level :cpp:class:`~orcus::json_parser` +class by providing your own handler class and passing it as a template argument: + +.. literalinclude:: ../../doc_example/json_parser_1.cpp + :language: C++ + +The parser constructor expects the char array, its length, and the handler +instance. The base handler class :cpp:class:`~orcus::json_handler` implements +all required handler methods. By inheriting from it, you only need to +implement the handler methods you need. In this example, we are only +implementing the :cpp:func:`~orcus::json_handler::object_key`, +:cpp:func:`~orcus::json_handler::string`, and :cpp:func:`~orcus::json_handler::number` +methods to process object key values, string values and numeric values, +respectively. Refer to the :cpp:class:`~orcus::json_handler` class definition +for all available handler methods. + +Executing this code will generate the following output: + +.. code-block:: text + + JSON string: {"key1": [1,2,3,4,5], "key2": 12.3} + object key: key1 + number: 1 + number: 2 + number: 3 + number: 4 + number: 5 + object key: key2 + number: 12.3 + + +Building a document tree directly +--------------------------------- + +You can also create and populate a JSON document tree directly without needing +to parse a JSON string. This approach is ideal if you want to create a JSON +tree from scratch and export it as a string. The following series of code +snippets demonstrates how to build JSON document trees directly and +export their contents as JSON strings. + +The first example shows how to initialize the tree with a simple array: + +.. literalinclude:: ../../doc_example/json_doc_2.cpp + :language: C++ + :start-after: //!code-start: root list + :end-before: //!code-end: root list + +You can simply specify the content of the array via an initializer list and +assign it to the document. The :cpp:func:`~orcus::json::document_tree::dump()` +method then turns the content into a single string instance, which looks like +the following: + +.. code-block:: text + + [ + 1, + 2, + "string value", + false, + null + ] + +If you need to build an array of arrays, do the following: + +..
literalinclude:: ../../doc_example/json_doc_2.cpp + :language: C++ + :start-after: //!code-start: list nested + :end-before: //!code-end: list nested + +This will create an array of two nested child arrays with three values each. +Dumping the content of the tree as a JSON string will produce something like +the following: + +.. code-block:: text + + [ + [ + true, + false, + null + ], + [ + 1.1, + 2.2, + "text" + ] + ] + +Creating an object can be done by nesting one or more key-value pairs, each of +which is surrounded by a pair of curly braces, inside another pair of curly +braces. For example, the following code: + +.. literalinclude:: ../../doc_example/json_doc_2.cpp + :language: C++ + :start-after: //!code-start: list object + :end-before: //!code-end: list object + +produces the following output: + +.. code-block:: text + + { + "key1": 1.2, + "key2": "some text" + } + +indicating that the tree consists of a single object having two key-value +pairs. + +You may notice that this syntax is identical to the syntax for +creating an array of arrays as shown above. In fact, in order for this to be +an object, each of the inner sequences must have exactly two values, and its +first value must be a string value. Failing that, it will be interpreted as +an array of arrays. + +As with arrays, nesting of objects is also supported. The following code: + +.. literalinclude:: ../../doc_example/json_doc_2.cpp + :language: C++ + :start-after: //!code-start: list object 2 + :end-before: //!code-end: list object 2 + +creates a root object having two key-value pairs, one of which contains +another object having three key-value pairs, as evident in the following output +generated by this code: + +.. code-block:: text + + { + "parent1": { + "child1": true, + "child2": false, + "child3": 123.4 + }, + "parent2": "not-nested" + } + +There is one caveat that you need to be aware of because of this special +object creation syntax. When you have a nested array that contains exactly +two values and the first value is a string value, you must explicitly declare +that as an array by using an :cpp:class:`~orcus::json::array` class instance. +For instance, this code: + +.. literalinclude:: ../../doc_example/json_doc_2.cpp + :language: C++ + :start-after: //!code-start: array ambiguous + :end-before: //!code-end: array ambiguous + +is intended to be an object containing an array. However, because the supposed +inner array contains exactly two values and the first value is a string +value, which could be interpreted as a key-value pair for the outer object, it +ends up being too ambiguous and a :cpp:class:`~orcus::json::key_value_error` +exception gets thrown as a result. + +To work around this ambiguity, you need to declare the inner array to be +explicit by using an :cpp:class:`~orcus::json::array` instance: + +.. literalinclude:: ../../doc_example/json_doc_2.cpp + :language: C++ + :start-after: //!code-start: array explicit + :end-before: //!code-end: array explicit + +This code now correctly generates a root object containing one key-value pair +whose value is an array: + +.. code-block:: text + + { + "array": [ + "one", + 987 + ] + } + +A similar ambiguity issue arises when you want to construct a tree consisting +only of an empty root object. You may be tempted to write something like +this: + +..
+.. literalinclude:: ../../doc_example/json_doc_2.cpp
+   :language: C++
+   :start-after: //!code-start: object ambiguous a
+   :end-before: //!code-end: object ambiguous a
+
+However, this will leave the tree entirely unpopulated, i.e. the tree will not
+even have a root node! If you then try to get a root node from this tree,
+you'll get a :cpp:class:`~orcus::json::document_error` thrown as a result. If
+you inspect the error message stored in the exception:
+
+.. literalinclude:: ../../doc_example/json_doc_2.cpp
+   :language: C++
+   :start-after: //!code-start: object ambiguous b
+   :end-before: //!code-end: object ambiguous b
+
+you will get
+
+.. code-block:: text
+
+   json::document_error: document tree is empty
+
+which confirms that the tree is indeed empty! The solution here is to directly
+assign an instance of :cpp:class:`~orcus::json::object` to the document tree,
+which will initialize the tree with an empty root object. The following code:
+
+.. literalinclude:: ../../doc_example/json_doc_2.cpp
+   :language: C++
+   :start-after: //!code-start: object explicit 1
+   :end-before: //!code-end: object explicit 1
+
+will therefore generate
+
+.. code-block:: text
+
+   {
+   }
+
+You can also use :cpp:class:`~orcus::json::object` class instances to indicate
+empty objects anywhere in the tree. For instance, this code:
+
+.. literalinclude:: ../../doc_example/json_doc_2.cpp
+   :language: C++
+   :start-after: //!code-start: object explicit 2
+   :end-before: //!code-end: object explicit 2
+
+is intended to create an array containing three empty objects as its elements,
+and that's exactly what it does:
+
+.. code-block:: text
+
+   [
+       {
+       },
+       {
+       },
+       {
+       }
+   ]
+
+So far all the examples have shown how to initialize the document tree as the
+tree itself is being constructed. The next example, however, shows how to add
+new key-value pairs to existing objects after the document tree instance has
+been initialized.
+
+.. literalinclude:: ../../doc_example/json_doc_2.cpp
+   :language: C++
+   :start-after: //!code-start: root object add child
+   :end-before: //!code-end: root object add child
+
+This code first initializes the tree with an empty object, then retrieves the
+empty root object and assigns several key-value pairs to it. When you convert
+the tree content to a string and inspect it, you'll see something like the
+following:
+
+.. code-block:: text
+
+   {
+       "child array": [
+           1.1,
+           1.2,
+           true
+       ],
+       "child1": 1,
+       "child3": [
+           true,
+           false
+       ],
+       "child2": "string",
+       "child object": {
+           "key1": 100,
+           "key2": 200
+       }
+   }
+
+The next example shows how to append values to an existing array after the
+tree has been constructed. Let's take a look at the code:
+
+.. literalinclude:: ../../doc_example/json_doc_2.cpp
+   :language: C++
+   :start-after: //!code-start: root array add child
+   :end-before: //!code-end: root array add child
+
+As in the previous example, this code first initializes the tree, but this
+time with an empty array as its root. It then retrieves the root array and
+appends several values to it via its :cpp:func:`~orcus::json::node::push_back`
+method.
+
+When you dump the content of this tree as a JSON string you'll get something
+like this:
+
+.. code-block:: text
+
+   [
+       -1.2,
+       "string",
+       true,
+       null,
+       {
+           "key1": 1.1,
+           "key2": 1.2
+       }
+   ]
+
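+A rough sketch of how this last example might look is shown below. This is
+only an illustration based on the descriptions above; the assignment of a
+:cpp:class:`~orcus::json::array` instance to create the empty root array and
+the ``get_document_root()`` call are assumptions, not verbatim excerpts from
+the example file:
+
+.. code-block:: cpp
+
+   #include <orcus/json_document_tree.hpp>
+   #include <iostream>
+
+   int main()
+   {
+       orcus::json::document_tree doc;
+
+       // Initialize the tree with an empty array as its root.
+       doc = orcus::json::array();
+
+       // Retrieve the root array and append values to it.
+       orcus::json::node root = doc.get_document_root();
+       root.push_back(-1.2);
+       root.push_back("string");
+       root.push_back(true);
+       root.push_back(nullptr);
+
+       std::cout << doc.dump() << std::endl;
+
+       return 0;
+   }
+
diff --git a/doc/overview/yaml.rst b/doc/overview/yaml.rst
new file mode 100644
index 0000000..4109cf2
--- /dev/null
+++ b/doc/overview/yaml.rst
@@ -0,0 +1,8 @@
+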
+.. highlight:: cpp
+
+YAML
+====
+
+TBD
+
diff --git a/doc/python/index.rst b/doc/python/index.rst
new file mode 100644
index 0000000..bb76c47
--- /dev/null
+++ b/doc/python/index.rst
@@ -0,0 +1,17 @@
+
+Python API reference
+====================
+
+Packages
+--------
+
+.. toctree::
+   :maxdepth: 1
+
+   orcus/index.rst
+   orcus/tools/index.rst
+   orcus/csv/index.rst
+   orcus/gnumeric/index.rst
+   orcus/ods/index.rst
+   orcus/xlsx/index.rst
+   orcus/xls_xml/index.rst
diff --git a/doc/python/orcus/cell.rst b/doc/python/orcus/cell.rst
new file mode 100644
index 0000000..0143307
--- /dev/null
+++ b/doc/python/orcus/cell.rst
@@ -0,0 +1,31 @@
+
+Cell
+====
+
+.. py:class:: orcus.Cell
+
+   This class represents a single cell within a :py:class:`.Sheet` object.
+
+   .. py:method:: get_formula_tokens
+
+      :rtype: :py:class:`.FormulaTokens`
+      :return: an iterator object for a formula cell.
+
+      Get an iterator object for formula tokens if the cell is a formula cell.
+      This method returns ``None`` for a non-formula cell.
+
+   .. py:attribute:: type
+      :type: orcus.CellType
+
+      Attribute specifying the type of this cell.
+
+   .. py:attribute:: value
+
+      Attribute containing the value of the cell.
+
+   .. py:attribute:: formula
+      :type: str
+
+      Attribute containing the formula string in case of a formula cell. This
+      value will be ``None`` for a non-formula cell.
+
diff --git a/doc/python/orcus/cell_type.rst b/doc/python/orcus/cell_type.rst
new file mode 100644
index 0000000..26568d4
--- /dev/null
+++ b/doc/python/orcus/cell_type.rst
@@ -0,0 +1,7 @@
+
+CellType
+========
+
+.. autoclass:: orcus.CellType
+   :members:
+   :undoc-members:
diff --git a/doc/python/orcus/csv/index.rst b/doc/python/orcus/csv/index.rst
new file mode 100644
index 0000000..f2b73a0
--- /dev/null
+++ b/doc/python/orcus/csv/index.rst
@@ -0,0 +1,19 @@
+
+orcus.csv
+=========
+
+.. py:function:: orcus.csv.read
+
+   Read a CSV file from a specified file path and create a :py:class:`orcus.Document`
+   instance object.
+
+   :param stream: either a string value, or a file object containing a string stream.
+   :rtype: :py:class:`orcus.Document`
+   :return: document instance object that stores the content of the file.
+
+   Example::
+
+      from orcus import csv
+
+      with open("path/to/file.csv", "r") as f:
+          doc = csv.read(f)
diff --git a/doc/python/orcus/document.rst b/doc/python/orcus/document.rst
new file mode 100644
index 0000000..d4b6fc5
--- /dev/null
+++ b/doc/python/orcus/document.rst
@@ -0,0 +1,21 @@
+
+Document
+========
+
+.. py:class:: orcus.Document
+
+   An instance of this class represents a document model. A document consists
+   of multiple sheet objects.
+
+   .. py:attribute:: sheets
+
+      Read-only attribute that stores a tuple of :py:class:`.Sheet` instance
+      objects.
+
+   .. py:function:: get_named_expressions
+
+      Get a named expressions iterator.
+
+      :rtype: :obj:`.NamedExpressions`
+      :return: named expression object.
+
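+   A short usage sketch is shown below; the ``doc`` instance here is assumed
+   to have been created beforehand, for example via :py:func:`orcus.csv.read`::
+
+      for sheet in doc.sheets:
+          print(sheet.name)
+
+      names = doc.get_named_expressions().names
+      print(sorted(names))
+
diff --git a/doc/python/orcus/format_type.rst b/doc/python/orcus/format_type.rst
new file mode 100644
index 0000000..a3d4b01
--- /dev/null
+++ b/doc/python/orcus/format_type.rst
@@ -0,0 +1,7 @@
+
+FormatType
+==========
+
+.. autoclass:: orcus.FormatType
+   :members:
+   :undoc-members:
diff --git a/doc/python/orcus/formula_token.rst b/doc/python/orcus/formula_token.rst
new file mode 100644
index 0000000..d9627cf
--- /dev/null
+++ b/doc/python/orcus/formula_token.rst
@@ -0,0 +1,19 @@
+
+FormulaToken
+============
+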
+.. py:class:: orcus.FormulaToken
+
+   This class represents a single formula token value as returned from a
+   :py:class:`.FormulaTokens` iterator.
+
+   .. py:attribute:: op
+      :type: orcus.FormulaTokenOp
+
+      Attribute specifying the opcode of the formula token.
+
+   .. py:attribute:: type
+      :type: orcus.FormulaTokenType
+
+      Attribute specifying the type of the formula token.
+
diff --git a/doc/python/orcus/formula_token_op.rst b/doc/python/orcus/formula_token_op.rst
new file mode 100644
index 0000000..0f29d59
--- /dev/null
+++ b/doc/python/orcus/formula_token_op.rst
@@ -0,0 +1,7 @@
+
+FormulaTokenOp
+==============
+
+.. autoclass:: orcus.FormulaTokenOp
+   :members:
+   :undoc-members:
diff --git a/doc/python/orcus/formula_token_type.rst b/doc/python/orcus/formula_token_type.rst
new file mode 100644
index 0000000..cc575d4
--- /dev/null
+++ b/doc/python/orcus/formula_token_type.rst
@@ -0,0 +1,7 @@
+
+FormulaTokenType
+================
+
+.. autoclass:: orcus.FormulaTokenType
+   :members:
+   :undoc-members:
diff --git a/doc/python/orcus/formula_tokens.rst b/doc/python/orcus/formula_tokens.rst
new file mode 100644
index 0000000..ba5aa4e
--- /dev/null
+++ b/doc/python/orcus/formula_tokens.rst
@@ -0,0 +1,8 @@
+
+FormulaTokens
+=============
+
+.. py:class:: orcus.FormulaTokens
+
+   Iterator for formula tokens within a :py:class:`.Cell` object representing
+   a formula cell. Each iteration will return a :py:class:`.FormulaToken` object.
diff --git a/doc/python/orcus/gnumeric/index.rst b/doc/python/orcus/gnumeric/index.rst
new file mode 100644
index 0000000..a3ab2cc
--- /dev/null
+++ b/doc/python/orcus/gnumeric/index.rst
@@ -0,0 +1,24 @@
+
+orcus.gnumeric
+==============
+
+.. py:function:: orcus.gnumeric.read
+
+   Read a Gnumeric file from a specified file path and create a
+   :py:class:`orcus.Document` instance object.
+
+   :param stream: file object containing byte streams.
+   :param bool recalc: optional parameter specifying whether or not to recalculate
+      the formula cells on load. Defaults to ``False``.
+   :param str error_policy: optional parameter indicating what to do when
+      encountering formula cells with invalid formula expressions. The value
+      must be either ``fail`` or ``skip``. Defaults to ``fail``.
+   :rtype: :py:class:`orcus.Document`
+   :return: document instance object that stores the content of the file.
+
+   Example::
+
+      from orcus import gnumeric
+
+      with open("path/to/file.gnumeric", "rb") as f:
+          doc = gnumeric.read(f, recalc=True, error_policy="fail")
diff --git a/doc/python/orcus/index.rst b/doc/python/orcus/index.rst
new file mode 100644
index 0000000..69ee284
--- /dev/null
+++ b/doc/python/orcus/index.rst
@@ -0,0 +1,34 @@
+
+orcus
+=====
+
+.. py:function:: orcus.detect_format
+
+   Detects the file format of the stream.
+
+   :param stream: either bytes, or a file object containing a byte stream.
+   :rtype: :py:class:`orcus.FormatType`
+   :return: enum value specifying the detected file format.
+
+   Example::
+
+      import orcus
+
+      with open("path/to/file", "rb") as f:
+          fmt = orcus.detect_format(f)
+
+
+.. toctree::
+   :maxdepth: 1
+
+   cell.rst
+   cell_type.rst
+   document.rst
+   format_type.rst
+   formula_token.rst
+   formula_token_op.rst
+   formula_token_type.rst
+   formula_tokens.rst
+   named_expressions.rst
+   sheet.rst
+   sheet_rows.rst
diff --git a/doc/python/orcus/named_expressions.rst b/doc/python/orcus/named_expressions.rst
new file mode 100644
index 0000000..fc1aa81
--- /dev/null
+++ b/doc/python/orcus/named_expressions.rst
@@ -0,0 +1,13 @@
+
+NamedExpressions
+================
+
+.. py:class:: NamedExpressions
+
+   Iterator for named expressions.
+
+   .. py:attribute:: names
+      :type: set
+
+      A set of strings representing the names of the named expressions.
+
diff --git a/doc/python/orcus/ods/index.rst b/doc/python/orcus/ods/index.rst
new file mode 100644
index 0000000..1dac00e
--- /dev/null
+++ b/doc/python/orcus/ods/index.rst
@@ -0,0 +1,24 @@
+
+orcus.ods
+==========
+
+.. py:function:: orcus.ods.read
+
+   Read an Open Document Spreadsheet file from a specified file path and create
+   a :py:class:`orcus.Document` instance object.
+
+   :param stream: file object containing byte streams.
+   :param bool recalc: optional parameter specifying whether or not to recalculate
+      the formula cells on load. Defaults to ``False``.
+   :param str error_policy: optional parameter indicating what to do when
+      encountering formula cells with invalid formula expressions. The value
+      must be either ``fail`` or ``skip``. Defaults to ``fail``.
+   :rtype: :py:class:`orcus.Document`
+   :return: document instance object that stores the content of the file.
+
+   Example::
+
+      from orcus import ods
+
+      with open("path/to/file.ods", "rb") as f:
+          doc = ods.read(f, recalc=True, error_policy="fail")
diff --git a/doc/python/orcus/sheet.rst b/doc/python/orcus/sheet.rst
new file mode 100644
index 0000000..a94f64e
--- /dev/null
+++ b/doc/python/orcus/sheet.rst
@@ -0,0 +1,56 @@
+
+Sheet
+=====
+
+.. py:class:: orcus.Sheet
+
+   An instance of this class represents a single sheet inside a document.
+
+   .. py:function:: get_rows
+
+      This function returns a row iterator object that allows you to iterate
+      through rows in the data region.
+
+      :rtype: :py:class:`.SheetRows`
+      :return: row iterator object.
+
+      Example::
+
+         rows = sheet.get_rows()
+
+         for row in rows:
+             print(row) # tuple of cell values
+
+   .. py:function:: get_named_expressions
+
+      Get a named expressions iterator.
+
+      :rtype: :obj:`.NamedExpressions`
+      :return: named expression object.
+
+   .. py:function:: write
+
+      Write the sheet content to a specified file object.
+
+      :param file: a writable object to write the sheet content to.
+      :param format: format of the output. Note that it currently
+         only supports a subset of the formats provided by the :obj:`.FormatType`
+         type.
+      :type format: :obj:`.FormatType`
+
+   .. py:attribute:: name
+
+      Read-only attribute that stores the name of the sheet.
+
+   .. py:attribute:: sheet_size
+
+      Read-only dictionary object that stores the column and row sizes of the
+      sheet with the **column** and **row** keys, respectively.
+
+   .. py:attribute:: data_size
+
+      Read-only dictionary object that stores the column and row sizes of the
+      data region of the sheet with the **column** and **row** keys, respectively.
+      The data region is the smallest possible range that includes all non-empty
+      cells in the sheet. The top-left corner of the data region is always at
+      the top-left corner of the sheet.
diff --git a/doc/python/orcus/sheet_rows.rst b/doc/python/orcus/sheet_rows.rst
new file mode 100644
index 0000000..bd86e6e
--- /dev/null
+++ b/doc/python/orcus/sheet_rows.rst
@@ -0,0 +1,9 @@
+
+SheetRows
+=========
+
+.. py:class:: SheetRows
+
+   Iterator for rows within a :py:class:`.Sheet` object. Each iteration returns
+   a tuple of :py:class:`.Cell` objects for the row.
+
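+   A short usage sketch is shown below; the ``sheet`` object here is assumed
+   to come from a :py:class:`.Document` that has already been loaded::
+
+      for row in sheet.get_rows():
+          for cell in row:
+              print(cell.type, cell.value)
+
diff --git a/doc/python/orcus/tools/bugzilla.rst b/doc/python/orcus/tools/bugzilla.rst
new file mode 100644
index 0000000..53cf869
--- /dev/null
+++ b/doc/python/orcus/tools/bugzilla.rst
@@ -0,0 +1,11 @@
+
+bugzilla
+========
+
+..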
argparse:: + :module: orcus.tools.bugzilla + :func: _create_argparser + :prog: orcus.tools.bugzilla + +.. autoclass:: orcus.tools.bugzilla.BugzillaAccess + :members: diff --git a/doc/python/orcus/tools/file_processor.rst b/doc/python/orcus/tools/file_processor.rst new file mode 100644 index 0000000..20b7e0a --- /dev/null +++ b/doc/python/orcus/tools/file_processor.rst @@ -0,0 +1,9 @@ + +file_processor +============== + +.. argparse:: + :module: orcus.tools.file_processor + :func: _create_argparser + :prog: orcus.tools.file_processor + diff --git a/doc/python/orcus/tools/index.rst b/doc/python/orcus/tools/index.rst new file mode 100644 index 0000000..0942183 --- /dev/null +++ b/doc/python/orcus/tools/index.rst @@ -0,0 +1,10 @@ + +orcus.tools +=========== + +.. toctree:: + :maxdepth: 1 + + bugzilla.rst + file_processor.rst + diff --git a/doc/python/orcus/xls_xml/index.rst b/doc/python/orcus/xls_xml/index.rst new file mode 100644 index 0000000..e7c07b1 --- /dev/null +++ b/doc/python/orcus/xls_xml/index.rst @@ -0,0 +1,25 @@ + +orcus.xls_xml +============= + +.. py:function:: orcus.xls_xml.read + + Read an Excel file from a specified file path and create a + :py:class:`orcus.Document` instance object. The file must be saved in the + SpreadsheetML format. + + :param stream: file object containing byte streams. + :param bool recalc: optional parameter specifying whether or not to recalculate + the formula cells on load. Defaults to ``False``. + :param str error_policy: optional parameter indicating what to do when + encountering formula cells with invalid formula expressions. The value + must be either ``fail`` or ``skip``. Defaults to ``fail``. + :rtype: :py:class:`orcus.Document` + :return: document instance object that stores the content of the file. + + Example:: + + from orcus import xls_xml + + with open("path/to/file.xls_xml", "rb") as f: + doc = xls_xml.read(f, recalc=True, error_policy="fail") diff --git a/doc/python/orcus/xlsx/index.rst b/doc/python/orcus/xlsx/index.rst new file mode 100644 index 0000000..977ea1c --- /dev/null +++ b/doc/python/orcus/xlsx/index.rst @@ -0,0 +1,25 @@ + +orcus.xlsx +========== + +.. py:function:: orcus.xlsx.read + + Read an Excel file from a specified file path and create a + :py:class:`orcus.Document` instance object. The file must be of Excel 2007 + XML format. + + :param stream: file object containing byte streams. + :param bool recalc: optional parameter specifying whether or not to recalculate + the formula cells on load. Defaults to ``False``. + :param str error_policy: optional parameter indicating what to do when + encountering formula cells with invalid formula expressions. The value + must be either ``fail`` or ``skip``. Defaults to ``fail``. + :rtype: :py:class:`orcus.Document` + :return: document instance object that stores the content of the file. 
+ + Example:: + + from orcus import xlsx + + with open("path/to/file.xlsx", "rb") as f: + doc = xlsx.read(f, recalc=True, error_policy="fail") diff --git a/doc/requirements.txt b/doc/requirements.txt new file mode 100644 index 0000000..b3f96dd --- /dev/null +++ b/doc/requirements.txt @@ -0,0 +1,3 @@ +breathe +sphinx-argparse +sphinx-rtd-theme diff --git a/doc_example/Makefile.am b/doc_example/Makefile.am new file mode 100644 index 0000000..0f097b2 --- /dev/null +++ b/doc_example/Makefile.am @@ -0,0 +1,110 @@ + +AM_CPPFLAGS = \ + -I$(top_srcdir)/include \ + $(LIBIXION_CFLAGS) \ + -DSRCDIR=\""$(top_srcdir)"\" + +bin_PROGRAMS = + +EXTRA_PROGRAMS = \ + json-doc-1 \ + json-doc-2 \ + json-parser-1 \ + xml-mapping-1 \ + spreadsheet-doc-1 \ + spreadsheet-doc-1-num-and-formula \ + spreadsheet-doc-2 \ + spreadsheet-doc-2-sheets-no-string-pool \ + spreadsheet-doc-2-sheets-with-string-pool \ + spreadsheet-doc-2-sheets-with-formula + +json_doc_1_SOURCES = \ + json_doc_1.cpp + +json_doc_1_LDADD = \ + ../src/liborcus/liborcus-@ORCUS_API_VERSION@.la \ + ../src/parser/liborcus-parser-@ORCUS_API_VERSION@.la + +json_doc_2_SOURCES = \ + json_doc_2.cpp + +json_doc_2_LDADD = \ + ../src/liborcus/liborcus-@ORCUS_API_VERSION@.la \ + ../src/parser/liborcus-parser-@ORCUS_API_VERSION@.la + +json_parser_1_SOURCES = \ + json_parser_1.cpp + +json_parser_1_LDADD = \ + ../src/parser/liborcus-parser-@ORCUS_API_VERSION@.la + +xml_mapping_1_SOURCES = \ + xml_mapping_1.cpp + +xml_mapping_1_LDADD = \ + ../src/parser/liborcus-parser-@ORCUS_API_VERSION@.la \ + ../src/liborcus/liborcus-@ORCUS_API_VERSION@.la + +# Spreadsheet document examples. + +SPDOC_LDADD = \ + ../src/parser/liborcus-parser-@ORCUS_API_VERSION@.la \ + ../src/liborcus/liborcus-@ORCUS_API_VERSION@.la \ + ../src/spreadsheet/liborcus-spreadsheet-model-@ORCUS_API_VERSION@.la \ + $(LIBIXION_LIBS) + +spreadsheet_doc_1_SOURCES = \ + spreadsheet_doc_1.cpp + +spreadsheet_doc_1_LDADD = $(SPDOC_LDADD) + +spreadsheet_doc_1_num_and_formula_SOURCES = \ + spreadsheet_doc_1_num_and_formula.cpp + +spreadsheet_doc_1_num_and_formula_LDADD = $(SPDOC_LDADD) + +spreadsheet_doc_2_SOURCES = \ + spreadsheet_doc_2.cpp + +spreadsheet_doc_2_LDADD = \ + ../src/parser/liborcus-parser-@ORCUS_API_VERSION@.la \ + ../src/liborcus/liborcus-@ORCUS_API_VERSION@.la + +spreadsheet_doc_2_sheets_no_string_pool_SOURCES = \ + spreadsheet_doc_2_sheets_no_string_pool.cpp + +spreadsheet_doc_2_sheets_no_string_pool_LDADD = \ + ../src/parser/liborcus-parser-@ORCUS_API_VERSION@.la \ + ../src/liborcus/liborcus-@ORCUS_API_VERSION@.la + +spreadsheet_doc_2_sheets_with_string_pool_SOURCES = \ + spreadsheet_doc_2_sheets_with_string_pool.cpp + +spreadsheet_doc_2_sheets_with_string_pool_LDADD = \ + ../src/parser/liborcus-parser-@ORCUS_API_VERSION@.la \ + ../src/liborcus/liborcus-@ORCUS_API_VERSION@.la + +spreadsheet_doc_2_sheets_with_formula_SOURCES = \ + spreadsheet_doc_2_sheets_with_formula.cpp + +spreadsheet_doc_2_sheets_with_formula_LDADD = \ + ../src/parser/liborcus-parser-@ORCUS_API_VERSION@.la \ + ../src/liborcus/liborcus-@ORCUS_API_VERSION@.la + +AM_TESTS_ENVIRONMENT = \ + INPUTDIR=$(srcdir)/files; export INPUTDIR; + +TESTS = \ + json-doc-1 \ + json-doc-2 \ + json-parser-1 \ + xml-mapping-1 \ + spreadsheet-doc-1 \ + spreadsheet-doc-1-num-and-formula \ + spreadsheet-doc-2 \ + spreadsheet-doc-2-sheets-no-string-pool \ + spreadsheet-doc-2-sheets-with-string-pool \ + spreadsheet-doc-2-sheets-with-formula + +distclean-local: + rm -rf $(TESTS) diff --git a/doc_example/Makefile.in b/doc_example/Makefile.in new file 
mode 100644 index 0000000..5eaad38 --- /dev/null +++ b/doc_example/Makefile.in @@ -0,0 +1,1409 @@ +# Makefile.in generated by automake 1.16.5 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2021 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +VPATH = @srcdir@ +am__is_gnu_make = { \ + if test -z '$(MAKELEVEL)'; then \ + false; \ + elif test -n '$(MAKE_HOST)'; then \ + true; \ + elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ + true; \ + else \ + false; \ + fi; \ +} +am__make_running_with_option = \ + case $${target_option-} in \ + ?) ;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +bin_PROGRAMS = +EXTRA_PROGRAMS = json-doc-1$(EXEEXT) json-doc-2$(EXEEXT) \ + json-parser-1$(EXEEXT) xml-mapping-1$(EXEEXT) \ + spreadsheet-doc-1$(EXEEXT) \ + spreadsheet-doc-1-num-and-formula$(EXEEXT) \ + spreadsheet-doc-2$(EXEEXT) \ + spreadsheet-doc-2-sheets-no-string-pool$(EXEEXT) \ + spreadsheet-doc-2-sheets-with-string-pool$(EXEEXT) \ + spreadsheet-doc-2-sheets-with-formula$(EXEEXT) +TESTS = json-doc-1$(EXEEXT) json-doc-2$(EXEEXT) json-parser-1$(EXEEXT) \ + xml-mapping-1$(EXEEXT) spreadsheet-doc-1$(EXEEXT) \ + spreadsheet-doc-1-num-and-formula$(EXEEXT) \ + spreadsheet-doc-2$(EXEEXT) \ + spreadsheet-doc-2-sheets-no-string-pool$(EXEEXT) \ + spreadsheet-doc-2-sheets-with-string-pool$(EXEEXT) \ + spreadsheet-doc-2-sheets-with-formula$(EXEEXT) +subdir = doc_example 
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/m4/ax_cxx_compile_stdcxx.m4 \ + $(top_srcdir)/m4/ax_cxx_compile_stdcxx_17.m4 \ + $(top_srcdir)/m4/boost.m4 $(top_srcdir)/m4/libtool.m4 \ + $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ + $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ + $(top_srcdir)/m4/m4_ax_valgrind_check.m4 \ + $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +am__installdirs = "$(DESTDIR)$(bindir)" +PROGRAMS = $(bin_PROGRAMS) +am_json_doc_1_OBJECTS = json_doc_1.$(OBJEXT) +json_doc_1_OBJECTS = $(am_json_doc_1_OBJECTS) +json_doc_1_DEPENDENCIES = \ + ../src/liborcus/liborcus-@ORCUS_API_VERSION@.la \ + ../src/parser/liborcus-parser-@ORCUS_API_VERSION@.la +AM_V_lt = $(am__v_lt_@AM_V@) +am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@) +am__v_lt_0 = --silent +am__v_lt_1 = +am_json_doc_2_OBJECTS = json_doc_2.$(OBJEXT) +json_doc_2_OBJECTS = $(am_json_doc_2_OBJECTS) +json_doc_2_DEPENDENCIES = \ + ../src/liborcus/liborcus-@ORCUS_API_VERSION@.la \ + ../src/parser/liborcus-parser-@ORCUS_API_VERSION@.la +am_json_parser_1_OBJECTS = json_parser_1.$(OBJEXT) +json_parser_1_OBJECTS = $(am_json_parser_1_OBJECTS) +json_parser_1_DEPENDENCIES = \ + ../src/parser/liborcus-parser-@ORCUS_API_VERSION@.la +am_spreadsheet_doc_1_OBJECTS = spreadsheet_doc_1.$(OBJEXT) +spreadsheet_doc_1_OBJECTS = $(am_spreadsheet_doc_1_OBJECTS) +am__DEPENDENCIES_1 = +am__DEPENDENCIES_2 = \ + ../src/parser/liborcus-parser-@ORCUS_API_VERSION@.la \ + ../src/liborcus/liborcus-@ORCUS_API_VERSION@.la \ + ../src/spreadsheet/liborcus-spreadsheet-model-@ORCUS_API_VERSION@.la \ + $(am__DEPENDENCIES_1) +spreadsheet_doc_1_DEPENDENCIES = $(am__DEPENDENCIES_2) +am_spreadsheet_doc_1_num_and_formula_OBJECTS = \ + spreadsheet_doc_1_num_and_formula.$(OBJEXT) +spreadsheet_doc_1_num_and_formula_OBJECTS = \ + $(am_spreadsheet_doc_1_num_and_formula_OBJECTS) +spreadsheet_doc_1_num_and_formula_DEPENDENCIES = \ + $(am__DEPENDENCIES_2) +am_spreadsheet_doc_2_OBJECTS = spreadsheet_doc_2.$(OBJEXT) +spreadsheet_doc_2_OBJECTS = $(am_spreadsheet_doc_2_OBJECTS) +spreadsheet_doc_2_DEPENDENCIES = \ + ../src/parser/liborcus-parser-@ORCUS_API_VERSION@.la \ + ../src/liborcus/liborcus-@ORCUS_API_VERSION@.la +am_spreadsheet_doc_2_sheets_no_string_pool_OBJECTS = \ + spreadsheet_doc_2_sheets_no_string_pool.$(OBJEXT) +spreadsheet_doc_2_sheets_no_string_pool_OBJECTS = \ + $(am_spreadsheet_doc_2_sheets_no_string_pool_OBJECTS) +spreadsheet_doc_2_sheets_no_string_pool_DEPENDENCIES = \ + ../src/parser/liborcus-parser-@ORCUS_API_VERSION@.la \ + ../src/liborcus/liborcus-@ORCUS_API_VERSION@.la +am_spreadsheet_doc_2_sheets_with_formula_OBJECTS = \ + spreadsheet_doc_2_sheets_with_formula.$(OBJEXT) +spreadsheet_doc_2_sheets_with_formula_OBJECTS = \ + $(am_spreadsheet_doc_2_sheets_with_formula_OBJECTS) +spreadsheet_doc_2_sheets_with_formula_DEPENDENCIES = \ + ../src/parser/liborcus-parser-@ORCUS_API_VERSION@.la \ + ../src/liborcus/liborcus-@ORCUS_API_VERSION@.la +am_spreadsheet_doc_2_sheets_with_string_pool_OBJECTS = \ + spreadsheet_doc_2_sheets_with_string_pool.$(OBJEXT) +spreadsheet_doc_2_sheets_with_string_pool_OBJECTS = \ + $(am_spreadsheet_doc_2_sheets_with_string_pool_OBJECTS) +spreadsheet_doc_2_sheets_with_string_pool_DEPENDENCIES = \ + 
../src/parser/liborcus-parser-@ORCUS_API_VERSION@.la \ + ../src/liborcus/liborcus-@ORCUS_API_VERSION@.la +am_xml_mapping_1_OBJECTS = xml_mapping_1.$(OBJEXT) +xml_mapping_1_OBJECTS = $(am_xml_mapping_1_OBJECTS) +xml_mapping_1_DEPENDENCIES = \ + ../src/parser/liborcus-parser-@ORCUS_API_VERSION@.la \ + ../src/liborcus/liborcus-@ORCUS_API_VERSION@.la +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) +depcomp = $(SHELL) $(top_srcdir)/depcomp +am__maybe_remake_depfiles = depfiles +am__depfiles_remade = ./$(DEPDIR)/json_doc_1.Po \ + ./$(DEPDIR)/json_doc_2.Po ./$(DEPDIR)/json_parser_1.Po \ + ./$(DEPDIR)/spreadsheet_doc_1.Po \ + ./$(DEPDIR)/spreadsheet_doc_1_num_and_formula.Po \ + ./$(DEPDIR)/spreadsheet_doc_2.Po \ + ./$(DEPDIR)/spreadsheet_doc_2_sheets_no_string_pool.Po \ + ./$(DEPDIR)/spreadsheet_doc_2_sheets_with_formula.Po \ + ./$(DEPDIR)/spreadsheet_doc_2_sheets_with_string_pool.Po \ + ./$(DEPDIR)/xml_mapping_1.Po +am__mv = mv -f +CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ + $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) +LTCXXCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ + $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) \ + $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ + $(AM_CXXFLAGS) $(CXXFLAGS) +AM_V_CXX = $(am__v_CXX_@AM_V@) +am__v_CXX_ = $(am__v_CXX_@AM_DEFAULT_V@) +am__v_CXX_0 = @echo " CXX " $@; +am__v_CXX_1 = +CXXLD = $(CXX) +CXXLINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ + $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \ + $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ +AM_V_CXXLD = $(am__v_CXXLD_@AM_V@) +am__v_CXXLD_ = $(am__v_CXXLD_@AM_DEFAULT_V@) +am__v_CXXLD_0 = @echo " CXXLD " $@; +am__v_CXXLD_1 = +SOURCES = $(json_doc_1_SOURCES) $(json_doc_2_SOURCES) \ + $(json_parser_1_SOURCES) $(spreadsheet_doc_1_SOURCES) \ + $(spreadsheet_doc_1_num_and_formula_SOURCES) \ + $(spreadsheet_doc_2_SOURCES) \ + $(spreadsheet_doc_2_sheets_no_string_pool_SOURCES) \ + $(spreadsheet_doc_2_sheets_with_formula_SOURCES) \ + $(spreadsheet_doc_2_sheets_with_string_pool_SOURCES) \ + $(xml_mapping_1_SOURCES) +DIST_SOURCES = $(json_doc_1_SOURCES) $(json_doc_2_SOURCES) \ + $(json_parser_1_SOURCES) $(spreadsheet_doc_1_SOURCES) \ + $(spreadsheet_doc_1_num_and_formula_SOURCES) \ + $(spreadsheet_doc_2_SOURCES) \ + $(spreadsheet_doc_2_sheets_no_string_pool_SOURCES) \ + $(spreadsheet_doc_2_sheets_with_formula_SOURCES) \ + $(spreadsheet_doc_2_sheets_with_string_pool_SOURCES) \ + $(xml_mapping_1_SOURCES) +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +am__extra_recursive_targets = check-valgrind-recursive \ + check-valgrind-memcheck-recursive \ + check-valgrind-helgrind-recursive check-valgrind-drd-recursive \ + check-valgrind-sgcheck-recursive +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +# Read a list of newline-separated strings from the standard input, +# and print each of them once, without duplicates. Input order is +# *not* preserved. 
+am__uniquify_input = $(AWK) '\ + BEGIN { nonempty = 0; } \ + { items[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in items) print i; }; } \ +' +# Make sure the list of sources is unique. This is necessary because, +# e.g., the same source file might be shared among _SOURCES variables +# for different programs/libraries. +am__define_uniq_tagged_files = \ + list='$(am__tagged_files)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | $(am__uniquify_input)` +am__tty_colors_dummy = \ + mgn= red= grn= lgn= blu= brg= std=; \ + am__color_tests=no +am__tty_colors = { \ + $(am__tty_colors_dummy); \ + if test "X$(AM_COLOR_TESTS)" = Xno; then \ + am__color_tests=no; \ + elif test "X$(AM_COLOR_TESTS)" = Xalways; then \ + am__color_tests=yes; \ + elif test "X$$TERM" != Xdumb && { test -t 1; } 2>/dev/null; then \ + am__color_tests=yes; \ + fi; \ + if test $$am__color_tests = yes; then \ + red='[0;31m'; \ + grn='[0;32m'; \ + lgn='[1;32m'; \ + blu='[1;34m'; \ + mgn='[0;35m'; \ + brg='[1m'; \ + std='[m'; \ + fi; \ +} +am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; +am__vpath_adj = case $$p in \ + $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ + *) f=$$p;; \ + esac; +am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; +am__install_max = 40 +am__nobase_strip_setup = \ + srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` +am__nobase_strip = \ + for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" +am__nobase_list = $(am__nobase_strip_setup); \ + for p in $$list; do echo "$$p $$p"; done | \ + sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ + $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ + if (++n[$$2] == $(am__install_max)) \ + { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ + END { for (dir in files) print dir, files[dir] }' +am__base_list = \ + sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ + sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' +am__uninstall_files_from_dir = { \ + test -z "$$files" \ + || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ + || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ + $(am__cd) "$$dir" && rm -f $$files; }; \ + } +am__recheck_rx = ^[ ]*:recheck:[ ]* +am__global_test_result_rx = ^[ ]*:global-test-result:[ ]* +am__copy_in_global_log_rx = ^[ ]*:copy-in-global-log:[ ]* +# A command that, given a newline-separated list of test names on the +# standard input, print the name of the tests that are to be re-run +# upon "make recheck". +am__list_recheck_tests = $(AWK) '{ \ + recheck = 1; \ + while ((rc = (getline line < ($$0 ".trs"))) != 0) \ + { \ + if (rc < 0) \ + { \ + if ((getline line2 < ($$0 ".log")) < 0) \ + recheck = 0; \ + break; \ + } \ + else if (line ~ /$(am__recheck_rx)[nN][Oo]/) \ + { \ + recheck = 0; \ + break; \ + } \ + else if (line ~ /$(am__recheck_rx)[yY][eE][sS]/) \ + { \ + break; \ + } \ + }; \ + if (recheck) \ + print $$0; \ + close ($$0 ".trs"); \ + close ($$0 ".log"); \ +}' +# A command that, given a newline-separated list of test names on the +# standard input, create the global log from their .trs and .log files. 
+am__create_global_log = $(AWK) ' \ +function fatal(msg) \ +{ \ + print "fatal: making $@: " msg | "cat >&2"; \ + exit 1; \ +} \ +function rst_section(header) \ +{ \ + print header; \ + len = length(header); \ + for (i = 1; i <= len; i = i + 1) \ + printf "="; \ + printf "\n\n"; \ +} \ +{ \ + copy_in_global_log = 1; \ + global_test_result = "RUN"; \ + while ((rc = (getline line < ($$0 ".trs"))) != 0) \ + { \ + if (rc < 0) \ + fatal("failed to read from " $$0 ".trs"); \ + if (line ~ /$(am__global_test_result_rx)/) \ + { \ + sub("$(am__global_test_result_rx)", "", line); \ + sub("[ ]*$$", "", line); \ + global_test_result = line; \ + } \ + else if (line ~ /$(am__copy_in_global_log_rx)[nN][oO]/) \ + copy_in_global_log = 0; \ + }; \ + if (copy_in_global_log) \ + { \ + rst_section(global_test_result ": " $$0); \ + while ((rc = (getline line < ($$0 ".log"))) != 0) \ + { \ + if (rc < 0) \ + fatal("failed to read from " $$0 ".log"); \ + print line; \ + }; \ + printf "\n"; \ + }; \ + close ($$0 ".trs"); \ + close ($$0 ".log"); \ +}' +# Restructured Text title. +am__rst_title = { sed 's/.*/ & /;h;s/./=/g;p;x;s/ *$$//;p;g' && echo; } +# Solaris 10 'make', and several other traditional 'make' implementations, +# pass "-e" to $(SHELL), and POSIX 2008 even requires this. Work around it +# by disabling -e (using the XSI extension "set +e") if it's set. +am__sh_e_setup = case $$- in *e*) set +e;; esac +# Default flags passed to test drivers. +am__common_driver_flags = \ + --color-tests "$$am__color_tests" \ + --enable-hard-errors "$$am__enable_hard_errors" \ + --expect-failure "$$am__expect_failure" +# To be inserted before the command running the test. Creates the +# directory for the log if needed. Stores in $dir the directory +# containing $f, in $tst the test, in $log the log. Executes the +# developer- defined test setup AM_TESTS_ENVIRONMENT (if any), and +# passes TESTS_ENVIRONMENT. Set up options for the wrapper that +# will run the test scripts (or their associated LOG_COMPILER, if +# thy have one). +am__check_pre = \ +$(am__sh_e_setup); \ +$(am__vpath_adj_setup) $(am__vpath_adj) \ +$(am__tty_colors); \ +srcdir=$(srcdir); export srcdir; \ +case "$@" in \ + */*) am__odir=`echo "./$@" | sed 's|/[^/]*$$||'`;; \ + *) am__odir=.;; \ +esac; \ +test "x$$am__odir" = x"." || test -d "$$am__odir" \ + || $(MKDIR_P) "$$am__odir" || exit $$?; \ +if test -f "./$$f"; then dir=./; \ +elif test -f "$$f"; then dir=; \ +else dir="$(srcdir)/"; fi; \ +tst=$$dir$$f; log='$@'; \ +if test -n '$(DISABLE_HARD_ERRORS)'; then \ + am__enable_hard_errors=no; \ +else \ + am__enable_hard_errors=yes; \ +fi; \ +case " $(XFAIL_TESTS) " in \ + *[\ \ ]$$f[\ \ ]* | *[\ \ ]$$dir$$f[\ \ ]*) \ + am__expect_failure=yes;; \ + *) \ + am__expect_failure=no;; \ +esac; \ +$(AM_TESTS_ENVIRONMENT) $(TESTS_ENVIRONMENT) +# A shell command to get the names of the tests scripts with any registered +# extension removed (i.e., equivalently, the names of the test logs, with +# the '.log' extension removed). The result is saved in the shell variable +# '$bases'. This honors runtime overriding of TESTS and TEST_LOGS. Sadly, +# we cannot use something simpler, involving e.g., "$(TEST_LOGS:.log=)", +# since that might cause problem with VPATH rewrites for suffix-less tests. +# See also 'test-harness-vpath-rewrite.sh' and 'test-trs-basic.sh'. 
+am__set_TESTS_bases = \ + bases='$(TEST_LOGS)'; \ + bases=`for i in $$bases; do echo $$i; done | sed 's/\.log$$//'`; \ + bases=`echo $$bases` +AM_TESTSUITE_SUMMARY_HEADER = ' for $(PACKAGE_STRING)' +RECHECK_LOGS = $(TEST_LOGS) +AM_RECURSIVE_TARGETS = check recheck +TEST_SUITE_LOG = test-suite.log +TEST_EXTENSIONS = @EXEEXT@ .test +LOG_DRIVER = $(SHELL) $(top_srcdir)/test-driver +LOG_COMPILE = $(LOG_COMPILER) $(AM_LOG_FLAGS) $(LOG_FLAGS) +am__set_b = \ + case '$@' in \ + */*) \ + case '$*' in \ + */*) b='$*';; \ + *) b=`echo '$@' | sed 's/\.log$$//'`; \ + esac;; \ + *) \ + b='$*';; \ + esac +am__test_logs1 = $(TESTS:=.log) +am__test_logs2 = $(am__test_logs1:@EXEEXT@.log=.log) +TEST_LOGS = $(am__test_logs2:.test.log=.log) +TEST_LOG_DRIVER = $(SHELL) $(top_srcdir)/test-driver +TEST_LOG_COMPILE = $(TEST_LOG_COMPILER) $(AM_TEST_LOG_FLAGS) \ + $(TEST_LOG_FLAGS) +am__DIST_COMMON = $(srcdir)/Makefile.in $(top_srcdir)/depcomp \ + $(top_srcdir)/test-driver +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AR = @AR@ +AS = @AS@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +BOOST_CPPFLAGS = @BOOST_CPPFLAGS@ +BOOST_DATE_TIME_LDFLAGS = @BOOST_DATE_TIME_LDFLAGS@ +BOOST_DATE_TIME_LDPATH = @BOOST_DATE_TIME_LDPATH@ +BOOST_DATE_TIME_LIBS = @BOOST_DATE_TIME_LIBS@ +BOOST_FILESYSTEM_LDFLAGS = @BOOST_FILESYSTEM_LDFLAGS@ +BOOST_FILESYSTEM_LDPATH = @BOOST_FILESYSTEM_LDPATH@ +BOOST_FILESYSTEM_LIBS = @BOOST_FILESYSTEM_LIBS@ +BOOST_IOSTREAMS_LDFLAGS = @BOOST_IOSTREAMS_LDFLAGS@ +BOOST_IOSTREAMS_LDPATH = @BOOST_IOSTREAMS_LDPATH@ +BOOST_IOSTREAMS_LIBS = @BOOST_IOSTREAMS_LIBS@ +BOOST_LDPATH = @BOOST_LDPATH@ +BOOST_PROGRAM_OPTIONS_LDFLAGS = @BOOST_PROGRAM_OPTIONS_LDFLAGS@ +BOOST_PROGRAM_OPTIONS_LDPATH = @BOOST_PROGRAM_OPTIONS_LDPATH@ +BOOST_PROGRAM_OPTIONS_LIBS = @BOOST_PROGRAM_OPTIONS_LIBS@ +BOOST_ROOT = @BOOST_ROOT@ +BOOST_SYSTEM_LDFLAGS = @BOOST_SYSTEM_LDFLAGS@ +BOOST_SYSTEM_LDPATH = @BOOST_SYSTEM_LDPATH@ +BOOST_SYSTEM_LIBS = @BOOST_SYSTEM_LIBS@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CSCOPE = @CSCOPE@ +CTAGS = @CTAGS@ +CXX = @CXX@ +CXXCPP = @CXXCPP@ +CXXDEPMODE = @CXXDEPMODE@ +CXXFLAGS = @CXXFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +DISTCHECK_CONFIGURE_FLAGS = @DISTCHECK_CONFIGURE_FLAGS@ +DLLTOOL = @DLLTOOL@ +DSYMUTIL = @DSYMUTIL@ +DUMPBIN = @DUMPBIN@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +ENABLE_VALGRIND_drd = @ENABLE_VALGRIND_drd@ +ENABLE_VALGRIND_helgrind = @ENABLE_VALGRIND_helgrind@ +ENABLE_VALGRIND_memcheck = @ENABLE_VALGRIND_memcheck@ +ENABLE_VALGRIND_sgcheck = @ENABLE_VALGRIND_sgcheck@ +ETAGS = @ETAGS@ +EXEEXT = @EXEEXT@ +FGREP = @FGREP@ +GREP = @GREP@ +HAVE_CXX17 = @HAVE_CXX17@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +IXION_REQUIRED_API_VERSION = @IXION_REQUIRED_API_VERSION@ +LD = @LD@ +LDFLAGS = @LDFLAGS@ +LIBIXION_CFLAGS = @LIBIXION_CFLAGS@ +LIBIXION_LIBS = @LIBIXION_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LIBTOOL = @LIBTOOL@ +LIPO = @LIPO@ +LN_S = @LN_S@ +LTLIBOBJS = @LTLIBOBJS@ +LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ +MAKEINFO = @MAKEINFO@ +MANIFEST_TOOL = @MANIFEST_TOOL@ +MDDS_CFLAGS = @MDDS_CFLAGS@ +MDDS_LIBS = @MDDS_LIBS@ +MKDIR_P = @MKDIR_P@ +NM = @NM@ +NMEDIT = @NMEDIT@ +OBJDUMP = @OBJDUMP@ +OBJEXT = 
@OBJEXT@ +ORCUS_API_VERSION = @ORCUS_API_VERSION@ +ORCUS_MAJOR_VERSION = @ORCUS_MAJOR_VERSION@ +ORCUS_MICRO_VERSION = @ORCUS_MICRO_VERSION@ +ORCUS_MINOR_VERSION = @ORCUS_MINOR_VERSION@ +OTOOL = @OTOOL@ +OTOOL64 = @OTOOL64@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PARQUET_CFLAGS = @PARQUET_CFLAGS@ +PARQUET_LIBS = @PARQUET_LIBS@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +POW_LIB = @POW_LIB@ +PYTHON = @PYTHON@ +PYTHON_CFLAGS = @PYTHON_CFLAGS@ +PYTHON_EXEC_PREFIX = @PYTHON_EXEC_PREFIX@ +PYTHON_LIBS = @PYTHON_LIBS@ +PYTHON_PLATFORM = @PYTHON_PLATFORM@ +PYTHON_PREFIX = @PYTHON_PREFIX@ +PYTHON_VERSION = @PYTHON_VERSION@ +RANLIB = @RANLIB@ +SED = @SED@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +STRIP = @STRIP@ +VALGRIND = @VALGRIND@ +VALGRIND_ENABLED = @VALGRIND_ENABLED@ +VERSION = @VERSION@ +ZLIB_CFLAGS = @ZLIB_CFLAGS@ +ZLIB_LIBS = @ZLIB_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_AR = @ac_ct_AR@ +ac_ct_CC = @ac_ct_CC@ +ac_ct_CXX = @ac_ct_CXX@ +ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_vendor = @build_vendor@ +builddir = @builddir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pkgpyexecdir = @pkgpyexecdir@ +pkgpythondir = @pkgpythondir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +pyexecdir = @pyexecdir@ +pythondir = @pythondir@ +runstatedir = @runstatedir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +valgrind_enabled_tools = @valgrind_enabled_tools@ +valgrind_tools = @valgrind_tools@ +AM_CPPFLAGS = \ + -I$(top_srcdir)/include \ + $(LIBIXION_CFLAGS) \ + -DSRCDIR=\""$(top_srcdir)"\" + +json_doc_1_SOURCES = \ + json_doc_1.cpp + +json_doc_1_LDADD = \ + ../src/liborcus/liborcus-@ORCUS_API_VERSION@.la \ + ../src/parser/liborcus-parser-@ORCUS_API_VERSION@.la + +json_doc_2_SOURCES = \ + json_doc_2.cpp + +json_doc_2_LDADD = \ + ../src/liborcus/liborcus-@ORCUS_API_VERSION@.la \ + ../src/parser/liborcus-parser-@ORCUS_API_VERSION@.la + +json_parser_1_SOURCES = \ + json_parser_1.cpp + +json_parser_1_LDADD = \ + ../src/parser/liborcus-parser-@ORCUS_API_VERSION@.la + +xml_mapping_1_SOURCES = \ + xml_mapping_1.cpp + +xml_mapping_1_LDADD = \ + ../src/parser/liborcus-parser-@ORCUS_API_VERSION@.la \ + ../src/liborcus/liborcus-@ORCUS_API_VERSION@.la + + +# Spreadsheet document examples. 
+SPDOC_LDADD = \ + ../src/parser/liborcus-parser-@ORCUS_API_VERSION@.la \ + ../src/liborcus/liborcus-@ORCUS_API_VERSION@.la \ + ../src/spreadsheet/liborcus-spreadsheet-model-@ORCUS_API_VERSION@.la \ + $(LIBIXION_LIBS) + +spreadsheet_doc_1_SOURCES = \ + spreadsheet_doc_1.cpp + +spreadsheet_doc_1_LDADD = $(SPDOC_LDADD) +spreadsheet_doc_1_num_and_formula_SOURCES = \ + spreadsheet_doc_1_num_and_formula.cpp + +spreadsheet_doc_1_num_and_formula_LDADD = $(SPDOC_LDADD) +spreadsheet_doc_2_SOURCES = \ + spreadsheet_doc_2.cpp + +spreadsheet_doc_2_LDADD = \ + ../src/parser/liborcus-parser-@ORCUS_API_VERSION@.la \ + ../src/liborcus/liborcus-@ORCUS_API_VERSION@.la + +spreadsheet_doc_2_sheets_no_string_pool_SOURCES = \ + spreadsheet_doc_2_sheets_no_string_pool.cpp + +spreadsheet_doc_2_sheets_no_string_pool_LDADD = \ + ../src/parser/liborcus-parser-@ORCUS_API_VERSION@.la \ + ../src/liborcus/liborcus-@ORCUS_API_VERSION@.la + +spreadsheet_doc_2_sheets_with_string_pool_SOURCES = \ + spreadsheet_doc_2_sheets_with_string_pool.cpp + +spreadsheet_doc_2_sheets_with_string_pool_LDADD = \ + ../src/parser/liborcus-parser-@ORCUS_API_VERSION@.la \ + ../src/liborcus/liborcus-@ORCUS_API_VERSION@.la + +spreadsheet_doc_2_sheets_with_formula_SOURCES = \ + spreadsheet_doc_2_sheets_with_formula.cpp + +spreadsheet_doc_2_sheets_with_formula_LDADD = \ + ../src/parser/liborcus-parser-@ORCUS_API_VERSION@.la \ + ../src/liborcus/liborcus-@ORCUS_API_VERSION@.la + +AM_TESTS_ENVIRONMENT = \ + INPUTDIR=$(srcdir)/files; export INPUTDIR; + +all: all-am + +.SUFFIXES: +.SUFFIXES: .cpp .lo .log .o .obj .test .test$(EXEEXT) .trs +$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign doc_example/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --foreign doc_example/Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' 
in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +install-binPROGRAMS: $(bin_PROGRAMS) + @$(NORMAL_INSTALL) + @list='$(bin_PROGRAMS)'; test -n "$(bindir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(bindir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(bindir)" || exit 1; \ + fi; \ + for p in $$list; do echo "$$p $$p"; done | \ + sed 's/$(EXEEXT)$$//' | \ + while read p p1; do if test -f $$p \ + || test -f $$p1 \ + ; then echo "$$p"; echo "$$p"; else :; fi; \ + done | \ + sed -e 'p;s,.*/,,;n;h' \ + -e 's|.*|.|' \ + -e 'p;x;s,.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/' | \ + sed 'N;N;N;s,\n, ,g' | \ + $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1 } \ + { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ + if ($$2 == $$4) files[d] = files[d] " " $$1; \ + else { print "f", $$3 "/" $$4, $$1; } } \ + END { for (d in files) print "f", d, files[d] }' | \ + while read type dir files; do \ + if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ + test -z "$$files" || { \ + echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files '$(DESTDIR)$(bindir)$$dir'"; \ + $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files "$(DESTDIR)$(bindir)$$dir" || exit $$?; \ + } \ + ; done + +uninstall-binPROGRAMS: + @$(NORMAL_UNINSTALL) + @list='$(bin_PROGRAMS)'; test -n "$(bindir)" || list=; \ + files=`for p in $$list; do echo "$$p"; done | \ + sed -e 'h;s,^.*/,,;s/$(EXEEXT)$$//;$(transform)' \ + -e 's/$$/$(EXEEXT)/' \ + `; \ + test -n "$$list" || exit 0; \ + echo " ( cd '$(DESTDIR)$(bindir)' && rm -f" $$files ")"; \ + cd "$(DESTDIR)$(bindir)" && rm -f $$files + +clean-binPROGRAMS: + @list='$(bin_PROGRAMS)'; test -n "$$list" || exit 0; \ + echo " rm -f" $$list; \ + rm -f $$list || exit $$?; \ + test -n "$(EXEEXT)" || exit 0; \ + list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ + echo " rm -f" $$list; \ + rm -f $$list + +json-doc-1$(EXEEXT): $(json_doc_1_OBJECTS) $(json_doc_1_DEPENDENCIES) $(EXTRA_json_doc_1_DEPENDENCIES) + @rm -f json-doc-1$(EXEEXT) + $(AM_V_CXXLD)$(CXXLINK) $(json_doc_1_OBJECTS) $(json_doc_1_LDADD) $(LIBS) + +json-doc-2$(EXEEXT): $(json_doc_2_OBJECTS) $(json_doc_2_DEPENDENCIES) $(EXTRA_json_doc_2_DEPENDENCIES) + @rm -f json-doc-2$(EXEEXT) + $(AM_V_CXXLD)$(CXXLINK) $(json_doc_2_OBJECTS) $(json_doc_2_LDADD) $(LIBS) + +json-parser-1$(EXEEXT): $(json_parser_1_OBJECTS) $(json_parser_1_DEPENDENCIES) $(EXTRA_json_parser_1_DEPENDENCIES) + @rm -f json-parser-1$(EXEEXT) + $(AM_V_CXXLD)$(CXXLINK) $(json_parser_1_OBJECTS) $(json_parser_1_LDADD) $(LIBS) + +spreadsheet-doc-1$(EXEEXT): $(spreadsheet_doc_1_OBJECTS) $(spreadsheet_doc_1_DEPENDENCIES) $(EXTRA_spreadsheet_doc_1_DEPENDENCIES) + @rm -f spreadsheet-doc-1$(EXEEXT) + $(AM_V_CXXLD)$(CXXLINK) $(spreadsheet_doc_1_OBJECTS) $(spreadsheet_doc_1_LDADD) $(LIBS) + 
+spreadsheet-doc-1-num-and-formula$(EXEEXT): $(spreadsheet_doc_1_num_and_formula_OBJECTS) $(spreadsheet_doc_1_num_and_formula_DEPENDENCIES) $(EXTRA_spreadsheet_doc_1_num_and_formula_DEPENDENCIES) + @rm -f spreadsheet-doc-1-num-and-formula$(EXEEXT) + $(AM_V_CXXLD)$(CXXLINK) $(spreadsheet_doc_1_num_and_formula_OBJECTS) $(spreadsheet_doc_1_num_and_formula_LDADD) $(LIBS) + +spreadsheet-doc-2$(EXEEXT): $(spreadsheet_doc_2_OBJECTS) $(spreadsheet_doc_2_DEPENDENCIES) $(EXTRA_spreadsheet_doc_2_DEPENDENCIES) + @rm -f spreadsheet-doc-2$(EXEEXT) + $(AM_V_CXXLD)$(CXXLINK) $(spreadsheet_doc_2_OBJECTS) $(spreadsheet_doc_2_LDADD) $(LIBS) + +spreadsheet-doc-2-sheets-no-string-pool$(EXEEXT): $(spreadsheet_doc_2_sheets_no_string_pool_OBJECTS) $(spreadsheet_doc_2_sheets_no_string_pool_DEPENDENCIES) $(EXTRA_spreadsheet_doc_2_sheets_no_string_pool_DEPENDENCIES) + @rm -f spreadsheet-doc-2-sheets-no-string-pool$(EXEEXT) + $(AM_V_CXXLD)$(CXXLINK) $(spreadsheet_doc_2_sheets_no_string_pool_OBJECTS) $(spreadsheet_doc_2_sheets_no_string_pool_LDADD) $(LIBS) + +spreadsheet-doc-2-sheets-with-formula$(EXEEXT): $(spreadsheet_doc_2_sheets_with_formula_OBJECTS) $(spreadsheet_doc_2_sheets_with_formula_DEPENDENCIES) $(EXTRA_spreadsheet_doc_2_sheets_with_formula_DEPENDENCIES) + @rm -f spreadsheet-doc-2-sheets-with-formula$(EXEEXT) + $(AM_V_CXXLD)$(CXXLINK) $(spreadsheet_doc_2_sheets_with_formula_OBJECTS) $(spreadsheet_doc_2_sheets_with_formula_LDADD) $(LIBS) + +spreadsheet-doc-2-sheets-with-string-pool$(EXEEXT): $(spreadsheet_doc_2_sheets_with_string_pool_OBJECTS) $(spreadsheet_doc_2_sheets_with_string_pool_DEPENDENCIES) $(EXTRA_spreadsheet_doc_2_sheets_with_string_pool_DEPENDENCIES) + @rm -f spreadsheet-doc-2-sheets-with-string-pool$(EXEEXT) + $(AM_V_CXXLD)$(CXXLINK) $(spreadsheet_doc_2_sheets_with_string_pool_OBJECTS) $(spreadsheet_doc_2_sheets_with_string_pool_LDADD) $(LIBS) + +xml-mapping-1$(EXEEXT): $(xml_mapping_1_OBJECTS) $(xml_mapping_1_DEPENDENCIES) $(EXTRA_xml_mapping_1_DEPENDENCIES) + @rm -f xml-mapping-1$(EXEEXT) + $(AM_V_CXXLD)$(CXXLINK) $(xml_mapping_1_OBJECTS) $(xml_mapping_1_LDADD) $(LIBS) + +mostlyclean-compile: + -rm -f *.$(OBJEXT) + +distclean-compile: + -rm -f *.tab.c + +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/json_doc_1.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/json_doc_2.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/json_parser_1.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/spreadsheet_doc_1.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/spreadsheet_doc_1_num_and_formula.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/spreadsheet_doc_2.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/spreadsheet_doc_2_sheets_no_string_pool.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/spreadsheet_doc_2_sheets_with_formula.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/spreadsheet_doc_2_sheets_with_string_pool.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/xml_mapping_1.Po@am__quote@ # am--include-marker + +$(am__depfiles_remade): + @$(MKDIR_P) $(@D) + @echo '# dummy' >$@-t && $(am__mv) $@-t $@ + +am--depfiles: $(am__depfiles_remade) + +.cpp.o: +@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< +@am__fastdepCXX_TRUE@ 
$(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ $< + +.cpp.obj: +@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` +@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` + +.cpp.lo: +@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< +@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LTCXXCOMPILE) -c -o $@ $< + +mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs +check-valgrind-local: +check-valgrind-memcheck-local: +check-valgrind-helgrind-local: +check-valgrind-drd-local: +check-valgrind-sgcheck-local: + +ID: $(am__tagged_files) + $(am__define_uniq_tagged_files); mkid -fID $$unique +tags: tags-am +TAGS: tags + +tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + set x; \ + here=`pwd`; \ + $(am__define_uniq_tagged_files); \ + shift; \ + if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + if test $$# -gt 0; then \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + "$$@" $$unique; \ + else \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$unique; \ + fi; \ + fi +ctags: ctags-am + +CTAGS: ctags +ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + $(am__define_uniq_tagged_files); \ + test -z "$(CTAGS_ARGS)$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$unique + +GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && $(am__cd) $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) "$$here" +cscopelist: cscopelist-am + +cscopelist-am: $(am__tagged_files) + list='$(am__tagged_files)'; \ + case "$(srcdir)" in \ + [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ + *) sdir=$(subdir)/$(srcdir) ;; \ + esac; \ + for i in $$list; do \ + if test -f "$$i"; then \ + echo "$(subdir)/$$i"; \ + else \ + echo "$$sdir/$$i"; \ + fi; \ + done >> $(top_builddir)/cscope.files + +distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags + +# Recover from deleted '.trs' file; this should ensure that +# "rm -f foo.log; make foo.trs" re-run 'foo.test', and re-create +# both 'foo.log' and 'foo.trs'. Break the recipe in two subshells +# to avoid problems with "make -n". +.log.trs: + rm -f $< $@ + $(MAKE) $(AM_MAKEFLAGS) $< + +# Leading 'am--fnord' is there to ensure the list of targets does not +# expand to empty, as could happen e.g. with make check TESTS=''. 
+am--fnord $(TEST_LOGS) $(TEST_LOGS:.log=.trs): $(am__force_recheck) +am--force-recheck: + @: + +$(TEST_SUITE_LOG): $(TEST_LOGS) + @$(am__set_TESTS_bases); \ + am__f_ok () { test -f "$$1" && test -r "$$1"; }; \ + redo_bases=`for i in $$bases; do \ + am__f_ok $$i.trs && am__f_ok $$i.log || echo $$i; \ + done`; \ + if test -n "$$redo_bases"; then \ + redo_logs=`for i in $$redo_bases; do echo $$i.log; done`; \ + redo_results=`for i in $$redo_bases; do echo $$i.trs; done`; \ + if $(am__make_dryrun); then :; else \ + rm -f $$redo_logs && rm -f $$redo_results || exit 1; \ + fi; \ + fi; \ + if test -n "$$am__remaking_logs"; then \ + echo "fatal: making $(TEST_SUITE_LOG): possible infinite" \ + "recursion detected" >&2; \ + elif test -n "$$redo_logs"; then \ + am__remaking_logs=yes $(MAKE) $(AM_MAKEFLAGS) $$redo_logs; \ + fi; \ + if $(am__make_dryrun); then :; else \ + st=0; \ + errmsg="fatal: making $(TEST_SUITE_LOG): failed to create"; \ + for i in $$redo_bases; do \ + test -f $$i.trs && test -r $$i.trs \ + || { echo "$$errmsg $$i.trs" >&2; st=1; }; \ + test -f $$i.log && test -r $$i.log \ + || { echo "$$errmsg $$i.log" >&2; st=1; }; \ + done; \ + test $$st -eq 0 || exit 1; \ + fi + @$(am__sh_e_setup); $(am__tty_colors); $(am__set_TESTS_bases); \ + ws='[ ]'; \ + results=`for b in $$bases; do echo $$b.trs; done`; \ + test -n "$$results" || results=/dev/null; \ + all=` grep "^$$ws*:test-result:" $$results | wc -l`; \ + pass=` grep "^$$ws*:test-result:$$ws*PASS" $$results | wc -l`; \ + fail=` grep "^$$ws*:test-result:$$ws*FAIL" $$results | wc -l`; \ + skip=` grep "^$$ws*:test-result:$$ws*SKIP" $$results | wc -l`; \ + xfail=`grep "^$$ws*:test-result:$$ws*XFAIL" $$results | wc -l`; \ + xpass=`grep "^$$ws*:test-result:$$ws*XPASS" $$results | wc -l`; \ + error=`grep "^$$ws*:test-result:$$ws*ERROR" $$results | wc -l`; \ + if test `expr $$fail + $$xpass + $$error` -eq 0; then \ + success=true; \ + else \ + success=false; \ + fi; \ + br='==================='; br=$$br$$br$$br$$br; \ + result_count () \ + { \ + if test x"$$1" = x"--maybe-color"; then \ + maybe_colorize=yes; \ + elif test x"$$1" = x"--no-color"; then \ + maybe_colorize=no; \ + else \ + echo "$@: invalid 'result_count' usage" >&2; exit 4; \ + fi; \ + shift; \ + desc=$$1 count=$$2; \ + if test $$maybe_colorize = yes && test $$count -gt 0; then \ + color_start=$$3 color_end=$$std; \ + else \ + color_start= color_end=; \ + fi; \ + echo "$${color_start}# $$desc $$count$${color_end}"; \ + }; \ + create_testsuite_report () \ + { \ + result_count $$1 "TOTAL:" $$all "$$brg"; \ + result_count $$1 "PASS: " $$pass "$$grn"; \ + result_count $$1 "SKIP: " $$skip "$$blu"; \ + result_count $$1 "XFAIL:" $$xfail "$$lgn"; \ + result_count $$1 "FAIL: " $$fail "$$red"; \ + result_count $$1 "XPASS:" $$xpass "$$red"; \ + result_count $$1 "ERROR:" $$error "$$mgn"; \ + }; \ + { \ + echo "$(PACKAGE_STRING): $(subdir)/$(TEST_SUITE_LOG)" | \ + $(am__rst_title); \ + create_testsuite_report --no-color; \ + echo; \ + echo ".. 
contents:: :depth: 2"; \ + echo; \ + for b in $$bases; do echo $$b; done \ + | $(am__create_global_log); \ + } >$(TEST_SUITE_LOG).tmp || exit 1; \ + mv $(TEST_SUITE_LOG).tmp $(TEST_SUITE_LOG); \ + if $$success; then \ + col="$$grn"; \ + else \ + col="$$red"; \ + test x"$$VERBOSE" = x || cat $(TEST_SUITE_LOG); \ + fi; \ + echo "$${col}$$br$${std}"; \ + echo "$${col}Testsuite summary"$(AM_TESTSUITE_SUMMARY_HEADER)"$${std}"; \ + echo "$${col}$$br$${std}"; \ + create_testsuite_report --maybe-color; \ + echo "$$col$$br$$std"; \ + if $$success; then :; else \ + echo "$${col}See $(subdir)/$(TEST_SUITE_LOG)$${std}"; \ + if test -n "$(PACKAGE_BUGREPORT)"; then \ + echo "$${col}Please report to $(PACKAGE_BUGREPORT)$${std}"; \ + fi; \ + echo "$$col$$br$$std"; \ + fi; \ + $$success || exit 1 + +check-TESTS: + @list='$(RECHECK_LOGS)'; test -z "$$list" || rm -f $$list + @list='$(RECHECK_LOGS:.log=.trs)'; test -z "$$list" || rm -f $$list + @test -z "$(TEST_SUITE_LOG)" || rm -f $(TEST_SUITE_LOG) + @set +e; $(am__set_TESTS_bases); \ + log_list=`for i in $$bases; do echo $$i.log; done`; \ + trs_list=`for i in $$bases; do echo $$i.trs; done`; \ + log_list=`echo $$log_list`; trs_list=`echo $$trs_list`; \ + $(MAKE) $(AM_MAKEFLAGS) $(TEST_SUITE_LOG) TEST_LOGS="$$log_list"; \ + exit $$?; +recheck: all + @test -z "$(TEST_SUITE_LOG)" || rm -f $(TEST_SUITE_LOG) + @set +e; $(am__set_TESTS_bases); \ + bases=`for i in $$bases; do echo $$i; done \ + | $(am__list_recheck_tests)` || exit 1; \ + log_list=`for i in $$bases; do echo $$i.log; done`; \ + log_list=`echo $$log_list`; \ + $(MAKE) $(AM_MAKEFLAGS) $(TEST_SUITE_LOG) \ + am__force_recheck=am--force-recheck \ + TEST_LOGS="$$log_list"; \ + exit $$? +json-doc-1.log: json-doc-1$(EXEEXT) + @p='json-doc-1$(EXEEXT)'; \ + b='json-doc-1'; \ + $(am__check_pre) $(LOG_DRIVER) --test-name "$$f" \ + --log-file $$b.log --trs-file $$b.trs \ + $(am__common_driver_flags) $(AM_LOG_DRIVER_FLAGS) $(LOG_DRIVER_FLAGS) -- $(LOG_COMPILE) \ + "$$tst" $(AM_TESTS_FD_REDIRECT) +json-doc-2.log: json-doc-2$(EXEEXT) + @p='json-doc-2$(EXEEXT)'; \ + b='json-doc-2'; \ + $(am__check_pre) $(LOG_DRIVER) --test-name "$$f" \ + --log-file $$b.log --trs-file $$b.trs \ + $(am__common_driver_flags) $(AM_LOG_DRIVER_FLAGS) $(LOG_DRIVER_FLAGS) -- $(LOG_COMPILE) \ + "$$tst" $(AM_TESTS_FD_REDIRECT) +json-parser-1.log: json-parser-1$(EXEEXT) + @p='json-parser-1$(EXEEXT)'; \ + b='json-parser-1'; \ + $(am__check_pre) $(LOG_DRIVER) --test-name "$$f" \ + --log-file $$b.log --trs-file $$b.trs \ + $(am__common_driver_flags) $(AM_LOG_DRIVER_FLAGS) $(LOG_DRIVER_FLAGS) -- $(LOG_COMPILE) \ + "$$tst" $(AM_TESTS_FD_REDIRECT) +xml-mapping-1.log: xml-mapping-1$(EXEEXT) + @p='xml-mapping-1$(EXEEXT)'; \ + b='xml-mapping-1'; \ + $(am__check_pre) $(LOG_DRIVER) --test-name "$$f" \ + --log-file $$b.log --trs-file $$b.trs \ + $(am__common_driver_flags) $(AM_LOG_DRIVER_FLAGS) $(LOG_DRIVER_FLAGS) -- $(LOG_COMPILE) \ + "$$tst" $(AM_TESTS_FD_REDIRECT) +spreadsheet-doc-1.log: spreadsheet-doc-1$(EXEEXT) + @p='spreadsheet-doc-1$(EXEEXT)'; \ + b='spreadsheet-doc-1'; \ + $(am__check_pre) $(LOG_DRIVER) --test-name "$$f" \ + --log-file $$b.log --trs-file $$b.trs \ + $(am__common_driver_flags) $(AM_LOG_DRIVER_FLAGS) $(LOG_DRIVER_FLAGS) -- $(LOG_COMPILE) \ + "$$tst" $(AM_TESTS_FD_REDIRECT) +spreadsheet-doc-1-num-and-formula.log: spreadsheet-doc-1-num-and-formula$(EXEEXT) + @p='spreadsheet-doc-1-num-and-formula$(EXEEXT)'; \ + b='spreadsheet-doc-1-num-and-formula'; \ + $(am__check_pre) $(LOG_DRIVER) --test-name "$$f" \ + --log-file $$b.log --trs-file 
$$b.trs \ + $(am__common_driver_flags) $(AM_LOG_DRIVER_FLAGS) $(LOG_DRIVER_FLAGS) -- $(LOG_COMPILE) \ + "$$tst" $(AM_TESTS_FD_REDIRECT) +spreadsheet-doc-2.log: spreadsheet-doc-2$(EXEEXT) + @p='spreadsheet-doc-2$(EXEEXT)'; \ + b='spreadsheet-doc-2'; \ + $(am__check_pre) $(LOG_DRIVER) --test-name "$$f" \ + --log-file $$b.log --trs-file $$b.trs \ + $(am__common_driver_flags) $(AM_LOG_DRIVER_FLAGS) $(LOG_DRIVER_FLAGS) -- $(LOG_COMPILE) \ + "$$tst" $(AM_TESTS_FD_REDIRECT) +spreadsheet-doc-2-sheets-no-string-pool.log: spreadsheet-doc-2-sheets-no-string-pool$(EXEEXT) + @p='spreadsheet-doc-2-sheets-no-string-pool$(EXEEXT)'; \ + b='spreadsheet-doc-2-sheets-no-string-pool'; \ + $(am__check_pre) $(LOG_DRIVER) --test-name "$$f" \ + --log-file $$b.log --trs-file $$b.trs \ + $(am__common_driver_flags) $(AM_LOG_DRIVER_FLAGS) $(LOG_DRIVER_FLAGS) -- $(LOG_COMPILE) \ + "$$tst" $(AM_TESTS_FD_REDIRECT) +spreadsheet-doc-2-sheets-with-string-pool.log: spreadsheet-doc-2-sheets-with-string-pool$(EXEEXT) + @p='spreadsheet-doc-2-sheets-with-string-pool$(EXEEXT)'; \ + b='spreadsheet-doc-2-sheets-with-string-pool'; \ + $(am__check_pre) $(LOG_DRIVER) --test-name "$$f" \ + --log-file $$b.log --trs-file $$b.trs \ + $(am__common_driver_flags) $(AM_LOG_DRIVER_FLAGS) $(LOG_DRIVER_FLAGS) -- $(LOG_COMPILE) \ + "$$tst" $(AM_TESTS_FD_REDIRECT) +spreadsheet-doc-2-sheets-with-formula.log: spreadsheet-doc-2-sheets-with-formula$(EXEEXT) + @p='spreadsheet-doc-2-sheets-with-formula$(EXEEXT)'; \ + b='spreadsheet-doc-2-sheets-with-formula'; \ + $(am__check_pre) $(LOG_DRIVER) --test-name "$$f" \ + --log-file $$b.log --trs-file $$b.trs \ + $(am__common_driver_flags) $(AM_LOG_DRIVER_FLAGS) $(LOG_DRIVER_FLAGS) -- $(LOG_COMPILE) \ + "$$tst" $(AM_TESTS_FD_REDIRECT) +.test.log: + @p='$<'; \ + $(am__set_b); \ + $(am__check_pre) $(TEST_LOG_DRIVER) --test-name "$$f" \ + --log-file $$b.log --trs-file $$b.trs \ + $(am__common_driver_flags) $(AM_TEST_LOG_DRIVER_FLAGS) $(TEST_LOG_DRIVER_FLAGS) -- $(TEST_LOG_COMPILE) \ + "$$tst" $(AM_TESTS_FD_REDIRECT) +@am__EXEEXT_TRUE@.test$(EXEEXT).log: +@am__EXEEXT_TRUE@ @p='$<'; \ +@am__EXEEXT_TRUE@ $(am__set_b); \ +@am__EXEEXT_TRUE@ $(am__check_pre) $(TEST_LOG_DRIVER) --test-name "$$f" \ +@am__EXEEXT_TRUE@ --log-file $$b.log --trs-file $$b.trs \ +@am__EXEEXT_TRUE@ $(am__common_driver_flags) $(AM_TEST_LOG_DRIVER_FLAGS) $(TEST_LOG_DRIVER_FLAGS) -- $(TEST_LOG_COMPILE) \ +@am__EXEEXT_TRUE@ "$$tst" $(AM_TESTS_FD_REDIRECT) +distdir: $(BUILT_SOURCES) + $(MAKE) $(AM_MAKEFLAGS) distdir-am + +distdir-am: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am + $(MAKE) $(AM_MAKEFLAGS) check-TESTS +check: check-am +all-am: Makefile $(PROGRAMS) +installdirs: + for dir in "$(DESTDIR)$(bindir)"; do \ + test -z "$$dir" || $(MKDIR_P) "$$dir"; \ + done +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + -test -z "$(TEST_LOGS)" || rm -f $(TEST_LOGS) + -test -z "$(TEST_LOGS:.log=.trs)" || rm -f $(TEST_LOGS:.log=.trs) + -test -z "$(TEST_SUITE_LOG)" || rm -f $(TEST_SUITE_LOG) + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." +check-valgrind: check-valgrind-am + +check-valgrind-am: check-valgrind-local + +check-valgrind-drd: check-valgrind-drd-am + +check-valgrind-drd-am: check-valgrind-drd-local + +check-valgrind-helgrind: check-valgrind-helgrind-am + +check-valgrind-helgrind-am: check-valgrind-helgrind-local + +check-valgrind-memcheck: check-valgrind-memcheck-am + +check-valgrind-memcheck-am: check-valgrind-memcheck-local + +check-valgrind-sgcheck: check-valgrind-sgcheck-am + +check-valgrind-sgcheck-am: check-valgrind-sgcheck-local + +clean: clean-am + +clean-am: clean-binPROGRAMS clean-generic clean-libtool mostlyclean-am + +distclean: distclean-am + -rm -f ./$(DEPDIR)/json_doc_1.Po + -rm -f ./$(DEPDIR)/json_doc_2.Po + -rm -f ./$(DEPDIR)/json_parser_1.Po + -rm -f ./$(DEPDIR)/spreadsheet_doc_1.Po + -rm -f ./$(DEPDIR)/spreadsheet_doc_1_num_and_formula.Po + -rm -f ./$(DEPDIR)/spreadsheet_doc_2.Po + -rm -f ./$(DEPDIR)/spreadsheet_doc_2_sheets_no_string_pool.Po + -rm -f ./$(DEPDIR)/spreadsheet_doc_2_sheets_with_formula.Po + -rm -f ./$(DEPDIR)/spreadsheet_doc_2_sheets_with_string_pool.Po + -rm -f ./$(DEPDIR)/xml_mapping_1.Po + -rm -f Makefile +distclean-am: clean-am distclean-compile distclean-generic \ + distclean-local distclean-tags + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: install-binPROGRAMS + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f ./$(DEPDIR)/json_doc_1.Po + -rm -f ./$(DEPDIR)/json_doc_2.Po + -rm -f ./$(DEPDIR)/json_parser_1.Po + -rm -f ./$(DEPDIR)/spreadsheet_doc_1.Po + -rm -f ./$(DEPDIR)/spreadsheet_doc_1_num_and_formula.Po + -rm -f 
./$(DEPDIR)/spreadsheet_doc_2.Po + -rm -f ./$(DEPDIR)/spreadsheet_doc_2_sheets_no_string_pool.Po + -rm -f ./$(DEPDIR)/spreadsheet_doc_2_sheets_with_formula.Po + -rm -f ./$(DEPDIR)/spreadsheet_doc_2_sheets_with_string_pool.Po + -rm -f ./$(DEPDIR)/xml_mapping_1.Po + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-compile mostlyclean-generic \ + mostlyclean-libtool + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: uninstall-binPROGRAMS + +.MAKE: check-am install-am install-strip + +.PHONY: CTAGS GTAGS TAGS all all-am am--depfiles check check-TESTS \ + check-am check-valgrind-am check-valgrind-drd-am \ + check-valgrind-drd-local check-valgrind-helgrind-am \ + check-valgrind-helgrind-local check-valgrind-local \ + check-valgrind-memcheck-am check-valgrind-memcheck-local \ + check-valgrind-sgcheck-am check-valgrind-sgcheck-local clean \ + clean-binPROGRAMS clean-generic clean-libtool cscopelist-am \ + ctags ctags-am distclean distclean-compile distclean-generic \ + distclean-libtool distclean-local distclean-tags distdir dvi \ + dvi-am html html-am info info-am install install-am \ + install-binPROGRAMS install-data install-data-am install-dvi \ + install-dvi-am install-exec install-exec-am install-html \ + install-html-am install-info install-info-am install-man \ + install-pdf install-pdf-am install-ps install-ps-am \ + install-strip installcheck installcheck-am installdirs \ + maintainer-clean maintainer-clean-generic mostlyclean \ + mostlyclean-compile mostlyclean-generic mostlyclean-libtool \ + pdf pdf-am ps ps-am recheck tags tags-am uninstall \ + uninstall-am uninstall-binPROGRAMS + +.PRECIOUS: Makefile + + +distclean-local: + rm -rf $(TESTS) + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/doc_example/files/document.ods b/doc_example/files/document.ods Binary files differnew file mode 100644 index 0000000..d310af2 --- /dev/null +++ b/doc_example/files/document.ods diff --git a/doc_example/files/multi-sheets.ods b/doc_example/files/multi-sheets.ods Binary files differnew file mode 100644 index 0000000..87685e7 --- /dev/null +++ b/doc_example/files/multi-sheets.ods diff --git a/doc_example/json_doc_1.cpp b/doc_example/json_doc_1.cpp new file mode 100644 index 0000000..cb5449f --- /dev/null +++ b/doc_example/json_doc_1.cpp @@ -0,0 +1,55 @@ + +#include <orcus/json_document_tree.hpp> +#include <orcus/config.hpp> + +#include <cstdlib> +#include <iostream> + +using namespace std; + +const char* json_string = "{" +" \"name\": \"John Doe\"," +" \"occupation\": \"Software Engineer\"," +" \"score\": [89, 67, 90]" +"}"; + +int main() +{ + using node = orcus::json::node; + + orcus::json_config config; // Use default configuration. + + orcus::json::document_tree doc; + doc.load(json_string, config); + + // Root is an object containing three key-value pairs. 
+ node root = doc.get_document_root(); + + for (std::string_view key : root.keys()) + { + node value = root.child(key); + switch (value.type()) + { + case orcus::json::node_t::string: + // string value + cout << key << ": " << value.string_value() << endl; + break; + case orcus::json::node_t::array: + { + // array value + cout << key << ":" << endl; + + for (size_t i = 0; i < value.child_count(); ++i) + { + node array_element = value.child(i); + cout << " - " << array_element.numeric_value() << endl; + } + break; + } + default: + ; + } + } + + return EXIT_SUCCESS; +} diff --git a/doc_example/json_doc_2.cpp b/doc_example/json_doc_2.cpp new file mode 100644 index 0000000..97ed3b5 --- /dev/null +++ b/doc_example/json_doc_2.cpp @@ -0,0 +1,219 @@ + +#include <orcus/json_document_tree.hpp> +#include <orcus/config.hpp> + +#include <iostream> +#include <functional> +#include <vector> + +void example_root_list() +{ + //!code-start: root list + orcus::json::document_tree doc = { + 1.0, 2.0, "string value", false, nullptr + }; + + std::cout << doc.dump() << std::endl; + //!code-end: root list +} + +void example_list_nested() +{ + //!code-start: list nested + orcus::json::document_tree doc = { + { true, false, nullptr }, + { 1.1, 2.2, "text" } + }; + + std::cout << doc.dump() << std::endl; + //!code-end: list nested +} + +void example_list_object() +{ + //!code-start: list object + orcus::json::document_tree doc = { + { "key1", 1.2 }, + { "key2", "some text" }, + }; + + std::cout << doc.dump() << std::endl; + //!code-end: list object +} + +void example_list_object_2() +{ + //!code-start: list object 2 + orcus::json::document_tree doc = { + { "parent1", { + { "child1", true }, + { "child2", false }, + { "child3", 123.4 }, + } + }, + { "parent2", "not-nested" }, + }; + + std::cout << doc.dump() << std::endl; + //!code-end: list object 2 +} + +void example_array_ambiguous() +{ + //!code-start: array ambiguous + orcus::json::document_tree doc = { + { "array", { "one", 987.0 } } + }; + //!code-end: array ambiguous +} + +void example_array_explicit() +{ + //!code-start: array explicit + using namespace orcus; + + json::document_tree doc = { + { "array", json::array({ "one", 987.0 }) } + }; + + std::cout << doc.dump() << std::endl; + //!code-end: array explicit +} + +void example_object_ambiguous() +{ + //!code-start: object ambiguous a + using namespace orcus; + + json::document_tree doc = {}; + //!code-end: object ambiguous a + + //!code-start: object ambiguous b + try + { + auto root = doc.get_document_root(); + } + catch (const json::document_error& e) + { + std::cout << e.what() << std::endl; + } + //!code-end: object ambiguous b +} + +void example_object_explicit_1() +{ + //!code-start: object explicit 1 + using namespace orcus; + + json::document_tree doc = json::object(); + + std::cout << doc.dump() << std::endl; + //!code-end: object explicit 1 +} + +void example_object_explicit_2() +{ + //!code-start: object explicit 2 + using namespace orcus; + + json::document_tree doc = { + json::object(), + json::object(), + json::object() + }; + + std::cout << doc.dump() << std::endl; + //!code-end: object explicit 2 +} + +void example_root_object_add_child() +{ + //!code-start: root object add child + using namespace orcus; + + // Initialize the tree with an empty object. + json::document_tree doc = json::object(); + + // Get the root object, and assign three key-value pairs. 
+ json::node root = doc.get_document_root(); + root["child1"] = 1.0; + root["child2"] = "string"; + root["child3"] = { true, false }; // implicit array + + // You can also create a key-value pair whose value is another object. + root["child object"] = { + { "key1", 100.0 }, + { "key2", 200.0 } + }; + + root["child array"] = json::array({ 1.1, 1.2, true }); // explicit array + + std::cout << doc.dump() << std::endl; + //!code-end: root object add child +} + +void example_root_array_add_child() +{ + //!code-start: root array add child + using namespace orcus; + + // Initialize the tree with an empty array root. + json::document_tree doc = json::array(); + + // Get the root array. + json::node root = doc.get_document_root(); + + // Append values to the array. + root.push_back(-1.2); + root.push_back("string"); + root.push_back(true); + root.push_back(nullptr); + + // You can append an object to the array via push_back() as well. + root.push_back({{"key1", 1.1}, {"key2", 1.2}}); + + std::cout << doc.dump() << std::endl; + //!code-end: root array add child +} + +int main() +{ + using func_type = std::function<void()>; + + std::vector<func_type> funcs = { + example_root_list, + example_list_nested, + example_list_object, + example_list_object_2, + example_array_explicit, + example_object_ambiguous, + example_object_explicit_1, + example_object_explicit_2, + example_root_object_add_child, + example_root_array_add_child, + }; + + for (func_type f : funcs) + { + std::cout << "--" << std::endl; + f(); + } + + std::vector<func_type> funcs_exc = { + example_array_ambiguous, + }; + + for (func_type f : funcs_exc) + { + try + { + f(); + } + catch (orcus::json::key_value_error&) + { + // expected + } + } + + return EXIT_SUCCESS; +} diff --git a/doc_example/json_parser_1.cpp b/doc_example/json_parser_1.cpp new file mode 100644 index 0000000..322316a --- /dev/null +++ b/doc_example/json_parser_1.cpp @@ -0,0 +1,41 @@ + +#include <orcus/json_parser.hpp> +#include <cstring> +#include <iostream> + +using namespace std; + +class json_parser_handler : public orcus::json_handler +{ +public: + void object_key(std::string_view key, bool /*transient*/) + { + cout << "object key: " << key << endl; + } + + void string(std::string_view val, bool /*transient*/) + { + cout << "string: " << val << endl; + } + + void number(double val) + { + cout << "number: " << val << endl; + } +}; + +int main() +{ + const char* test_code = "{\"key1\": [1,2,3,4,5], \"key2\": 12.3}"; + + cout << "JSON string: " << test_code << endl; + + // Instantiate the parser with our own handler. + json_parser_handler hdl; + orcus::json_parser<json_parser_handler> parser(test_code, hdl); + + // Parse the string.
+ parser.parse(); + + return EXIT_SUCCESS; +} diff --git a/doc_example/spreadsheet_doc_1.cpp b/doc_example/spreadsheet_doc_1.cpp new file mode 100644 index 0000000..00bbba4 --- /dev/null +++ b/doc_example/spreadsheet_doc_1.cpp @@ -0,0 +1,64 @@ + +#include <orcus/spreadsheet/document.hpp> +#include <orcus/spreadsheet/factory.hpp> +#include <orcus/orcus_ods.hpp> + +#include <ixion/address.hpp> +#include <ixion/model_context.hpp> + +#include <iostream> +#include <cstdlib> +#include <filesystem> + +using namespace orcus; + +int main() +{ + std::filesystem::path input_dir = std::getenv("INPUTDIR"); + + //!code-start: instantiate + spreadsheet::range_size_t ss{1048576, 16384}; + spreadsheet::document doc{ss}; + spreadsheet::import_factory factory{doc}; + //!code-end: instantiate + + //!code-start: loader + orcus_ods loader(&factory); + //!code-end: loader + + //!code-start: read-file + auto filepath = input_dir / "document.ods"; + loader.read_file(filepath.native()); + //!code-end: read-file + + //!code-start: model-context + const ixion::model_context& model = doc.get_model_context(); + //!code-end: model-context + + //!code-start: string-id + ixion::abs_address_t pos(0, 0, 0); // Set the cell position to A1. + ixion::string_id_t str_id = model.get_string_identifier(pos); + //!code-end: string-id + + //!code-start: print-string + const std::string* s = model.get_string(str_id); + assert(s); + std::cout << "A1: " << *s << std::endl; + //!code-end: print-string + + //!code-start: rest + pos.column = 1; // Move to B1 + str_id = model.get_string_identifier(pos); + s = model.get_string(str_id); + assert(s); + std::cout << "B1: " << *s << std::endl; + + pos.column = 2; // Move to C1 + str_id = model.get_string_identifier(pos); + s = model.get_string(str_id); + assert(s); + std::cout << "C1: " << *s << std::endl; + //!code-end: rest + + return EXIT_SUCCESS; +} diff --git a/doc_example/spreadsheet_doc_1_num_and_formula.cpp b/doc_example/spreadsheet_doc_1_num_and_formula.cpp new file mode 100644 index 0000000..88c405c --- /dev/null +++ b/doc_example/spreadsheet_doc_1_num_and_formula.cpp @@ -0,0 +1,62 @@ + +#include <orcus/spreadsheet/document.hpp> +#include <orcus/spreadsheet/factory.hpp> +#include <orcus/orcus_ods.hpp> + +#include <ixion/address.hpp> +#include <ixion/model_context.hpp> +#include <ixion/formula_result.hpp> +#include <ixion/cell.hpp> + +#include <iostream> +#include <filesystem> + +using namespace orcus; + +int main() +{ + std::filesystem::path input_dir = std::getenv("INPUTDIR"); + + // Instantiate a document, and wrap it with a factory. + spreadsheet::range_size_t ss{1048576, 16384}; + spreadsheet::document doc{ss}; + spreadsheet::import_factory factory{doc}; + + // Pass the factory to the document loader, and read the content from a file + // to populate the document. + orcus_ods loader(&factory); + auto filepath = input_dir / "document.ods"; + loader.read_file(filepath.native()); + doc.recalc_formula_cells(); + + // Now that the document is fully populated, access its content. 
+ const ixion::model_context& model = doc.get_model_context(); + + //!code-start: print-numeric-cells + for (spreadsheet::row_t row = 1; row <= 6; ++row) + { + ixion::abs_address_t pos(0, row, 0); + double value = model.get_numeric_value(pos); + std::cout << "A" << (pos.row+1) << ": " << value << std::endl; + } + //!code-end: print-numeric-cells + + //!code-start: print-formula-cells + for (spreadsheet::row_t row = 1; row <=6; ++row) + { + ixion::abs_address_t pos(0, row, 2); // Column C + const ixion::formula_cell* fc = model.get_formula_cell(pos); + assert(fc); + + // Get the formula cell results. + const ixion::formula_result& result = fc->get_result_cache( + ixion::formula_result_wait_policy_t::throw_exception); + + // We already know the result is a string. + const std::string& s = result.get_string(); + std::cout << "C" << (pos.row+1) << ": " << s << std::endl; + } + //!code-end: print-formula-cells + + return EXIT_SUCCESS; +} diff --git a/doc_example/spreadsheet_doc_2.cpp b/doc_example/spreadsheet_doc_2.cpp new file mode 100644 index 0000000..614b50b --- /dev/null +++ b/doc_example/spreadsheet_doc_2.cpp @@ -0,0 +1,44 @@ + +#include <orcus/spreadsheet/import_interface.hpp> +#include <orcus/orcus_ods.hpp> + +#include <filesystem> +#include <iostream> + +namespace ss = orcus::spreadsheet; + +class my_empty_import_factory : public ss::iface::import_factory +{ +public: + virtual ss::iface::import_sheet* append_sheet(ss::sheet_t sheet_index, std::string_view name) override + { + std::cout << "append_sheet: sheet index: " << sheet_index << "; sheet name: " << name << std::endl; + return nullptr; + } + + virtual ss::iface::import_sheet* get_sheet(std::string_view name) override + { + std::cout << "get_sheet: sheet name: " << name << std::endl; + return nullptr; + } + + virtual ss::iface::import_sheet* get_sheet(ss::sheet_t sheet_index) override + { + std::cout << "get_sheet: sheet index: " << sheet_index << std::endl; + return nullptr; + } + + virtual void finalize() override {} +}; + +int main() +{ + std::filesystem::path input_dir = std::getenv("INPUTDIR"); + auto filepath = input_dir / "multi-sheets.ods"; + + my_empty_import_factory factory; + orcus::orcus_ods loader(&factory); + loader.read_file(filepath.native()); + + return EXIT_SUCCESS; +} diff --git a/doc_example/spreadsheet_doc_2_sheets_no_string_pool.cpp b/doc_example/spreadsheet_doc_2_sheets_no_string_pool.cpp new file mode 100644 index 0000000..ea58d5e --- /dev/null +++ b/doc_example/spreadsheet_doc_2_sheets_no_string_pool.cpp @@ -0,0 +1,123 @@ + +#include <orcus/spreadsheet/import_interface.hpp> +#include <orcus/orcus_ods.hpp> + +#include <iostream> +#include <memory> +#include <filesystem> + +//!code-start: cell_value +namespace ss = orcus::spreadsheet; + +enum class cell_value_type { empty, numeric, string }; + +struct cell_value +{ + cell_value_type type; + + union + { + std::size_t index; + double f; + }; + + cell_value() : type(cell_value_type::empty) {} +}; +//!code-end: cell_value + +//!code-start: my_sheet +class my_sheet : public ss::iface::import_sheet +{ + cell_value m_cells[100][1000]; + ss::range_size_t m_sheet_size; + ss::sheet_t m_sheet_index; + +public: + my_sheet(ss::sheet_t sheet_index) : + m_sheet_index(sheet_index) + { + m_sheet_size.rows = 1000; + m_sheet_size.columns = 100; + } + + virtual void set_string(ss::row_t row, ss::col_t col, ss::string_id_t sindex) override + { + std::cout << "(sheet: " << m_sheet_index << "; row: " << row << "; col: " << col + << "): string index = " << sindex << std::endl; + + 
m_cells[col][row].type = cell_value_type::string; + m_cells[col][row].index = sindex; + } + + virtual void set_value(ss::row_t row, ss::col_t col, double value) override + { + std::cout << "(sheet: " << m_sheet_index << "; row: " << row << "; col: " << col + << "): value = " << value << std::endl; + + m_cells[col][row].type = cell_value_type::numeric; + m_cells[col][row].f = value; + } + + virtual ss::range_size_t get_sheet_size() const override + { + return m_sheet_size; + } + + // We don't implement these methods for now. + virtual void set_auto(ss::row_t, ss::col_t, std::string_view) override {} + + virtual void set_bool(ss::row_t, ss::col_t, bool) override {} + + virtual void set_date_time(ss::row_t, ss::col_t, int, int, int, int, int, double) override {} + + virtual void set_format(ss::row_t, ss::col_t, std::size_t) override {} + + virtual void set_format(ss::row_t, ss::col_t, ss::row_t, ss::col_t, std::size_t) override {} + + virtual void set_column_format(ss::col_t, ss::col_t, std::size_t) override {} + + virtual void set_row_format(ss::col_t, std::size_t) override {} + + virtual void fill_down_cells(ss::row_t, ss::col_t, ss::row_t) override {} +}; +//!code-end: my_sheet + +//!code-start: my_import_factory +class my_import_factory : public ss::iface::import_factory +{ + std::vector<std::unique_ptr<my_sheet>> m_sheets; + +public: + virtual ss::iface::import_sheet* append_sheet(ss::sheet_t, std::string_view) override + { + m_sheets.push_back(std::make_unique<my_sheet>(m_sheets.size())); + return m_sheets.back().get(); + } + + virtual ss::iface::import_sheet* get_sheet(std::string_view) override + { + // TODO : implement this. + return nullptr; + } + + virtual ss::iface::import_sheet* get_sheet(ss::sheet_t sheet_index) override + { + ss::sheet_t sheet_count = m_sheets.size(); + return sheet_index < sheet_count ? 
m_sheets[sheet_index].get() : nullptr; + } + + virtual void finalize() override {} +}; +//!code-end: my_import_factory + +int main() +{ + std::filesystem::path input_dir = std::getenv("INPUTDIR"); + auto filepath = input_dir / "multi-sheets.ods"; + + my_import_factory factory; + orcus::orcus_ods loader(&factory); + loader.read_file(filepath.native()); + + return EXIT_SUCCESS; +} diff --git a/doc_example/spreadsheet_doc_2_sheets_with_formula.cpp b/doc_example/spreadsheet_doc_2_sheets_with_formula.cpp new file mode 100644 index 0000000..29511e4 --- /dev/null +++ b/doc_example/spreadsheet_doc_2_sheets_with_formula.cpp @@ -0,0 +1,290 @@ + +#include <orcus/spreadsheet/import_interface.hpp> +#include <orcus/orcus_ods.hpp> + +#include <iostream> +#include <memory> +#include <unordered_map> +#include <deque> +#include <filesystem> + +namespace ss = orcus::spreadsheet; + +//!code-start: cell_value_type +enum class cell_value_type { empty, numeric, string, formula }; // adding a formula type here +//!code-end: cell_value_type + +using ss_type = std::deque<std::string>; +using ss_hash_type = std::unordered_map<std::string_view, std::size_t>; + +struct cell_value +{ + cell_value_type type; + + union + { + size_t index; // either a string index or a formula index + double f; + }; + + cell_value() : type(cell_value_type::empty) {} +}; + +//!code-start: cell_grid +class cell_grid +{ + cell_value m_cells[100][1000]; +public: + + cell_value& operator()(ss::row_t row, ss::col_t col) + { + return m_cells[col][row]; + } +}; +//!code-end: cell_grid + +//!code-start: formula +struct formula +{ + std::string expression; + ss::formula_grammar_t grammar; + + formula() : grammar(ss::formula_grammar_t::unknown) {} + formula(std::string _expression, ss::formula_grammar_t _grammar) : + expression(std::move(_expression)), + grammar(_grammar) {} +}; +//!code-end: formula + +//!code-start: my_formula +class my_formula : public ss::iface::import_formula +{ + ss::sheet_t m_sheet_index; + cell_grid& m_cells; + std::vector<formula>& m_formula_store; + + ss::row_t m_row; + ss::col_t m_col; + formula m_formula; + +public: + my_formula(ss::sheet_t sheet, cell_grid& cells, std::vector<formula>& formulas) : + m_sheet_index(sheet), + m_cells(cells), + m_formula_store(formulas), + m_row(0), + m_col(0) {} + + virtual void set_position(ss::row_t row, ss::col_t col) override + { + m_row = row; + m_col = col; + } + + virtual void set_formula(ss::formula_grammar_t grammar, std::string_view formula) override + { + m_formula.expression = formula; + m_formula.grammar = grammar; + } + + virtual void set_shared_formula_index(std::size_t) override {} + + virtual void set_result_string(std::string_view) override {} + + virtual void set_result_value(double) override {} + + virtual void set_result_empty() override {} + + virtual void set_result_bool(bool) override {} + + virtual void commit() override + { + std::cout << "(sheet: " << m_sheet_index << "; row: " << m_row << "; col: " << m_col << "): formula = " + << m_formula.expression << " (" << m_formula.grammar << ")" << std::endl; + + std::size_t index = m_formula_store.size(); + m_cells(m_row, m_col).type = cell_value_type::formula; + m_cells(m_row, m_col).index = index; + m_formula_store.push_back(std::move(m_formula)); + } +}; +//!code-end: my_formula + +//!code-start: my_sheet +class my_sheet : public ss::iface::import_sheet +{ + cell_grid m_cells; + std::vector<formula> m_formula_store; + my_formula m_formula_iface; + ss::range_size_t m_sheet_size; + ss::sheet_t m_sheet_index; + const 
ss_type& m_string_pool; + +public: + my_sheet(ss::sheet_t sheet_index, const ss_type& string_pool) : + m_formula_iface(sheet_index, m_cells, m_formula_store), + m_sheet_index(sheet_index), + m_string_pool(string_pool) + { + m_sheet_size.rows = 1000; + m_sheet_size.columns = 100; + } + + virtual void set_string(ss::row_t row, ss::col_t col, ss::string_id_t sindex) override + { + std::cout << "(sheet: " << m_sheet_index << "; row: " << row << "; col: " << col + << "): string index = " << sindex << " (" << m_string_pool[sindex] << ")" << std::endl; + + m_cells(row, col).type = cell_value_type::string; + m_cells(row, col).index = sindex; + } + + virtual void set_value(ss::row_t row, ss::col_t col, double value) override + { + std::cout << "(sheet: " << m_sheet_index << "; row: " << row << "; col: " << col + << "): value = " << value << std::endl; + + m_cells(row, col).type = cell_value_type::numeric; + m_cells(row, col).f = value; + } + + virtual ss::range_size_t get_sheet_size() const override + { + return m_sheet_size; + } + + // We don't implement these methods for now. + virtual void set_auto(ss::row_t, ss::col_t, std::string_view) override {} + + virtual void set_bool(ss::row_t, ss::col_t, bool) override {} + + virtual void set_date_time(ss::row_t, ss::col_t, int, int, int, int, int, double) override {} + + virtual void set_format(ss::row_t, ss::col_t, std::size_t) override {} + + virtual void set_format(ss::row_t, ss::col_t, ss::row_t, ss::col_t, std::size_t) override {} + + virtual void set_column_format(ss::col_t, ss::col_t, std::size_t) override {} + + virtual void set_row_format(ss::col_t, std::size_t) override {} + + virtual void fill_down_cells(ss::row_t, ss::col_t, ss::row_t) override {} + + virtual ss::iface::import_formula* get_formula() override + { + return &m_formula_iface; + } +}; +//!code-end: my_sheet + +class my_shared_strings : public ss::iface::import_shared_strings +{ + ss_hash_type m_ss_hash; + ss_type& m_ss; + std::string m_current_string; + +public: + my_shared_strings(ss_type& ss) : m_ss(ss) {} + + virtual size_t add(std::string_view s) override + { + auto it = m_ss_hash.find(s); + if (it != m_ss_hash.end()) + // This string already exists in the pool. + return it->second; + + // This is a brand-new string. + return append(s); + } + + virtual size_t append(std::string_view s) override + { + std::size_t string_index = m_ss.size(); + m_ss.emplace_back(s); + m_ss_hash.emplace(s, string_index); + + return string_index; + } + + // The following methods are for formatted text segments, which we ignore for now. + virtual void set_segment_bold(bool) override {} + + virtual void set_segment_font(std::size_t) override {} + + virtual void set_segment_font_color( + ss::color_elem_t, + ss::color_elem_t, + ss::color_elem_t, + ss::color_elem_t) override {} + + virtual void set_segment_font_name(std::string_view) override {} + + virtual void set_segment_font_size(double) override {} + + virtual void set_segment_italic(bool) override {} + + virtual void append_segment(std::string_view s) override + { + m_current_string += s; + } + + virtual std::size_t commit_segments() override + { + std::size_t string_index = m_ss.size(); + m_ss.push_back(std::move(m_current_string)); + + const std::string& s = m_ss.back(); + std::string_view sv(s.data(), s.size()); + m_ss_hash.emplace(sv, string_index); + + return string_index; + } +}; + +class my_import_factory : public ss::iface::import_factory +{ + ss_type m_string_pool; // string pool to be shared everywhere. 
+ my_shared_strings m_shared_strings; + std::vector<std::unique_ptr<my_sheet>> m_sheets; + +public: + my_import_factory() : m_shared_strings(m_string_pool) {} + + virtual ss::iface::import_shared_strings* get_shared_strings() override + { + return &m_shared_strings; + } + + virtual ss::iface::import_sheet* append_sheet(ss::sheet_t, std::string_view) override + { + // Pass the string pool to each sheet instance. + m_sheets.push_back(std::make_unique<my_sheet>(m_sheets.size(), m_string_pool)); + return m_sheets.back().get(); + } + + virtual ss::iface::import_sheet* get_sheet(std::string_view) override + { + // TODO : implement this. + return nullptr; + } + + virtual ss::iface::import_sheet* get_sheet(ss::sheet_t sheet_index) override + { + ss::sheet_t sheet_count = m_sheets.size(); + return sheet_index < sheet_count ? m_sheets[sheet_index].get() : nullptr; + } + + virtual void finalize() override {} +}; + +int main() +{ + std::filesystem::path input_dir = std::getenv("INPUTDIR"); + auto filepath = input_dir / "multi-sheets.ods"; + + my_import_factory factory; + orcus::orcus_ods loader(&factory); + loader.read_file(filepath.native()); + + return EXIT_SUCCESS; +} diff --git a/doc_example/spreadsheet_doc_2_sheets_with_string_pool.cpp b/doc_example/spreadsheet_doc_2_sheets_with_string_pool.cpp new file mode 100644 index 0000000..76fcf80 --- /dev/null +++ b/doc_example/spreadsheet_doc_2_sheets_with_string_pool.cpp @@ -0,0 +1,204 @@ + +#include <orcus/spreadsheet/import_interface.hpp> +#include <orcus/orcus_ods.hpp> + +#include <iostream> +#include <memory> +#include <unordered_map> +#include <deque> +#include <filesystem> + +namespace ss = orcus::spreadsheet; + +enum class cell_value_type { empty, numeric, string }; + +//!code-start: types +using ss_type = std::deque<std::string>; +using ss_hash_type = std::unordered_map<std::string_view, std::size_t>; +//!code-end: types + +struct cell_value +{ + cell_value_type type; + + union + { + std::size_t index; + double f; + }; + + cell_value() : type(cell_value_type::empty) {} +}; + +class my_sheet : public ss::iface::import_sheet +{ + cell_value m_cells[100][1000]; + ss::range_size_t m_sheet_size; + ss::sheet_t m_sheet_index; + const ss_type& m_string_pool; + +public: + my_sheet(ss::sheet_t sheet_index, const ss_type& string_pool) : + m_sheet_index(sheet_index), + m_string_pool(string_pool) + { + m_sheet_size.rows = 1000; + m_sheet_size.columns = 100; + } + + virtual void set_string(ss::row_t row, ss::col_t col, ss::string_id_t sindex) override + { + std::cout << "(sheet: " << m_sheet_index << "; row: " << row << "; col: " << col + << "): string index = " << sindex << " (" << m_string_pool[sindex] << ")" << std::endl; + + m_cells[col][row].type = cell_value_type::string; + m_cells[col][row].index = sindex; + } + + virtual void set_value(ss::row_t row, ss::col_t col, double value) override + { + std::cout << "(sheet: " << m_sheet_index << "; row: " << row << "; col: " << col + << "): value = " << value << std::endl; + + m_cells[col][row].type = cell_value_type::numeric; + m_cells[col][row].f = value; + } + + virtual ss::range_size_t get_sheet_size() const override + { + return m_sheet_size; + } + + // We don't implement these methods for now. 
+ virtual void set_auto(ss::row_t, ss::col_t, std::string_view) override {} + + virtual void set_bool(ss::row_t, ss::col_t, bool) override {} + + virtual void set_date_time(ss::row_t, ss::col_t, int, int, int, int, int, double) override {} + + virtual void set_format(ss::row_t, ss::col_t, std::size_t) override {} + + virtual void set_format(ss::row_t, ss::col_t, ss::row_t, ss::col_t, std::size_t) override {} + + virtual void set_column_format(ss::col_t, ss::col_t, std::size_t) override {} + + virtual void set_row_format(ss::col_t, std::size_t) override {} + + virtual void fill_down_cells(ss::row_t, ss::col_t, ss::row_t) override {} +}; + +//!code-start: my_shared_strings +class my_shared_strings : public ss::iface::import_shared_strings +{ + ss_hash_type m_ss_hash; + ss_type& m_ss; + std::string m_current_string; + +public: + my_shared_strings(ss_type& ss) : m_ss(ss) {} + + virtual std::size_t add(std::string_view s) override + { + auto it = m_ss_hash.find(s); + if (it != m_ss_hash.end()) + // This string already exists in the pool. + return it->second; + + // This is a brand-new string. + return append(s); + } + + virtual std::size_t append(std::string_view s) override + { + std::size_t string_index = m_ss.size(); + m_ss.emplace_back(s); + m_ss_hash.emplace(s, string_index); + + return string_index; + } + + // The following methods are for formatted text segments, which we ignore for now. + virtual void set_segment_bold(bool) override {} + + virtual void set_segment_font(std::size_t) override {} + + virtual void set_segment_font_color( + ss::color_elem_t, + ss::color_elem_t, + ss::color_elem_t, + ss::color_elem_t) override {} + + virtual void set_segment_font_name(std::string_view) override {} + + virtual void set_segment_font_size(double) override {} + + virtual void set_segment_italic(bool) override {} + + virtual void append_segment(std::string_view s) override + { + m_current_string += s; + } + + virtual std::size_t commit_segments() override + { + std::size_t string_index = m_ss.size(); + m_ss.push_back(std::move(m_current_string)); + + const std::string& s = m_ss.back(); + std::string_view sv(s.data(), s.size()); + m_ss_hash.emplace(sv, string_index); + + return string_index; + } +}; +//!code-end: my_shared_strings + +//!code-start: my_import_factory +class my_import_factory : public ss::iface::import_factory +{ + ss_type m_string_pool; // string pool to be shared everywhere. + my_shared_strings m_shared_strings; + std::vector<std::unique_ptr<my_sheet>> m_sheets; + +public: + my_import_factory() : m_shared_strings(m_string_pool) {} + + virtual ss::iface::import_shared_strings* get_shared_strings() override + { + return &m_shared_strings; + } + + virtual ss::iface::import_sheet* append_sheet(ss::sheet_t, std::string_view) override + { + // Pass the string pool to each sheet instance. + m_sheets.push_back(std::make_unique<my_sheet>(m_sheets.size(), m_string_pool)); + return m_sheets.back().get(); + } + + virtual ss::iface::import_sheet* get_sheet(std::string_view) override + { + // TODO : implement this. + return nullptr; + } + + virtual ss::iface::import_sheet* get_sheet(ss::sheet_t sheet_index) override + { + ss::sheet_t sheet_count = m_sheets.size(); + return sheet_index < sheet_count ? 
m_sheets[sheet_index].get() : nullptr; + } + + virtual void finalize() override {} +}; +//!code-end: my_import_factory + +int main() +{ + std::filesystem::path input_dir = std::getenv("INPUTDIR"); + auto filepath = input_dir / "multi-sheets.ods"; + + my_import_factory factory; + orcus::orcus_ods loader(&factory); + loader.read_file(filepath.native()); + + return EXIT_SUCCESS; +} diff --git a/doc_example/xml_mapping_1.cpp b/doc_example/xml_mapping_1.cpp new file mode 100644 index 0000000..33d6ff2 --- /dev/null +++ b/doc_example/xml_mapping_1.cpp @@ -0,0 +1,129 @@ + +#include <orcus/xml_namespace.hpp> + +#include <iostream> + +using namespace orcus; +using namespace std; + +void run_xmlns_example() +{ + // A namespace repository is a shared storage for namespace strings for + // multiple contexts. The client code simply creates an instance + // from which to create contexts. + xmlns_repository ns_repo; + + xmlns_context ns_cxt = ns_repo.create_context(); + + // Push namespaces with their aliases as you encounter them. The push() + // method then returns an identifier associated with the alias. + + // The empty alias is for the default namespace. You can either use nullptr or an + // empty string. + xmlns_id_t ns_default = ns_cxt.push( + std::string_view{}, "http://schemas.openxmlformats.org/spreadsheetml/2006/main"); + + xmlns_id_t ns_a = ns_cxt.push( + "a", "http://schemas.openxmlformats.org/drawingml/2006/main"); + + xmlns_id_t ns_r = ns_cxt.push( + "r", "http://schemas.openxmlformats.org/officeDocument/2006/relationships"); + + // You can retrieve the data associated with alias IDs. + for (const xmlns_id_t nsid : {ns_default, ns_a, ns_r}) + { + std::string_view alias = ns_cxt.get_alias(nsid); + cout << "Namespace alias '" << alias << "' has an index of " << ns_cxt.get_index(nsid) + << " and a short name of '" << ns_cxt.get_short_name(nsid) << "'." << endl; + cout << "The value of the alias '" << alias << "' is '" << ns_cxt.get(alias) << "'." << endl; + } + + // Iterate over all namespaces in the current context. + for (const xmlns_id_t nsid : ns_cxt.get_all_namespaces()) + cout << "'" << ns_cxt.get_alias(nsid) << "' = " << ns_cxt.get_short_name(nsid) << endl; +} + +void run_xmlns_stacked() +{ + xmlns_repository ns_repo; + xmlns_context ns_cxt = ns_repo.create_context(); + + // Push a first default namespace. + xmlns_id_t ns_default_1 = ns_cxt.push(std::string_view{}, "http://original"); + + // Push a nested default namespace. This overwrites the original. + xmlns_id_t current_default_ns = ns_cxt.push(std::string_view{}, "http://nested"); + cout << "same as original: " << (current_default_ns == ns_default_1) << endl; + + // Pop the current default namespace. After this the original namespace + // becomes the default namespace again. + ns_cxt.pop(std::string_view{}); + + // Get the current default namespace identifier. + current_default_ns = ns_cxt.get(std::string_view{}); + cout << "same as original: " << (current_default_ns == ns_default_1) << endl; +} + +void run_xmlns_same_ns_different_aliases() +{ + xmlns_repository ns_repo; + + // The same namespace URI may be associated with different aliases in different + // contexts.
+ + xmlns_id_t alias_1, alias_2; + { + xmlns_context ns_cxt = ns_repo.create_context(); + alias_1 = ns_cxt.push("foo", "http://some-namespace"); + + for (const xmlns_id_t nsid : ns_cxt.get_all_namespaces()) + cout << "'" << ns_cxt.get_alias(nsid) << "' = " << ns_cxt.get_short_name(nsid) << endl; + } + + { + xmlns_context ns_cxt = ns_repo.create_context(); + alias_2 = ns_cxt.push("bar", "http://some-namespace"); + + for (const xmlns_id_t nsid : ns_cxt.get_all_namespaces()) + cout << "'" << ns_cxt.get_alias(nsid) << "' = " << ns_cxt.get_short_name(nsid) << endl; + } + + cout << (alias_1 == alias_2 ? "same" : "different") << endl; +} + +void run_xmlns_different_ns_same_alias() +{ + xmlns_repository ns_repo; + + // The same alias may be associated with different namespace URIs in different + // contexts. + + xmlns_id_t alias_1, alias_2; + { + xmlns_context ns_cxt = ns_repo.create_context(); + alias_1 = ns_cxt.push("foo", "http://namespace-1"); + + for (const xmlns_id_t nsid : ns_cxt.get_all_namespaces()) + cout << "'" << ns_cxt.get_alias(nsid) << "' = " << ns_cxt.get_short_name(nsid) << endl; + } + + { + xmlns_context ns_cxt = ns_repo.create_context(); + alias_2 = ns_cxt.push("foo", "http://namespace-2"); + + for (const xmlns_id_t nsid : ns_cxt.get_all_namespaces()) + cout << "'" << ns_cxt.get_alias(nsid) << "' = " << ns_cxt.get_short_name(nsid) << endl; + } + + cout << (alias_1 == alias_2 ? "same" : "different") << endl; +} + +int main() +{ + run_xmlns_example(); + run_xmlns_stacked(); + run_xmlns_same_ns_different_aliases(); + run_xmlns_different_ns_same_alias(); + + return EXIT_SUCCESS; +} |