author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-19 02:59:59 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-19 02:59:59 +0000
commit     c9ef4ed560a5de28fce031882257b6741565284e (patch)
tree       07afa40629447df997726efd4de84d5987ff5e50
parent     Initial commit. (diff)
download   cli-helpers-c9ef4ed560a5de28fce031882257b6741565284e.tar.xz
           cli-helpers-c9ef4ed560a5de28fce031882257b6741565284e.zip

Adding upstream version 2.3.1. (refs: upstream/2.3.1, upstream)

Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat:
 -rw-r--r--  .git-blame-ignore-revs | 2
 -rw-r--r--  .github/PULL_REQUEST_TEMPLATE.md | 11
 -rw-r--r--  .gitignore | 13
 -rw-r--r--  .pre-commit-config.yaml | 6
 -rw-r--r--  .travis.yml | 30
 -rwxr-xr-x  .travis/install.sh | 25
 -rw-r--r--  AUTHORS | 33
 -rw-r--r--  CHANGELOG | 134
 -rw-r--r--  CONTRIBUTING.rst | 118
 -rw-r--r--  LICENSE | 27
 -rw-r--r--  MANIFEST.in | 9
 -rw-r--r--  README.rst | 38
 -rw-r--r--  appveyor.yml | 15
 -rw-r--r--  cli_helpers/__init__.py | 1
 -rw-r--r--  cli_helpers/compat.py | 56
 -rw-r--r--  cli_helpers/config.py | 301
 -rw-r--r--  cli_helpers/tabular_output/__init__.py | 13
 -rw-r--r--  cli_helpers/tabular_output/delimited_output_adapter.py | 56
 -rw-r--r--  cli_helpers/tabular_output/output_formatter.py | 255
 -rw-r--r--  cli_helpers/tabular_output/preprocessors.py | 353
 -rw-r--r--  cli_helpers/tabular_output/tabulate_adapter.py | 212
 -rw-r--r--  cli_helpers/tabular_output/tsv_output_adapter.py | 17
 -rw-r--r--  cli_helpers/tabular_output/vertical_table_adapter.py | 67
 -rw-r--r--  cli_helpers/utils.py | 114
 -rw-r--r--  docs/Makefile | 20
 -rw-r--r--  docs/source/api.rst | 23
 -rw-r--r--  docs/source/authors.rst | 1
 -rw-r--r--  docs/source/changelog.rst | 1
 -rw-r--r--  docs/source/conf.py | 202
 -rw-r--r--  docs/source/contributing.rst | 1
 -rw-r--r--  docs/source/index.rst | 30
 -rw-r--r--  docs/source/license.rst | 13
 -rw-r--r--  docs/source/quickstart.rst | 153
 -rw-r--r--  release.py | 135
 -rw-r--r--  requirements-dev.txt | 10
 -rw-r--r--  setup.cfg | 13
 -rwxr-xr-x  setup.py | 58
 -rw-r--r--  tasks.py | 126
 -rw-r--r--  tests/__init__.py | 0
 -rw-r--r--  tests/compat.py | 86
 -rw-r--r--  tests/config_data/configrc | 18
 -rw-r--r--  tests/config_data/configspecrc | 20
 -rw-r--r--  tests/config_data/invalid_configrc | 18
 -rw-r--r--  tests/config_data/invalid_configspecrc | 20
 -rw-r--r--  tests/tabular_output/__init__.py | 0
 -rw-r--r--  tests/tabular_output/test_delimited_output_adapter.py | 55
 -rw-r--r--  tests/tabular_output/test_output_formatter.py | 271
 -rw-r--r--  tests/tabular_output/test_preprocessors.py | 350
 -rw-r--r--  tests/tabular_output/test_tabulate_adapter.py | 113
 -rw-r--r--  tests/tabular_output/test_tsv_output_adapter.py | 36
 -rw-r--r--  tests/tabular_output/test_vertical_table_adapter.py | 49
 -rw-r--r--  tests/test_cli_helpers.py | 5
 -rw-r--r--  tests/test_config.py | 293
 -rw-r--r--  tests/test_utils.py | 77
 -rw-r--r--  tests/utils.py | 18
 -rw-r--r--  tox.ini | 58
56 files changed, 4179 insertions, 0 deletions
diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs
new file mode 100644
index 0000000..76b3e90
--- /dev/null
+++ b/.git-blame-ignore-revs
@@ -0,0 +1,2 @@
+# Black all the code.
+33e8b461b6ddb717859dde664b71209ce69c119a
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
new file mode 100644
index 0000000..8ee05e9
--- /dev/null
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -0,0 +1,11 @@
+## Description
+<!--- Describe your changes in detail. -->
+
+
+
+## Checklist
+<!--- We appreciate your help and want to give you credit. Please take a moment to put an `x` in the boxes below as you complete them. -->
+- [ ] I've added this contribution to the `CHANGELOG`.
+- [ ] I've added my name to the `AUTHORS` file (or it's already there).
+- [ ] I installed pre-commit hooks (`pip install pre-commit && pre-commit install`), and ran `black` on my code.
+- [x] Please squash merge this pull request (uncheck if you'd like us to merge as multiple commits)
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..213f266
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,13 @@
+__pycache__
+*.pyc
+*.egg
+*.egg-info
+.coverage
+/.tox
+/build
+/docs/build
+/dist
+/cli_helpers.egg-info
+/cli_helpers_dev
+.idea/
+.cache/
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 0000000..80dcf6a
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,6 @@
+repos:
+- repo: https://github.com/psf/black
+ rev: stable
+ hooks:
+ - id: black
+ language_version: python3.7
diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 0000000..41d3b3a
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,30 @@
+sudo: false
+language: python
+cache: pip
+install: ./.travis/install.sh
+script:
+ - source ~/.venv/bin/activate
+ - tox
+ - if [[ "$TOXENV" == "py37" ]]; then black --check cli_helpers tests ; else echo "Skipping black for $TOXENV"; fi
+matrix:
+ include:
+ - os: linux
+ python: 3.6
+ env: TOXENV=py36
+ - os: linux
+ python: 3.6
+ env: TOXENV=noextras
+ - os: linux
+ python: 3.6
+ env: TOXENV=docs
+ - os: linux
+ python: 3.6
+ env: TOXENV=packaging
+ - os: osx
+ language: generic
+ env: TOXENV=py36
+ - os: linux
+ python: 3.7
+ env: TOXENV=py37
+ dist: xenial
+ sudo: true
diff --git a/.travis/install.sh b/.travis/install.sh
new file mode 100755
index 0000000..1768c89
--- /dev/null
+++ b/.travis/install.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+
+set -ex
+
+if [[ "$(uname -s)" == 'Darwin' ]]; then
+ sw_vers
+
+ git clone --depth 1 https://github.com/pyenv/pyenv ~/.pyenv
+ export PYENV_ROOT="$HOME/.pyenv"
+ export PATH="$PYENV_ROOT/bin:$PATH"
+ eval "$(pyenv init --path)"
+
+ case "${TOXENV}" in
+ py36)
+ pyenv install 3.6.1
+ pyenv global 3.6.1
+ ;;
+ esac
+ pyenv rehash
+fi
+
+pip install virtualenv
+python -m virtualenv ~/.venv
+source ~/.venv/bin/activate
+pip install -r requirements-dev.txt -U --upgrade-strategy only-if-needed
diff --git a/AUTHORS b/AUTHORS
new file mode 100644
index 0000000..40f2b90
--- /dev/null
+++ b/AUTHORS
@@ -0,0 +1,33 @@
+Authors
+=======
+
+CLI Helpers is written and maintained by the following people:
+
+- Amjith Ramanujam
+- Dick Marinus
+- Irina Truong
+- Thomas Roten
+
+
+Contributors
+------------
+
+This project receives help from these awesome contributors:
+
+- Terje Røsten
+- Frederic Aoustin
+- Zhaolong Zhu
+- Karthikeyan Singaravelan
+- laixintao
+- Georgy Frolov
+- Michał Górny
+- Waldir Pimenta
+- Mel Dafert
+- Andrii Kohut
+- Roland Walker
+
+Thanks
+------
+
+This project exists because of the amazing contributors from
+`pgcli <https://pgcli.com/>`_ and `mycli <http://mycli.net/>`_.
diff --git a/CHANGELOG b/CHANGELOG
new file mode 100644
index 0000000..7b7bd02
--- /dev/null
+++ b/CHANGELOG
@@ -0,0 +1,134 @@
+# Changelog
+
+## Version 2.3.1
+
+- Don't escape newlines in `ascii` tables, and add `ascii_escaped` table format.
+- Updated tabulate version to latest, to fix ImportError in pgcli.
+
+## Version 2.2.1
+
+(released on 2022-01-17)
+
+- Fix pygments tokens passed as strings
+
+## Version 2.2.0
+
+(released on 2021-08-27)
+
+- Remove dependency on terminaltables
+- Add psql_unicode table format
+- Add minimal table format
+- Fix pip2 installing py3-only versions
+- Format unprintable bytes (e.g. 0x00, 0x01) as hex
+
+## Version 2.1.0
+
+(released on 2020-07-29)
+
+- Speed up output styling of tables.
+
+## Version 2.0.1
+
+(released on 2020-05-27)
+
+- Fix newline escaping in plain-text formatters (ascii, double, github)
+- Use built-in unittest.mock instead of mock.
+
+## Version 2.0.0
+
+(released on 2020-05-26)
+
+- Remove Python 2.7 and 3.5.
+- Style config for missing value.
+
+## Version 1.2.1
+
+(released on 2019-06-09)
+
+- Pin Pygments to >= 2.4.0 for tests.
+- Remove Python 3.4 from tests and Trove classifier.
+- Add an option to skip truncating multi-line strings.
+- When truncating long strings, add ellipsis.
+
+## Version 1.2.0
+
+(released on 2019-04-05)
+
+- Fix issue with writing non-ASCII characters to config files.
+- Run tests on Python 3.7.
+- Use twine check during packaging tests.
+- Rename old tsv format to csv-tab (because it adds quotes), introduce new tsv output adapter.
+- Truncate long fields for tabular display.
+- Return the supported table formats as unicode.
+- Override tab with 4 spaces for terminal tables.
+
+## Version 1.1.0
+
+(released on 2018-10-18)
+
+- Adds config file reading/writing.
+- Style formatted tables with Pygments (optional).
+
+## Version 1.0.2
+
+(released on 2018-04-07)
+
+- Copy unit test from pgcli
+- Use safe float for unit test
+- Move strip_ansi from tests.utils to cli_helpers.utils
+
+## Version 1.0.1
+
+(released on 2017-11-27)
+
+- Output all unicode for terminaltables, add unit test.
+
+## Version 1.0.0
+
+(released on 2017-10-11)
+
+- Output as generator
+- Use backports.csv only for py2
+- Require tabulate as a dependency instead of using vendored module.
+- Drop support for Python 3.3.
+
+## Version 0.2.3
+
+(released on 2017-08-01)
+
+- Fix unicode error on Python 2 with newlines in output row.
+- Fixes to accept iterator.
+
+## Version 0.2.2
+
+(released on 2017-07-16)
+
+- Fix IndexError from being raised with uneven rows.
+
+## Version 0.2.1
+
+(released on 2017-07-11)
+
+- Run tests on macOS via Travis.
+- Fix unicode issues on Python 2 (csv and styling output).
+
+## Version 0.2.0
+
+(released on 2017-06-23)
+
+- Make vertical table separator more customizable.
+- Add format numbers preprocessor.
+- Add test coverage reports.
+- Add ability to pass additional preprocessors when formatting output.
+- Don't install tests.tabular_output.
+- Add .gitignore
+- Coverage for tox tests.
+- Style formatted output with Pygments (optional).
+- Fix issue where tabulate can't handle ANSI escape codes in default values.
+- Run tests on Windows via Appveyor.
+
+## Version 0.1.0
+
+(released on 2017-05-01)
+
+- Pretty print tabular data using a variety of formatting libraries.
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
new file mode 100644
index 0000000..a7e9d2a
--- /dev/null
+++ b/CONTRIBUTING.rst
@@ -0,0 +1,118 @@
+How to Contribute
+=================
+
+CLI Helpers would love your help! We appreciate your time and always give credit.
+
+Development Setup
+-----------------
+
+Ready to contribute? Here's how to set up CLI Helpers for local development.
+
+1. `Fork the repository <https://github.com/dbcli/cli_helpers>`_ on GitHub.
+2. Clone your fork locally::
+
+ $ git clone <url-for-your-fork>
+
+3. Add the official repository (``upstream``) as a remote repository::
+
+ $ git remote add upstream git@github.com:dbcli/cli_helpers.git
+
+4. Set up a `virtual environment <http://docs.python-guide.org/en/latest/dev/virtualenvs>`_
+ for development::
+
+ $ cd cli_helpers
+ $ pip install virtualenv
+ $ virtualenv cli_helpers_dev
+
+ We've just created a virtual environment called ``cli_helpers_dev``
+ that we'll use to install all the dependencies and tools we need to work on CLI Helpers.
+ Whenever you want to work on CLI Helpers, you need to activate the virtual environment::
+
+ $ source cli_helpers_dev/bin/activate
+
+ When you're done working, you can deactivate the virtual environment::
+
+ $ deactivate
+
+5. From within the virtual environment, install the dependencies and development tools::
+
+ $ pip install -r requirements-dev.txt
+ $ pip install --editable .
+
+6. Create a branch for your bugfix or feature based off the ``master`` branch::
+
+ $ git checkout -b <name-of-bugfix-or-feature> master
+
+7. While you work on your bugfix or feature, be sure to pull the latest changes from ``upstream``.
+ This ensures that your local codebase is up-to-date::
+
+ $ git pull upstream master
+
+8. When your work is ready for the CLI Helpers team to review it,
+ make sure to add an entry to CHANGELOG file, and add your name to the AUTHORS file.
+ Then, push your branch to your fork::
+
+ $ git push origin <name-of-bugfix-or-feature>
+
+9. `Create a pull request <https://help.github.com/articles/creating-a-pull-request-from-a-fork/>`_
+ on GitHub.
+
+
+Running the Tests
+-----------------
+
+While you work on CLI Helpers, it's important to run the tests to make sure your code
+hasn't broken any existing functionality. To run the tests, just type in::
+
+ $ pytest
+
+CLI Helpers supports Python 3.6+. You can test against multiple versions of
+Python by running::
+
+ $ tox
+
+You can also measure CLI Helpers' test coverage by running::
+
+ $ pytest --cov-report= --cov=cli_helpers
+ $ coverage report
+
+
+Coding Style
+------------
+
+When you submit a PR, the changeset is checked for pep8 compliance using
+`black <https://github.com/psf/black>`_. If you see a build failing because
+of these checks, install ``black`` and apply style fixes:
+
+::
+
+ $ pip install black
+ $ black .
+
+Then commit and push the fixes.
+
+To ensure ``black`` is applied on every commit, we also suggest installing ``pre-commit`` and
+using the ``pre-commit`` hooks available in this repo:
+
+::
+
+ $ pip install pre-commit
+ $ pre-commit install
+
+Git blame
+---------
+
+Use ``git blame my_file.py --ignore-revs-file .git-blame-ignore-revs`` to exclude irrelevant commits
+(specifically Black) from ``git blame``. For more information,
+see `here <https://github.com/psf/black#migrating-your-code-style-without-ruining-git-blame>`_.
+
+Documentation
+-------------
+
+If your work in CLI Helpers requires a documentation change or addition, you can
+build the documentation by running::
+
+ $ make -C docs clean html
+ $ open docs/build/html/index.html
+
+That will build the documentation and open it in your web browser.
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..cf18b7b
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2017, dbcli
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+* Neither the name of dbcli nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 0000000..e1cdca0
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1,9 @@
+include *.txt *.rst *.py
+include AUTHORS CHANGELOG LICENSE
+include tox.ini
+recursive-include docs *.py
+recursive-include docs *.rst
+recursive-include docs Makefile
+recursive-include tests *.py
+include tests/config_data/*
+exclude .pre-commit-config.yaml .git-blame-ignore-revs
\ No newline at end of file
diff --git a/README.rst b/README.rst
new file mode 100644
index 0000000..4936647
--- /dev/null
+++ b/README.rst
@@ -0,0 +1,38 @@
+===========
+CLI Helpers
+===========
+
+.. image:: https://travis-ci.org/dbcli/cli_helpers.svg?branch=master
+ :target: https://travis-ci.org/dbcli/cli_helpers
+
+.. image:: https://ci.appveyor.com/api/projects/status/37a1ri2nbcp237tr/branch/master?svg=true
+ :target: https://ci.appveyor.com/project/dbcli/cli-helpers
+
+.. image:: https://codecov.io/gh/dbcli/cli_helpers/branch/master/graph/badge.svg
+ :target: https://codecov.io/gh/dbcli/cli_helpers
+
+.. image:: https://img.shields.io/pypi/v/cli_helpers.svg?style=flat
+ :target: https://pypi.python.org/pypi/cli_helpers
+
+.. start-body
+
+CLI Helpers is a Python package that makes it easy to perform common tasks when
+building command-line apps. It's a helper library for command-line interfaces.
+
+Libraries like `Click <http://click.pocoo.org/5/>`_ and
+`Python Prompt Toolkit <https://python-prompt-toolkit.readthedocs.io/en/latest/>`_
+are amazing tools that help you create quality apps. CLI Helpers complements
+these libraries by wrapping up common tasks in simple interfaces.
+
+CLI Helpers is not focused on your app's design pattern or framework -- you can
+use it on its own or in combination with other libraries. It's lightweight and
+easy to extend.
+
+What's included in CLI Helpers?
+
+- Prettyprinting of tabular data with custom pre-processing
+- Config file reading/writing
+
+.. end-body
+
+Read the documentation at http://cli-helpers.rtfd.io
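
For orientation while reviewing the sources added below, here is a minimal sketch of the pretty-printing the README advertises, using only the public API introduced in this commit (the sample rows come from the docstring in cli_helpers/tabular_output/output_formatter.py)::

    from cli_helpers.tabular_output import format_output

    # Sample rows mirroring the TabularOutputFormatter docstring example.
    data = ((1, 87), (2, 80), (3, 79))
    headers = ("day", "temperature")

    # format_output returns an iterable of already-formatted lines.
    print("\n".join(format_output(data, headers, format_name="simple")))
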
diff --git a/appveyor.yml b/appveyor.yml
new file mode 100644
index 0000000..68eded1
--- /dev/null
+++ b/appveyor.yml
@@ -0,0 +1,15 @@
+environment:
+ matrix:
+ - PYTHON: "C:\\Python36"
+ - PYTHON: "C:\\Python37"
+
+build: off
+
+before_test:
+ - "SET PATH=%PYTHON%;%PYTHON%\\Scripts;%PATH%"
+ - pip install -r requirements-dev.txt
+ - pip install -e .
+test_script:
+ - pytest --cov-report= --cov=cli_helpers
+ - coverage report
+ - codecov
diff --git a/cli_helpers/__init__.py b/cli_helpers/__init__.py
new file mode 100644
index 0000000..3a5935a
--- /dev/null
+++ b/cli_helpers/__init__.py
@@ -0,0 +1 @@
+__version__ = "2.3.1"
diff --git a/cli_helpers/compat.py b/cli_helpers/compat.py
new file mode 100644
index 0000000..422403c
--- /dev/null
+++ b/cli_helpers/compat.py
@@ -0,0 +1,56 @@
+# -*- coding: utf-8 -*-
+"""OS and Python compatibility support."""
+
+from decimal import Decimal
+from types import SimpleNamespace
+import sys
+
+PY2 = sys.version_info[0] == 2
+WIN = sys.platform.startswith("win")
+MAC = sys.platform == "darwin"
+
+
+if PY2:
+ text_type = unicode
+ binary_type = str
+ long_type = long
+ int_types = (int, long)
+
+ from UserDict import UserDict
+ from backports import csv
+
+ from StringIO import StringIO
+ from itertools import izip_longest as zip_longest
+else:
+ text_type = str
+ binary_type = bytes
+ long_type = int
+ int_types = (int,)
+
+ from collections import UserDict
+ import csv
+ from io import StringIO
+ from itertools import zip_longest
+
+
+HAS_PYGMENTS = True
+try:
+ from pygments.token import Token
+ from pygments.formatters.terminal256 import Terminal256Formatter
+except ImportError:
+ HAS_PYGMENTS = False
+ Terminal256Formatter = None
+ Token = SimpleNamespace()
+ Token.Output = SimpleNamespace()
+ Token.Output.Header = None
+ Token.Output.OddRow = None
+ Token.Output.EvenRow = None
+ Token.Output.Null = None
+ Token.Output.TableSeparator = None
+ Token.Results = SimpleNamespace()
+ Token.Results.Header = None
+ Token.Results.OddRow = None
+ Token.Results.EvenRow = None
+
+
+float_types = (float, Decimal)
diff --git a/cli_helpers/config.py b/cli_helpers/config.py
new file mode 100644
index 0000000..7669717
--- /dev/null
+++ b/cli_helpers/config.py
@@ -0,0 +1,301 @@
+# -*- coding: utf-8 -*-
+"""Read and write an application's config files."""
+
+from __future__ import unicode_literals
+import io
+import logging
+import os
+
+from configobj import ConfigObj, ConfigObjError
+from validate import ValidateError, Validator
+
+from .compat import MAC, text_type, UserDict, WIN
+
+logger = logging.getLogger(__name__)
+
+
+class ConfigError(Exception):
+ """Base class for exceptions in this module."""
+
+ pass
+
+
+class DefaultConfigValidationError(ConfigError):
+ """Indicates the default config file did not validate correctly."""
+
+ pass
+
+
+class Config(UserDict, object):
+ """Config reader/writer class.
+
+ :param str app_name: The application's name.
+ :param str app_author: The application author/organization.
+ :param str filename: The config filename to look for (e.g. ``config``).
+ :param dict/str default: The default config values or absolute path to
+ config file.
+ :param bool validate: Whether or not to validate the config file.
+ :param bool write_default: Whether or not to write the default config
+ file to the user config directory if it doesn't
+ already exist.
+ :param tuple additional_dirs: Additional directories to check for a config
+ file.
+ """
+
+ def __init__(
+ self,
+ app_name,
+ app_author,
+ filename,
+ default=None,
+ validate=False,
+ write_default=False,
+ additional_dirs=(),
+ ):
+ super(Config, self).__init__()
+ #: The :class:`ConfigObj` instance.
+ self.data = ConfigObj(encoding="utf8")
+
+ self.default = {}
+ self.default_file = self.default_config = None
+ self.config_filenames = []
+
+ self.app_name, self.app_author = app_name, app_author
+ self.filename = filename
+ self.write_default = write_default
+ self.validate = validate
+ self.additional_dirs = additional_dirs
+
+ if isinstance(default, dict):
+ self.default = default
+ self.update(default)
+ elif isinstance(default, text_type):
+ self.default_file = default
+ elif default is not None:
+ raise TypeError(
+ '"default" must be a dict or {}, not {}'.format(
+ text_type.__name__, type(default)
+ )
+ )
+
+ if self.write_default and not self.default_file:
+ raise ValueError(
+ 'Cannot use "write_default" without specifying ' "a default file."
+ )
+
+ if self.validate and not self.default_file:
+ raise ValueError(
+ 'Cannot use "validate" without specifying a ' "default file."
+ )
+
+ def read_default_config(self):
+ """Read the default config file.
+
+ :raises DefaultConfigValidationError: There was a validation error with
+ the *default* file.
+ """
+ if self.validate:
+ self.default_config = ConfigObj(
+ configspec=self.default_file,
+ list_values=False,
+ _inspec=True,
+ encoding="utf8",
+ )
+ # ConfigObj does not set the encoding on the configspec.
+ self.default_config.configspec.encoding = "utf8"
+
+ valid = self.default_config.validate(
+ Validator(), copy=True, preserve_errors=True
+ )
+ if valid is not True:
+ for name, section in valid.items():
+ if section is True:
+ continue
+ for key, value in section.items():
+ if isinstance(value, ValidateError):
+ raise DefaultConfigValidationError(
+ 'section [{}], key "{}": {}'.format(name, key, value)
+ )
+ elif self.default_file:
+ self.default_config, _ = self.read_config_file(self.default_file)
+
+ self.update(self.default_config)
+
+ def read(self):
+ """Read the default, additional, system, and user config files.
+
+ :raises DefaultConfigValidationError: There was a validation error with
+ the *default* file.
+ """
+ if self.default_file:
+ self.read_default_config()
+ return self.read_config_files(self.all_config_files())
+
+ def user_config_file(self):
+ """Get the absolute path to the user config file."""
+ return os.path.join(
+ get_user_config_dir(self.app_name, self.app_author), self.filename
+ )
+
+ def system_config_files(self):
+ """Get a list of absolute paths to the system config files."""
+ return [
+ os.path.join(f, self.filename)
+ for f in get_system_config_dirs(self.app_name, self.app_author)
+ ]
+
+ def additional_files(self):
+ """Get a list of absolute paths to the additional config files."""
+ return [os.path.join(f, self.filename) for f in self.additional_dirs]
+
+ def all_config_files(self):
+ """Get a list of absolute paths to all the config files."""
+ return (
+ self.additional_files()
+ + self.system_config_files()
+ + [self.user_config_file()]
+ )
+
+ def write_default_config(self, overwrite=False):
+ """Write the default config to the user's config file.
+
+ :param bool overwrite: Write over an existing config if it exists.
+ """
+ destination = self.user_config_file()
+ if not overwrite and os.path.exists(destination):
+ return
+
+ with io.open(destination, mode="wb") as f:
+ self.default_config.write(f)
+
+ def write(self, outfile=None, section=None):
+ """Write the current config to a file (defaults to user config).
+
+ :param str outfile: The path to the file to write to.
+ :param None/str section: The config section to write, or :data:`None`
+ to write the entire config.
+ """
+ with io.open(outfile or self.user_config_file(), "wb") as f:
+ self.data.write(outfile=f, section=section)
+
+ def read_config_file(self, f):
+ """Read a config file *f*.
+
+ :param str f: The path to a file to read.
+ """
+ configspec = self.default_file if self.validate else None
+ try:
+ config = ConfigObj(
+ infile=f, configspec=configspec, interpolation=False, encoding="utf8"
+ )
+ # ConfigObj does not set the encoding on the configspec.
+ if config.configspec is not None:
+ config.configspec.encoding = "utf8"
+ except ConfigObjError as e:
+ logger.warning(
+ "Unable to parse line {} of config file {}".format(e.line_number, f)
+ )
+ config = e.config
+
+ valid = True
+ if self.validate:
+ valid = config.validate(Validator(), preserve_errors=True, copy=True)
+ if bool(config):
+ self.config_filenames.append(config.filename)
+
+ return config, valid
+
+ def read_config_files(self, files):
+ """Read a list of config files.
+
+ :param iterable files: An iterable (e.g. list) of files to read.
+ """
+ errors = {}
+ for _file in files:
+ config, valid = self.read_config_file(_file)
+ self.update(config)
+ if valid is not True:
+ errors[_file] = valid
+ return errors or True
+
+
+def get_user_config_dir(app_name, app_author, roaming=True, force_xdg=True):
+ """Returns the config folder for the application. The default behavior
+ is to return whatever is most appropriate for the operating system.
+
+ For an example application called ``"My App"`` by ``"Acme"``,
+ something like the following folders could be returned:
+
+ macOS (non-XDG):
+ ``~/Library/Application Support/My App``
+ macOS (XDG):
+ ``~/.config/my-app``
+ Unix:
+ ``~/.config/my-app``
+ Windows 7 (roaming):
+ ``C:\\Users\\<user>\\AppData\\Roaming\\Acme\\My App``
+ Windows 7 (not roaming):
+ ``C:\\Users\\<user>\\AppData\\Local\\Acme\\My App``
+
+ :param app_name: the application name. This should be properly capitalized
+ and can contain whitespace.
+ :param app_author: The app author's name (or company). This should be
+ properly capitalized and can contain whitespace.
+ :param roaming: controls if the folder should be roaming or not on Windows.
+ Has no effect on non-Windows systems.
+ :param force_xdg: if this is set to `True`, then on macOS the XDG Base
+ Directory Specification will be followed. Has no effect
+ on non-macOS systems.
+
+ """
+ if WIN:
+ key = "APPDATA" if roaming else "LOCALAPPDATA"
+ folder = os.path.expanduser(os.environ.get(key, "~"))
+ return os.path.join(folder, app_author, app_name)
+ if MAC and not force_xdg:
+ return os.path.join(
+ os.path.expanduser("~/Library/Application Support"), app_name
+ )
+ return os.path.join(
+ os.path.expanduser(os.environ.get("XDG_CONFIG_HOME", "~/.config")),
+ _pathify(app_name),
+ )
+
+
+def get_system_config_dirs(app_name, app_author, force_xdg=True):
+ r"""Returns a list of system-wide config folders for the application.
+
+ For an example application called ``"My App"`` by ``"Acme"``,
+ something like the following folders could be returned:
+
+ macOS (non-XDG):
+ ``['/Library/Application Support/My App']``
+ macOS (XDG):
+ ``['/etc/xdg/my-app']``
+ Unix:
+ ``['/etc/xdg/my-app']``
+ Windows 7:
+ ``['C:\ProgramData\Acme\My App']``
+
+ :param app_name: the application name. This should be properly capitalized
+ and can contain whitespace.
+ :param app_author: The app author's name (or company). This should be
+ properly capitalized and can contain whitespace.
+ :param force_xdg: if this is set to `True`, then on macOS the XDG Base
+ Directory Specification will be followed. Has no effect
+ on non-macOS systems.
+
+ """
+ if WIN:
+ folder = os.environ.get("PROGRAMDATA")
+ return [os.path.join(folder, app_author, app_name)]
+ if MAC and not force_xdg:
+ return [os.path.join("/Library/Application Support", app_name)]
+ dirs = os.environ.get("XDG_CONFIG_DIRS", "/etc/xdg")
+ paths = [os.path.expanduser(x) for x in dirs.split(os.pathsep)]
+ return [os.path.join(d, _pathify(app_name)) for d in paths]
+
+
+def _pathify(s):
+ """Convert spaces to hyphens and lowercase a string."""
+ return "-".join(s.split()).lower()
diff --git a/cli_helpers/tabular_output/__init__.py b/cli_helpers/tabular_output/__init__.py
new file mode 100644
index 0000000..e4247bd
--- /dev/null
+++ b/cli_helpers/tabular_output/__init__.py
@@ -0,0 +1,13 @@
+# -*- coding: utf-8 -*-
+"""CLI Helper's tabular output module makes it easy to format your data using
+various formatting libraries.
+
+When formatting data, you'll primarily use the
+:func:`~cli_helpers.tabular_output.format_output` function and
+:class:`~cli_helpers.tabular_output.TabularOutputFormatter` class.
+
+"""
+
+from .output_formatter import format_output, TabularOutputFormatter
+
+__all__ = ["format_output", "TabularOutputFormatter"]
diff --git a/cli_helpers/tabular_output/delimited_output_adapter.py b/cli_helpers/tabular_output/delimited_output_adapter.py
new file mode 100644
index 0000000..b812456
--- /dev/null
+++ b/cli_helpers/tabular_output/delimited_output_adapter.py
@@ -0,0 +1,56 @@
+# -*- coding: utf-8 -*-
+"""A delimited data output adapter (e.g. CSV, TSV)."""
+
+from __future__ import unicode_literals
+import contextlib
+
+from cli_helpers.compat import csv, StringIO
+from cli_helpers.utils import filter_dict_by_key
+from .preprocessors import bytes_to_string, override_missing_value
+
+supported_formats = ("csv", "csv-tab")
+preprocessors = (override_missing_value, bytes_to_string)
+
+
+class linewriter(object):
+ def __init__(self):
+ self.reset()
+
+ def reset(self):
+ self.line = None
+
+ def write(self, d):
+ self.line = d
+
+
+def adapter(data, headers, table_format="csv", **kwargs):
+ """Wrap the formatting inside a function for TabularOutputFormatter."""
+ keys = (
+ "dialect",
+ "delimiter",
+ "doublequote",
+ "escapechar",
+ "quotechar",
+ "quoting",
+ "skipinitialspace",
+ "strict",
+ )
+ if table_format == "csv":
+ delimiter = ","
+ elif table_format == "csv-tab":
+ delimiter = "\t"
+ else:
+ raise ValueError("Invalid table_format specified.")
+
+ ckwargs = {"delimiter": delimiter, "lineterminator": ""}
+ ckwargs.update(filter_dict_by_key(kwargs, keys))
+
+ l = linewriter()
+ writer = csv.writer(l, **ckwargs)
+ writer.writerow(headers)
+ yield l.line
+
+ for row in data:
+ l.reset()
+ writer.writerow(row)
+ yield l.line
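
A small usage sketch for this adapter through the public formatter (the rows are made up); ``csv`` separates fields with commas, while ``csv-tab`` switches the delimiter to tabs::

    from cli_helpers.tabular_output import format_output

    data = [["a, b", 1], ["c", None]]
    headers = ["letters", "count"]

    # None becomes "" (the registered missing_value) and the embedded comma
    # is quoted by csv.writer.
    print("\n".join(format_output(data, headers, format_name="csv")))
    print("\n".join(format_output(data, headers, format_name="csv-tab")))
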
diff --git a/cli_helpers/tabular_output/output_formatter.py b/cli_helpers/tabular_output/output_formatter.py
new file mode 100644
index 0000000..6cadf6c
--- /dev/null
+++ b/cli_helpers/tabular_output/output_formatter.py
@@ -0,0 +1,255 @@
+# -*- coding: utf-8 -*-
+"""A generic tabular data output formatter interface."""
+
+from __future__ import unicode_literals
+from collections import namedtuple
+
+from cli_helpers.compat import (
+ text_type,
+ binary_type,
+ int_types,
+ float_types,
+ zip_longest,
+)
+from cli_helpers.utils import unique_items
+from . import (
+ delimited_output_adapter,
+ vertical_table_adapter,
+ tabulate_adapter,
+ tsv_output_adapter,
+)
+from decimal import Decimal
+
+import itertools
+
+MISSING_VALUE = "<null>"
+MAX_FIELD_WIDTH = 500
+
+TYPES = {
+ type(None): 0,
+ bool: 1,
+ int: 2,
+ float: 3,
+ Decimal: 3,
+ binary_type: 4,
+ text_type: 5,
+}
+
+OutputFormatHandler = namedtuple(
+ "OutputFormatHandler", "format_name preprocessors formatter formatter_args"
+)
+
+
+class TabularOutputFormatter(object):
+ """An interface to various tabular data formatting libraries.
+
+ The formatting libraries supported include:
+ - `tabulate <https://bitbucket.org/astanin/python-tabulate>`_
+ - `terminaltables <https://robpol86.github.io/terminaltables/>`_
+ - a CLI Helper vertical table layout
+ - delimited formats (CSV and TSV)
+
+ :param str format_name: An optional, default format name.
+
+ Usage::
+
+ >>> from cli_helpers.tabular_output import TabularOutputFormatter
+ >>> formatter = TabularOutputFormatter(format_name='simple')
+ >>> data = ((1, 87), (2, 80), (3, 79))
+ >>> headers = ('day', 'temperature')
+ >>> print(formatter.format_output(data, headers))
+ day temperature
+ ----- -------------
+ 1 87
+ 2 80
+ 3 79
+
+ You can use any :term:`iterable` for the data or headers::
+
+ >>> data = enumerate(('87', '80', '79'), 1)
+ >>> print(formatter.format_output(data, headers))
+ day temperature
+ ----- -------------
+ 1 87
+ 2 80
+ 3 79
+
+ """
+
+ _output_formats = {}
+
+ def __init__(self, format_name=None):
+ """Set the default *format_name*."""
+ self._format_name = None
+
+ if format_name:
+ self.format_name = format_name
+
+ @property
+ def format_name(self):
+ """The current format name.
+
+ This value must be in :data:`supported_formats`.
+
+ """
+ return self._format_name
+
+ @format_name.setter
+ def format_name(self, format_name):
+ """Set the default format name.
+
+ :param str format_name: The display format name.
+ :raises ValueError: if the format is not recognized.
+
+ """
+ if format_name in self.supported_formats:
+ self._format_name = format_name
+ else:
+ raise ValueError('unrecognized format_name "{}"'.format(format_name))
+
+ @property
+ def supported_formats(self):
+ """The names of the supported output formats in a :class:`tuple`."""
+ return tuple(self._output_formats.keys())
+
+ @classmethod
+ def register_new_formatter(
+ cls, format_name, handler, preprocessors=(), kwargs=None
+ ):
+ """Register a new output formatter.
+
+ :param str format_name: The name of the format.
+ :param callable handler: The function that formats the data.
+ :param tuple preprocessors: The preprocessors to call before
+ formatting.
+ :param dict kwargs: Keys/values for keyword argument defaults.
+
+ """
+ cls._output_formats[format_name] = OutputFormatHandler(
+ format_name, preprocessors, handler, kwargs or {}
+ )
+
+ def format_output(
+ self,
+ data,
+ headers,
+ format_name=None,
+ preprocessors=(),
+ column_types=None,
+ **kwargs
+ ):
+ r"""Format the headers and data using a specific formatter.
+
+ *format_name* must be a supported formatter (see
+ :attr:`supported_formats`).
+
+ :param iterable data: An :term:`iterable` (e.g. list) of rows.
+ :param iterable headers: The column headers.
+ :param str format_name: The display format to use (optional, if the
+ :class:`TabularOutputFormatter` object has a default format set).
+ :param tuple preprocessors: Additional preprocessors to call before
+ any formatter preprocessors.
+ :param \*\*kwargs: Optional arguments for the formatter.
+ :return: The formatted data.
+ :rtype: str
+ :raises ValueError: If the *format_name* is not recognized.
+
+ """
+ format_name = format_name or self._format_name
+ if format_name not in self.supported_formats:
+ raise ValueError('unrecognized format "{}"'.format(format_name))
+
+ (_, _preprocessors, formatter, fkwargs) = self._output_formats[format_name]
+ fkwargs.update(kwargs)
+ if column_types is None:
+ data = list(data)
+ column_types = self._get_column_types(data)
+ for f in unique_items(preprocessors + _preprocessors):
+ data, headers = f(data, headers, column_types=column_types, **fkwargs)
+ return formatter(list(data), headers, column_types=column_types, **fkwargs)
+
+ def _get_column_types(self, data):
+ """Get a list of the data types for each column in *data*."""
+ columns = list(zip_longest(*data))
+ return [self._get_column_type(column) for column in columns]
+
+ def _get_column_type(self, column):
+ """Get the most generic data type for iterable *column*."""
+ type_values = [TYPES[self._get_type(v)] for v in column]
+ inverse_types = {v: k for k, v in TYPES.items()}
+ return inverse_types[max(type_values)]
+
+ def _get_type(self, value):
+ """Get the data type for *value*."""
+ if value is None:
+ return type(None)
+ elif type(value) in int_types:
+ return int
+ elif type(value) in float_types:
+ return float
+ elif isinstance(value, binary_type):
+ return binary_type
+ else:
+ return text_type
+
+
+def format_output(data, headers, format_name, **kwargs):
+ r"""Format output using *format_name*.
+
+ This is a wrapper around the :class:`TabularOutputFormatter` class.
+
+ :param iterable data: An :term:`iterable` (e.g. list) of rows.
+ :param iterable headers: The column headers.
+ :param str format_name: The display format to use.
+ :param \*\*kwargs: Optional arguments for the formatter.
+ :return: The formatted data.
+ :rtype: str
+
+ """
+ formatter = TabularOutputFormatter(format_name=format_name)
+ return formatter.format_output(data, headers, **kwargs)
+
+
+for vertical_format in vertical_table_adapter.supported_formats:
+ TabularOutputFormatter.register_new_formatter(
+ vertical_format,
+ vertical_table_adapter.adapter,
+ vertical_table_adapter.preprocessors,
+ {
+ "table_format": vertical_format,
+ "missing_value": MISSING_VALUE,
+ "max_field_width": None,
+ },
+ )
+
+for delimited_format in delimited_output_adapter.supported_formats:
+ TabularOutputFormatter.register_new_formatter(
+ delimited_format,
+ delimited_output_adapter.adapter,
+ delimited_output_adapter.preprocessors,
+ {
+ "table_format": delimited_format,
+ "missing_value": "",
+ "max_field_width": None,
+ },
+ )
+
+for tabulate_format in tabulate_adapter.supported_formats:
+ TabularOutputFormatter.register_new_formatter(
+ tabulate_format,
+ tabulate_adapter.adapter,
+ tabulate_adapter.get_preprocessors(tabulate_format),
+ {
+ "table_format": tabulate_format,
+ "missing_value": MISSING_VALUE,
+ "max_field_width": MAX_FIELD_WIDTH,
+ },
+ ),
+
+for tsv_format in tsv_output_adapter.supported_formats:
+ TabularOutputFormatter.register_new_formatter(
+ tsv_format,
+ tsv_output_adapter.adapter,
+ tsv_output_adapter.preprocessors,
+ {"table_format": tsv_format, "missing_value": "", "max_field_width": None},
+ )
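
The ``register_new_formatter`` hook used above for the bundled adapters also accepts third-party formats; the ``joined`` format below is purely illustrative and not part of this commit::

    from cli_helpers.tabular_output import TabularOutputFormatter
    from cli_helpers.tabular_output.preprocessors import (
        convert_to_string,
        override_missing_value,
    )

    def joined_adapter(data, headers, delimiter=" | ", **_):
        """Toy adapter: emit the headers, then one delimited line per row."""
        yield delimiter.join(headers)
        for row in data:
            yield delimiter.join(row)

    TabularOutputFormatter.register_new_formatter(
        "joined",
        joined_adapter,
        preprocessors=(override_missing_value, convert_to_string),
        kwargs={"delimiter": " | ", "missing_value": "<null>"},
    )

    formatter = TabularOutputFormatter(format_name="joined")
    print("\n".join(formatter.format_output([[1, "a"], [2, None]], ["id", "val"])))
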
diff --git a/cli_helpers/tabular_output/preprocessors.py b/cli_helpers/tabular_output/preprocessors.py
new file mode 100644
index 0000000..8342d67
--- /dev/null
+++ b/cli_helpers/tabular_output/preprocessors.py
@@ -0,0 +1,353 @@
+# -*- coding: utf-8 -*-
+"""These preprocessor functions are used to process data prior to output."""
+
+import string
+
+from cli_helpers import utils
+from cli_helpers.compat import text_type, int_types, float_types, HAS_PYGMENTS, Token
+
+
+def truncate_string(
+ data, headers, max_field_width=None, skip_multiline_string=True, **_
+):
+ """Truncate very long strings. Only needed for tabular
+ representation, because trying to tabulate very long data
+ is problematic in terms of performance, and does not make any
+ sense visually.
+
+ :param iterable data: An :term:`iterable` (e.g. list) of rows.
+ :param iterable headers: The column headers.
+ :param int max_field_width: Width to truncate field for display
+ :return: The processed data and headers.
+ :rtype: tuple
+ """
+ return (
+ (
+ [
+ utils.truncate_string(v, max_field_width, skip_multiline_string)
+ for v in row
+ ]
+ for row in data
+ ),
+ [
+ utils.truncate_string(h, max_field_width, skip_multiline_string)
+ for h in headers
+ ],
+ )
+
+
+def convert_to_string(data, headers, **_):
+ """Convert all *data* and *headers* to strings.
+
+ Binary data that cannot be decoded is converted to a hexadecimal
+ representation via :func:`binascii.hexlify`.
+
+ :param iterable data: An :term:`iterable` (e.g. list) of rows.
+ :param iterable headers: The column headers.
+ :return: The processed data and headers.
+ :rtype: tuple
+
+ """
+ return (
+ ([utils.to_string(v) for v in row] for row in data),
+ [utils.to_string(h) for h in headers],
+ )
+
+
+def override_missing_value(
+ data,
+ headers,
+ style=None,
+ missing_value_token=Token.Output.Null,
+ missing_value="",
+ **_,
+):
+ """Override missing values in the *data* with *missing_value*.
+
+ A missing value is any value that is :data:`None`.
+
+ :param iterable data: An :term:`iterable` (e.g. list) of rows.
+ :param iterable headers: The column headers.
+ :param style: Style for missing_value.
+ :param missing_value_token: The Pygments token used for missing data.
+ :param missing_value: The default value to use for missing data.
+ :return: The processed data and headers.
+ :rtype: tuple
+
+ """
+
+ def fields():
+ for row in data:
+ processed = []
+ for field in row:
+ if field is None and style and HAS_PYGMENTS:
+ styled = utils.style_field(
+ missing_value_token, missing_value, style
+ )
+ processed.append(styled)
+ elif field is None:
+ processed.append(missing_value)
+ else:
+ processed.append(field)
+ yield processed
+
+ return (fields(), headers)
+
+
+def override_tab_value(data, headers, new_value=" ", **_):
+ """Override tab values in the *data* with *new_value*.
+
+ :param iterable data: An :term:`iterable` (e.g. list) of rows.
+ :param iterable headers: The column headers.
+ :param new_value: The new value to use for tab.
+ :return: The processed data and headers.
+ :rtype: tuple
+
+ """
+ return (
+ (
+ [v.replace("\t", new_value) if isinstance(v, text_type) else v for v in row]
+ for row in data
+ ),
+ headers,
+ )
+
+
+def escape_newlines(data, headers, **_):
+ """Escape newline characters (\n -> \\n, \r -> \\r)
+
+ :param iterable data: An :term:`iterable` (e.g. list) of rows.
+ :param iterable headers: The column headers.
+ :return: The processed data and headers.
+ :rtype: tuple
+
+ """
+ return (
+ (
+ [
+ v.replace("\r", r"\r").replace("\n", r"\n")
+ if isinstance(v, text_type)
+ else v
+ for v in row
+ ]
+ for row in data
+ ),
+ headers,
+ )
+
+
+def bytes_to_string(data, headers, **_):
+ """Convert all *data* and *headers* bytes to strings.
+
+ Binary data that cannot be decoded is converted to a hexadecimal
+ representation via :func:`binascii.hexlify`.
+
+ :param iterable data: An :term:`iterable` (e.g. list) of rows.
+ :param iterable headers: The column headers.
+ :return: The processed data and headers.
+ :rtype: tuple
+
+ """
+ return (
+ ([utils.bytes_to_string(v) for v in row] for row in data),
+ [utils.bytes_to_string(h) for h in headers],
+ )
+
+
+def align_decimals(data, headers, column_types=(), **_):
+ """Align numbers in *data* on their decimal points.
+
+ Whitespace padding is added before a number so that all numbers in a
+ column are aligned.
+
+ Outputting data before aligning the decimals::
+
+ 1
+ 2.1
+ 10.59
+
+ Outputting data after aligning the decimals::
+
+ 1
+ 2.1
+ 10.59
+
+ :param iterable data: An :term:`iterable` (e.g. list) of rows.
+ :param iterable headers: The column headers.
+ :param iterable column_types: The columns' type objects (e.g. int or float).
+ :return: The processed data and headers.
+ :rtype: tuple
+
+ """
+ pointpos = len(headers) * [0]
+ data = list(data)
+ for row in data:
+ for i, v in enumerate(row):
+ if column_types[i] is float and type(v) in float_types:
+ v = text_type(v)
+ pointpos[i] = max(utils.intlen(v), pointpos[i])
+
+ def results(data):
+ for row in data:
+ result = []
+ for i, v in enumerate(row):
+ if column_types[i] is float and type(v) in float_types:
+ v = text_type(v)
+ result.append((pointpos[i] - utils.intlen(v)) * " " + v)
+ else:
+ result.append(v)
+ yield result
+
+ return results(data), headers
+
+
+def quote_whitespaces(data, headers, quotestyle="'", **_):
+ """Quote leading/trailing whitespace in *data*.
+
+ When outputting data with leading or trailing whitespace, it can be useful
+ to put quotation marks around the value so the whitespace is more
+ apparent. If one value in a column needs to be quoted, then all values in that
+ column are quoted to keep things consistent.
+
+ .. NOTE::
+ :data:`string.whitespace` is used to determine which characters are
+ whitespace.
+
+ :param iterable data: An :term:`iterable` (e.g. list) of rows.
+ :param iterable headers: The column headers.
+ :param str quotestyle: The quotation mark to use (defaults to ``'``).
+ :return: The processed data and headers.
+ :rtype: tuple
+
+ """
+ whitespace = tuple(string.whitespace)
+ quote = len(headers) * [False]
+ data = list(data)
+ for row in data:
+ for i, v in enumerate(row):
+ v = text_type(v)
+ if v.startswith(whitespace) or v.endswith(whitespace):
+ quote[i] = True
+
+ def results(data):
+ for row in data:
+ result = []
+ for i, v in enumerate(row):
+ quotation = quotestyle if quote[i] else ""
+ result.append(
+ "{quotestyle}{value}{quotestyle}".format(
+ quotestyle=quotation, value=v
+ )
+ )
+ yield result
+
+ return results(data), headers
+
+
+def style_output(
+ data,
+ headers,
+ style=None,
+ header_token=Token.Output.Header,
+ odd_row_token=Token.Output.OddRow,
+ even_row_token=Token.Output.EvenRow,
+ **_,
+):
+ """Style the *data* and *headers* (e.g. bold, italic, and colors)
+
+ .. NOTE::
+ This requires the `Pygments <http://pygments.org/>`_ library to
+ be installed. You can install it with CLI Helpers as an extra::
+ $ pip install cli_helpers[styles]
+
+ Example usage::
+
+ from cli_helpers.tabular_output.preprocessors import style_output
+ from pygments.style import Style
+ from pygments.token import Token
+
+ class YourStyle(Style):
+ default_style = ""
+ styles = {
+ Token.Output.Header: 'bold ansibrightred',
+ Token.Output.OddRow: 'bg:#eee #111',
+ Token.Output.EvenRow: '#0f0'
+ }
+
+ headers = ('First Name', 'Last Name')
+ data = [['Fred', 'Roberts'], ['George', 'Smith']]
+
+ data, headers = style_output(data, headers, style=YourStyle)
+
+ :param iterable data: An :term:`iterable` (e.g. list) of rows.
+ :param iterable headers: The column headers.
+ :param str/pygments.style.Style style: A Pygments style. You can `create
+ your own styles <https://pygments.org/docs/styles#creating-own-styles>`_.
+ :param str header_token: The token type to be used for the headers.
+ :param str odd_row_token: The token type to be used for odd rows.
+ :param str even_row_token: The token type to be used for even rows.
+ :return: The styled data and headers.
+ :rtype: tuple
+
+ """
+ from cli_helpers.utils import filter_style_table
+
+ relevant_styles = filter_style_table(
+ style, header_token, odd_row_token, even_row_token
+ )
+ if style and HAS_PYGMENTS:
+ if relevant_styles.get(header_token):
+ headers = [
+ utils.style_field(header_token, header, style) for header in headers
+ ]
+ if relevant_styles.get(odd_row_token) or relevant_styles.get(even_row_token):
+ data = (
+ [
+ utils.style_field(
+ odd_row_token if i % 2 else even_row_token, f, style
+ )
+ for f in r
+ ]
+ for i, r in enumerate(data, 1)
+ )
+
+ return iter(data), headers
+
+
+def format_numbers(
+ data, headers, column_types=(), integer_format=None, float_format=None, **_
+):
+ """Format numbers according to a format specification.
+
+ This uses Python's format specification to format numbers of the following
+ types: :class:`int`, :class:`py2:long` (Python 2), :class:`float`, and
+ :class:`~decimal.Decimal`. See the :ref:`python:formatspec` for more
+ information about the format strings.
+
+ .. NOTE::
+ A column is only formatted if all of its values are the same type
+ (except for :data:`None`).
+
+ :param iterable data: An :term:`iterable` (e.g. list) of rows.
+ :param iterable headers: The column headers.
+ :param iterable column_types: The columns' type objects (e.g. int or float).
+ :param str integer_format: The format string to use for integer columns.
+ :param str float_format: The format string to use for float columns.
+ :return: The processed data and headers.
+ :rtype: tuple
+
+ """
+ if (integer_format is None and float_format is None) or not column_types:
+ return iter(data), headers
+
+ def _format_number(field, column_type):
+ if integer_format and column_type is int and type(field) in int_types:
+ return format(field, integer_format)
+ elif float_format and column_type is float and type(field) in float_types:
+ return format(field, float_format)
+ return field
+
+ data = (
+ [_format_number(v, column_types[i]) for i, v in enumerate(row)] for row in data
+ )
+ return data, headers
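
Extra preprocessors can also be supplied per call, ahead of a format's built-in ones; a sketch using ``format_numbers`` follows (``disable_numparse`` is forwarded to tabulate so the preformatted strings are not re-parsed)::

    from cli_helpers.tabular_output import TabularOutputFormatter
    from cli_helpers.tabular_output.preprocessors import format_numbers

    formatter = TabularOutputFormatter(format_name="simple")
    data = [[1, 10.5], [2, 3.25]]
    headers = ["id", "price"]

    lines = formatter.format_output(
        data,
        headers,
        preprocessors=(format_numbers,),  # runs before the format's own preprocessors
        float_format=".2f",               # consumed by format_numbers
        disable_numparse=True,            # keep "10.50" and "3.25" as-is in tabulate
    )
    print("\n".join(lines))
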
diff --git a/cli_helpers/tabular_output/tabulate_adapter.py b/cli_helpers/tabular_output/tabulate_adapter.py
new file mode 100644
index 0000000..2c557f8
--- /dev/null
+++ b/cli_helpers/tabular_output/tabulate_adapter.py
@@ -0,0 +1,212 @@
+# -*- coding: utf-8 -*-
+"""Format adapter for the tabulate module."""
+
+from __future__ import unicode_literals
+
+from cli_helpers.utils import filter_dict_by_key
+from cli_helpers.compat import Terminal256Formatter, Token, StringIO
+from .preprocessors import (
+ convert_to_string,
+ truncate_string,
+ override_missing_value,
+ style_output,
+ HAS_PYGMENTS,
+ escape_newlines,
+)
+
+import tabulate
+
+
+tabulate.MIN_PADDING = 0
+
+tabulate._table_formats["psql_unicode"] = tabulate.TableFormat(
+ lineabove=tabulate.Line("┌", "─", "┬", "┐"),
+ linebelowheader=tabulate.Line("├", "─", "┼", "┤"),
+ linebetweenrows=None,
+ linebelow=tabulate.Line("└", "─", "┴", "┘"),
+ headerrow=tabulate.DataRow("│", "│", "│"),
+ datarow=tabulate.DataRow("│", "│", "│"),
+ padding=1,
+ with_header_hide=None,
+)
+
+tabulate._table_formats["double"] = tabulate.TableFormat(
+ lineabove=tabulate.Line("╔", "═", "╦", "╗"),
+ linebelowheader=tabulate.Line("╠", "═", "╬", "╣"),
+ linebetweenrows=None,
+ linebelow=tabulate.Line("╚", "═", "╩", "╝"),
+ headerrow=tabulate.DataRow("║", "║", "║"),
+ datarow=tabulate.DataRow("║", "║", "║"),
+ padding=1,
+ with_header_hide=None,
+)
+
+tabulate._table_formats["ascii"] = tabulate.TableFormat(
+ lineabove=tabulate.Line("+", "-", "+", "+"),
+ linebelowheader=tabulate.Line("+", "-", "+", "+"),
+ linebetweenrows=None,
+ linebelow=tabulate.Line("+", "-", "+", "+"),
+ headerrow=tabulate.DataRow("|", "|", "|"),
+ datarow=tabulate.DataRow("|", "|", "|"),
+ padding=1,
+ with_header_hide=None,
+)
+
+tabulate._table_formats["ascii_escaped"] = tabulate.TableFormat(
+ lineabove=tabulate.Line("+", "-", "+", "+"),
+ linebelowheader=tabulate.Line("+", "-", "+", "+"),
+ linebetweenrows=None,
+ linebelow=tabulate.Line("+", "-", "+", "+"),
+ headerrow=tabulate.DataRow("|", "|", "|"),
+ datarow=tabulate.DataRow("|", "|", "|"),
+ padding=1,
+ with_header_hide=None,
+)
+
+# "minimal" is the same as "plain", but without headers
+tabulate._table_formats["minimal"] = tabulate._table_formats["plain"]
+
+tabulate.multiline_formats["psql_unicode"] = "psql_unicode"
+tabulate.multiline_formats["double"] = "double"
+tabulate.multiline_formats["ascii"] = "ascii"
+tabulate.multiline_formats["minimal"] = "minimal"
+
+supported_markup_formats = (
+ "mediawiki",
+ "html",
+ "latex",
+ "latex_booktabs",
+ "textile",
+ "moinmoin",
+ "jira",
+)
+supported_table_formats = (
+ "ascii",
+ "ascii_escaped",
+ "plain",
+ "simple",
+ "minimal",
+ "grid",
+ "fancy_grid",
+ "pipe",
+ "orgtbl",
+ "psql",
+ "psql_unicode",
+ "rst",
+ "github",
+ "double",
+)
+
+supported_formats = supported_markup_formats + supported_table_formats
+
+default_kwargs = {
+ "ascii": {"numalign": "left"},
+ "ascii_escaped": {"numalign": "left"},
+}
+headless_formats = ("minimal",)
+
+
+def get_preprocessors(format_name):
+ common_formatters = (
+ override_missing_value,
+ convert_to_string,
+ truncate_string,
+ style_output,
+ )
+
+ if tabulate.multiline_formats.get(format_name):
+ return common_formatters + (style_output_table(format_name),)
+ else:
+ return common_formatters + (escape_newlines, style_output_table(format_name))
+
+
+def style_output_table(format_name=""):
+ def style_output(
+ data,
+ headers,
+ style=None,
+ table_separator_token=Token.Output.TableSeparator,
+ **_,
+ ):
+ """Style the *table* a(e.g. bold, italic, and colors)
+
+ .. NOTE::
+ This requires the `Pygments <http://pygments.org/>`_ library to
+ be installed. You can install it with CLI Helpers as an extra::
+ $ pip install cli_helpers[styles]
+
+ Example usage::
+
+ from cli_helpers.tabular_output import tabulate_adapter
+ from pygments.style import Style
+ from pygments.token import Token
+
+ class YourStyle(Style):
+ default_style = ""
+ styles = {
+ Token.Output.TableSeparator: '#ansigray'
+ }
+
+ headers = ('First Name', 'Last Name')
+ data = [['Fred', 'Roberts'], ['George', 'Smith']]
+ style_output_table = tabulate_adapter.style_output_table('psql')
+ style_output_table(data, headers, style=CliStyle)
+
+ data, headers = style_output(data, headers, style=YourStyle)
+ output = tabulate_adapter.adapter(data, headers, style=YourStyle)
+
+ :param iterable data: An :term:`iterable` (e.g. list) of rows.
+ :param iterable headers: The column headers.
+ :param str/pygments.style.Style style: A Pygments style. You can `create
+ your own styles <https://pygments.org/docs/styles#creating-own-styles>`_.
+ :param str table_separator_token: The token type to be used for the table separator.
+ :return: data and headers.
+ :rtype: tuple
+
+ """
+ if style and HAS_PYGMENTS and format_name in supported_table_formats:
+ formatter = Terminal256Formatter(style=style)
+
+ def style_field(token, field):
+ """Get the styled text for a *field* using *token* type."""
+ s = StringIO()
+ formatter.format(((token, field),), s)
+ return s.getvalue()
+
+ def addColorInElt(elt):
+ if not elt:
+ return elt
+ if elt.__class__ == tabulate.Line:
+ return tabulate.Line(
+ *(style_field(table_separator_token, val) for val in elt)
+ )
+ if elt.__class__ == tabulate.DataRow:
+ return tabulate.DataRow(
+ *(style_field(table_separator_token, val) for val in elt)
+ )
+ return elt
+
+ srcfmt = tabulate._table_formats[format_name]
+ newfmt = tabulate.TableFormat(*(addColorInElt(val) for val in srcfmt))
+ tabulate._table_formats[format_name] = newfmt
+
+ return iter(data), headers
+
+ return style_output
+
+
+def adapter(data, headers, table_format=None, preserve_whitespace=False, **kwargs):
+ """Wrap tabulate inside a function for TabularOutputFormatter."""
+ keys = ("floatfmt", "numalign", "stralign", "showindex", "disable_numparse")
+ tkwargs = {"tablefmt": table_format}
+ tkwargs.update(filter_dict_by_key(kwargs, keys))
+
+ if table_format in supported_markup_formats:
+ tkwargs.update(numalign=None, stralign=None)
+
+ tabulate.PRESERVE_WHITESPACE = preserve_whitespace
+
+ tkwargs.update(default_kwargs.get(table_format, {}))
+ if table_format in headless_formats:
+ headers = []
+ return iter(tabulate.tabulate(data, headers, **tkwargs).split("\n"))
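
For reference, the extra table formats registered above are requested by name through the public API; a Pygments style can additionally be passed when the optional ``cli_helpers[styles]`` extra is installed (the rows below are illustrative)::

    from cli_helpers.tabular_output import format_output

    data = [["apple", 1.5], ["banana", None]]
    headers = ["fruit", "price"]

    # "psql_unicode", "double", "ascii" and "ascii_escaped" come from this
    # adapter; None is rendered as "<null>" via the registered missing_value.
    print("\n".join(format_output(data, headers, format_name="psql_unicode")))
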
diff --git a/cli_helpers/tabular_output/tsv_output_adapter.py b/cli_helpers/tabular_output/tsv_output_adapter.py
new file mode 100644
index 0000000..75518b3
--- /dev/null
+++ b/cli_helpers/tabular_output/tsv_output_adapter.py
@@ -0,0 +1,17 @@
+# -*- coding: utf-8 -*-
+"""A tsv data output adapter"""
+
+from __future__ import unicode_literals
+
+from .preprocessors import bytes_to_string, override_missing_value, convert_to_string
+from itertools import chain
+from cli_helpers.utils import replace
+
+supported_formats = ("tsv",)
+preprocessors = (override_missing_value, bytes_to_string, convert_to_string)
+
+
+def adapter(data, headers, **kwargs):
+ """Wrap the formatting inside a function for TabularOutputFormatter."""
+ for row in chain((headers,), data):
+ yield "\t".join((replace(r, (("\n", r"\n"), ("\t", r"\t"))) for r in row))
diff --git a/cli_helpers/tabular_output/vertical_table_adapter.py b/cli_helpers/tabular_output/vertical_table_adapter.py
new file mode 100644
index 0000000..0b96cb2
--- /dev/null
+++ b/cli_helpers/tabular_output/vertical_table_adapter.py
@@ -0,0 +1,67 @@
+# -*- coding: utf-8 -*-
+"""Format data into a vertical table layout."""
+
+from __future__ import unicode_literals
+
+from cli_helpers.utils import filter_dict_by_key
+from .preprocessors import convert_to_string, override_missing_value, style_output
+
+supported_formats = ("vertical",)
+preprocessors = (override_missing_value, convert_to_string, style_output)
+
+
+def _get_separator(num, sep_title, sep_character, sep_length):
+ """Get a row separator for row *num*."""
+ left_divider_length = right_divider_length = sep_length
+ if isinstance(sep_length, tuple):
+ left_divider_length, right_divider_length = sep_length
+ left_divider = sep_character * left_divider_length
+ right_divider = sep_character * right_divider_length
+ title = sep_title.format(n=num + 1)
+
+ return "{left_divider}[ {title} ]{right_divider}\n".format(
+ left_divider=left_divider, right_divider=right_divider, title=title
+ )
+
+
+def _format_row(headers, row):
+ """Format a row."""
+ formatted_row = [" | ".join(field) for field in zip(headers, row)]
+ return "\n".join(formatted_row)
+
+
+def vertical_table(
+ data, headers, sep_title="{n}. row", sep_character="*", sep_length=27
+):
+ """Format *data* and *headers* as an vertical table.
+
+ The values in *data* and *headers* must be strings.
+
+ :param iterable data: An :term:`iterable` (e.g. list) of rows.
+ :param iterable headers: The column headers.
+ :param str sep_title: The title given to each row separator. Defaults to
+ ``'{n}. row'``. Any instance of ``'{n}'`` is
+ replaced by the record number.
+ :param str sep_character: The character used to separate rows. Defaults to
+ ``'*'``.
+ :param int/tuple sep_length: The number of separator characters that should
+ appear on each side of the *sep_title*. Use
+ a tuple to specify the left and right values
+ separately.
+    :return: The formatted rows, yielded one record at a time.
+    :rtype: iterable of str
+
+ """
+ header_len = max([len(x) for x in headers])
+ padded_headers = [x.ljust(header_len) for x in headers]
+ formatted_rows = [_format_row(padded_headers, row) for row in data]
+
+ for i, result in enumerate(formatted_rows):
+ yield _get_separator(i, sep_title, sep_character, sep_length) + result
+
+
+def adapter(data, headers, **kwargs):
+ """Wrap vertical table in a function for TabularOutputFormatter."""
+ keys = ("sep_title", "sep_character", "sep_length")
+ return vertical_table(data, headers, **filter_dict_by_key(kwargs, keys))
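+
+# Usage sketch (illustrative only), matching the adapter's test suite:
+#
+#     print("\n".join(adapter([("hello", "123")], ("name", "age"))))
+#     # ***************************[ 1. row ]***************************
+#     # name | hello
+#     # age  | 123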
diff --git a/cli_helpers/utils.py b/cli_helpers/utils.py
new file mode 100644
index 0000000..053bdea
--- /dev/null
+++ b/cli_helpers/utils.py
@@ -0,0 +1,114 @@
+# -*- coding: utf-8 -*-
+"""Various utility functions and helpers."""
+
+import binascii
+import re
+from functools import lru_cache
+from typing import Dict
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+ from pygments.style import StyleMeta
+
+from cli_helpers.compat import binary_type, text_type, Terminal256Formatter, StringIO
+
+
+def bytes_to_string(b):
+ """Convert bytes *b* to a string.
+
+ Hexlify bytes that can't be decoded.
+
+ """
+ if isinstance(b, binary_type):
+ needs_hex = False
+ try:
+ result = b.decode("utf8")
+ needs_hex = not result.isprintable()
+ except UnicodeDecodeError:
+ needs_hex = True
+ if needs_hex:
+ return "0x" + binascii.hexlify(b).decode("ascii")
+ else:
+ return result
+ return b
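+
+# Examples (illustrative): bytes_to_string(b"abc") returns "abc", while bytes
+# that cannot be decoded or are not printable, such as b"\xff", come back
+# hexlified as "0xff".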
+
+
+def to_string(value):
+ """Convert *value* to a string."""
+ if isinstance(value, binary_type):
+ return bytes_to_string(value)
+ else:
+ return text_type(value)
+
+
+def truncate_string(value, max_width=None, skip_multiline_string=True):
+ """Truncate string values."""
+ if skip_multiline_string and isinstance(value, text_type) and "\n" in value:
+ return value
+ elif (
+ isinstance(value, text_type)
+ and max_width is not None
+ and len(value) > max_width
+ ):
+ return value[: max_width - 3] + "..."
+ return value
+
+
+def intlen(n):
+ """Find the length of the integer part of a number *n*."""
+ pos = n.find(".")
+ return len(n) if pos < 0 else pos
+
+
+def filter_dict_by_key(d, keys):
+ """Filter the dict *d* to remove keys not in *keys*."""
+ return {k: v for k, v in d.items() if k in keys}
+
+
+def unique_items(seq):
+ """Return the unique items from iterable *seq* (in order)."""
+ seen = set()
+ return [x for x in seq if not (x in seen or seen.add(x))]
+
+
+_ansi_re = re.compile("\033\\[((?:\\d|;)*)([a-zA-Z])")
+
+
+def strip_ansi(value):
+ """Strip the ANSI escape sequences from a string."""
+ return _ansi_re.sub("", value)
+
+
+def replace(s, replace):
+ """Replace multiple values in a string"""
+ for r in replace:
+ s = s.replace(*r)
+ return s
+
+
+@lru_cache()
+def _get_formatter(style) -> Terminal256Formatter:
+ return Terminal256Formatter(style=style)
+
+
+def style_field(token, field, style):
+ """Get the styled text for a *field* using *token* type."""
+ formatter = _get_formatter(style)
+ s = StringIO()
+ formatter.format(((token, field),), s)
+ return s.getvalue()
+
+
+def filter_style_table(style: "StyleMeta", *relevant_styles: str) -> Dict:
+ """
+    Get a dictionary of styles for the given tokens. Typical usage:
+
+ filter_style_table(style, Token.Output.EvenRow, Token.Output.OddRow) == {
+ Token.Output.EvenRow: "",
+ Token.Output.OddRow: "",
+ }
+ """
+ _styles_iter = ((key, val) for key, val in getattr(style, "styles", {}).items())
+ _relevant_styles_iter = filter(lambda tpl: tpl[0] in relevant_styles, _styles_iter)
+ return {key: val for key, val in _relevant_styles_iter}
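+
+# Usage sketches (illustrative only):
+#
+#     strip_ansi("\x1b[38;5;10mNULL\x1b[39m")    # -> "NULL"
+#     replace("a\tb", (("\t", r"\t"),))          # -> "a\\tb"
+#     intlen("123.45")                           # -> 3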
diff --git a/docs/Makefile b/docs/Makefile
new file mode 100644
index 0000000..4efb828
--- /dev/null
+++ b/docs/Makefile
@@ -0,0 +1,20 @@
+# Minimal makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS =
+SPHINXBUILD = sphinx-build
+SPHINXPROJ = CLIHelpers
+SOURCEDIR = source
+BUILDDIR = build
+
+# Put it first so that "make" without argument is like "make help".
+help:
+ @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
+
+.PHONY: help Makefile
+
+# Catch-all target: route all unknown targets to Sphinx using the new
+# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
+%: Makefile
+	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
\ No newline at end of file
diff --git a/docs/source/api.rst b/docs/source/api.rst
new file mode 100644
index 0000000..a95d314
--- /dev/null
+++ b/docs/source/api.rst
@@ -0,0 +1,23 @@
+API
+===
+
+.. automodule:: cli_helpers
+
+Tabular Output
+--------------
+
+.. automodule:: cli_helpers.tabular_output
+ :members:
+ :imported-members:
+
+Preprocessors
++++++++++++++
+
+.. automodule:: cli_helpers.tabular_output.preprocessors
+ :members:
+
+Config
+------
+
+.. automodule:: cli_helpers.config
+ :members:
diff --git a/docs/source/authors.rst b/docs/source/authors.rst
new file mode 100644
index 0000000..5078189
--- /dev/null
+++ b/docs/source/authors.rst
@@ -0,0 +1 @@
+.. include:: ../../AUTHORS
diff --git a/docs/source/changelog.rst b/docs/source/changelog.rst
new file mode 100644
index 0000000..e272c5a
--- /dev/null
+++ b/docs/source/changelog.rst
@@ -0,0 +1 @@
+.. include:: ../../CHANGELOG
diff --git a/docs/source/conf.py b/docs/source/conf.py
new file mode 100644
index 0000000..cf99ea6
--- /dev/null
+++ b/docs/source/conf.py
@@ -0,0 +1,202 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# CLI Helpers documentation build configuration file, created by
+# sphinx-quickstart on Mon Apr 17 20:26:02 2017.
+#
+# This file is execfile()d with the current directory set to its
+# containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+#
+import ast
+from collections import OrderedDict
+
+# import os
+import re
+
+# import sys
+# sys.path.insert(0, os.path.abspath('.'))
+
+
+# -- General configuration ------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+#
+# needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = ["sphinx.ext.autodoc", "sphinx.ext.intersphinx", "sphinx.ext.viewcode"]
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ["_templates"]
+
+html_sidebars = {
+ "**": [
+ "about.html",
+ "navigation.html",
+ "relations.html",
+ "searchbox.html",
+ "donate.html",
+ ]
+}
+
+# The suffix(es) of source filenames.
+# You can specify multiple suffix as a list of string:
+#
+# source_suffix = ['.rst', '.md']
+source_suffix = ".rst"
+
+# The master toctree document.
+master_doc = "index"
+
+# General information about the project.
+project = "CLI Helpers"
+author = "dbcli"
+description = "Python helpers for common CLI tasks"
+copyright = "2017, dbcli"
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+_version_re = re.compile(r"__version__\s+=\s+(.*)")
+with open("../../cli_helpers/__init__.py", "rb") as f:
+ version = str(
+ ast.literal_eval(_version_re.search(f.read().decode("utf-8")).group(1))
+ )
+
+# The full version, including alpha/beta/rc tags.
+release = version
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#
+# This is also used if you do content translation via gettext catalogs.
+# Usually you set "language" from the command line for these cases.
+language = None
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+# This patterns also effect to html_static_path and html_extra_path
+exclude_patterns = []
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = "sphinx"
+
+# If true, `todo` and `todoList` produce output, else they produce nothing.
+todo_include_todos = False
+
+
+# -- Options for HTML output ----------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+#
+html_theme = "alabaster"
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+
+nav_links = OrderedDict(
+ (
+ ("CLI Helpers at GitHub", "https://github.com/dbcli/cli_helpers"),
+ ("CLI Helpers at PyPI", "https://pypi.org/project/cli_helpers"),
+ ("Issue Tracker", "https://github.com/dbcli/cli_helpers/issues"),
+ )
+)
+
+html_theme_options = {
+ "description": description,
+ "github_user": "dbcli",
+ "github_repo": "cli_helpers",
+ "github_banner": False,
+ "github_button": False,
+ "github_type": "watch",
+ "github_count": False,
+ "extra_nav_links": nav_links,
+}
+
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ["_static"]
+
+
+# -- Options for HTMLHelp output ------------------------------------------
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = "CLIHelpersdoc"
+
+
+# -- Options for LaTeX output ---------------------------------------------
+
+latex_elements = {
+ # The paper size ('letterpaper' or 'a4paper').
+ #
+ # 'papersize': 'letterpaper',
+ # The font size ('10pt', '11pt' or '12pt').
+ #
+ # 'pointsize': '10pt',
+ # Additional stuff for the LaTeX preamble.
+ #
+ # 'preamble': '',
+ # Latex figure (float) alignment
+ #
+ # 'figure_align': 'htbp',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title,
+# author, documentclass [howto, manual, or own class]).
+latex_documents = [
+ (master_doc, "CLIHelpers.tex", "CLI Helpers Documentation", "dbcli", "manual"),
+]
+
+
+# -- Options for manual page output ---------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [(master_doc, "clihelpers", "CLI Helpers Documentation", [author], 1)]
+
+
+# -- Options for Texinfo output -------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+# dir menu entry, description, category)
+texinfo_documents = [
+ (
+ master_doc,
+ "CLIHelpers",
+ "CLI Helpers Documentation",
+ author,
+ "CLIHelpers",
+ description,
+ "Miscellaneous",
+ ),
+]
+
+
+intersphinx_mapping = {
+ "python": ("https://docs.python.org/3", None),
+ "py2": ("https://docs.python.org/2", None),
+ "pymysql": ("https://pymysql.readthedocs.io/en/latest/", None),
+ "numpy": ("https://docs.scipy.org/doc/numpy", None),
+ "configobj": ("https://configobj.readthedocs.io/en/latest", None),
+}
+
+linkcheck_ignore = ["https://github.com/psf/black.*"]
diff --git a/docs/source/contributing.rst b/docs/source/contributing.rst
new file mode 100644
index 0000000..ac7b6bc
--- /dev/null
+++ b/docs/source/contributing.rst
@@ -0,0 +1 @@
+.. include:: ../../CONTRIBUTING.rst
diff --git a/docs/source/index.rst b/docs/source/index.rst
new file mode 100644
index 0000000..09035fe
--- /dev/null
+++ b/docs/source/index.rst
@@ -0,0 +1,30 @@
+Welcome to CLI Helpers
+======================
+
+.. include:: ../../README.rst
+ :start-after: start-body
+ :end-before: end-body
+
+Installation
+------------
+You can get the library directly from `PyPI <https://pypi.org/>`_::
+
+ $ pip install cli_helpers
+
+User Guide
+----------
+.. toctree::
+ :maxdepth: 2
+
+ quickstart
+ contributing
+ changelog
+ authors
+ license
+
+API
+---
+.. toctree::
+ :maxdepth: 2
+
+ api
diff --git a/docs/source/license.rst b/docs/source/license.rst
new file mode 100644
index 0000000..caa73a3
--- /dev/null
+++ b/docs/source/license.rst
@@ -0,0 +1,13 @@
+License
+=======
+
+CLI Helpers is licensed under the BSD 3-clause license. In short, you may do
+what you like with the source code as long as you include a copy of the
+license, leave its conditions unmodified, and keep the disclaimer intact.
+You also may not use the authors' names to promote your software without
+their written consent.
+
+License Text
+++++++++++++
+
+.. include:: ../../LICENSE
diff --git a/docs/source/quickstart.rst b/docs/source/quickstart.rst
new file mode 100644
index 0000000..b304de2
--- /dev/null
+++ b/docs/source/quickstart.rst
@@ -0,0 +1,153 @@
+Quickstart
+==========
+
+Displaying Tabular Data
+-----------------------
+
+
+The Basics
+++++++++++
+
+CLI Helpers provides a simple way to display your tabular data (columns/rows) in a visually appealing manner::
+
+ >>> from cli_helpers import tabular_output
+
+ >>> data = [[1, 'Asgard', True], [2, 'Camelot', False], [3, 'El Dorado', True]]
+ >>> headers = ['id', 'city', 'visited']
+
+ >>> print("\n".join(tabular_output.format_output(iter(data), headers, format_name='simple')))
+
+ id city visited
+ ---- --------- ---------
+ 1 Asgard True
+ 2 Camelot False
+ 3 El Dorado True
+
+Let's take a look at what we did there.
+
+1. We imported the :mod:`~cli_helpers.tabular_output` module. This module gives us access to the :func:`~cli_helpers.tabular_output.format_output` function.
+
+2. Next, we define some data, plus a list of headers to give the data some context.
+
+3. We format the output using the display format ``simple``. That's a nice-looking table!
+
+
+Display Formats
++++++++++++++++
+
+To display your data, :mod:`~cli_helpers.tabular_output` uses
+`tabulate <https://bitbucket.org/astanin/python-tabulate>`_, the standard
+library's :mod:`csv` module, and its own vertical table layout.
+
+The best way to see the various display formats is to use the
+:class:`~cli_helpers.tabular_output.TabularOutputFormatter` class. This is
+what the :func:`~cli_helpers.tabular_output.format_output` function in our
+first example uses behind the scenes.
+
+Let's get a list of all the supported format names::
+
+ >>> from cli_helpers.tabular_output import TabularOutputFormatter
+ >>> formatter = TabularOutputFormatter()
+ >>> formatter.supported_formats
+ ('vertical', 'csv', 'tsv', 'mediawiki', 'html', 'latex', 'latex_booktabs', 'textile', 'moinmoin', 'jira', 'plain', 'minimal', 'simple', 'grid', 'fancy_grid', 'pipe', 'orgtbl', 'psql', 'psql_unicode', 'rst', 'ascii', 'double', 'github')
+
+You can format your data in any of those supported formats. Let's take the
+same data from our first example and put it in the ``fancy_grid`` format::
+
+ >>> data = [[1, 'Asgard', True], [2, 'Camelot', False], [3, 'El Dorado', True]]
+ >>> headers = ['id', 'city', 'visited']
+ >>> print("\n".join(formatter.format_output(iter(data), headers, format_name='fancy_grid')))
+ ╒══════╤═══════════╤═══════════╕
+ │ id │ city │ visited │
+ ╞══════╪═══════════╪═══════════╡
+ │ 1 │ Asgard │ True │
+ ├──────┼───────────┼───────────┤
+ │ 2 │ Camelot │ False │
+ ├──────┼───────────┼───────────┤
+ │ 3 │ El Dorado │ True │
+ ╘══════╧═══════════╧═══════════╛
+
+That was easy! How about CLI Helpers' vertical table layout? ::
+
+ >>> print("\n".join(formatter.format_output(iter(data), headers, format_name='vertical')))
+ ***************************[ 1. row ]***************************
+ id | 1
+ city | Asgard
+ visited | True
+ ***************************[ 2. row ]***************************
+ id | 2
+ city | Camelot
+ visited | False
+ ***************************[ 3. row ]***************************
+ id | 3
+ city | El Dorado
+ visited | True
+
+
+Default Format
+++++++++++++++
+
+When you create a :class:`~cli_helpers.tabular_output.TabularOutputFormatter`
+object, you can specify a default formatter so you don't have to pass the
+format name each time you want to format your data::
+
+ >>> formatter = TabularOutputFormatter(format_name='plain')
+ >>> print("\n".join(formatter.format_output(iter(data), headers)))
+ id city visited
+ 1 Asgard True
+ 2 Camelot False
+ 3 El Dorado True
+
+.. TIP::
+ You can get or set the default format whenever you'd like through
+ :data:`TabularOutputFormatter.format_name <cli_helpers.tabular_output.TabularOutputFormatter.format_name>`.
+
+
+Passing Options to the Formatters
++++++++++++++++++++++++++++++++++
+
+Many of the formatters have settings that can be tweaked by passing
+an optional argument when you format your data. For example,
+if we wanted to enable or disable number parsing on any of
+`tabulate's <https://bitbucket.org/astanin/python-tabulate>`_
+formats, we could::
+
+    >>> from cli_helpers.tabular_output import format_output
+    >>> data = [[1, 1.5], [2, 19.605], [3, 100.0]]
+ >>> headers = ['id', 'rating']
+ >>> print("\n".join(format_output(iter(data), headers, format_name='simple', disable_numparse=True)))
+ id rating
+ ---- --------
+ 1 1.5
+ 2 19.605
+ 3 100.0
+ >>> print("\n".join(format_output(iter(data), headers, format_name='simple', disable_numparse=False)))
+ id rating
+ ---- --------
+ 1 1.5
+ 2 19.605
+ 3 100
+
+
+Lists and tuples and bytearrays. Oh my!
++++++++++++++++++++++++++++++++++++++++
+
+:mod:`~cli_helpers.tabular_output` supports any :term:`iterable`, not just
+a :class:`list` or :class:`tuple`. You can use a :class:`range`,
+:func:`enumerate`, a :class:`str`, or even a :class:`bytearray`! Here is a
+far-fetched example to prove the point::
+
+ >>> step = 3
+ >>> data = [range(n, n + step) for n in range(0, 9, step)]
+ >>> headers = 'abc'
+ >>> print("\n".join(format_output(iter(data), headers, format_name='simple')))
+ a b c
+ --- --- ---
+ 0 1 2
+ 3 4 5
+ 6 7 8
+
+Real-life examples include a PyMySQL
+:class:`Cursor <pymysql:pymysql.cursors.Cursor>` with database results or a
+NumPy :class:`ndarray <numpy:numpy.ndarray>` of data points.
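+
+For instance, a database cursor can be passed straight through (a minimal,
+hypothetical sketch; ``cursor`` is assumed to come from your own database
+connection)::
+
+    >>> cursor.execute('SELECT id, city FROM places')
+    >>> print("\n".join(format_output(cursor, ['id', 'city'], format_name='simple')))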
diff --git a/release.py b/release.py
new file mode 100644
index 0000000..7a68271
--- /dev/null
+++ b/release.py
@@ -0,0 +1,135 @@
+#!/usr/bin/env python
+"""A script to publish a release of cli_helpers to PyPI."""
+
+import io
+from optparse import OptionParser
+import re
+import subprocess
+import sys
+
+import click
+
+DEBUG = False
+CONFIRM_STEPS = False
+DRY_RUN = False
+
+
+def skip_step():
+ """
+    Ask the user whether to run a step. Defaults to yes.
+
+    :return: True if the step should be skipped, False otherwise.
+ """
+ global CONFIRM_STEPS
+
+ if CONFIRM_STEPS:
+ return not click.confirm("--- Run this step?", default=True)
+ return False
+
+
+def run_step(*args):
+ """
+    Print the command and ask whether it should be run.
+    If yes (the default), run it.
+
+    :param args: the command and its arguments, as separate strings
+ """
+ global DRY_RUN
+
+ cmd = args
+ print(" ".join(cmd))
+ if skip_step():
+ print("--- Skipping...")
+ elif DRY_RUN:
+ print("--- Pretending to run...")
+ else:
+ subprocess.check_output(cmd)
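+
+# Example (illustrative): run_step("git", "push", "origin", "master") prints
+# the command, optionally asks for confirmation (-c/--confirm-steps), and then
+# runs it unless -d/--dry-run was given.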
+
+
+def version(version_file):
+ _version_re = re.compile(
+ r'__version__\s+=\s+(?P<quote>[\'"])(?P<version>.*)(?P=quote)'
+ )
+
+ with io.open(version_file, encoding="utf-8") as f:
+ ver = _version_re.search(f.read()).group("version")
+
+ return ver
+
+
+def commit_for_release(version_file, ver):
+ run_step("git", "reset")
+ run_step("git", "add", version_file)
+ run_step("git", "commit", "--message", "Releasing version {}".format(ver))
+
+
+def create_git_tag(tag_name):
+ run_step("git", "tag", tag_name)
+
+
+def create_distribution_files():
+ run_step("python", "setup.py", "clean", "--all", "sdist", "bdist_wheel")
+
+
+def upload_distribution_files():
+ run_step("twine", "upload", "dist/*")
+
+
+def push_to_github():
+ run_step("git", "push", "origin", "master")
+
+
+def push_tags_to_github():
+ run_step("git", "push", "--tags", "origin")
+
+
+def checklist(questions):
+ for question in questions:
+ if not click.confirm("--- {}".format(question), default=False):
+ sys.exit(1)
+
+
+if __name__ == "__main__":
+ if DEBUG:
+ subprocess.check_output = lambda x: x
+
+ checks = [
+ "Have you updated the AUTHORS file?",
+ "Have you updated the `Usage` section of the README?",
+ ]
+ checklist(checks)
+
+ ver = version("cli_helpers/__init__.py")
+ print("Releasing Version:", ver)
+
+ parser = OptionParser()
+ parser.add_option(
+ "-c",
+ "--confirm-steps",
+ action="store_true",
+ dest="confirm_steps",
+ default=False,
+        help="Confirm every step. If a step is not confirmed, it will be skipped.",
+ )
+ parser.add_option(
+ "-d",
+ "--dry-run",
+ action="store_true",
+ dest="dry_run",
+ default=False,
+ help="Print out, but not actually run any steps.",
+ )
+
+ popts, pargs = parser.parse_args()
+ CONFIRM_STEPS = popts.confirm_steps
+ DRY_RUN = popts.dry_run
+
+ if not click.confirm("Are you sure?", default=False):
+ sys.exit(1)
+
+ commit_for_release("cli_helpers/__init__.py", ver)
+ create_git_tag("v{}".format(ver))
+ create_distribution_files()
+ push_to_github()
+ push_tags_to_github()
+ upload_distribution_files()
diff --git a/requirements-dev.txt b/requirements-dev.txt
new file mode 100644
index 0000000..9b331c2
--- /dev/null
+++ b/requirements-dev.txt
@@ -0,0 +1,10 @@
+autopep8==1.3.3
+codecov==2.1.13
+coverage==4.3.4
+black>=20.8b1
+Pygments>=2.4.0
+pytest==7.4.3
+pytest-cov==2.4.0
+Sphinx==1.5.5
+tox==2.7.0
+twine==1.12.1
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 0000000..9b4f8cd
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,13 @@
+[coverage:run]
+source = cli_helpers
+omit = cli_helpers/packages/*.py
+
+[check-manifest]
+ignore =
+ appveyor.yml
+ .travis.yml
+ .github*
+ .travis*
+
+[tool:pytest]
+testpaths = tests
diff --git a/setup.py b/setup.py
new file mode 100755
index 0000000..16ecf5b
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,58 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import ast
+from io import open
+import re
+import sys
+
+from setuptools import find_packages, setup
+
+_version_re = re.compile(r"__version__\s+=\s+(.*)")
+
+with open("cli_helpers/__init__.py", "rb") as f:
+ version = str(
+ ast.literal_eval(_version_re.search(f.read().decode("utf-8")).group(1))
+ )
+
+
+def open_file(filename):
+ """Open and read the file *filename*."""
+ with open(filename) as f:
+ return f.read()
+
+
+readme = open_file("README.rst")
+
+setup(
+ name="cli_helpers",
+ author="dbcli",
+ author_email="thomas@roten.us",
+ version=version,
+ url="https://github.com/dbcli/cli_helpers",
+ packages=find_packages(exclude=["docs", "tests", "tests.tabular_output"]),
+ include_package_data=True,
+ description="Helpers for building command-line apps",
+ long_description=readme,
+ long_description_content_type="text/x-rst",
+ install_requires=[
+ "configobj >= 5.0.5",
+ "tabulate[widechars] >= 0.9.0",
+ ],
+ extras_require={
+ "styles": ["Pygments >= 1.6"],
+ },
+ python_requires=">=3.6",
+ classifiers=[
+ "Intended Audience :: Developers",
+ "License :: OSI Approved :: BSD License",
+ "Operating System :: Unix",
+ "Programming Language :: Python",
+ "Programming Language :: Python :: 3",
+ "Programming Language :: Python :: 3.6",
+ "Programming Language :: Python :: 3.7",
+ "Topic :: Software Development",
+ "Topic :: Software Development :: Libraries :: Python Modules",
+ "Topic :: Terminals :: Terminal Emulators/X Terminals",
+ ],
+)
diff --git a/tasks.py b/tasks.py
new file mode 100644
index 0000000..3a7e1e3
--- /dev/null
+++ b/tasks.py
@@ -0,0 +1,126 @@
+# -*- coding: utf-8 -*-
+"""Common development tasks for setup.py to use."""
+
+import re
+import subprocess
+import sys
+
+from setuptools import Command
+
+
+class BaseCommand(Command, object):
+ """The base command for project tasks."""
+
+ user_options = []
+
+ default_cmd_options = ("verbose", "quiet", "dry_run")
+
+ def __init__(self, *args, **kwargs):
+ super(BaseCommand, self).__init__(*args, **kwargs)
+ self.verbose = False
+
+ def initialize_options(self):
+ """Override the distutils abstract method."""
+ pass
+
+ def finalize_options(self):
+ """Override the distutils abstract method."""
+ # Distutils uses incrementing integers for verbosity.
+ self.verbose = bool(self.verbose)
+
+ def call_and_exit(self, cmd, shell=True):
+ """Run the *cmd* and exit with the proper exit code."""
+ sys.exit(subprocess.call(cmd, shell=shell))
+
+ def call_in_sequence(self, cmds, shell=True):
+ """Run multiple commmands in a row, exiting if one fails."""
+ for cmd in cmds:
+ if subprocess.call(cmd, shell=shell) == 1:
+ sys.exit(1)
+
+ def apply_options(self, cmd, options=()):
+ """Apply command-line options."""
+ for option in self.default_cmd_options + options:
+ cmd = self.apply_option(cmd, option, active=getattr(self, option, False))
+ return cmd
+
+ def apply_option(self, cmd, option, active=True):
+ """Apply a command-line option."""
+ return re.sub(
+ r"{{{}\:(?P<option>[^}}]*)}}".format(option),
+ r"\g<option>" if active else "",
+ cmd,
+ )
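+
+    # Example of the templating above (illustrative): with the command
+    # template "pytest{verbose: -v}", apply_option(cmd, "verbose", active=True)
+    # turns "{verbose: -v}" into " -v", giving "pytest -v"; when inactive, the
+    # placeholder is removed entirely, leaving just "pytest".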
+
+
+class lint(BaseCommand):
+ """A PEP 8 lint command that optionally fixes violations."""
+
+ description = "check code against PEP 8 (and fix violations)"
+
+ user_options = [
+ ("branch=", "b", "branch or revision to compare against (e.g. master)"),
+ ("fix", "f", "fix the violations in place"),
+ ]
+
+ def initialize_options(self):
+ """Set the default options."""
+ self.branch = "master"
+ self.fix = False
+ super(lint, self).initialize_options()
+
+ def run(self):
+ """Run the linter."""
+ cmd = "black ."
+ self.call_and_exit(self.apply_options(cmd, ("fix",)))
+
+
+class test(BaseCommand):
+ """Run the test suites for this project."""
+
+ description = "run the test suite"
+
+ user_options = [
+ ("all", "a", "test against all supported versions of Python"),
+ ("coverage", "c", "measure test coverage"),
+ ]
+
+ unit_test_cmd = (
+ "pytest{quiet: -q}{verbose: -v}{dry_run: --setup-only}"
+ "{coverage: --cov-report= --cov=cli_helpers}"
+ )
+ test_all_cmd = "tox{verbose: -v}{dry_run: --notest}"
+ coverage_cmd = "coverage report"
+
+ def initialize_options(self):
+ """Set the default options."""
+ self.all = False
+ self.coverage = False
+ super(test, self).initialize_options()
+
+ def run(self):
+ """Run the test suites."""
+ if self.all:
+ cmd = self.apply_options(self.test_all_cmd)
+ self.call_and_exit(cmd)
+ else:
+ cmds = (self.apply_options(self.unit_test_cmd, ("coverage",)),)
+ if self.coverage:
+ cmds += (self.apply_options(self.coverage_cmd),)
+ self.call_in_sequence(cmds)
+
+
+class docs(BaseCommand):
+ """Use Sphinx Makefile to generate documentation."""
+
+ description = "generate the Sphinx HTML documentation"
+
+ clean_docs_cmd = "make -C docs clean"
+ html_docs_cmd = "make -C docs html"
+ view_docs_cmd = "open docs/build/html/index.html"
+
+ def run(self):
+ """Generate and view the documentation."""
+ cmds = (self.clean_docs_cmd, self.html_docs_cmd, self.view_docs_cmd)
+ self.call_in_sequence(cmds)
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/__init__.py
diff --git a/tests/compat.py b/tests/compat.py
new file mode 100644
index 0000000..dfc57f3
--- /dev/null
+++ b/tests/compat.py
@@ -0,0 +1,86 @@
+# -*- coding: utf-8 -*-
+"""Python compatibility support for CLI Helpers' tests."""
+
+from __future__ import unicode_literals
+import os as _os
+import shutil as _shutil
+import tempfile as _tempfile
+import warnings as _warnings
+
+from cli_helpers.compat import PY2
+
+
+class _TempDirectory(object):
+ """Create and return a temporary directory. This has the same
+ behavior as mkdtemp but can be used as a context manager. For
+ example:
+
+ with TemporaryDirectory() as tmpdir:
+ ...
+
+ Upon exiting the context, the directory and everything contained
+ in it are removed.
+
+ NOTE: Copied from the Python 3 standard library.
+ """
+
+ # Handle mkdtemp raising an exception
+ name = None
+ _closed = False
+
+ def __init__(self, suffix="", prefix="tmp", dir=None):
+ self.name = _tempfile.mkdtemp(suffix, prefix, dir)
+
+ def __repr__(self):
+ return "<{} {!r}>".format(self.__class__.__name__, self.name)
+
+ def __enter__(self):
+ return self.name
+
+ def cleanup(self, _warn=False, _warnings=_warnings):
+ if self.name and not self._closed:
+ try:
+ _shutil.rmtree(self.name)
+ except (TypeError, AttributeError) as ex:
+ if "None" not in "%s" % (ex,):
+ raise
+ self._rmtree(self.name)
+ self._closed = True
+ if _warn and _warnings.warn:
+ _warnings.warn(
+ "Implicitly cleaning up {!r}".format(self), ResourceWarning
+ )
+
+ def __exit__(self, exc, value, tb):
+ self.cleanup()
+
+ def __del__(self):
+ # Issue a ResourceWarning if implicit cleanup needed
+ self.cleanup(_warn=True)
+
+ def _rmtree(
+ self,
+ path,
+ _OSError=OSError,
+ _sep=_os.path.sep,
+ _listdir=_os.listdir,
+ _remove=_os.remove,
+ _rmdir=_os.rmdir,
+ ):
+ # Essentially a stripped down version of shutil.rmtree. We can't
+ # use globals because they may be None'ed out at shutdown.
+ if not isinstance(path, str):
+ _sep = _sep.encode()
+ try:
+ for name in _listdir(path):
+ fullname = path + _sep + name
+ try:
+ _remove(fullname)
+ except _OSError:
+ self._rmtree(fullname)
+ _rmdir(path)
+ except _OSError:
+ pass
+
+
+TemporaryDirectory = _TempDirectory if PY2 else _tempfile.TemporaryDirectory
diff --git a/tests/config_data/configrc b/tests/config_data/configrc
new file mode 100644
index 0000000..8050b58
--- /dev/null
+++ b/tests/config_data/configrc
@@ -0,0 +1,18 @@
+# vi: ft=dosini
+# Test file comment
+
+[section]
+# Test section comment
+
+# Test field comment
+test_boolean_default = True
+
+# Test field commented out
+# Uncomment to enable
+# test_boolean = True
+
+test_string_file = '~/myfile'
+
+test_option = 'foobar✔'
+
+[section2]
diff --git a/tests/config_data/configspecrc b/tests/config_data/configspecrc
new file mode 100644
index 0000000..afa1c6d
--- /dev/null
+++ b/tests/config_data/configspecrc
@@ -0,0 +1,20 @@
+# vi: ft=dosini
+# Test file comment
+
+[section]
+# Test section comment
+
+# Test field comment
+test_boolean_default = boolean(default=True)
+
+test_boolean = boolean()
+
+# Test field commented out
+# Uncomment to enable
+# test_boolean = True
+
+test_string_file = string(default='~/myfile')
+
+test_option = option('foo', 'bar', 'foobar', 'foobar✔', default='foobar✔')
+
+[section2]
diff --git a/tests/config_data/invalid_configrc b/tests/config_data/invalid_configrc
new file mode 100644
index 0000000..8e66190
--- /dev/null
+++ b/tests/config_data/invalid_configrc
@@ -0,0 +1,18 @@
+# vi: ft=dosini
+# Test file comment
+
+[section]
+# Test section comment
+
+# Test field comment
+test_boolean_default True
+
+# Test field commented out
+# Uncomment to enable
+# test_boolean = True
+
+test_string_file = '~/myfile'
+
+test_option = 'foobar✔'
+
+[section2]
diff --git a/tests/config_data/invalid_configspecrc b/tests/config_data/invalid_configspecrc
new file mode 100644
index 0000000..d405e52
--- /dev/null
+++ b/tests/config_data/invalid_configspecrc
@@ -0,0 +1,20 @@
+# vi: ft=dosini
+# Test file comment
+
+[section]
+# Test section comment
+
+# Test field comment
+test_boolean_default = boolean(default=True)
+
+test_boolean = bool(default=False)
+
+# Test field commented out
+# Uncomment to enable
+# test_boolean = True
+
+test_string_file = string(default='~/myfile')
+
+test_option = option('foo', 'bar', 'foobar', 'foobar✔', default='foobar✔')
+
+[section2]
diff --git a/tests/tabular_output/__init__.py b/tests/tabular_output/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/tabular_output/__init__.py
diff --git a/tests/tabular_output/test_delimited_output_adapter.py b/tests/tabular_output/test_delimited_output_adapter.py
new file mode 100644
index 0000000..86a622e
--- /dev/null
+++ b/tests/tabular_output/test_delimited_output_adapter.py
@@ -0,0 +1,55 @@
+# -*- coding: utf-8 -*-
+"""Test the delimited output adapter."""
+
+from __future__ import unicode_literals
+from textwrap import dedent
+
+import pytest
+
+from cli_helpers.tabular_output import delimited_output_adapter
+
+
+def test_csv_wrapper():
+ """Test the delimited output adapter."""
+ # Test comma-delimited output.
+ data = [["abc", "1"], ["d", "456"]]
+ headers = ["letters", "number"]
+ output = delimited_output_adapter.adapter(iter(data), headers, dialect="unix")
+ assert "\n".join(output) == dedent(
+ '''\
+ "letters","number"\n\
+ "abc","1"\n\
+ "d","456"'''
+ )
+
+ # Test tab-delimited output.
+ data = [["abc", "1"], ["d", "456"]]
+ headers = ["letters", "number"]
+ output = delimited_output_adapter.adapter(
+ iter(data), headers, table_format="csv-tab", dialect="unix"
+ )
+ assert "\n".join(output) == dedent(
+ '''\
+ "letters"\t"number"\n\
+ "abc"\t"1"\n\
+ "d"\t"456"'''
+ )
+
+ with pytest.raises(ValueError):
+ output = delimited_output_adapter.adapter(
+ iter(data), headers, table_format="foobar"
+ )
+ list(output)
+
+
+def test_unicode_with_csv():
+ """Test that the csv wrapper can handle non-ascii characters."""
+ data = [["观音", "1"], ["Ποσειδῶν", "456"]]
+ headers = ["letters", "number"]
+ output = delimited_output_adapter.adapter(data, headers)
+ assert "\n".join(output) == dedent(
+ """\
+ letters,number\n\
+ 观音,1\n\
+ Ποσειδῶν,456"""
+ )
diff --git a/tests/tabular_output/test_output_formatter.py b/tests/tabular_output/test_output_formatter.py
new file mode 100644
index 0000000..8e1fa92
--- /dev/null
+++ b/tests/tabular_output/test_output_formatter.py
@@ -0,0 +1,271 @@
+# -*- coding: utf-8 -*-
+"""Test the generic output formatter interface."""
+
+from __future__ import unicode_literals
+from decimal import Decimal
+from textwrap import dedent
+
+import pytest
+
+from cli_helpers.tabular_output import format_output, TabularOutputFormatter
+from cli_helpers.compat import binary_type, text_type
+from cli_helpers.utils import strip_ansi
+
+
+def test_tabular_output_formatter():
+ """Test the TabularOutputFormatter class."""
+ headers = ["text", "numeric"]
+ data = [
+ ["abc", Decimal(1)],
+ ["defg", Decimal("11.1")],
+ ["hi", Decimal("1.1")],
+ ["Pablo\rß\n", 0],
+ ]
+ expected = dedent(
+ """\
+ +-------+---------+
+ | text | numeric |
+ +-------+---------+
+ | abc | 1 |
+ | defg | 11.1 |
+ | hi | 1.1 |
+ | Pablo | 0 |
+ | ß | |
+ +-------+---------+"""
+ )
+
+ print(expected)
+ print(
+ "\n".join(
+ TabularOutputFormatter().format_output(
+ iter(data), headers, format_name="ascii"
+ )
+ )
+ )
+ assert expected == "\n".join(
+ TabularOutputFormatter().format_output(iter(data), headers, format_name="ascii")
+ )
+
+
+def test_tabular_output_escaped():
+ """Test the ascii_escaped output format."""
+ headers = ["text", "numeric"]
+ data = [
+ ["abc", Decimal(1)],
+ ["defg", Decimal("11.1")],
+ ["hi", Decimal("1.1")],
+ ["Pablo\rß\n", 0],
+ ]
+ expected = dedent(
+ """\
+ +------------+---------+
+ | text | numeric |
+ +------------+---------+
+ | abc | 1 |
+ | defg | 11.1 |
+ | hi | 1.1 |
+ | Pablo\\rß\\n | 0 |
+ +------------+---------+"""
+ )
+
+ print(expected)
+ print(
+ "\n".join(
+ TabularOutputFormatter().format_output(
+ iter(data), headers, format_name="ascii_escaped"
+ )
+ )
+ )
+ assert expected == "\n".join(
+ TabularOutputFormatter().format_output(
+ iter(data), headers, format_name="ascii_escaped"
+ )
+ )
+
+
+def test_tabular_format_output_wrapper():
+ """Test the format_output wrapper."""
+ data = [["1", None], ["2", "Sam"], ["3", "Joe"]]
+ headers = ["id", "name"]
+ expected = dedent(
+ """\
+ +----+------+
+ | id | name |
+ +----+------+
+ | 1 | N/A |
+ | 2 | Sam |
+ | 3 | Joe |
+ +----+------+"""
+ )
+
+ assert expected == "\n".join(
+ format_output(iter(data), headers, format_name="ascii", missing_value="N/A")
+ )
+
+
+def test_additional_preprocessors():
+ """Test that additional preprocessors are run."""
+
+ def hello_world(data, headers, **_):
+ def hello_world_data(data):
+ for row in data:
+ for i, value in enumerate(row):
+ if value == "hello":
+ row[i] = "{}, world".format(value)
+ yield row
+
+ return hello_world_data(data), headers
+
+ data = [["foo", None], ["hello!", "hello"]]
+ headers = "ab"
+
+ expected = dedent(
+ """\
+ +--------+--------------+
+ | a | b |
+ +--------+--------------+
+ | foo | hello |
+ | hello! | hello, world |
+ +--------+--------------+"""
+ )
+
+ assert expected == "\n".join(
+ TabularOutputFormatter().format_output(
+ iter(data),
+ headers,
+ format_name="ascii",
+ preprocessors=(hello_world,),
+ missing_value="hello",
+ )
+ )
+
+
+def test_format_name_attribute():
+ """Test the the format_name attribute be set and retrieved."""
+ formatter = TabularOutputFormatter(format_name="plain")
+ assert formatter.format_name == "plain"
+ formatter.format_name = "simple"
+ assert formatter.format_name == "simple"
+
+ with pytest.raises(ValueError):
+ formatter.format_name = "foobar"
+
+
+def test_headless_tabulate_format():
+ """Test that a headless formatter doesn't display headers"""
+ formatter = TabularOutputFormatter(format_name="minimal")
+ headers = ["text", "numeric"]
+ data = [["a"], ["b"], ["c"]]
+ expected = "a\nb\nc"
+ assert expected == "\n".join(
+ TabularOutputFormatter().format_output(
+ iter(data),
+ headers,
+ format_name="minimal",
+ )
+ )
+
+
+def test_unsupported_format():
+ """Test that TabularOutputFormatter rejects unknown formats."""
+ formatter = TabularOutputFormatter()
+
+ with pytest.raises(ValueError):
+ formatter.format_name = "foobar"
+
+ with pytest.raises(ValueError):
+ formatter.format_output((), (), format_name="foobar")
+
+
+def test_tabulate_ansi_escape_in_default_value():
+ """Test that ANSI escape codes work with tabulate."""
+
+ data = [["1", None], ["2", "Sam"], ["3", "Joe"]]
+ headers = ["id", "name"]
+
+ styled = format_output(
+ iter(data),
+ headers,
+ format_name="psql",
+ missing_value="\x1b[38;5;10mNULL\x1b[39m",
+ )
+ unstyled = format_output(
+ iter(data), headers, format_name="psql", missing_value="NULL"
+ )
+
+ stripped_styled = [strip_ansi(s) for s in styled]
+
+ assert list(unstyled) == stripped_styled
+
+
+def test_get_type():
+ """Test that _get_type returns the expected type."""
+ formatter = TabularOutputFormatter()
+
+ tests = (
+ (1, int),
+ (2.0, float),
+ (b"binary", binary_type),
+ ("text", text_type),
+ (None, type(None)),
+ ((), text_type),
+ )
+
+ for value, data_type in tests:
+ assert data_type is formatter._get_type(value)
+
+
+def test_provide_column_types():
+ """Test that provided column types are passed to preprocessors."""
+ expected_column_types = (bool, float)
+ data = ((1, 1.0), (0, 2))
+ headers = ("a", "b")
+
+ def preprocessor(data, headers, column_types=(), **_):
+ assert expected_column_types == column_types
+ return data, headers
+
+ format_output(
+ data,
+ headers,
+ "csv",
+ column_types=expected_column_types,
+ preprocessors=(preprocessor,),
+ )
+
+
+def test_enforce_iterable():
+ """Test that all output formatters accept iterable"""
+ formatter = TabularOutputFormatter()
+ loremipsum = (
+ "lorem ipsum dolor sit amet consectetur adipiscing elit sed do eiusmod".split(
+ " "
+ )
+ )
+
+ for format_name in formatter.supported_formats:
+ formatter.format_name = format_name
+ try:
+ formatted = next(formatter.format_output(zip(loremipsum), ["lorem"]))
+ except TypeError:
+ assert False, "{0} doesn't return iterable".format(format_name)
+
+
+@pytest.mark.parametrize(
+ "extra_kwargs",
+ [
+ {},
+ {"style": "default"},
+ {"style": "colorful"},
+ ],
+)
+def test_all_text_type(extra_kwargs):
+ """Test the TabularOutputFormatter class."""
+ data = [[1, "", None, Decimal(2)]]
+ headers = ["col1", "col2", "col3", "col4"]
+ output_formatter = TabularOutputFormatter()
+ for format_name in output_formatter.supported_formats:
+ for row in output_formatter.format_output(
+ iter(data), headers, format_name=format_name, **extra_kwargs
+ ):
+ assert isinstance(row, text_type), "not unicode for {}".format(format_name)
diff --git a/tests/tabular_output/test_preprocessors.py b/tests/tabular_output/test_preprocessors.py
new file mode 100644
index 0000000..e428bfa
--- /dev/null
+++ b/tests/tabular_output/test_preprocessors.py
@@ -0,0 +1,350 @@
+# -*- coding: utf-8 -*-
+"""Test CLI Helpers' tabular output preprocessors."""
+
+from __future__ import unicode_literals
+from decimal import Decimal
+
+import pytest
+
+from cli_helpers.compat import HAS_PYGMENTS
+from cli_helpers.tabular_output.preprocessors import (
+ align_decimals,
+ bytes_to_string,
+ convert_to_string,
+ quote_whitespaces,
+ override_missing_value,
+ override_tab_value,
+ style_output,
+ format_numbers,
+)
+
+if HAS_PYGMENTS:
+ from pygments.style import Style
+ from pygments.token import Token
+
+import inspect
+import cli_helpers.tabular_output.preprocessors
+import types
+
+
+def test_convert_to_string():
+ """Test the convert_to_string() function."""
+ data = [[1, "John"], [2, "Jill"]]
+ headers = [0, "name"]
+ expected = ([["1", "John"], ["2", "Jill"]], ["0", "name"])
+ results = convert_to_string(data, headers)
+
+ assert expected == (list(results[0]), results[1])
+
+
+def test_override_missing_values():
+ """Test the override_missing_values() function."""
+ data = [[1, None], [2, "Jill"]]
+ headers = [0, "name"]
+ expected = ([[1, "<EMPTY>"], [2, "Jill"]], [0, "name"])
+ results = override_missing_value(data, headers, missing_value="<EMPTY>")
+
+ assert expected == (list(results[0]), results[1])
+
+
+@pytest.mark.skipif(not HAS_PYGMENTS, reason="requires the Pygments library")
+def test_override_missing_value_with_style():
+ """Test that *override_missing_value()* styles output."""
+
+ class NullStyle(Style):
+ styles = {Token.Output.Null: "#0f0"}
+
+ headers = ["h1", "h2"]
+ data = [[None, "2"], ["abc", None]]
+
+ expected_headers = ["h1", "h2"]
+ expected_data = [
+ ["\x1b[38;5;10m<null>\x1b[39m", "2"],
+ ["abc", "\x1b[38;5;10m<null>\x1b[39m"],
+ ]
+ results = override_missing_value(
+ data, headers, style=NullStyle, missing_value="<null>"
+ )
+
+ assert (expected_data, expected_headers) == (list(results[0]), results[1])
+
+
+def test_override_tab_value():
+ """Test the override_tab_value() function."""
+ data = [[1, "\tJohn"], [2, "Jill"]]
+ headers = ["id", "name"]
+ expected = ([[1, " John"], [2, "Jill"]], ["id", "name"])
+ results = override_tab_value(data, headers)
+
+ assert expected == (list(results[0]), results[1])
+
+
+def test_bytes_to_string():
+ """Test the bytes_to_string() function."""
+ data = [[1, "John"], [2, b"Jill"]]
+ headers = [0, "name"]
+ expected = ([[1, "John"], [2, "Jill"]], [0, "name"])
+ results = bytes_to_string(data, headers)
+
+ assert expected == (list(results[0]), results[1])
+
+
+def test_align_decimals():
+ """Test the align_decimals() function."""
+ data = [[Decimal("200"), Decimal("1")], [Decimal("1.00002"), Decimal("1.0")]]
+ headers = ["num1", "num2"]
+ column_types = (float, float)
+ expected = ([["200", "1"], [" 1.00002", "1.0"]], ["num1", "num2"])
+ results = align_decimals(data, headers, column_types=column_types)
+
+ assert expected == (list(results[0]), results[1])
+
+
+def test_align_decimals_empty_result():
+ """Test align_decimals() with no results."""
+ data = []
+ headers = ["num1", "num2"]
+ column_types = ()
+ expected = ([], ["num1", "num2"])
+ results = align_decimals(data, headers, column_types=column_types)
+
+ assert expected == (list(results[0]), results[1])
+
+
+def test_align_decimals_non_decimals():
+ """Test align_decimals() with non-decimals."""
+ data = [[Decimal("200.000"), Decimal("1.000")], [None, None]]
+ headers = ["num1", "num2"]
+ column_types = (float, float)
+ expected = ([["200.000", "1.000"], [None, None]], ["num1", "num2"])
+ results = align_decimals(data, headers, column_types=column_types)
+
+ assert expected == (list(results[0]), results[1])
+
+
+def test_quote_whitespaces():
+ """Test the quote_whitespaces() function."""
+ data = [[" before", "after "], [" both ", "none"]]
+ headers = ["h1", "h2"]
+ expected = ([["' before'", "'after '"], ["' both '", "'none'"]], ["h1", "h2"])
+ results = quote_whitespaces(data, headers)
+
+ assert expected == (list(results[0]), results[1])
+
+
+def test_quote_whitespaces_empty_result():
+ """Test the quote_whitespaces() function with no results."""
+ data = []
+ headers = ["h1", "h2"]
+ expected = ([], ["h1", "h2"])
+ results = quote_whitespaces(data, headers)
+
+ assert expected == (list(results[0]), results[1])
+
+
+def test_quote_whitespaces_non_spaces():
+ """Test the quote_whitespaces() function with non-spaces."""
+ data = [["\tbefore", "after \r"], ["\n both ", "none"]]
+ headers = ["h1", "h2"]
+ expected = ([["'\tbefore'", "'after \r'"], ["'\n both '", "'none'"]], ["h1", "h2"])
+ results = quote_whitespaces(data, headers)
+
+ assert expected == (list(results[0]), results[1])
+
+
+@pytest.mark.skipif(not HAS_PYGMENTS, reason="requires the Pygments library")
+def test_style_output_no_styles():
+ """Test that *style_output()* does not style without styles."""
+ headers = ["h1", "h2"]
+ data = [["1", "2"], ["a", "b"]]
+ results = style_output(data, headers)
+
+ assert (data, headers) == (list(results[0]), results[1])
+
+
+@pytest.mark.skipif(HAS_PYGMENTS, reason="requires the Pygments library be missing")
+def test_style_output_no_pygments():
+ """Test that *style_output()* does not try to style without Pygments."""
+ headers = ["h1", "h2"]
+ data = [["1", "2"], ["a", "b"]]
+ results = style_output(data, headers)
+
+ assert (data, headers) == (list(results[0]), results[1])
+
+
+@pytest.mark.skipif(not HAS_PYGMENTS, reason="requires the Pygments library")
+def test_style_output():
+ """Test that *style_output()* styles output."""
+
+ class CliStyle(Style):
+ default_style = ""
+ styles = {
+ Token.Output.Header: "bold ansibrightred",
+ Token.Output.OddRow: "bg:#eee #111",
+ Token.Output.EvenRow: "#0f0",
+ }
+
+ headers = ["h1", "h2"]
+ data = [["观音", "2"], ["Ποσειδῶν", "b"]]
+
+ expected_headers = ["\x1b[91;01mh1\x1b[39;00m", "\x1b[91;01mh2\x1b[39;00m"]
+ expected_data = [
+ ["\x1b[38;5;233;48;5;7m观音\x1b[39;49m", "\x1b[38;5;233;48;5;7m2\x1b[39;49m"],
+ ["\x1b[38;5;10mΠοσειδῶν\x1b[39m", "\x1b[38;5;10mb\x1b[39m"],
+ ]
+ results = style_output(data, headers, style=CliStyle)
+
+ assert (expected_data, expected_headers) == (list(results[0]), results[1])
+
+
+@pytest.mark.skipif(not HAS_PYGMENTS, reason="requires the Pygments library")
+def test_style_output_with_newlines():
+ """Test that *style_output()* styles output with newlines in it."""
+
+ class CliStyle(Style):
+ default_style = ""
+ styles = {
+ Token.Output.Header: "bold ansibrightred",
+ Token.Output.OddRow: "bg:#eee #111",
+ Token.Output.EvenRow: "#0f0",
+ }
+
+ headers = ["h1", "h2"]
+ data = [["观音\nLine2", "Ποσειδῶν"]]
+
+ expected_headers = ["\x1b[91;01mh1\x1b[39;00m", "\x1b[91;01mh2\x1b[39;00m"]
+ expected_data = [
+ [
+ "\x1b[38;5;233;48;5;7m观音\x1b[39;49m\n\x1b[38;5;233;48;5;7m"
+ "Line2\x1b[39;49m",
+ "\x1b[38;5;233;48;5;7mΠοσειδῶν\x1b[39;49m",
+ ]
+ ]
+ results = style_output(data, headers, style=CliStyle)
+
+ assert (expected_data, expected_headers) == (list(results[0]), results[1])
+
+
+@pytest.mark.skipif(not HAS_PYGMENTS, reason="requires the Pygments library")
+def test_style_output_custom_tokens():
+ """Test that *style_output()* styles output with custom token names."""
+
+ class CliStyle(Style):
+ default_style = ""
+ styles = {
+ Token.Results.Headers: "bold ansibrightred",
+ Token.Results.OddRows: "bg:#eee #111",
+ Token.Results.EvenRows: "#0f0",
+ }
+
+ headers = ["h1", "h2"]
+ data = [["1", "2"], ["a", "b"]]
+
+ expected_headers = ["\x1b[91;01mh1\x1b[39;00m", "\x1b[91;01mh2\x1b[39;00m"]
+ expected_data = [
+ ["\x1b[38;5;233;48;5;7m1\x1b[39;49m", "\x1b[38;5;233;48;5;7m2\x1b[39;49m"],
+ ["\x1b[38;5;10ma\x1b[39m", "\x1b[38;5;10mb\x1b[39m"],
+ ]
+
+ output = style_output(
+ data,
+ headers,
+ style=CliStyle,
+ header_token=Token.Results.Headers,
+ odd_row_token=Token.Results.OddRows,
+ even_row_token=Token.Results.EvenRows,
+ )
+
+ assert (expected_data, expected_headers) == (list(output[0]), output[1])
+
+
+def test_format_integer():
+ """Test formatting for an INTEGER datatype."""
+ data = [[1], [1000], [1000000]]
+ headers = ["h1"]
+ result_data, result_headers = format_numbers(
+ data, headers, column_types=(int,), integer_format=",", float_format=","
+ )
+
+ expected = [["1"], ["1,000"], ["1,000,000"]]
+ assert expected == list(result_data)
+ assert headers == result_headers
+
+
+def test_format_decimal():
+ """Test formatting for a DECIMAL(12, 4) datatype."""
+ data = [[Decimal("1.0000")], [Decimal("1000.0000")], [Decimal("1000000.0000")]]
+ headers = ["h1"]
+ result_data, result_headers = format_numbers(
+ data, headers, column_types=(float,), integer_format=",", float_format=","
+ )
+
+ expected = [["1.0000"], ["1,000.0000"], ["1,000,000.0000"]]
+ assert expected == list(result_data)
+ assert headers == result_headers
+
+
+def test_format_float():
+ """Test formatting for a REAL datatype."""
+ data = [[1.0], [1000.0], [1000000.0]]
+ headers = ["h1"]
+ result_data, result_headers = format_numbers(
+ data, headers, column_types=(float,), integer_format=",", float_format=","
+ )
+ expected = [["1.0"], ["1,000.0"], ["1,000,000.0"]]
+ assert expected == list(result_data)
+ assert headers == result_headers
+
+
+def test_format_integer_only():
+ """Test that providing one format string works."""
+ data = [[1, 1.0], [1000, 1000.0], [1000000, 1000000.0]]
+ headers = ["h1", "h2"]
+ result_data, result_headers = format_numbers(
+ data, headers, column_types=(int, float), integer_format=","
+ )
+
+ expected = [["1", 1.0], ["1,000", 1000.0], ["1,000,000", 1000000.0]]
+ assert expected == list(result_data)
+ assert headers == result_headers
+
+
+def test_format_numbers_no_format_strings():
+ """Test that numbers aren't formatted without format strings."""
+ data = ((1), (1000), (1000000))
+ headers = ("h1",)
+ result_data, result_headers = format_numbers(data, headers, column_types=(int,))
+ assert list(data) == list(result_data)
+ assert headers == result_headers
+
+
+def test_format_numbers_no_column_types():
+ """Test that numbers aren't formatted without column types."""
+ data = ((1), (1000), (1000000))
+ headers = ("h1",)
+ result_data, result_headers = format_numbers(
+ data, headers, integer_format=",", float_format=","
+ )
+ assert list(data) == list(result_data)
+ assert headers == result_headers
+
+
+def test_enforce_iterable():
+ preprocessors = inspect.getmembers(
+ cli_helpers.tabular_output.preprocessors, inspect.isfunction
+ )
+ loremipsum = (
+ "lorem ipsum dolor sit amet consectetur adipiscing elit sed do eiusmod".split(
+ " "
+ )
+ )
+ for name, preprocessor in preprocessors:
+ preprocessed = preprocessor(zip(loremipsum), ["lorem"], column_types=(str,))
+ try:
+ first = next(preprocessed[0])
+ except StopIteration:
+ assert False, "{} gives no output with iterator data".format(name)
+ except TypeError:
+ assert False, "{} doesn't return iterable".format(name)
+ if isinstance(preprocessed[1], types.GeneratorType):
+ assert False, "{} returns headers as iterator".format(name)
diff --git a/tests/tabular_output/test_tabulate_adapter.py b/tests/tabular_output/test_tabulate_adapter.py
new file mode 100644
index 0000000..6e7c7db
--- /dev/null
+++ b/tests/tabular_output/test_tabulate_adapter.py
@@ -0,0 +1,113 @@
+# -*- coding: utf-8 -*-
+"""Test the tabulate output adapter."""
+
+from __future__ import unicode_literals
+from textwrap import dedent
+
+import pytest
+
+from cli_helpers.compat import HAS_PYGMENTS
+from cli_helpers.tabular_output import tabulate_adapter
+
+if HAS_PYGMENTS:
+ from pygments.style import Style
+ from pygments.token import Token
+
+
+def test_tabulate_wrapper():
+ """Test the *output_formatter.tabulate_wrapper()* function."""
+ data = [["abc", 1], ["d", 456]]
+ headers = ["letters", "number"]
+ output = tabulate_adapter.adapter(iter(data), headers, table_format="psql")
+ assert "\n".join(output) == dedent(
+ """\
+ +---------+--------+
+ | letters | number |
+ |---------+--------|
+ | abc | 1 |
+ | d | 456 |
+ +---------+--------+"""
+ )
+
+ data = [["abc", 1], ["d", 456]]
+ headers = ["letters", "number"]
+ output = tabulate_adapter.adapter(iter(data), headers, table_format="psql_unicode")
+ assert "\n".join(output) == dedent(
+ """\
+ ┌─────────┬────────┐
+ │ letters │ number │
+ ├─────────┼────────┤
+ │ abc │ 1 │
+ │ d │ 456 │
+ └─────────┴────────┘"""
+ )
+
+ data = [["{1,2,3}", "{{1,2},{3,4}}", "{å,魚,текст}"], ["{}", "<null>", "{<null>}"]]
+ headers = ["bigint_array", "nested_numeric_array", "配列"]
+ output = tabulate_adapter.adapter(iter(data), headers, table_format="psql")
+ assert "\n".join(output) == dedent(
+ """\
+ +--------------+----------------------+--------------+
+ | bigint_array | nested_numeric_array | 配列 |
+ |--------------+----------------------+--------------|
+ | {1,2,3} | {{1,2},{3,4}} | {å,魚,текст} |
+ | {} | <null> | {<null>} |
+ +--------------+----------------------+--------------+"""
+ )
+
+
+def test_markup_format():
+ """Test that markup formats do not have number align or string align."""
+ data = [["abc", 1], ["d", 456]]
+ headers = ["letters", "number"]
+ output = tabulate_adapter.adapter(iter(data), headers, table_format="mediawiki")
+ assert "\n".join(output) == dedent(
+ """\
+ {| class="wikitable" style="text-align: left;"
+ |+ <!-- caption -->
+ |-
+ ! letters !! number
+ |-
+ | abc || 1
+ |-
+ | d || 456
+ |}"""
+ )
+
+
+@pytest.mark.skipif(not HAS_PYGMENTS, reason="requires the Pygments library")
+def test_style_output_table():
+ """Test that *style_output_table()* styles the output table."""
+
+ class CliStyle(Style):
+ default_style = ""
+ styles = {
+ Token.Output.TableSeparator: "ansibrightred",
+ }
+
+ headers = ["h1", "h2"]
+ data = [["观音", "2"], ["Ποσειδῶν", "b"]]
+ style_output_table = tabulate_adapter.style_output_table("psql")
+
+ style_output_table(data, headers, style=CliStyle)
+ output = tabulate_adapter.adapter(iter(data), headers, table_format="psql")
+ PLUS = "\x1b[91m+\x1b[39m"
+ MINUS = "\x1b[91m-\x1b[39m"
+ PIPE = "\x1b[91m|\x1b[39m"
+
+ expected = (
+ dedent(
+ """\
+ +----------+----+
+ | h1 | h2 |
+ |----------+----|
+ | 观音 | 2 |
+ | Ποσειδῶν | b |
+ +----------+----+"""
+ )
+ .replace("+", PLUS)
+ .replace("-", MINUS)
+ .replace("|", PIPE)
+ )
+
+ assert "\n".join(output) == expected
diff --git a/tests/tabular_output/test_tsv_output_adapter.py b/tests/tabular_output/test_tsv_output_adapter.py
new file mode 100644
index 0000000..9249d87
--- /dev/null
+++ b/tests/tabular_output/test_tsv_output_adapter.py
@@ -0,0 +1,36 @@
+# -*- coding: utf-8 -*-
+"""Test the tsv delimited output adapter."""
+
+from __future__ import unicode_literals
+from textwrap import dedent
+
+import pytest
+
+from cli_helpers.tabular_output import tsv_output_adapter
+
+
+def test_tsv_wrapper():
+ """Test the tsv output adapter."""
+ # Test tab-delimited output.
+ data = [["ab\r\nc", "1"], ["d", "456"]]
+ headers = ["letters", "number"]
+ output = tsv_output_adapter.adapter(iter(data), headers, table_format="tsv")
+ assert "\n".join(output) == dedent(
+ """\
+ letters\tnumber\n\
+ ab\r\\nc\t1\n\
+ d\t456"""
+ )
+
+
+def test_unicode_with_tsv():
+ """Test that the tsv wrapper can handle non-ascii characters."""
+ data = [["观音", "1"], ["Ποσειδῶν", "456"]]
+ headers = ["letters", "number"]
+ output = tsv_output_adapter.adapter(data, headers)
+ assert "\n".join(output) == dedent(
+ """\
+ letters\tnumber\n\
+ 观音\t1\n\
+ Ποσειδῶν\t456"""
+ )
diff --git a/tests/tabular_output/test_vertical_table_adapter.py b/tests/tabular_output/test_vertical_table_adapter.py
new file mode 100644
index 0000000..359d9d9
--- /dev/null
+++ b/tests/tabular_output/test_vertical_table_adapter.py
@@ -0,0 +1,49 @@
+# -*- coding: utf-8 -*-
+"""Test the vertical table formatter."""
+
+from textwrap import dedent
+
+from cli_helpers.compat import text_type
+from cli_helpers.tabular_output import vertical_table_adapter
+
+
+def test_vertical_table():
+ """Test the default settings for vertical_table()."""
+ results = [("hello", text_type(123)), ("world", text_type(456))]
+
+ expected = dedent(
+ """\
+ ***************************[ 1. row ]***************************
+ name | hello
+ age | 123
+ ***************************[ 2. row ]***************************
+ name | world
+ age | 456"""
+ )
+ assert expected == "\n".join(
+ vertical_table_adapter.adapter(results, ("name", "age"))
+ )
+
+
+def test_vertical_table_customized():
+ """Test customized settings for vertical_table()."""
+ results = [("john", text_type(47)), ("jill", text_type(50))]
+
+ expected = dedent(
+ """\
+ -[ PERSON 1 ]-----
+ name | john
+ age | 47
+ -[ PERSON 2 ]-----
+ name | jill
+ age | 50"""
+ )
+ assert expected == "\n".join(
+ vertical_table_adapter.adapter(
+ results,
+ ("name", "age"),
+ sep_title="PERSON {n}",
+ sep_character="-",
+ sep_length=(1, 5),
+ )
+ )
diff --git a/tests/test_cli_helpers.py b/tests/test_cli_helpers.py
new file mode 100644
index 0000000..50b8c81
--- /dev/null
+++ b/tests/test_cli_helpers.py
@@ -0,0 +1,5 @@
+import cli_helpers
+
+
+def test_cli_helpers():
+ assert True
diff --git a/tests/test_config.py b/tests/test_config.py
new file mode 100644
index 0000000..131bc8c
--- /dev/null
+++ b/tests/test_config.py
@@ -0,0 +1,293 @@
+# -*- coding: utf-8 -*-
+"""Test the cli_helpers.config module."""
+
+from __future__ import unicode_literals
+import os
+
+from unittest.mock import MagicMock
+import pytest
+
+from cli_helpers.compat import MAC, text_type, WIN
+from cli_helpers.config import (
+ Config,
+ DefaultConfigValidationError,
+ get_system_config_dirs,
+ get_user_config_dir,
+ _pathify,
+)
+from .utils import with_temp_dir
+
+APP_NAME, APP_AUTHOR = "Test", "Acme"
+TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), "config_data")
+DEFAULT_CONFIG = {
+ "section": {
+ "test_boolean_default": "True",
+ "test_string_file": "~/myfile",
+ "test_option": "foobar✔",
+ },
+ "section2": {},
+}
+DEFAULT_VALID_CONFIG = {
+ "section": {
+ "test_boolean_default": True,
+ "test_string_file": "~/myfile",
+ "test_option": "foobar✔",
+ },
+ "section2": {},
+}
+
+
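+# Helper: returns a Config whose user_config_file() is mocked to point inside
+# ``temp_dir``, so tests read and write a throwaway file instead of touching
+# the real per-user configuration.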
+def _mocked_user_config(temp_dir, *args, **kwargs):
+ config = Config(*args, **kwargs)
+ config.user_config_file = MagicMock(
+ return_value=os.path.join(temp_dir, config.filename)
+ )
+ return config
+
+
+def test_user_config_dir():
+ """Test that the config directory is a string with the app name in it."""
+ if "XDG_CONFIG_HOME" in os.environ:
+ del os.environ["XDG_CONFIG_HOME"]
+ config_dir = get_user_config_dir(APP_NAME, APP_AUTHOR)
+ assert isinstance(config_dir, text_type)
+ assert config_dir.endswith(APP_NAME) or config_dir.endswith(_pathify(APP_NAME))
+
+
+def test_sys_config_dirs():
+ """Test that the sys config directories are returned correctly."""
+ if "XDG_CONFIG_DIRS" in os.environ:
+ del os.environ["XDG_CONFIG_DIRS"]
+ config_dirs = get_system_config_dirs(APP_NAME, APP_AUTHOR)
+ assert isinstance(config_dirs, list)
+ assert config_dirs[0].endswith(APP_NAME) or config_dirs[0].endswith(
+ _pathify(APP_NAME)
+ )
+
+
+@pytest.mark.skipif(not WIN, reason="requires Windows")
+def test_windows_user_config_dir_no_roaming():
+ """Test that Windows returns the user config directory without roaming."""
+ config_dir = get_user_config_dir(APP_NAME, APP_AUTHOR, roaming=False)
+ assert isinstance(config_dir, text_type)
+ assert config_dir.endswith(APP_NAME)
+ assert "Local" in config_dir
+
+
+@pytest.mark.skipif(not MAC, reason="requires macOS")
+def test_mac_user_config_dir_no_xdg():
+ """Test that macOS returns the user config directory without XDG."""
+ config_dir = get_user_config_dir(APP_NAME, APP_AUTHOR, force_xdg=False)
+ assert isinstance(config_dir, text_type)
+ assert config_dir.endswith(APP_NAME)
+ assert "Library" in config_dir
+
+
+@pytest.mark.skipif(not MAC, reason="requires macOS")
+def test_mac_system_config_dirs_no_xdg():
+ """Test that macOS returns the system config directories without XDG."""
+ config_dirs = get_system_config_dirs(APP_NAME, APP_AUTHOR, force_xdg=False)
+ assert isinstance(config_dirs, list)
+ assert config_dirs[0].endswith(APP_NAME)
+ assert "Library" in config_dirs[0]
+
+
+def test_config_reading_raise_errors():
+ """Test that instantiating Config will raise errors when appropriate."""
+ with pytest.raises(ValueError):
+ Config(APP_NAME, APP_AUTHOR, "test_config", write_default=True)
+
+ with pytest.raises(ValueError):
+ Config(APP_NAME, APP_AUTHOR, "test_config", validate=True)
+
+ with pytest.raises(TypeError):
+ Config(APP_NAME, APP_AUTHOR, "test_config", default=b"test")
+
+
+def test_config_user_file():
+ """Test that the Config user_config_file is appropriate."""
+ config = Config(APP_NAME, APP_AUTHOR, "test_config")
+ assert get_user_config_dir(APP_NAME, APP_AUTHOR) in config.user_config_file()
+
+
+def test_config_reading_default_dict():
+ """Test that the Config constructor will read in defaults from a dict."""
+ default = {"main": {"foo": "bar"}}
+ config = Config(APP_NAME, APP_AUTHOR, "test_config", default=default)
+ assert config.data == default
+
+
+def test_config_reading_no_default():
+ """Test that the Config constructor will work without any defaults."""
+ config = Config(APP_NAME, APP_AUTHOR, "test_config")
+ assert config.data == {}
+
+
+def test_config_reading_default_file():
+ """Test that the Config will work with a default file."""
+ config = Config(
+ APP_NAME,
+ APP_AUTHOR,
+ "test_config",
+ default=os.path.join(TEST_DATA_DIR, "configrc"),
+ )
+ config.read_default_config()
+ assert config.data == DEFAULT_CONFIG
+
+
+def test_config_reading_configspec():
+ """Test that the Config default file will work with a configspec."""
+ config = Config(
+ APP_NAME,
+ APP_AUTHOR,
+ "test_config",
+ validate=True,
+ default=os.path.join(TEST_DATA_DIR, "configspecrc"),
+ )
+ config.read_default_config()
+ assert config.data == DEFAULT_VALID_CONFIG
+
+
+def test_config_reading_configspec_with_error():
+ """Test that reading an invalid configspec raises and exception."""
+ with pytest.raises(DefaultConfigValidationError):
+ config = Config(
+ APP_NAME,
+ APP_AUTHOR,
+ "test_config",
+ validate=True,
+ default=os.path.join(TEST_DATA_DIR, "invalid_configspecrc"),
+ )
+ config.read_default_config()
+
+
+@with_temp_dir
+def test_write_and_read_default_config(temp_dir=None):
+ config_file = "test_config"
+ default_file = os.path.join(TEST_DATA_DIR, "configrc")
+ temp_config_file = os.path.join(temp_dir, config_file)
+
+ config = _mocked_user_config(
+ temp_dir, APP_NAME, APP_AUTHOR, config_file, default=default_file
+ )
+ config.read_default_config()
+ config.write_default_config()
+
+ user_config = _mocked_user_config(
+ temp_dir, APP_NAME, APP_AUTHOR, config_file, default=default_file
+ )
+ user_config.read()
+ assert temp_config_file in user_config.config_filenames
+ assert user_config == config
+
+ with open(temp_config_file) as f:
+ contents = f.read()
+ assert "# Test file comment" in contents
+ assert "# Test section comment" in contents
+ assert "# Test field comment" in contents
+ assert "# Test field commented out" in contents
+
+
+@with_temp_dir
+def test_write_and_read_default_config_from_configspec(temp_dir=None):
+ config_file = "test_config"
+ default_file = os.path.join(TEST_DATA_DIR, "configspecrc")
+ temp_config_file = os.path.join(temp_dir, config_file)
+
+ config = _mocked_user_config(
+ temp_dir, APP_NAME, APP_AUTHOR, config_file, default=default_file, validate=True
+ )
+ config.read_default_config()
+ config.write_default_config()
+
+ user_config = _mocked_user_config(
+ temp_dir, APP_NAME, APP_AUTHOR, config_file, default=default_file, validate=True
+ )
+ user_config.read()
+ assert temp_config_file in user_config.config_filenames
+ assert user_config == config
+
+ with open(temp_config_file) as f:
+ contents = f.read()
+ assert "# Test file comment" in contents
+ assert "# Test section comment" in contents
+ assert "# Test field comment" in contents
+ assert "# Test field commented out" in contents
+
+
+@with_temp_dir
+def test_overwrite_default_config_from_configspec(temp_dir=None):
+ config_file = "test_config"
+ default_file = os.path.join(TEST_DATA_DIR, "configspecrc")
+ temp_config_file = os.path.join(temp_dir, config_file)
+
+ config = _mocked_user_config(
+ temp_dir, APP_NAME, APP_AUTHOR, config_file, default=default_file, validate=True
+ )
+ config.read_default_config()
+ config.write_default_config()
+
+ with open(temp_config_file, "a") as f:
+ f.write("--APPEND--")
+
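+    # Without overwrite=True, write_default_config() leaves the existing user
+    # config file untouched (the appended marker survives below).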
+ config.write_default_config()
+
+ with open(temp_config_file) as f:
+ assert "--APPEND--" in f.read()
+
+ config.write_default_config(overwrite=True)
+
+ with open(temp_config_file) as f:
+ assert "--APPEND--" not in f.read()
+
+
+def test_read_invalid_config_file():
+ config_file = "invalid_configrc"
+
+ config = _mocked_user_config(TEST_DATA_DIR, APP_NAME, APP_AUTHOR, config_file)
+ config.read()
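+    # The invalid entry is dropped on read; valid entries in the same section
+    # are still loaded.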
+ assert "section" in config
+ assert "test_string_file" in config["section"]
+ assert "test_boolean_default" not in config["section"]
+ assert "section2" in config
+
+
+@with_temp_dir
+def test_write_to_user_config(temp_dir=None):
+ config_file = "test_config"
+ default_file = os.path.join(TEST_DATA_DIR, "configrc")
+ temp_config_file = os.path.join(temp_dir, config_file)
+
+ config = _mocked_user_config(
+ temp_dir, APP_NAME, APP_AUTHOR, config_file, default=default_file
+ )
+ config.read_default_config()
+ config.write_default_config()
+
+ with open(temp_config_file) as f:
+ assert "test_boolean_default = True" in f.read()
+
+ config["section"]["test_boolean_default"] = False
+ config.write()
+
+ with open(temp_config_file) as f:
+ assert "test_boolean_default = False" in f.read()
+
+
+@with_temp_dir
+def test_write_to_outfile(temp_dir=None):
+ config_file = "test_config"
+ outfile = os.path.join(temp_dir, "foo")
+ default_file = os.path.join(TEST_DATA_DIR, "configrc")
+
+ config = _mocked_user_config(
+ temp_dir, APP_NAME, APP_AUTHOR, config_file, default=default_file
+ )
+ config.read_default_config()
+ config.write_default_config()
+
+ config["section"]["test_boolean_default"] = False
+ config.write(outfile=outfile)
+
+ with open(outfile) as f:
+ assert "test_boolean_default = False" in f.read()
diff --git a/tests/test_utils.py b/tests/test_utils.py
new file mode 100644
index 0000000..ba43937
--- /dev/null
+++ b/tests/test_utils.py
@@ -0,0 +1,77 @@
+# -*- coding: utf-8 -*-
+"""Test CLI Helpers' utility functions and helpers."""
+
+from __future__ import unicode_literals
+
+from cli_helpers import utils
+
+
+def test_bytes_to_string_hexlify():
+ """Test that bytes_to_string() hexlifies binary data."""
+ assert utils.bytes_to_string(b"\xff") == "0xff"
+
+
+def test_bytes_to_string_decode_bytes():
+ """Test that bytes_to_string() decodes bytes."""
+ assert utils.bytes_to_string(b"foobar") == "foobar"
+
+
+def test_bytes_to_string_unprintable():
+ """Test that bytes_to_string() hexlifies data that is valid unicode, but unprintable."""
+ assert utils.bytes_to_string(b"\0") == "0x00"
+ assert utils.bytes_to_string(b"\1") == "0x01"
+ assert utils.bytes_to_string(b"a\0") == "0x6100"
+
+
+def test_bytes_to_string_non_bytes():
+ """Test that bytes_to_string() returns non-bytes untouched."""
+ assert utils.bytes_to_string("abc") == "abc"
+ assert utils.bytes_to_string(1) == 1
+
+
+def test_to_string_bytes():
+ """Test that to_string() converts bytes to a string."""
+ assert utils.to_string(b"foo") == "foo"
+
+
+def test_to_string_non_bytes():
+ """Test that to_string() converts non-bytes to a string."""
+ assert utils.to_string(1) == "1"
+ assert utils.to_string(2.29) == "2.29"
+
+
+def test_truncate_string():
+ """Test string truncate preprocessor."""
+ val = "x" * 100
+ assert utils.truncate_string(val, 10) == "xxxxxxx..."
+
+ val = "x " * 100
+ assert utils.truncate_string(val, 10) == "x x x x..."
+
+ val = "x" * 100
+ assert utils.truncate_string(val) == "x" * 100
+
+ val = ["x"] * 100
+ val[20] = "\n"
+ str_val = "".join(val)
+ assert utils.truncate_string(str_val, 10, skip_multiline_string=True) == str_val
+
+
+def test_intlen_with_decimal():
+ """Test that intlen() counts correctly with a decimal place."""
+ assert utils.intlen("11.1") == 2
+ assert utils.intlen("1.1") == 1
+
+
+def test_intlen_without_decimal():
+ """Test that intlen() counts correctly without a decimal place."""
+ assert utils.intlen("11") == 2
+
+
+def test_filter_dict_by_key():
+ """Test that filter_dict_by_key() filter unwanted items."""
+ keys = ("foo", "bar")
+ d = {"foo": 1, "foobar": 2}
+ fd = utils.filter_dict_by_key(d, keys)
+ assert len(fd) == 1
+ assert all([k in keys for k in fd])
diff --git a/tests/utils.py b/tests/utils.py
new file mode 100644
index 0000000..0088eec
--- /dev/null
+++ b/tests/utils.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+"""Utility functions for CLI Helpers' tests."""
+
+from __future__ import unicode_literals
+from functools import wraps
+
+from .compat import TemporaryDirectory
+
+
+def with_temp_dir(f):
+ """A wrapper that creates and deletes a temporary directory."""
+
+ @wraps(f)
+ def wrapped(*args, **kwargs):
+ with TemporaryDirectory() as temp_dir:
+ return f(*args, temp_dir=temp_dir, **kwargs)
+
+ return wrapped
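+
+
+# Usage sketch (as in tests/test_config.py): the decorated test receives the
+# temporary directory via the ``temp_dir`` keyword argument, e.g.
+#
+#     @with_temp_dir
+#     def test_something(temp_dir=None):
+#         ...  # anything written under temp_dir is cleaned up afterwards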
diff --git a/tox.ini b/tox.ini
new file mode 100644
index 0000000..d0d97f8
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,58 @@
+[tox]
+envlist = cov-init, py36, py37, noextras, docs, packaging, cov-report
+
+[testenv]
+passenv = CI TRAVIS TRAVIS_* CODECOV
+whitelist_externals =
+ bash
+ make
+setenv =
+ PYTHONPATH = {toxinidir}:{toxinidir}/cli_helpers
+ COVERAGE_FILE = .coverage.{envname}
+commands =
+ pytest --cov-report= --cov=cli_helpers
+ coverage report
+ bash -c 'if [ -n "$CODECOV" ]; then {envbindir}/coverage xml && {envbindir}/codecov; fi'
+deps = -r{toxinidir}/requirements-dev.txt
+usedevelop = True
+
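+# noextras uninstalls Pygments so the suite also runs without the optional
+# dependency; tests guarded by HAS_PYGMENTS skipif markers are skipped there.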
+[testenv:noextras]
+commands =
+ pip uninstall -y Pygments
+ {[testenv]commands}
+
+[testenv:docs]
+changedir = docs
+deps = sphinx
+whitelist_externals = make
+commands =
+ make clean
+ make html
+ make linkcheck
+
+[testenv:packaging]
+deps =
+ check-manifest
+ readme_renderer[md]
+ -r{toxinidir}/requirements-dev.txt
+commands =
+ check-manifest --ignore .travis/*
+ ./setup.py sdist
+ twine check dist/*
+ ./setup.py check -m -s
+
+[testenv:cov-init]
+setenv =
+ COVERAGE_FILE = .coverage
+deps = coverage
+commands =
+ coverage erase
+
+
+[testenv:cov-report]
+setenv =
+ COVERAGE_FILE = .coverage
+deps = coverage
+commands =
+ coverage combine
+ coverage report