author    Daniel Baumann <daniel.baumann@progress-linux.org>  2021-04-09 16:58:57 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2021-04-09 16:58:57 +0000
commit    645027d47c864f23c499f0ef5b1e5bd15730b108 (patch)
tree      cfd9d7dce9775a82b7ef04cca857b0b956dac43f
parent    Initial commit. (diff)
Adding upstream version 1.4.6.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
-rw-r--r--  .dockerignore  173
-rw-r--r--  .github/CODE_OF_CONDUCT.md  3
-rw-r--r--  .gitignore  20
-rw-r--r--  .yamllint  14
-rw-r--r--  CHANGES.rst  194
-rw-r--r--  CONTRIBUTING.md  35
-rw-r--r--  Dockerfile  37
-rw-r--r--  Dockerfile.dev  21
-rw-r--r--  LICENSE.md  168
-rw-r--r--  MANIFEST.in  4
-rw-r--r--  Makefile  173
-rw-r--r--  Pipfile  21
-rw-r--r--  Pipfile.lock  319
-rw-r--r--  README.md  22
-rw-r--r--  ansible_runner/__init__.py  13
-rw-r--r--  ansible_runner/__main__.py  623
-rw-r--r--  ansible_runner/callbacks/__init__.py  0
-rw-r--r--  ansible_runner/callbacks/awx_display.py  50
-rw-r--r--  ansible_runner/callbacks/minimal.py  50
-rw-r--r--  ansible_runner/display_callback/__init__.py  25
-rw-r--r--  ansible_runner/display_callback/cleanup.py  80
-rw-r--r--  ansible_runner/display_callback/display.py  98
-rw-r--r--  ansible_runner/display_callback/events.py  203
-rw-r--r--  ansible_runner/display_callback/minimal.py  29
-rw-r--r--  ansible_runner/display_callback/module.py  548
-rw-r--r--  ansible_runner/exceptions.py  11
-rw-r--r--  ansible_runner/interface.py  193
-rw-r--r--  ansible_runner/loader.py  184
-rw-r--r--  ansible_runner/output.py  91
-rw-r--r--  ansible_runner/plugins/__init__.py  0
-rw-r--r--  ansible_runner/runner.py  434
-rw-r--r--  ansible_runner/runner_config.py  533
-rw-r--r--  ansible_runner/utils.py  389
-rw-r--r--  bindep.txt  7
-rw-r--r--  demo/env/envvars  2
-rw-r--r--  demo/env/extravars  3
-rw-r--r--  demo/env/passwords  2
-rw-r--r--  demo/env/settings  4
-rw-r--r--  demo/env/ssh_key  0
-rw-r--r--  demo/inventory/hosts  1
-rw-r--r--  demo/project/roles/testrole/README.md  38
-rw-r--r--  demo/project/roles/testrole/defaults/main.yml  2
-rw-r--r--  demo/project/roles/testrole/handlers/main.yml  2
-rw-r--r--  demo/project/roles/testrole/meta/main.yml  58
-rw-r--r--  demo/project/roles/testrole/tasks/main.yml  5
-rw-r--r--  demo/project/roles/testrole/tests/inventory  2
-rw-r--r--  demo/project/roles/testrole/tests/test.yml  5
-rw-r--r--  demo/project/roles/testrole/vars/main.yml  2
-rw-r--r--  demo/project/test.yml  4
-rw-r--r--  docs/Makefile  20
-rw-r--r--  docs/ansible_runner.callbacks.rst  30
-rw-r--r--  docs/ansible_runner.display_callback.rst  54
-rw-r--r--  docs/ansible_runner.rst  70
-rw-r--r--  docs/conf.py  175
-rw-r--r--  docs/container.rst  49
-rw-r--r--  docs/external_interface.rst  78
-rw-r--r--  docs/index.rst  53
-rw-r--r--  docs/install.rst  88
-rw-r--r--  docs/intro.rst  374
-rw-r--r--  docs/make.bat  36
-rw-r--r--  docs/modules.rst  7
-rw-r--r--  docs/python_interface.rst  141
-rw-r--r--  docs/source/ansible_runner.callbacks.rst  30
-rw-r--r--  docs/source/ansible_runner.display_callback.rst  54
-rw-r--r--  docs/source/ansible_runner.rst  62
-rw-r--r--  docs/source/modules.rst  7
-rw-r--r--  docs/standalone.rst  112
-rw-r--r--  packaging/debian/changelog  6
-rw-r--r--  packaging/debian/compat  1
-rw-r--r--  packaging/debian/control  19
-rw-r--r--  packaging/debian/copyright  172
-rw-r--r--  packaging/debian/docker/Dockerfile  8
-rw-r--r--  packaging/debian/docker/docker-compose.yml  14
-rw-r--r--  packaging/debian/pydist-overrides  1
-rwxr-xr-x  packaging/debian/rules  10
-rw-r--r--  packaging/debian/source/format  1
-rw-r--r--  packaging/rpm/Dockerfile.epel-7-x86_64  12
-rw-r--r--  packaging/rpm/Dockerfile.epel-8-x86_64  8
-rw-r--r--  packaging/rpm/ansible-runner.spec.j2  155
-rw-r--r--  packaging/rpm/docker-compose.yml  21
-rwxr-xr-x  setup.cfg  25
-rw-r--r--  setup.py  33
-rw-r--r--  test/__init__.py  0
-rw-r--r--  test/conftest.py  6
-rw-r--r--  test/integration/callback/other_callback.py  14
-rw-r--r--  test/integration/conftest.py  20
-rw-r--r--  test/integration/inventory/localhost  2
-rw-r--r--  test/integration/project/hello.yml  6
-rw-r--r--  test/integration/project/roles/benthomasson.hello_role/meta/.galaxy_install_info  1
-rw-r--r--  test/integration/project/roles/benthomasson.hello_role/meta/main.yml  13
-rw-r--r--  test/integration/project/roles/benthomasson.hello_role/tasks/main.yml  2
-rw-r--r--  test/integration/project/use_role.yml  5
-rw-r--r--  test/integration/test___main__.py  243
-rw-r--r--  test/integration/test_display_callback.py  318
-rw-r--r--  test/integration/test_events.py  166
-rw-r--r--  test/integration/test_interface.py  13
-rw-r--r--  test/integration/test_main.py  345
-rw-r--r--  test/integration/test_runner.py  278
-rw-r--r--  test/unit/__init__.py  0
-rw-r--r--  test/unit/test_event_filter.py  200
-rw-r--r--  test/unit/test_loader.py  133
-rw-r--r--  test/unit/test_runner.py  127
-rw-r--r--  test/unit/test_runner_config.py  562
-rw-r--r--  test/unit/test_utils.py  215
-rwxr-xr-x  tools/test-setup.sh  11
-rw-r--r--  tox.ini  29
-rwxr-xr-x  utils/entrypoint.sh  18
-rw-r--r--  utils/generate_callback_playbooks.py  40
108 files changed, 9588 insertions, 0 deletions
diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 0000000..ab6b8f7
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,173 @@
+.git/
+
+### Python template
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+.hypothesis/
+.pytest_cache/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# pyenv
+.python-version
+
+# celery beat schedule file
+celerybeat-schedule
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+### Vim template
+# Swap
+[._]*.s[a-v][a-z]
+[._]*.sw[a-p]
+[._]s[a-v][a-z]
+[._]sw[a-p]
+
+# Session
+Session.vim
+
+# Temporary
+.netrwhist
+*~
+# Auto-generated tag files
+tags
+### JetBrains template
+# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm
+# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
+
+# User-specific stuff
+.idea/**/workspace.xml
+.idea/**/tasks.xml
+.idea/**/dictionaries
+.idea/**/shelf
+
+# Sensitive or high-churn files
+.idea/**/dataSources/
+.idea/**/dataSources.ids
+.idea/**/dataSources.local.xml
+.idea/**/sqlDataSources.xml
+.idea/**/dynamic.xml
+.idea/**/uiDesigner.xml
+
+# Gradle
+.idea/**/gradle.xml
+.idea/**/libraries
+
+# CMake
+cmake-build-debug/
+cmake-build-release/
+
+# Mongo Explorer plugin
+.idea/**/mongoSettings.xml
+
+# File-based project format
+*.iws
+
+# IntelliJ
+out/
+
+# mpeltonen/sbt-idea plugin
+.idea_modules/
+
+# JIRA plugin
+atlassian-ide-plugin.xml
+
+# Cursive Clojure plugin
+.idea/replstate.xml
+
+# Crashlytics plugin (for Android Studio and IntelliJ)
+com_crashlytics_export_strings.xml
+crashlytics.properties
+crashlytics-build.properties
+fabric.properties
+
+# Editor-based Rest Client
+.idea/httpRequests
diff --git a/.github/CODE_OF_CONDUCT.md b/.github/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000..0164155
--- /dev/null
+++ b/.github/CODE_OF_CONDUCT.md
@@ -0,0 +1,3 @@
+# Community Code of Conduct
+
+Please see the official [Ansible Community Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html).
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..7c82b09
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,20 @@
+# Mac OS X
+*.DS_Store
+
+# Editors
+*.sw[poj]
+*~
+
+/demo/artifacts
+/docs/_build
+/.tox
+/dist
+/build
+/rpm-build
+/deb-build
+/*.egg-info
+*.py[c,o]
+.pytest_cache
+.coverage
+*,cover
+.venv
diff --git a/.yamllint b/.yamllint
new file mode 100644
index 0000000..6f0d12b
--- /dev/null
+++ b/.yamllint
@@ -0,0 +1,14 @@
+---
+extends: default
+
+ignore: |
+ .tox
+
+rules:
+ braces:
+ max-spaces-inside: 1
+ level: error
+ brackets:
+ max-spaces-inside: 1
+ level: error
+ line-length: disable
diff --git a/CHANGES.rst b/CHANGES.rst
new file mode 100644
index 0000000..5366dc1
--- /dev/null
+++ b/CHANGES.rst
@@ -0,0 +1,194 @@
+.. :changelog:
+
+Changelog
+---------
+
+1.4.6 (2020-03-26)
+++++++++++++++++++
+- Fixed a bug that broke Ansible playbook execution prior to version 2.8 of
+ Ansible.
+
+1.4.5 (2020-03-19)
+++++++++++++++++++
+- Fix an issue with parsing --process_isolation_* path CLI args
+- Switch default docker images to centos:8
+- Switch extravar format so we can support more than just string types
+- Make sure job events folder is created earlier to prevent errors when
+ using immediately after starting a runner job
+- Annotate all runner_on events with start/end/duration times
+
+
+1.4.4 (2019-10-25)
+++++++++++++++++++
+- Fix some unicode issues when using command line override on python 2.7
+- Fix an issue with file handles not closing on the pexpect pty
+- Fix missing ssh_key parameter from module interface
+- Fix a bug where the ssh agent process would hang around after process
+ isolation exit causing a failure to remove temporary directories
+
+1.4.2 (2019-10-04)
+++++++++++++++++++
+- Reverted ansible-runner --version to print a semantic version number
+
+1.4.1 (2019-10-03)
+++++++++++++++++++
+- Fixed a bug that prevented ANSIBLE_HOST_KEY_CHECKING from being respected
+
+1.4.0 (2019-09-20)
+++++++++++++++++++
+- Added changed count to stats data
+- Added initial support for gathering performance statistics using
+ the system's cgroup interface
+- Fix command line args override missing from module run kwargs
+- Omit the inventory argument entirely if no inventory content is supplied;
+  this allows ansible to pick up inventory from implicit locations and
+  ansible.cfg
+- Fix an issue where Runner wouldn't properly clean up process isolation
+ temporary directories
+- Fix error generated if unsafe parameter is used on vars prompt tasks
+- Fix an issue where additional callback plugins weren't being used when
+ defined in the environment
+- Fix an issue where Runner would stop returning events after the playbook
+ finished when using run_async
+- Fix an issue where unicode in task data would cause Runner to fail
+- Fix issues using vaulted data that would cause Runner to fail
+- Fix an issue where artifact-dir was only allowed in ad-hoc mode
+
+1.3.4 (2019-04-25)
+++++++++++++++++++
+- Removed an explicit version pin of the six library (which is unavailable in
+ certain Linux distributions).
+- Fixed an event handling bug in the callback plugin in Ansible 2.9+
+
+1.3.3 (2019-04-22)
+++++++++++++++++++
+
+- Fix various issues involving unicode input and output
+- Fix an issue where cancelling execution could cause an error rather
+ than assigning the proper return code and exiting cleanly
+- Fix various errors that would cause Runner to silently exit if some
+ dependencies weren't met or some commands weren't available
+- Fix an issue where the job_events directory wasn't created and would result
+ in no output for non-ansible commands
+
+1.3.2 (2019-04-10)
+++++++++++++++++++
+
+- Add direct support for forks and environment variable in parameterization
+- Fix a bug where unicode in playbooks would cause a crash
+- Fix a bug where unicode in environment variables would cause a crash
+- Capture command and cwd as part of the artifacts delivered for the job
+- Automatically remove process isolation temp directories
+- Fail more gracefully if ansible and/or bubblewrap isn't available at startup
+- Fix an issue where `verbose` events would be delayed until the end of execution
+
+1.3.1 (2019-03-27)
+++++++++++++++++++
+
+- Fixes to make default file permissions much more secure (0600)
+- Adding git to the reference container image to support galaxy requests
+
+1.3.0 (2019-03-20)
+++++++++++++++++++
+
+- Add support for directory isolation
+- Add Debian packaging support
+- Add fact caching support
+- Add process isolation configuration in the settings file
+- Fix event and display issues related to alternative Ansible strategies
+- Add Runner config reference to status handler callback
+- Add some more direct access to various ansible command line arguments
+- Adding playbook stats for "ignored" and "rescued"
+- Fix loading of some ansible resources from outside of the private data
+ directory (such as projects/playbooks)
+- Fix handling of artifact dir when specified outside of the private data
+ directory
+- Fix an issue where the stdout handle wasn't closed and not all data
+ would be flushed
+- Fixed extravar loading behavior
+- Added support for resolving parent events by associating their event uuid
+ as parent_uuid
+- Allow PYTHONPATH to be overridden
+- Expand support for executing non-ansible tools
+
+1.2.0 (2018-12-19)
+++++++++++++++++++
+
+- Add support for runner_on_start from Ansible 2.8
+- Fix thread race condition issues in event gathering
+- Add Code Of Conduct
+- Fix an issue where the "running" status wouldn't be emitted to the
+ status callback
+- Add process isolation support via bubblewrap
+- Fix an issue with orphaned file descriptors
+- Add ability to suppress ansible output from the module interface
+
+1.1.2 (2018-10-18)
+++++++++++++++++++
+
+- Fix an issue where ssh sock path could be too long
+- Fix an issue passing extra vars as dictionaries via the interface
+- Fix an issue where stdout was delayed on buffering which also caused
+ stdout not to be available if the task was canceled or failed
+- Fix role-path parameter not being honored when given on the command line.
+  Also fixed up unit tests to actually surface this error if it comes back
+- Fully onboard Zuul-CI for unit and integration testing
+
+1.1.1 (2018-09-13)
+++++++++++++++++++
+
+- Fix an issue when attaching PYTHONPATH environment variable
+- Allow selecting a different ansible binary with the RUNNER_BINARY environment variable
+- Fix --inventory command line arguments
+- Fix some issues related to terminating ansible
+- Add runner ident to the event processing callback
+- Adding integration tests and improving unit tests
+
+1.1.0 (2018-08-16)
+++++++++++++++++++
+
+- Added a feature that supports sending ansible status and events to external systems via a plugin
+ interface
+- Added support for Runner module users to receive runtime status changes in the form of a callback
+ that can be supplied to the run() methods (or passing it directly on Runner initialization)
+- Fix an issue where timeout settings were far too short
+- Add a new status and return code to indicate Runner timeout occurred.
+- Add support for running ad-hoc commands (direct module invocation, a la ansible vs ansible-playbook)
+- Fix an issue that caused missing data in events sent to the event handler(s)
+- Adding support for supplying role_path in module interface
+- Fix an issue where messages would still be emitted when --quiet was used
+- Fix a bug where ansible processes could be orphaned after canceling a job
+- Fix a bug where calling the Runner stats method would fail on python 3
+- Fix a bug where direct execution of roles couldn't be daemonized
+- Fix a bug where relative paths couldn't be used when calling start vs run
+
+
+1.0.5 (2018-07-23)
+++++++++++++++++++
+
+- Fix a bug that could cause a hang if unicode environment variables are used
+- Allow select() to be used instead of poll() when invoking pexpect
+- Check for the presence of Ansible before executing
+- Fix an issue where a missing project directory would cause Runner to fail silently
+- Add support for automatic cleanup/rotation of artifact directories
+- Adding support for Runner module users to receive events in the form of a callback
+ that can be supplied to the run() methods (or passing it directly on Runner initialization)
+- Adding support for Runner module users to provide a callback that will be invoked when the
+ Runner Ansible process has finished. This can be supplied to the run() methods (or passing it
+ directly on Runner initialization).
+
+
+1.0.4 (2018-06-29)
+++++++++++++++++++
+
+- Adding support for pexpect 4.6 for performance and efficiency improvements
+- Adding support for launching roles directly
+- Adding support for changing the output mode to json instead of vanilla Ansible (-j)
+- Adding arguments to increase ansible verbosity (-v[vvv]) and quiet mode (-q)
+- Adding support for overriding the artifact directory location
+- Adding the ability to pass arbitrary arguments to the invocation of Ansible
+- Improving debug and verbose output
+- Various fixes for broken python 2/3 compatibility, including the event generator in the python module
+- Fixing a bug when providing an ssh key via the private directory interface
+- Fixing bugs that prevented Runner from working on MacOS
+- Fixing a bug that caused issues when providing extra vars via the private dir interface
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000..ede2815
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,35 @@
+# Ansible Runner Contributing Guidelines
+
+Hi there! We're excited to have you as a contributor.
+
+Have questions about this document, or anything not covered here? Come chat with us in `#ansible-awx` on irc.freenode.net
+
+## Things to know prior to submitting code
+
+- All code and doc submissions are done through pull requests against the `master` branch.
+- Take care to make sure no merge commits are in the submission, and use `git rebase` vs `git merge` for this reason.
+- We ask all of our community members and contributors to adhere to the [Ansible code of conduct](http://docs.ansible.com/ansible/latest/community/code_of_conduct.html). If you have questions, or need assistance, please reach out to our community team at [codeofconduct@ansible.com](mailto:codeofconduct@ansible.com)
+
+## Setting up your development environment
+
+It's entirely possible to develop on **Ansible Runner** simply with
+
+```bash
+(host)$ python setup.py develop
+```
+
+Another (recommended) way is to use [Pipenv](https://docs.pipenv.org/); make sure you have it installed, and then run:
+
+```bash
+(host)$ pipenv install --dev
+```
+
+This will automatically set up the development environment under a virtualenv, which you can then switch to with:
+
+```bash
+(host)$ pipenv shell
+```
+
+## Linting and Unit Tests
+
+`tox` is used to run linters (`flake8` and `yamllint`) and unit tests on both Python 2 and 3. It uses pipenv to bootstrap these two environments.
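+
+For example, once your environment is set up, the whole suite can be run with a bare `tox` invocation (the exact environment names are defined in `tox.ini`; the commands below are illustrative):
+
+```bash
+(host)$ pip install tox
+(host)$ tox        # run all configured lint and test environments
+(host)$ tox -l     # list the available environments
+```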
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..4ca9450
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,37 @@
+FROM centos:8
+
+ADD https://github.com/krallin/tini/releases/download/v0.18.0/tini /bin/tini
+ADD utils/entrypoint.sh /bin/entrypoint
+ADD demo/project /runner/project
+ADD demo/env /runner/env
+ADD demo/inventory /runner/inventory
+
+# Install Ansible and Runner
+ADD https://releases.ansible.com/ansible-runner/ansible-runner.el8.repo /etc/yum.repos.d/ansible-runner.repo
+RUN dnf install -y epel-release && \
+ dnf install -y ansible-runner python3-pip sudo rsync openssh-clients sshpass glibc-langpack-en && \
+ alternatives --set python /usr/bin/python3 && \
+ pip3 install ansible && \
+ chmod +x /bin/tini /bin/entrypoint && \
+ rm -rf /var/cache/dnf
+
+# In OpenShift, container will run as a random uid number and gid 0. Make sure things
+# are writeable by the root group.
+RUN mkdir -p /runner/inventory /runner/project /runner/artifacts /runner/.ansible/tmp && \
+ chmod -R g+w /runner && chgrp -R root /runner && \
+ chmod g+w /etc/passwd
+
+VOLUME /runner/inventory
+VOLUME /runner/project
+VOLUME /runner/artifacts
+
+ENV LANG=en_US.UTF-8
+ENV LANGUAGE=en_US:en
+ENV LC_ALL=en_US.UTF-8
+ENV RUNNER_BASE_COMMAND=ansible-playbook
+ENV HOME=/runner
+
+WORKDIR /runner
+
+ENTRYPOINT ["entrypoint"]
+CMD ["ansible-runner", "run", "/runner"]
diff --git a/Dockerfile.dev b/Dockerfile.dev
new file mode 100644
index 0000000..cd264b4
--- /dev/null
+++ b/Dockerfile.dev
@@ -0,0 +1,21 @@
+FROM centos:7
+
+ADD https://github.com/krallin/tini/releases/download/v0.14.0/tini /tini
+
+# Install Ansible Runner
+RUN yum -y install epel-release && \
+ yum -y install ansible python-psutil python-pip bubblewrap bzip2 python-crypto \
+ which gcc python-devel libxml2 libxml2-devel krb5 krb5-devel curl curl-devel \
+ openssh openssh-clients && \
+ pip install --no-cache-dir -U setuptools && \
+ pip install --no-cache-dir wheel pexpect psutil python-daemon pipenv PyYAML && \
+ localedef -c -i en_US -f UTF-8 en_US.UTF-8 && \
+ chmod +x /tini && \
+ rm -rf /var/cache/yum
+
+ENV LANG=en_US.UTF-8 \
+ LANGUAGE=en_US:en \
+ LC_ALL=en_US.UTF-8
+
+ENTRYPOINT ["/tini", "--"]
+WORKDIR /
diff --git a/LICENSE.md b/LICENSE.md
new file mode 100644
index 0000000..3cb65ed
--- /dev/null
+++ b/LICENSE.md
@@ -0,0 +1,168 @@
+Apache License
+==============
+
+_Version 2.0, January 2004_
+_&lt;<http://www.apache.org/licenses/>&gt;_
+
+### Terms and Conditions for use, reproduction, and distribution
+
+#### 1. Definitions
+
+“License” shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+“Licensor” shall mean the copyright owner or entity authorized by the copyright
+owner that is granting the License.
+
+“Legal Entity” shall mean the union of the acting entity and all other entities
+that control, are controlled by, or are under common control with that entity.
+For the purposes of this definition, “control” means **(i)** the power, direct or
+indirect, to cause the direction or management of such entity, whether by
+contract or otherwise, or **(ii)** ownership of fifty percent (50%) or more of the
+outstanding shares, or **(iii)** beneficial ownership of such entity.
+
+“You” (or “Your”) shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+
+“Source” form shall mean the preferred form for making modifications, including
+but not limited to software source code, documentation source, and configuration
+files.
+
+“Object” form shall mean any form resulting from mechanical transformation or
+translation of a Source form, including but not limited to compiled object code,
+generated documentation, and conversions to other media types.
+
+“Work” shall mean the work of authorship, whether in Source or Object form, made
+available under the License, as indicated by a copyright notice that is included
+in or attached to the work (an example is provided in the Appendix below).
+
+“Derivative Works” shall mean any work, whether in Source or Object form, that
+is based on (or derived from) the Work and for which the editorial revisions,
+annotations, elaborations, or other modifications represent, as a whole, an
+original work of authorship. For the purposes of this License, Derivative Works
+shall not include works that remain separable from, or merely link (or bind by
+name) to the interfaces of, the Work and Derivative Works thereof.
+
+“Contribution” shall mean any work of authorship, including the original version
+of the Work and any modifications or additions to that Work or Derivative Works
+thereof, that is intentionally submitted to Licensor for inclusion in the Work
+by the copyright owner or by an individual or Legal Entity authorized to submit
+on behalf of the copyright owner. For the purposes of this definition,
+“submitted” means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems, and
+issue tracking systems that are managed by, or on behalf of, the Licensor for
+the purpose of discussing and improving the Work, but excluding communication
+that is conspicuously marked or otherwise designated in writing by the copyright
+owner as “Not a Contribution.”
+
+“Contributor” shall mean Licensor and any individual or Legal Entity on behalf
+of whom a Contribution has been received by Licensor and subsequently
+incorporated within the Work.
+
+#### 2. Grant of Copyright License
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable copyright license to reproduce, prepare Derivative Works of,
+publicly display, publicly perform, sublicense, and distribute the Work and such
+Derivative Works in Source or Object form.
+
+#### 3. Grant of Patent License
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable (except as stated in this section) patent license to make, have
+made, use, offer to sell, sell, import, and otherwise transfer the Work, where
+such license applies only to those patent claims licensable by such Contributor
+that are necessarily infringed by their Contribution(s) alone or by combination
+of their Contribution(s) with the Work to which such Contribution(s) was
+submitted. If You institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work or a
+Contribution incorporated within the Work constitutes direct or contributory
+patent infringement, then any patent licenses granted to You under this License
+for that Work shall terminate as of the date such litigation is filed.
+
+#### 4. Redistribution
+
+You may reproduce and distribute copies of the Work or Derivative Works thereof
+in any medium, with or without modifications, and in Source or Object form,
+provided that You meet the following conditions:
+
+* **(a)** You must give any other recipients of the Work or Derivative Works a copy of
+this License; and
+* **(b)** You must cause any modified files to carry prominent notices stating that You
+changed the files; and
+* **(c)** You must retain, in the Source form of any Derivative Works that You distribute,
+all copyright, patent, trademark, and attribution notices from the Source form
+of the Work, excluding those notices that do not pertain to any part of the
+Derivative Works; and
+* **(d)** If the Work includes a “NOTICE” text file as part of its distribution, then any
+Derivative Works that You distribute must include a readable copy of the
+attribution notices contained within such NOTICE file, excluding those notices
+that do not pertain to any part of the Derivative Works, in at least one of the
+following places: within a NOTICE text file distributed as part of the
+Derivative Works; within the Source form or documentation, if provided along
+with the Derivative Works; or, within a display generated by the Derivative
+Works, if and wherever such third-party notices normally appear. The contents of
+the NOTICE file are for informational purposes only and do not modify the
+License. You may add Your own attribution notices within Derivative Works that
+You distribute, alongside or as an addendum to the NOTICE text from the Work,
+provided that such additional attribution notices cannot be construed as
+modifying the License.
+
+You may add Your own copyright statement to Your modifications and may provide
+additional or different license terms and conditions for use, reproduction, or
+distribution of Your modifications, or for any such Derivative Works as a whole,
+provided Your use, reproduction, and distribution of the Work otherwise complies
+with the conditions stated in this License.
+
+#### 5. Submission of Contributions
+
+Unless You explicitly state otherwise, any Contribution intentionally submitted
+for inclusion in the Work by You to the Licensor shall be under the terms and
+conditions of this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify the terms of
+any separate license agreement you may have executed with Licensor regarding
+such Contributions.
+
+#### 6. Trademarks
+
+This License does not grant permission to use the trade names, trademarks,
+service marks, or product names of the Licensor, except as required for
+reasonable and customary use in describing the origin of the Work and
+reproducing the content of the NOTICE file.
+
+#### 7. Disclaimer of Warranty
+
+Unless required by applicable law or agreed to in writing, Licensor provides the
+Work (and each Contributor provides its Contributions) on an “AS IS” BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
+including, without limitation, any warranties or conditions of TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
+solely responsible for determining the appropriateness of using or
+redistributing the Work and assume any risks associated with Your exercise of
+permissions under this License.
+
+#### 8. Limitation of Liability
+
+In no event and under no legal theory, whether in tort (including negligence),
+contract, or otherwise, unless required by applicable law (such as deliberate
+and grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special, incidental,
+or consequential damages of any character arising as a result of this License or
+out of the use or inability to use the Work (including but not limited to
+damages for loss of goodwill, work stoppage, computer failure or malfunction, or
+any and all other commercial damages or losses), even if such Contributor has
+been advised of the possibility of such damages.
+
+#### 9. Accepting Warranty or Additional Liability
+
+While redistributing the Work or Derivative Works thereof, You may choose to
+offer, and charge a fee for, acceptance of support, warranty, indemnity, or
+other liability obligations and/or rights consistent with this License. However,
+in accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if You
+agree to indemnify, defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason of your
+accepting any such warranty or additional liability.
diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 0000000..6499e54
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1,4 @@
+
+include README.md
+include LICENSE.md
+include Makefile
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..7b7e05b
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,173 @@
+PYTHON ?= python
+ifeq ($(origin VIRTUAL_ENV), undefined)
+ DIST_PYTHON ?= pipenv run $(PYTHON)
+else
+ DIST_PYTHON ?= $(PYTHON)
+endif
+
+NAME = ansible-runner
+IMAGE_NAME ?= $(NAME)
+PIP_NAME = ansible_runner
+VERSION := $(shell $(DIST_PYTHON) setup.py --version)
+ifeq ($(OFFICIAL),yes)
+ RELEASE ?= 1
+else
+ ifeq ($(origin RELEASE), undefined)
+ RELEASE := 0.git$(shell date -u +%Y%m%d%H).$(shell git rev-parse --short HEAD)
+ endif
+endif
+
+# RPM build variables
+MOCK_BIN ?= mock
+MOCK_CONFIG ?= epel-7-x86_64
+
+RPM_NVR = $(NAME)-$(VERSION)-$(RELEASE)$(RPM_DIST)
+RPM_DIST ?= $(shell rpm --eval '%{?dist}' 2>/dev/null)
+RPM_ARCH ?= $(shell rpm --eval '%{_arch}' 2>/dev/null)
+
+# Provide a fallback value for RPM_ARCH
+ifeq ($(RPM_ARCH),)
+ RPM_ARCH = $(shell uname -m)
+endif
+
+# Debian Packaging
+DEBUILD_BIN ?= debuild
+DEBUILD_OPTS ?=
+DPUT_BIN ?= dput
+DPUT_OPTS ?=
+DEB_DIST ?= xenial
+
+GPG_KEY_ID ?=
+
+ifeq ($(origin GPG_SIGNING_KEY), undefined)
+ GPG_SIGNING_KEY = /dev/null
+endif
+
+ifeq ($(OFFICIAL),yes)
+ # Sign official builds
+ DEBUILD_OPTS += -k$(GPG_KEY_ID)
+else
+ # Do not sign unofficial builds
+ DEBUILD_OPTS += -uc -us
+endif
+
+DEBUILD = $(DEBUILD_BIN) $(DEBUILD_OPTS)
+DEB_PPA ?= mini_dinstall
+DEB_ARCH ?= amd64
+DEB_NVR = $(NAME)_$(VERSION)-$(RELEASE)~$(DEB_DIST)
+DEB_NVRA = $(DEB_NVR)_$(DEB_ARCH)
+DEB_NVRS = $(DEB_NVR)_source
+DEB_TAR_NAME=$(NAME)-$(VERSION)
+DEB_TAR_FILE=$(NAME)_$(VERSION).orig.tar.gz
+DEB_DATE := $(shell LC_TIME=C date +"%a, %d %b %Y %T %z")
+
+.PHONY: clean dist sdist dev shell image devimage rpm srpm docs deb debian deb-src
+
+clean:
+ rm -rf dist
+ rm -rf build
+ rm -rf ansible-runner.egg-info
+ rm -rf rpm-build
+ rm -rf deb-build
+	find . -type f -regex ".*\.py[co]$$" -delete
+
+dist:
+ $(DIST_PYTHON) setup.py bdist_wheel --universal
+
+sdist: dist/$(NAME)-$(VERSION).tar.gz
+
+dist/$(NAME)-$(VERSION).tar.gz:
+ $(DIST_PYTHON) setup.py sdist
+
+dev:
+ pipenv install
+
+shell:
+ pipenv shell
+
+test:
+ tox
+
+docs:
+ cd docs && make html
+
+image:
+ docker pull centos:8
+ docker build --rm=true -t $(IMAGE_NAME) .
+
+devimage:
+ docker pull centos:8
+ docker build --rm=true -t $(IMAGE_NAME)-dev -f Dockerfile.dev .
+
+rpm:
+ docker-compose -f packaging/rpm/docker-compose.yml \
+ run --rm -e RELEASE=$(RELEASE) rpm-builder "make mock-rpm"
+
+srpm:
+ docker-compose -f packaging/rpm/docker-compose.yml \
+ run --rm -e RELEASE=$(RELEASE) rpm-builder "make mock-srpm"
+
+mock-rpm: rpm-build/$(RPM_NVR).$(RPM_ARCH).rpm
+
+rpm-build/$(RPM_NVR).$(RPM_ARCH).rpm: rpm-build/$(RPM_NVR).src.rpm
+ $(MOCK_BIN) -r $(MOCK_CONFIG) --arch=noarch \
+ --resultdir=rpm-build \
+ --rebuild rpm-build/$(RPM_NVR).src.rpm
+
+mock-srpm: rpm-build/$(RPM_NVR).src.rpm
+
+rpm-build/$(RPM_NVR).src.rpm: dist/$(NAME)-$(VERSION).tar.gz rpm-build rpm-build/$(NAME).spec
+ $(MOCK_BIN) -r $(MOCK_CONFIG) --arch=noarch \
+ --resultdir=rpm-build \
+ --spec=rpm-build/$(NAME).spec \
+ --sources=rpm-build \
+ --buildsrpm
+
+rpm-build/$(NAME).spec:
+ ansible -c local -i localhost, all \
+ -m template \
+ -a "src=packaging/rpm/$(NAME).spec.j2 dest=rpm-build/$(NAME).spec" \
+ -e version=$(VERSION) \
+ -e release=$(RELEASE)
+
+rpm-build: sdist
+ mkdir -p $@
+ cp dist/$(NAME)-$(VERSION).tar.gz rpm-build/$(NAME)-$(VERSION)-$(RELEASE).tar.gz
+
+deb:
+ docker-compose -f packaging/debian/docker/docker-compose.yml \
+ run --rm \
+ -e OFFICIAL=$(OFFICIAL) -e DEB_DIST=$(DEB_DIST) -e RELEASE=$(RELEASE) \
+ -e GPG_KEY_ID=$(GPG_KEY_ID) -e GPG_SIGNING_KEY=$(GPG_SIGNING_KEY) \
+ deb-builder "make debian"
+
+ifeq ($(OFFICIAL),yes)
+debian: gpg-import deb-build/$(DEB_NVRA).deb
+gpg-import:
+ gpg --import /signing_key.asc
+else
+debian: deb-build/$(DEB_NVRA).deb
+endif
+
+deb-src: deb-build/$(DEB_NVR).dsc
+
+deb-build/$(DEB_NVRA).deb: deb-build/$(DEB_NVR).dsc
+ cd deb-build/$(NAME)-$(VERSION) && $(DEBUILD) -b
+
+deb-build/$(DEB_NVR).dsc: deb-build/$(NAME)-$(VERSION)
+ cd deb-build/$(NAME)-$(VERSION) && $(DEBUILD) -S
+
+deb-build/$(NAME)-$(VERSION): dist/$(NAME)-$(VERSION).tar.gz
+ mkdir -p $(dir $@)
+ @if [ "$(OFFICIAL)" != "yes" ] ; then \
+ tar -C deb-build/ -xvf dist/$(NAME)-$(VERSION).tar.gz ; \
+ cd deb-build && tar czf $(DEB_TAR_FILE) $(NAME)-$(VERSION) ; \
+ else \
+ cp -a dist/$(NAME)-$(VERSION).tar.gz deb-build/$(DEB_TAR_FILE) ; \
+ fi
+ cd deb-build && tar -xf $(DEB_TAR_FILE)
+ cp -a packaging/debian deb-build/$(NAME)-$(VERSION)/
+ sed -ie "s|%VERSION%|$(VERSION)|g;s|%RELEASE%|$(RELEASE)|;s|%DEB_DIST%|$(DEB_DIST)|g;s|%DATE%|$(DEB_DATE)|g" $@/debian/changelog
+
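+# Debugging helper: `make print-VAR` echoes the computed value of VAR,
+# e.g. `make print-RPM_NVR`.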
+print-%:
+ @echo $($*)
diff --git a/Pipfile b/Pipfile
new file mode 100644
index 0000000..fe80701
--- /dev/null
+++ b/Pipfile
@@ -0,0 +1,21 @@
+[[source]]
+url = "https://pypi.python.org/simple"
+verify_ssl = false
+name = "pypi"
+
+[dev-packages]
+more-itertools = "==5.0.0"
+pytest = "==4.4.0"
+"flake8" = "*"
+yamllint = "*"
+funcsigs = "*"
+mock = "*"
+pathlib2 = "*"
+scandir = "*"
+
+[packages]
+psutil = "*"
+pexpect = "==4.6"
+python-daemon = "*"
+pyyaml = "*"
+six = "*"
diff --git a/Pipfile.lock b/Pipfile.lock
new file mode 100644
index 0000000..99cbb5f
--- /dev/null
+++ b/Pipfile.lock
@@ -0,0 +1,319 @@
+{
+ "_meta": {
+ "hash": {
+ "sha256": "6041fd080b632cf05417e388df3f3246eac0be580c2ce8934b348e490fc0c122"
+ },
+ "pipfile-spec": 6,
+ "requires": {},
+ "sources": [
+ {
+ "name": "pypi",
+ "url": "https://pypi.python.org/simple",
+ "verify_ssl": false
+ }
+ ]
+ },
+ "default": {
+ "docutils": {
+ "hashes": [
+ "sha256:6c4f696463b79f1fb8ba0c594b63840ebd41f059e92b31957c46b74a4599b6d0",
+ "sha256:9e4d7ecfc600058e07ba661411a2b7de2fd0fafa17d1a7f7361cd47b1175c827",
+ "sha256:a2aeea129088da402665e92e0b25b04b073c04b2dce4ab65caaa38b7ce2e1a99"
+ ],
+ "version": "==0.15.2"
+ },
+ "lockfile": {
+ "hashes": [
+ "sha256:6aed02de03cba24efabcd600b30540140634fc06cfa603822d508d5361e9f799",
+ "sha256:6c3cb24f344923d30b2785d5ad75182c8ea7ac1b6171b08657258ec7429d50fa"
+ ],
+ "version": "==0.12.2"
+ },
+ "pexpect": {
+ "hashes": [
+ "sha256:2a8e88259839571d1251d278476f3eec5db26deb73a70be5ed5dc5435e418aba",
+ "sha256:3fbd41d4caf27fa4a377bfd16fef87271099463e6fa73e92a52f92dfee5d425b"
+ ],
+ "index": "pypi",
+ "version": "==4.6"
+ },
+ "psutil": {
+ "hashes": [
+ "sha256:094f899ac3ef72422b7e00411b4ed174e3c5a2e04c267db6643937ddba67a05b",
+ "sha256:10b7f75cc8bd676cfc6fa40cd7d5c25b3f45a0e06d43becd7c2d2871cbb5e806",
+ "sha256:1b1575240ca9a90b437e5a40db662acd87bbf181f6aa02f0204978737b913c6b",
+ "sha256:21231ef1c1a89728e29b98a885b8e0a8e00d09018f6da5cdc1f43f988471a995",
+ "sha256:28f771129bfee9fc6b63d83a15d857663bbdcae3828e1cb926e91320a9b5b5cd",
+ "sha256:70387772f84fa5c3bb6a106915a2445e20ac8f9821c5914d7cbde148f4d7ff73",
+ "sha256:b560f5cd86cf8df7bcd258a851ca1ad98f0d5b8b98748e877a0aec4e9032b465",
+ "sha256:b74b43fecce384a57094a83d2778cdfc2e2d9a6afaadd1ebecb2e75e0d34e10d",
+ "sha256:e85f727ffb21539849e6012f47b12f6dd4c44965e56591d8dec6e8bc9ab96f4a",
+ "sha256:fd2e09bb593ad9bdd7429e779699d2d47c1268cbde4dda95fcd1bd17544a0217",
+ "sha256:ffad8eb2ac614518bbe3c0b8eb9dffdb3a8d2e3a7d5da51c5b974fb723a5c5aa"
+ ],
+ "index": "pypi",
+ "version": "==5.6.7"
+ },
+ "ptyprocess": {
+ "hashes": [
+ "sha256:923f299cc5ad920c68f2bc0bc98b75b9f838b93b599941a6b63ddbc2476394c0",
+ "sha256:d7cc528d76e76342423ca640335bd3633420dc1366f258cb31d05e865ef5ca1f"
+ ],
+ "version": "==0.6.0"
+ },
+ "python-daemon": {
+ "hashes": [
+ "sha256:57c84f50a04d7825515e4dbf3a31c70cc44414394a71608dee6cfde469e81766",
+ "sha256:a0d5dc0b435a02c7e0b401e177a7c17c3f4c7b4e22e2d06271122c8fec5f8946"
+ ],
+ "index": "pypi",
+ "version": "==2.2.4"
+ },
+ "pyyaml": {
+ "hashes": [
+ "sha256:0e7f69397d53155e55d10ff68fdfb2cf630a35e6daf65cf0bdeaf04f127c09dc",
+ "sha256:2e9f0b7c5914367b0916c3c104a024bb68f269a486b9d04a2e8ac6f6597b7803",
+ "sha256:35ace9b4147848cafac3db142795ee42deebe9d0dad885ce643928e88daebdcc",
+ "sha256:38a4f0d114101c58c0f3a88aeaa44d63efd588845c5a2df5290b73db8f246d15",
+ "sha256:483eb6a33b671408c8529106df3707270bfacb2447bf8ad856a4b4f57f6e3075",
+ "sha256:4b6be5edb9f6bb73680f5bf4ee08ff25416d1400fbd4535fe0069b2994da07cd",
+ "sha256:7f38e35c00e160db592091751d385cd7b3046d6d51f578b29943225178257b31",
+ "sha256:8100c896ecb361794d8bfdb9c11fce618c7cf83d624d73d5ab38aef3bc82d43f",
+ "sha256:c0ee8eca2c582d29c3c2ec6e2c4f703d1b7f1fb10bc72317355a746057e7346c",
+ "sha256:e4c015484ff0ff197564917b4b4246ca03f411b9bd7f16e02a2f586eb48b6d04",
+ "sha256:ebc4ed52dcc93eeebeae5cf5deb2ae4347b3a81c3fa12b0b8c976544829396a4"
+ ],
+ "index": "pypi",
+ "version": "==5.2"
+ },
+ "six": {
+ "hashes": [
+ "sha256:1f1b7d42e254082a9db6279deae68afb421ceba6158efa6131de7b3003ee93fd",
+ "sha256:30f610279e8b2578cab6db20741130331735c781b56053c59c4076da27f06b66"
+ ],
+ "index": "pypi",
+ "version": "==1.13.0"
+ }
+ },
+ "develop": {
+ "atomicwrites": {
+ "hashes": [
+ "sha256:03472c30eb2c5d1ba9227e4c2ca66ab8287fbfbbda3888aa93dc2e28fc6811b4",
+ "sha256:75a9445bac02d8d058d5e1fe689654ba5a6556a1dfd8ce6ec55a0ed79866cfa6"
+ ],
+ "version": "==1.3.0"
+ },
+ "attrs": {
+ "hashes": [
+ "sha256:08a96c641c3a74e44eb59afb61a24f2cb9f4d7188748e76ba4bb5edfa3cb7d1c",
+ "sha256:f7b7ce16570fe9965acd6d30101a28f62fb4a7f9e926b3bbc9b61f8b04247e72"
+ ],
+ "version": "==19.3.0"
+ },
+ "configparser": {
+ "hashes": [
+ "sha256:254c1d9c79f60c45dfde850850883d5aaa7f19a23f13561243a050d5a7c3fe4c",
+ "sha256:c7d282687a5308319bf3d2e7706e575c635b0a470342641c93bea0ea3b5331df"
+ ],
+ "markers": "python_version < '3.2'",
+ "version": "==4.0.2"
+ },
+ "contextlib2": {
+ "hashes": [
+ "sha256:01f490098c18b19d2bd5bb5dc445b2054d2fa97f09a4280ba2c5f3c394c8162e",
+ "sha256:3355078a159fbb44ee60ea80abd0d87b80b78c248643b49aa6d94673b413609b"
+ ],
+ "markers": "python_version < '3'",
+ "version": "==0.6.0.post1"
+ },
+ "entrypoints": {
+ "hashes": [
+ "sha256:589f874b313739ad35be6e0cd7efde2a4e9b6fea91edcc34e58ecbb8dbe56d19",
+ "sha256:c70dd71abe5a8c85e55e12c19bd91ccfeec11a6e99044204511f9ed547d48451"
+ ],
+ "version": "==0.3"
+ },
+ "enum34": {
+ "hashes": [
+ "sha256:2d81cbbe0e73112bdfe6ef8576f2238f2ba27dd0d55752a776c41d38b7da2850",
+ "sha256:644837f692e5f550741432dd3f223bbb9852018674981b1664e5dc339387588a",
+ "sha256:6bd0f6ad48ec2aa117d3d141940d484deccda84d4fcd884f5c3d93c23ecd8c79",
+ "sha256:8ad8c4783bf61ded74527bffb48ed9b54166685e4230386a9ed9b1279e2df5b1"
+ ],
+ "markers": "python_version < '3.4'",
+ "version": "==1.1.6"
+ },
+ "flake8": {
+ "hashes": [
+ "sha256:45681a117ecc81e870cbf1262835ae4af5e7a8b08e40b944a8a6e6b895914cfb",
+ "sha256:49356e766643ad15072a789a20915d3c91dc89fd313ccd71802303fd67e4deca"
+ ],
+ "index": "pypi",
+ "version": "==3.7.9"
+ },
+ "funcsigs": {
+ "hashes": [
+ "sha256:330cc27ccbf7f1e992e69fef78261dc7c6569012cf397db8d3de0234e6c937ca",
+ "sha256:a7bb0f2cf3a3fd1ab2732cb49eba4252c2af4240442415b4abce3b87022a8f50"
+ ],
+ "index": "pypi",
+ "version": "==1.0.2"
+ },
+ "functools32": {
+ "hashes": [
+ "sha256:89d824aa6c358c421a234d7f9ee0bd75933a67c29588ce50aaa3acdf4d403fa0",
+ "sha256:f6253dfbe0538ad2e387bd8fdfd9293c925d63553f5813c4e587745416501e6d"
+ ],
+ "markers": "python_version < '3.2'",
+ "version": "==3.2.3.post2"
+ },
+ "importlib-metadata": {
+ "hashes": [
+ "sha256:3a8b2dfd0a2c6a3636e7c016a7e54ae04b997d30e69d5eacdca7a6c2221a1402",
+ "sha256:41e688146d000891f32b1669e8573c57e39e5060e7f5f647aa617cd9a9568278"
+ ],
+ "markers": "python_version < '3.8'",
+ "version": "==1.2.0"
+ },
+ "mccabe": {
+ "hashes": [
+ "sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42",
+ "sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f"
+ ],
+ "version": "==0.6.1"
+ },
+ "mock": {
+ "hashes": [
+ "sha256:83657d894c90d5681d62155c82bda9c1187827525880eda8ff5df4ec813437c3",
+ "sha256:d157e52d4e5b938c550f39eb2fd15610db062441a9c2747d3dbfa9298211d0f8"
+ ],
+ "index": "pypi",
+ "version": "==3.0.5"
+ },
+ "more-itertools": {
+ "hashes": [
+ "sha256:38a936c0a6d98a38bcc2d03fdaaedaba9f412879461dd2ceff8d37564d6522e4",
+ "sha256:c0a5785b1109a6bd7fac76d6837fd1feca158e54e521ccd2ae8bfe393cc9d4fc",
+ "sha256:fe7a7cae1ccb57d33952113ff4fa1bc5f879963600ed74918f1236e212ee50b9"
+ ],
+ "index": "pypi",
+ "version": "==5.0.0"
+ },
+ "pathlib2": {
+ "hashes": [
+ "sha256:0ec8205a157c80d7acc301c0b18fbd5d44fe655968f5d947b6ecef5290fc35db",
+ "sha256:6cd9a47b597b37cc57de1c05e56fb1a1c9cc9fab04fe78c29acd090418529868"
+ ],
+ "index": "pypi",
+ "version": "==2.3.5"
+ },
+ "pathspec": {
+ "hashes": [
+ "sha256:e285ccc8b0785beadd4c18e5708b12bb8fcf529a1e61215b3feff1d1e559ea5c"
+ ],
+ "version": "==0.6.0"
+ },
+ "pluggy": {
+ "hashes": [
+ "sha256:15b2acde666561e1298d71b523007ed7364de07029219b604cf808bfa1c765b0",
+ "sha256:966c145cd83c96502c3c3868f50408687b38434af77734af1e9ca461a4081d2d"
+ ],
+ "version": "==0.13.1"
+ },
+ "py": {
+ "hashes": [
+ "sha256:64f65755aee5b381cea27766a3a147c3f15b9b6b9ac88676de66ba2ae36793fa",
+ "sha256:dc639b046a6e2cff5bbe40194ad65936d6ba360b52b3c3fe1d08a82dd50b5e53"
+ ],
+ "version": "==1.8.0"
+ },
+ "pycodestyle": {
+ "hashes": [
+ "sha256:95a2219d12372f05704562a14ec30bc76b05a5b297b21a5dfe3f6fac3491ae56",
+ "sha256:e40a936c9a450ad81df37f549d676d127b1b66000a6c500caa2b085bc0ca976c"
+ ],
+ "version": "==2.5.0"
+ },
+ "pyflakes": {
+ "hashes": [
+ "sha256:17dbeb2e3f4d772725c777fabc446d5634d1038f234e77343108ce445ea69ce0",
+ "sha256:d976835886f8c5b31d47970ed689944a0262b5f3afa00a5a7b4dc81e5449f8a2"
+ ],
+ "version": "==2.1.1"
+ },
+ "pytest": {
+ "hashes": [
+ "sha256:13c5e9fb5ec5179995e9357111ab089af350d788cbc944c628f3cde72285809b",
+ "sha256:f21d2f1fb8200830dcbb5d8ec466a9c9120e20d8b53c7585d180125cce1d297a"
+ ],
+ "index": "pypi",
+ "version": "==4.4.0"
+ },
+ "pyyaml": {
+ "hashes": [
+ "sha256:0e7f69397d53155e55d10ff68fdfb2cf630a35e6daf65cf0bdeaf04f127c09dc",
+ "sha256:2e9f0b7c5914367b0916c3c104a024bb68f269a486b9d04a2e8ac6f6597b7803",
+ "sha256:35ace9b4147848cafac3db142795ee42deebe9d0dad885ce643928e88daebdcc",
+ "sha256:38a4f0d114101c58c0f3a88aeaa44d63efd588845c5a2df5290b73db8f246d15",
+ "sha256:483eb6a33b671408c8529106df3707270bfacb2447bf8ad856a4b4f57f6e3075",
+ "sha256:4b6be5edb9f6bb73680f5bf4ee08ff25416d1400fbd4535fe0069b2994da07cd",
+ "sha256:7f38e35c00e160db592091751d385cd7b3046d6d51f578b29943225178257b31",
+ "sha256:8100c896ecb361794d8bfdb9c11fce618c7cf83d624d73d5ab38aef3bc82d43f",
+ "sha256:c0ee8eca2c582d29c3c2ec6e2c4f703d1b7f1fb10bc72317355a746057e7346c",
+ "sha256:e4c015484ff0ff197564917b4b4246ca03f411b9bd7f16e02a2f586eb48b6d04",
+ "sha256:ebc4ed52dcc93eeebeae5cf5deb2ae4347b3a81c3fa12b0b8c976544829396a4"
+ ],
+ "index": "pypi",
+ "version": "==5.2"
+ },
+ "scandir": {
+ "hashes": [
+ "sha256:2586c94e907d99617887daed6c1d102b5ca28f1085f90446554abf1faf73123e",
+ "sha256:2ae41f43797ca0c11591c0c35f2f5875fa99f8797cb1a1fd440497ec0ae4b022",
+ "sha256:2b8e3888b11abb2217a32af0766bc06b65cc4a928d8727828ee68af5a967fa6f",
+ "sha256:2c712840c2e2ee8dfaf36034080108d30060d759c7b73a01a52251cc8989f11f",
+ "sha256:4d4631f6062e658e9007ab3149a9b914f3548cb38bfb021c64f39a025ce578ae",
+ "sha256:67f15b6f83e6507fdc6fca22fedf6ef8b334b399ca27c6b568cbfaa82a364173",
+ "sha256:7d2d7a06a252764061a020407b997dd036f7bd6a175a5ba2b345f0a357f0b3f4",
+ "sha256:8c5922863e44ffc00c5c693190648daa6d15e7c1207ed02d6f46a8dcc2869d32",
+ "sha256:92c85ac42f41ffdc35b6da57ed991575bdbe69db895507af88b9f499b701c188",
+ "sha256:b24086f2375c4a094a6b51e78b4cf7ca16c721dcee2eddd7aa6494b42d6d519d",
+ "sha256:cb925555f43060a1745d0a321cca94bcea927c50114b623d73179189a4e100ac"
+ ],
+ "index": "pypi",
+ "version": "==1.10.0"
+ },
+ "six": {
+ "hashes": [
+ "sha256:1f1b7d42e254082a9db6279deae68afb421ceba6158efa6131de7b3003ee93fd",
+ "sha256:30f610279e8b2578cab6db20741130331735c781b56053c59c4076da27f06b66"
+ ],
+ "index": "pypi",
+ "version": "==1.13.0"
+ },
+ "typing": {
+ "hashes": [
+ "sha256:91dfe6f3f706ee8cc32d38edbbf304e9b7583fb37108fef38229617f8b3eba23",
+ "sha256:c8cabb5ab8945cd2f54917be357d134db9cc1eb039e59d1606dc1e60cb1d9d36",
+ "sha256:f38d83c5a7a7086543a0f649564d661859c5146a85775ab90c0d2f93ffaa9714"
+ ],
+ "markers": "python_version < '3.5'",
+ "version": "==3.7.4.1"
+ },
+ "yamllint": {
+ "hashes": [
+ "sha256:0260121ed6a428b98bbadb7b6b66e9cd00114382e3d7ad06fa80e0754414cf15",
+ "sha256:c65f6df10e2752054ac89fbe7b32abc00864aecb747cf40c73fe445aea1da5f1"
+ ],
+ "index": "pypi",
+ "version": "==1.19.0"
+ },
+ "zipp": {
+ "hashes": [
+ "sha256:3718b1cbcd963c7d4c5511a8240812904164b7f381b647143a89d3b98f9bcd8e",
+ "sha256:f06903e9f1f43b12d371004b4ac7b06ab39a44adc747266928ae6debfa7b3335"
+ ],
+ "version": "==0.6.0"
+ }
+ }
+}
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..0e08dbf
--- /dev/null
+++ b/README.md
@@ -0,0 +1,22 @@
+Ansible Runner
+==============
+
+[![Documentation](https://readthedocs.org/projects/ansible-runner/badge/?version=stable)](https://ansible-runner.readthedocs.io/en/latest/)
+[![Code of Conduct](https://img.shields.io/badge/Code%20of%20Conduct-Ansible-silver.svg)](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html)
+[![Ansible Mailing lists](https://img.shields.io/badge/Mailing%20lists-Ansible-orange.svg)](https://docs.ansible.com/ansible/latest/community/communication.html#mailing-list-information)
+
+
+
+Ansible Runner is a tool and Python library that helps when interfacing with Ansible directly or as part of another system, whether through a container image interface, as a standalone tool, or as a Python module that can be imported. The goal is to provide a stable and consistent interface abstraction to Ansible.
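+
+As a quick illustration of the module interface, a run can be driven in a few lines of Python (the `private_data_dir` path and playbook name below are placeholders for your own content):
+
+```python
+import ansible_runner
+
+# Execute a playbook from a private data directory and inspect the result
+r = ansible_runner.run(private_data_dir='/tmp/demo', playbook='test.yml')
+print(r.status)  # e.g. 'successful' or 'failed'
+print(r.rc)      # the Ansible process return code
+```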
+
+For the latest documentation see: [https://ansible-runner.readthedocs.io](https://ansible-runner.readthedocs.io/en/latest/)
+
+Get Involved
+============
+
+* We use [GitHub issues](https://github.com/ansible/ansible-runner/issues) to track bug reports and feature ideas...
+* ... and [GitHub Milestones](https://github.com/ansible/ansible-runner/milestones) to track what's planned for the next release
+* Want to contribute? Check out our [guide](CONTRIBUTING.md)
+* Join us in the `#ansible-runner` channel on Freenode IRC
+* Join the discussion in [awx-project](https://groups.google.com/forum/#!forum/awx-project)
+* For the full list of Ansible email Lists, IRC channels see the [Ansible Mailing lists](https://docs.ansible.com/ansible/latest/community/communication.html#mailing-list-information)
diff --git a/ansible_runner/__init__.py b/ansible_runner/__init__.py
new file mode 100644
index 0000000..c0100de
--- /dev/null
+++ b/ansible_runner/__init__.py
@@ -0,0 +1,13 @@
+import pkg_resources
+
+from .interface import run, run_async # noqa
+from .exceptions import AnsibleRunnerException, ConfigurationError, CallbackError # noqa
+from .runner_config import RunnerConfig # noqa
+from .runner import Runner # noqa
+
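+# Discover any third-party plugins registered under the
+# 'ansible_runner.plugins' setuptools entry point group.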
+plugins = {
+ entry_point.name: entry_point.load()
+ for entry_point
+ in pkg_resources.iter_entry_points('ansible_runner.plugins')
+}
+
diff --git a/ansible_runner/__main__.py b/ansible_runner/__main__.py
new file mode 100644
index 0000000..9a9d8f7
--- /dev/null
+++ b/ansible_runner/__main__.py
@@ -0,0 +1,623 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+import ast
+import pkg_resources
+import threading
+import traceback
+import argparse
+import logging
+import signal
+import sys
+import errno
+import json
+import stat
+import os
+import shutil
+import textwrap
+
+from contextlib import contextmanager
+from uuid import uuid4
+
+from yaml import safe_load
+
+from ansible_runner import run
+from ansible_runner import output
+from ansible_runner.utils import dump_artifact, Bunch
+from ansible_runner.runner import Runner
+from ansible_runner.exceptions import AnsibleRunnerException
+
+VERSION = pkg_resources.require("ansible_runner")[0].version
+
+DEFAULT_ROLES_PATH = os.getenv('ANSIBLE_ROLES_PATH', None)
+DEFAULT_RUNNER_BINARY = os.getenv('RUNNER_BINARY', None)
+DEFAULT_RUNNER_PLAYBOOK = os.getenv('RUNNER_PLAYBOOK', None)
+DEFAULT_RUNNER_ROLE = os.getenv('RUNNER_ROLE', None)
+DEFAULT_RUNNER_MODULE = os.getenv('RUNNER_MODULE', None)
+DEFAULT_UUID = uuid4()
+
+logger = logging.getLogger('ansible-runner')
+
+
+@contextmanager
+def role_manager(args):
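+    # When a role is invoked directly (-r), synthesize a one-play playbook
+    # that runs it and adjust the runner kwargs to match; otherwise the
+    # parsed CLI arguments are passed through unchanged.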
+ if args.role:
+ role = {'name': args.role}
+ if args.role_vars:
+ role_vars = {}
+ for item in args.role_vars.split():
+ key, value = item.split('=')
+ try:
+ role_vars[key] = ast.literal_eval(value)
+ except Exception:
+ role_vars[key] = value
+ role['vars'] = role_vars
+
+ kwargs = Bunch(**args.__dict__)
+ kwargs.update(private_data_dir=args.private_data_dir,
+ json_mode=args.json,
+ ignore_logging=False,
+ project_dir=args.project_dir,
+ rotate_artifacts=args.rotate_artifacts)
+
+ if args.artifact_dir:
+ kwargs.artifact_dir = args.artifact_dir
+
+ if args.project_dir:
+ project_path = kwargs.project_dir = args.project_dir
+ else:
+ project_path = os.path.join(args.private_data_dir, 'project')
+
+ project_exists = os.path.exists(project_path)
+
+ env_path = os.path.join(args.private_data_dir, 'env')
+ env_exists = os.path.exists(env_path)
+
+ envvars_path = os.path.join(args.private_data_dir, 'env/envvars')
+ envvars_exists = os.path.exists(envvars_path)
+
+ if args.cmdline:
+ kwargs.cmdline = args.cmdline
+
+ playbook = None
+ tmpvars = None
+
+ play = [{'hosts': args.hosts if args.hosts is not None else "all",
+ 'gather_facts': not args.role_skip_facts,
+ 'roles': [role]}]
+
+ filename = str(uuid4().hex)
+
+ playbook = dump_artifact(json.dumps(play), project_path, filename)
+ kwargs.playbook = playbook
+ output.debug('using playbook file %s' % playbook)
+
+ if args.inventory:
+ inventory_file = os.path.join(args.private_data_dir, 'inventory', args.inventory)
+ if not os.path.exists(inventory_file):
+ raise AnsibleRunnerException('location specified by --inventory does not exist')
+ kwargs.inventory = inventory_file
+ output.debug('using inventory file %s' % inventory_file)
+
+ roles_path = args.roles_path or os.path.join(args.private_data_dir, 'roles')
+ roles_path = os.path.abspath(roles_path)
+ output.debug('setting ANSIBLE_ROLES_PATH to %s' % roles_path)
+
+ envvars = {}
+ if envvars_exists:
+ with open(envvars_path, 'rb') as f:
+ tmpvars = f.read()
+ new_envvars = safe_load(tmpvars)
+ if new_envvars:
+ envvars = new_envvars
+
+ envvars['ANSIBLE_ROLES_PATH'] = roles_path
+ kwargs.envvars = envvars
+ else:
+ kwargs = args
+
+ yield kwargs
+
+ if args.role:
+ if not project_exists and os.path.exists(project_path):
+ logger.debug('removing dynamically generated project folder')
+ shutil.rmtree(project_path)
+ elif playbook and os.path.isfile(playbook):
+ logger.debug('removing dynamically generated playbook')
+ os.remove(playbook)
+
+ # if a previous envvars existed in the private_data_dir,
+ # restore the original file contents
+ if tmpvars:
+ with open(envvars_path, 'wb') as f:
+ f.write(tmpvars)
+ elif not envvars_exists and os.path.exists(envvars_path):
+ logger.debug('removing dynamically generated envvars folder')
+ os.remove(envvars_path)
+
+ # since ansible-runner created the env folder, remove it
+ if not env_exists and os.path.exists(env_path):
+ logger.debug('removing dynamically generated env folder')
+ shutil.rmtree(env_path)
+
+
+def print_common_usage():
+ print(textwrap.dedent("""
+ These are common Ansible Runner commands:
+
+ execute a playbook contained in an ansible-runner directory:
+
+ ansible-runner run /tmp/private -p playbook.yml
+ ansible-runner start /tmp/private -p playbook.yml
+ ansible-runner stop /tmp/private
+ ansible-runner is-alive /tmp/private
+
+ directly execute ansible primitives:
+
+ ansible-runner run . -r role_name --hosts myhost
+ ansible-runner run . -m command -a "ls -l" --hosts myhost
+
+          `ansible-runner --help` shows a list of optional command line arguments
+ """))
+
+
+def main(sys_args=None):
+ """Main entry point for ansible-runner executable
+
+    When the ```ansible-runner``` command is executed, this function
+    is the main entry point.
+
+ :param sys_args: List of arguments to be parsed by the parser
+ :type sys_args: list
+
+ :returns: an instance of SystemExit
+ :rtype: SystemExit
+ """
+ parser = argparse.ArgumentParser(
+ description="Use 'ansible-runner' (with no arguments) to see basic usage"
+ )
+
+ parser.add_argument(
+ '--version',
+ action='version',
+ version=VERSION
+ )
+
+ # positional options
+
+ parser.add_argument(
+ "command",
+ choices=["run", "start", "stop", "is-alive"],
+ metavar="COMMAND",
+ help="command directive for controlling ansible-runner execution "
+ "(one of 'run', 'start', 'stop', 'is-alive')"
+ #help="command directive controlling ansible-runner execution"
+ )
+
+ parser.add_argument(
+ 'private_data_dir',
+        help="base directory containing the ansible-runner metadata "
+ "(project, inventory, env, etc)"
+ )
+
+ # mutually exclusive group
+
+ group = parser.add_mutually_exclusive_group()
+
+ group.add_argument(
+ "-p", "--playbook",
+ default=DEFAULT_RUNNER_PLAYBOOK,
+ help="invoke an Ansible playbook from the ansible-runner project "
+ "(See Ansible Playbook Options below)"
+ )
+
+ group.add_argument(
+ "-m", "--module",
+ default=DEFAULT_RUNNER_MODULE,
+ help="invoke an Ansible module directly without a playbook "
+ "(See Ansible Module Options below)"
+ )
+
+ group.add_argument(
+ "-r", "--role",
+ default=DEFAULT_RUNNER_ROLE,
+ help="invoke an Ansible role directly without a playbook "
+ "(See Ansible Role Options below)"
+ )
+
+ # ansible-runner options
+
+ runner_group = parser.add_argument_group(
+ "Ansible Runner Options",
+ "configuration options for controlling the ansible-runner "
+ "runtime environment."
+ )
+
+ runner_group.add_argument(
+ "--debug",
+ action="store_true",
+ help="enable ansible-runner debug output logging (default=False)"
+ )
+
+ runner_group.add_argument(
+ "--logfile",
+ help="log output messages to a file (default=None)"
+ )
+
+ runner_group.add_argument(
+ "-b", "--binary",
+ default=DEFAULT_RUNNER_BINARY,
+ help="specifies the full path pointing to the Ansible binaries "
+ "(default={})".format(DEFAULT_RUNNER_BINARY)
+ )
+
+ runner_group.add_argument(
+ "-i", "--ident",
+ default=DEFAULT_UUID,
+ help="an identifier that will be used when generating the artifacts "
+ "directory and can be used to uniquely identify a playbook run "
+ "(default={})".format(DEFAULT_UUID)
+ )
+
+ runner_group.add_argument(
+ "--rotate-artifacts",
+ default=0,
+ type=int,
+ help="automatically clean up old artifact directories after a given "
+ "number have been created (default=0, disabled)"
+ )
+
+ runner_group.add_argument(
+ "--artifact-dir",
+ help="optional path for the artifact root directory "
+ "(default=<private_data_dir>/artifacts)"
+ )
+
+ runner_group.add_argument(
+ "--project-dir",
+ help="optional path for the location of the playbook content directory "
+ "(default=<private_data_dir/project)"
+ )
+
+ runner_group.add_argument(
+ "--inventory",
+ help="optional path for the location of the inventory content directory "
+ "(default=<private_data_dir>/inventory)"
+ )
+
+ runner_group.add_argument(
+ "-j", "--json",
+ action="store_true",
+ help="output the JSON event structure to stdout instead of "
+ "Ansible output (default=False)"
+ )
+
+ runner_group.add_argument(
+ "--omit-event-data",
+ action="store_true",
+ help="Omits including extra event data in the callback payloads "
+ "or the Runner payload data files "
+ "(status and stdout still included)"
+ )
+
+ runner_group.add_argument(
+ "--only-failed-event-data",
+ action="store_true",
+ help="Only adds extra event data for failed tasks in the callback "
+ "payloads or the Runner payload data files "
+ "(status and stdout still included for other events)"
+ )
+
+ runner_group.add_argument(
+ "-q", "--quiet",
+ action="store_true",
+ help="disable all messages sent to stdout/stderr (default=False)"
+ )
+
+ runner_group.add_argument(
+ "-v",
+ action="count",
+ help="increase the verbosity with multiple v's (up to 5) of the "
+ "ansible-playbook output (default=None)"
+ )
+
+ # ansible options
+
+ ansible_group = parser.add_argument_group(
+ "Ansible Options",
+ "control the ansible[-playbook] execution environment"
+ )
+
+ ansible_group.add_argument(
+ "--limit",
+ help="matches Ansible's ```--limit``` parameter to further constrain "
+ "the inventory to be used (default=None)"
+ )
+
+ ansible_group.add_argument(
+ "--cmdline",
+ help="command line options to pass to ansible-playbook at "
+ "execution time (default=None)"
+ )
+
+ ansible_group.add_argument(
+ "--hosts",
+ help="define the set of hosts to execute against (default=None) "
+ "Note: this parameter only works with -m or -r"
+ )
+
+ ansible_group.add_argument(
+ "--forks",
+ help="matches Ansible's ```--forks``` parameter to set the number "
+ "of conconurent processes (default=None)"
+ )
+
+ # roles group
+
+ roles_group = parser.add_argument_group(
+ "Ansible Role Options",
+ "configuration options for directly executing Ansible roles"
+ )
+
+ roles_group.add_argument(
+ "--roles-path",
+ default=DEFAULT_ROLES_PATH,
+ help="path used to locate the role to be executed (default=None)"
+ )
+
+ roles_group.add_argument(
+ "--role-vars",
+ help="set of variables to be passed to the role at run time in the "
+ "form of 'key1=value1 key2=value2 keyN=valueN'(default=None)"
+ )
+
+ roles_group.add_argument(
+ "--role-skip-facts",
+ action="store_true",
+ default=False,
+ help="disable fact collection when the role is executed (default=False)"
+ )
+
+ # modules groups
+
+ modules_group = parser.add_argument_group(
+ "Ansible Module Options",
+ "configuration options for directly executing Ansible modules"
+ )
+
+ modules_group.add_argument(
+ "-a", "--args",
+ dest='module_args',
+ help="set of arguments to be passed to the module at run time in the "
+ "form of 'key1=value1 key2=value2 keyN=valueN'(default=None)"
+ )
+
+ # playbook options
+ playbook_group = parser.add_argument_group(
+ "Ansible Playbook Options",
+ "configuation options for executing Ansible playbooks"
+ )
+
+ playbook_group.add_argument(
+ "--process-isolation",
+ dest="process_isolation",
+ action="store_true",
+ help="limits what directories on the filesystem the playbook run "
+ "has access to, defaults to /tmp (default=False)"
+ )
+
+ playbook_group.add_argument(
+ "--process-isolation-executable",
+ dest="process_isolation_executable",
+ default="bwrap",
+ help="process isolation executable that will be used. (default=bwrap)"
+ )
+
+ playbook_group.add_argument(
+ "--process-isolation-path",
+ dest="process_isolation_path",
+ default="/tmp",
+ help="path that an isolated playbook run will use for staging. "
+ "(default=/tmp)"
+ )
+
+ playbook_group.add_argument(
+ "--process-isolation-hide-paths",
+ dest="process_isolation_hide_paths",
+ nargs='*',
+ help="list of paths on the system that should be hidden from the "
+ "playbook run (default=None)"
+ )
+
+ playbook_group.add_argument(
+ "--process-isolation-show-paths",
+ dest="process_isolation_show_paths",
+ nargs='*',
+ help="list of paths on the system that should be exposed to the "
+ "playbook run (default=None)"
+ )
+
+ playbook_group.add_argument(
+ "--process-isolation-ro-paths",
+ dest="process_isolation_ro_paths",
+ nargs='*',
+ help="list of paths on the system that should be exposed to the "
+ "playbook run as read-only (default=None)"
+ )
+
+ playbook_group.add_argument(
+ "--directory-isolation-base-path",
+ dest="directory_isolation_base_path",
+ help="copies the project directory to a location in this directory "
+ "to prevent multiple simultaneous executions from conflicting "
+ "(default=None)"
+ )
+
+ playbook_group.add_argument(
+ "--resource-profiling",
+ dest='resource_profiling',
+ action="store_true",
+ help="Records resource utilization during playbook execution")
+
+ playbook_group.add_argument(
+ "--resource-profiling-base-cgroup",
+ dest='resource_profiling_base_cgroup',
+ default="ansible-runner",
+ help="Top-level cgroup used to collect information on resource utilization. Defaults to ansible-runner")
+
+ playbook_group.add_argument(
+ "--resource-profiling-cpu-poll-interval",
+ dest='resource_profiling_cpu_poll_interval',
+ default=0.25,
+ type=float,
+ help="Interval (in seconds) between CPU polling for determining CPU usage. Defaults to 0.25")
+
+ playbook_group.add_argument(
+ "--resource-profiling-memory-poll-interval",
+ dest='resource_profiling_memory_poll_interval',
+ default=0.25,
+ type=float,
+ help="Interval (in seconds) between memory polling for determining memory usage. Defaults to 0.25")
+
+ playbook_group.add_argument(
+ "--resource-profiling-pid-poll-interval",
+ dest='resource_profiling_pid_poll_interval',
+ default=0.25,
+ type=float,
+ help="Interval (in seconds) between polling PID count for determining number of processes used. Defaults to 0.25")
+
+ playbook_group.add_argument(
+ "--resource-profiling-results-dir",
+ dest='resource_profiling_results_dir',
+ help="Directory where profiling data files should be saved. Defaults to None (profiling_data folder under private data dir is used in this case).")
+
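+ # Example invocation (sketch): the options above map directly onto the
+ # CLI, e.g.
+ # ansible-runner run /tmp/demo -p test.yml --process-isolation \
+ # --process-isolation-hide-paths /home /opt
+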
+ if len(sys.argv) == 1:
+ parser.print_usage()
+ print_common_usage()
+ parser.exit(status=0)
+
+ args = parser.parse_args(sys_args)
+
+ if args.command in ('start', 'run'):
+ if args.hosts and not (args.module or args.role):
+ parser.exit(status=1, message="The --hosts option can only be used with -m or -r\n")
+ if not (args.module or args.role) and not args.playbook:
+ parser.exit(status=1, message="The -p option must be specified when not using -m or -r\n")
+
+ output.configure()
+
+ # enable or disable debug mode
+ output.set_debug('enable' if args.debug else 'disable')
+
+ # set the output logfile
+ if args.logfile:
+ output.set_logfile(args.logfile)
+
+ output.debug('starting debug logging')
+
+ # get the absolute path for start since it is a daemon
+ args.private_data_dir = os.path.abspath(args.private_data_dir)
+
+ pidfile = os.path.join(args.private_data_dir, 'pid')
+
+ try:
+ os.makedirs(args.private_data_dir, mode=0o700)
+ except OSError as exc:
+ if exc.errno == errno.EEXIST and os.path.isdir(args.private_data_dir):
+ pass
+ else:
+ raise
+
+ stderr_path = None
+ context = None
+ if args.command != 'run':
+ stderr_path = os.path.join(args.private_data_dir, 'daemon.log')
+ if not os.path.exists(stderr_path):
+ os.close(os.open(stderr_path, os.O_CREAT, stat.S_IRUSR | stat.S_IWUSR))
+
+ if args.command in ('start', 'run'):
+
+ if args.command == 'start':
+ import daemon
+ from daemon.pidfile import TimeoutPIDLockFile
+ context = daemon.DaemonContext(pidfile=TimeoutPIDLockFile(pidfile))
+ else:
+ context = threading.Lock()
+
+ with context:
+ with role_manager(args) as args:
+ run_options = dict(private_data_dir=args.private_data_dir,
+ ident=args.ident,
+ binary=args.binary,
+ playbook=args.playbook,
+ module=args.module,
+ module_args=args.module_args,
+ host_pattern=args.hosts,
+ verbosity=args.v,
+ quiet=args.quiet,
+ rotate_artifacts=args.rotate_artifacts,
+ ignore_logging=False,
+ json_mode=args.json,
+ omit_event_data=args.omit_event_data,
+ only_failed_event_data=args.only_failed_event_data,
+ inventory=args.inventory,
+ forks=args.forks,
+ project_dir=args.project_dir,
+ artifact_dir=args.artifact_dir,
+ roles_path=[args.roles_path] if args.roles_path else None,
+ process_isolation=args.process_isolation,
+ process_isolation_executable=args.process_isolation_executable,
+ process_isolation_path=args.process_isolation_path,
+ process_isolation_hide_paths=args.process_isolation_hide_paths,
+ process_isolation_show_paths=args.process_isolation_show_paths,
+ process_isolation_ro_paths=args.process_isolation_ro_paths,
+ directory_isolation_base_path=args.directory_isolation_base_path,
+ resource_profiling=args.resource_profiling,
+ resource_profiling_base_cgroup=args.resource_profiling_base_cgroup,
+ resource_profiling_cpu_poll_interval=args.resource_profiling_cpu_poll_interval,
+ resource_profiling_memory_poll_interval=args.resource_profiling_memory_poll_interval,
+ resource_profiling_pid_poll_interval=args.resource_profiling_pid_poll_interval,
+ resource_profiling_results_dir=args.resource_profiling_results_dir,
+ limit=args.limit)
+ if args.cmdline:
+ run_options['cmdline'] = args.cmdline
+
+ try:
+ res = run(**run_options)
+ except Exception:
+ exc = traceback.format_exc()
+ if stderr_path:
+ with open(stderr_path, 'w+') as f:
+ f.write(exc)
+ else:
+ sys.stderr.write(exc)
+ return 1
+ return res.rc
+
+ try:
+ with open(pidfile, 'r') as f:
+ pid = int(f.readline())
+ except IOError:
+ return 1
+
+ if args.command == 'stop':
+ Runner.handle_termination(pid, pidfile=pidfile)
+ return 0
+
+ elif args.command == 'is-alive':
+ try:
+ os.kill(pid, signal.SIG_DFL)
+ return 0
+ except OSError:
+ return 1
diff --git a/ansible_runner/callbacks/__init__.py b/ansible_runner/callbacks/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/ansible_runner/callbacks/__init__.py
diff --git a/ansible_runner/callbacks/awx_display.py b/ansible_runner/callbacks/awx_display.py
new file mode 100644
index 0000000..cf877d9
--- /dev/null
+++ b/ansible_runner/callbacks/awx_display.py
@@ -0,0 +1,50 @@
+# Copyright (c) 2017 Ansible by Red Hat
+#
+# This file is part of Ansible Tower, but depends on code imported from Ansible.
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+
+
+DOCUMENTATION = '''
+ callback: awx_display
+ short_description: Playbook event dispatcher for ansible-runner
+ version_added: "2.0"
+ description:
+ - This callback is necessary for ansible-runner to work
+ type: stdout
+ extends_documentation_fragment:
+ - default_callback
+ requirements:
+ - Set as stdout in config
+'''
+
+# Python
+import os # noqa
+import sys # noqa
+
+# Add the parent (ansible_runner) directory to sys.path so that the
+# display_callback package can be imported below.
+awx_lib_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
+if awx_lib_path not in sys.path:
+ sys.path.insert(0, awx_lib_path)
+
+# Tower Display Callback
+from display_callback import AWXDefaultCallbackModule # noqa
+
+
+# In order to be recognized correctly, self.__class__.__name__ needs to
+# match "CallbackModule"
+class CallbackModule(AWXDefaultCallbackModule):
+ pass
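+
+
+# NOTE: ansible-runner selects this plugin by exporting (roughly)
+# ANSIBLE_STDOUT_CALLBACK=awx_display and pointing ANSIBLE_CALLBACK_PLUGINS at
+# this callbacks directory; a minimal sketch of doing the same by hand:
+#
+# import os
+# os.environ['ANSIBLE_STDOUT_CALLBACK'] = 'awx_display'
+# os.environ['ANSIBLE_CALLBACK_PLUGINS'] = os.path.dirname(__file__)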
diff --git a/ansible_runner/callbacks/minimal.py b/ansible_runner/callbacks/minimal.py
new file mode 100644
index 0000000..b170f08
--- /dev/null
+++ b/ansible_runner/callbacks/minimal.py
@@ -0,0 +1,50 @@
+# Copyright (c) 2017 Ansible by Red Hat
+#
+# This file is part of Ansible Tower, but depends on code imported from Ansible.
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+
+
+DOCUMENTATION = '''
+ callback: minimal
+ short_description: Ad hoc event dispatcher for ansible-runner
+ version_added: "2.0"
+ description:
+ - This callback is necessary for ansible-runner to work
+ type: stdout
+ extends_documentation_fragment:
+ - default_callback
+ requirements:
+ - Set as stdout in config
+'''
+
+# Python
+import os # noqa
+import sys # noqa
+
+# Add the parent (ansible_runner) directory to sys.path so that the
+# display_callback package can be imported below.
+awx_lib_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
+if awx_lib_path not in sys.path:
+ sys.path.insert(0, awx_lib_path)
+
+# Tower Display Callback
+from display_callback import AWXMinimalCallbackModule # noqa
+
+
+# In order to be recognized correctly, self.__class__.__name__ needs to
+# match "CallbackModule"
+class CallbackModule(AWXMinimalCallbackModule):
+ pass
diff --git a/ansible_runner/display_callback/__init__.py b/ansible_runner/display_callback/__init__.py
new file mode 100644
index 0000000..b7cbf97
--- /dev/null
+++ b/ansible_runner/display_callback/__init__.py
@@ -0,0 +1,25 @@
+# Copyright (c) 2016 Ansible by Red Hat, Inc.
+#
+# This file is part of Ansible Tower, but depends on code imported from Ansible.
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+
+# AWX Display Callback
+from . import cleanup # noqa (registers control persistent cleanup)
+from . import display # noqa (wraps ansible.display.Display methods)
+from .module import AWXDefaultCallbackModule, AWXMinimalCallbackModule
+
+__all__ = ['AWXDefaultCallbackModule', 'AWXMinimalCallbackModule']
diff --git a/ansible_runner/display_callback/cleanup.py b/ansible_runner/display_callback/cleanup.py
new file mode 100644
index 0000000..8926e54
--- /dev/null
+++ b/ansible_runner/display_callback/cleanup.py
@@ -0,0 +1,80 @@
+# Copyright (c) 2016 Ansible by Red Hat, Inc.
+#
+# This file is part of Ansible Tower, but depends on code imported from Ansible.
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+
+# Python
+import atexit
+import glob
+import os
+import pwd
+
+# PSUtil
+import psutil
+
+__all__ = []
+
+main_pid = os.getpid()
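+# main_pid is captured at import time so the atexit hook below can tell the
+# original process apart from any forked children that inherit the handler.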
+
+
+@atexit.register
+def terminate_ssh_control_masters():
+ # Only run this cleanup from the main process.
+ if os.getpid() != main_pid:
+ return
+ # Determine if control persist is being used and if any open sockets
+ # exist after running the playbook.
+ cp_path = os.environ.get('ANSIBLE_SSH_CONTROL_PATH', '')
+ if not cp_path:
+ return
+ cp_dir = os.path.dirname(cp_path)
+ if not os.path.exists(cp_dir):
+ return
+ cp_pattern = os.path.join(cp_dir, 'ansible-ssh-*')
+ cp_files = glob.glob(cp_pattern)
+ if not cp_files:
+ return
+
+ # Attempt to find any running control master processes.
+ username = pwd.getpwuid(os.getuid())[0]
+ ssh_cm_procs = []
+ for proc in psutil.process_iter():
+ try:
+ pname = proc.name()
+ pcmdline = proc.cmdline()
+ pusername = proc.username()
+ except psutil.NoSuchProcess:
+ continue
+ if pusername != username:
+ continue
+ if pname != 'ssh':
+ continue
+ for cp_file in cp_files:
+ if pcmdline and cp_file in pcmdline[0]:
+ ssh_cm_procs.append(proc)
+ break
+
+ # Terminate then kill control master processes. Workaround older
+ # version of psutil that may not have wait_procs implemented.
+ for proc in ssh_cm_procs:
+ try:
+ proc.terminate()
+ except psutil.NoSuchProcess:
+ continue
+ procs_gone, procs_alive = psutil.wait_procs(ssh_cm_procs, timeout=5)
+ for proc in procs_alive:
+ proc.kill()
diff --git a/ansible_runner/display_callback/display.py b/ansible_runner/display_callback/display.py
new file mode 100644
index 0000000..ad5e8ba
--- /dev/null
+++ b/ansible_runner/display_callback/display.py
@@ -0,0 +1,98 @@
+# Copyright (c) 2016 Ansible by Red Hat, Inc.
+#
+# This file is part of Ansible Tower, but depends on code imported from Ansible.
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+
+# Python
+import functools
+import sys
+import uuid
+
+# Ansible
+from ansible.utils.display import Display
+
+# Tower Display Callback
+from .events import event_context
+
+__all__ = []
+
+
+def with_context(**context):
+ global event_context
+
+ def wrap(f):
+ @functools.wraps(f)
+ def wrapper(*args, **kwargs):
+ with event_context.set_local(**context):
+ return f(*args, **kwargs)
+ return wrapper
+ return wrap
+
+
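+# Monkey-patch each public Display method (error, warning, deprecated, ...)
+# so every call runs inside an event context flagged with the method name,
+# which lets get_begin_dict() classify otherwise-unstructured output lines.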
+for attr in dir(Display):
+ if attr.startswith('_') or 'cow' in attr or 'prompt' in attr:
+ continue
+ if attr in ('display', 'v', 'vv', 'vvv', 'vvvv', 'vvvvv', 'vvvvvv', 'verbose'):
+ continue
+ if not callable(getattr(Display, attr)):
+ continue
+ setattr(Display, attr, with_context(**{attr: True})(getattr(Display, attr)))
+
+
+def with_verbosity(f):
+ global event_context
+
+ @functools.wraps(f)
+ def wrapper(*args, **kwargs):
+ host = args[2] if len(args) >= 3 else kwargs.get('host', None)
+ caplevel = args[3] if len(args) >= 4 else kwargs.get('caplevel', 2)
+ context = dict(verbose=True, verbosity=(caplevel + 1))
+ if host is not None:
+ context['remote_addr'] = host
+ with event_context.set_local(**context):
+ return f(*args, **kwargs)
+ return wrapper
+
+
+Display.verbose = with_verbosity(Display.verbose)
+
+
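+# Wrap Display.display itself: any output that reaches it without an event
+# UUID already set by a callback method is framed as its own event (defaulting
+# to type 'verbose') so the consumer never sees unattributed lines.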
+def display_with_context(f):
+
+ @functools.wraps(f)
+ def wrapper(*args, **kwargs):
+ log_only = args[5] if len(args) >= 6 else kwargs.get('log_only', False)
+ stderr = args[3] if len(args) >= 4 else kwargs.get('stderr', False)
+ event_uuid = event_context.get().get('uuid', None)
+ with event_context.display_lock:
+ # If writing only to a log file or there is already an event UUID
+ # set (from a callback module method), skip dumping the event data.
+ if log_only or event_uuid:
+ return f(*args, **kwargs)
+ try:
+ fileobj = sys.stderr if stderr else sys.stdout
+ event_context.add_local(uuid=str(uuid.uuid4()))
+ event_context.dump_begin(fileobj)
+ return f(*args, **kwargs)
+ finally:
+ event_context.dump_end(fileobj)
+ event_context.remove_local(uuid=None)
+
+ return wrapper
+
+
+Display.display = display_with_context(Display.display)
diff --git a/ansible_runner/display_callback/events.py b/ansible_runner/display_callback/events.py
new file mode 100644
index 0000000..f3e0679
--- /dev/null
+++ b/ansible_runner/display_callback/events.py
@@ -0,0 +1,203 @@
+# Copyright (c) 2016 Ansible by Red Hat, Inc.
+#
+# This file is part of Ansible Tower, but depends on code imported from Ansible.
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+
+# Python
+import base64
+import contextlib
+import datetime
+import json
+import multiprocessing
+import os
+import stat
+import threading
+import uuid
+
+__all__ = ['event_context']
+
+
+# use a custom JSON serializer so we can properly handle !unsafe and !vault
+# objects that may exist in events emitted by the callback plugin
+# see: https://github.com/ansible/ansible/pull/38759
+class AnsibleJSONEncoderLocal(json.JSONEncoder):
+ '''
+ The AnsibleJSONEncoder class exists in Ansible core for this purpose;
+ this class performs a mostly identical function via duck typing.
+ '''
+
+ def default(self, o):
+ if getattr(o, 'yaml_tag', None) == '!vault':
+ encrypted_form = o._ciphertext
+ if isinstance(encrypted_form, bytes):
+ encrypted_form = encrypted_form.decode('utf-8')
+ return {'__ansible_vault': encrypted_form}
+ elif isinstance(o, (datetime.date, datetime.datetime)):
+ return o.isoformat()
+ return super(AnsibleJSONEncoderLocal, self).default(o)
+
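+# Usage sketch: json.dumps({'when': datetime.date(2021, 4, 9)},
+# cls=AnsibleJSONEncoderLocal) == '{"when": "2021-04-09"}'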
+
+class IsolatedFileWrite:
+ '''
+ Class that will write partial event data to a file
+ '''
+
+ def __init__(self):
+ self.private_data_dir = os.getenv('AWX_ISOLATED_DATA_DIR')
+
+ def set(self, key, value):
+ # Strip off the leading key identifying characters :1:ev-
+ event_uuid = key[len(':1:ev-'):]
+ # Write data in a staging area and then atomic move to pickup directory
+ filename = '{}-partial.json'.format(event_uuid)
+ if not os.path.exists(os.path.join(self.private_data_dir, 'job_events')):
+ os.mkdir(os.path.join(self.private_data_dir, 'job_events'), 0o700)
+ dropoff_location = os.path.join(self.private_data_dir, 'job_events', filename)
+ write_location = '.'.join([dropoff_location, 'tmp'])
+ partial_data = json.dumps(value, cls=AnsibleJSONEncoderLocal)
+ with os.fdopen(os.open(write_location, os.O_WRONLY | os.O_CREAT, stat.S_IRUSR | stat.S_IWUSR), 'w') as f:
+ f.write(partial_data)
+ os.rename(write_location, dropoff_location)
+
+
+class EventContext(object):
+ '''
+ Store global and local (per thread/process) data associated with callback
+ events and other display output methods.
+ '''
+
+ def __init__(self):
+ self.display_lock = multiprocessing.RLock()
+ self._local = threading.local()
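+ # NOTE: ansible-runner is expected to export AWX_ISOLATED_DATA_DIR when
+ # it launches Ansible, so dump_begin() can rely on self.cache existing;
+ # the attribute is deliberately left unset otherwise.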
+ if os.getenv('AWX_ISOLATED_DATA_DIR', False):
+ self.cache = IsolatedFileWrite()
+
+ def add_local(self, **kwargs):
+ tls = vars(self._local)
+ ctx = tls.setdefault('_ctx', {})
+ ctx.update(kwargs)
+
+ def remove_local(self, **kwargs):
+ for key in kwargs.keys():
+ self._local._ctx.pop(key, None)
+
+ @contextlib.contextmanager
+ def set_local(self, **kwargs):
+ try:
+ self.add_local(**kwargs)
+ yield
+ finally:
+ self.remove_local(**kwargs)
+
+ def get_local(self):
+ return getattr(getattr(self, '_local', None), '_ctx', {})
+
+ def add_global(self, **kwargs):
+ if not hasattr(self, '_global_ctx'):
+ self._global_ctx = {}
+ self._global_ctx.update(kwargs)
+
+ def remove_global(self, **kwargs):
+ if hasattr(self, '_global_ctx'):
+ for key in kwargs.keys():
+ self._global_ctx.pop(key, None)
+
+ @contextlib.contextmanager
+ def set_global(self, **kwargs):
+ try:
+ self.add_global(**kwargs)
+ yield
+ finally:
+ self.remove_global(**kwargs)
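+
+ # Usage sketch: metadata set through these context managers is merged into
+ # every event dict emitted while the block is active, e.g.
+ #
+ # with event_context.set_global(playbook='site.yml'):
+ # ... # events emitted here carry playbook='site.yml'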
+
+ def get_global(self):
+ return getattr(self, '_global_ctx', {})
+
+ def get(self):
+ ctx = {}
+ ctx.update(self.get_global())
+ ctx.update(self.get_local())
+ return ctx
+
+ def get_begin_dict(self):
+ omit_event_data = os.getenv("RUNNER_OMIT_EVENTS", "False").lower() == "true"
+ include_only_failed_event_data = os.getenv("RUNNER_ONLY_FAILED_EVENTS", "False").lower() == "true"
+ event_data = self.get()
+ event = event_data.pop('event', None)
+ if not event:
+ event = 'verbose'
+ for key in ('debug', 'verbose', 'deprecated', 'warning', 'system_warning', 'error'):
+ if event_data.get(key, False):
+ event = key
+ break
+ event_dict = dict(event=event)
+ should_process_event_data = (include_only_failed_event_data and event in ('runner_on_failed', 'runner_on_async_failed', 'runner_on_item_failed')) \
+ or not include_only_failed_event_data
+ if os.getenv('JOB_ID', ''):
+ event_dict['job_id'] = int(os.getenv('JOB_ID', '0'))
+ if os.getenv('AD_HOC_COMMAND_ID', ''):
+ event_dict['ad_hoc_command_id'] = int(os.getenv('AD_HOC_COMMAND_ID', '0'))
+ if os.getenv('PROJECT_UPDATE_ID', ''):
+ event_dict['project_update_id'] = int(os.getenv('PROJECT_UPDATE_ID', '0'))
+ event_dict['pid'] = event_data.get('pid', os.getpid())
+ event_dict['uuid'] = event_data.get('uuid', str(uuid.uuid4()))
+ event_dict['created'] = event_data.get('created', datetime.datetime.utcnow().isoformat())
+ if not event_data.get('parent_uuid', None):
+ for key in ('task_uuid', 'play_uuid', 'playbook_uuid'):
+ parent_uuid = event_data.get(key, None)
+ if parent_uuid and parent_uuid != event_data.get('uuid', None):
+ event_dict['parent_uuid'] = parent_uuid
+ break
+ else:
+ event_dict['parent_uuid'] = event_data.get('parent_uuid', None)
+ if "verbosity" in event_data.keys():
+ event_dict["verbosity"] = event_data.pop("verbosity")
+ if not omit_event_data and should_process_event_data:
+ max_res = int(os.getenv("MAX_EVENT_RES", 700000))
+ if event not in ('playbook_on_stats',) and "res" in event_data and len(str(event_data['res'])) > max_res:
+ event_data['res'] = {}
+ else:
+ event_data = dict()
+ event_dict['event_data'] = event_data
+ return event_dict
+
+ def get_end_dict(self):
+ return {}
+
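+ # The payload is written to the stream as base64 chunks wrapped in ANSI
+ # "erase line"/"cursor back" escape sequences, so a terminal renders
+ # nothing extra while OutputEventFilter on the consuming side can locate
+ # and decode the event data.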
+ def dump(self, fileobj, data, max_width=78, flush=False):
+ b64data = base64.b64encode(json.dumps(data).encode('utf-8')).decode()
+ with self.display_lock:
+ # pattern corresponding to OutputEventFilter expectation
+ fileobj.write(u'\x1b[K')
+ for offset in range(0, len(b64data), max_width):
+ chunk = b64data[offset:offset + max_width]
+ escaped_chunk = u'{}\x1b[{}D'.format(chunk, len(chunk))
+ fileobj.write(escaped_chunk)
+ fileobj.write(u'\x1b[K')
+ if flush:
+ fileobj.flush()
+
+ def dump_begin(self, fileobj):
+ begin_dict = self.get_begin_dict()
+ self.cache.set(":1:ev-{}".format(begin_dict['uuid']), begin_dict)
+ self.dump(fileobj, {'uuid': begin_dict['uuid']})
+
+ def dump_end(self, fileobj):
+ self.dump(fileobj, self.get_end_dict(), flush=True)
+
+
+event_context = EventContext()
diff --git a/ansible_runner/display_callback/minimal.py b/ansible_runner/display_callback/minimal.py
new file mode 100644
index 0000000..98076ba
--- /dev/null
+++ b/ansible_runner/display_callback/minimal.py
@@ -0,0 +1,29 @@
+# Copyright (c) 2016 Ansible by Red Hat, Inc.
+#
+# This file is part of Ansible Tower, but depends on code imported from Ansible.
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+
+# Python
+import os
+
+# Ansible
+import ansible
+
+# Because of the way Ansible loads plugins, it's not possible to import
+# ansible.plugins.callback.minimal when being loaded as the minimal plugin. Ugh.
+minimal_plugin = os.path.join(os.path.dirname(ansible.__file__), 'plugins', 'callback', 'minimal.py')
+with open(minimal_plugin, "rb") as f:
+ exec(compile(f.read(), minimal_plugin, 'exec'))
diff --git a/ansible_runner/display_callback/module.py b/ansible_runner/display_callback/module.py
new file mode 100644
index 0000000..1d9309f
--- /dev/null
+++ b/ansible_runner/display_callback/module.py
@@ -0,0 +1,548 @@
+# Copyright (c) 2016 Ansible by Red Hat, Inc.
+#
+# This file is part of Ansible Tower, but depends on code imported from Ansible.
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+
+# Python
+import collections
+import contextlib
+import datetime
+import sys
+import uuid
+from copy import copy
+
+# Ansible
+from ansible import constants as C
+from ansible.plugins.callback import CallbackBase
+from ansible.plugins.callback.default import CallbackModule as DefaultCallbackModule
+
+# AWX Display Callback
+from .events import event_context
+from .minimal import CallbackModule as MinimalCallbackModule
+
+CENSORED = "the output has been hidden due to the fact that 'no_log: true' was specified for this result" # noqa
+
+
+def current_time():
+ return datetime.datetime.utcnow()
+
+
+class BaseCallbackModule(CallbackBase):
+ '''
+ Callback module for logging ansible/ansible-playbook events.
+ '''
+
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'stdout'
+
+ # These events should never have an associated play.
+ EVENTS_WITHOUT_PLAY = [
+ 'playbook_on_start',
+ 'playbook_on_stats',
+ ]
+
+ # These events should never have an associated task.
+ EVENTS_WITHOUT_TASK = EVENTS_WITHOUT_PLAY + [
+ 'playbook_on_setup',
+ 'playbook_on_notify',
+ 'playbook_on_import_for_host',
+ 'playbook_on_not_import_for_host',
+ 'playbook_on_no_hosts_matched',
+ 'playbook_on_no_hosts_remaining',
+ ]
+
+ def __init__(self):
+ super(BaseCallbackModule, self).__init__()
+ self._host_start = {}
+ self.task_uuids = set()
+ self.duplicate_task_counts = collections.defaultdict(lambda: 1)
+
+ self.play_uuids = set()
+ self.duplicate_play_counts = collections.defaultdict(lambda: 1)
+
+ @contextlib.contextmanager
+ def capture_event_data(self, event, **event_data):
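+ # Emit this event's header (dump_begin) before the wrapped display call
+ # runs and its footer (dump_end) afterwards, so any stdout produced in
+ # between is attributed to this event.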
+ event_data.setdefault('uuid', str(uuid.uuid4()))
+
+ if event not in self.EVENTS_WITHOUT_TASK:
+ task = event_data.pop('task', None)
+ else:
+ task = None
+
+ if event_data.get('res'):
+ if event_data['res'].get('_ansible_no_log', False):
+ event_data['res'] = {'censored': CENSORED}
+ if event_data['res'].get('results', []):
+ event_data['res']['results'] = copy(event_data['res']['results'])
+ for i, item in enumerate(event_data['res'].get('results', [])):
+ if isinstance(item, dict) and item.get('_ansible_no_log', False):
+ event_data['res']['results'][i] = {'censored': CENSORED}
+
+ with event_context.display_lock:
+ try:
+ event_context.add_local(event=event, **event_data)
+ if task:
+ self.set_task(task, local=True)
+ event_context.dump_begin(sys.stdout)
+ yield
+ finally:
+ event_context.dump_end(sys.stdout)
+ if task:
+ self.clear_task(local=True)
+ event_context.remove_local(event=None, **event_data)
+
+ def set_playbook(self, playbook):
+ # NOTE: Ansible doesn't generate a UUID for playbook_on_start so do it for them.
+ self.playbook_uuid = str(uuid.uuid4())
+ file_name = getattr(playbook, '_file_name', '???')
+ event_context.add_global(playbook=file_name, playbook_uuid=self.playbook_uuid)
+ self.clear_play()
+
+ def set_play(self, play):
+ if hasattr(play, 'hosts'):
+ if isinstance(play.hosts, list):
+ pattern = ','.join(play.hosts)
+ else:
+ pattern = play.hosts
+ else:
+ pattern = ''
+ name = play.get_name().strip() or pattern
+ event_context.add_global(play=name, play_uuid=str(play._uuid), play_pattern=pattern)
+ self.clear_task()
+
+ def clear_play(self):
+ event_context.remove_global(play=None, play_uuid=None, play_pattern=None)
+ self.clear_task()
+
+ def set_task(self, task, local=False):
+ self.clear_task(local)
+ # FIXME: Task is "global" unless using free strategy!
+ task_ctx = dict(
+ task=(task.name or task.action),
+ task_uuid=str(task._uuid),
+ task_action=task.action,
+ task_args='',
+ )
+ try:
+ task_ctx['task_path'] = task.get_path()
+ except AttributeError:
+ pass
+ if C.DISPLAY_ARGS_TO_STDOUT:
+ if task.no_log:
+ task_ctx['task_args'] = "the output has been hidden due to the fact that 'no_log: true' was specified for this result"
+ else:
+ task_args = ', '.join(('%s=%s' % a for a in task.args.items()))
+ task_ctx['task_args'] = task_args
+ if getattr(task, '_role', None):
+ task_role = task._role._role_name
+ else:
+ task_role = getattr(task, 'role_name', '')
+ if task_role:
+ task_ctx['role'] = task_role
+ if local:
+ event_context.add_local(**task_ctx)
+ else:
+ event_context.add_global(**task_ctx)
+
+ def clear_task(self, local=False):
+ task_ctx = dict(task=None, task_path=None, task_uuid=None, task_action=None, task_args=None, role=None)
+ if local:
+ event_context.remove_local(**task_ctx)
+ else:
+ event_context.remove_global(**task_ctx)
+
+ def v2_playbook_on_start(self, playbook):
+ self.set_playbook(playbook)
+ event_data = dict(
+ uuid=self.playbook_uuid,
+ )
+ with self.capture_event_data('playbook_on_start', **event_data):
+ super(BaseCallbackModule, self).v2_playbook_on_start(playbook)
+
+ def v2_playbook_on_vars_prompt(self, varname, private=True, prompt=None,
+ encrypt=None, confirm=False, salt_size=None,
+ salt=None, default=None, unsafe=None):
+ event_data = dict(
+ varname=varname,
+ private=private,
+ prompt=prompt,
+ encrypt=encrypt,
+ confirm=confirm,
+ salt_size=salt_size,
+ salt=salt,
+ default=default,
+ unsafe=unsafe,
+ )
+ with self.capture_event_data('playbook_on_vars_prompt', **event_data):
+ super(BaseCallbackModule, self).v2_playbook_on_vars_prompt(
+ varname, private, prompt, encrypt, confirm, salt_size, salt,
+ default,
+ )
+
+ def v2_playbook_on_include(self, included_file):
+ event_data = dict(
+ included_file=included_file._filename if included_file is not None else None,
+ )
+ with self.capture_event_data('playbook_on_include', **event_data):
+ super(BaseCallbackModule, self).v2_playbook_on_include(included_file)
+
+ def v2_playbook_on_play_start(self, play):
+ play_uuid = str(play._uuid)
+ if play_uuid in self.play_uuids:
+ # When this play UUID repeats, it means the play is using the
+ # free strategy (or serial:1) so different hosts may be running
+ # different tasks within a play (where duplicate UUIDS are common).
+ #
+ # When this is the case, modify the UUID slightly to append
+ # a counter so we can still _track_ duplicate events, but also
+ # avoid breaking the display in these scenarios.
+ self.duplicate_play_counts[play_uuid] += 1
+
+ play_uuid = '_'.join([
+ play_uuid,
+ str(self.duplicate_play_counts[play_uuid])
+ ])
+ self.play_uuids.add(play_uuid)
+ play._uuid = play_uuid
+
+ self.set_play(play)
+ if hasattr(play, 'hosts'):
+ if isinstance(play.hosts, list):
+ pattern = ','.join(play.hosts)
+ else:
+ pattern = play.hosts
+ else:
+ pattern = ''
+ name = play.get_name().strip() or pattern
+ event_data = dict(
+ name=name,
+ pattern=pattern,
+ uuid=str(play._uuid),
+ )
+ with self.capture_event_data('playbook_on_play_start', **event_data):
+ super(BaseCallbackModule, self).v2_playbook_on_play_start(play)
+
+ def v2_playbook_on_import_for_host(self, result, imported_file):
+ # NOTE: Not used by Ansible 2.x.
+ with self.capture_event_data('playbook_on_import_for_host'):
+ super(BaseCallbackModule, self).v2_playbook_on_import_for_host(result, imported_file)
+
+ def v2_playbook_on_not_import_for_host(self, result, missing_file):
+ # NOTE: Not used by Ansible 2.x.
+ with self.capture_event_data('playbook_on_not_import_for_host'):
+ super(BaseCallbackModule, self).v2_playbook_on_not_import_for_host(result, missing_file)
+
+ def v2_playbook_on_setup(self):
+ # NOTE: Not used by Ansible 2.x.
+ with self.capture_event_data('playbook_on_setup'):
+ super(BaseCallbackModule, self).v2_playbook_on_setup()
+
+ def v2_playbook_on_task_start(self, task, is_conditional):
+ # FIXME: Flag task path output as vv.
+ task_uuid = str(task._uuid)
+ if task_uuid in self.task_uuids:
+ # When this task UUID repeats, it means the play is using the
+ # free strategy (or serial:1) so different hosts may be running
+ # different tasks within a play (where duplicate UUIDS are common).
+ #
+ # When this is the case, modify the UUID slightly to append
+ # a counter so we can still _track_ duplicate events, but also
+ # avoid breaking the display in these scenarios.
+ self.duplicate_task_counts[task_uuid] += 1
+
+ task_uuid = '_'.join([
+ task_uuid,
+ str(self.duplicate_task_counts[task_uuid])
+ ])
+ self.task_uuids.add(task_uuid)
+ self.set_task(task)
+ event_data = dict(
+ task=task,
+ name=task.get_name(),
+ is_conditional=is_conditional,
+ uuid=task_uuid,
+ )
+ with self.capture_event_data('playbook_on_task_start', **event_data):
+ super(BaseCallbackModule, self).v2_playbook_on_task_start(task, is_conditional)
+
+ def v2_playbook_on_cleanup_task_start(self, task):
+ # NOTE: Not used by Ansible 2.x.
+ self.set_task(task)
+ event_data = dict(
+ task=task,
+ name=task.get_name(),
+ uuid=str(task._uuid),
+ is_conditional=True,
+ )
+ with self.capture_event_data('playbook_on_task_start', **event_data):
+ super(BaseCallbackModule, self).v2_playbook_on_cleanup_task_start(task)
+
+ def v2_playbook_on_handler_task_start(self, task):
+ # NOTE: Re-using playbook_on_task_start event for this v2-specific
+ # event, but setting is_conditional=True, which is how v1 identified a
+ # task run as a handler.
+ self.set_task(task)
+ event_data = dict(
+ task=task,
+ name=task.get_name(),
+ uuid=str(task._uuid),
+ is_conditional=True,
+ )
+ with self.capture_event_data('playbook_on_task_start', **event_data):
+ super(BaseCallbackModule, self).v2_playbook_on_handler_task_start(task)
+
+ def v2_playbook_on_no_hosts_matched(self):
+ with self.capture_event_data('playbook_on_no_hosts_matched'):
+ super(BaseCallbackModule, self).v2_playbook_on_no_hosts_matched()
+
+ def v2_playbook_on_no_hosts_remaining(self):
+ with self.capture_event_data('playbook_on_no_hosts_remaining'):
+ super(BaseCallbackModule, self).v2_playbook_on_no_hosts_remaining()
+
+ def v2_playbook_on_notify(self, handler, host):
+ # NOTE: Not used by Ansible < 2.5.
+ event_data = dict(
+ host=host.get_name(),
+ handler=handler.get_name(),
+ )
+ with self.capture_event_data('playbook_on_notify', **event_data):
+ super(BaseCallbackModule, self).v2_playbook_on_notify(handler, host)
+
+ # NOTE: ansible_stats was added, retroactively, in Ansible 2.2
+ def v2_playbook_on_stats(self, stats):
+ self.clear_play()
+ # FIXME: Add count of plays/tasks.
+ event_data = dict(
+ changed=stats.changed,
+ dark=stats.dark,
+ failures=stats.failures,
+ ignored=getattr(stats, 'ignored', 0),
+ ok=stats.ok,
+ processed=stats.processed,
+ rescued=getattr(stats, 'rescued', 0),
+ skipped=stats.skipped,
+ artifact_data=stats.custom.get('_run', {}) if hasattr(stats, 'custom') else {}
+ )
+
+ with self.capture_event_data('playbook_on_stats', **event_data):
+ super(BaseCallbackModule, self).v2_playbook_on_stats(stats)
+
+ @staticmethod
+ def _get_event_loop(task):
+ if hasattr(task, 'loop_with'): # Ansible >=2.5
+ return task.loop_with
+ elif hasattr(task, 'loop'): # Ansible <2.4
+ return task.loop
+ return None
+
+ def _get_result_timing_data(self, result):
+ host_start = self._host_start.get(result._host.get_name())
+ if host_start:
+ end_time = current_time()
+ return host_start, end_time, (end_time - host_start).total_seconds()
+ return None, None, None
+
+ def v2_runner_on_ok(self, result):
+ # FIXME: Display detailed results or not based on verbosity.
+
+ # strip environment vars from the job event; it already exists on the
+ # job and sensitive values are filtered there
+ if result._task.action in ('setup', 'gather_facts'):
+ result._result.get('ansible_facts', {}).pop('ansible_env', None)
+
+ host_start, end_time, duration = self._get_result_timing_data(result)
+ event_data = dict(
+ host=result._host.get_name(),
+ remote_addr=result._host.address,
+ task=result._task,
+ res=result._result,
+ start=host_start,
+ end=end_time,
+ duration=duration,
+ event_loop=self._get_event_loop(result._task),
+ )
+ with self.capture_event_data('runner_on_ok', **event_data):
+ super(BaseCallbackModule, self).v2_runner_on_ok(result)
+
+ def v2_runner_on_failed(self, result, ignore_errors=False):
+ # FIXME: Add verbosity for exception/results output.
+ host_start, end_time, duration = self._get_result_timing_data(result)
+ event_data = dict(
+ host=result._host.get_name(),
+ remote_addr=result._host.address,
+ res=result._result,
+ task=result._task,
+ start=host_start,
+ end=end_time,
+ duration=duration,
+ ignore_errors=ignore_errors,
+ event_loop=self._get_event_loop(result._task),
+ )
+ with self.capture_event_data('runner_on_failed', **event_data):
+ super(BaseCallbackModule, self).v2_runner_on_failed(result, ignore_errors)
+
+ def v2_runner_on_skipped(self, result):
+ host_start, end_time, duration = self._get_result_timing_data(result)
+ event_data = dict(
+ host=result._host.get_name(),
+ remote_addr=result._host.address,
+ task=result._task,
+ start=host_start,
+ end=end_time,
+ duration=duration,
+ event_loop=self._get_event_loop(result._task),
+ )
+ with self.capture_event_data('runner_on_skipped', **event_data):
+ super(BaseCallbackModule, self).v2_runner_on_skipped(result)
+
+ def v2_runner_on_unreachable(self, result):
+ host_start, end_time, duration = self._get_result_timing_data(result)
+ event_data = dict(
+ host=result._host.get_name(),
+ remote_addr=result._host.address,
+ task=result._task,
+ start=host_start,
+ end=end_time,
+ duration=duration,
+ res=result._result,
+ )
+ with self.capture_event_data('runner_on_unreachable', **event_data):
+ super(BaseCallbackModule, self).v2_runner_on_unreachable(result)
+
+ def v2_runner_on_no_hosts(self, task):
+ # NOTE: Not used by Ansible 2.x.
+ event_data = dict(
+ task=task,
+ )
+ with self.capture_event_data('runner_on_no_hosts', **event_data):
+ super(BaseCallbackModule, self).v2_runner_on_no_hosts(task)
+
+ def v2_runner_on_async_poll(self, result):
+ # NOTE: Not used by Ansible 2.x.
+ event_data = dict(
+ host=result._host.get_name(),
+ task=result._task,
+ res=result._result,
+ jid=result._result.get('ansible_job_id'),
+ )
+ with self.capture_event_data('runner_on_async_poll', **event_data):
+ super(BaseCallbackModule, self).v2_runner_on_async_poll(result)
+
+ def v2_runner_on_async_ok(self, result):
+ # NOTE: Not used by Ansible 2.x.
+ event_data = dict(
+ host=result._host.get_name(),
+ task=result._task,
+ res=result._result,
+ jid=result._result.get('ansible_job_id'),
+ )
+ with self.capture_event_data('runner_on_async_ok', **event_data):
+ super(BaseCallbackModule, self).v2_runner_on_async_ok(result)
+
+ def v2_runner_on_async_failed(self, result):
+ # NOTE: Not used by Ansible 2.x.
+ event_data = dict(
+ host=result._host.get_name(),
+ task=result._task,
+ res=result._result,
+ jid=result._result.get('ansible_job_id'),
+ )
+ with self.capture_event_data('runner_on_async_failed', **event_data):
+ super(BaseCallbackModule, self).v2_runner_on_async_failed(result)
+
+ def v2_runner_on_file_diff(self, result, diff):
+ # NOTE: Not used by Ansible 2.x.
+ event_data = dict(
+ host=result._host.get_name(),
+ task=result._task,
+ diff=diff,
+ )
+ with self.capture_event_data('runner_on_file_diff', **event_data):
+ super(BaseCallbackModule, self).v2_runner_on_file_diff(result, diff)
+
+ def v2_on_file_diff(self, result):
+ # NOTE: Logged as runner_on_file_diff.
+ event_data = dict(
+ host=result._host.get_name(),
+ task=result._task,
+ diff=result._result.get('diff'),
+ )
+ with self.capture_event_data('runner_on_file_diff', **event_data):
+ super(BaseCallbackModule, self).v2_on_file_diff(result)
+
+ def v2_runner_item_on_ok(self, result):
+ event_data = dict(
+ host=result._host.get_name(),
+ task=result._task,
+ res=result._result,
+ )
+ with self.capture_event_data('runner_item_on_ok', **event_data):
+ super(BaseCallbackModule, self).v2_runner_item_on_ok(result)
+
+ def v2_runner_item_on_failed(self, result):
+ event_data = dict(
+ host=result._host.get_name(),
+ task=result._task,
+ res=result._result,
+ )
+ with self.capture_event_data('runner_item_on_failed', **event_data):
+ super(BaseCallbackModule, self).v2_runner_item_on_failed(result)
+
+ def v2_runner_item_on_skipped(self, result):
+ event_data = dict(
+ host=result._host.get_name(),
+ task=result._task,
+ res=result._result,
+ )
+ with self.capture_event_data('runner_item_on_skipped', **event_data):
+ super(BaseCallbackModule, self).v2_runner_item_on_skipped(result)
+
+ def v2_runner_retry(self, result):
+ event_data = dict(
+ host=result._host.get_name(),
+ task=result._task,
+ res=result._result,
+ )
+ with self.capture_event_data('runner_retry', **event_data):
+ super(BaseCallbackModule, self).v2_runner_retry(result)
+
+ def v2_runner_on_start(self, host, task):
+ event_data = dict(
+ host=host.get_name(),
+ task=task
+ )
+ self._host_start[host.get_name()] = current_time()
+ with self.capture_event_data('runner_on_start', **event_data):
+ super(BaseCallbackModule, self).v2_runner_on_start(host, task)
+
+
+class AWXDefaultCallbackModule(BaseCallbackModule, DefaultCallbackModule):
+
+ CALLBACK_NAME = 'awx_display'
+
+
+class AWXMinimalCallbackModule(BaseCallbackModule, MinimalCallbackModule):
+
+ CALLBACK_NAME = 'minimal'
+
+ def v2_playbook_on_play_start(self, play):
+ pass
+
+ def v2_playbook_on_task_start(self, task, is_conditional):
+ self.set_task(task)
diff --git a/ansible_runner/exceptions.py b/ansible_runner/exceptions.py
new file mode 100644
index 0000000..d2cf705
--- /dev/null
+++ b/ansible_runner/exceptions.py
@@ -0,0 +1,11 @@
+
+class AnsibleRunnerException(Exception):
+ """ Generic Runner Error """
+
+
+class ConfigurationError(AnsibleRunnerException):
+ """ Misconfiguration of Runner """
+
+
+class CallbackError(AnsibleRunnerException):
+ """ Exception occurred in Callback """
diff --git a/ansible_runner/interface.py b/ansible_runner/interface.py
new file mode 100644
index 0000000..02bbfeb
--- /dev/null
+++ b/ansible_runner/interface.py
@@ -0,0 +1,193 @@
+# Copyright (c) 2016 Ansible by Red Hat, Inc.
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+import threading
+import logging
+
+from ansible_runner import output
+from ansible_runner.runner_config import RunnerConfig
+from ansible_runner.runner import Runner
+from ansible_runner.utils import (
+ dump_artifacts,
+ check_isolation_executable_installed,
+)
+
+logging.getLogger('ansible-runner').addHandler(logging.NullHandler())
+
+
+def init_runner(**kwargs):
+ '''
+ Initialize the Runner() instance
+
+ This function initializes both run() and run_async() in the same
+ way and returns an instance of Runner.
+
+ See parameters given to :py:func:`ansible_runner.interface.run`
+ '''
+ dump_artifacts(kwargs)
+
+ debug = kwargs.pop('debug', None)
+ logfile = kwargs.pop('logfile', None)
+
+ if not kwargs.pop("ignore_logging", True):
+ output.configure()
+ if debug in (True, False):
+ output.set_debug('enable' if debug is True else 'disable')
+
+ if logfile:
+ output.set_logfile(logfile)
+
+ if kwargs.get("process_isolation", False):
+ check_isolation_executable_installed(kwargs.get("process_isolation_executable", "bwrap"))
+
+ event_callback_handler = kwargs.pop('event_handler', None)
+ status_callback_handler = kwargs.pop('status_handler', None)
+ cancel_callback = kwargs.pop('cancel_callback', None)
+ finished_callback = kwargs.pop('finished_callback', None)
+
+ rc = RunnerConfig(**kwargs)
+ rc.prepare()
+
+ return Runner(rc,
+ event_handler=event_callback_handler,
+ status_handler=status_callback_handler,
+ cancel_callback=cancel_callback,
+ finished_callback=finished_callback)
+
+
+def run(**kwargs):
+ '''
+ Run an Ansible Runner task in the foreground and return a Runner object when complete.
+
+ :param private_data_dir: The directory containing all runner metadata needed to invoke the runner
+ module. Output artifacts will also be stored here for later consumption.
+ :param ident: The run identifier for this invocation of Runner. Will be used to create and name
+ the artifact directory holding the results of the invocation.
+ :param json_mode: Store event data in place of stdout on the console and in the stdout file
+ :param playbook: The playbook (supplied here as a list or string, or as a path relative to
+ ``private_data_dir/project``) that will be invoked by runner when executing Ansible.
+ :param module: The module that will be invoked in ad-hoc mode by runner when executing Ansible.
+ :param module_args: The module arguments that will be supplied to ad-hoc mode.
+ :param host_pattern: The host pattern to match when running in ad-hoc mode.
+ :param inventory: Overrides the inventory directory/file (supplied at ``private_data_dir/inventory``) with
+ a specific host or list of hosts. This can take the form of
+ - Path to the inventory file in the ``private_data_dir``
+ - Native python dict supporting the YAML/json inventory structure
+ - A text INI formatted string
+ - A list of inventory sources, or an empty list to disable passing inventory
+ :param roles_path: Directory or list of directories to assign to ANSIBLE_ROLES_PATH
+ :param envvars: Environment variables to be used when running Ansible. Environment variables will also be
+ read from ``env/envvars`` in ``private_data_dir``
+ :param extravars: Extra variables to be passed to Ansible at runtime using ``-e``. Extra vars will also be
+ read from ``env/extravars`` in ``private_data_dir``.
+ :param passwords: A dictionary containing password prompt patterns and response values used when processing output from
+ Ansible. Passwords will also be read from ``env/passwords`` in ``private_data_dir``.
+ :param settings: A dictionary containing settings values for the ``ansible-runner`` runtime environment. These will also
+ be read from ``env/settings`` in ``private_data_dir``.
+ :param ssh_key: The ssh private key passed to ``ssh-agent`` as part of the ansible-playbook run.
+ :param cmdline: Command line options to pass to Ansible; also read from ``env/cmdline`` in ``private_data_dir``
+ :param limit: Matches ansible's ``--limit`` parameter to further constrain the inventory to be used
+ :param forks: Control Ansible parallel concurrency
+ :param verbosity: Control how verbose the output of ansible-playbook is
+ :param quiet: Disable all output
+ :param artifact_dir: The path to the directory where artifacts should live; defaults to 'artifacts' under the private data dir
+ :param project_dir: The path to the playbook content; defaults to 'project' within the private data dir
+ :param rotate_artifacts: Keep at most n artifact directories; disable with a value of 0, which is the default
+ :param event_handler: An optional callback that will be invoked any time an event is received by Runner itself
+ :param cancel_callback: An optional callback that can inform runner to cancel (returning True) or not (returning False)
+ :param finished_callback: An optional callback that will be invoked at shutdown after process cleanup.
+ :param status_handler: An optional callback that will be invoked any time the status changes (e.g. started, running, failed, successful, timeout)
+ :param process_isolation: Enable limiting what directories on the filesystem the playbook run has access to.
+ :param process_isolation_executable: Path to the executable that will be used to provide filesystem isolation (default: bwrap)
+ :param process_isolation_path: Path that an isolated playbook run will use for staging. (default: /tmp)
+ :param process_isolation_hide_paths: A path or list of paths on the system that should be hidden from the playbook run.
+ :param process_isolation_show_paths: A path or list of paths on the system that should be exposed to the playbook run.
+ :param process_isolation_ro_paths: A path or list of paths on the system that should be exposed to the playbook run as read-only.
+ :param resource_profiling: Enable collection of resource utilization data during playbook execution.
+ :param resource_profiling_base_cgroup: Name of existing cgroup which will be sub-grouped in order to measure resource utilization (default: ansible-runner)
+ :param resource_profiling_cpu_poll_interval: Interval (in seconds) between CPU polling for determining CPU usage (default: 0.25)
+ :param resource_profiling_memory_poll_interval: Interval (in seconds) between memory polling for determining memory usage (default: 0.25)
+ :param resource_profiling_pid_poll_interval: Interval (in seconds) between polling PID count for determining number of processes used (default: 0.25)
+ :param resource_profiling_results_dir: Directory where profiling data files should be saved (defaults to profiling_data folder inside private data dir)
+ :param directory_isolation_base_path: An optional path will be used as the base path to create a temp directory, the project contents will be
+ copied to this location which will then be used as the working directory during playbook execution.
+ :param fact_cache: A string that will be used as the name for the subdirectory of the fact cache in artifacts directory.
+ This is only used for 'jsonfile' type fact caches.
+ :param fact_cache_type: A string of the type of fact cache to use. Defaults to 'jsonfile'.
+ :param omit_event_data: Omits extra ansible event data from event payload (stdout and event still included)
+ :param only_failed_event_data: Omits extra ansible event data unless it's a failed event (stdout and event still included)
+    :type private_data_dir: str
+    :type ident: str
+    :type json_mode: bool
+    :type playbook: str or filename or list
+    :type inventory: str or dict or list
+    :type envvars: dict
+    :type extravars: dict
+    :type passwords: dict
+    :type settings: dict
+    :type ssh_key: str
+    :type artifact_dir: str
+    :type project_dir: str
+    :type rotate_artifacts: int
+    :type cmdline: str
+    :type limit: str
+    :type forks: int
+    :type quiet: bool
+    :type verbosity: int
+    :type event_handler: function
+    :type cancel_callback: function
+    :type finished_callback: function
+    :type status_handler: function
+    :type process_isolation: bool
+    :type process_isolation_executable: str
+    :type process_isolation_path: str
+    :type process_isolation_hide_paths: str or list
+    :type process_isolation_show_paths: str or list
+    :type process_isolation_ro_paths: str or list
+    :type resource_profiling: bool
+    :type resource_profiling_base_cgroup: str
+    :type resource_profiling_cpu_poll_interval: float
+    :type resource_profiling_memory_poll_interval: float
+    :type resource_profiling_pid_poll_interval: float
+    :type resource_profiling_results_dir: str
+    :type directory_isolation_base_path: str
+    :type fact_cache: str
+    :type fact_cache_type: str
+    :type omit_event_data: bool
+    :type only_failed_event_data: bool
+
+    :returns: A :py:class:`ansible_runner.runner.Runner` object
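+
+    :Example:
+
+    A minimal sketch of a synchronous run (the ``private_data_dir`` path here is hypothetical):
+
+    >>> r = run(private_data_dir='/tmp/demo', playbook='test.yml')
+    >>> r.status
+    'successful'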
+    '''
+    r = init_runner(**kwargs)
+    r.run()
+    return r
+
+
+def run_async(**kwargs):
+    '''
+    Runs an Ansible Runner task in the background. The task starts immediately, and this call returns the thread object and a Runner object.
+
+    This uses the same parameters as :py:func:`ansible_runner.interface.run`
+
+    :returns: A tuple containing a :py:class:`threading.Thread` object and a :py:class:`ansible_runner.runner.Runner` object
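+
+    :Example:
+
+    A minimal sketch (the path is hypothetical):
+
+    >>> thread, r = run_async(private_data_dir='/tmp/demo', playbook='test.yml')
+    >>> thread.join()
+    >>> r.status in ('successful', 'failed', 'timeout', 'canceled')
+    True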
+    '''
+    r = init_runner(**kwargs)
+    runner_thread = threading.Thread(target=r.run)
+    runner_thread.start()
+    return runner_thread, r
diff --git a/ansible_runner/loader.py b/ansible_runner/loader.py
new file mode 100644
index 0000000..55f9f69
--- /dev/null
+++ b/ansible_runner/loader.py
@@ -0,0 +1,184 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+import os
+import json
+import codecs
+
+from yaml import safe_load, YAMLError
+from six import string_types
+
+from ansible_runner.exceptions import ConfigurationError
+from ansible_runner.output import debug
+
+
+class ArtifactLoader(object):
+ '''
+ Handles loading and caching file contents from disk
+
+ This class will load the file contents and attempt to deserialize the
+ contents as either JSON or YAML. If the file contents cannot be
+ deserialized, the contents will be returned to the caller as a string.
+
+ The deserialized file contents are stored as a cached object in the
+ instance to avoid any additional reads from disk for subsequent calls
+ to load the same file.
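+
+    Example (a sketch; the base path and file are hypothetical):
+
+        >>> loader = ArtifactLoader('/tmp/demo')
+        >>> settings = loader.load_file('env/settings')
+        >>> loader.load_file('env/settings') is settings  # served from the cache
+        True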
+ '''
+
+ def __init__(self, base_path):
+ self._cache = {}
+ self.base_path = base_path
+
+ def _load_json(self, contents):
+ '''
+ Attempts to deserialize the contents of a JSON object
+
+ Args:
+ contents (string): The contents to deserialize
+
+ Returns:
+ dict: If the contents are JSON serialized
+
+ None: If the contents are not JSON serialized
+ '''
+ try:
+ return json.loads(contents)
+ except ValueError:
+ pass
+
+ def _load_yaml(self, contents):
+ '''
+ Attempts to deserialize the contents of a YAML object
+
+ Args:
+ contents (string): The contents to deserialize
+
+ Returns:
+ dict: If the contents are YAML serialized
+
+ None: If the contents are not YAML serialized
+ '''
+ try:
+ return safe_load(contents)
+ except YAMLError:
+ pass
+
+ def get_contents(self, path):
+ '''
+ Loads the contents of the file specified by path
+
+ Args:
+ path (string): The relative or absolute path to the file to
+ be loaded. If the path is relative, then it is combined
+ with the base_path to generate a full path string
+
+ Returns:
+ string: The contents of the file as a string
+
+ Raises:
+ ConfigurationError: If the file cannot be loaded
+ '''
+ try:
+ if not os.path.exists(path):
+ raise ConfigurationError('specified path does not exist %s' % path)
+ with codecs.open(path, encoding='utf-8') as f:
+ data = f.read()
+
+ return data
+
+ except (IOError, OSError) as exc:
+ raise ConfigurationError('error trying to load file contents: %s' % exc)
+
+ def abspath(self, path):
+ '''
+ Transform the path to an absolute path
+
+ Args:
+ path (string): The path to transform to an absolute path
+
+ Returns:
+ string: The absolute path to the file
+ '''
+ if not path.startswith(os.path.sep) or path.startswith('~'):
+ path = os.path.expanduser(os.path.join(self.base_path, path))
+ return path
+
+ def isfile(self, path):
+ '''
+ Check if the path is a file
+
+        :param path: The path to the file to check. If the path is relative
+            it will be expanded to an absolute path
+
+ :returns: boolean
+ '''
+ return os.path.isfile(self.abspath(path))
+
+ def load_file(self, path, objtype=None, encoding='utf-8'):
+ '''
+ Load the file specified by path
+
+ This method will first try to load the file contents from cache and
+ if there is a cache miss, it will load the contents from disk
+
+ Args:
+ path (string): The full or relative path to the file to be loaded
+
+ encoding (string): The file contents text encoding
+
+ objtype (object): The object type of the file contents. This
+ is used to type check the deserialized content against the
+ contents loaded from disk.
+                Deserialization is skipped when objtype is string_types
+
+ Returns:
+ object: The deserialized file contents which could be either a
+ string object or a dict object
+
+ Raises:
+ ConfigurationError:
+ '''
+ path = self.abspath(path)
+ debug('file path is %s' % path)
+
+ if path in self._cache:
+ return self._cache[path]
+
+ try:
+ debug('cache miss, attempting to load file from disk: %s' % path)
+ contents = parsed_data = self.get_contents(path)
+ if encoding:
+ parsed_data = contents.encode(encoding)
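+                # encoding verifies the contents are representable in the
+                # requested encoding; a failure surfaces as UnicodeEncodeError below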
+ except ConfigurationError as exc:
+ debug(exc)
+ raise
+ except UnicodeEncodeError:
+ raise ConfigurationError('unable to encode file contents')
+
+ if objtype is not string_types:
+ for deserializer in (self._load_json, self._load_yaml):
+ parsed_data = deserializer(contents)
+ if parsed_data:
+ break
+
+ if objtype and not isinstance(parsed_data, objtype):
+ debug('specified file %s is not of type %s' % (path, objtype))
+ raise ConfigurationError('invalid file serialization type for contents')
+
+ self._cache[path] = parsed_data
+ return parsed_data
diff --git a/ansible_runner/output.py b/ansible_runner/output.py
new file mode 100644
index 0000000..d82d581
--- /dev/null
+++ b/ansible_runner/output.py
@@ -0,0 +1,91 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+import sys
+import logging
+
+DEBUG_ENABLED = False
+TRACEBACK_ENABLED = True
+
+_display_logger = logging.getLogger('ansible-runner.display')
+_debug_logger = logging.getLogger('ansible-runner.debug')
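+
+# display messages use a custom log level (70, above CRITICAL) so that they are
+# always emitted; debug messages use the standard DEBUG level (10). See
+# configure() below for the corresponding logger setup.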
+
+
+def display(msg, log_only=False):
+ if not log_only:
+ _display_logger.log(70, msg)
+ _debug_logger.log(10, msg)
+
+
+def debug(msg):
+ if DEBUG_ENABLED:
+ if isinstance(msg, Exception):
+ if TRACEBACK_ENABLED:
+ _debug_logger.exception(msg)
+ display(msg)
+
+
+def set_logfile(filename):
+ handlers = [h.get_name() for h in _debug_logger.handlers]
+ if 'logfile' not in handlers:
+ logfile_handler = logging.FileHandler(filename)
+ logfile_handler.set_name('logfile')
+ formatter = logging.Formatter('%(asctime)s: %(message)s')
+ logfile_handler.setFormatter(formatter)
+ _debug_logger.addHandler(logfile_handler)
+
+
+def set_debug(value):
+ global DEBUG_ENABLED
+ if value.lower() not in ('enable', 'disable'):
+ raise ValueError('value must be one of `enable` or `disable`, got %s' % value)
+ DEBUG_ENABLED = value.lower() == 'enable'
+
+
+def set_traceback(value):
+ global TRACEBACK_ENABLED
+ if value.lower() not in ('enable', 'disable'):
+ raise ValueError('value must be one of `enable` or `disable`, got %s' % value)
+ TRACEBACK_ENABLED = value.lower() == 'enable'
+
+
+def configure():
+ '''
+ Configures the logging facility
+
+ This function will setup an initial logging facility for handling display
+ and debug outputs. The default facility will send display messages to
+ stdout and the default debug facility will do nothing.
+
+ :returns: None
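+
+    :Example:
+
+    A sketch of typical usage:
+
+    >>> from ansible_runner import output
+    >>> output.configure()
+    >>> output.set_debug('enable')
+    >>> output.debug('now shown on stdout and sent to the debug logger')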
+ '''
+ root_logger = logging.getLogger()
+ root_logger.addHandler(logging.NullHandler())
+ root_logger.setLevel(99)
+
+ _display_logger.setLevel(70)
+ _debug_logger.setLevel(10)
+
+ display_handlers = [h.get_name() for h in _display_logger.handlers]
+
+ if 'stdout' not in display_handlers:
+ stdout_handler = logging.StreamHandler(sys.stdout)
+ stdout_handler.set_name('stdout')
+ formatter = logging.Formatter('%(message)s')
+ stdout_handler.setFormatter(formatter)
+ _display_logger.addHandler(stdout_handler)
diff --git a/ansible_runner/plugins/__init__.py b/ansible_runner/plugins/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/ansible_runner/plugins/__init__.py
diff --git a/ansible_runner/runner.py b/ansible_runner/runner.py
new file mode 100644
index 0000000..135f1fc
--- /dev/null
+++ b/ansible_runner/runner.py
@@ -0,0 +1,434 @@
+import os
+import stat
+import time
+import json
+import errno
+import signal
+from subprocess import Popen, PIPE
+import shutil
+import codecs
+import collections
+import datetime
+import logging
+
+import six
+import pexpect
+import psutil
+
+import ansible_runner.plugins
+
+from .utils import OutputEventFilter, cleanup_artifact_dir, ensure_str, collect_new_events
+from .exceptions import CallbackError, AnsibleRunnerException
+from ansible_runner.output import debug
+
+logger = logging.getLogger('ansible-runner')
+
+
+class Runner(object):
+
+ def __init__(self, config, cancel_callback=None, remove_partials=True,
+ event_handler=None, finished_callback=None, status_handler=None):
+ self.config = config
+ self.cancel_callback = cancel_callback
+ self.event_handler = event_handler
+ self.finished_callback = finished_callback
+ self.status_handler = status_handler
+ self.canceled = False
+ self.timed_out = False
+ self.errored = False
+ self.status = "unstarted"
+ self.rc = None
+ self.remove_partials = remove_partials
+
+ def event_callback(self, event_data):
+ '''
+ Invoked for every Ansible event to collect stdout with the event data and store it for
+ later use
+ '''
+ self.last_stdout_update = time.time()
+ if 'uuid' in event_data:
+ filename = '{}-partial.json'.format(event_data['uuid'])
+ partial_filename = os.path.join(self.config.artifact_dir,
+ 'job_events',
+ filename)
+ full_filename = os.path.join(self.config.artifact_dir,
+ 'job_events',
+ '{}-{}.json'.format(event_data['counter'],
+ event_data['uuid']))
+ try:
+ event_data.update(dict(runner_ident=str(self.config.ident)))
+ try:
+ with codecs.open(partial_filename, 'r', encoding='utf-8') as read_file:
+ partial_event_data = json.load(read_file)
+ event_data.update(partial_event_data)
+ if self.remove_partials:
+ os.remove(partial_filename)
+ except IOError:
+ debug("Failed to open ansible stdout callback plugin partial data file {}".format(partial_filename))
+ if self.event_handler is not None:
+ should_write = self.event_handler(event_data)
+ else:
+ should_write = True
+ for plugin in ansible_runner.plugins:
+ ansible_runner.plugins[plugin].event_handler(self.config, event_data)
+ if should_write:
+ with codecs.open(full_filename, 'w', encoding='utf-8') as write_file:
+ os.chmod(full_filename, stat.S_IRUSR | stat.S_IWUSR)
+ json.dump(event_data, write_file)
+ except IOError as e:
+ debug("Failed writing event data: {}".format(e))
+
+ def status_callback(self, status):
+ self.status = status
+ status_data = dict(status=status, runner_ident=str(self.config.ident))
+ for plugin in ansible_runner.plugins:
+ ansible_runner.plugins[plugin].status_handler(self.config, status_data)
+ if self.status_handler is not None:
+ self.status_handler(status_data, runner_config=self.config)
+
+ def run(self):
+ '''
+ Launch the Ansible task configured in self.config (A RunnerConfig object), returns once the
+ invocation is complete
+ '''
+ self.status_callback('starting')
+ stdout_filename = os.path.join(self.config.artifact_dir, 'stdout')
+ command_filename = os.path.join(self.config.artifact_dir, 'command')
+
+ try:
+ os.makedirs(self.config.artifact_dir, mode=0o700)
+ except OSError as exc:
+ if exc.errno == errno.EEXIST and os.path.isdir(self.config.artifact_dir):
+ pass
+ else:
+ raise
+ os.close(os.open(stdout_filename, os.O_CREAT, stat.S_IRUSR | stat.S_IWUSR))
+
+ job_events_path = os.path.join(self.config.artifact_dir, 'job_events')
+ if not os.path.exists(job_events_path):
+ os.mkdir(job_events_path, 0o700)
+
+ if six.PY2:
+ command = [a.decode('utf-8') for a in self.config.command]
+ else:
+ command = self.config.command
+ with codecs.open(command_filename, 'w', encoding='utf-8') as f:
+ os.chmod(command_filename, stat.S_IRUSR | stat.S_IWUSR)
+ json.dump(
+ {'command': command,
+ 'cwd': self.config.cwd,
+ 'env': self.config.env}, f, ensure_ascii=False
+ )
+
+ if self.config.ident is not None:
+ cleanup_artifact_dir(os.path.join(self.config.artifact_dir, ".."), self.config.rotate_artifacts)
+
+ stdout_handle = codecs.open(stdout_filename, 'w', encoding='utf-8')
+ stdout_handle = OutputEventFilter(stdout_handle, self.event_callback, self.config.suppress_ansible_output, output_json=self.config.json_mode)
+
+ if not isinstance(self.config.expect_passwords, collections.OrderedDict):
+ # We iterate over `expect_passwords.keys()` and
+ # `expect_passwords.values()` separately to map matched inputs to
+ # patterns and choose the proper string to send to the subprocess;
+ # enforce usage of an OrderedDict so that the ordering of elements in
+ # `keys()` matches `values()`.
+ expect_passwords = collections.OrderedDict(self.config.expect_passwords)
+ password_patterns = list(expect_passwords.keys())
+ password_values = list(expect_passwords.values())
+
+ # pexpect needs all env vars to be utf-8 encoded bytes
+ # https://github.com/pexpect/pexpect/issues/512
+
+ # Use a copy so as not to cause problems when serializing the job_env.
+ env = {
+ ensure_str(k): ensure_str(v) if k != 'PATH' and isinstance(v, six.text_type) else v
+ for k, v in self.config.env.items()
+ }
+
+ # Prepare to collect performance data
+ if self.config.resource_profiling:
+ cgroup_path = '{0}/{1}'.format(self.config.resource_profiling_base_cgroup, self.config.ident)
+
+ import getpass
+ import grp
+ user = getpass.getuser()
+ group = grp.getgrgid(os.getgid()).gr_name
+
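+            # illustrative result (user/group values vary):
+            #   cgcreate -a user:group -t user:group -g cpuacct,memory,pids:ansible-runner/<ident>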
+ cmd = 'cgcreate -a {user}:{group} -t {user}:{group} -g cpuacct,memory,pids:{}'.format(cgroup_path, user=user, group=group)
+ proc = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
+ _, stderr = proc.communicate()
+ if proc.returncode:
+ # Unable to create cgroup
+ logger.error('Unable to create cgroup: {}'.format(stderr))
+ raise RuntimeError('Unable to create cgroup: {}'.format(stderr))
+ else:
+ logger.info("Created cgroup '{}'".format(cgroup_path))
+
+ self.status_callback('running')
+ self.last_stdout_update = time.time()
+ try:
+ child = pexpect.spawn(
+ command[0],
+ command[1:],
+ cwd=self.config.cwd,
+ env=env,
+ ignore_sighup=True,
+ encoding='utf-8',
+ echo=False,
+ use_poll=self.config.pexpect_use_poll,
+ )
+ child.logfile_read = stdout_handle
+ except pexpect.exceptions.ExceptionPexpect as e:
+ child = collections.namedtuple(
+ 'MissingProcess', 'exitstatus isalive close'
+ )(
+ exitstatus=127,
+ isalive=lambda: False,
+ close=lambda: None,
+ )
+
+ def _decode(x):
+ return x.decode('utf-8') if six.PY2 else x
+
+ # create the events directory (the callback plugin won't run, so it
+ # won't get created)
+ events_directory = os.path.join(self.config.artifact_dir, 'job_events')
+ if not os.path.exists(events_directory):
+ os.mkdir(events_directory, 0o700)
+ stdout_handle.write(_decode(str(e)))
+ stdout_handle.write(_decode('\n'))
+
+ job_start = time.time()
+ while child.isalive():
+ result_id = child.expect(password_patterns,
+ timeout=self.config.pexpect_timeout,
+ searchwindowsize=100)
+ password = password_values[result_id]
+ if password is not None:
+ child.sendline(password)
+ self.last_stdout_update = time.time()
+ if self.cancel_callback:
+ try:
+ self.canceled = self.cancel_callback()
+ except Exception as e:
+ # TODO: logger.exception('Could not check cancel callback - cancelling immediately')
+ #if isinstance(extra_update_fields, dict):
+ # extra_update_fields['job_explanation'] = "System error during job execution, check system logs"
+ raise CallbackError("Exception in Cancel Callback: {}".format(e))
+ if self.config.job_timeout and not self.canceled and (time.time() - job_start) > self.config.job_timeout:
+ self.timed_out = True
+ # if isinstance(extra_update_fields, dict):
+ # extra_update_fields['job_explanation'] = "Job terminated due to timeout"
+ if self.canceled or self.timed_out or self.errored:
+ Runner.handle_termination(child.pid, is_cancel=self.canceled)
+ if self.config.idle_timeout and (time.time() - self.last_stdout_update) > self.config.idle_timeout:
+ Runner.handle_termination(child.pid, is_cancel=False)
+ self.timed_out = True
+
+ stdout_handle.flush()
+ stdout_handle.close()
+ child.close()
+
+ if self.canceled:
+ self.status_callback('canceled')
+ elif child.exitstatus == 0 and not self.timed_out:
+ self.status_callback('successful')
+ elif self.timed_out:
+ self.status_callback('timeout')
+ else:
+ self.status_callback('failed')
+ self.rc = child.exitstatus if not (self.timed_out or self.canceled) else 254
+ for filename, data in [
+ ('status', self.status),
+ ('rc', self.rc),
+ ]:
+ artifact_path = os.path.join(self.config.artifact_dir, filename)
+ if not os.path.exists(artifact_path):
+ os.close(os.open(artifact_path, os.O_CREAT, stat.S_IRUSR | stat.S_IWUSR))
+ with open(artifact_path, 'w') as f:
+ f.write(str(data))
+ if self.config.directory_isolation_path and self.config.directory_isolation_cleanup:
+ shutil.rmtree(self.config.directory_isolation_path)
+ if self.config.process_isolation and self.config.process_isolation_path_actual:
+ def _delete(retries=15):
+ try:
+ shutil.rmtree(self.config.process_isolation_path_actual)
+ except OSError as e:
+ res = False
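+                    # errno 16 (EBUSY): the directory may still be in use, so
+                    # sleep and retry with the remaining retry budget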
+ if e.errno == 16 and retries > 0:
+ time.sleep(1)
+ res = _delete(retries=retries - 1)
+ if not res:
+ raise
+ return True
+ _delete()
+ if self.config.resource_profiling:
+ cmd = 'cgdelete -g cpuacct,memory,pids:{}'.format(cgroup_path)
+ proc = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
+ _, stderr = proc.communicate()
+ if proc.returncode:
+ logger.error('Failed to delete cgroup: {}'.format(stderr))
+ raise RuntimeError('Failed to delete cgroup: {}'.format(stderr))
+
+ if self.finished_callback is not None:
+ try:
+ self.finished_callback(self)
+ except Exception as e:
+ raise CallbackError("Exception in Finished Callback: {}".format(e))
+ return self.status, self.rc
+
+ @property
+ def stdout(self):
+ '''
+ Returns an open file handle to the stdout representing the Ansible run
+ '''
+ stdout_path = os.path.join(self.config.artifact_dir, 'stdout')
+ if not os.path.exists(stdout_path):
+ raise AnsibleRunnerException("stdout missing")
+ return open(os.path.join(self.config.artifact_dir, 'stdout'), 'r')
+
+ @property
+ def events(self):
+ '''
+ A generator that will return all ansible job events in the order that they were emitted from Ansible
+
+ Example:
+
+ {
+ "event":"runner_on_ok",
+ "uuid":"00a50d9c-161a-4b74-b978-9f60becaf209",
+               "stdout":"ok: [localhost] => {\\r\\n    \\"msg\\":\\"Test!\\"\\r\\n}",
+ "counter":6,
+ "pid":740,
+ "created":"2018-04-05T18:24:36.096725",
+ "end_line":10,
+ "start_line":7,
+ "event_data":{
+ "play_pattern":"all",
+ "play":"all",
+ "task":"debug",
+ "task_args":"msg=Test!",
+ "remote_addr":"localhost",
+ "res":{
+ "msg":"Test!",
+ "changed":false,
+ "_ansible_verbose_always":true,
+ "_ansible_no_log":false
+ },
+ "pid":740,
+ "play_uuid":"0242ac11-0002-443b-cdb1-000000000006",
+ "task_uuid":"0242ac11-0002-443b-cdb1-000000000008",
+ "event_loop":null,
+ "playbook_uuid":"634edeee-3228-4c17-a1b4-f010fdd42eb2",
+ "playbook":"test.yml",
+ "task_action":"debug",
+ "host":"localhost",
+ "task_path":"/tmp/demo/project/test.yml:3"
+ }
+ }
+ '''
+ # collection of all the events that were yielded
+ old_events = {}
+ event_path = os.path.join(self.config.artifact_dir, 'job_events')
+
+ # Wait for events dir to be created
+ now = datetime.datetime.now()
+ while not os.path.exists(event_path):
+ time.sleep(0.05)
+ wait_time = datetime.datetime.now() - now
+ if wait_time.total_seconds() > 60:
+ raise AnsibleRunnerException("events directory is missing: %s" % event_path)
+
+ while self.status == "running":
+ for event, old_evnts in collect_new_events(event_path, old_events):
+ old_events = old_evnts
+ yield event
+
+ # collect new events that were written after the playbook has finished
+ for event, old_evnts in collect_new_events(event_path, old_events):
+ old_events = old_evnts
+ yield event
+
+ @property
+ def stats(self):
+ '''
+ Returns the final high level stats from the Ansible run
+
+ Example:
+ {'dark': {}, 'failures': {}, 'skipped': {}, 'ok': {u'localhost': 2}, 'processed': {u'localhost': 1}}
+ '''
+ last_event = list(filter(lambda x: 'event' in x and x['event'] == 'playbook_on_stats',
+ self.events))
+ if not last_event:
+ return None
+ last_event = last_event[0]['event_data']
+        return dict(skipped=last_event.get('skipped', {}),
+                    ok=last_event.get('ok', {}),
+                    dark=last_event.get('dark', {}),
+                    failures=last_event.get('failures', {}),
+                    processed=last_event.get('processed', {}),
+                    changed=last_event.get('changed', {}))
+
+ def host_events(self, host):
+ '''
+ Given a host name, this will return all task events executed on that host
+ '''
+ all_host_events = filter(lambda x: 'event_data' in x and 'host' in x['event_data'] and x['event_data']['host'] == host,
+ self.events)
+ return all_host_events
+
+ @classmethod
+ def handle_termination(cls, pid, pidfile=None, is_cancel=True):
+ '''
+ Internal method to terminate a subprocess spawned by `pexpect` representing an invocation of runner.
+
+        :param pid: the process id of the running job.
+ :param pidfile: the daemon's PID file
+ :param is_cancel: flag showing whether this termination is caused by
+ instance's cancel_flag.
+ '''
+
+ try:
+ main_proc = psutil.Process(pid=pid)
+ child_procs = main_proc.children(recursive=True)
+ for child_proc in child_procs:
+ try:
+ os.kill(child_proc.pid, signal.SIGKILL)
+ except (TypeError, OSError):
+ pass
+ os.kill(main_proc.pid, signal.SIGKILL)
+ try:
+ os.remove(pidfile)
+            except OSError:
+ pass
+ except (TypeError, psutil.Error, OSError):
+ try:
+ os.kill(pid, signal.SIGKILL)
+            except OSError:
+ pass
+
+ def get_fact_cache(self, host):
+ '''
+ Get the entire fact cache only if the fact_cache_type is 'jsonfile'
+ '''
+ if self.config.fact_cache_type != 'jsonfile':
+ raise Exception('Unsupported fact cache type. Only "jsonfile" is supported for reading and writing facts from ansible-runner')
+ fact_cache = os.path.join(self.config.fact_cache, host)
+ if os.path.exists(fact_cache):
+ with open(fact_cache) as f:
+ return json.loads(f.read())
+ return {}
+
+ def set_fact_cache(self, host, data):
+ '''
+ Set the entire fact cache data only if the fact_cache_type is 'jsonfile'
+ '''
+ if self.config.fact_cache_type != 'jsonfile':
+ raise Exception('Unsupported fact cache type. Only "jsonfile" is supported for reading and writing facts from ansible-runner')
+ fact_cache = os.path.join(self.config.fact_cache, host)
+ if not os.path.exists(os.path.dirname(fact_cache)):
+ os.makedirs(os.path.dirname(fact_cache), mode=0o700)
+ with open(fact_cache, 'w') as f:
+ return f.write(json.dumps(data))
diff --git a/ansible_runner/runner_config.py b/ansible_runner/runner_config.py
new file mode 100644
index 0000000..58ac590
--- /dev/null
+++ b/ansible_runner/runner_config.py
@@ -0,0 +1,533 @@
+############################
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+import logging
+import os
+import pexpect
+import re
+import shlex
+import stat
+import tempfile
+
+import six
+from uuid import uuid4
+try:
+ from collections.abc import Mapping
+except ImportError:
+ from collections import Mapping
+
+from distutils.dir_util import copy_tree
+
+from six import iteritems, string_types, text_type
+
+from ansible_runner import output
+from ansible_runner.exceptions import ConfigurationError
+from ansible_runner.loader import ArtifactLoader
+from ansible_runner.utils import (
+ open_fifo_write,
+ args2cmdline,
+)
+
+logger = logging.getLogger('ansible-runner')
+
+
+class ExecutionMode():
+ NONE = 0
+ ANSIBLE = 1
+ ANSIBLE_PLAYBOOK = 2
+ RAW = 3
+
+
+class RunnerConfig(object):
+ """
+ A ``Runner`` configuration object that's meant to encapsulate the configuration used by the
+ :py:mod:`ansible_runner.runner.Runner` object to launch and manage the invocation of ``ansible``
+ and ``ansible-playbook``
+
+ Typically this object is initialized for you when using the standard ``run`` interfaces in :py:mod:`ansible_runner.interface`
+ but can be used to construct the ``Runner`` configuration to be invoked elsewhere. It can also be overridden to provide different
+ functionality to the Runner object.
+
+ :Example:
+
+ >>> rc = RunnerConfig(...)
+ >>> r = Runner(config=rc)
+ >>> r.run()
+
+ """
+
+ def __init__(self,
+ private_data_dir=None, playbook=None, ident=uuid4(),
+ inventory=None, roles_path=None, limit=None, module=None, module_args=None,
+ verbosity=None, quiet=False, json_mode=False, artifact_dir=None,
+ rotate_artifacts=0, host_pattern=None, binary=None, extravars=None, suppress_ansible_output=False,
+ process_isolation=False, process_isolation_executable=None, process_isolation_path=None,
+ process_isolation_hide_paths=None, process_isolation_show_paths=None, process_isolation_ro_paths=None,
+ resource_profiling=False, resource_profiling_base_cgroup='ansible-runner', resource_profiling_cpu_poll_interval=0.25,
+ resource_profiling_memory_poll_interval=0.25, resource_profiling_pid_poll_interval=0.25,
+ resource_profiling_results_dir=None,
+ tags=None, skip_tags=None, fact_cache_type='jsonfile', fact_cache=None, ssh_key=None,
+ project_dir=None, directory_isolation_base_path=None, envvars=None, forks=None, cmdline=None, omit_event_data=False,
+ only_failed_event_data=False):
+ self.private_data_dir = os.path.abspath(private_data_dir)
+ self.ident = str(ident)
+ self.json_mode = json_mode
+ self.playbook = playbook
+ self.inventory = inventory
+ self.roles_path = roles_path
+ self.limit = limit
+ self.module = module
+ self.module_args = module_args
+ self.host_pattern = host_pattern
+ self.binary = binary
+ self.rotate_artifacts = rotate_artifacts
+
+ if artifact_dir is None:
+ self.artifact_dir = os.path.join(self.private_data_dir, 'artifacts')
+ else:
+ self.artifact_dir = os.path.abspath(artifact_dir)
+
+ if self.ident is not None:
+ self.artifact_dir = os.path.join(self.artifact_dir, "{}".format(self.ident))
+
+ self.extra_vars = extravars
+ self.process_isolation = process_isolation
+ self.process_isolation_executable = process_isolation_executable
+ self.process_isolation_path = process_isolation_path
+ self.process_isolation_path_actual = None
+ self.process_isolation_hide_paths = process_isolation_hide_paths
+ self.process_isolation_show_paths = process_isolation_show_paths
+ self.process_isolation_ro_paths = process_isolation_ro_paths
+ self.resource_profiling = resource_profiling
+ self.resource_profiling_base_cgroup = resource_profiling_base_cgroup
+ self.resource_profiling_cpu_poll_interval = resource_profiling_cpu_poll_interval
+ self.resource_profiling_memory_poll_interval = resource_profiling_memory_poll_interval
+ self.resource_profiling_pid_poll_interval = resource_profiling_pid_poll_interval
+ self.resource_profiling_results_dir = resource_profiling_results_dir
+
+ self.directory_isolation_path = directory_isolation_base_path
+ if not project_dir:
+ self.project_dir = os.path.join(self.private_data_dir, 'project')
+ else:
+ self.project_dir = project_dir
+ self.verbosity = verbosity
+ self.quiet = quiet
+ self.suppress_ansible_output = suppress_ansible_output
+ self.loader = ArtifactLoader(self.private_data_dir)
+ self.tags = tags
+ self.skip_tags = skip_tags
+ self.fact_cache_type = fact_cache_type
+ self.fact_cache = os.path.join(self.artifact_dir, fact_cache or 'fact_cache') if self.fact_cache_type == 'jsonfile' else None
+ self.ssh_key_data = ssh_key
+ self.execution_mode = ExecutionMode.NONE
+ self.envvars = envvars
+ self.forks = forks
+ self.cmdline_args = cmdline
+ self.omit_event_data = omit_event_data
+ self.only_failed_event_data = only_failed_event_data
+
+ def prepare(self):
+ """
+ Performs basic checks and then properly invokes
+
+ - prepare_inventory
+ - prepare_env
+ - prepare_command
+
+ It's also responsible for wrapping the command with the proper ssh agent invocation
+ and setting early ANSIBLE_ environment variables.
+ """
+ # ansible_path = find_executable('ansible')
+ # if ansible_path is None or not os.access(ansible_path, os.X_OK):
+ # raise ConfigurationError("Ansible not found. Make sure that it is installed.")
+ if self.private_data_dir is None:
+ raise ConfigurationError("Runner Base Directory is not defined")
+ if self.module and self.playbook:
+ raise ConfigurationError("Only one of playbook and module options are allowed")
+ if not os.path.exists(self.artifact_dir):
+ os.makedirs(self.artifact_dir, mode=0o700)
+ if self.directory_isolation_path is not None:
+ self.directory_isolation_path = tempfile.mkdtemp(prefix='runner_di_', dir=self.directory_isolation_path)
+ if os.path.exists(self.project_dir):
+ output.debug("Copying directory tree from {} to {} for working directory isolation".format(self.project_dir,
+ self.directory_isolation_path))
+ copy_tree(self.project_dir, self.directory_isolation_path, preserve_symlinks=True)
+
+ self.prepare_inventory()
+ self.prepare_env()
+ self.prepare_command()
+
+ if self.execution_mode == ExecutionMode.ANSIBLE_PLAYBOOK and self.playbook is None:
+ raise ConfigurationError("Runner playbook required when running ansible-playbook")
+ elif self.execution_mode == ExecutionMode.ANSIBLE and self.module is None:
+ raise ConfigurationError("Runner module required when running ansible")
+ elif self.execution_mode == ExecutionMode.NONE:
+ raise ConfigurationError("No executable for runner to run")
+
+ # write the SSH key data into a fifo read by ssh-agent
+ if self.ssh_key_data:
+ self.ssh_key_path = os.path.join(self.artifact_dir, 'ssh_key_data')
+ open_fifo_write(self.ssh_key_path, self.ssh_key_data)
+ self.command = self.wrap_args_with_ssh_agent(self.command, self.ssh_key_path)
+
+ # Use local callback directory
+ callback_dir = self.env.get('AWX_LIB_DIRECTORY', os.getenv('AWX_LIB_DIRECTORY'))
+ if callback_dir is None:
+ callback_dir = os.path.join(os.path.split(os.path.abspath(__file__))[0],
+ "callbacks")
+ python_path = self.env.get('PYTHONPATH', os.getenv('PYTHONPATH', ''))
+ if python_path and not python_path.endswith(':'):
+ python_path += ':'
+        self.env['ANSIBLE_CALLBACK_PLUGINS'] = ':'.join(filter(None, (self.env.get('ANSIBLE_CALLBACK_PLUGINS'), callback_dir)))
+ if 'AD_HOC_COMMAND_ID' in self.env:
+ self.env['ANSIBLE_STDOUT_CALLBACK'] = 'minimal'
+ else:
+ self.env['ANSIBLE_STDOUT_CALLBACK'] = 'awx_display'
+ self.env['ANSIBLE_RETRY_FILES_ENABLED'] = 'False'
+ if 'ANSIBLE_HOST_KEY_CHECKING' not in self.env:
+ self.env['ANSIBLE_HOST_KEY_CHECKING'] = 'False'
+ self.env['AWX_ISOLATED_DATA_DIR'] = self.artifact_dir
+
+ if self.resource_profiling:
+ callback_whitelist = os.environ.get('ANSIBLE_CALLBACK_WHITELIST', '').strip()
+ self.env['ANSIBLE_CALLBACK_WHITELIST'] = ','.join(filter(None, [callback_whitelist, 'cgroup_perf_recap']))
+ self.env['CGROUP_CONTROL_GROUP'] = '{}/{}'.format(self.resource_profiling_base_cgroup, self.ident)
+ if self.resource_profiling_results_dir:
+ cgroup_output_dir = self.resource_profiling_results_dir
+ else:
+ cgroup_output_dir = os.path.normpath(os.path.join(self.private_data_dir, 'profiling_data'))
+
+ # Create results directory if it does not exist
+ if not os.path.isdir(cgroup_output_dir):
+ os.mkdir(cgroup_output_dir, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
+
+ self.env['CGROUP_OUTPUT_DIR'] = cgroup_output_dir
+ self.env['CGROUP_OUTPUT_FORMAT'] = 'json'
+ self.env['CGROUP_CPU_POLL_INTERVAL'] = str(self.resource_profiling_cpu_poll_interval)
+ self.env['CGROUP_MEMORY_POLL_INTERVAL'] = str(self.resource_profiling_memory_poll_interval)
+ self.env['CGROUP_PID_POLL_INTERVAL'] = str(self.resource_profiling_pid_poll_interval)
+ self.env['CGROUP_FILE_PER_TASK'] = 'True'
+ self.env['CGROUP_WRITE_FILES'] = 'True'
+ self.env['CGROUP_DISPLAY_RECAP'] = 'False'
+
+ self.env['PYTHONPATH'] = python_path + callback_dir
+ if self.roles_path:
+ self.env['ANSIBLE_ROLES_PATH'] = ':'.join(self.roles_path)
+
+ if self.process_isolation:
+ self.command = self.wrap_args_with_process_isolation(self.command)
+
+ if self.resource_profiling and self.execution_mode == ExecutionMode.ANSIBLE_PLAYBOOK:
+ self.command = self.wrap_args_with_cgexec(self.command)
+
+ if self.fact_cache_type == 'jsonfile':
+ self.env['ANSIBLE_CACHE_PLUGIN'] = 'jsonfile'
+ self.env['ANSIBLE_CACHE_PLUGIN_CONNECTION'] = self.fact_cache
+
+ self.env["RUNNER_OMIT_EVENTS"] = str(self.omit_event_data)
+ self.env["RUNNER_ONLY_FAILED_EVENTS"] = str(self.only_failed_event_data)
+
+ def prepare_inventory(self):
+ """
+        Sets the inventory to the default location under ``private_data_dir`` if it was not overridden via the constructor.
+ """
+ if self.inventory is None and os.path.exists(os.path.join(self.private_data_dir, "inventory")):
+ self.inventory = os.path.join(self.private_data_dir, "inventory")
+
+ def prepare_env(self):
+ """
+ Manages reading environment metadata files under ``private_data_dir`` and merging/updating
+        with existing values so the :py:class:`ansible_runner.runner.Runner` object can read and use them easily.
+ """
+ try:
+ passwords = self.loader.load_file('env/passwords', Mapping)
+ self.expect_passwords = {
+ re.compile(pattern, re.M): password
+ for pattern, password in iteritems(passwords)
+ }
+ except ConfigurationError:
+ output.debug('Not loading passwords')
+ self.expect_passwords = dict()
+ self.expect_passwords[pexpect.TIMEOUT] = None
+ self.expect_passwords[pexpect.EOF] = None
+
+ # seed env with existing shell env
+ self.env = os.environ.copy()
+ if self.envvars and isinstance(self.envvars, dict):
+ if six.PY2:
+ self.env.update({k.decode('utf-8'):v.decode('utf-8') for k, v in self.envvars.items()})
+ else:
+ self.env.update({k:v for k, v in self.envvars.items()})
+ try:
+ envvars = self.loader.load_file('env/envvars', Mapping)
+ if envvars:
+ self.env.update({six.text_type(k):six.text_type(v) for k, v in envvars.items()})
+ except ConfigurationError:
+ output.debug("Not loading environment vars")
+ # Still need to pass default environment to pexpect
+
+ try:
+ self.settings = self.loader.load_file('env/settings', Mapping)
+ except ConfigurationError:
+ output.debug("Not loading settings")
+ self.settings = dict()
+
+ try:
+ if self.ssh_key_data is None:
+ self.ssh_key_data = self.loader.load_file('env/ssh_key', string_types)
+ except ConfigurationError:
+ output.debug("Not loading ssh key")
+ self.ssh_key_data = None
+
+ self.idle_timeout = self.settings.get('idle_timeout', None)
+ self.job_timeout = self.settings.get('job_timeout', None)
+ self.pexpect_timeout = self.settings.get('pexpect_timeout', 5)
+
+ self.process_isolation = self.settings.get('process_isolation', self.process_isolation)
+ self.process_isolation_executable = self.settings.get('process_isolation_executable', self.process_isolation_executable)
+ self.process_isolation_path = self.settings.get('process_isolation_path', self.process_isolation_path)
+ self.process_isolation_hide_paths = self.settings.get('process_isolation_hide_paths', self.process_isolation_hide_paths)
+ self.process_isolation_show_paths = self.settings.get('process_isolation_show_paths', self.process_isolation_show_paths)
+ self.process_isolation_ro_paths = self.settings.get('process_isolation_ro_paths', self.process_isolation_ro_paths)
+ self.resource_profiling = self.settings.get('resource_profiling', self.resource_profiling)
+ self.resource_profiling_base_cgroup = self.settings.get('resource_profiling_base_cgroup', self.resource_profiling_base_cgroup)
+ self.resource_profiling_cpu_poll_interval = self.settings.get('resource_profiling_cpu_poll_interval', self.resource_profiling_cpu_poll_interval)
+ self.resource_profiling_memory_poll_interval = self.settings.get('resource_profiling_memory_poll_interval',
+ self.resource_profiling_memory_poll_interval)
+ self.resource_profiling_pid_poll_interval = self.settings.get('resource_profiling_pid_poll_interval', self.resource_profiling_pid_poll_interval)
+ self.resource_profiling_results_dir = self.settings.get('resource_profiling_results_dir', self.resource_profiling_results_dir)
+ self.pexpect_use_poll = self.settings.get('pexpect_use_poll', True)
+ self.suppress_ansible_output = self.settings.get('suppress_ansible_output', self.quiet)
+ self.directory_isolation_cleanup = bool(self.settings.get('directory_isolation_cleanup', True))
+
+ if 'AD_HOC_COMMAND_ID' in self.env or not os.path.exists(self.project_dir):
+ self.cwd = self.private_data_dir
+ else:
+ if self.directory_isolation_path is not None:
+ self.cwd = self.directory_isolation_path
+ else:
+ self.cwd = self.project_dir
+
+ if 'fact_cache' in self.settings:
+ if 'fact_cache_type' in self.settings:
+ if self.settings['fact_cache_type'] == 'jsonfile':
+ self.fact_cache = os.path.join(self.artifact_dir, self.settings['fact_cache'])
+ else:
+ self.fact_cache = os.path.join(self.artifact_dir, self.settings['fact_cache'])
+
+ def prepare_command(self):
+ """
+ Determines if the literal ``ansible`` or ``ansible-playbook`` commands are given
+ and if not calls :py:meth:`ansible_runner.runner_config.RunnerConfig.generate_ansible_command`
+ """
+ try:
+ cmdline_args = self.loader.load_file('args', string_types, encoding=None)
+ if six.PY2 and isinstance(cmdline_args, text_type):
+ cmdline_args = cmdline_args.encode('utf-8')
+ self.command = shlex.split(cmdline_args)
+ self.execution_mode = ExecutionMode.RAW
+ except ConfigurationError:
+ self.command = self.generate_ansible_command()
+
+ def generate_ansible_command(self):
+ """
+        Given that the ``RunnerConfig`` preparation methods have been run to gather the inputs, this method
+ will generate the ``ansible`` or ``ansible-playbook`` command that will be used by the
+ :py:class:`ansible_runner.runner.Runner` object to start the process
+ """
+ if self.binary is not None:
+ base_command = self.binary
+ self.execution_mode = ExecutionMode.RAW
+ elif self.module is not None:
+ base_command = 'ansible'
+ self.execution_mode = ExecutionMode.ANSIBLE
+ else:
+ base_command = 'ansible-playbook'
+ self.execution_mode = ExecutionMode.ANSIBLE_PLAYBOOK
+
+ exec_list = [base_command]
+
+ try:
+ if self.cmdline_args:
+ cmdline_args = self.cmdline_args
+ else:
+ cmdline_args = self.loader.load_file('env/cmdline', string_types, encoding=None)
+
+ if six.PY2 and isinstance(cmdline_args, text_type):
+ cmdline_args = cmdline_args.encode('utf-8')
+
+ args = shlex.split(cmdline_args)
+ exec_list.extend(args)
+ except ConfigurationError:
+ pass
+
+ if self.inventory is None:
+ pass
+ elif isinstance(self.inventory, list):
+ for i in self.inventory:
+ exec_list.append("-i")
+ exec_list.append(i)
+ else:
+ exec_list.append("-i")
+ exec_list.append(self.inventory)
+
+ if self.limit is not None:
+ exec_list.append("--limit")
+ exec_list.append(self.limit)
+
+ if self.loader.isfile('env/extravars'):
+ exec_list.extend(['-e', '@{}'.format(self.loader.abspath('env/extravars'))])
+
+ if self.extra_vars:
+ if isinstance(self.extra_vars, dict) and self.extra_vars:
+ extra_vars_list = []
+ for k in self.extra_vars:
+ if isinstance(self.extra_vars[k], str):
+ extra_vars_list.append("\"{}\":\"{}\"".format(k, self.extra_vars[k]))
+ else:
+ extra_vars_list.append("\"{}\":{}".format(k, self.extra_vars[k]))
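+                # illustrative result: -e {"ansible_connection":"local","count":2}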
+ exec_list.extend(
+ [
+ '-e',
+ '{%s}' % ','.join(extra_vars_list)
+ ]
+ )
+ elif self.loader.isfile(self.extra_vars):
+ exec_list.extend(['-e', '@{}'.format(self.loader.abspath(self.extra_vars))])
+
+ if self.verbosity:
+ v = 'v' * self.verbosity
+ exec_list.append('-{}'.format(v))
+
+ if self.tags:
+ exec_list.extend(['--tags', '{}'.format(self.tags)])
+
+ if self.skip_tags:
+ exec_list.extend(['--skip-tags', '{}'.format(self.skip_tags)])
+
+ if self.forks:
+ exec_list.extend(['--forks', '{}'.format(self.forks)])
+
+ # Other parameters
+ if self.execution_mode == ExecutionMode.ANSIBLE_PLAYBOOK:
+ exec_list.append(self.playbook)
+ elif self.execution_mode == ExecutionMode.ANSIBLE:
+ exec_list.append("-m")
+ exec_list.append(self.module)
+
+ if self.module_args is not None:
+ exec_list.append("-a")
+ exec_list.append(self.module_args)
+
+ if self.host_pattern is not None:
+ exec_list.append(self.host_pattern)
+
+ return exec_list
+
+ def build_process_isolation_temp_dir(self):
+ '''
+ Create a temporary directory for process isolation to use.
+ '''
+ path = tempfile.mkdtemp(prefix='ansible_runner_pi_', dir=self.process_isolation_path)
+ os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
+ return path
+
+ def wrap_args_with_cgexec(self, args):
+ '''
+ Wrap existing command line with cgexec in order to profile resource usage
+ '''
+ new_args = ['cgexec', '--sticky', '-g', 'cpuacct,memory,pids:{}/{}'.format(self.resource_profiling_base_cgroup, self.ident)]
+ new_args.extend(args)
+ return new_args
+
+ def wrap_args_with_process_isolation(self, args):
+ '''
+        Wrap the existing command line with bwrap to restrict access to:
+        - self.process_isolation_path (generally /tmp), except for the run's own temp files
+ '''
+ cwd = os.path.realpath(self.cwd)
+ self.process_isolation_path_actual = self.build_process_isolation_temp_dir()
+ new_args = [self.process_isolation_executable or 'bwrap', '--die-with-parent', '--unshare-pid', '--dev-bind', '/', '/', '--proc', '/proc']
+
+ for path in sorted(set(self.process_isolation_hide_paths or [])):
+ if not os.path.exists(path):
+ logger.debug('hide path not found: {0}'.format(path))
+ continue
+ path = os.path.realpath(path)
+ if os.path.isdir(path):
+ new_path = tempfile.mkdtemp(dir=self.process_isolation_path_actual)
+ os.chmod(new_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
+ else:
+ handle, new_path = tempfile.mkstemp(dir=self.process_isolation_path_actual)
+ os.close(handle)
+ os.chmod(new_path, stat.S_IRUSR | stat.S_IWUSR)
+ new_args.extend(['--bind', '{0}'.format(new_path), '{0}'.format(path)])
+
+ if self.private_data_dir:
+ show_paths = [self.private_data_dir]
+ else:
+ show_paths = [cwd]
+
+ for path in sorted(set(self.process_isolation_ro_paths or [])):
+ if not os.path.exists(path):
+ logger.debug('read-only path not found: {0}'.format(path))
+ continue
+ path = os.path.realpath(path)
+ new_args.extend(['--ro-bind', '{0}'.format(path), '{0}'.format(path)])
+
+ show_paths.extend(self.process_isolation_show_paths or [])
+ for path in sorted(set(show_paths)):
+ if not os.path.exists(path):
+ logger.debug('show path not found: {0}'.format(path))
+ continue
+ path = os.path.realpath(path)
+ new_args.extend(['--bind', '{0}'.format(path), '{0}'.format(path)])
+
+ if self.execution_mode == ExecutionMode.ANSIBLE_PLAYBOOK:
+ # playbook runs should cwd to the SCM checkout dir
+ if self.directory_isolation_path is not None:
+ new_args.extend(['--chdir', os.path.realpath(self.directory_isolation_path)])
+ else:
+ new_args.extend(['--chdir', os.path.realpath(self.project_dir)])
+ elif self.execution_mode == ExecutionMode.ANSIBLE:
+ # ad-hoc runs should cwd to the root of the private data dir
+ new_args.extend(['--chdir', os.path.realpath(self.private_data_dir)])
+
+ new_args.extend(args)
+ return new_args
+
+ def wrap_args_with_ssh_agent(self, args, ssh_key_path, ssh_auth_sock=None, silence_ssh_add=False):
+ """
+ Given an existing command line and parameterization this will return the same command line wrapped with the
+ necessary calls to ``ssh-agent``
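+
+        Example (illustrative; the key path is hypothetical): wrapping
+        ``['ansible-playbook', 'main.yml']`` with a key at ``/tmp/k`` produces
+        roughly::
+
+            ssh-agent sh -c 'ssh-add /tmp/k && rm -f /tmp/k && ansible-playbook main.yml'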
+ """
+ if ssh_key_path:
+ ssh_add_command = args2cmdline('ssh-add', ssh_key_path)
+ if silence_ssh_add:
+ ssh_add_command = ' '.join([ssh_add_command, '2>/dev/null'])
+ cmd = ' && '.join([ssh_add_command,
+ args2cmdline('rm', '-f', ssh_key_path),
+ args2cmdline(*args)])
+ args = ['ssh-agent']
+ if ssh_auth_sock:
+ args.extend(['-a', ssh_auth_sock])
+ args.extend(['sh', '-c', cmd])
+ return args
+
diff --git a/ansible_runner/utils.py b/ansible_runner/utils.py
new file mode 100644
index 0000000..ca14683
--- /dev/null
+++ b/ansible_runner/utils.py
@@ -0,0 +1,389 @@
+
+import json
+import sys
+import re
+import os
+import stat
+import fcntl
+import shutil
+import hashlib
+import tempfile
+import subprocess
+import base64
+import threading
+import pipes
+import uuid
+import codecs
+
+try:
+ from collections.abc import Iterable, Mapping
+except ImportError:
+ from collections import Iterable, Mapping
+from io import StringIO
+from six import string_types, PY2, PY3, text_type, binary_type
+
+
+class Bunch(object):
+
+ '''
+ Collect a bunch of variables together in an object.
+ This is a slight modification of Alex Martelli's and Doug Hudgeon's Bunch pattern.
+ '''
+
+ def __init__(self, **kwargs):
+ self.update(**kwargs)
+
+ def update(self, **kwargs):
+ self.__dict__.update(kwargs)
+
+
+def isplaybook(obj):
+ '''
+ Inspects the object and returns if it is a playbook
+
+ Args:
+ obj (object): The object to be inspected by this function
+
+ Returns:
+        boolean: True if the object is a non-string, non-mapping iterable (i.e. looks like a playbook) and False if it is not
+ '''
+ return isinstance(obj, Iterable) and (not isinstance(obj, string_types) and not isinstance(obj, Mapping))
+
+
+def isinventory(obj):
+ '''
+ Inspects the object and returns if it is an inventory
+
+ Args:
+ obj (object): The object to be inspected by this function
+
+ Returns:
+        boolean: True if the object is a mapping or a string (an inventory path or content) and False if it is not
+ '''
+ return isinstance(obj, Mapping) or isinstance(obj, string_types)
+
+
+def check_isolation_executable_installed(isolation_executable):
+ '''
+    Check that the given process isolation executable (e.g. bwrap) is installed.
+ '''
+ cmd = [isolation_executable, '--version']
+ try:
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ proc.communicate()
+ return bool(proc.returncode == 0)
+ except (OSError, ValueError) as e:
+ if isinstance(e, ValueError) or getattr(e, 'errno', 1) != 2: # ENOENT, no such file or directory
+ raise RuntimeError('bwrap unavailable for unexpected reason.')
+ return False
+
+
+def dump_artifact(obj, path, filename=None):
+ '''
+ Write the artifact to disk at the specified path
+
+ Args:
+ obj (string): The string object to be dumped to disk in the specified
+ path. The artifact filename will be automatically created
+
+ path (string): The full path to the artifacts data directory.
+
+ filename (string, optional): The name of file to write the artifact to.
+ If the filename is not provided, then one will be generated.
+
+ Returns:
+ string: The full path filename for the artifact that was generated
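+
+    Example (a sketch; the directory is hypothetical):
+
+        >>> dump_artifact('{"foo": "bar"}', '/tmp/demo/env', 'extravars')
+        '/tmp/demo/env/extravars'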
+ '''
+ p_sha1 = None
+
+ if not os.path.exists(path):
+ os.makedirs(path, mode=0o700)
+ else:
+ p_sha1 = hashlib.sha1()
+ p_sha1.update(obj.encode(encoding='UTF-8'))
+
+ if filename is None:
+ fd, fn = tempfile.mkstemp(dir=path)
+ else:
+ fn = os.path.join(path, filename)
+
+ if os.path.exists(fn):
+ c_sha1 = hashlib.sha1()
+ with open(fn) as f:
+ contents = f.read()
+ c_sha1.update(contents.encode(encoding='UTF-8'))
+
+ if not os.path.exists(fn) or p_sha1.hexdigest() != c_sha1.hexdigest():
+ lock_fp = os.path.join(path, '.artifact_write_lock')
+ lock_fd = os.open(lock_fp, os.O_RDWR | os.O_CREAT, stat.S_IRUSR | stat.S_IWUSR)
+ fcntl.lockf(lock_fd, fcntl.LOCK_EX)
+
+ try:
+ with open(fn, 'w') as f:
+ os.chmod(fn, stat.S_IRUSR)
+ f.write(str(obj))
+ finally:
+ fcntl.lockf(lock_fd, fcntl.LOCK_UN)
+ os.close(lock_fd)
+ os.remove(lock_fp)
+
+ return fn
+
+
+def cleanup_artifact_dir(path, num_keep=0):
+ # 0 disables artifact dir cleanup/rotation
+ if num_keep < 1:
+ return
+ all_paths = sorted([os.path.join(path, p) for p in os.listdir(path)],
+ key=lambda x: os.path.getmtime(x))
+ total_remove = len(all_paths) - num_keep
+ for f in range(total_remove):
+ shutil.rmtree(all_paths[f])
+
+
+def dump_artifacts(kwargs):
+ '''
+ Introspect the kwargs and dump objects to disk
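+
+    Example (a sketch): given ``{'private_data_dir': '/tmp/demo',
+    'extravars': {'foo': 'bar'}}``, the extravars dict is serialized to
+    ``/tmp/demo/env/extravars`` and then popped from the kwargs.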
+ '''
+ private_data_dir = kwargs.get('private_data_dir')
+ if not private_data_dir:
+ private_data_dir = tempfile.mkdtemp()
+ kwargs['private_data_dir'] = private_data_dir
+
+ if not os.path.exists(private_data_dir):
+ raise ValueError('private_data_dir path is either invalid or does not exist')
+
+ if 'role' in kwargs:
+ role = {'name': kwargs.pop('role')}
+ if 'role_vars' in kwargs:
+ role['vars'] = kwargs.pop('role_vars')
+
+ play = [{'hosts': kwargs.pop('hosts', 'all'), 'roles': [role]}]
+
+ if kwargs.pop('role_skip_facts', False):
+ play[0]['gather_facts'] = False
+
+ kwargs['playbook'] = play
+
+ if 'envvars' not in kwargs:
+ kwargs['envvars'] = {}
+
+ roles_path = kwargs.pop('roles_path', None)
+ if not roles_path:
+ roles_path = os.path.join(private_data_dir, 'roles')
+ else:
+ roles_path += ':{}'.format(os.path.join(private_data_dir, 'roles'))
+
+ kwargs['envvars']['ANSIBLE_ROLES_PATH'] = roles_path
+
+ obj = kwargs.get('playbook')
+ if obj and isplaybook(obj):
+ path = os.path.join(private_data_dir, 'project')
+ kwargs['playbook'] = dump_artifact(json.dumps(obj), path, 'main.json')
+
+ obj = kwargs.get('inventory')
+ if obj and isinventory(obj):
+ path = os.path.join(private_data_dir, 'inventory')
+ if isinstance(obj, Mapping):
+ kwargs['inventory'] = dump_artifact(json.dumps(obj), path, 'hosts.json')
+ elif isinstance(obj, string_types):
+ if not os.path.exists(obj):
+ kwargs['inventory'] = dump_artifact(obj, path, 'hosts')
+
+ for key in ('envvars', 'extravars', 'passwords', 'settings'):
+ obj = kwargs.get(key)
+ if obj and not os.path.exists(os.path.join(private_data_dir, 'env', key)):
+ path = os.path.join(private_data_dir, 'env')
+ dump_artifact(json.dumps(obj), path, key)
+ kwargs.pop(key)
+
+ for key in ('ssh_key', 'cmdline'):
+ obj = kwargs.get(key)
+ if obj and not os.path.exists(os.path.join(private_data_dir, 'env', key)):
+ path = os.path.join(private_data_dir, 'env')
+ dump_artifact(str(kwargs[key]), path, key)
+ kwargs.pop(key)
+
+
+def collect_new_events(event_path, old_events):
+ '''
+ Collect new events for the 'events' generator property
+ '''
+ dir_events = os.listdir(event_path)
+ dir_events_actual = []
+ for each_file in dir_events:
+ if re.match("^[0-9]+-.+json$", each_file):
+            if '-partial' not in each_file and each_file not in old_events:
+ dir_events_actual.append(each_file)
+ dir_events_actual.sort(key=lambda filenm: int(filenm.split("-", 1)[0]))
+ for event_file in dir_events_actual:
+ with codecs.open(os.path.join(event_path, event_file), 'r', encoding='utf-8') as event_file_actual:
+ try:
+ event = json.load(event_file_actual)
+ except ValueError:
+ break
+
+ old_events[event_file] = True
+ yield event, old_events
+
+
+class OutputEventFilter(object):
+ '''
+ File-like object that looks for encoded job events in stdout data.
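+
+    Events are framed by the display callback as base64-encoded JSON wrapped
+    in ``\x1b[K`` markers (see ``EVENT_DATA_RE`` below); a matched token looks
+    roughly like::
+
+        \x1b[K<base64 payload>\x1b[<n>D\x1b[K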
+ '''
+
+ EVENT_DATA_RE = re.compile(r'\x1b\[K((?:[A-Za-z0-9+/=]+\x1b\[\d+D)+)\x1b\[K')
+
+ def __init__(self, handle, event_callback,
+ suppress_ansible_output=False, output_json=False):
+ self._event_callback = event_callback
+ self._counter = 0
+ self._start_line = 0
+ self._handle = handle
+ self._buffer = StringIO()
+ self._last_chunk = ''
+ self._current_event_data = None
+ self.output_json = output_json
+ self.suppress_ansible_output = suppress_ansible_output
+
+ def flush(self):
+ self._handle.flush()
+
+ def write(self, data):
+ self._buffer.write(data)
+
+ # keep a sliding window of the last chunk written so we can detect
+ # event tokens and determine if we need to perform a search of the full
+ # buffer
+ should_search = '\x1b[K' in (self._last_chunk + data)
+ self._last_chunk = data
+
+ # Only bother searching the buffer if we recently saw a start/end
+ # token (\x1b[K)
+ while should_search:
+ value = self._buffer.getvalue()
+ match = self.EVENT_DATA_RE.search(value)
+ if not match:
+ break
+ try:
+ base64_data = re.sub(r'\x1b\[\d+D', '', match.group(1))
+ event_data = json.loads(base64.b64decode(base64_data).decode('utf-8'))
+ except ValueError:
+ event_data = {}
+ event_data = self._emit_event(value[:match.start()], event_data)
+ if not self.output_json:
+ stdout_actual = event_data['stdout'] if 'stdout' in event_data else None
+ else:
+ stdout_actual = json.dumps(event_data)
+ remainder = value[match.end():]
+ self._buffer = StringIO()
+ self._buffer.write(remainder)
+
+ if stdout_actual and stdout_actual != "{}":
+ if not self.suppress_ansible_output:
+ sys.stdout.write(
+ stdout_actual.encode('utf-8') if PY2 else stdout_actual
+ )
+ sys.stdout.write("\n")
+ sys.stdout.flush()
+ self._handle.write(stdout_actual + "\n")
+ self._handle.flush()
+
+ self._last_chunk = remainder
+ else:
+ if not self.suppress_ansible_output:
+ sys.stdout.write(
+ data.encode('utf-8') if PY2 else data
+ )
+ self._handle.write(data)
+ self._handle.flush()
+
+ # Verbose stdout outside of event data context
+ if data and '\n' in data and self._current_event_data is None:
+ # emit events for all complete lines we know about
+ lines = self._buffer.getvalue().splitlines(True) # keep ends
+ remainder = None
+ # if last line is not a complete line, then exclude it
+ if '\n' not in lines[-1]:
+ remainder = lines.pop()
+ # emit all complete lines
+ for line in lines:
+ self._emit_event(line)
+ self._buffer = StringIO()
+ # put final partial line back on buffer
+ if remainder:
+ self._buffer.write(remainder)
+
+ def close(self):
+ value = self._buffer.getvalue()
+ if value:
+ self._emit_event(value)
+ self._buffer = StringIO()
+ self._event_callback(dict(event='EOF'))
+ self._handle.close()
+
+ def _emit_event(self, buffered_stdout, next_event_data=None):
+ next_event_data = next_event_data or {}
+ if self._current_event_data:
+ event_data = self._current_event_data
+ stdout_chunks = [buffered_stdout]
+ elif buffered_stdout:
+ event_data = dict(event='verbose')
+ stdout_chunks = buffered_stdout.splitlines(True)
+ else:
+ event_data = dict()
+ stdout_chunks = []
+
+ for stdout_chunk in stdout_chunks:
+ if event_data.get('event') == 'verbose':
+ event_data['uuid'] = str(uuid.uuid4())
+ self._counter += 1
+ event_data['counter'] = self._counter
+ event_data['stdout'] = stdout_chunk[:-2] if len(stdout_chunk) > 2 else ""
+ n_lines = stdout_chunk.count('\n')
+ event_data['start_line'] = self._start_line
+ event_data['end_line'] = self._start_line + n_lines
+ self._start_line += n_lines
+ if self._event_callback:
+ self._event_callback(event_data)
+ if next_event_data.get('uuid', None):
+ self._current_event_data = next_event_data
+ else:
+ self._current_event_data = None
+ return event_data
+
+
+def open_fifo_write(path, data):
+ '''open_fifo_write opens the fifo named pipe in a new thread.
+ This blocks the thread until an external process (such as ssh-agent)
+ reads data from the pipe.
+ '''
+ os.mkfifo(path, stat.S_IRUSR | stat.S_IWUSR)
+ threading.Thread(target=lambda p, d: open(p, 'wb').write(d),
+ args=(path, data)).start()
+
+
+def args2cmdline(*args):
+ return ' '.join([pipes.quote(a) for a in args])
+
+
+def ensure_str(s, encoding='utf-8', errors='strict'):
+ """
+ Copied from six==1.12
+
+ Coerce *s* to `str`.
+ For Python 2:
+ - `unicode` -> encoded to `str`
+ - `str` -> `str`
+ For Python 3:
+ - `str` -> `str`
+ - `bytes` -> decoded to `str`
+ """
+ if not isinstance(s, (text_type, binary_type)):
+ raise TypeError("not expecting type '%s'" % type(s))
+ if PY2 and isinstance(s, text_type):
+ s = s.encode(encoding, errors)
+ elif PY3 and isinstance(s, binary_type):
+ s = s.decode(encoding, errors)
+ return s
diff --git a/bindep.txt b/bindep.txt
new file mode 100644
index 0000000..35c7de0
--- /dev/null
+++ b/bindep.txt
@@ -0,0 +1,7 @@
+# This is a cross-platform list tracking distribution packages needed by tests;
+# see https://docs.openstack.org/infra/bindep/ for additional information.
+
+gcc-c++ [test platform:rpm]
+python3-devel [test !platform:centos-7 platform:rpm]
+python3 [test !platform:centos-7 platform:rpm]
+python36 [test !platform:centos-7 !platform:fedora-28]
diff --git a/demo/env/envvars b/demo/env/envvars
new file mode 100644
index 0000000..8d0b681
--- /dev/null
+++ b/demo/env/envvars
@@ -0,0 +1,2 @@
+---
+TESTVAR: aval
diff --git a/demo/env/extravars b/demo/env/extravars
new file mode 100644
index 0000000..8d79467
--- /dev/null
+++ b/demo/env/extravars
@@ -0,0 +1,3 @@
+---
+ansible_connection: local
+test: val
diff --git a/demo/env/passwords b/demo/env/passwords
new file mode 100644
index 0000000..17949d0
--- /dev/null
+++ b/demo/env/passwords
@@ -0,0 +1,2 @@
+---
+"Password:\\s*?$": "some_password"
diff --git a/demo/env/settings b/demo/env/settings
new file mode 100644
index 0000000..693b1d3
--- /dev/null
+++ b/demo/env/settings
@@ -0,0 +1,4 @@
+---
+idle_timeout: 600
+job_timeout: 3600
+pexpect_timeout: 10
diff --git a/demo/env/ssh_key b/demo/env/ssh_key
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/demo/env/ssh_key
diff --git a/demo/inventory/hosts b/demo/inventory/hosts
new file mode 100644
index 0000000..2fbb50c
--- /dev/null
+++ b/demo/inventory/hosts
@@ -0,0 +1 @@
+localhost
diff --git a/demo/project/roles/testrole/README.md b/demo/project/roles/testrole/README.md
new file mode 100644
index 0000000..225dd44
--- /dev/null
+++ b/demo/project/roles/testrole/README.md
@@ -0,0 +1,38 @@
+Role Name
+=========
+
+A brief description of the role goes here.
+
+Requirements
+------------
+
+Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
+
+Role Variables
+--------------
+
+A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
+
+Dependencies
+------------
+
+A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
+
+Example Playbook
+----------------
+
+Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
+
+ - hosts: servers
+ roles:
+ - { role: username.rolename, x: 42 }
+
+License
+-------
+
+BSD
+
+Author Information
+------------------
+
+An optional section for the role authors to include contact information, or a website (HTML is not allowed).
diff --git a/demo/project/roles/testrole/defaults/main.yml b/demo/project/roles/testrole/defaults/main.yml
new file mode 100644
index 0000000..2a051be
--- /dev/null
+++ b/demo/project/roles/testrole/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+# defaults file for testrole
diff --git a/demo/project/roles/testrole/handlers/main.yml b/demo/project/roles/testrole/handlers/main.yml
new file mode 100644
index 0000000..17131ef
--- /dev/null
+++ b/demo/project/roles/testrole/handlers/main.yml
@@ -0,0 +1,2 @@
+---
+# handlers file for testrole
diff --git a/demo/project/roles/testrole/meta/main.yml b/demo/project/roles/testrole/meta/main.yml
new file mode 100644
index 0000000..ba43fdd
--- /dev/null
+++ b/demo/project/roles/testrole/meta/main.yml
@@ -0,0 +1,58 @@
+---
+galaxy_info:
+ author: your name
+ description: your description
+ company: your company (optional)
+
+ # If the issue tracker for your role is not on github, uncomment the
+ # next line and provide a value
+ # issue_tracker_url: http://example.com/issue/tracker
+
+ # Some suggested licenses:
+ # - BSD (default)
+ # - MIT
+ # - GPLv2
+ # - GPLv3
+ # - Apache
+ # - CC-BY
+ license: license (GPLv2, CC-BY, etc)
+
+ min_ansible_version: 1.2
+
+ # If this a Container Enabled role, provide the minimum Ansible Container version.
+ # min_ansible_container_version:
+
+ # Optionally specify the branch Galaxy will use when accessing the GitHub
+ # repo for this role. During role install, if no tags are available,
+ # Galaxy will use this branch. During import Galaxy will access files on
+ # this branch. If Travis integration is configured, only notifications for this
+ # branch will be accepted. Otherwise, in all cases, the repo's default branch
+ # (usually master) will be used.
+ # github_branch:
+
+ #
+ # platforms is a list of platforms, and each platform has a name and a list of versions.
+ #
+ # platforms:
+ # - name: Fedora
+ # versions:
+ # - all
+ # - 25
+ # - name: SomePlatform
+ # versions:
+ # - all
+ # - 1.0
+ # - 7
+ # - 99.99
+
+ galaxy_tags: []
+ # List tags for your role here, one per line. A tag is a keyword that describes
+ # and categorizes the role. Users find roles by searching for tags. Be sure to
+ # remove the '[]' above, if you add tags to this list.
+ #
+ # NOTE: A tag is limited to a single word comprised of alphanumeric characters.
+ # Maximum 20 tags per role.
+
+dependencies: []
+# List your role dependencies here, one per line. Be sure to remove the '[]' above,
+# if you add dependencies to this list.
diff --git a/demo/project/roles/testrole/tasks/main.yml b/demo/project/roles/testrole/tasks/main.yml
new file mode 100644
index 0000000..3b588da
--- /dev/null
+++ b/demo/project/roles/testrole/tasks/main.yml
@@ -0,0 +1,5 @@
+---
+# tasks file for testrole
+- name: just print a message to stdout
+ debug:
+ msg: "hello from the ansible-runner testrole!"
diff --git a/demo/project/roles/testrole/tests/inventory b/demo/project/roles/testrole/tests/inventory
new file mode 100644
index 0000000..878877b
--- /dev/null
+++ b/demo/project/roles/testrole/tests/inventory
@@ -0,0 +1,2 @@
+localhost
+
diff --git a/demo/project/roles/testrole/tests/test.yml b/demo/project/roles/testrole/tests/test.yml
new file mode 100644
index 0000000..93c73cc
--- /dev/null
+++ b/demo/project/roles/testrole/tests/test.yml
@@ -0,0 +1,5 @@
+---
+- hosts: localhost
+ remote_user: root
+ roles:
+ - testrole
diff --git a/demo/project/roles/testrole/vars/main.yml b/demo/project/roles/testrole/vars/main.yml
new file mode 100644
index 0000000..7c90db2
--- /dev/null
+++ b/demo/project/roles/testrole/vars/main.yml
@@ -0,0 +1,2 @@
+---
+# vars file for testrole
diff --git a/demo/project/test.yml b/demo/project/test.yml
new file mode 100644
index 0000000..904b389
--- /dev/null
+++ b/demo/project/test.yml
@@ -0,0 +1,4 @@
+---
+- hosts: all
+ tasks:
+ - debug: msg="Test!"
diff --git a/docs/Makefile b/docs/Makefile
new file mode 100644
index 0000000..80d2264
--- /dev/null
+++ b/docs/Makefile
@@ -0,0 +1,20 @@
+# Minimal makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS =
+SPHINXBUILD = sphinx-build
+SPHINXPROJ = ansible-runner
+SOURCEDIR = .
+BUILDDIR = _build
+
+# Put it first so that "make" without argument is like "make help".
+help:
+ @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
+
+.PHONY: help Makefile
+
+# Catch-all target: route all unknown targets to Sphinx using the new
+# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
+%: Makefile
+	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
\ No newline at end of file
diff --git a/docs/ansible_runner.callbacks.rst b/docs/ansible_runner.callbacks.rst
new file mode 100644
index 0000000..300d811
--- /dev/null
+++ b/docs/ansible_runner.callbacks.rst
@@ -0,0 +1,30 @@
+ansible\_runner.callbacks package
+=================================
+
+Submodules
+----------
+
+ansible\_runner.callbacks.awx\_display module
+---------------------------------------------
+
+.. automodule:: ansible_runner.callbacks.awx_display
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+ansible\_runner.callbacks.minimal module
+----------------------------------------
+
+.. automodule:: ansible_runner.callbacks.minimal
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+
+Module contents
+---------------
+
+.. automodule:: ansible_runner.callbacks
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/ansible_runner.display_callback.rst b/docs/ansible_runner.display_callback.rst
new file mode 100644
index 0000000..8fa22ef
--- /dev/null
+++ b/docs/ansible_runner.display_callback.rst
@@ -0,0 +1,54 @@
+ansible\_runner.display\_callback package
+=========================================
+
+Submodules
+----------
+
+ansible\_runner.display\_callback.cleanup module
+------------------------------------------------
+
+.. automodule:: ansible_runner.display_callback.cleanup
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+ansible\_runner.display\_callback.display module
+------------------------------------------------
+
+.. automodule:: ansible_runner.display_callback.display
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+ansible\_runner.display\_callback.events module
+-----------------------------------------------
+
+.. automodule:: ansible_runner.display_callback.events
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+ansible\_runner.display\_callback.minimal module
+------------------------------------------------
+
+.. automodule:: ansible_runner.display_callback.minimal
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+ansible\_runner.display\_callback.module module
+-----------------------------------------------
+
+.. automodule:: ansible_runner.display_callback.module
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+
+Module contents
+---------------
+
+.. automodule:: ansible_runner.display_callback
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/ansible_runner.rst b/docs/ansible_runner.rst
new file mode 100644
index 0000000..19c2cb0
--- /dev/null
+++ b/docs/ansible_runner.rst
@@ -0,0 +1,70 @@
+ansible\_runner package
+=======================
+
+Subpackages
+-----------
+
+.. toctree::
+
+ ansible_runner.callbacks
+ ansible_runner.display_callback
+
+Submodules
+----------
+
+ansible\_runner.exceptions module
+---------------------------------
+
+.. automodule:: ansible_runner.exceptions
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+ansible\_runner.interface module
+--------------------------------
+
+.. automodule:: ansible_runner.interface
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+ansible\_runner.loader module
+-----------------------------
+
+.. automodule:: ansible_runner.loader
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+ansible\_runner.runner module
+-----------------------------
+
+.. automodule:: ansible_runner.runner
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+ansible\_runner.runner\_config module
+-------------------------------------
+
+.. automodule:: ansible_runner.runner_config
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+ansible\_runner.utils module
+----------------------------
+
+.. automodule:: ansible_runner.utils
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+
+Module contents
+---------------
+
+.. automodule:: ansible_runner
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/conf.py b/docs/conf.py
new file mode 100644
index 0000000..562efb1
--- /dev/null
+++ b/docs/conf.py
@@ -0,0 +1,175 @@
+# -*- coding: utf-8 -*-
+#
+# Configuration file for the Sphinx documentation builder.
+#
+# This file does only contain a selection of the most common options. For a
+# full list see the documentation:
+# http://www.sphinx-doc.org/en/master/config
+
+# -- Path setup --------------------------------------------------------------
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+#
+import os
+import sys
+sys.path.insert(0, os.path.abspath('../'))
+sys.path.insert(0, os.path.abspath('.'))
+
+
+# -- Project information -----------------------------------------------------
+
+project = 'ansible-runner'
+copyright = '2018, Red Hat Ansible'
+author = 'Red Hat Ansible'
+
+# The short X.Y version
+version = ''
+# The full version, including alpha/beta/rc tags
+release = '1.4.6'
+
+
+# -- General configuration ---------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+#
+# needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = [
+ 'sphinx.ext.autodoc',
+ 'sphinx.ext.doctest',
+ 'sphinx.ext.intersphinx',
+ 'sphinx.ext.todo',
+ 'sphinx.ext.viewcode',
+]
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix(es) of source filenames.
+# You can specify multiple suffix as a list of string:
+#
+# source_suffix = ['.rst', '.md']
+source_suffix = '.rst'
+
+# The master toctree document.
+master_doc = 'index'
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#
+# This is also used if you do content translation via gettext catalogs.
+# Usually you set "language" from the command line for these cases.
+language = None
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+# This pattern also affects html_static_path and html_extra_path .
+exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+
+# -- Options for HTML output -------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+#
+html_theme = 'alabaster'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+#
+# html_theme_options = {}
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# Custom sidebar templates, must be a dictionary that maps document names
+# to template names.
+#
+# The default sidebars (for documents that don't match any pattern) are
+# defined by theme itself. Builtin themes are using these templates by
+# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
+# 'searchbox.html']``.
+#
+# html_sidebars = {}
+
+
+# -- Options for HTMLHelp output ---------------------------------------------
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'ansible-runnerdoc'
+
+
+# -- Options for LaTeX output ------------------------------------------------
+
+latex_elements = {
+ # The paper size ('letterpaper' or 'a4paper').
+ #
+ # 'papersize': 'letterpaper',
+
+ # The font size ('10pt', '11pt' or '12pt').
+ #
+ # 'pointsize': '10pt',
+
+ # Additional stuff for the LaTeX preamble.
+ #
+ # 'preamble': '',
+
+ # Latex figure (float) alignment
+ #
+ # 'figure_align': 'htbp',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title,
+# author, documentclass [howto, manual, or own class]).
+latex_documents = [
+ (master_doc, 'ansible-runner.tex', 'ansible-runner Documentation',
+ 'Red Hat Ansible', 'manual'),
+]
+
+
+# -- Options for manual page output ------------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+ (master_doc, 'ansible-runner', 'ansible-runner Documentation',
+ [author], 1)
+]
+
+
+# -- Options for Texinfo output ----------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+# dir menu entry, description, category)
+texinfo_documents = [
+ (master_doc, 'ansible-runner', 'ansible-runner Documentation',
+ author, 'ansible-runner', 'One line description of project.',
+ 'Miscellaneous'),
+]
+
+
+# -- Extension configuration -------------------------------------------------
+
+# -- Options for intersphinx extension ---------------------------------------
+
+# Example configuration for intersphinx: refer to the Python standard library.
+intersphinx_mapping = {'https://docs.python.org/': None}
+
+# -- Options for todo extension ----------------------------------------------
+
+# If true, `todo` and `todoList` produce output, else they produce nothing.
+todo_include_todos = True
diff --git a/docs/container.rst b/docs/container.rst
new file mode 100644
index 0000000..45c5976
--- /dev/null
+++ b/docs/container.rst
@@ -0,0 +1,49 @@
+.. _container:
+
+Using Runner as a container interface to Ansible
+================================================
+
+The design of **Ansible Runner** makes it especially suitable for controlling the execution of **Ansible** from within a container for single-purpose
+automation workflows. A reference container image definition is `provided <https://github.com/ansible/ansible-runner/blob/master/Dockerfile>`_ and
+is also published to `DockerHub <https://hub.docker.com/r/ansible/ansible-runner/>`_. You can try it out for yourself:
+
+.. code-block:: console
+
+ $ docker run --rm -e RUNNER_PLAYBOOK=test.yml ansible/ansible-runner:latest
+ Unable to find image 'ansible/ansible-runner:latest' locally
+ latest: Pulling from ansible/ansible-runner
+ [...]
+ PLAY [all] *********************************************************************
+
+ TASK [Gathering Facts] *********************************************************
+ ok: [localhost]
+
+ TASK [debug] *******************************************************************
+ ok: [localhost] => {
+ "msg": "Test!"
+ }
+
+ PLAY RECAP *********************************************************************
+ localhost : ok=2 changed=0 unreachable=0 failed=0
+
+
+The reference container image is purposefully lightweight, containing only the dependencies necessary to run ``ansible-runner`` itself. It's
+intended to be overridden.
+
+Overriding the reference container image
+----------------------------------------
+
+**TODO**
+
+Gathering output from the reference container image
+---------------------------------------------------
+
+**TODO**
+
+Changing the console output to emit raw events
+----------------------------------------------
+
+This can be useful when directing task-level event data to an external system by means of the container's console output.
+
+See :ref:`outputjson`
+
diff --git a/docs/external_interface.rst b/docs/external_interface.rst
new file mode 100644
index 0000000..4186f6f
--- /dev/null
+++ b/docs/external_interface.rst
@@ -0,0 +1,78 @@
+.. _externalintf:
+
+Sending Runner Status and Events to External Systems
+====================================================
+
+**Runner** can store event and status data locally for retrieval. It can also emit this information via callbacks provided to the module interface.
+
+Alternatively, **Runner** can be configured to send events to an external system via installable plugins; there are currently two available.
+
+.. _plugineventstructure:
+
+Event Structure
+---------------
+
+There are two types of events that are emitted via plugins:
+
+* status events:
+
+ These are sent whenever Runner's status changes (see :ref:`runnerstatushandler`) for example::
+
+ {"status": "running", "runner_ident": "XXXX" }
+
+* ansible events:
+
+ These are sent during playbook execution for every event received from **Ansible** (see :ref:`Playbook and Host Events<artifactevents>`) for example::
+
+      {"runner_ident": "XXXX", <rest of the event structure> }
+
+.. _httpemitterplugin:
+
+HTTP Status/Event Emitter Plugin
+--------------------------------
+
+This sends status and event data to a URL in the form of json encoded POST requests.
+
+This plugin is available from the `ansible-runner-http github repo <https://github.com/ansible/ansible-runner-http>`_ and is also available to be installed from
+pip::
+
+ $ pip install ansible-runner-http
+
+In order to configure it, you can provide details in the Runner Settings file (see :ref:`runnersettings`):
+
+* `runner_http_url`: The url to receive the ``POST``
+* `runner_http_headers`: Headers to send along with the request.
+
+The plugin also supports unix file-based sockets with:
+
+* `runner_http_url`: The path to the unix socket
+* `runner_http_path`: The path that will be included as part of the request to the socket
+
+Some of these settings are also available as environment variables:
+
+* RUNNER_HTTP_URL
+* RUNNER_HTTP_PATH
+
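+As a hedged example, an ``env/settings`` entry enabling the HTTP emitter (the URL and header values are placeholders)::
+
+    ---
+    runner_http_url: https://callback.example.com/runner
+    runner_http_headers:
+      Authorization: "Bearer <token>"
+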
+.. _zmqemitterplugin:
+
+ZeroMQ Status/Event Emitter Plugin
+----------------------------------
+
+TODO
+
+Writing your own Plugin
+-----------------------
+
+In order to write your own plugin interface and have it be picked up and used by **Runner**, there are a few things that you'll need to do.
+
+* Declare the module as a Runner entrypoint in your setup file
+ (`ansible-runner-http has a good example of this <https://github.com/ansible/ansible-runner-http/blob/master/setup.py>`_)::
+
+    entry_points={'ansible_runner.plugins': ['modname = your_python_package_name']},
+
+* Implement the ``status_handler()`` and ``event_handler()`` functions at the top of your package (a minimal sketch follows below); for example see
+  `ansible-runner-http events.py <https://github.com/ansible/ansible-runner-http/blob/master/ansible_runner_http/events.py>`_ and the ``__init__``
+  import `at the top of the module package <https://github.com/ansible/ansible-runner-http/blob/master/ansible_runner_http/__init__.py>`_
+
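+A minimal sketch of such a plugin module (the handler signatures here are a best guess modeled on the linked ``ansible-runner-http`` source; treat that project as the authoritative example)::
+
+    # your_python_package_name/__init__.py
+    def status_handler(runner_config, data):
+        # receives status dicts such as {"status": "running", "runner_ident": "..."}
+        print("status update: {}".format(data))
+
+    def event_handler(runner_config, data):
+        # receives one dict per Ansible event
+        print("event: {}".format(data.get("event")))
+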
+After installing this, **Runner** will see the plugin and invoke the functions when status and events are sent. If there are any errors in your plugin
+they will be raised immediately and **Runner** will fail.
diff --git a/docs/index.rst b/docs/index.rst
new file mode 100644
index 0000000..300ecfc
--- /dev/null
+++ b/docs/index.rst
@@ -0,0 +1,53 @@
+.. ansible-runner documentation master file, created by
+ sphinx-quickstart on Tue May 1 10:47:37 2018.
+ You can adapt this file completely to your liking, but it should at least
+ contain the root `toctree` directive.
+
+Ansible Runner
+==============
+
+Ansible Runner is a tool and Python library that helps when interfacing with Ansible directly or as part of another system,
+whether that be through a container image interface, as a standalone tool, or as an importable Python module. The goal
+is to provide a stable and consistent interface abstraction to Ansible. This allows **Ansible** to be embedded into other systems that don't
+want to manage the complexities of the interface on their own (such as CI/CD platforms, Jenkins, or other automated tooling).
+
+**Ansible Runner** represents the modularization of the part of `Ansible Tower/AWX <https://github.com/ansible/awx>`_ that is responsible
+for running ``ansible`` and ``ansible-playbook`` tasks and gathering their output. It does this by presenting a common interface that doesn't
+change, even as **Ansible** itself grows and evolves.
+
+Part of what makes this tooling useful is that it can gather its inputs in a flexible way (see :ref:`intro`). It also has a system for storing the
+output (stdout) and artifacts (host-level event data, fact data, etc) of the playbook run.
+
+There are three primary ways of interacting with **Runner**:
+
+* A standalone command line tool (``ansible-runner``) that can be started in the foreground or run in the background asynchronously
+* A reference container image that can be used as a base for your own images and will work as a standalone container or running in
+ Openshift or Kubernetes
+* A python module - library interface
+
+**Ansible Runner** can also be configured to send status and event data to other systems using a plugin interface (see :ref:`externalintf`).
+
+Examples of this could include:
+
+* Sending status to Ansible Tower/AWX
+* Sending events to an external logging service
+
+
+.. toctree::
+ :maxdepth: 2
+ :caption: Contents:
+
+ intro
+ install
+ external_interface
+ standalone
+ python_interface
+ container
+
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
diff --git a/docs/install.rst b/docs/install.rst
new file mode 100644
index 0000000..2847619
--- /dev/null
+++ b/docs/install.rst
@@ -0,0 +1,88 @@
+.. _install:
+
+Installing Ansible Runner
+=========================
+
+Ansible Runner is provided from several different locations depending on how you want to use it.
+
+Using pip
+---------
+
+Python 2.7+ and 3.6+ are supported and installable via pip::
+
+ $ pip install ansible-runner
+
+
+Fedora
+------
+
+To install from the latest Fedora sources::
+
+ $ dnf install python-ansible-runner
+
+Debian
+------
+
+Add an ansible-runner repository::
+
+ $ apt-get update
+ $ echo 'deb https://releases.ansible.com/ansible-runner/deb/ <trusty|xenial|stretch> main' > /etc/apt/sources.list.d/ansible.list
+
+Add a key::
+
+ $ apt-key adv --keyserver keyserver.ubuntu.com --recv 3DD29021
+
+Install the package::
+
+ $ apt-get update
+ $ apt-get install ansible-runner
+
+
+From source
+-----------
+
+Check out the source code from `github <https://github.com/ansible/ansible-runner>`_::
+
+ $ git clone git://github.com/ansible/ansible-runner
+
+Or download from the `releases page <https://github.com/ansible/ansible-runner/releases>`_
+
+Then install::
+
+ $ python setup.py install
+
+OR::
+
+ $ pip install .
+
+.. _builddist:
+
+Build the distribution
+----------------------
+
+To produce an installable ``wheel`` file::
+
+ make dist
+
+To produce a distribution tarball::
+
+ make sdist
+
+.. _buildcontimg:
+
+Building the base container image
+---------------------------------
+
+Make sure the ``wheel`` distribution is built (see :ref:`builddist`) and run::
+
+ make image
+
+Building the RPM
+----------------
+
+The RPM build uses a container image to bootstrap the environment in order to produce the RPM. Make sure you have docker
+installed and proceed with::
+
+ make rpm
+
+.. include:: ../CHANGES.rst
diff --git a/docs/intro.rst b/docs/intro.rst
new file mode 100644
index 0000000..1ddc9bb
--- /dev/null
+++ b/docs/intro.rst
@@ -0,0 +1,374 @@
+.. _intro:
+
+Introduction to Ansible Runner
+==============================
+
+**Runner** is intended to be most useful as part of automation and tooling that needs to invoke Ansible and consume its results.
+Most of the parameterization of the **Ansible** command line is also available on the **Runner** command line but **Runner** also
+can rely on an input interface that is mapped onto a directory structure, an example of which can be seen in `the source tree <https://github.com/ansible/ansible-runner/tree/master/demo>`_.
+
+Further sections in this document refer to the configuration and layout of that hierarchy. This isn't the only way to interface with **Runner**
+itself. The Python module interface allows supplying these details as direct module parameters in many forms, and the command line interface allows
+supplying them directly as arguments, mimicking the behavior of ``ansible-playbook``. Having the directory structure **does** allow gathering the inputs
+from elsewhere and preparing them for consumption by **Runner**; the tooling can then come along and inspect the results after the run.
+
+This is best seen in the way Ansible **AWX** uses **Runner** where most of the content comes from the database (and other content-management components) but
+ultimately needs to be brought together in a single place when launching the **Ansible** task.
+
+.. _inputdir:
+
+Runner Input Directory Hierarchy
+--------------------------------
+
+This directory contains all necessary inputs. Here's a view of the `demo directory <https://github.com/ansible/ansible-runner/tree/master/demo>`_ showing
+an active configuration.
+
+Note that not everything is required. Defaults will be used or values will be omitted if they are not provided.
+
+.. code-block:: none
+
+    .
+    ├── env
+    │   ├── envvars
+    │   ├── extravars
+    │   ├── passwords
+    │   ├── cmdline
+    │   ├── settings
+    │   └── ssh_key
+    ├── inventory
+    │   └── hosts
+    └── project
+        ├── test.yml
+        └── roles
+            └── testrole
+                ├── defaults
+                ├── handlers
+                ├── meta
+                ├── README.md
+                ├── tasks
+                ├── tests
+                └── vars
+
+The ``env`` directory
+---------------------
+
+The **env** directory contains settings and sensitive files that inform certain aspects of the invocation of the **Ansible** process, an example of which can
+be found in `the demo env directory <https://github.com/ansible/ansible-runner/tree/master/demo/env>`_. Each of these files can also be represented by a named
+pipe, providing an extra layer of security. The formatting and expectation of these files differs slightly depending on what they are representing.
+
+``env/envvars``
+---------------
+
+.. note::
+
+ For an example see `the demo envvars <https://github.com/ansible/ansible-runner/blob/master/demo/env/envvars>`_.
+
+**Ansible Runner** will inherit the environment of the launching shell (or container, or system itself). This file (which can be in json or yaml format) represents
+the environment variables that will be added to the environment at run-time::
+
+ ---
+ TESTVAR: exampleval
+
+``env/extravars``
+-----------------
+
+.. note::
+
+ For an example see `the demo extravars <https://github.com/ansible/ansible-runner/blob/master/demo/env/extravars>`_.
+
+**Ansible Runner** gathers the extra vars provided here and supplies them to the **Ansible Process** itself. This file can be in either json or yaml format::
+
+ ---
+ ansible_connection: local
+ test: val
+
+``env/passwords``
+-----------------
+
+.. note::
+
+ For an example see `the demo passwords <https://github.com/ansible/ansible-runner/blob/master/demo/env/passwords>`_.
+
+.. warning::
+
+ We expect this interface to change/simplify in the future but will guarantee backwards compatibility. The goal is for the user of **Runner** to not
+ have to worry about the format of certain prompts emitted from **Ansible** itself. In particular, vault passwords need to become more flexible.
+
+**Ansible** itself is set up to emit passwords to certain prompts; these prompts can be requested (``-k``, for example, to prompt for the connection password).
+Likewise, prompts can be emitted via `vars_prompt <https://docs.ansible.com/ansible/latest/user_guide/playbooks_prompts.html>`_ and also
+`Ansible Vault <https://docs.ansible.com/ansible/2.5/user_guide/vault.html#vault-ids-and-multiple-vault-passwords>`_.
+
+In order for **Runner** to respond with the correct password, it needs to be able to match the prompt and provide the correct password. This is currently supported
+by providing a yaml or json formatted file with a regular expression and a value to emit, for example::
+
+ ---
+ "^SSH [pP]assword:$": "some_password"
+ "^BECOME [pP]assword:$": "become_password"
+
+``env/cmdline``
+---------------
+
+.. warning::
+
+   Currently, **Ansible Runner** does not validate the command line arguments passed using this method, so it is up to the playbook writer to provide a valid set of options.
+   The command line options provided by this method are lower priority than the ones set by **Ansible Runner**. For instance, this will not override ``inventory`` or ``limit`` values.
+
+**Ansible Runner** gathers command line options provided here as a string and supplies them to the **Ansible Process** itself. This file should contain the arguments to be added, for example::
+
+ --tags one,two --skip-tags three -u ansible --become
+
+``env/ssh_key``
+---------------
+
+.. note::
+
+ Currently only a single ssh key can be provided via this mechanism but this is set to `change soon <https://github.com/ansible/ansible-runner/issues/51>`_.
+
+This file should contain the ssh private key used to connect to the host(s). **Runner** detects when a private key is provided and will wrap the call to
+**Ansible** in ssh-agent.
+
+.. _runnersettings:
+
+``env/settings`` - Settings for Runner itself
+---------------------------------------------
+
+The **settings** file is a little different from the other files provided in this section in that its contents are meant to control **Runner** directly.
+
+* ``idle_timeout``: ``600`` If no output is detected from ansible in this number of seconds, the execution will be terminated.
+* ``job_timeout``: ``3600`` The maximum amount of time to allow the job to run; if exceeded, the execution will be terminated.
+* ``pexpect_timeout``: ``10`` Number of seconds for the internal pexpect command to wait to block on input before continuing.
+* ``pexpect_use_poll``: ``True`` Use ``poll()`` function for communication with child processes instead of ``select()``. ``select()`` is used when the value is set to ``False``. ``select()`` has a known limitation of using only up to 1024 file descriptors.
+
+* ``suppress_ansible_output``: ``False`` Suppress printing output from ansible to the screen.
+* ``fact_cache``: ``'fact_cache'`` The directory relative to ``artifacts`` where ``jsonfile`` fact caching will be stored. Defaults to ``fact_cache``. This is ignored if ``fact_cache_type`` is different from ``jsonfile``.
+* ``fact_cache_type``: ``'jsonfile'`` The type of fact cache to use. Defaults to ``jsonfile``.
+
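+Putting a few of these together, a complete ``env/settings`` file (the first three values mirror the demo settings shipped in this repository) might look like::
+
+    ---
+    idle_timeout: 600
+    job_timeout: 3600
+    pexpect_timeout: 10
+    suppress_ansible_output: False
+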
+Process Isolation Settings for Runner
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The process isolation settings are meant to control the process isolation feature of **Runner**.
+
+* ``process_isolation``: ``False`` Enable limiting what directories on the filesystem the playbook run has access to.
+* ``process_isolation_executable``: ``bwrap`` Path to the executable that will be used to provide filesystem isolation.
+* ``process_isolation_path``: ``/tmp`` Path that an isolated playbook run will use for staging.
+* ``process_isolation_hide_paths``: ``None`` Path or list of paths on the system that should be hidden from the playbook run.
+* ``process_isolation_show_paths``: ``None`` Path or list of paths on the system that should be exposed to the playbook run.
+* ``process_isolation_ro_paths``: ``None`` Path or list of paths on the system that should be exposed to the playbook run as read-only.
+
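+As a sketch, an ``env/settings`` fragment enabling process isolation (the hidden paths are illustrative)::
+
+    ---
+    process_isolation: True
+    process_isolation_executable: bwrap
+    process_isolation_hide_paths:
+      - /home
+      - /opt
+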
+Performance Data Collection Settings for Runner
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+**Runner** is capable of collecting performance data (namely cpu usage, memory usage, and pid count) during the execution of a playbook run.
+
+Resource profiling is made possible by the use of control groups (often referred to simply as cgroups). When a process runs inside of a cgroup, the resources used by that specific process can be measured.
+
+Before enabling Runner's resource profiling feature, users must create a cgroup that **Runner** can use. It is worth noting that only privileged users can create cgroups. The new cgroup should be associated with the same user (and related group) that will be invoking **Runner**. The following command accomplishes this on a RHEL system::
+
+ sudo yum install libcgroup-tools
+ sudo cgcreate -a `whoami` -t `whoami` -g cpuacct,memory,pids:ansible-runner
+
+In the above command, ``cpuacct``, ``memory``, and ``pids`` refer to kernel resource controllers, while ``ansible-runner`` refers to the name of the cgroup being created. More detailed information on the structure of cgroups can be found in the RHEL guide on `Managing, monitoring, and updating the kernel <https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/managing_monitoring_and_updating_the_kernel/setting-limits-for-applications_managing-monitoring-and-updating-the-kernel>`_
+
+After a cgroup has been created, the following settings can be used to configure resource profiling. Note that ``resource_profiling_base_cgroup`` must match the name of the cgroup you create.
+
+* ``resource_profiling``: ``False`` Enable performance data collection.
+* ``resource_profiling_base_cgroup``: ``ansible-runner`` Top-level cgroup used to measure playbook resource utilization.
+* ``resource_profiling_cpu_poll_interval``: ``0.25`` Polling interval in seconds for collecting cpu usage.
+* ``resource_profiling_memory_poll_interval``: ``0.25`` Polling interval in seconds for collecting memory usage.
+* ``resource_profiling_pid_poll_interval``: ``0.25`` Polling interval in seconds for measuring PID count.
+* ``resource_profiling_results_dir``: ``None`` Directory where resource utilization data will be written (if not specified, will be placed in the ``profiling_data`` folder under the private data directory).
+
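+A matching ``env/settings`` fragment that enables profiling against the cgroup created above might look like::
+
+    ---
+    resource_profiling: True
+    resource_profiling_base_cgroup: ansible-runner
+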
+Inventory
+---------
+
+The **Runner** ``inventory`` location under the private data dir has the same expectations as inventory provided directly to ansible itself. It can
+be either a single file or script or a directory containing static inventory files or scripts. This inventory is automatically loaded and provided to
+**Ansible** when invoked and can be further limited or overridden on the command line or via an environment variable to specify the hosts directly.
+
+Project
+--------
+
+The **Runner** ``project`` directory is the playbook root containing playbooks and roles that those playbooks can consume directly. This is also the
+directory that will be set as the ``current working directory`` when launching the **Ansible** process.
+
+
+Modules
+-------
+
+**Runner** has the ability to execute modules directly using Ansible ad-hoc mode.
+
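+A hedged command-line sketch of ad-hoc module execution against the demo directory (the ``-m`` and ``--hosts`` flags are covered in the standalone interface documentation)::
+
+    $ ansible-runner -m ping --hosts localhost run demo
+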
+Roles
+-----
+
+**Runner** has the ability to execute `Roles <https://docs.ansible.com/ansible/latest/user_guide/playbooks_reuse_roles.html>`_ directly without first needing
+a playbook to reference them. This directory holds roles used for that. Behind the scenes, **Runner** will generate a playbook and invoke the ``Role``.
+
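+As a command-line sketch (the ``-r`` flag is covered in the standalone interface documentation), invoking the demo role directly might look like::
+
+    $ ansible-runner -r testrole --hosts localhost run demo
+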
+.. _artifactdir:
+
+Runner Artifacts Directory Hierarchy
+------------------------------------
+
+This directory will contain the results of **Runner** invocation grouped under an ``identifier`` directory. This identifier can be supplied to **Runner** directly
+and if not given, an identifier will be generated as a `UUID <https://docs.python.org/3/library/uuid.html#uuid.uuid4>`_. This is how the directory structure looks
+from the top level::
+
+ .
+ ├── artifacts
+ │   └── identifier
+ ├── env
+ ├── inventory
+ ├── profiling_data
+ ├── project
+ └── roles
+
+The artifact directory itself contains a particular structure that provides a lot of extra detail from a running or previously-run invocation of Ansible/Runner::
+
+ .
+ ├── artifacts
+ │   └── 37f639a3-1f4f-4acb-abee-ea1898013a25
+ │   ├── fact_cache
+ │   │   └── localhost
+ │   ├── job_events
+ │   │   ├── 1-34437b34-addd-45ae-819a-4d8c9711e191.json
+ │   │   ├── 2-8c164553-8573-b1e0-76e1-000000000006.json
+ │   │   ├── 3-8c164553-8573-b1e0-76e1-00000000000d.json
+ │   │   ├── 4-f16be0cd-99e1-4568-a599-546ab80b2799.json
+ │   │   ├── 5-8c164553-8573-b1e0-76e1-000000000008.json
+ │   │   ├── 6-981fd563-ec25-45cb-84f6-e9dc4e6449cb.json
+ │   │   └── 7-01c7090a-e202-4fb4-9ac7-079965729c86.json
+ │   ├── rc
+ │   ├── status
+ │   └── stdout
+
+
+The **rc** file contains the actual return code from the **Ansible** process.
+
+The **status** file contains one of three statuses suitable for displaying:
+
+* success: The **Ansible** process finished successfully
+* failed: The **Ansible** process failed
+* timeout: The **Runner** timeout was reached (see :ref:`runnersettings`)
+
+The **stdout** file contains the actual stdout as it appears at that moment.
+
+.. _artifactevents:
+
+Runner Artifact Job Events (Host and Playbook Events)
+-----------------------------------------------------
+
+**Runner** gathers the individual task and playbook events that are emitted as part of the **Ansible** run. This is extremely helpful if you don't want
+to process or read the stdout returned from **Ansible**, as the event data contains much more detail and status than the plain stdout.
+It does some of the heavy lifting of assigning order to the events and stores them in json format under the ``job_events`` artifact directory.
+It also takes it a step further than normal **Ansible** callback plugins in that it will store the ``stdout`` associated with the event alongside the raw
+event data (along with stdout line numbers). It also generates dummy events for stdout that didn't have corresponding host event data::
+
+ {
+ "uuid": "8c164553-8573-b1e0-76e1-000000000008",
+ "parent_uuid": "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx",
+ "counter": 5,
+ "stdout": "\r\nTASK [debug] *******************************************************************",
+ "start_line": 5,
+ "end_line": 7,
+ "event": "playbook_on_task_start",
+ "event_data": {
+ "playbook": "test.yml",
+ "playbook_uuid": "34437b34-addd-45ae-819a-4d8c9711e191",
+ "play": "all",
+ "play_uuid": "8c164553-8573-b1e0-76e1-000000000006",
+ "play_pattern": "all",
+ "task": "debug",
+ "task_uuid": "8c164553-8573-b1e0-76e1-000000000008",
+ "task_action": "debug",
+ "task_path": "\/home\/mjones\/ansible\/ansible-runner\/demo\/project\/test.yml:3",
+ "task_args": "msg=Test!",
+ "name": "debug",
+ "is_conditional": false,
+ "pid": 10640
+ },
+ "pid": 10640,
+ "created": "2018-06-07T14:54:58.410605"
+ }
+
+If the playbook runs to completion without getting killed, the last event will always be the ``stats`` event::
+
+ {
+ "uuid": "01c7090a-e202-4fb4-9ac7-079965729c86",
+ "counter": 7,
+ "stdout": "\r\nPLAY RECAP *********************************************************************\r\n\u001b[0;32mlocalhost,\u001b[0m : \u001b[0;32mok=2 \u001b[0m changed=0 unreachable=0 failed=0 \r\n",
+ "start_line": 10,
+ "end_line": 14,
+ "event": "playbook_on_stats",
+ "event_data": {
+ "playbook": "test.yml",
+ "playbook_uuid": "34437b34-addd-45ae-819a-4d8c9711e191",
+ "changed": {
+
+ },
+ "dark": {
+
+ },
+ "failures": {
+
+ },
+ "ok": {
+ "localhost,": 2
+ },
+ "processed": {
+ "localhost,": 1
+ },
+ "skipped": {
+
+ },
+ "artifact_data": {
+
+ },
+ "pid": 10640
+ },
+ "pid": 10640,
+ "created": "2018-06-07T14:54:58.424603"
+ }
+
+.. note::
+
+   The **Runner module interface** presents a programmatic interface to these events that allows getting the final status and performing host filtering of task events.
+
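+For illustration, a short sketch using the module interface (the same calls are documented in :ref:`python_interface`)::
+
+    import ansible_runner
+
+    r = ansible_runner.run(private_data_dir='demo', playbook='test.yml')
+    print(r.status)                           # final status, e.g. "successful"
+    for event in r.host_events('localhost'):  # task events for a single host
+        print(event['event'])
+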
+Runner Profiling Data Directory
+-------------------------------
+
+If resource profiling is enabled for **Runner** the ``profiling_data`` directory will be populated with a set of files containing the profiling data::
+
+ .
+ ├── profiling_data
+ │   ├── 0-34437b34-addd-45ae-819a-4d8c9711e191-cpu.json
+ │   ├── 0-34437b34-addd-45ae-819a-4d8c9711e191-memory.json
+ │   ├── 0-34437b34-addd-45ae-819a-4d8c9711e191-pids.json
+ │   ├── 1-8c164553-8573-b1e0-76e1-000000000006-cpu.json
+ │   ├── 1-8c164553-8573-b1e0-76e1-000000000006-memory.json
+ │   └── 1-8c164553-8573-b1e0-76e1-000000000006-pids.json
+
+Each file is in `JSON text format <https://tools.ietf.org/html/rfc7464#section-2.2>`_. Each line of the file will begin with a record separator (RS), continue with a JSON dictionary, and conclude with a line feed (LF) character. The following provides an example of what the resource files may look like. Note that since the RS and LF are control characters, they are not actually printed below::
+
+ ==> 0-525400c9-c704-29a6-4107-00000000000c-cpu.json <==
+ {"timestamp": 1568977988.6844425, "task_name": "Gathering Facts", "task_uuid": "525400c9-c704-29a6-4107-00000000000c", "value": 97.12799768097156}
+ {"timestamp": 1568977988.9394386, "task_name": "Gathering Facts", "task_uuid": "525400c9-c704-29a6-4107-00000000000c", "value": 94.17538298892688}
+ {"timestamp": 1568977989.1901696, "task_name": "Gathering Facts", "task_uuid": "525400c9-c704-29a6-4107-00000000000c", "value": 64.38272588006255}
+ {"timestamp": 1568977989.4594045, "task_name": "Gathering Facts", "task_uuid": "525400c9-c704-29a6-4107-00000000000c", "value": 83.77387744259856}
+
+ ==> 0-525400c9-c704-29a6-4107-00000000000c-memory.json <==
+ {"timestamp": 1568977988.4281094, "task_name": "Gathering Facts", "task_uuid": "525400c9-c704-29a6-4107-00000000000c", "value": 36.21484375}
+ {"timestamp": 1568977988.6842303, "task_name": "Gathering Facts", "task_uuid": "525400c9-c704-29a6-4107-00000000000c", "value": 57.87109375}
+ {"timestamp": 1568977988.939303, "task_name": "Gathering Facts", "task_uuid": "525400c9-c704-29a6-4107-00000000000c", "value": 66.60546875}
+ {"timestamp": 1568977989.1900482, "task_name": "Gathering Facts", "task_uuid": "525400c9-c704-29a6-4107-00000000000c", "value": 71.4609375}
+ {"timestamp": 1568977989.4592078, "task_name": "Gathering Facts", "task_uuid": "525400c9-c704-29a6-4107-00000000000c", "value": 38.25390625}
+
+ ==> 0-525400c9-c704-29a6-4107-00000000000c-pids.json <==
+ {"timestamp": 1568977988.4284189, "task_name": "Gathering Facts", "task_uuid": "525400c9-c704-29a6-4107-00000000000c", "value": 5}
+ {"timestamp": 1568977988.6845856, "task_name": "Gathering Facts", "task_uuid": "525400c9-c704-29a6-4107-00000000000c", "value": 6}
+ {"timestamp": 1568977988.939547, "task_name": "Gathering Facts", "task_uuid": "525400c9-c704-29a6-4107-00000000000c", "value": 8}
+ {"timestamp": 1568977989.1902773, "task_name": "Gathering Facts", "task_uuid": "525400c9-c704-29a6-4107-00000000000c", "value": 13}
+ {"timestamp": 1568977989.4593227, "task_name": "Gathering Facts", "task_uuid": "525400c9-c704-29a6-4107-00000000000c", "value": 6}
+
+* Resource profiling data is grouped by playbook task.
+* For each task, there will be three files, corresponding to cpu, memory and pid count data.
+* Each file contains a set of data points collected over the course of a playbook task.
+* If a task executes quickly and the polling interval for a given metric is long enough, it is possible that no profiling data will be collected during the task's execution. If this is the case, no data file will be created.
diff --git a/docs/make.bat b/docs/make.bat
new file mode 100644
index 0000000..cbfea0d
--- /dev/null
+++ b/docs/make.bat
@@ -0,0 +1,36 @@
+@ECHO OFF
+
+pushd %~dp0
+
+REM Command file for Sphinx documentation
+
+if "%SPHINXBUILD%" == "" (
+ set SPHINXBUILD=sphinx-build
+)
+set SOURCEDIR=.
+set BUILDDIR=_build
+set SPHINXPROJ=ansible-runner
+
+if "%1" == "" goto help
+
+%SPHINXBUILD% >NUL 2>NUL
+if errorlevel 9009 (
+ echo.
+ echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
+ echo.installed, then set the SPHINXBUILD environment variable to point
+ echo.to the full path of the 'sphinx-build' executable. Alternatively you
+ echo.may add the Sphinx directory to PATH.
+ echo.
+ echo.If you don't have Sphinx installed, grab it from
+ echo.http://sphinx-doc.org/
+ exit /b 1
+)
+
+%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
+goto end
+
+:help
+%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
+
+:end
+popd
diff --git a/docs/modules.rst b/docs/modules.rst
new file mode 100644
index 0000000..4fad22c
--- /dev/null
+++ b/docs/modules.rst
@@ -0,0 +1,7 @@
+ansible_runner
+==============
+
+.. toctree::
+ :maxdepth: 4
+
+ ansible_runner
diff --git a/docs/python_interface.rst b/docs/python_interface.rst
new file mode 100644
index 0000000..756e757
--- /dev/null
+++ b/docs/python_interface.rst
@@ -0,0 +1,141 @@
+.. _python_interface:
+
+Using Runner as a Python Module Interface to Ansible
+====================================================
+
+**Ansible Runner** is intended to provide a directly importable and usable API for interfacing with **Ansible** itself and exposes a few helper interfaces.
+
+The modules center around the :class:`Runner <ansible_runner.runner.Runner>` object. The helper methods will return an instance of this object which provides an
+interface to the results of executing the **Ansible** command.
+
+**Ansible Runner** itself is a wrapper around **Ansible** execution and so adds plugins and interfaces to the system in order to gather extra information and
+process/store it for use later.
+
+Helper Interfaces
+-----------------
+
+The helper :mod:`interface <ansible_runner.interface>` module provides a quick way of supplying the recommended inputs in order to launch a **Runner** process. These functions also allow overriding and providing inputs beyond the scope of what the standalone or container interfaces
+support. You can see a full list of the inputs in the linked module documentation.
+
+``run()`` helper function
+-------------------------
+
+:meth:`ansible_runner.interface.run`
+
+When called, this function will take the inputs (either provided as direct inputs to the function or from the :ref:`inputdir`), and execute **Ansible**. It will run in the
+foreground and return the :class:`Runner <ansible_runner.runner.Runner>` object when finished.
+
+``run_async()`` helper function
+-------------------------------
+
+:meth:`ansible_runner.interface.run_async`
+
+Takes the same arguments as :meth:`ansible_runner.interface.run` but will launch **Ansible** asynchronously and return a tuple containing
+the ``thread`` object and a :class:`Runner <ansible_runner.runner.Runner>` object. The **Runner** object can be inspected during execution.
+
+The ``Runner`` object
+---------------------
+
+The :class:`Runner <ansible_runner.runner.Runner>` object is returned as part of the execution of **Ansible** itself. Since it wraps both execution and output
+it has some helper methods for inspecting the results. Other than the methods and indirect properties, the instance of the object itself contains two direct
+properties:
+
+* ``rc`` will represent the actual return code of the **Ansible** process
+* ``status`` will represent the state and can be one of:
+
+  * ``unstarted``: This is a very brief state where the Runner task has been created but hasn't actually started yet.
+  * ``successful``: The ``ansible`` process finished successfully.
+  * ``failed``: The ``ansible`` process failed.
+
+``Runner.stdout``
+-----------------
+
+The :class:`Runner <ansible_runner.runner.Runner>` object contains a property :attr:`ansible_runner.runner.Runner.stdout` which will return an open file
+handle containing the ``stdout`` of the **Ansible** process.
+
+``Runner.events``
+-----------------
+
+:attr:`ansible_runner.runner.Runner.events` is a ``generator`` that will return the :ref:`Playbook and Host Events<artifactevents>` as Python ``dict`` objects.
+
+``Runner.stats``
+----------------
+
+:attr:`ansible_runner.runner.Runner.stats` is a property that will return the final ``playbook stats`` event from **Ansible** in the form of a Python ``dict``.
+
+``Runner.host_events``
+----------------------
+:meth:`ansible_runner.runner.Runner.host_events` is a method that, given a hostname, will return a list of only the **Ansible** event data executed on that host.
+
+``Runner.get_fact_cache``
+-------------------------
+
+:meth:`ansible_runner.runner.Runner.get_fact_cache` is a method that, given a hostname, will return a dictionary containing the `Facts <https://docs.ansible.com/ansible/latest/user_guide/playbooks_variables.html#variables-discovered-from-systems-facts>`_ stored for that host during execution.
+
+``Runner.event_handler``
+------------------------
+
+A function passed to ``__init__`` of :class:`Runner <ansible_runner.runner.Runner>`, this is invoked every time an Ansible event is received. You can use this to
+inspect/process/handle events as they come out of Ansible.
+
+``Runner.cancel_callback``
+--------------------------
+
+A function passed to ``__init__`` of :class:`Runner <ansible_runner.runner.Runner>`, and to the :meth:`ansible_runner.interface.run` interface functions.
+This function will be called for every iteration of the :meth:`ansible_runner.interface.run` event loop and should return ``True``
+to inform **Runner** to cancel and shut down the **Ansible** process, or ``False`` to allow it to continue.
+
+``Runner.finished_callback``
+----------------------------
+
+A function passed to ``__init__`` of :class:`Runner <ansible_runner.runner.Runner>`, and to the :meth:`ansible_runner.interface.run` interface functions.
+This function will be called immediately before the **Runner** event loop finishes once **Ansible** has been shut down.
+
+.. _runnerstatushandler:
+
+``Runner.status_handler``
+-------------------------
+
+A function passed to ``__init__`` of :class:`Runner <ansible_runner.runner.Runner>` and to the :meth:`ansible_runner.interface.run` interface functions.
+This function will be called any time the ``status`` changes, expected values are:
+
+* `starting`: Preparing to start but hasn't started running yet
+* `running`: The **Ansible** task is running
+* `canceled`: The task was manually canceled either via callback or the cli
+* `timeout`: The timeout configured in Runner Settings was reached (see :ref:`runnersettings`)
+* `failed`: The **Ansible** process failed
+
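+A hedged sketch wiring these callbacks through the ``run()`` helper (assuming, per the sections above, that the helper forwards these keyword arguments to :class:`Runner <ansible_runner.runner.Runner>`; exact handler signatures may vary between releases)::
+
+    import ansible_runner
+
+    def my_status_handler(data, runner_config=None):
+        # called on each status change, e.g. starting -> running -> successful
+        print("status: {}".format(data))
+
+    def my_event_handler(event):
+        # called once per Ansible event dict as it arrives
+        print("event: {}".format(event.get('event')))
+
+    r = ansible_runner.run(private_data_dir='/tmp/demo',
+                           playbook='test.yml',
+                           status_handler=my_status_handler,
+                           event_handler=my_event_handler,
+                           cancel_callback=lambda: False)  # never request cancellation
+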
+Usage examples
+--------------
+.. code-block:: python
+
+ import ansible_runner
+ r = ansible_runner.run(private_data_dir='/tmp/demo', playbook='test.yml')
+ print("{}: {}".format(r.status, r.rc))
+ # successful: 0
+ for each_host_event in r.events:
+ print(each_host_event['event'])
+ print("Final status:")
+ print(r.stats)
+
+.. code-block:: python
+
+ import ansible_runner
+ r = ansible_runner.run(private_data_dir='/tmp/demo', host_pattern='localhost', module='shell', module_args='whoami')
+ print("{}: {}".format(r.status, r.rc))
+ # successful: 0
+ for each_host_event in r.events:
+ print(each_host_event['event'])
+ print("Final status:")
+ print(r.stats)
+
+Providing custom behavior and inputs
+------------------------------------
+
+**TODO**
+
+The helper methods are just one possible entrypoint, extending the classes used by these helper methods can allow a lot more custom behavior and functionality.
+
+Show:
+
+* How :class:`Runner Config <ansible_runner.runner_config.RunnerConfig>` is used and how overriding the methods and behavior can work
+* How custom cancel and status callbacks can be supplied.
diff --git a/docs/source/ansible_runner.callbacks.rst b/docs/source/ansible_runner.callbacks.rst
new file mode 100644
index 0000000..300d811
--- /dev/null
+++ b/docs/source/ansible_runner.callbacks.rst
@@ -0,0 +1,30 @@
+ansible\_runner.callbacks package
+=================================
+
+Submodules
+----------
+
+ansible\_runner.callbacks.awx\_display module
+---------------------------------------------
+
+.. automodule:: ansible_runner.callbacks.awx_display
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+ansible\_runner.callbacks.minimal module
+----------------------------------------
+
+.. automodule:: ansible_runner.callbacks.minimal
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+
+Module contents
+---------------
+
+.. automodule:: ansible_runner.callbacks
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/source/ansible_runner.display_callback.rst b/docs/source/ansible_runner.display_callback.rst
new file mode 100644
index 0000000..8fa22ef
--- /dev/null
+++ b/docs/source/ansible_runner.display_callback.rst
@@ -0,0 +1,54 @@
+ansible\_runner.display\_callback package
+=========================================
+
+Submodules
+----------
+
+ansible\_runner.display\_callback.cleanup module
+------------------------------------------------
+
+.. automodule:: ansible_runner.display_callback.cleanup
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+ansible\_runner.display\_callback.display module
+------------------------------------------------
+
+.. automodule:: ansible_runner.display_callback.display
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+ansible\_runner.display\_callback.events module
+-----------------------------------------------
+
+.. automodule:: ansible_runner.display_callback.events
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+ansible\_runner.display\_callback.minimal module
+------------------------------------------------
+
+.. automodule:: ansible_runner.display_callback.minimal
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+ansible\_runner.display\_callback.module module
+-----------------------------------------------
+
+.. automodule:: ansible_runner.display_callback.module
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+
+Module contents
+---------------
+
+.. automodule:: ansible_runner.display_callback
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/source/ansible_runner.rst b/docs/source/ansible_runner.rst
new file mode 100644
index 0000000..2d11c8b
--- /dev/null
+++ b/docs/source/ansible_runner.rst
@@ -0,0 +1,62 @@
+ansible\_runner package
+=======================
+
+Subpackages
+-----------
+
+.. toctree::
+
+ ansible_runner.callbacks
+ ansible_runner.display_callback
+
+Submodules
+----------
+
+ansible\_runner.exceptions module
+---------------------------------
+
+.. automodule:: ansible_runner.exceptions
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+ansible\_runner.interface module
+--------------------------------
+
+.. automodule:: ansible_runner.interface
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+ansible\_runner.runner module
+-----------------------------
+
+.. automodule:: ansible_runner.runner
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+ansible\_runner.runner\_config module
+-------------------------------------
+
+.. automodule:: ansible_runner.runner_config
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+ansible\_runner.utils module
+----------------------------
+
+.. automodule:: ansible_runner.utils
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+
+Module contents
+---------------
+
+.. automodule:: ansible_runner
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/source/modules.rst b/docs/source/modules.rst
new file mode 100644
index 0000000..4fad22c
--- /dev/null
+++ b/docs/source/modules.rst
@@ -0,0 +1,7 @@
+ansible_runner
+==============
+
+.. toctree::
+ :maxdepth: 4
+
+ ansible_runner
diff --git a/docs/standalone.rst b/docs/standalone.rst
new file mode 100644
index 0000000..3e86cac
--- /dev/null
+++ b/docs/standalone.rst
@@ -0,0 +1,112 @@
+.. _standalone:
+
+Using Runner as a standalone command line tool
+==============================================
+
+The **Ansible Runner** command line tool can be used as a standard command line interface to **Ansible** itself but is primarily intended
+to fit into automation and pipeline workflows. Because of this, its workflow differs a bit from **Ansible** itself: you select one of a few different command modes when launching it.
+
+While you can launch **Runner** and provide it all of the inputs as arguments to the command line (as you do with **Ansible** itself),
+there is another interface where inputs are gathered into a single location referred to in the command line parameters as ``private_data_dir``
+(see :ref:`inputdir`).
+
+To view the parameters accepted by ``ansible-runner``::
+
+ $ ansible-runner --help
+
+An example invocation of the standalone ``ansible-runner`` utility::
+
+ $ ansible-runner -p playbook.yml run /tmp/private
+
+Where ``playbook.yml`` is the playbook from the ``/tmp/private/project`` directory, and ``run`` is the command mode you want to invoke **Runner** with.
+
+The different **commands** that **Runner** accepts are:
+
+* ``run`` starts ``ansible-runner`` in the foreground and waits until the underlying **Ansible** process completes before returning
+* ``start`` starts ``ansible-runner`` as a background daemon process and generates a pid file
+* ``stop`` terminates an ``ansible-runner`` process that was launched in the background with ``start``
+* ``is-alive`` checks the status of an ``ansible-runner`` process that was started in the background with ``start``
+
+While **Runner** is running, it creates an ``artifacts`` directory (see :ref:`artifactdir`) regardless of what mode it was started
+in. The resulting output and status from **Ansible** will be located here. You can control the exact location underneath the ``artifacts`` directory
+with the ``-i IDENT`` argument to ``ansible-runner``; otherwise a random UUID is generated.
+
+Executing **Runner** in the foreground
+--------------------------------------
+
+When launching **Runner** with the ``run`` command, as above, the program will stay in the foreground and you'll see output just as you expect from a normal
+**Ansible** process. **Runner** will still populate the ``artifacts`` directory, as mentioned in the previous section, to preserve the output and allow processing
+of the artifacts after exit.
+
+Executing **Runner** in the background
+--------------------------------------
+
+When launching **Runner** with the ``start`` command, the program will generate a pid file and move to the background. You can check its status with the
+``is-alive`` command, or terminate it with the ``stop`` command. You can find the stdout, status, and return code in the ``artifacts`` directory.
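+
+A possible session using the ``demo`` private data directory might look like this::
+
+    $ ansible-runner -p test.yml start demo
+    $ ansible-runner is-alive demo
+    $ ansible-runner stop demo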
+
+Running Playbooks
+-----------------
+
+An example invocation using ``demo`` as the private data directory::
+
+ $ ansible-runner --playbook test.yml run demo
+
+Running Modules Directly
+------------------------
+
+An example invoking the ``debug`` module with ``demo`` as the private data directory::
+
+ $ ansible-runner -m debug --hosts localhost -a msg=hello run demo
+
+
+Running Roles Directly
+----------------------
+
+An example invocation using ``demo`` as the private data directory and ``localhost`` as the target::
+
+ $ ansible-runner --role testrole --hosts localhost run demo
+
+The Ansible roles directory can be provided with the ``--roles-path`` option. Role variables can be passed with ``--role-vars`` at runtime.
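+
+For example, a hypothetical invocation pointing at the ``demo`` project's roles and passing one role variable might look like::
+
+    $ ansible-runner --role testrole --roles-path demo/project/roles --role-vars "greeting=hello" --hosts localhost run demo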
+
+Running with Process Isolation
+------------------------------
+
+**Runner** supports process isolation. Process isolation creates a new mount namespace where the root is on a tmpfs that is invisible from the host
+and is automatically cleaned up when the last process exits. You can enable process isolation by providing the ``--process-isolation`` argument on
+the command line. **Runner** defaults to using ``bubblewrap`` as the process isolation executable, but supports
+using any executable that is compatible with the ``bubblewrap`` CLI arguments by passing in the ``--process-isolation-executable`` argument::
+
+ $ ansible-runner --process-isolation ...
+
+**Runner** supports various process isolation arguments that allow you to provide configuration details to the process isolation executable. To view the complete
+list of arguments accepted by ``ansible-runner``::
+
+ $ ansible-runner --help
+
+Running with Directory Isolation
+--------------------------------
+
+If you need to execute multiple tasks in parallel that might conflict with each other, or if you want to make sure a single invocation of
+Ansible/Runner doesn't pollute or overwrite the playbook content, you can give a base path::
+
+ $ ansible-runner --directory-isolation-base-path /tmp/runner
+
+**Runner** will copy the project directory to a temporary directory created under that path, set it as the working directory, and execute from that location.
+After the run, that temporary directory is cleaned up and removed.
+
+.. _outputjson:
+
+Outputting json (raw event data) to the console instead of normal output
+------------------------------------------------------------------------
+
+**Runner** supports outputting the json event data structure directly to the console (and the stdout file) instead of the standard **Ansible** output, thus
+mimicking the behavior of the ``json`` output plugin. This is in addition to the event data already written to the artifact directory. All that is needed
+is to supply the ``-j`` argument on the command line::
+
+ $ ansible-runner ... -j ...
+
+Cleaning up artifact directories
+--------------------------------
+
+The command line argument ``--rotate-artifacts`` allows you to control the number of artifact directories that are kept. Passing a number as the parameter
+for this argument causes **Runner** to clean up old artifact directories beyond that count. The default value of ``0`` disables artifact directory cleanup.
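+
+For example, to keep no more than five artifact directories under ``demo``::
+
+    $ ansible-runner --rotate-artifacts 5 -p test.yml run demo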
diff --git a/packaging/debian/changelog b/packaging/debian/changelog
new file mode 100644
index 0000000..46e6510
--- /dev/null
+++ b/packaging/debian/changelog
@@ -0,0 +1,6 @@
+ansible-runner (%VERSION%-%RELEASE%) %DEB_DIST%; urgency=low
+
+ * %VERSION% release
+
+ -- Ansible, Inc. <info@ansible.com> %DATE%
+
diff --git a/packaging/debian/compat b/packaging/debian/compat
new file mode 100644
index 0000000..ec63514
--- /dev/null
+++ b/packaging/debian/compat
@@ -0,0 +1 @@
+9
diff --git a/packaging/debian/control b/packaging/debian/control
new file mode 100644
index 0000000..7488ce0
--- /dev/null
+++ b/packaging/debian/control
@@ -0,0 +1,19 @@
+Source: ansible-runner
+Section: admin
+Priority: optional
+Maintainer: Ansible <info@ansible.com>
+Build-Depends: debhelper (>= 9), dh-python, python-all, python-setuptools
+Standards-Version: 4.1.3
+Homepage: https://www.ansible.com
+Vcs-Git: https://github.com/ansible/ansible-runner.git
+
+Package: ansible-runner
+Architecture: all
+Depends: ${python:Depends}, ${misc:Depends}, python-psutil, python-pexpect (>= 4.5)
+Recommends: ansible (>= 2.1)
+Description: interfaces with Ansible from other systems (Python 2)
+ A tool and python library that helps when interfacing with Ansible
+ directly or as part of another system whether that be through a
+ container image interface, as a standalone tool, or as a Python
+ module that can be imported. The goal is to provide a stable and
+ consistent interface abstraction to Ansible.
diff --git a/packaging/debian/copyright b/packaging/debian/copyright
new file mode 100644
index 0000000..635cc75
--- /dev/null
+++ b/packaging/debian/copyright
@@ -0,0 +1,172 @@
+Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
+Upstream-Name: ansible-runner
+Source: https://github.com/ansible/ansible-runner
+
+Files: *
+Copyright: 2016 Ansible by Red Hat <info@ansible.com>
+License: Apache
+ _Version 2.0, January 2004_
+ _&lt;<http://www.apache.org/licenses/>&gt;_
+ .
+ ### Terms and Conditions for use, reproduction, and distribution
+ .
+ #### 1. Definitions
+ .
+ “License” shall mean the terms and conditions for use, reproduction, and
+ distribution as defined by Sections 1 through 9 of this document.
+ .
+ “Licensor” shall mean the copyright owner or entity authorized by the copyright
+ owner that is granting the License.
+ .
+ “Legal Entity” shall mean the union of the acting entity and all other entities
+ that control, are controlled by, or are under common control with that entity.
+ For the purposes of this definition, “control” means **(i)** the power, direct or
+ indirect, to cause the direction or management of such entity, whether by
+ contract or otherwise, or **(ii)** ownership of fifty percent (50%) or more of the
+ outstanding shares, or **(iii)** beneficial ownership of such entity.
+ .
+ “You” (or “Your”) shall mean an individual or Legal Entity exercising
+ permissions granted by this License.
+ .
+ “Source” form shall mean the preferred form for making modifications, including
+ but not limited to software source code, documentation source, and configuration
+ files.
+ .
+ “Object” form shall mean any form resulting from mechanical transformation or
+ translation of a Source form, including but not limited to compiled object code,
+ generated documentation, and conversions to other media types.
+ .
+ “Work” shall mean the work of authorship, whether in Source or Object form, made
+ available under the License, as indicated by a copyright notice that is included
+ in or attached to the work (an example is provided in the Appendix below).
+ .
+ “Derivative Works” shall mean any work, whether in Source or Object form, that
+ is based on (or derived from) the Work and for which the editorial revisions,
+ annotations, elaborations, or other modifications represent, as a whole, an
+ original work of authorship. For the purposes of this License, Derivative Works
+ shall not include works that remain separable from, or merely link (or bind by
+ name) to the interfaces of, the Work and Derivative Works thereof.
+ .
+ “Contribution” shall mean any work of authorship, including the original version
+ of the Work and any modifications or additions to that Work or Derivative Works
+ thereof, that is intentionally submitted to Licensor for inclusion in the Work
+ by the copyright owner or by an individual or Legal Entity authorized to submit
+ on behalf of the copyright owner. For the purposes of this definition,
+ “submitted” means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems, and
+ issue tracking systems that are managed by, or on behalf of, the Licensor for
+ the purpose of discussing and improving the Work, but excluding communication
+ that is conspicuously marked or otherwise designated in writing by the copyright
+ owner as “Not a Contribution.”
+ .
+ “Contributor” shall mean Licensor and any individual or Legal Entity on behalf
+ of whom a Contribution has been received by Licensor and subsequently
+ incorporated within the Work.
+ .
+ #### 2. Grant of Copyright License
+ .
+ Subject to the terms and conditions of this License, each Contributor hereby
+ grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+ irrevocable copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the Work and such
+ Derivative Works in Source or Object form.
+ .
+ #### 3. Grant of Patent License
+ .
+ Subject to the terms and conditions of this License, each Contributor hereby
+ grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+ irrevocable (except as stated in this section) patent license to make, have
+ made, use, offer to sell, sell, import, and otherwise transfer the Work, where
+ such license applies only to those patent claims licensable by such Contributor
+ that are necessarily infringed by their Contribution(s) alone or by combination
+ of their Contribution(s) with the Work to which such Contribution(s) was
+ submitted. If You institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work or a
+ Contribution incorporated within the Work constitutes direct or contributory
+ patent infringement, then any patent licenses granted to You under this License
+ for that Work shall terminate as of the date such litigation is filed.
+ .
+ #### 4. Redistribution
+ .
+ You may reproduce and distribute copies of the Work or Derivative Works thereof
+ in any medium, with or without modifications, and in Source or Object form,
+ provided that You meet the following conditions:
+ .
+ * **(a)** You must give any other recipients of the Work or Derivative Works a copy of
+ this License; and
+ * **(b)** You must cause any modified files to carry prominent notices stating that You
+ changed the files; and
+ * **(c)** You must retain, in the Source form of any Derivative Works that You distribute,
+ all copyright, patent, trademark, and attribution notices from the Source form
+ of the Work, excluding those notices that do not pertain to any part of the
+ Derivative Works; and
+ * **(d)** If the Work includes a “NOTICE” text file as part of its distribution, then any
+ Derivative Works that You distribute must include a readable copy of the
+ attribution notices contained within such NOTICE file, excluding those notices
+ that do not pertain to any part of the Derivative Works, in at least one of the
+ following places: within a NOTICE text file distributed as part of the
+ Derivative Works; within the Source form or documentation, if provided along
+ with the Derivative Works; or, within a display generated by the Derivative
+ Works, if and wherever such third-party notices normally appear. The contents of
+ the NOTICE file are for informational purposes only and do not modify the
+ License. You may add Your own attribution notices within Derivative Works that
+ You distribute, alongside or as an addendum to the NOTICE text from the Work,
+ provided that such additional attribution notices cannot be construed as
+ modifying the License.
+ .
+ You may add Your own copyright statement to Your modifications and may provide
+ additional or different license terms and conditions for use, reproduction, or
+ distribution of Your modifications, or for any such Derivative Works as a whole,
+ provided Your use, reproduction, and distribution of the Work otherwise complies
+ with the conditions stated in this License.
+ .
+ #### 5. Submission of Contributions
+ .
+ Unless You explicitly state otherwise, any Contribution intentionally submitted
+ for inclusion in the Work by You to the Licensor shall be under the terms and
+ conditions of this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify the terms of
+ any separate license agreement you may have executed with Licensor regarding
+ such Contributions.
+ .
+ #### 6. Trademarks
+ .
+ This License does not grant permission to use the trade names, trademarks,
+ service marks, or product names of the Licensor, except as required for
+ reasonable and customary use in describing the origin of the Work and
+ reproducing the content of the NOTICE file.
+ .
+ #### 7. Disclaimer of Warranty
+ .
+ Unless required by applicable law or agreed to in writing, Licensor provides the
+ Work (and each Contributor provides its Contributions) on an “AS IS” BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
+ including, without limitation, any warranties or conditions of TITLE,
+ NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
+ solely responsible for determining the appropriateness of using or
+ redistributing the Work and assume any risks associated with Your exercise of
+ permissions under this License.
+ .
+ #### 8. Limitation of Liability
+ .
+ In no event and under no legal theory, whether in tort (including negligence),
+ contract, or otherwise, unless required by applicable law (such as deliberate
+ and grossly negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special, incidental,
+ or consequential damages of any character arising as a result of this License or
+ out of the use or inability to use the Work (including but not limited to
+ damages for loss of goodwill, work stoppage, computer failure or malfunction, or
+ any and all other commercial damages or losses), even if such Contributor has
+ been advised of the possibility of such damages.
+ .
+ #### 9. Accepting Warranty or Additional Liability
+ .
+ While redistributing the Work or Derivative Works thereof, You may choose to
+ offer, and charge a fee for, acceptance of support, warranty, indemnity, or
+ other liability obligations and/or rights consistent with this License. However,
+ in accepting such obligations, You may act only on Your own behalf and on Your
+ sole responsibility, not on behalf of any other Contributor, and only if You
+ agree to indemnify, defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason of your
+ accepting any such warranty or additional liability.
diff --git a/packaging/debian/docker/Dockerfile b/packaging/debian/docker/Dockerfile
new file mode 100644
index 0000000..c9968c4
--- /dev/null
+++ b/packaging/debian/docker/Dockerfile
@@ -0,0 +1,8 @@
+FROM ubuntu:xenial
+
+RUN apt-get update
+RUN apt-get install -y \
+ make debhelper dh-python devscripts python-all python-setuptools python-pip
+
+RUN pip install -IU pip setuptools
+RUN pip install -IU pipenv ansible
diff --git a/packaging/debian/docker/docker-compose.yml b/packaging/debian/docker/docker-compose.yml
new file mode 100644
index 0000000..8d84ca0
--- /dev/null
+++ b/packaging/debian/docker/docker-compose.yml
@@ -0,0 +1,14 @@
+---
+version: '3'
+services:
+ deb-builder:
+ build: .
+ environment:
+ RELEASE:
+ OFFICIAL:
+ volumes:
+ - ../../../:/ansible-runner
+ - ${GPG_SIGNING_KEY}:/signing_key.asc
+ entrypoint: ["/bin/bash", "-c"]
+ working_dir: /ansible-runner
+ privileged: true
diff --git a/packaging/debian/pydist-overrides b/packaging/debian/pydist-overrides
new file mode 100644
index 0000000..44fea95
--- /dev/null
+++ b/packaging/debian/pydist-overrides
@@ -0,0 +1 @@
+pexpect python-pexpect (>= 4.5)
diff --git a/packaging/debian/rules b/packaging/debian/rules
new file mode 100755
index 0000000..9443903
--- /dev/null
+++ b/packaging/debian/rules
@@ -0,0 +1,10 @@
+#!/usr/bin/make -f
+# See debhelper(7) (uncomment to enable)
+# output every command that modifies files on the build system.
+#export DH_VERBOSE = 1
+
+export PYBUILD_NAME=ansible-runner
+export DEB_BUILD_OPTIONS=nocheck
+
+%:
+ dh $@ --with python2 --buildsystem=pybuild
diff --git a/packaging/debian/source/format b/packaging/debian/source/format
new file mode 100644
index 0000000..163aaf8
--- /dev/null
+++ b/packaging/debian/source/format
@@ -0,0 +1 @@
+3.0 (quilt)
diff --git a/packaging/rpm/Dockerfile.epel-7-x86_64 b/packaging/rpm/Dockerfile.epel-7-x86_64
new file mode 100644
index 0000000..0449148
--- /dev/null
+++ b/packaging/rpm/Dockerfile.epel-7-x86_64
@@ -0,0 +1,12 @@
+FROM centos:7
+
+RUN yum install -y epel-release
+RUN yum install -y make mock python-pip which git
+
+# Fix output of rpm --eval '%{?dist}'
+RUN sed -i "s/.el7.centos/.el7/g" /etc/rpm/macros.dist
+
+# Newer version of setuptools needed for pipenv
+RUN pip install -IU pip setuptools
+RUN pip install -IU pipenv ansible
+
diff --git a/packaging/rpm/Dockerfile.epel-8-x86_64 b/packaging/rpm/Dockerfile.epel-8-x86_64
new file mode 100644
index 0000000..b242e7c
--- /dev/null
+++ b/packaging/rpm/Dockerfile.epel-8-x86_64
@@ -0,0 +1,8 @@
+FROM centos:8
+
+RUN dnf install -y epel-release
+RUN yum install -y make mock python3-pip which git
+
+# Newer version of setuptools needed for pipenv
+RUN pip3 install -IU pip setuptools
+RUN pip3 install -IU pipenv ansible
diff --git a/packaging/rpm/ansible-runner.spec.j2 b/packaging/rpm/ansible-runner.spec.j2
new file mode 100644
index 0000000..9f3e532
--- /dev/null
+++ b/packaging/rpm/ansible-runner.spec.j2
@@ -0,0 +1,155 @@
+%global pypi_name ansible-runner
+
+%global python3_sitelib /usr/lib/python3.6/site-packages/
+
+%if 0%{?fedora} || 0%{?rhel} > 7
+%bcond_with python2
+%bcond_without python3
+%else
+%bcond_without python2
+%bcond_with python3
+%endif
+
+Name: %{pypi_name}
+Version: {{ version }}
+Release: {{ release }}%{?dist}
+Summary: A tool and python library to interface with Ansible
+
+License: ASL 2.0
+URL: https://github.com/ansible/ansible-runner
+Source0: https://github.com/ansible/%{name}/archive/%{version}.tar.gz?/%{name}-%{version}-{{ release }}.tar.gz
+BuildArch: noarch
+
+%if %{with python2}
+BuildRequires: python-rpm-macros
+BuildRequires: python2-setuptools
+Requires: python2-%{pypi_name} = %{version}-%{release}
+%endif
+
+%if %{with python3}
+BuildRequires: python3
+BuildRequires: python3-setuptools
+Requires: python3-%{pypi_name} = %{version}-%{release}
+%endif
+
+%description
+Ansible Runner is a tool and python library that helps when interfacing with
+Ansible from other systems whether through a container image interface, as a
+standalone tool, or imported into a python project.
+
+%if %{with python2}
+%package -n python2-%{pypi_name}
+Summary: %{summary}
+%{?python_provide:%python_provide python2-%{pypi_name}}
+
+Requires: python-setuptools
+Requires: python-daemon
+Requires: pexpect >= 4.6
+Requires: python-psutil
+Requires: PyYAML
+Requires: python-six
+Requires: python-lockfile
+
+%description -n python2-%{pypi_name}
+Ansible Runner is a tool and python library that helps when interfacing with
+Ansible from other systems whether through a container image interface, as a
+standalone tool, or imported into a python project.
+%endif
+
+%if %{with python3}
+%package -n python3-%{pypi_name}
+Summary: %{summary}
+%{?python_provide:%python_provide python3-%{pypi_name}}
+
+Requires: python3-pyyaml
+Requires: python3-setuptools
+Requires: python3-daemon
+Requires: python3-six
+Requires: python3dist(pexpect) >= 4.6
+Requires: python3dist(psutil)
+Requires: python3dist(lockfile)
+
+%description -n python3-%{pypi_name}
+Ansible Runner is a tool and python library that helps when interfacing with
+Ansible from other systems whether through a container image interface, as a
+standalone tool, or imported into a python project.
+%endif
+
+%prep
+%autosetup -n %{pypi_name}-%{version}
+# Remove bundled egg-info
+rm -rf %{pypi_name}.egg-info
+
+%global py_setup setup.py
+
+%build
+%if %{with python2}
+export RHEL_ALLOW_PYTHON2_FOR_BUILD=1
+python2 setup.py build
+%endif
+%if %{with python3}
+python3 setup.py build
+%endif
+
+%install
+# Must do the subpackages' install first because the scripts in /usr/bin are
+# overwritten with every setup.py install.
+
+%if %{with python3}
+python3 setup.py install -O1 --skip-build --root %{buildroot}
+cp %{buildroot}/%{_bindir}/ansible-runner %{buildroot}/%{_bindir}/ansible-runner-%{python3_version}
+ln -s %{_bindir}/ansible-runner-%{python3_version} %{buildroot}/%{_bindir}/ansible-runner-3
+%endif
+
+%if %{with python2}
+export RHEL_ALLOW_PYTHON2_FOR_BUILD=1
+python2 setup.py install -O1 --skip-build --root %{buildroot}
+cp %{buildroot}/%{_bindir}/ansible-runner %{buildroot}/%{_bindir}/ansible-runner-%{python2_version}
+ln -s %{_bindir}/ansible-runner-%{python2_version} %{buildroot}/%{_bindir}/ansible-runner-2
+%endif
+
+%files
+%defattr(-,root,root)
+
+%if %{with python2}
+%files -n python2-%{pypi_name}
+%{_bindir}/ansible-runner
+%{_bindir}/ansible-runner-2
+%{_bindir}/ansible-runner-%{python2_version}
+%{python_sitelib}/*
+%endif
+
+%if %{with python3}
+%files -n python3-%{pypi_name}
+%{python3_sitelib}/*
+%{_bindir}/ansible-runner
+%{_bindir}/ansible-runner-3
+%{_bindir}/ansible-runner-%{python3_version}
+%endif
+
+%changelog
+* Thu Mar 19 2020 Ryan Petrello <rpetrell@redhat.com> - 1.4.6-1
+- Ansible Runner 1.4.6-1
+
+* Thu Mar 19 2020 Matthew Jones <matburt@redhat.com> - 1.4.5-1
+- Ansible Runner 1.4.5-1
+
+* Tue Feb 25 2020 Yanis Guenane <yguenane@redhat.com> - 1.4.4-3
+- Ansible Runner 1.4.4-3
+
+* Fri Oct 25 2019 Matthew Jones <matburt@redhat.com> - 1.4.4-1
+- Ansible Runner 1.4.4-1
+
+* Thu Oct 17 2019 Matthew Jones <matburt@redhat.com> - 1.4.2-1
+- Ansible Runner 1.4.2-1
+
+* Thu Oct 03 2019 Matthew Jones <matburt@redhat.com> - 1.4.1-1
+- Ansible Runner 1.4.1-1
+
+* Mon Sep 23 2019 Shane McDonald <shanemcd@redhat.com> - 1.4.0-1
+- Ansible Runner 1.4.0-1
+- Support for EL 7.7 (defaults to python2)
+
+* Wed Apr 24 2019 Shane McDonald <shanemcd@redhat.com> - 1.3.4-1
+- Ansible Runner 1.3.4-1
+- Adopted modified upstream spec file for python3 support
diff --git a/packaging/rpm/docker-compose.yml b/packaging/rpm/docker-compose.yml
new file mode 100644
index 0000000..ec7c86e
--- /dev/null
+++ b/packaging/rpm/docker-compose.yml
@@ -0,0 +1,21 @@
+---
+version: '3'
+services:
+ rpm-builder:
+ build:
+ dockerfile: Dockerfile.${MOCK_CONFIG}
+ context: .
+ image: runner-rpm-builder:${MOCK_CONFIG}
+ environment:
+ MOCK_BIN: "mock --old-chroot"
+ MOCK_CONFIG:
+ RELEASE:
+ OFFICIAL:
+ volumes:
+ - ../../:/ansible-runner
+ - mock-cache:/var/cache/mock
+ entrypoint: ["/bin/bash", "-c"]
+ working_dir: /ansible-runner
+ privileged: true
+volumes:
+ mock-cache:
diff --git a/setup.cfg b/setup.cfg
new file mode 100755
index 0000000..67f6276
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,25 @@
+[pep8]
+# E201 - Whitespace after '('
+# E203 - Whitespace before ":"
+# E221 - Multiple spaces after operator
+# E225 - Missing whitespace around operator
+# E231 - Missing whitespace after ','
+# E241 - Multiple spaces after ','
+# E251 - Unexpected spaces around keyword / parameter equals
+# E261 - At least two spaces before inline comment
+# E302 - Expected 2 blank lines found 0
+# E303 - Too many blank lines
+# W291 - Trailing whitespace
+# W391 - Blank line at end of file
+# W293 - Blank line contains whitespace
+ignore=E201,E203,E221,E225,E231,E241,E251,E261,E265,E303,W291,W391,W293
+exclude=.tox,venv,awx/lib/site-packages,awx/plugins/inventory/ec2.py,awx/plugins/inventory/gce.py,awx/plugins/inventory/vmware.py,awx/plugins/inventory/openstack.py,awx/ui,awx/api/urls.py,awx/main/migrations,awx/main/south_migrations,awx/main/tests/data,installer/openshift/settings.py
+
+[flake8]
+max-line-length=160
+ignore=E201,E203,E221,E225,E231,E241,E251,E261,E265,E303,W291,W391,W293,E731,F405
+exclude=.tox,venv,awx/lib/site-packages,awx/plugins/inventory,awx/ui,awx/api/urls.py,awx/main/migrations,awx/main/south_migrations,awx/main/tests/data,node_modules/,awx/projects/,tools/docker,awx/settings/local_*.py,installer/openshift/settings.py,build/,installer/
+
+[metadata]
+license_file=LICENSE.md
+description-file = README.md \ No newline at end of file
diff --git a/setup.py b/setup.py
new file mode 100644
index 0000000..8b1c590
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,33 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2018 Red Hat, Inc.
+# All Rights Reserved.
+
+from setuptools import setup, find_packages
+
+with open('README.md', 'r') as f:
+ long_description = f.read()
+
+setup(
+ name="ansible-runner",
+ version="1.4.6",
+ author='Red Hat Ansible',
+ url="https://github.com/ansible/ansible-runner",
+ license='Apache',
+ packages=find_packages(),
+ long_description=long_description,
+ long_description_content_type='text/markdown',
+ install_requires=[
+ 'psutil',
+ 'pexpect>=4.5',
+ 'python-daemon',
+ 'PyYAML',
+ 'six',
+ ],
+ zip_safe=False,
+ entry_points={
+ 'console_scripts': [
+ 'ansible-runner = ansible_runner.__main__:main'
+ ]
+ }
+)
diff --git a/test/__init__.py b/test/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/__init__.py
diff --git a/test/conftest.py b/test/conftest.py
new file mode 100644
index 0000000..b65db87
--- /dev/null
+++ b/test/conftest.py
@@ -0,0 +1,6 @@
+import pytest
+
+
+@pytest.fixture(autouse=True)
+def mock_env_user(monkeypatch):
+ monkeypatch.setenv("ANSIBLE_DEVEL_WARNING", "False")
diff --git a/test/integration/callback/other_callback.py b/test/integration/callback/other_callback.py
new file mode 100644
index 0000000..79cbb5e
--- /dev/null
+++ b/test/integration/callback/other_callback.py
@@ -0,0 +1,14 @@
+from ansible.plugins.callback import CallbackBase
+
+
+class CallbackModule(CallbackBase):
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'aggregate'
+ CALLBACK_NAME = 'other_callback'
+
+ def v2_playbook_on_play_start(self, play):
+ pass
+
+ def v2_runner_on_ok(self, result):
+ pass
+
diff --git a/test/integration/conftest.py b/test/integration/conftest.py
new file mode 100644
index 0000000..d6986bc
--- /dev/null
+++ b/test/integration/conftest.py
@@ -0,0 +1,20 @@
+import pytest
+import pexpect
+from ansible_runner.runner_config import RunnerConfig
+
+
+@pytest.fixture(scope='function')
+def rc(request, tmpdir):
+ rc = RunnerConfig(str(tmpdir))
+ rc.suppress_ansible_output = True
+ rc.expect_passwords = {
+ pexpect.TIMEOUT: None,
+ pexpect.EOF: None
+ }
+ rc.cwd = str(tmpdir)
+ rc.env = {}
+ rc.job_timeout = 2
+ rc.idle_timeout = 0
+ rc.pexpect_timeout = .1
+ rc.pexpect_use_poll = True
+ return rc
diff --git a/test/integration/inventory/localhost b/test/integration/inventory/localhost
new file mode 100644
index 0000000..df8b5f6
--- /dev/null
+++ b/test/integration/inventory/localhost
@@ -0,0 +1,2 @@
+[all]
+localhost ansible_connection=local
diff --git a/test/integration/project/hello.yml b/test/integration/project/hello.yml
new file mode 100644
index 0000000..bcf5d34
--- /dev/null
+++ b/test/integration/project/hello.yml
@@ -0,0 +1,6 @@
+---
+- hosts: all
+ tasks:
+ - debug: msg="Before!"
+ - shell: sleep 90
+ - debug: msg="After!"
diff --git a/test/integration/project/roles/benthomasson.hello_role/meta/.galaxy_install_info b/test/integration/project/roles/benthomasson.hello_role/meta/.galaxy_install_info
new file mode 100644
index 0000000..6292621
--- /dev/null
+++ b/test/integration/project/roles/benthomasson.hello_role/meta/.galaxy_install_info
@@ -0,0 +1 @@
+{install_date: 'Mon Aug 20 13:21:07 2018', version: master}
diff --git a/test/integration/project/roles/benthomasson.hello_role/meta/main.yml b/test/integration/project/roles/benthomasson.hello_role/meta/main.yml
new file mode 100644
index 0000000..d61c246
--- /dev/null
+++ b/test/integration/project/roles/benthomasson.hello_role/meta/main.yml
@@ -0,0 +1,13 @@
+---
+galaxy_info:
+ author: Ben Thomasson
+ description: Hello World role
+ company: Red Hat
+
+ license: Apache
+
+ min_ansible_version: 1.2
+
+ galaxy_tags:
+ - hello
+dependencies: []
diff --git a/test/integration/project/roles/benthomasson.hello_role/tasks/main.yml b/test/integration/project/roles/benthomasson.hello_role/tasks/main.yml
new file mode 100644
index 0000000..92cedff
--- /dev/null
+++ b/test/integration/project/roles/benthomasson.hello_role/tasks/main.yml
@@ -0,0 +1,2 @@
+---
+- debug:
diff --git a/test/integration/project/use_role.yml b/test/integration/project/use_role.yml
new file mode 100644
index 0000000..faf4292
--- /dev/null
+++ b/test/integration/project/use_role.yml
@@ -0,0 +1,5 @@
+---
+- hosts: all
+ gather_facts: false
+ roles:
+ - name: benthomasson.hello_role
diff --git a/test/integration/test___main__.py b/test/integration/test___main__.py
new file mode 100644
index 0000000..c44ee01
--- /dev/null
+++ b/test/integration/test___main__.py
@@ -0,0 +1,243 @@
+import os
+import sys
+import uuid
+import json
+import random
+import string
+import tempfile
+import shutil
+
+from pytest import raises
+from mock import patch
+
+from ansible_runner.__main__ import main
+
+
+def random_string():
+ return ''.join(random.choice(string.ascii_uppercase + string.digits)
+ for _ in range(random.randint(3, 20)))
+
+
+def random_json(keys=None):
+ data = dict()
+ if keys:
+ for key in keys:
+ data[key] = random_string()
+ else:
+ for _ in range(0, 5):
+ data[random_string()] = random_string()
+ return json.dumps(data)
+
+
+def cmdline(command, *args):
+ cmdline = ['ansible-runner', command]
+ cmdline.extend(args)
+ sys.argv = cmdline
+
+
+def test_main_bad_private_data_dir():
+    tmpfile = os.path.join('/tmp', str(uuid.uuid4().hex))
+    with open(tmpfile, 'w') as f:
+        f.write(random_string())
+
+ cmdline('run', tmpfile, '-p', 'fake')
+
+ try:
+ with raises(OSError):
+ main()
+ finally:
+ os.remove(tmpfile)
+
+
+def run_role(options, private_data_dir=None, expected_rc=0):
+    try:
+        private_data_dir = private_data_dir or tempfile.mkdtemp()
+        args = ['run', private_data_dir]
+        args.extend(options)
+        # wire the assembled arguments into sys.argv before invoking main()
+        cmdline(*args)
+
+        with patch('ansible_runner.interface.run') as mock_run:
+            with raises(SystemExit) as exc:
+                main()
+            assert exc.type == SystemExit
+            assert exc.value.code == expected_rc
+
+    finally:
+        shutil.rmtree(private_data_dir)
+    return mock_run
+
+
+def test_cmdline_role_defaults():
+ """Run a role directly with all command line defaults
+ """
+ private_data_dir = tempfile.mkdtemp()
+    options = ['-r', 'test']
+
+ playbook = [{'hosts': 'all', 'gather_facts': True, 'roles': [{'role': 'test'}]}]
+
+ run_options = {
+ 'private_data_dir': private_data_dir,
+ 'playbook': playbook
+ }
+
+ result = run_role(options, private_data_dir)
+ result.called_with_args([run_options])
+
+
+def test_cmdline_role_skip_facts():
+ """Run a role directly and set --role-skip-facts option
+ """
+ private_data_dir = tempfile.mkdtemp()
+    options = ['-r', 'test', '--role-skip-facts']
+
+ playbook = [{'hosts': 'all', 'gather_facts': False, 'roles': [{'role': 'test'}]}]
+
+ run_options = {
+ 'private_data_dir': private_data_dir,
+ 'playbook': playbook
+ }
+
+ result = run_role(options, private_data_dir)
+ result.called_with_args([run_options])
+
+
+def test_cmdline_role_inventory():
+ """Run a role directly and set --inventory option
+ """
+ private_data_dir = tempfile.mkdtemp()
+    options = ['-r', 'test', '--inventory', 'hosts']
+
+ playbook = [{'hosts': 'all', 'gather_facts': False, 'roles': [{'role': 'test'}]}]
+
+ run_options = {
+ 'private_data_dir': private_data_dir,
+ 'playbook': playbook,
+ 'inventory': 'hosts'
+ }
+
+ result = run_role(options, private_data_dir)
+ result.called_with_args([run_options])
+
+
+def test_cmdline_role_vars():
+ """Run a role directly and set --role-vars option
+ """
+ private_data_dir = tempfile.mkdtemp()
+    options = ['-r', 'test', '--role-vars', 'foo=bar']
+
+ playbook = [{
+ 'hosts': 'all',
+ 'gather_facts': False,
+ 'roles': [{
+ 'role': 'test',
+ 'vars': {'foo': 'bar'}
+ }]
+ }]
+
+ run_options = {
+ 'private_data_dir': private_data_dir,
+ 'playbook': playbook
+ }
+
+ result = run_role(options, private_data_dir)
+ result.called_with_args([run_options])
+
+
+def test_cmdline_roles_path():
+ """Run a role directly and set --roles-path option
+ """
+ private_data_dir = tempfile.mkdtemp()
+    options = ['-r', 'test', '--roles-path', '/tmp/roles']
+
+ playbook = [{'hosts': 'all', 'gather_facts': False, 'roles': [{'role': 'test'}]}]
+
+ run_options = {
+ 'private_data_dir': private_data_dir,
+ 'playbook': playbook,
+ 'envvars': {'ANSIBLE_ROLES_PATH': '/tmp/roles'}
+ }
+
+ result = run_role(options, private_data_dir)
+ result.called_with_args([run_options])
+
+
+def test_cmdline_role_with_playbook_option():
+ """Test error is raised with invalid command line option '-p'
+ """
+ cmdline('run', 'private_data_dir', '-r', 'fake', '-p', 'fake')
+ with raises(SystemExit) as exc:
+ main()
+ assert exc == 1
+
+
+def test_cmdline_playbook():
+ try:
+ private_data_dir = tempfile.mkdtemp()
+ play = [{'hosts': 'all', 'tasks': [{'debug': {'msg': random_string()}}]}]
+
+ path = os.path.join(private_data_dir, 'project')
+ os.makedirs(path)
+
+ playbook = os.path.join(path, 'main.yaml')
+ with open(playbook, 'w') as f:
+ f.write(json.dumps(play))
+
+ path = os.path.join(private_data_dir, 'inventory')
+ os.makedirs(path)
+
+ inventory = os.path.join(path, 'hosts')
+ with open(inventory, 'w') as f:
+ f.write('[all]\nlocalhost ansible_connection=local')
+
+ cmdline('run', private_data_dir, '-p', playbook, '--inventory', inventory)
+
+ assert main() == 0
+
+ with open(playbook) as f:
+ assert json.loads(f.read()) == play
+
+ finally:
+ shutil.rmtree(private_data_dir)
+
+
+def test_cmdline_playbook_hosts():
+ """Test error is raised with trying to pass '--hosts' with '-p'
+ """
+ cmdline('run', 'private_data_dir', '-p', 'fake', '--hosts', 'all')
+ with raises(SystemExit) as exc:
+ main()
+ assert exc == 1
+
+
+def test_cmdline_includes_one_option():
+ """Test error is raised if not '-p', '-m' or '-r'
+ """
+ cmdline('run', 'private_data_dir')
+ with raises(SystemExit) as exc:
+ main()
+ assert exc == 1
+
+
+def test_cmdline_cmdline_override():
+ try:
+ private_data_dir = tempfile.mkdtemp()
+ play = [{'hosts': 'all', 'tasks': [{'debug': {'msg': random_string()}}]}]
+
+ path = os.path.join(private_data_dir, 'project')
+ os.makedirs(path)
+
+ playbook = os.path.join(path, 'main.yaml')
+ with open(playbook, 'w') as f:
+ f.write(json.dumps(play))
+ path = os.path.join(private_data_dir, 'inventory')
+ os.makedirs(path)
+
+ inventory = os.path.join(path, 'hosts')
+ with open(inventory, 'w') as f:
+ f.write('[all]\nlocalhost ansible_connection=local')
+
+ # privateip: removed --hosts command line option from test beause it is
+ # not a supported combination of cli options
+ #cmdline('run', private_data_dir, '-p', playbook, '--hosts', 'all', '--cmdline', '-e foo=bar')
+ cmdline('run', private_data_dir, '-p', playbook, '--cmdline', '-e foo=bar')
+ assert main() == 0
+ finally:
+ shutil.rmtree(private_data_dir)
diff --git a/test/integration/test_display_callback.py b/test/integration/test_display_callback.py
new file mode 100644
index 0000000..fbf8796
--- /dev/null
+++ b/test/integration/test_display_callback.py
@@ -0,0 +1,318 @@
+from __future__ import absolute_import
+
+import json
+import os
+import yaml
+import six
+
+from ansible import __version__ as ANSIBLE_VERSION
+
+from ansible_runner.interface import init_runner
+
+import pytest
+
+HERE = os.path.abspath(os.path.dirname(__file__))
+
+
+@pytest.fixture()
+def executor(tmpdir, request):
+ private_data_dir = six.text_type(tmpdir.mkdir('foo'))
+
+ playbooks = request.node.callspec.params.get('playbook')
+ playbook = list(playbooks.values())[0]
+ envvars = request.node.callspec.params.get('envvars')
+ if envvars is None:
+ envvars = {}
+ # warning messages create verbose events and interfere with assertions
+ envvars["ANSIBLE_DEPRECATION_WARNINGS"] = "False"
+ # python interpreter used is not of much interest, we really want to silence warnings
+ envvars['ANSIBLE_PYTHON_INTERPRETER'] = 'auto_silent'
+
+ r = init_runner(
+ private_data_dir=private_data_dir,
+ inventory="localhost ansible_connection=local",
+ envvars=envvars,
+ playbook=yaml.safe_load(playbook)
+ )
+
+ return r
+
+
+@pytest.mark.parametrize('event', {'playbook_on_start',
+ 'playbook_on_play_start',
+ 'playbook_on_task_start', 'runner_on_ok',
+ 'playbook_on_stats'})
+@pytest.mark.parametrize('playbook', [
+{'helloworld.yml': '''
+- name: Hello World Sample
+ connection: local
+ hosts: all
+ gather_facts: no
+ tasks:
+ - name: Hello Message
+ debug:
+ msg: "Hello World!"
+'''}, # noqa
+{'results_included.yml': '''
+- name: Run module which generates results list
+ connection: local
+ hosts: all
+ gather_facts: no
+ vars:
+ results: ['foo', 'bar']
+ tasks:
+ - name: Generate results list
+ debug:
+ var: results
+'''}, # noqa
+])
+@pytest.mark.parametrize('envvars', [
+ {'ANSIBLE_CALLBACK_PLUGINS': os.path.join(HERE, 'callback')},
+ {'ANSIBLE_CALLBACK_PLUGINS': ''}
+])
+def test_callback_plugin_receives_events(executor, event, playbook, envvars):
+ executor.run()
+ assert len(list(executor.events))
+ assert event in [task['event'] for task in executor.events]
+
+
+
+@pytest.mark.parametrize('playbook', [
+{'no_log_on_ok.yml': '''
+- name: args should not be logged when task-level no_log is set
+ connection: local
+ hosts: all
+ gather_facts: no
+ tasks:
+ - shell: echo "SENSITIVE"
+ no_log: true
+'''}, # noqa
+{'no_log_on_fail.yml': '''
+- name: failed args should not be logged when task-level no_log is set
+ connection: local
+ hosts: all
+ gather_facts: no
+ tasks:
+ - shell: echo "SENSITIVE"
+ no_log: true
+ failed_when: true
+ ignore_errors: true
+'''}, # noqa
+{'no_log_on_skip.yml': '''
+- name: skipped task args should be suppressed with no_log
+ connection: local
+ hosts: all
+ gather_facts: no
+ tasks:
+ - shell: echo "SENSITIVE"
+ no_log: true
+ when: false
+'''}, # noqa
+{'no_log_on_play.yml': '''
+- name: args should not be logged when play-level no_log set
+ connection: local
+ hosts: all
+ gather_facts: no
+ no_log: true
+ tasks:
+ - shell: echo "SENSITIVE"
+'''}, # noqa
+{'async_no_log.yml': '''
+- name: async task args should suppressed with no_log
+ connection: local
+ hosts: all
+ gather_facts: no
+ no_log: true
+ tasks:
+ - async: 10
+ poll: 1
+ shell: echo "SENSITIVE"
+ no_log: true
+'''}, # noqa
+{'with_items.yml': '''
+- name: with_items tasks should be suppressed with no_log
+ connection: local
+ hosts: all
+ gather_facts: no
+ tasks:
+ - shell: echo {{ item }}
+ no_log: true
+ with_items: [ "SENSITIVE", "SENSITIVE-SKIPPED", "SENSITIVE-FAILED" ]
+ when: item != "SENSITIVE-SKIPPED"
+ failed_when: item == "SENSITIVE-FAILED"
+ ignore_errors: yes
+'''}, # noqa, NOTE: with_items will be deprecated in 2.9
+{'loop.yml': '''
+- name: loop tasks should be suppressed with no_log
+ connection: local
+ hosts: all
+ gather_facts: no
+ tasks:
+ - shell: echo {{ item }}
+ no_log: true
+ loop: [ "SENSITIVE", "SENSITIVE-SKIPPED", "SENSITIVE-FAILED" ]
+ when: item != "SENSITIVE-SKIPPED"
+ failed_when: item == "SENSITIVE-FAILED"
+ ignore_errors: yes
+'''}, # noqa
+])
+def test_callback_plugin_no_log_filters(executor, playbook):
+ executor.run()
+ assert len(list(executor.events))
+ assert 'SENSITIVE' not in json.dumps(list(executor.events))
+
+
+@pytest.mark.parametrize('playbook', [
+{'no_log_on_ok.yml': '''
+- name: args should not be logged when no_log is set at the task or module level
+ connection: local
+ hosts: all
+ gather_facts: no
+ tasks:
+ - shell: echo "PUBLIC"
+ - shell: echo "PRIVATE"
+ no_log: true
+ - uri: url=https://example.org url_username="PUBLIC" url_password="PRIVATE"
+'''}, # noqa
+])
+def test_callback_plugin_task_args_leak(executor, playbook):
+ executor.run()
+ events = list(executor.events)
+ assert events[0]['event'] == 'playbook_on_start'
+ assert events[1]['event'] == 'playbook_on_play_start'
+
+ # task 1
+ assert events[2]['event'] == 'playbook_on_task_start'
+ assert events[3]['event'] == 'runner_on_start'
+ assert events[4]['event'] == 'runner_on_ok'
+
+ # task 2 no_log=True
+ assert events[5]['event'] == 'playbook_on_task_start'
+ assert events[6]['event'] == 'runner_on_start'
+ assert events[7]['event'] == 'runner_on_ok'
+ assert 'PUBLIC' in json.dumps(events), events
+ for event in events:
+ assert 'PRIVATE' not in json.dumps(event), event
+ # make sure playbook was successful, so all tasks were hit
+ assert not events[-1]['event_data']['failures'], 'Unexpected playbook execution failure'
+
+
+@pytest.mark.parametrize('playbook', [
+{'loop_with_no_log.yml': '''
+- name: playbook variable should not be overwritten when using no log
+ connection: local
+ hosts: all
+ gather_facts: no
+ tasks:
+ - command: "{{ item }}"
+ register: command_register
+ no_log: True
+ with_items:
+ - "echo helloworld!"
+ - debug: msg="{{ command_register.results|map(attribute='stdout')|list }}"
+'''}, # noqa
+])
+def test_callback_plugin_censoring_does_not_overwrite(executor, playbook):
+ executor.run()
+ events = list(executor.events)
+ assert events[0]['event'] == 'playbook_on_start'
+ assert events[1]['event'] == 'playbook_on_play_start'
+
+ # task 1
+ assert events[2]['event'] == 'playbook_on_task_start'
+ # Ordering of task and item events may differ randomly
+ assert set(['runner_on_start', 'runner_item_on_ok', 'runner_on_ok']) == set([data['event'] for data in events[3:6]])
+
+ # task 2 no_log=True
+ assert events[6]['event'] == 'playbook_on_task_start'
+ assert events[7]['event'] == 'runner_on_start'
+ assert events[8]['event'] == 'runner_on_ok'
+ assert 'helloworld!' in events[8]['event_data']['res']['msg']
+
+
+@pytest.mark.parametrize('playbook', [
+{'strip_env_vars.yml': '''
+- name: sensitive environment variables should be stripped from events
+ connection: local
+ hosts: all
+ tasks:
+ - shell: echo "Hello, World!"
+'''}, # noqa
+])
+def test_callback_plugin_strips_task_environ_variables(executor, playbook):
+ executor.run()
+ assert len(list(executor.events))
+ for event in list(executor.events):
+ assert os.environ['PATH'] not in json.dumps(event)
+
+
+@pytest.mark.parametrize('playbook', [
+{'custom_set_stat.yml': '''
+- name: custom set_stat calls should persist to the local disk so awx can save them
+ connection: local
+ hosts: all
+ tasks:
+ - set_stats:
+ data:
+ foo: "bar"
+'''}, # noqa
+])
+def test_callback_plugin_saves_custom_stats(executor, playbook):
+ executor.run()
+ for event in executor.events:
+ event_data = event.get('event_data', {})
+ if 'artifact_data' in event_data:
+ assert event_data['artifact_data'] == {'foo': 'bar'}
+ break
+ else:
+ raise Exception('Did not find expected artifact data in event data')
+
+
+@pytest.mark.parametrize('playbook', [
+{'handle_playbook_on_notify.yml': '''
+- name: handle playbook_on_notify events properly
+ connection: local
+ hosts: all
+ handlers:
+ - name: my_handler
+ debug: msg="My Handler"
+ tasks:
+ - debug: msg="My Task"
+ changed_when: true
+ notify:
+ - my_handler
+'''}, # noqa
+])
+@pytest.mark.skipif(ANSIBLE_VERSION < '2.5', reason="v2_playbook_on_notify doesn't work before ansible 2.5")
+def test_callback_plugin_records_notify_events(executor, playbook):
+ executor.run()
+ assert len(list(executor.events))
+ notify_events = [x for x in executor.events if x['event'] == 'playbook_on_notify']
+ assert len(notify_events) == 1
+ assert notify_events[0]['event_data']['handler'] == 'my_handler'
+ assert notify_events[0]['event_data']['host'] == 'localhost'
+ assert notify_events[0]['event_data']['task'] == 'debug'
+
+
+@pytest.mark.parametrize('playbook', [
+{'no_log_module_with_var.yml': '''
+- name: ensure that module-level secrets are redacted
+ connection: local
+ hosts: all
+ vars:
+ - pw: SENSITIVE
+ tasks:
+ - uri:
+ url: https://example.org
+ url_username: john-jacob-jingleheimer-schmidt
+ url_password: "{{ pw }}"
+'''}, # noqa
+])
+def test_module_level_no_log(executor, playbook):
+ # It's possible for `no_log=True` to be defined at the _module_ level,
+ # e.g., for the URI module password parameter
+ # This test ensures that we properly redact those
+ executor.run()
+ assert len(list(executor.events))
+ assert 'john-jacob-jingleheimer-schmidt' in json.dumps(list(executor.events))
+ assert 'SENSITIVE' not in json.dumps(list(executor.events))
diff --git a/test/integration/test_events.py b/test/integration/test_events.py
new file mode 100644
index 0000000..ae4d101
--- /dev/null
+++ b/test/integration/test_events.py
@@ -0,0 +1,166 @@
+import pytest
+import tempfile
+from distutils.version import LooseVersion
+from distutils.spawn import find_executable
+import pkg_resources
+import json
+import os
+import shutil
+
+from ansible_runner import run, run_async
+
+
+def test_basic_events(is_run_async=False, g_facts=False):
+ tdir = tempfile.mkdtemp()
+ inventory = "localhost ansible_connection=local"
+ playbook = [{'hosts': 'all', 'gather_facts': g_facts, 'tasks': [{'debug': {'msg': "test"}}]}]
+ if not is_run_async:
+ r = run(private_data_dir=tdir,
+ inventory=inventory,
+ playbook=playbook)
+ else:
+ _, r = run_async(private_data_dir=tdir,
+ inventory=inventory,
+ playbook=playbook)
+
+ event_types = [x['event'] for x in r.events]
+ okay_events = [x for x in filter(lambda x: 'event' in x and x['event'] == 'runner_on_ok',
+ r.events)]
+ assert event_types[0] == 'playbook_on_start'
+ assert "playbook_on_play_start" in event_types
+ assert "runner_on_ok" in event_types
+ assert "playbook_on_stats" in event_types
+ assert r.rc == 0
+ if not is_run_async:
+ assert len(okay_events) == 1
+ else:
+ assert len(okay_events) == 2
+
+ okay_event = okay_events[0]
+ assert "uuid" in okay_event and len(okay_event['uuid']) == 36
+ assert "parent_uuid" in okay_event and len(okay_event['parent_uuid']) == 36
+ assert "stdout" in okay_event and len(okay_event['stdout']) > 0
+ assert "start_line" in okay_event and int(okay_event['start_line']) > 0
+ assert "end_line" in okay_event and int(okay_event['end_line']) > 0
+ assert "event_data" in okay_event and len(okay_event['event_data']) > 0
+
+
+def test_async_events():
+    test_basic_events(is_run_async=True, g_facts=True)
+
+
+def test_basic_serializeable():
+ tdir = tempfile.mkdtemp()
+ r = run(private_data_dir=tdir,
+ inventory="localhost ansible_connection=local",
+ playbook=[{'hosts': 'all', 'gather_facts': False, 'tasks': [{'debug': {'msg': "test"}}]}])
+ events = [x for x in r.events]
+ json.dumps(events)
+
+
+def test_event_omission():
+ tdir = tempfile.mkdtemp()
+ r = run(private_data_dir=tdir,
+ inventory="localhost ansible_connection=local",
+ omit_event_data=True,
+ playbook=[{'hosts': 'all', 'gather_facts': False, 'tasks': [{'debug': {'msg': "test"}}]}])
+ assert not any([x['event_data'] for x in r.events])
+
+
+def test_event_omission_except_failed():
+ tdir = tempfile.mkdtemp()
+ r = run(private_data_dir=tdir,
+ inventory="localhost ansible_connection=local",
+ only_failed_event_data=True,
+ playbook=[{'hosts': 'all', 'gather_facts': False, 'tasks': [{'fail': {'msg': "test"}}]}])
+ all_event_datas = [x['event_data'] for x in r.events if x['event_data']]
+ assert len(all_event_datas) == 1
+
+@pytest.mark.skipif(LooseVersion(pkg_resources.get_distribution('ansible').version) < LooseVersion('2.8'),
+ reason="Valid only on Ansible 2.8+")
+def test_runner_on_start(rc):
+ tdir = tempfile.mkdtemp()
+ r = run(private_data_dir=tdir,
+ inventory="localhost ansible_connection=local",
+ playbook=[{'hosts': 'all', 'gather_facts': False, 'tasks': [{'debug': {'msg': "test"}}]}])
+ start_events = [x for x in filter(lambda x: 'event' in x and x['event'] == 'runner_on_start',
+ r.events)]
+ assert len(start_events) == 1
+
+
+def test_playbook_on_stats_summary_fields(rc):
+ tdir = tempfile.mkdtemp()
+ r = run(private_data_dir=tdir,
+ inventory="localhost ansible_connection=local",
+ playbook=[{'hosts': 'all', 'gather_facts': False, 'tasks': [{'debug': {'msg': "test"}}]}])
+ stats_events = [x for x in filter(lambda x: 'event' in x and x['event'] == 'playbook_on_stats',
+ r.events)]
+ assert len(stats_events) == 1
+
+ EXPECTED_SUMMARY_FIELDS = ('changed', 'dark', 'failures', 'ignored', 'ok', 'rescued', 'skipped')
+ fields = stats_events[0]['event_data'].keys()
+ assert set(fields) >= set(EXPECTED_SUMMARY_FIELDS)
+
+
+def test_include_role_events():
+ try:
+ r = run(
+ private_data_dir=os.path.abspath('test/integration'),
+ playbook='use_role.yml'
+ )
+ role_events = [event for event in r.events if event.get('event_data', {}).get('role', '') == "benthomasson.hello_role"]
+ assert 'runner_on_ok' in [event['event'] for event in role_events]
+ for event in role_events:
+ event_data = event['event_data']
+ assert not event_data.get('warning', False) # role use should not contain warnings
+ if event['event'] == 'runner_on_ok':
+ assert event_data['res']['msg'] == 'Hello world!'
+ finally:
+ shutil.rmtree('test/integration/artifacts')
+
+@pytest.mark.skipif(find_executable('cgexec') is None,
+ reason="cgexec not available")
+@pytest.mark.skipif(LooseVersion(pkg_resources.get_distribution('ansible').version) < LooseVersion('2.8'),
+ reason="Valid only on Ansible 2.8+")
+def test_profile_data():
+ tdir = tempfile.mkdtemp()
+ try:
+ r = run(private_data_dir=tdir,
+ inventory="localhost ansible_connection=local",
+ resource_profiling=True,
+ resource_profiling_base_cgroup='ansible-runner',
+ playbook=[{'hosts': 'all', 'gather_facts': False, 'tasks': [{'debug': {'msg': "test"}}]}])
+ assert r.config.env['ANSIBLE_CALLBACK_WHITELIST'] == 'cgroup_perf_recap'
+ assert r.config.env['CGROUP_CONTROL_GROUP'].startswith('ansible-runner/')
+ expected_datadir = os.path.join(tdir, 'profiling_data')
+ assert r.config.env['CGROUP_OUTPUT_DIR'] == expected_datadir
+ assert r.config.env['CGROUP_OUTPUT_FORMAT'] == 'json'
+ assert r.config.env['CGROUP_CPU_POLL_INTERVAL'] == '0.25'
+ assert r.config.env['CGROUP_MEMORY_POLL_INTERVAL'] == '0.25'
+ assert r.config.env['CGROUP_PID_POLL_INTERVAL'] == '0.25'
+ assert r.config.env['CGROUP_FILE_PER_TASK'] == 'True'
+ assert r.config.env['CGROUP_WRITE_FILES'] == 'True'
+ assert r.config.env['CGROUP_DISPLAY_RECAP'] == 'False'
+
+ data_files = [f for f in os.listdir(expected_datadir)
+ if os.path.isfile(os.path.join(expected_datadir, f))]
+ # Ensure each type of metric is represented in the results
+ for metric in ('cpu', 'memory', 'pids'):
+ assert len([f for f in data_files if '{}.json'.format(metric) in f]) == 1
+
+ # Ensure each file consists of a list of json dicts
+ for file in data_files:
+ with open(os.path.join(expected_datadir, file)) as f:
+ for line in f:
+ line = line[1:-1] # strip RS and LF (see https://tools.ietf.org/html/rfc7464#section-2.2)
+ try:
+ json.loads(line)
+ except json.JSONDecodeError as e:
+ pytest.fail("Failed to parse {}: '{}'"
+ .format(os.path.join(expected_datadir, file), e))
+
+ except RuntimeError:
+ pytest.skip(
+ 'this test requires a cgroup to run e.g., '
+ 'sudo cgcreate -a `whoami` -t `whoami` -g cpuacct,memory,pids:ansible-runner'
+ ) # noqa
diff --git a/test/integration/test_interface.py b/test/integration/test_interface.py
new file mode 100644
index 0000000..dc27a6c
--- /dev/null
+++ b/test/integration/test_interface.py
@@ -0,0 +1,13 @@
+from ansible_runner.interface import run, run_async
+
+
+def test_run():
+ r = run(module='debug', host_pattern='localhost')
+ assert r.status == 'successful'
+
+
+def test_run_async():
+ thread, r = run_async(module='debug', host_pattern='localhost')
+ thread.join()
+ assert r.status == 'successful'
diff --git a/test/integration/test_main.py b/test/integration/test_main.py
new file mode 100644
index 0000000..08ee8a2
--- /dev/null
+++ b/test/integration/test_main.py
@@ -0,0 +1,345 @@
+# -*- coding: utf-8 -*-
+from __future__ import print_function
+from ansible_runner.__main__ import main
+
+import os
+import codecs
+import multiprocessing
+import shutil
+import yaml
+import tempfile
+import time
+from contextlib import contextmanager
+import pytest
+
+
+from ansible_runner.exceptions import AnsibleRunnerException
+
+HERE = os.path.abspath(os.path.dirname(__file__))
+
+
+def ensure_directory(directory):
+ if not os.path.exists(directory):
+ os.makedirs(directory)
+
+
+def ensure_removed(path):
+ if os.path.exists(path):
+ if os.path.isfile(path):
+ os.unlink(path)
+ elif os.path.isdir(path):
+ shutil.rmtree(path)
+
+
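+# On success the temporary directory is removed; on failure it is deliberately
+# left in place (and any files passed in are dumped to stdout) to help debug
+# the failing test.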
+@contextmanager
+def temp_directory(files=None):
+ temp_dir = tempfile.mkdtemp()
+ print(temp_dir)
+ try:
+ yield temp_dir
+ shutil.rmtree(temp_dir)
+ except BaseException:
+ if files is not None:
+ for file in files:
+ if os.path.exists(file):
+ with open(file) as f:
+ print(f.read())
+ raise
+
+
+def test_temp_directory():
+
+ context = dict()
+
+ def will_fail():
+ with temp_directory() as temp_dir:
+ context['saved_temp_dir'] = temp_dir
+ assert False
+
+ def will_pass():
+ with temp_directory() as temp_dir:
+ context['saved_temp_dir'] = temp_dir
+ assert True
+
+ with pytest.raises(AssertionError):
+ will_fail()
+ assert os.path.exists(context['saved_temp_dir'])
+ shutil.rmtree(context['saved_temp_dir'])
+
+ will_pass()
+ assert not os.path.exists(context['saved_temp_dir'])
+
+
+def test_help():
+ with pytest.raises(SystemExit) as exc:
+ main([])
+ assert exc.value.code == 2, 'Should raise SystemExit with return code 2'
+
+
+def test_module_run():
+ try:
+ rc = main(['-m', 'ping',
+ '--hosts', 'localhost',
+ 'run',
+ 'ping'])
+ assert os.path.exists('./ping')
+ assert os.path.exists('./ping/artifacts')
+ assert rc == 0
+ finally:
+ shutil.rmtree('./ping')
+
+
+def test_module_run_debug():
+ try:
+ rc = main(['-m', 'ping',
+ '--hosts', 'localhost',
+ '--debug',
+ 'run',
+ 'ping'])
+ assert os.path.exists('./ping')
+ assert os.path.exists('./ping/artifacts')
+ assert rc == 0
+ finally:
+ shutil.rmtree('./ping')
+
+
+def test_module_run_clean():
+ with temp_directory() as temp_dir:
+ rc = main(['-m', 'ping',
+ '--hosts', 'localhost',
+ 'run',
+ temp_dir])
+ assert rc == 0
+
+
+def test_role_run():
+ rc = main(['-r', 'benthomasson.hello_role',
+ '--hosts', 'localhost',
+ '--roles-path', 'test/integration/roles',
+ 'run',
+ "test/integration"])
+ assert rc == 0
+ ensure_removed("test/integration/artifacts")
+
+
+def test_role_run_abs():
+ with temp_directory() as temp_dir:
+ rc = main(['-r', 'benthomasson.hello_role',
+ '--hosts', 'localhost',
+ '--roles-path', os.path.join(HERE, 'project/roles'),
+ 'run',
+ temp_dir])
+ assert rc == 0
+
+
+def test_role_logfile():
+ try:
+ rc = main(['-r', 'benthomasson.hello_role',
+ '--hosts', 'localhost',
+ '--roles-path', 'test/integration/project/roles',
+ '--logfile', 'new_logfile',
+ 'run',
+ 'test/integration'])
+ assert os.path.exists('new_logfile')
+ assert rc == 0
+ finally:
+ ensure_removed("test/integration/artifacts")
+
+
+def test_role_logfile_abs():
+ try:
+ with temp_directory() as temp_dir:
+ rc = main(['-r', 'benthomasson.hello_role',
+ '--hosts', 'localhost',
+ '--roles-path', os.path.join(HERE, 'project/roles'),
+ '--logfile', 'new_logfile',
+ 'run',
+ temp_dir])
+ assert os.path.exists('new_logfile')
+ assert rc == 0
+ finally:
+ ensure_removed("new_logfile")
+
+
+def test_role_bad_project_dir():
+
+ with open("bad_project_dir", 'w') as f:
+ f.write('not a directory')
+
+ try:
+ with pytest.raises(OSError):
+ main(['-r', 'benthomasson.hello_role',
+ '--hosts', 'localhost',
+ '--roles-path', os.path.join(HERE, 'project/roles'),
+ '--logfile', 'new_logfile',
+ 'run',
+ 'bad_project_dir'])
+ finally:
+ os.unlink('bad_project_dir')
+ ensure_removed("new_logfile")
+
+
+def test_role_run_clean():
+
+ rc = main(['-r', 'benthomasson.hello_role',
+ '--hosts', 'localhost',
+ '--roles-path', 'test/integration/roles',
+ 'run',
+ "test/integration"])
+ assert rc == 0
+ ensure_removed("test/integration/artifacts")
+
+
+def test_role_run_cmd_line_abs():
+ with temp_directory() as temp_dir:
+ rc = main(['-r', 'benthomasson.hello_role',
+ '--hosts', 'localhost',
+ '--roles-path', os.path.join(HERE, 'project/roles'),
+ 'run',
+ temp_dir])
+ assert rc == 0
+
+
+def test_role_run_artifacts_dir():
+ rc = main(['-r', 'benthomasson.hello_role',
+ '--hosts', 'localhost',
+ '--roles-path', 'test/integration/roles',
+ '--artifact-dir', 'otherartifacts',
+ 'run',
+ "test/integration"])
+ assert rc == 0
+ ensure_removed("test/integration/artifacts")
+
+
+def test_role_run_artifacts_dir_abs():
+ try:
+ with temp_directory() as temp_dir:
+ rc = main(['-r', 'benthomasson.hello_role',
+ '--hosts', 'localhost',
+ '--roles-path', os.path.join(HERE, 'project/roles'),
+ '--artifact-dir', 'otherartifacts',
+ 'run',
+ temp_dir])
+ assert os.path.exists(os.path.join('.', 'otherartifacts'))
+ assert rc == 0
+ finally:
+ shutil.rmtree(os.path.join('.', 'otherartifacts'))
+
+
+@pytest.mark.parametrize('envvars', [
+ {'msg': 'hi'},
+ {
+ 'msg': u'utf-8-䉪ቒ칸ⱷ?噂폄蔆㪗輥',
+ u'蔆㪗輥': u'䉪ቒ칸'
+ }
+])
+def test_role_run_env_vars(envvars):
+
+ with temp_directory() as temp_dir:
+ ensure_directory(os.path.join(temp_dir, 'env'))
+ with codecs.open(os.path.join(temp_dir, 'env/envvars'), 'w', encoding='utf-8') as f:
+ f.write(yaml.dump(envvars))
+
+ rc = main(['-r', 'benthomasson.hello_role',
+ '--hosts', 'localhost',
+ '--roles-path', os.path.join(HERE, 'project/roles'),
+ 'run',
+ temp_dir])
+ assert rc == 0
+
+
+def test_role_run_args():
+
+ with temp_directory() as temp_dir:
+ rc = main(['-r', 'benthomasson.hello_role',
+ '--hosts', 'localhost',
+ '--roles-path', os.path.join(HERE, 'project/roles'),
+ '--role-vars', 'msg=hi',
+ 'run',
+ temp_dir])
+ assert rc == 0
+
+
+def test_role_run_inventory():
+
+ with temp_directory() as temp_dir:
+ ensure_directory(os.path.join(temp_dir, 'inventory'))
+ shutil.copy(os.path.join(HERE, 'inventory/localhost'), os.path.join(temp_dir, 'inventory/localhost'))
+
+ rc = main(['-r', 'benthomasson.hello_role',
+ '--hosts', 'localhost',
+ '--roles-path', os.path.join(HERE, 'project/roles'),
+ '--inventory', 'localhost',
+ 'run',
+ temp_dir])
+ assert rc == 0
+
+
+def test_role_run_inventory_missing():
+
+ with temp_directory() as temp_dir:
+ ensure_directory(os.path.join(temp_dir, 'inventory'))
+ shutil.copy(os.path.join(HERE, 'inventory/localhost'), os.path.join(temp_dir, 'inventory/localhost'))
+
+ with pytest.raises(AnsibleRunnerException):
+ main(['-r', 'benthomasson.hello_role',
+ '--hosts', 'localhost',
+ '--roles-path', os.path.join(HERE, 'project/roles'),
+ '--inventory', 'does_not_exist',
+ 'run',
+ temp_dir])
+
+
+def test_role_start():
+
+ with temp_directory() as temp_dir:
+ p = multiprocessing.Process(target=main,
+ args=[['-r', 'benthomasson.hello_role',
+ '--hosts', 'localhost',
+ '--roles-path', os.path.join(HERE, 'project/roles'),
+ 'start',
+ temp_dir]])
+ p.start()
+ p.join()
+
+
+def test_playbook_start():
+
+ with temp_directory() as temp_dir:
+ project_dir = os.path.join(temp_dir, 'project')
+ ensure_directory(project_dir)
+ shutil.copy(os.path.join(HERE, 'project/hello.yml'), project_dir)
+ ensure_directory(os.path.join(temp_dir, 'inventory'))
+ shutil.copy(os.path.join(HERE, 'inventory/localhost'), os.path.join(temp_dir, 'inventory/localhost'))
+
+ # privateip: removed the --hosts command line option from this test because
+ # it is not a supported combination of cli options
+ p = multiprocessing.Process(target=main,
+ args=[['-p', 'hello.yml',
+ '--inventory', os.path.join(HERE, 'inventory/localhost'),
+ #'--hosts', 'localhost',
+ 'start',
+ temp_dir]])
+ p.start()
+
+ time.sleep(5)
+
+ assert os.path.exists(os.path.join(temp_dir, 'pid'))
+
+ rc = main(['is-alive', temp_dir])
+ assert rc == 0
+ rc = main(['stop', temp_dir])
+ assert rc == 0
+
+ time.sleep(1)
+
+ rc = main(['is-alive', temp_dir])
+ assert rc == 1
+
+ ensure_removed(os.path.join(temp_dir, 'pid'))
+
+ rc = main(['stop', temp_dir])
+ assert rc == 1
diff --git a/test/integration/test_runner.py b/test/integration/test_runner.py
new file mode 100644
index 0000000..128cea1
--- /dev/null
+++ b/test/integration/test_runner.py
@@ -0,0 +1,278 @@
+# -*- coding: utf-8 -*-
+
+import json
+import os
+import re
+import pytest
+import six
+import sys
+try:
+ from unittest.mock import MagicMock
+except ImportError:
+ from mock import MagicMock
+from ansible_runner import Runner
+
+from ansible_runner.exceptions import AnsibleRunnerException
+
+
+def test_password_prompt(rc):
+ rc.command = [sys.executable, '-c', 'import time; print(input("Password: "))']
+ rc.expect_passwords[re.compile(r'Password:\s*?$', re.M)] = '1234'
+ status, exitcode = Runner(config=rc).run()
+ assert status == 'successful'
+ assert exitcode == 0
+ with open(os.path.join(rc.artifact_dir, 'stdout')) as f:
+ assert '1234' in f.read()
+
+
+def test_run_command(rc):
+ rc.command = ['sleep', '1']
+ status, exitcode = Runner(config=rc).run()
+ assert status == 'successful'
+ assert exitcode == 0
+ with open(os.path.join(rc.artifact_dir, 'command')) as f:
+ data = json.load(f)
+ assert data.get('command') == ['sleep', '1']
+ assert 'cwd' in data
+ assert isinstance(data.get('env'), dict)
+
+
+def test_run_command_with_unicode(rc):
+ expected = '"utf-8-䉪ቒ칸ⱷ?噂폄蔆㪗輥"'
+ if six.PY2:
+ expected = expected.decode('utf-8')
+ rc.command = ['echo', '"utf-8-䉪ቒ칸ⱷ?噂폄蔆㪗輥"']
+ rc.envvars = {"䉪ቒ칸": "蔆㪗輥"}
+ rc.prepare_env()
+ status, exitcode = Runner(config=rc).run()
+ assert status == 'successful'
+ assert exitcode == 0
+ with open(os.path.join(rc.artifact_dir, 'command')) as f:
+ data = json.load(f)
+ assert data.get('command') == ['echo', expected]
+ assert 'cwd' in data
+ assert isinstance(data.get('env'), dict)
+ assert u"䉪ቒ칸" in data.get('env')
+
+
+def test_run_command_finished_callback(rc):
+ finished_callback = MagicMock()
+ rc.command = ['sleep', '1']
+ runner = Runner(config=rc, finished_callback=finished_callback)
+ status, exitcode = runner.run()
+ assert status == 'successful'
+ assert exitcode == 0
+ finished_callback.assert_called_with(runner)
+
+
+def test_run_command_explosive_finished_callback(rc):
+ def boom(*args):
+ raise Exception('boom')
+ rc.command = ['sleep', '1']
+ runner = Runner(config=rc, finished_callback=boom)
+ with pytest.raises(Exception):
+ runner.run()
+
+
+def test_run_command_explosive_cancel_callback(rc):
+ def boom(*args):
+ raise Exception('boom')
+ rc.command = ['sleep', '1']
+ runner = Runner(config=rc, cancel_callback=boom)
+ with pytest.raises(Exception):
+ runner.run()
+
+
+def test_run_command_cancel_callback(rc):
+ def cancel(*args):
+ return True
+ rc.command = ['sleep', '1']
+ runner = Runner(config=rc, cancel_callback=cancel)
+ status, exitcode = runner.run()
+ assert status == 'canceled'
+ assert exitcode == 254
+
+
+def test_run_command_job_timeout(rc):
+ rc.command = ['sleep', '1']
+ rc.job_timeout = 0.0000001
+ runner = Runner(config=rc)
+ status, exitcode = runner.run()
+ assert status == 'timeout'
+ assert exitcode == 254
+
+
+def test_run_command_idle_timeout(rc):
+ rc.command = ['sleep', '1']
+ rc.idle_timeout = 0.0000001
+ runner = Runner(config=rc)
+ status, exitcode = runner.run()
+ assert status == 'timeout'
+ assert exitcode == 254
+
+
+def test_run_command_failed(rc):
+ rc.command = ['false']
+ runner = Runner(config=rc)
+ status, exitcode = runner.run()
+ assert status == 'failed'
+ assert exitcode == 1
+
+
+def test_executable_not_found(rc):
+ rc.command = ['supercalifragilistic']
+ runner = Runner(config=rc)
+ status, exitcode = runner.run()
+ assert status == 'failed'
+ assert exitcode == 127
+ events = list(runner.events)
+ assert len(events) == 1
+ assert 'The command was not found or was not executable: supercalifragilistic' in events[0]['stdout'] # noqa
+
+
+def test_run_command_long_running(rc):
+ rc.command = ['yes']
+ runner = Runner(config=rc)
+ status, exitcode = runner.run()
+ assert status == 'timeout'
+ assert exitcode == 254
+
+
+def test_run_command_long_running_children(rc):
+ rc.command = ['bash', '-c', "(yes)"]
+ runner = Runner(config=rc)
+ status, exitcode = runner.run()
+ assert status == 'timeout'
+ assert exitcode == 254
+
+
+def test_run_command_events_missing(rc):
+ rc.command = ['sleep', '1']
+ runner = Runner(config=rc)
+ status, exitcode = runner.run()
+ assert status == 'successful'
+ assert exitcode == 0
+ assert list(runner.events) == []
+
+
+def test_run_command_stdout_missing(rc):
+ rc.command = ['sleep', '1']
+ runner = Runner(config=rc)
+ status, exitcode = runner.run()
+ assert status == 'successful'
+ assert exitcode == 0
+ os.unlink(os.path.join(runner.config.artifact_dir, 'stdout'))
+ with pytest.raises(AnsibleRunnerException):
+ list(runner.stdout)
+
+
+def test_run_command_no_stats(rc):
+ rc.command = ['sleep', '1']
+ runner = Runner(config=rc)
+ status, exitcode = runner.run()
+ assert status == 'successful'
+ assert exitcode == 0
+ assert runner.stats is None
+
+
+def test_run_command_ansible(rc):
+ rc.module = "debug"
+ rc.host_pattern = "localhost"
+ rc.prepare()
+ runner = Runner(config=rc)
+ status, exitcode = runner.run()
+ assert status == 'successful'
+ assert exitcode == 0
+ assert list(runner.events) != []
+ assert runner.stats != {}
+ assert list(runner.host_events('localhost')) != []
+ stdout = runner.stdout
+ assert stdout.read() != ""
+
+
+def test_run_command_ansible_event_handler(rc):
+ event_handler = MagicMock()
+ status_handler = MagicMock()
+ rc.module = "debug"
+ rc.host_pattern = "localhost"
+ rc.prepare()
+ runner = Runner(config=rc, event_handler=event_handler, status_handler=status_handler)
+ status, exitcode = runner.run()
+ assert status == 'successful'
+ assert exitcode == 0
+ event_handler.assert_called()
+ status_handler.assert_called()
+
+
+def test_run_command_ansible_event_handler_failure(rc):
+ def event_handler(*args):
+ raise IOError()
+ rc.module = "debug"
+ rc.host_pattern = "localhost"
+ rc.prepare()
+ runner = Runner(config=rc, event_handler=event_handler)
+ status, exitcode = runner.run()
+ assert status == 'successful'
+ assert exitcode == 0
+
+
+def test_run_command_ansible_rotate_artifacts(rc):
+ rc.module = "debug"
+ rc.host_pattern = "localhost"
+ rc.prepare()
+ rc.rotate_artifacts = 1
+ runner = Runner(config=rc)
+ status, exitcode = runner.run()
+ assert status == 'successful'
+ assert exitcode == 0
+ runner = Runner(config=rc)
+ status, exitcode = runner.run()
+ assert status == 'successful'
+ assert exitcode == 0
+
+
+def test_get_fact_cache(rc):
+ assert os.path.basename(rc.fact_cache) == 'fact_cache'
+ rc.module = "setup"
+ rc.host_pattern = "localhost"
+ rc.prepare()
+ runner = Runner(config=rc)
+ status, exitcode = runner.run()
+ assert status == 'successful'
+ assert exitcode == 0
+ print(rc.cwd)
+ assert os.path.exists(os.path.join(rc.artifact_dir, 'fact_cache'))
+ assert os.path.exists(os.path.join(rc.artifact_dir, 'fact_cache', 'localhost'))
+ data = runner.get_fact_cache('localhost')
+ assert data
+
+
+def test_set_fact_cache(rc):
+ assert os.path.basename(rc.fact_cache) == 'fact_cache'
+ rc.module = "debug"
+ rc.module_args = "var=message"
+ rc.host_pattern = "localhost"
+ rc.prepare()
+ runner = Runner(config=rc)
+ runner.set_fact_cache('localhost', dict(message='hello there'))
+ status, exitcode = runner.run()
+ assert status == 'successful'
+ assert exitcode == 0
+ print(rc.cwd)
+ assert os.path.exists(os.path.join(rc.artifact_dir, 'fact_cache'))
+ assert os.path.exists(os.path.join(rc.artifact_dir, 'fact_cache', 'localhost'))
+ data = runner.get_fact_cache('localhost')
+ assert data['message'] == 'hello there'
+
+
+def test_set_extra_vars(rc):
+ rc.module = "debug"
+ rc.module_args = "var=test_extra_vars"
+ rc.host_pattern = "localhost"
+ rc.extra_vars = dict(test_extra_vars='hello there')
+ rc.prepare()
+ runner = Runner(config=rc)
+ status, exitcode = runner.run()
+ with open(os.path.join(rc.artifact_dir, 'stdout')) as f:
+ assert 'hello there' in f.read()
+
diff --git a/test/unit/__init__.py b/test/unit/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/unit/__init__.py
diff --git a/test/unit/test_event_filter.py b/test/unit/test_event_filter.py
new file mode 100644
index 0000000..b9e08e6
--- /dev/null
+++ b/test/unit/test_event_filter.py
@@ -0,0 +1,200 @@
+import pytest
+import base64
+import json
+from io import StringIO
+
+from six.moves import xrange
+
+from ansible_runner.utils import OutputEventFilter
+
+MAX_WIDTH = 78
+EXAMPLE_UUID = '890773f5-fe6d-4091-8faf-bdc8021d65dd'
+
+
+def write_encoded_event_data(fileobj, data):
+ b64data = base64.b64encode(json.dumps(data).encode('utf-8')).decode()
+ # pattern corresponding to OutputEventFilter expectation
+ fileobj.write(u'\x1b[K')
+ for offset in xrange(0, len(b64data), MAX_WIDTH):
+ chunk = b64data[offset:offset + MAX_WIDTH]
+ escaped_chunk = u'{}\x1b[{}D'.format(chunk, len(chunk))
+ fileobj.write(escaped_chunk)
+ fileobj.write(u'\x1b[K')
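+
+
+# For reference only: a minimal sketch of the inverse operation. It assumes
+# the framing emulated above (payload between a pair of '\x1b[K' markers,
+# with '\x1b[<n>D' cursor-back escapes separating the base64 chunks) and is
+# not a helper the tests rely on.
+def decode_encoded_event_data(text):
+ import re # local import keeps this reference sketch self-contained
+ body = text.split(u'\x1b[K')[1] # payload sits between the two markers
+ b64data = re.sub(r'\x1b\[\d+D', '', body) # drop the cursor-back escapes
+ return json.loads(base64.b64decode(b64data).decode('utf-8'))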
+
+
+@pytest.fixture
+def fake_callback():
+ return []
+
+
+@pytest.fixture
+def fake_cache():
+ return {}
+
+
+@pytest.fixture
+def wrapped_handle(job_event_callback):
+ # Preliminary creation of resources usually done in tasks.py
+ return OutputEventFilter(StringIO(), job_event_callback)
+
+
+@pytest.fixture
+def job_event_callback(fake_callback, fake_cache):
+ def method(event_data):
+ print('fake callback called')
+ if 'uuid' in event_data:
+ cache_event = fake_cache.get(':1:ev-{}'.format(event_data['uuid']), None)
+ if cache_event is not None:
+ event_data.update(cache_event)
+ fake_callback.append(event_data)
+ return method
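+
+
+# The tests below emulate the two-channel event protocol: the callback module
+# stores the full event payload in a cache keyed ':1:ev-<uuid>', while stdout
+# carries only a framed uuid; the consumer recombines the two on read.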
+
+
+def test_event_recomb(fake_callback, fake_cache, wrapped_handle):
+ # Pretend that this is done by the Ansible callback module
+ fake_cache[':1:ev-{}'.format(EXAMPLE_UUID)] = {'event': 'foo'}
+ write_encoded_event_data(wrapped_handle, {
+ 'uuid': EXAMPLE_UUID
+ })
+ wrapped_handle.write(u'\r\nTASK [Gathering Facts] *********************************************************\n')
+ wrapped_handle.write(u'\u001b[0;33mchanged: [localhost]\u001b[0m\n')
+ write_encoded_event_data(wrapped_handle, {})
+ # stop pretending
+
+ assert len(fake_callback) == 1
+ recomb_data = fake_callback[0]
+ assert 'event' in recomb_data
+ assert recomb_data['event'] == 'foo'
+
+
+def test_separate_verbose_events(fake_callback, wrapped_handle):
+ # Pretend that this is done by the Ansible callback module
+ wrapped_handle.write(u'Using /etc/ansible/ansible.cfg as config file\n')
+ wrapped_handle.write(u'SSH password: \n')
+ write_encoded_event_data(wrapped_handle, { # associated with _next_ event
+ 'uuid': EXAMPLE_UUID
+ })
+ # stop pretending
+
+ assert len(fake_callback) == 2
+ for event_data in fake_callback:
+ assert 'event' in event_data
+ assert event_data['event'] == 'verbose'
+
+
+def test_large_data_payload(fake_callback, fake_cache, wrapped_handle):
+ # Pretend that this is done by the Ansible callback module
+ fake_cache[':1:ev-{}'.format(EXAMPLE_UUID)] = {'event': 'foo'}
+ event_data_to_encode = {
+ 'uuid': EXAMPLE_UUID,
+ 'host': 'localhost',
+ 'role': 'some_path_to_role'
+ }
+ assert len(json.dumps(event_data_to_encode)) > MAX_WIDTH
+ write_encoded_event_data(wrapped_handle, event_data_to_encode)
+ wrapped_handle.write(u'\r\nTASK [Gathering Facts] *********************************************************\n')
+ wrapped_handle.write(u'\u001b[0;33mchanged: [localhost]\u001b[0m\n')
+ write_encoded_event_data(wrapped_handle, {})
+ # stop pretending
+
+ assert len(fake_callback) == 1
+ recomb_data = fake_callback[0]
+ assert 'role' in recomb_data
+ assert recomb_data['role'] == 'some_path_to_role'
+ assert 'event' in recomb_data
+ assert recomb_data['event'] == 'foo'
+
+
+def test_event_lazy_parsing(fake_callback, fake_cache, wrapped_handle):
+ # Pretend that this is done by the Ansible callback module
+ fake_cache[':1:ev-{}'.format(EXAMPLE_UUID)] = {'event': 'foo'}
+ buff = StringIO()
+ event_data_to_encode = {
+ 'uuid': EXAMPLE_UUID,
+ 'host': 'localhost',
+ 'role': 'some_path_to_role'
+ }
+ write_encoded_event_data(buff, event_data_to_encode)
+
+ # write the data to the event filter in chunks to test lazy event matching
+ buff.seek(0)
+ start_token_chunk = buff.read(1) # \x1b
+ start_token_remainder = buff.read(2) # [K
+ body = buff.read(15) # next 15 bytes of base64 data
+ remainder = buff.read() # the remainder
+ for chunk in (start_token_chunk, start_token_remainder, body, remainder):
+ wrapped_handle.write(chunk)
+
+ wrapped_handle.write(u'\r\nTASK [Gathering Facts] *********************************************************\n')
+ wrapped_handle.write(u'\u001b[0;33mchanged: [localhost]\u001b[0m\n')
+ write_encoded_event_data(wrapped_handle, {})
+ # stop pretending
+
+ assert len(fake_callback) == 1
+ recomb_data = fake_callback[0]
+ assert 'role' in recomb_data
+ assert recomb_data['role'] == 'some_path_to_role'
+ assert 'event' in recomb_data
+ assert recomb_data['event'] == 'foo'
+
+
+@pytest.mark.timeout(1)
+def test_large_stdout_blob():
+ def _callback(*args, **kw):
+ pass
+
+ f = OutputEventFilter(StringIO(), _callback)
+ for x in range(1024 * 10):
+ f.write(u'x' * 1024)
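+ # no explicit assertions here: the 1-second timeout marker above is the
+ # real check, since a ~10MB write-through must complete promptly rather
+ # than hang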
+
+
+def test_verbose_line_buffering():
+ events = []
+
+ def _callback(event_data):
+ events.append(event_data)
+
+ f = OutputEventFilter(StringIO(), _callback)
+ f.write(u'one two\r\n\r\n')
+
+ assert len(events) == 2
+ assert events[0]['start_line'] == 0
+ assert events[0]['end_line'] == 1
+ assert events[0]['stdout'] == 'one two'
+
+ assert events[1]['start_line'] == 1
+ assert events[1]['end_line'] == 2
+ assert events[1]['stdout'] == ''
+
+ f.write(u'three')
+ assert len(events) == 2
+ f.write(u'\r\nfou')
+
+ # three is not pushed to buffer until its line completes
+ assert len(events) == 3
+ assert events[2]['start_line'] == 2
+ assert events[2]['end_line'] == 3
+ assert events[2]['stdout'] == 'three'
+
+ f.write(u'r\r')
+ f.write(u'\nfi')
+
+ assert events[3]['start_line'] == 3
+ assert events[3]['end_line'] == 4
+ assert events[3]['stdout'] == 'four'
+
+ f.write(u've')
+ f.write(u'\r\n')
+
+ assert len(events) == 5
+ assert events[4]['start_line'] == 4
+ assert events[4]['end_line'] == 5
+ assert events[4]['stdout'] == 'five'
+
+ f.close()
+
+ from pprint import pprint
+ pprint(events)
+ assert len(events) == 6
+
+ assert events[5]['event'] == 'EOF'
diff --git a/test/unit/test_loader.py b/test/unit/test_loader.py
new file mode 100644
index 0000000..01aaaa2
--- /dev/null
+++ b/test/unit/test_loader.py
@@ -0,0 +1,133 @@
+from io import BytesIO
+
+from pytest import raises, fixture
+from mock import patch
+from six import string_types
+
+import ansible_runner.loader
+
+from ansible_runner.exceptions import ConfigurationError
+
+
+@fixture
+def loader():
+ return ansible_runner.loader.ArtifactLoader('/tmp')
+
+
+def test__load_json_success(loader):
+ res = loader._load_json('{"test": "string"}')
+ assert isinstance(res, dict)
+ assert res['test'] == 'string'
+
+
+def test__load_json_failure(loader):
+ res = loader._load_json('---\ntest: string')
+ assert res is None
+
+ res = loader._load_json('test string')
+ assert res is None
+
+
+def test__load_yaml_success(loader):
+ res = loader._load_yaml('---\ntest: string')
+ assert isinstance(res, dict)
+ assert res['test'] == 'string'
+
+ res = loader._load_yaml('{"test": "string"}')
+ assert isinstance(res, dict)
+ assert res['test'] == 'string'
+
+
+def test__load_yaml_failure(loader):
+ res = loader._load_yaml('---\ntest: string:')
+ assert res is None
+
+
+def test_abspath(loader):
+ res = loader.abspath('/test')
+ assert res == '/test'
+
+ res = loader.abspath('test')
+ assert res == '/tmp/test'
+
+ res = loader.abspath('~/test')
+ assert res.startswith('/')
+
+
+def test_load_file_text(loader):
+ with patch.object(ansible_runner.loader.ArtifactLoader, 'get_contents') as mock_get_contents:
+ mock_get_contents.return_value = 'test\nstring'
+
+ assert not loader._cache
+
+ # cache miss
+ res = loader.load_file('/tmp/test', string_types)
+ assert mock_get_contents.called
+ mock_get_contents.assert_called_with('/tmp/test')
+ assert res == b'test\nstring'
+ assert '/tmp/test' in loader._cache
+
+ mock_get_contents.reset_mock()
+
+ # cache hit
+ res = loader.load_file('/tmp/test', string_types)
+ assert not mock_get_contents.called
+ assert res == b'test\nstring'
+ assert '/tmp/test' in loader._cache
+
+
+def test_load_file_json(loader):
+ with patch.object(ansible_runner.loader.ArtifactLoader, 'get_contents') as mock_get_contents:
+ mock_get_contents.return_value = '---\ntest: string'
+
+ assert not loader._cache
+
+ res = loader.load_file('/tmp/test')
+
+ assert mock_get_contents.called
+ mock_get_contents.assert_called_with('/tmp/test')
+ assert '/tmp/test' in loader._cache
+ assert res['test'] == 'string'
+
+
+def test_load_file_type_check(loader):
+ with patch.object(ansible_runner.loader.ArtifactLoader, 'get_contents') as mock_get_contents:
+ mock_get_contents.return_value = '---\ntest: string'
+
+ assert not loader._cache
+
+ # type check passes
+ res = loader.load_file('/tmp/test', dict)
+ assert res is not None
+
+ mock_get_contents.reset_mock()
+ mock_get_contents.return_value = 'test string'
+
+ loader._cache = {}
+
+ # type check fails
+ with raises(ConfigurationError):
+ loader.load_file('/tmp/test', dict)
+
+
+def test_get_contents_ok(loader):
+ with patch('codecs.open') as mock_open:
+ handler = BytesIO()
+ handler.write(b"test string")
+ handler.seek(0)
+
+ mock_open.return_value.__enter__.return_value = handler
+
+ res = loader.get_contents('/tmp')
+ assert res == b'test string'
+
+
+def test_get_contents_invalid_path(loader):
+ with raises(ConfigurationError):
+ loader.get_contents('/tmp/invalid')
+
+
+def test_get_contents_exception(loader):
+ with raises(ConfigurationError):
+ loader.get_contents('/tmp')
diff --git a/test/unit/test_runner.py b/test/unit/test_runner.py
new file mode 100644
index 0000000..09d61b6
--- /dev/null
+++ b/test/unit/test_runner.py
@@ -0,0 +1,127 @@
+# -*- coding: utf-8 -*-
+
+import codecs
+import os
+
+import json
+import mock
+import pexpect
+import pytest
+import six
+import sys
+
+from ansible_runner import Runner
+from ansible_runner.exceptions import CallbackError
+from ansible_runner.runner_config import RunnerConfig
+
+HERE, FILENAME = os.path.split(__file__)
+
+
+@pytest.fixture(scope='function')
+def rc(request, tmpdir):
+ rc = RunnerConfig(str(tmpdir))
+ rc.suppress_ansible_output = True
+ rc.expect_passwords = {
+ pexpect.TIMEOUT: None,
+ pexpect.EOF: None
+ }
+ rc.cwd = str(tmpdir)
+ rc.env = {}
+ rc.job_timeout = .5
+ rc.idle_timeout = 0
+ rc.pexpect_timeout = .1
+ rc.pexpect_use_poll = True
+ return rc
+
+
+@pytest.fixture(autouse=True)
+def mock_sleep(request):
+ # the handle_termination process teardown mechanism uses `time.sleep` to
+ # wait on processes to respond to SIGTERM; these are tests and don't care
+ # about being nice
+ m = mock.patch('time.sleep')
+ m.start()
+ request.addfinalizer(m.stop)
+
+
+def test_simple_spawn(rc):
+ rc.command = ['ls', '-la']
+ status, exitcode = Runner(config=rc).run()
+ assert status == 'successful'
+ assert exitcode == 0
+
+
+def test_error_code(rc):
+ rc.command = ['ls', '--nonsense']
+ status, exitcode = Runner(config=rc).run()
+ assert status == 'failed'
+ assert exitcode > 0
+
+
+# TODO: matt does not like this test
+def test_job_timeout(rc):
+ rc.command = [sys.executable, '-c', 'import time; time.sleep(5)']
+ runner = Runner(config=rc)
+ status, exitcode = runner.run()
+ assert status == 'timeout'
+ assert runner.timed_out is True
+
+
+def test_cancel_callback(rc):
+ rc.command = [sys.executable, '-c', 'print(input("Password: "))']
+ status, exitcode = Runner(config=rc, cancel_callback=lambda: True).run()
+ assert status == 'canceled'
+
+
+def test_cancel_callback_error(rc):
+ def kaboom():
+ raise Exception('kaboom')
+
+ rc.command = [sys.executable, '-c', 'print(input("Password: "))']
+ with pytest.raises(CallbackError):
+ Runner(config=rc, cancel_callback=kaboom).run()
+
+
+@pytest.mark.parametrize('value', ['abc123', six.u('Iñtërnâtiônàlizætiøn')])
+def test_env_vars(rc, value):
+ rc.command = [sys.executable, '-c', 'import os; print(os.getenv("X_MY_ENV"))']
+ rc.env = {'X_MY_ENV': value}
+ status, exitcode = Runner(config=rc).run()
+ assert status == 'successful'
+ assert exitcode == 0
+ with codecs.open(os.path.join(rc.artifact_dir, 'stdout'), 'r', encoding='utf-8') as f:
+ assert value in f.read()
+
+
+def test_event_callback_interface_has_ident(rc):
+ rc.ident = "testident"
+ runner = Runner(config=rc, remove_partials=False)
+ runner.event_handler = mock.Mock()
+ with mock.patch('codecs.open', mock.mock_open(read_data=json.dumps(dict(event="test")))):
+ with mock.patch('os.chmod', mock.Mock()) as chmod:
+ with mock.patch('os.mkdir', mock.Mock()):
+ runner.event_callback(dict(uuid="testuuid", counter=0))
+ assert runner.event_handler.call_count == 1
+ runner.event_handler.assert_called_with(dict(runner_ident='testident', counter=0, uuid='testuuid', event='test'))
+ chmod.assert_called_once()
+ runner.status_callback("running")
+
+
+def test_event_callback_interface_calls_event_handler_for_verbose_event(rc):
+ rc.ident = "testident"
+ event_handler = mock.Mock()
+ runner = Runner(config=rc, event_handler=event_handler)
+ with mock.patch('os.mkdir', mock.Mock()):
+ runner.event_callback(dict(uuid="testuuid", event='verbose', counter=0))
+ assert event_handler.call_count == 1
+ event_handler.assert_called_with(dict(runner_ident='testident', counter=0, uuid='testuuid', event='verbose'))
+
+
+def test_status_callback_interface(rc):
+ runner = Runner(config=rc)
+ assert runner.status == 'unstarted'
+ runner.status_handler = mock.Mock()
+ runner.status_callback("running")
+ assert runner.status_handler.call_count == 1
+ runner.status_handler.assert_called_with(dict(status='running', runner_ident=str(rc.ident)), runner_config=runner.config)
+ assert runner.status == 'running'
diff --git a/test/unit/test_runner_config.py b/test/unit/test_runner_config.py
new file mode 100644
index 0000000..0988943
--- /dev/null
+++ b/test/unit/test_runner_config.py
@@ -0,0 +1,562 @@
+# -*- coding: utf-8 -*-
+
+from functools import partial
+from io import StringIO
+import os
+import re
+
+import six
+from pexpect import TIMEOUT, EOF
+
+import pytest
+from mock import patch
+from mock import Mock
+
+from ansible_runner.runner_config import RunnerConfig, ExecutionMode
+from ansible_runner.loader import ArtifactLoader
+from ansible_runner.exceptions import ConfigurationError
+
+try:
+ Pattern = re._pattern_type
+except AttributeError:
+ # Python 3.7
+ Pattern = re.Pattern
+
+
+def load_file_side_effect(path, value=None, *args, **kwargs):
+ if args[0] == path:
+ if value:
+ return value
+ raise ConfigurationError
+
+
+def test_runner_config_init_defaults():
+ rc = RunnerConfig('/')
+ assert rc.private_data_dir == '/'
+ assert rc.ident is not None
+ assert rc.playbook is None
+ assert rc.inventory is None
+ assert rc.limit is None
+ assert rc.module is None
+ assert rc.module_args is None
+ assert rc.artifact_dir == os.path.join('/artifacts', rc.ident)
+ assert isinstance(rc.loader, ArtifactLoader)
+
+
+def test_runner_config_with_artifact_dir():
+ rc = RunnerConfig('/', artifact_dir='/this-is-some-dir')
+ assert rc.artifact_dir == os.path.join('/this-is-some-dir', rc.ident)
+
+
+def test_runner_config_init_with_ident():
+ rc = RunnerConfig('/', ident='test')
+ assert rc.private_data_dir == '/'
+ assert rc.ident == 'test'
+ assert rc.playbook is None
+ assert rc.inventory is None
+ assert rc.limit is None
+ assert rc.module is None
+ assert rc.module_args is None
+ assert rc.artifact_dir == os.path.join('/artifacts', 'test')
+ assert isinstance(rc.loader, ArtifactLoader)
+
+
+def test_runner_config_project_dir():
+ rc = RunnerConfig('/', project_dir='/another/path')
+ assert rc.project_dir == '/another/path'
+ rc = RunnerConfig('/')
+ assert rc.project_dir == '/project'
+
+
+def test_prepare_environment_vars_only_strings():
+ rc = RunnerConfig(private_data_dir="/", envvars=dict(D='D'))
+
+ value = dict(A=1, B=True, C="foo")
+ envvar_side_effect = partial(load_file_side_effect, 'env/envvars', value)
+
+ with patch.object(rc.loader, 'load_file', side_effect=envvar_side_effect):
+ rc.prepare_env()
+ assert 'A' in rc.env
+ assert isinstance(rc.env['A'], six.string_types)
+ assert 'B' in rc.env
+ assert isinstance(rc.env['B'], six.string_types)
+ assert 'C' in rc.env
+ assert isinstance(rc.env['C'], six.string_types)
+ assert 'D' in rc.env
+ assert rc.env['D'] == 'D'
+
+
+def test_prepare_env_ad_hoc_command():
+ rc = RunnerConfig(private_data_dir="/")
+
+ value = {'AD_HOC_COMMAND_ID': 'teststring'}
+ envvar_side_effect = partial(load_file_side_effect, 'env/envvars', value)
+
+ with patch.object(rc.loader, 'load_file', side_effect=envvar_side_effect):
+ rc.prepare_env()
+ assert rc.cwd == '/'
+
+
+def test_prepare_environment_pexpect_defaults():
+ rc = RunnerConfig(private_data_dir="/")
+ rc.prepare_env()
+
+ assert len(rc.expect_passwords) == 2
+ assert TIMEOUT in rc.expect_passwords
+ assert rc.expect_passwords[TIMEOUT] is None
+ assert EOF in rc.expect_passwords
+ assert rc.expect_passwords[EOF] is None
+
+
+def test_prepare_env_passwords():
+ rc = RunnerConfig(private_data_dir='/')
+
+ value = {'^SSH [pP]assword.*$': 'secret'}
+ password_side_effect = partial(load_file_side_effect, 'env/passwords', value)
+
+ with patch.object(rc.loader, 'load_file', side_effect=password_side_effect):
+ rc.prepare_env()
+ rc.expect_passwords.pop(TIMEOUT)
+ rc.expect_passwords.pop(EOF)
+ assert len(rc.expect_passwords) == 1
+ assert isinstance(list(rc.expect_passwords.keys())[0], Pattern)
+ assert 'secret' in rc.expect_passwords.values()
+
+
+def test_prepare_env_extra_vars_defaults():
+ rc = RunnerConfig('/')
+ rc.prepare_env()
+ assert rc.extra_vars is None
+
+
+def test_prepare_env_settings_defaults():
+ rc = RunnerConfig('/')
+ rc.prepare_env()
+ assert rc.settings == {}
+
+
+def test_prepare_env_settings():
+ rc = RunnerConfig('/')
+
+ value = {'test': 'string'}
+ settings_side_effect = partial(load_file_side_effect, 'env/settings', value)
+
+ with patch.object(rc.loader, 'load_file', side_effect=settings_side_effect):
+ rc.prepare_env()
+ assert rc.settings == value
+
+
+def test_prepare_env_sshkey_defaults():
+ rc = RunnerConfig('/')
+ rc.prepare_env()
+ assert rc.ssh_key_data is None
+
+
+def test_prepare_env_sshkey():
+ rc = RunnerConfig('/')
+
+ value = '01234567890'
+ sshkey_side_effect = partial(load_file_side_effect, 'env/ssh_key', value)
+
+ with patch.object(rc.loader, 'load_file', side_effect=sshkey_side_effect):
+ rc.prepare_env()
+ assert rc.ssh_key_data == value
+
+
+def test_prepare_env_defaults():
+ with patch('os.path.exists') as path_exists:
+ path_exists.return_value = True
+ rc = RunnerConfig('/')
+ rc.prepare_env()
+ assert rc.idle_timeout is None
+ assert rc.job_timeout is None
+ assert rc.pexpect_timeout == 5
+ assert rc.cwd == '/project'
+
+
+def test_prepare_env_directory_isolation():
+ with patch('os.path.exists') as path_exists:
+ path_exists.return_value = True
+ rc = RunnerConfig('/')
+ rc.directory_isolation_path = '/tmp/foo'
+ rc.prepare_env()
+ assert rc.cwd == '/tmp/foo'
+
+
+@patch('os.path.exists', return_value=True)
+def test_prepare_inventory(path_exists):
+ rc = RunnerConfig(private_data_dir='/')
+ rc.prepare_inventory()
+ assert rc.inventory == '/inventory'
+ rc.inventory = '/tmp/inventory'
+ rc.prepare_inventory()
+ assert rc.inventory == '/tmp/inventory'
+ rc.inventory = 'localhost,anotherhost,'
+ rc.prepare_inventory()
+ assert rc.inventory == 'localhost,anotherhost,'
+ path_exists.return_value = False
+ rc.inventory = None
+ rc.prepare_inventory()
+ assert rc.inventory is None
+
+
+def test_generate_ansible_command():
+ rc = RunnerConfig(private_data_dir='/', playbook='main.yaml')
+ with patch('os.path.exists') as path_exists:
+ path_exists.return_value = True
+ rc.prepare_inventory()
+ rc.extra_vars = None
+
+ cmd = rc.generate_ansible_command()
+ assert cmd == ['ansible-playbook', '-i', '/inventory', 'main.yaml']
+
+ rc.extra_vars = dict(test="key")
+ cmd = rc.generate_ansible_command()
+ assert cmd == ['ansible-playbook', '-i', '/inventory', '-e', '{"test":"key"}', 'main.yaml']
+
+ with patch.object(rc.loader, 'isfile', side_effect=lambda x: True):
+ cmd = rc.generate_ansible_command()
+ assert cmd == ['ansible-playbook', '-i', '/inventory', '-e', '@/env/extravars', '-e', '{"test":"key"}', 'main.yaml']
+ rc.extra_vars = '/tmp/extravars.yml'
+ cmd = rc.generate_ansible_command()
+ assert cmd == ['ansible-playbook', '-i', '/inventory', '-e', '@/env/extravars', '-e', '@/tmp/extravars.yml', 'main.yaml']
+ rc.extra_vars = None
+ cmd = rc.generate_ansible_command()
+ assert cmd == ['ansible-playbook', '-i', '/inventory', '-e', '@/env/extravars', 'main.yaml']
+ rc.extra_vars = None
+
+ rc.inventory = "localhost,"
+ cmd = rc.generate_ansible_command()
+ assert cmd == ['ansible-playbook', '-i', 'localhost,', 'main.yaml']
+
+ rc.inventory = ['thing1', 'thing2']
+ cmd = rc.generate_ansible_command()
+ assert cmd == ['ansible-playbook', '-i', 'thing1', '-i', 'thing2', 'main.yaml']
+
+ rc.inventory = []
+ cmd = rc.generate_ansible_command()
+ assert cmd == ['ansible-playbook', 'main.yaml']
+ rc.inventory = None
+
+ with patch('os.path.exists', return_value=False) as path_exists:
+ rc.prepare_inventory()
+ cmd = rc.generate_ansible_command()
+ assert cmd == ['ansible-playbook', 'main.yaml']
+
+ rc.verbosity = 3
+ with patch('os.path.exists', return_value=True) as path_exists:
+ rc.prepare_inventory()
+ cmd = rc.generate_ansible_command()
+ assert cmd == ['ansible-playbook', '-i', '/inventory', '-vvv', 'main.yaml']
+ rc.verbosity = None
+
+ rc.limit = 'hosts'
+ cmd = rc.generate_ansible_command()
+ assert cmd == ['ansible-playbook', '-i', '/inventory', '--limit', 'hosts', 'main.yaml']
+ rc.limit = None
+
+ rc.module = 'setup'
+ cmd = rc.generate_ansible_command()
+ assert cmd == ['ansible', '-i', '/inventory', '-m', 'setup']
+ rc.module = None
+
+ rc.module = 'setup'
+ rc.module_args = 'test=string'
+ cmd = rc.generate_ansible_command()
+ assert cmd == ['ansible', '-i', '/inventory', '-m', 'setup', '-a', 'test=string']
+ rc.module_args = None
+ rc.module = None
+
+ rc.forks = 5
+ cmd = rc.generate_ansible_command()
+ assert cmd == ['ansible-playbook', '-i', '/inventory', '--forks', '5', 'main.yaml']
+
+
+def test_generate_ansible_command_with_api_extravars():
+ rc = RunnerConfig(private_data_dir='/', playbook='main.yaml', extravars={"foo":"bar"})
+ with patch('os.path.exists') as path_exists:
+ path_exists.return_value = True
+ rc.prepare_inventory()
+
+ cmd = rc.generate_ansible_command()
+ assert cmd == ['ansible-playbook', '-i', '/inventory', '-e', '{"foo":"bar"}', 'main.yaml']
+
+
+@pytest.mark.parametrize('cmdline,tokens', [
+ (u'--tags foo --skip-tags', ['--tags', 'foo', '--skip-tags']),
+ (u'--limit "䉪ቒ칸ⱷ?噂폄蔆㪗輥"', ['--limit', '䉪ቒ칸ⱷ?噂폄蔆㪗輥']),
+])
+def test_generate_ansible_command_with_cmdline_args(cmdline, tokens):
+ rc = RunnerConfig(private_data_dir='/', playbook='main.yaml')
+ with patch('os.path.exists') as path_exists:
+ path_exists.return_value = True
+ rc.prepare_inventory()
+ rc.extra_vars = {}
+
+ cmdline_side_effect = partial(load_file_side_effect, 'env/cmdline', cmdline)
+ with patch.object(rc.loader, 'load_file', side_effect=cmdline_side_effect):
+ cmd = rc.generate_ansible_command()
+ assert cmd == ['ansible-playbook'] + tokens + ['-i', '/inventory', 'main.yaml']
+
+
+def test_prepare_command_defaults():
+ rc = RunnerConfig('/')
+
+ cmd_side_effect = partial(load_file_side_effect, 'args')
+
+ def generate_side_effect():
+ return StringIO(u'test "string with spaces"')
+
+ with patch.object(rc.loader, 'load_file', side_effect=cmd_side_effect):
+ with patch.object(rc, 'generate_ansible_command', side_effect=generate_side_effect):
+ rc.prepare_command()
+ assert rc.command == ['test', '"string with spaces"']
+
+
+def test_prepare_with_defaults():
+ rc = RunnerConfig('/')
+
+ rc.prepare_inventory = Mock()
+ rc.prepare_env = Mock()
+ rc.prepare_command = Mock()
+
+ rc.ssh_key_data = None
+ rc.artifact_dir = '/'
+ rc.env = {}
+
+ with pytest.raises(ConfigurationError) as exc:
+ rc.prepare()
+
+ assert str(exc.value) == 'No executable for runner to run'
+
+
+def test_prepare():
+ rc = RunnerConfig('/')
+
+ rc.prepare_inventory = Mock()
+ rc.prepare_env = Mock()
+ rc.prepare_command = Mock()
+
+ rc.ssh_key_data = None
+ rc.artifact_dir = '/'
+ rc.env = {}
+ rc.execution_mode = ExecutionMode.ANSIBLE_PLAYBOOK
+ rc.playbook = 'main.yaml'
+
+ os.environ['PYTHONPATH'] = '/python_path_via_environ'
+ os.environ['AWX_LIB_DIRECTORY'] = '/awx_lib_directory_via_environ'
+
+ rc.prepare()
+
+ assert rc.prepare_inventory.called
+ assert rc.prepare_env.called
+ assert rc.prepare_command.called
+
+ assert not hasattr(rc, 'ssh_key_path')
+ assert not hasattr(rc, 'command')
+
+ assert rc.env['ANSIBLE_STDOUT_CALLBACK'] == 'awx_display'
+ assert rc.env['ANSIBLE_RETRY_FILES_ENABLED'] == 'False'
+ assert rc.env['ANSIBLE_HOST_KEY_CHECKING'] == 'False'
+ assert rc.env['AWX_ISOLATED_DATA_DIR'] == '/'
+ assert rc.env['PYTHONPATH'] == '/python_path_via_environ:/awx_lib_directory_via_environ', \
+ "PYTHONPATH is the union of the env PYTHONPATH and AWX_LIB_DIRECTORY"
+
+ del rc.env['PYTHONPATH']
+ os.environ['PYTHONPATH'] = "/foo/bar/python_path_via_environ"
+ rc.prepare()
+ assert rc.env['PYTHONPATH'] == "/foo/bar/python_path_via_environ:/awx_lib_directory_via_environ", \
+ "PYTHONPATH is the union of the explicit env['PYTHONPATH'] override and AWX_LIB_DIRECTORY"
+
+
+@patch('ansible_runner.runner_config.open_fifo_write')
+def test_prepare_with_ssh_key(open_fifo_write_mock):
+ rc = RunnerConfig('/')
+
+ rc.prepare_inventory = Mock()
+ rc.prepare_env = Mock()
+ rc.prepare_command = Mock()
+
+ rc.wrap_args_with_ssh_agent = Mock()
+
+ rc.ssh_key_data = None
+ rc.artifact_dir = '/'
+ rc.env = {}
+ rc.execution_mode = ExecutionMode.ANSIBLE_PLAYBOOK
+ rc.playbook = 'main.yaml'
+ rc.ssh_key_data = '01234567890'
+ rc.command = 'ansible-playbook'
+
+ os.environ['AWX_LIB_DIRECTORY'] = '/'
+
+ rc.prepare()
+
+ assert rc.ssh_key_path == '/ssh_key_data'
+ assert rc.wrap_args_with_ssh_agent.called
+ assert open_fifo_write_mock.called
+
+
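+# The expected commands below illustrate the wrapping technique: a throw-away
+# ssh-agent adds the key from the given path (a fifo in real runs), removes
+# that path, and only then runs the wrapped command, so the key material never
+# outlives the run.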
+def test_wrap_args_with_ssh_agent_defaults():
+ rc = RunnerConfig('/')
+ res = rc.wrap_args_with_ssh_agent(['ansible-playbook', 'main.yaml'], '/tmp/sshkey')
+ assert res == ['ssh-agent', 'sh', '-c', 'ssh-add /tmp/sshkey && rm -f /tmp/sshkey && ansible-playbook main.yaml']
+
+
+def test_wrap_args_with_ssh_agent_with_auth():
+ rc = RunnerConfig('/')
+ res = rc.wrap_args_with_ssh_agent(['ansible-playbook', 'main.yaml'], '/tmp/sshkey', '/tmp/sshauth')
+ assert res == ['ssh-agent', '-a', '/tmp/sshauth', 'sh', '-c', 'ssh-add /tmp/sshkey && rm -f /tmp/sshkey && ansible-playbook main.yaml']
+
+
+def test_wrap_args_with_ssh_agent_silent():
+ rc = RunnerConfig('/')
+ res = rc.wrap_args_with_ssh_agent(['ansible-playbook', 'main.yaml'], '/tmp/sshkey', silence_ssh_add=True)
+ assert res == ['ssh-agent', 'sh', '-c', 'ssh-add /tmp/sshkey 2>/dev/null && rm -f /tmp/sshkey && ansible-playbook main.yaml']
+
+
+def test_process_isolation_defaults():
+ rc = RunnerConfig('/')
+ rc.artifact_dir = '/tmp/artifacts'
+ rc.playbook = 'main.yaml'
+ rc.command = 'ansible-playbook'
+ rc.process_isolation = True
+ with patch('os.path.exists') as path_exists:
+ path_exists.return_value = True
+ rc.prepare()
+
+ assert rc.command == [
+ 'bwrap',
+ '--die-with-parent',
+ '--unshare-pid',
+ '--dev-bind', '/', '/',
+ '--proc', '/proc',
+ '--bind', '/', '/',
+ '--chdir', '/project',
+ 'ansible-playbook', '-i', '/inventory', 'main.yaml',
+ ]
+
+
+@patch('os.makedirs', return_value=True)
+@patch('shutil.copytree', return_value=True)
+@patch('tempfile.mkdtemp', return_value="/tmp/dirisolation/foo")
+@patch('os.chmod', return_value=True)
+@patch('shutil.rmtree', return_value=True)
+def test_process_isolation_and_directory_isolation(mock_rmtree, mock_chmod, mock_mkdtemp,
+ mock_copytree, mock_makedirs):
+ # patch decorators inject mocks bottom-up, so the parameters mirror the
+ # decorator list in reverse
+ def new_exists(path):
+ if path == "/project":
+ return False
+ return True
+ rc = RunnerConfig('/')
+ rc.artifact_dir = '/tmp/artifacts'
+ rc.directory_isolation_path = '/tmp/dirisolation'
+ rc.playbook = 'main.yaml'
+ rc.command = 'ansible-playbook'
+ rc.process_isolation = True
+ with patch('os.path.exists', new=new_exists):
+ rc.prepare()
+
+ assert rc.command == [
+ 'bwrap',
+ '--die-with-parent',
+ '--unshare-pid',
+ '--dev-bind', '/', '/',
+ '--proc', '/proc',
+ '--bind', '/', '/',
+ '--chdir', os.path.realpath(rc.directory_isolation_path),
+ 'ansible-playbook', '-i', '/inventory', 'main.yaml',
+ ]
+
+
+def test_process_isolation_settings():
+ rc = RunnerConfig('/')
+ rc.artifact_dir = '/tmp/artifacts'
+ rc.playbook = 'main.yaml'
+ rc.command = 'ansible-playbook'
+ rc.process_isolation = True
+ rc.process_isolation_executable = 'not_bwrap'
+ rc.process_isolation_hide_paths = ['/home', '/var']
+ rc.process_isolation_show_paths = ['/usr']
+ rc.process_isolation_ro_paths = ['/venv']
+ rc.process_isolation_path = '/tmp'
+
+ with patch('os.path.exists') as path_exists:
+ path_exists.return_value = True
+ rc.prepare()
+
+ assert rc.command[0:8] == [
+ 'not_bwrap',
+ '--die-with-parent',
+ '--unshare-pid',
+ '--dev-bind', '/', '/',
+ '--proc', '/proc',
+ ]
+
+ # hide /home
+ assert rc.command[8] == '--bind'
+ assert 'ansible_runner_pi' in rc.command[9]
+ assert rc.command[10] == '/home'
+
+ # hide /var
+ assert rc.command[11] == '--bind'
+ assert 'ansible_runner_pi' in rc.command[12]
+ assert rc.command[13] == '/var' or rc.command[13] == '/private/var'
+
+ # read-only bind
+ assert rc.command[14:17] == ['--ro-bind', '/venv', '/venv']
+
+ # root bind
+ assert rc.command[17:20] == ['--bind', '/', '/']
+
+ # show /usr
+ assert rc.command[20:23] == ['--bind', '/usr', '/usr']
+
+ # chdir and ansible-playbook command
+ assert rc.command[23:] == ['--chdir', '/project', 'ansible-playbook', '-i', '/inventory', 'main.yaml']
+
+
+@patch('os.mkdir', return_value=True)
+def test_profiling_plugin_settings(mock_mkdir):
+ rc = RunnerConfig('/')
+ rc.playbook = 'main.yaml'
+ rc.command = 'ansible-playbook'
+ rc.resource_profiling = True
+ rc.resource_profiling_base_cgroup = 'ansible-runner'
+ rc.prepare()
+
+ expected_command_start = [
+ 'cgexec',
+ '--sticky',
+ '-g',
+ 'cpuacct,memory,pids:ansible-runner/{}'.format(rc.ident),
+ 'ansible-playbook'
+ ]
+ for index, element in enumerate(expected_command_start):
+ assert rc.command[index] == element
+ assert 'main.yaml' in rc.command
+ assert rc.env['ANSIBLE_CALLBACK_WHITELIST'] == 'cgroup_perf_recap'
+ assert rc.env['CGROUP_CONTROL_GROUP'] == 'ansible-runner/{}'.format(rc.ident)
+ assert rc.env['CGROUP_OUTPUT_DIR'] == os.path.normpath(os.path.join(rc.private_data_dir, 'profiling_data'))
+ assert rc.env['CGROUP_OUTPUT_FORMAT'] == 'json'
+ assert rc.env['CGROUP_CPU_POLL_INTERVAL'] == '0.25'
+ assert rc.env['CGROUP_MEMORY_POLL_INTERVAL'] == '0.25'
+ assert rc.env['CGROUP_PID_POLL_INTERVAL'] == '0.25'
+ assert rc.env['CGROUP_FILE_PER_TASK'] == 'True'
+ assert rc.env['CGROUP_WRITE_FILES'] == 'True'
+ assert rc.env['CGROUP_DISPLAY_RECAP'] == 'False'
+
+
+@patch('os.mkdir', return_value=True)
+def test_profiling_plugin_settings_with_custom_intervals(mock_mkdir):
+ rc = RunnerConfig('/')
+ rc.playbook = 'main.yaml'
+ rc.command = 'ansible-playbook'
+ rc.resource_profiling = True
+ rc.resource_profiling_base_cgroup = 'ansible-runner'
+ rc.resource_profiling_cpu_poll_interval = '.5'
+ rc.resource_profiling_memory_poll_interval = '.75'
+ rc.resource_profiling_pid_poll_interval = '1.5'
+ rc.prepare()
+ assert rc.env['CGROUP_CPU_POLL_INTERVAL'] == '.5'
+ assert rc.env['CGROUP_MEMORY_POLL_INTERVAL'] == '.75'
+ assert rc.env['CGROUP_PID_POLL_INTERVAL'] == '1.5'
diff --git a/test/unit/test_utils.py b/test/unit/test_utils.py
new file mode 100644
index 0000000..c51f909
--- /dev/null
+++ b/test/unit/test_utils.py
@@ -0,0 +1,215 @@
+import json
+import shutil
+import tempfile
+
+from pytest import raises
+from mock import patch
+
+from ansible_runner.utils import (
+ isplaybook,
+ isinventory,
+ dump_artifacts,
+ args2cmdline,
+)
+
+
+def test_isplaybook():
+
+ for obj in ('foo', {}, {'foo': 'bar'}, True, False, None):
+ assert isplaybook(obj) is False, obj
+
+ for obj in (['foo'], []):
+ assert isplaybook(obj) is True, obj
+
+
+def test_isinventory():
+ for obj in (__file__, {}, {'foo': 'bar'}):
+ assert isinventory(obj) is True, obj
+
+ for obj in ([], ['foo'], True, False, None):
+ assert isinventory(obj) is False, obj
+
+
+def test_dump_artifacts_private_data_dir():
+ data_dir = tempfile.gettempdir()
+ kwargs = {'private_data_dir': data_dir}
+ dump_artifacts(kwargs)
+ assert kwargs['private_data_dir'] == data_dir
+
+ kwargs = {'private_data_dir': None}
+ dump_artifacts(kwargs)
+ assert kwargs['private_data_dir'].startswith(tempfile.gettempdir())
+ shutil.rmtree(kwargs['private_data_dir'])
+
+ with raises(ValueError):
+ data_dir = '/foo'
+ kwargs = {'private_data_dir': data_dir}
+ dump_artifacts(kwargs)
+
+
+def test_dump_artifacts_playbook():
+ with patch('ansible_runner.utils.dump_artifact') as mock_dump_artifact:
+ # playbook as a native object
+ pb = [{'playbook': [{'hosts': 'all'}]}]
+ kwargs = {'private_data_dir': '/tmp', 'playbook': pb}
+ dump_artifacts(kwargs)
+ assert mock_dump_artifact.call_count == 1
+ data, fp, fn = mock_dump_artifact.call_args[0]
+ assert data == json.dumps(pb)
+ assert fp == '/tmp/project'
+ assert fn == 'main.json'
+
+ mock_dump_artifact.reset_mock()
+
+ # playbook as a path
+ pb = 'test.yml'
+ kwargs = {'private_data_dir': '/tmp', 'playbook': pb}
+ dump_artifacts(kwargs)
+ assert mock_dump_artifact.call_count == 0
+ assert mock_dump_artifact.called is False
+
+ mock_dump_artifact.reset_mock()
+
+ # invalid playbook structures
+ for obj in ({'foo': 'bar'}, None, True, False, 'foo', []):
+ mock_dump_artifact.reset_mock()
+ kwargs = {'private_data_dir': '/tmp', 'playbook': obj}
+ dump_artifacts(kwargs)
+ assert mock_dump_artifact.call_count == 0
+ assert mock_dump_artifact.called is False
+
+
+def test_dump_artifacts_roles():
+ with patch('ansible_runner.utils.dump_artifact') as mock_dump_artifact:
+ kwargs = dict(private_data_dir="/tmp",
+ role="test",
+ playbook=[{'playbook': [{'hosts': 'all'}]}])
+ dump_artifacts(kwargs)
+ assert mock_dump_artifact.call_count == 2
+ data, envpath, fp = mock_dump_artifact.call_args[0]
+ assert fp == "envvars"
+ data = json.loads(data)
+ assert "ANSIBLE_ROLES_PATH" in data
+ assert data['ANSIBLE_ROLES_PATH'] == "/tmp/roles"
+ mock_dump_artifact.reset_mock()
+ kwargs = dict(private_data_dir="/tmp",
+ role="test",
+ roles_path="/tmp/altrole",
+ playbook=[{'playbook': [{'hosts': 'all'}]}])
+ dump_artifacts(kwargs)
+ assert mock_dump_artifact.call_count == 2
+ data, envpath, fp = mock_dump_artifact.call_args[0]
+ assert fp == "envvars"
+ data = json.loads(data)
+ assert "ANSIBLE_ROLES_PATH" in data
+ assert data['ANSIBLE_ROLES_PATH'] == "/tmp/altrole:/tmp/roles"
+
+
+def test_dump_artifacts_inventory():
+ with patch('ansible_runner.utils.dump_artifact') as mock_dump_artifact:
+ # inventory as a string (INI)
+ inv = '[all]\nlocalhost'
+ kwargs = {'private_data_dir': '/tmp', 'inventory': inv}
+ dump_artifacts(kwargs)
+ assert mock_dump_artifact.call_count == 1
+ data, fp, fn = mock_dump_artifact.call_args[0]
+ assert data == inv
+ assert fp == '/tmp/inventory'
+ assert fn == 'hosts'
+
+ mock_dump_artifact.reset_mock()
+
+ # inventory as a path
+ inv = '/tmp'
+ kwargs = {'private_data_dir': '/tmp', 'inventory': inv}
+ dump_artifacts(kwargs)
+ assert mock_dump_artifact.call_count == 0
+ assert mock_dump_artifact.called is False
+ assert kwargs['inventory'] == inv
+
+ mock_dump_artifact.reset_mock()
+
+ # inventory as a native object
+ inv = {'foo': 'bar'}
+ kwargs = {'private_data_dir': '/tmp', 'inventory': inv}
+ dump_artifacts(kwargs)
+ assert mock_dump_artifact.call_count == 1
+ data, fp, fn = mock_dump_artifact.call_args[0]
+ assert data == json.dumps(inv)
+ assert fp == '/tmp/inventory'
+ assert fn == 'hosts.json'
+
+
+def test_dump_artifacts_extravars():
+ with patch('ansible_runner.utils.dump_artifact') as mock_dump_artifact:
+ extravars = {'foo': 'bar'}
+ kwargs = {'private_data_dir': '/tmp', 'extravars': extravars}
+ dump_artifacts(kwargs)
+ assert mock_dump_artifact.call_count == 1
+ data, fp, fn = mock_dump_artifact.call_args[0]
+ assert data == json.dumps(extravars)
+ assert fp == '/tmp/env'
+ assert fn == 'extravars'
+ assert 'extravars' not in kwargs
+
+
+def test_dump_artifacts_passwords():
+ with patch('ansible_runner.utils.dump_artifact') as mock_dump_artifact:
+ passwords = {'foo': 'bar'}
+ kwargs = {'private_data_dir': '/tmp', 'passwords': passwords}
+ dump_artifacts(kwargs)
+ assert mock_dump_artifact.call_count == 1
+ data, fp, fn = mock_dump_artifact.call_args[0]
+ assert data == json.dumps(passwords)
+ assert fp == '/tmp/env'
+ assert fn == 'passwords'
+ assert 'passwords' not in kwargs
+
+
+def test_dump_artifacts_settings():
+ with patch('ansible_runner.utils.dump_artifact') as mock_dump_artifact:
+ settings = {'foo': 'bar'}
+ kwargs = {'private_data_dir': '/tmp', 'settings': settings}
+ dump_artifacts(kwargs)
+ assert mock_dump_artifact.call_count == 1
+ data, fp, fn = mock_dump_artifact.call_args[0]
+ assert data == json.dumps(settings)
+ assert fp == '/tmp/env'
+ assert fn == 'settings'
+ assert 'settings' not in kwargs
+
+
+def test_dump_artifacts_ssh_key():
+ with patch('ansible_runner.utils.dump_artifact') as mock_dump_artifact:
+ ssh_key = '1234567890'
+ kwargs = {'private_data_dir': '/tmp', 'ssh_key': ssh_key}
+ dump_artifacts(kwargs)
+ assert mock_dump_artifact.call_count == 1
+ data, fp, fn = mock_dump_artifact.call_args[0]
+ assert data == ssh_key
+ assert fp == '/tmp/env'
+ assert fn == 'ssh_key'
+ assert 'ssh_key' not in kwargs
+
+
+def test_dump_artifacts_cmdline():
+ with patch('ansible_runner.utils.dump_artifact') as mock_dump_artifact:
+ cmdline = '--tags foo --skip-tags'
+ kwargs = {'private_data_dir': '/tmp', 'cmdline': cmdline}
+ dump_artifacts(kwargs)
+ assert mock_dump_artifact.call_count == 1
+ data, fp, fn = mock_dump_artifact.call_args[0]
+ assert data == cmdline
+ assert fp == '/tmp/env'
+ assert fn == 'cmdline'
+ assert 'cmdline' not in kwargs
+
+
+def test_fifo_write():
+ pass
+
+
+def test_args2cmdline():
+ res = args2cmdline('ansible', '-m', 'setup', 'localhost')
+ assert res == 'ansible -m setup localhost'
+
diff --git a/tools/test-setup.sh b/tools/test-setup.sh
new file mode 100755
index 0000000..3c78bdd
--- /dev/null
+++ b/tools/test-setup.sh
@@ -0,0 +1,11 @@
+#!/bin/bash
+
+# NOTE(pabelanger): Default to pip3 when possible, because python2 support
+# is EOL.
+PIP=$(command -v pip3) || PIP=$(command -v pip2)
+
+# NOTE(pabelanger): Tox on centos-7 is old, so upgrade it across all distros
+# to the latest version
+# NOTE(pabelanger): Cap zipp<0.6.0 due to a python2.7 issue with more-itertools
+# https://github.com/jaraco/zipp/issues/14
+sudo $PIP install -U tox "configparser<5" "zipp<0.6.0;python_version=='2.7'"
diff --git a/tox.ini b/tox.ini
new file mode 100644
index 0000000..00807da
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,29 @@
+[tox]
+envlist = linters, py27, py3
+
+[testenv]
+deps = pipenv
+ mock
+ ansible
+commands=
+ pipenv install --dev
+ pipenv run py.test -v test
+
+[testenv:linters]
+basepython = python3
+commands=
+ pipenv install --dev
+ pipenv run flake8 --version
+ pipenv run flake8 setup.py docs ansible_runner test
+ pipenv run yamllint --version
+ pipenv run yamllint -s .
+
+[testenv:py27]
+commands =
+ pipenv --python 2.7
+ {[testenv]commands}
+
+[testenv:py3]
+commands =
+ pipenv --python 3
+ {[testenv]commands}
diff --git a/utils/entrypoint.sh b/utils/entrypoint.sh
new file mode 100755
index 0000000..726619b
--- /dev/null
+++ b/utils/entrypoint.sh
@@ -0,0 +1,18 @@
+#!/usr/bin/env bash
+
+# In OpenShift, containers run as a random high-numbered uid that
+# doesn't exist in /etc/passwd, but Ansible module utils require a
+# named user. So if we're in OpenShift, we need to create one before
+# Ansible runs.
+if [ `id -u` -ge 500 ] || [ -z "${CURRENT_UID}" ]; then
+
+ cat << EOF > /tmp/passwd
+root:x:0:0:root:/root:/bin/bash
+runner:x:`id -u`:`id -g`:,,,:/runner:/bin/bash
+EOF
+
+ cat /tmp/passwd > /etc/passwd
+ rm /tmp/passwd
+fi
+
+exec tini -- "${@}"
diff --git a/utils/generate_callback_playbooks.py b/utils/generate_callback_playbooks.py
new file mode 100644
index 0000000..4613201
--- /dev/null
+++ b/utils/generate_callback_playbooks.py
@@ -0,0 +1,40 @@
+import os
+import imp
+
+
+"""
+This script creates a directory structure that corresponds to the
+parameterized inputs present in test/integration/test_display_callback.py.
+Run it from the root of the ansible-runner directory; it will write the
+files to a folder named "callback-testing-playbooks".
+"""
+
+
+callback_tests = imp.load_source('test.integration.test_display_callback', 'test/integration/test_display_callback.py')
+
+
+BASE_DIR = 'callback-testing-playbooks'
+names = [test_name for test_name in dir(callback_tests) if test_name.startswith('test_')]
+for name in names:
+
+ print('')
+ print('Processing test {}'.format(name))
+
+ bare_name = name[len('test_callback_plugin_'):]
+ if not os.path.exists('{}/{}'.format(BASE_DIR, bare_name)):
+ os.makedirs('{}/{}'.format(BASE_DIR, bare_name))
+ the_test = getattr(callback_tests, name)
+ for test_marker in the_test.pytestmark:
+ if test_marker.name == 'parametrize':
+ inputs = test_marker.args[1]
+ break
+ else:
+ raise Exception('Test {} is not parameterized in the expected way.'.format(the_test))
+
+ for case in inputs:
+ for k, v in case.items():
+ filename = '{}/{}/{}'.format(BASE_DIR, bare_name, k)
+ print(' Writing file {}'.format(filename))
+ if not os.path.exists(filename):
+ with open(filename, 'w') as f:
+ f.write(v)
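+
+
+# Usage (per the docstring above, from the repository root):
+# python utils/generate_callback_playbooks.py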