diff options
Diffstat (limited to 'docs/docsite')
379 files changed, 53162 insertions, 0 deletions
diff --git a/docs/docsite/.gitignore b/docs/docsite/.gitignore new file mode 100644 index 00000000..8fade815 --- /dev/null +++ b/docs/docsite/.gitignore @@ -0,0 +1,19 @@ +# Old compiled python stuff +*.py[co] +# package building stuff +build +# Emacs backup files... +*~ +.\#* +.doctrees +# Generated docs stuff +ansible*.xml +.buildinfo +objects.inv +.doctrees +rst/dev_guide/testing/sanity/index.rst +rst/modules/*.rst +rst/playbooks_keywords.rst +rst/collections/ + +*.min.css diff --git a/docs/docsite/.nojekyll b/docs/docsite/.nojekyll new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/docs/docsite/.nojekyll diff --git a/docs/docsite/Makefile b/docs/docsite/Makefile new file mode 100644 index 00000000..02158a41 --- /dev/null +++ b/docs/docsite/Makefile @@ -0,0 +1,179 @@ +OS := $(shell uname -s) +SITELIB = $(shell python -c "from distutils.sysconfig import get_python_lib; print get_python_lib()"): +PLUGIN_FORMATTER=../../hacking/build-ansible.py docs-build +TESTING_FORMATTER=../bin/testing_formatter.sh +KEYWORD_DUMPER=../../hacking/build-ansible.py document-keywords +CONFIG_DUMPER=../../hacking/build-ansible.py document-config +GENERATE_CLI=../../hacking/build-ansible.py generate-man +COLLECTION_DUMPER=../../hacking/build-ansible.py collection-meta +ifeq ($(shell echo $(OS) | egrep -ic 'Darwin|FreeBSD|OpenBSD|DragonFly'),1) +CPUS ?= $(shell sysctl hw.ncpu|awk '{print $$2}') +else +CPUS ?= $(shell nproc) +endif + +# Sets the build output directory for the main docsite if it's not already specified +ifndef BUILDDIR + BUILDDIR = _build +endif + +# Backwards compat for separate VARS +PLUGIN_ARGS= +ifdef MODULES +ifndef PLUGINS + PLUGIN_ARGS = -l $(MODULES) +else + PLUGIN_ARGS = -l $(MODULES),$(PLUGINS) +endif +else +ifdef PLUGINS + PLUGIN_ARGS = -l $(PLUGINS) +endif +endif + +PYTHON=python +MAJOR_VERSION := $(shell $(PYTHON) ../../packaging/release/versionhelper/version_helper.py --majorversion || echo error) + +ANSIBLE_VERSION_ARGS= +ifndef 
ANSIBLE_VERSION + # Only needed to make stable-2.10 docs build correctly. Do not apply to devel and future branches + ANSIBLE_VERSION=$(MAJOR_VERSION) +endif +ifdef ANSIBLE_VERSION + ANSIBLE_VERSION_ARGS=--ansible-version=$(ANSIBLE_VERSION) +endif + +DOC_PLUGINS ?= become cache callback cliconf connection httpapi inventory lookup netconf shell strategy vars + +# fetch version from project release.py as single source-of-truth +VERSION := $(shell $(PYTHON) ../../packaging/release/versionhelper/version_helper.py --raw || echo error) +ifeq ($(findstring error,$(VERSION)), error) +$(error "version_helper failed") +endif + +ifeq ($(findstring error,$(MAJOR_VERSION)), error) +$(error "version_helper failed to determine major version") +endif + +assertrst: +ifndef rst + $(error specify document or pattern with rst=somefile.rst) +endif + +all: docs + +docs: htmldocs + +coredocs: core_htmldocs + +generate_rst: collections_meta config cli keywords plugins testing +core_generate_rst: collections_meta config cli keywords base_plugins testing + +# The following two symlinks are necessary to produce two different docsets +# from the same set of rst files (Ansible the package docs, and core docs). 
+# Symlink the relevant index into place for building Ansible docs +ansible_structure: generate_rst + # We must have python and python-packaging for the version_helper + # script so use it for version comparison + if python -c "import sys, packaging.version as p; sys.exit(not p.Version('$(ANSIBLE_VERSION)') > p.Version('2.10'))" ; then \ + echo "Creating symlinks in generate_rst"; \ + ln -sf ../rst/ansible_index.rst rst/index.rst; \ + ln -sf ../sphinx_conf/ansible_conf.py rst/conf.py; \ + else \ + echo 'Creating symlinks for older ansible in generate_rst'; \ + ln -sf ../rst/2.10_index.rst rst/index.rst; \ + ln -sf ../sphinx_conf/2.10_conf.py rst/conf.py; \ + fi + +# Symlink the relevant index into place for building core docs +core_structure: core_generate_rst + @echo "Creating symlinks in core_generate_rst" + -ln -sf ../rst/core_index.rst rst/index.rst + -ln -sf ../sphinx_conf/core_conf.py rst/conf.py + +htmldocs: ansible_structure + CPUS=$(CPUS) $(MAKE) -f Makefile.sphinx html + +core_htmldocs: core_structure + CPUS=$(CPUS) $(MAKE) -f Makefile.sphinx html + +singlehtmldocs: ansible_structure + CPUS=$(CPUS) $(MAKE) -f Makefile.sphinx singlehtml + +core_singlehtmldocs: core_structure + CPUS=$(CPUS) $(MAKE) -f Makefile.sphinx singlehtml + +linkcheckdocs: generate_rst + CPUS=$(CPUS) $(MAKE) -f Makefile.sphinx linkcheck + +webdocs: docs + +#TODO: leaving htmlout removal for those having older versions, should eventually be removed also +clean: + @echo "Cleaning $(BUILDDIR)" + -rm -rf $(BUILDDIR)/doctrees + -rm -rf $(BUILDDIR)/html + -rm -rf htmlout + -rm -rf module_docs + -rm -rf $(BUILDDIR) + -rm -f .buildinfo + -rm -f objects.inv + -rm -rf *.doctrees + @echo "Cleaning up minified css files" + find . -type f -name "*.min.css" -delete + @echo "Cleaning up byte compiled python stuff" + find . -regex ".*\.py[co]$$" -delete + @echo "Cleaning up editor backup files" + find . -type f \( -name "*~" -or -name "#*" \) -delete + find . 
-type f \( -name "*.swp" \) -delete + @echo "Cleaning up generated rst" + rm -f rst/playbooks_directives.rst + rm -f rst/reference_appendices/config.rst + rm -f rst/reference_appendices/playbooks_keywords.rst + rm -f rst/dev_guide/collections_galaxy_meta.rst + rm -f rst/cli/*.rst + for filename in `ls rst/collections/` ; do \ + if test x"$$filename" != x'all_plugins.rst' ; then \ + rm -rf "rst/collections/$$filename"; \ + fi \ + done + @echo "Cleanning up generated ansible_structure" + find -type l -delete + @echo "Cleaning up legacy generated rst locations" + rm -rf rst/modules + rm -f rst/plugins/*/*.rst + +.PHONY: docs clean + +collections_meta: ../templates/collections_galaxy_meta.rst.j2 + $(COLLECTION_DUMPER) --template-file=../templates/collections_galaxy_meta.rst.j2 --output-dir=rst/dev_guide/ ../../lib/ansible/galaxy/data/collections_galaxy_meta.yml + +# TODO: make generate_man output dir cli option +cli: + mkdir -p rst/cli + $(GENERATE_CLI) --template-file=../templates/cli_rst.j2 --output-dir=rst/cli/ --output-format rst ../../lib/ansible/cli/*.py + +keywords: ../templates/playbooks_keywords.rst.j2 + $(KEYWORD_DUMPER) --template-dir=../templates --output-dir=rst/reference_appendices/ ./keyword_desc.yml + +config: ../templates/config.rst.j2 + $(CONFIG_DUMPER) --template-file=../templates/config.rst.j2 --output-dir=rst/reference_appendices/ ../../lib/ansible/config/base.yml + +# For now, if we're building on devel, just build base docs. 
In the future we'll want to build docs that +# are the latest versions on galaxy (using a different antsibull-docs subcommand) +plugins: + $(PLUGIN_FORMATTER) full -o rst $(ANSIBLE_VERSION_ARGS) $(PLUGIN_ARGS);\ + +# This only builds the plugin docs included with ansible-base +base_plugins: + $(PLUGIN_FORMATTER) base -o rst $(PLUGIN_ARGS);\ + +testing: + $(TESTING_FORMATTER) + +epub: + (CPUS=$(CPUS) $(MAKE) -f Makefile.sphinx epub) + +htmlsingle: assertrst + sphinx-build -j $(CPUS) -b html -d $(BUILDDIR)/doctrees ./rst $(BUILDDIR)/html rst/$(rst) + @echo "Output is in $(BUILDDIR)/html/$(rst:.rst=.html)" diff --git a/docs/docsite/Makefile.sphinx b/docs/docsite/Makefile.sphinx new file mode 100644 index 00000000..c05e3c3e --- /dev/null +++ b/docs/docsite/Makefile.sphinx @@ -0,0 +1,25 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line. +SPHINXCONFDIR = rst +SPHINXOPTS = -j $(CPUS) -n -w rst_warnings -c "$(SPHINXCONFDIR)" +SPHINXBUILD = sphinx-build +SPHINXPROJ = sdfsdf +SOURCEDIR = rst + +# Sets the build output directory if it's not specified on the command line +ifndef BUILDDIR + BUILDDIR = _build +endif + +# Put it first so that "make" without argument is like "make help". +help: + $(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile.sphinx + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). +%: Makefile.sphinx + $(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/docsite/README.md b/docs/docsite/README.md new file mode 100644 index 00000000..d643792e --- /dev/null +++ b/docs/docsite/README.md @@ -0,0 +1,26 @@ +Ansible documentation +===================== + +This project hosts the source behind the general pages of [docs.ansible.com](https://docs.ansible.com/). Module-specific documentation is hosted in the various collections repositories. 
See [Ansible Galaxy](https://galaxy.ansible.com/), the list of [Ansible-maintained collections](https://docs.ansible.com/ansible/devel/community/contributing_maintained_collections.html), and the [ansible-collections organization](https://github.com/ansible-collections) for collections sources. + +To create clear, concise, and consistent contributions to Ansible documentation, please refer to the following information. + +Contributions +============= +Contributions to the documentation are welcome. + +The Ansible community produces guidance on contributions, building documentation, and submitting pull requests, which you can find in [Contributing to the Ansible Documentation](https://docs.ansible.com/ansible/latest/community/documentation_contributions.html). + +You can also join the [Docs Working Group](https://github.com/ansible/community/wiki/Docs) and/or the ``#ansible-docs`` channel on freenode IRC. + +Ansible style guide +=================== +Ansible documentation is written in ReStructuredText(RST). The [Ansible style guide](https://docs.ansible.com/ansible/latest/dev_guide/style_guide/index.html#linguistic-guidelines) provides linguistic direction and technical guidelines for working with reStructuredText, in addition to other resources. + +Tools +===== +The Ansible community uses a range of tools and programs for working with Ansible documentation. Learn more about [Other Tools and Programs](https://docs.ansible.com/ansible/latest/community/other_tools_and_programs.html#popular-editors) in the Ansible Community Guide. + +GitHub +====== +[Ansible documentation](https://github.com/ansible/ansible/tree/devel/docs/docsite) is hosted on the Ansible GitHub project and various collection repositories, especially those in the [ansible-collections organization](https://github.com/ansible-collections). For general GitHub workflows and other information, see the [GitHub Guides](https://guides.github.com/). 
diff --git a/docs/docsite/_extensions/pygments_lexer.py b/docs/docsite/_extensions/pygments_lexer.py new file mode 100644 index 00000000..62c7fdfd --- /dev/null +++ b/docs/docsite/_extensions/pygments_lexer.py @@ -0,0 +1,187 @@ +# -*- coding: utf-8 -*- +# pylint: disable=no-self-argument +# +# Copyright 2006-2017 by the Pygments team, see AUTHORS at +# https://bitbucket.org/birkenfeld/pygments-main/raw/7941677dc77d4f2bf0bbd6140ade85a9454b8b80/AUTHORS +# Copyright by Norman Richards (original author of JSON lexer). +# +# Licensed under BSD license: +# +# Copyright (c) 2006-2017 by the respective authors (see AUTHORS file). +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +from __future__ import absolute_import, print_function + +from pygments.lexer import LexerContext, ExtendedRegexLexer, DelegatingLexer, RegexLexer, bygroups, include +from pygments.lexers import DiffLexer +from pygments import token + +import re + + +class AnsibleOutputPrimaryLexer(RegexLexer): + name = 'Ansible-output-primary' + + # The following definitions are borrowed from Pygment's JSON lexer. + # It has been originally authored by Norman Richards. + + # integer part of a number + int_part = r'-?(0|[1-9]\d*)' + + # fractional part of a number + frac_part = r'\.\d+' + + # exponential part of a number + exp_part = r'[eE](\+|-)?\d+' + + tokens = { + # ######################################### + # # BEGIN: states from JSON lexer ######### + # ######################################### + 'whitespace': [ + (r'\s+', token.Text), + ], + + # represents a simple terminal value + 'simplevalue': [ + (r'(true|false|null)\b', token.Keyword.Constant), + (('%(int_part)s(%(frac_part)s%(exp_part)s|' + '%(exp_part)s|%(frac_part)s)') % vars(), + token.Number.Float), + (int_part, token.Number.Integer), + (r'"(\\\\|\\"|[^"])*"', token.String), + ], + + + # the right hand side of an object, after the attribute name + 'objectattribute': [ + include('value'), + (r':', token.Punctuation), + # comma terminates the attribute but expects more + (r',', token.Punctuation, '#pop'), + # a closing bracket terminates the entire object, so pop twice + (r'\}', token.Punctuation, '#pop:2'), + ], + + # a json object - { attr, attr, ... } + 'objectvalue': [ + include('whitespace'), + (r'"(\\\\|\\"|[^"])*"', token.Name.Tag, 'objectattribute'), + (r'\}', token.Punctuation, '#pop'), + ], + + # json array - [ value, value, ... 
} + 'arrayvalue': [ + include('whitespace'), + include('value'), + (r',', token.Punctuation), + (r'\]', token.Punctuation, '#pop'), + ], + + # a json value - either a simple value or a complex value (object or array) + 'value': [ + include('whitespace'), + include('simplevalue'), + (r'\{', token.Punctuation, 'objectvalue'), + (r'\[', token.Punctuation, 'arrayvalue'), + ], + # ######################################### + # # END: states from JSON lexer ########### + # ######################################### + + 'host-postfix': [ + (r'\n', token.Text, '#pop:3'), + (r'( )(=>)( )(\{)', + bygroups(token.Text, token.Punctuation, token.Text, token.Punctuation), + 'objectvalue'), + ], + + 'host-error': [ + (r'(?:(:)( )(UNREACHABLE|FAILED)(!))?', + bygroups(token.Punctuation, token.Text, token.Keyword, token.Punctuation), + 'host-postfix'), + (r'', token.Text, 'host-postfix'), + ], + + 'host-name': [ + (r'(\[)([^ \]]+)(?:( )(=>)( )([^\]]+))?(\])', + bygroups(token.Punctuation, token.Name.Variable, token.Text, token.Punctuation, token.Text, token.Name.Variable, token.Punctuation), + 'host-error') + ], + + 'host-result': [ + (r'\n', token.Text, '#pop'), + (r'( +)(ok|changed|failed|skipped|unreachable)(=)([0-9]+)', + bygroups(token.Text, token.Keyword, token.Punctuation, token.Number.Integer)), + ], + + 'root': [ + (r'(PLAY|TASK|PLAY RECAP)(?:( )(\[)([^\]]+)(\]))?( )(\*+)(\n)', + bygroups(token.Keyword, token.Text, token.Punctuation, token.Literal, token.Punctuation, token.Text, token.Name.Variable, token.Text)), + (r'(fatal|ok|changed|skipping)(:)( )', + bygroups(token.Keyword, token.Punctuation, token.Text), + 'host-name'), + (r'(\[)(WARNING)(\]:)([^\n]+)', + bygroups(token.Punctuation, token.Keyword, token.Punctuation, token.Text)), + (r'([^ ]+)( +)(:)', + bygroups(token.Name, token.Text, token.Punctuation), + 'host-result'), + (r'(\tto retry, use: )(.*)(\n)', bygroups(token.Text, token.Literal.String, token.Text)), + (r'.*\n', token.Other), + ], + } + + +class 
AnsibleOutputLexer(DelegatingLexer): + name = 'Ansible-output' + aliases = ['ansible-output'] + + def __init__(self, **options): + super(AnsibleOutputLexer, self).__init__(DiffLexer, AnsibleOutputPrimaryLexer, **options) + + +# #################################################################################################### +# # Sphinx plugin #################################################################################### +# #################################################################################################### + +__version__ = "0.1.0" +__license__ = "BSD license" +__author__ = "Felix Fontein" +__author_email__ = "felix@fontein.de" + + +def setup(app): + """ Initializer for Sphinx extension API. + See http://www.sphinx-doc.org/en/stable/extdev/index.html#dev-extensions. + """ + for lexer in [ + AnsibleOutputLexer(startinline=True) + ]: + app.add_lexer(lexer.name, lexer) + for alias in lexer.aliases: + app.add_lexer(alias, lexer) + + return dict(version=__version__, parallel_read_safe=True) diff --git a/docs/docsite/_static/ansible.css b/docs/docsite/_static/ansible.css new file mode 100644 index 00000000..f9d0b1a4 --- /dev/null +++ b/docs/docsite/_static/ansible.css @@ -0,0 +1,59 @@ +/*! minified with http://css-minify.online-domain-tools.com/ - all comments + * must have ! to preserve during minifying with that tool *//*! Fix for read the docs theme: + * https://rackerlabs.github.io/docs-rackspace/tools/rtd-tables.html + *//*! override table width restrictions */@media screen and (min-width:767px){/*! If we ever publish to read the docs, we need to use !important for these + * two styles as read the docs itself loads their theme in a way that we + * can't otherwise override it. + */.wy-table-responsive table td{white-space:normal}.wy-table-responsive{overflow:visible}}/*! + * We use the class documentation-table for attribute tables where the first + * column is the name of an attribute and the second column is the description. + *//*! 
These tables look like this: + * + * Attribute Name Description + * -------------- ----------- + * **NAME** This is a multi-line description + * str/required that can span multiple lines + * added in x.y + * With multiple paragraphs + * -------------- ----------- + * + * **NAME** is given the class .value-name + * str is given the class .value-type + * / is given the class .value-separator + * required is given the class .value-required + * added in x.y is given the class .value-added-in + *//*! The extra .rst-content is so this will override rtd theme */.rst-content table.documentation-table td{vertical-align:top}table.documentation-table td:first-child{white-space:nowrap;vertical-align:top}table.documentation-table td:first-child p:first-child{font-weight:700;display:inline}/*! This is now redundant with above position-based styling *//*! +table.documentation-table .value-name { + font-weight: bold; + display: inline; +} +*/table.documentation-table .value-type{font-size:x-small;color:purple;display:inline}table.documentation-table .value-separator{font-size:x-small;display:inline}table.documentation-table .value-required{font-size:x-small;color:red;display:inline}.value-added-in{font-size:x-small;font-style:italic;color:green;display:inline}/*! 
Ansible-specific CSS pulled out of rtd theme for 2.9 */.DocSiteProduct-header{flex:1;-webkit-flex:1;padding:10px 20px 20px;display:flex;display:-webkit-flex;flex-direction:column;-webkit-flex-direction:column;align-items:center;-webkit-align-items:center;justify-content:flex-start;-webkit-justify-content:flex-start;margin-left:20px;margin-right:20px;text-decoration:none;font-weight:400;font-family:'Open Sans',sans-serif}.DocSiteProduct-header:active,.DocSiteProduct-header:focus,.DocSiteProduct-header:visited{color:#fff}.DocSiteProduct-header--core{font-size:25px;background-color:#5bbdbf;border:2px solid #5bbdbf;border-top-left-radius:4px;border-top-right-radius:4px;color:#fff;padding-left:2px;margin-left:2px}.DocSiteProduct-headerAlign{width:100%}.DocSiteProduct-logo{width:60px;height:60px;margin-bottom:-9px}.DocSiteProduct-logoText{margin-top:6px;font-size:25px;text-align:left}.DocSiteProduct-CheckVersionPara{margin-left:2px;padding-bottom:4px;margin-right:2px;margin-bottom:10px}/*! Ansible color scheme */.wy-nav-top,.wy-side-nav-search{background-color:#5bbdbf}.wy-menu-vertical header,.wy-menu-vertical p.caption{color:#5bbdbf}.wy-menu-vertical a{padding:0}.wy-menu-vertical a.reference.internal{padding:.4045em 1.618em}/*! Override sphinx rtd theme max-with of 800px */.wy-nav-content{max-width:100%}/*! Override sphinx_rtd_theme - keeps left-nav from overwriting Documentation title */.wy-nav-side{top:45px}/*! Ansible - changed absolute to relative to remove extraneous side scroll bar */.wy-grid-for-nav{position:relative}/*! Ansible narrow the search box */.wy-side-nav-search input[type=text]{width:90%;padding-left:24px}/*! 
Ansible - remove so highlight indenting is correct */.rst-content .highlighted{padding:0}.DocSiteBanner{display:flex;display:-webkit-flex;justify-content:center;-webkit-justify-content:center;flex-wrap:wrap;-webkit-flex-wrap:wrap;margin-bottom:25px}.DocSiteBanner-imgWrapper{max-width:100%}td,th{min-width:100px}table{overflow-x:auto;display:block;max-width:100%}.documentation-table td.elbow-placeholder{border-left:1px solid #000;border-top:0;width:30px;min-width:30px}.documentation-table td,.documentation-table th{padding:4px;border-left:1px solid #000;border-top:1px solid #000}.documentation-table{border-right:1px solid #000;border-bottom:1px solid #000}@media print{*{background:0 0!important;color:#000!important;text-shadow:none!important;filter:none!important;-ms-filter:none!important}#nav,a,a:visited{text-decoration:underline}a[href]:after{content:" (" attr(href) ")"}abbr[title]:after{content:" (" attr(title) ")"}.ir a:after,a[href^="javascript:"]:after,a[href^="#"]:after{content:""}/*! Don't show links for images, or javascript/internal links */pre,blockquote{border:0 solid #999;page-break-inside:avoid}thead{display:table-header-group}/*! 
h5bp.com/t */tr,img{page-break-inside:avoid}img{max-width:100%!important}@page{margin:.5cm}h2,h3,p{orphans:3;widows:3}h2,h3{page-break-after:avoid}#google_image_div,.DocSiteBanner{display:none!important}}#sideBanner,.DocSite-globalNav{display:none}.DocSite-sideNav{display:block;margin-bottom:40px}.DocSite-nav{display:none}.ansibleNav{background:#000;padding:0 20px;width:auto;border-bottom:1px solid #444;font-size:14px;z-index:1}.ansibleNav ul{list-style:none;padding-left:0;margin-top:0}.ansibleNav ul li{padding:7px 0;border-bottom:1px solid #444}.ansibleNav ul li:last-child{border:none}.ansibleNav ul li a{color:#fff;text-decoration:none;text-transform:uppercase;padding:6px 0}.ansibleNav ul li a:hover{color:#5bbdbf;background:0 0}h4{font-size:105%}h5{font-size:90%}h6{font-size:80%}@media screen and (min-width:768px){.DocSite-globalNav{display:block;position:fixed}#sideBanner{display:block}.DocSite-sideNav{display:none}.DocSite-nav{flex:initial;-webkit-flex:initial;display:flex;display:-webkit-flex;flex-direction:row;-webkit-flex-direction:row;justify-content:flex-start;-webkit-justify-content:flex-start;padding:15px;background-color:#000;text-decoration:none;font-family:'Open Sans',sans-serif}.DocSiteNav-logo{width:28px;height:28px;margin-right:8px;margin-top:-6px;position:fixed;z-index:1}.DocSiteNav-title{color:#fff;font-size:20px;position:fixed;margin-left:40px;margin-top:-4px;z-index:1}.ansibleNav{height:45px;width:100%;font-size:13px;padding:0 60px 0 0}.ansibleNav ul{float:right;display:flex;flex-wrap:nowrap;margin-top:13px}.ansibleNav ul li{padding:0;border-bottom:none}.ansibleNav ul li a{color:#fff;text-decoration:none;text-transform:uppercase;padding:8px 13px}h4{font-size:105%}h5{font-size:90%}h6{font-size:80%}}@media screen and 
(min-width:768px){#sideBanner,.DocSite-globalNav{display:block}.DocSite-sideNav{display:none}.DocSite-nav{flex:initial;-webkit-flex:initial;display:flex;display:-webkit-flex;flex-direction:row;-webkit-flex-direction:row;justify-content:flex-start;-webkit-justify-content:flex-start;padding:15px;background-color:#000;text-decoration:none;font-family:'Open Sans',sans-serif}.DocSiteNav-logo{width:28px;height:28px;margin-right:8px;margin-top:-6px;position:fixed}.DocSiteNav-title{color:#fff;font-size:20px;position:fixed;margin-left:40px;margin-top:-4px}.ansibleNav{height:45px;font-size:13px;padding:0 60px 0 0}.ansibleNav ul{float:right;display:flex;flex-wrap:nowrap;margin-top:13px}.ansibleNav ul li{padding:0;border-bottom:none}.ansibleNav ul li a{color:#fff;text-decoration:none;text-transform:uppercase;padding:8px 13px}h4{font-size:105%}h5{font-size:90%}h6{font-size:80%}} +/* ansibleOptionLink is adapted from h1 .headerlink in sphinx_rtd_theme */ +tr:hover .ansibleOptionLink::after { + visibility: visible; +} +tr .ansibleOptionLink::after { + content: ""; + font-family: FontAwesome; +} +tr .ansibleOptionLink { + visibility: hidden; + display: inline-block; + font: normal normal normal 14px/1 FontAwesome; + text-rendering: auto; + -webkit-font-smoothing: antialiased; + -moz-osx-font-smoothing: grayscale; +} + +@media screen and (min-width:767px){ + /* Move anchors a bit up so that they aren't hidden by the header bar */ + section [id] { + padding-top: 45px; + margin-top: -45px; + } + /* Without this, for example most links in the page's TOC aren't usable anymore */ + section a[id] { + padding-top: 0; + margin-top: 0; + } +}
\ No newline at end of file diff --git a/docs/docsite/_static/pygments.css b/docs/docsite/_static/pygments.css new file mode 100644 index 00000000..8774dd3b --- /dev/null +++ b/docs/docsite/_static/pygments.css @@ -0,0 +1,76 @@ +.highlight { background: #f8f8f8 } +.highlight .hll { background-color: #ffffcc; border: 1px solid #edff00; padding-top: 2px; border-radius: 3px; display: block } +.highlight .c { color: #6a737d; font-style: italic } /* Comment */ +.highlight .err { color: #a61717; background-color: #e3d2d2; color: #a61717; border: 1px solid #FF0000 } /* Error */ +.highlight .k { color: #007020; font-weight: bold } /* Keyword */ +.highlight .o { color: #666666; font-weight: bold } /* Operator */ +.highlight .ch { color: #6a737d; font-style: italic } /* Comment.Hashbang */ +.highlight .cm { color: #6a737d; font-style: italic } /* Comment.Multiline */ +.highlight .cp { color: #007020 } /* Comment.Preproc */ +.highlight .cpf { color: #6a737d; font-style: italic } /* Comment.PreprocFile */ +.highlight .c1 { color: #6a737d; font-style: italic } /* Comment.Single */ +.highlight .cs { color: #999999; font-weight: bold; font-style: italic; background-color: #fff0f0 } /* Comment.Special */ +.highlight .gd { color: #A00000; background-color: #ffdddd } /* Generic.Deleted */ +.highlight .gd .x { color: #A00000; background-color: #ffaaaa } +.highlight .ge { font-style: italic } /* Generic.Emph */ +.highlight .gr { color: #aa0000 } /* Generic.Error */ +.highlight .gh { color: #000080; font-weight: bold } /* Generic.Heading */ +.highlight .gi { color: #00A000; background-color: #ddffdd } /* Generic.Inserted */ +.highlight .gi .x { color: #00A000; background-color: #aaffaa; } +.highlight .go { color: #333333 } /* Generic.Output */ +.highlight .gp { color: #c65d09; font-weight: bold } /* Generic.Prompt */ +.highlight .gs { font-weight: bold } /* Generic.Strong */ +.highlight .gu { color: #800080; font-weight: bold } /* Generic.Subheading */ +.highlight .gt { color: #0040D0 
} /* Generic.Traceback */ +.highlight .kc { color: #007020; font-weight: bold } /* Keyword.Constant */ +.highlight .kd { color: #007020; font-weight: bold } /* Keyword.Declaration */ +.highlight .kn { color: #007020; font-weight: bold } /* Keyword.Namespace */ +.highlight .kp { color: #007020 } /* Keyword.Pseudo */ +.highlight .kr { color: #007020; font-weight: bold } /* Keyword.Reserved */ +.highlight .kt { color: #902000 } /* Keyword.Type */ +.highlight .l { color: #032f62 } /* Literal */ +.highlight .m { color: #208050 } /* Literal.Number */ +.highlight .s { color: #4070a0 } /* Literal.String */ +.highlight .n { color: #333333 } +.highlight .p { font-weight: bold } +.highlight .na { color: teal } /* Name.Attribute */ +.highlight .nb { color: #0086b3 } /* Name.Builtin */ +.highlight .nc { color: #445588; font-weight: bold } /* Name.Class */ +.highlight .no { color: teal; } /* Name.Constant */ +.highlight .nd { color: #555555; font-weight: bold } /* Name.Decorator */ +.highlight .ni { color: purple; font-weight: bold } /* Name.Entity */ +.highlight .ne { color: #990000; font-weight: bold } /* Name.Exception */ +.highlight .nf { color: #990000; font-weight: bold } /* Name.Function */ +.highlight .nl { color: #002070; font-weight: bold } /* Name.Label */ +.highlight .nn { color: #555555; font-weight: bold } /* Name.Namespace */ +.highlight .nt { color: #22863a } /* Name.Tag */ +.highlight .nv { color: #9960b5; font-weight: bold } /* Name.Variable */ +.highlight .p { color: font-weight: bold } /* Indicator */ +.highlight .ow { color: #007020; font-weight: bold } /* Operator.Word */ +.highlight .w { color: #bbbbbb } /* Text.Whitespace */ +.highlight .mb { color: #009999 } /* Literal.Number.Bin */ +.highlight .mf { color: #009999 } /* Literal.Number.Float */ +.highlight .mh { color: #009999 } /* Literal.Number.Hex */ +.highlight .mi { color: #009999 } /* Literal.Number.Integer */ +.highlight .mo { color: #009999 } /* Literal.Number.Oct */ +.highlight .sa { color: 
#dd1144 } /* Literal.String.Affix */ +.highlight .sb { color: #dd1144 } /* Literal.String.Backtick */ +.highlight .sc { color: #dd1144 } /* Literal.String.Char */ +.highlight .dl { color: #dd1144 } /* Literal.String.Delimiter */ +.highlight .sd { color: #dd1144; font-style: italic } /* Literal.String.Doc */ +.highlight .s2 { color: #dd1144 } /* Literal.String.Double */ +.highlight .se { color: #dd1144; font-weight: bold } /* Literal.String.Escape */ +.highlight .sh { color: #dd1144 } /* Literal.String.Heredoc */ +.highlight .si { color: #dd1144; font-style: italic } /* Literal.String.Interpol */ +.highlight .sx { color: #dd1144 } /* Literal.String.Other */ +.highlight .sr { color: #009926 } /* Literal.String.Regex */ +.highlight .s1 { color: #dd1144 } /* Literal.String.Single */ +.highlight .ss { color: #990073 } /* Literal.String.Symbol */ +.highlight .bp { color: #999999 } /* Name.Builtin.Pseudo */ +.highlight .fm { color: #06287e } /* Name.Function.Magic */ +.highlight .vc { color: teal } /* Name.Variable.Class */ +.highlight .vg { color: teal } /* Name.Variable.Global */ +.highlight .vi { color: teal } /* Name.Variable.Instance */ +.highlight .vm { color: #bb60d5 } /* Name.Variable.Magic */ +.highlight .il { color: #009999 } /* Literal.Number.Integer.Long */ +.highlight .gc { color: #909090; background-color: #eaf2f5 } diff --git a/docs/docsite/ansible_2_10.inv b/docs/docsite/ansible_2_10.inv Binary files differnew file mode 100644 index 00000000..900df6e1 --- /dev/null +++ b/docs/docsite/ansible_2_10.inv diff --git a/docs/docsite/ansible_2_5.inv b/docs/docsite/ansible_2_5.inv Binary files differnew file mode 100644 index 00000000..05e5a2b0 --- /dev/null +++ b/docs/docsite/ansible_2_5.inv diff --git a/docs/docsite/ansible_2_6.inv b/docs/docsite/ansible_2_6.inv Binary files differnew file mode 100644 index 00000000..b84a2661 --- /dev/null +++ b/docs/docsite/ansible_2_6.inv diff --git a/docs/docsite/ansible_2_7.inv b/docs/docsite/ansible_2_7.inv Binary files 
differnew file mode 100644 index 00000000..81cea2cb --- /dev/null +++ b/docs/docsite/ansible_2_7.inv diff --git a/docs/docsite/ansible_2_8.inv b/docs/docsite/ansible_2_8.inv Binary files differnew file mode 100644 index 00000000..1d8fcd07 --- /dev/null +++ b/docs/docsite/ansible_2_8.inv diff --git a/docs/docsite/ansible_2_9.inv b/docs/docsite/ansible_2_9.inv Binary files differnew file mode 100644 index 00000000..83badf67 --- /dev/null +++ b/docs/docsite/ansible_2_9.inv diff --git a/docs/docsite/collection-plugins.yml b/docs/docsite/collection-plugins.yml new file mode 100644 index 00000000..499274b4 --- /dev/null +++ b/docs/docsite/collection-plugins.yml @@ -0,0 +1,17 @@ +# We also need an example of modules hosted in Automation Hub +# We'll likely move to data hosted in botmeta instead of a standalone file but +# we'll need all of these same details. +module: + purefa_user: + source: 'https://galaxy.ansible.com/' + fqcn: 'purestorage.flasharray' + purefa_vg: + source: 'https://galaxy.ansible.com/' + fqcn: 'purestorage.flasharray' + gcp_compute_firewall_info: + source: 'https://galaxy.ansible.com/' + fqcn: 'google.cloud' +module_utils: + purefa: + source: 'https://galaxy.ansible.com/' + fqcn: 'purestorage.flasharray' diff --git a/docs/docsite/jinja2.inv b/docs/docsite/jinja2.inv Binary files differnew file mode 100644 index 00000000..552a9584 --- /dev/null +++ b/docs/docsite/jinja2.inv diff --git a/docs/docsite/js/ansible/application.js b/docs/docsite/js/ansible/application.js new file mode 100644 index 00000000..5e9f81ba --- /dev/null +++ b/docs/docsite/js/ansible/application.js @@ -0,0 +1,106 @@ +angular.module('ansibleApp', []).filter('moduleVersion', function() { + return function(modules, version) { + + var parseVersionString = function (str) { + if (typeof(str) != 'string') { return false; } + var x = str.split('.'); + // parse from string or default to 0 if can't parse + var maj = parseInt(x[0]) || 0; + var min = parseInt(x[1]) || 0; + var pat = 
parseInt(x[2]) || 0; + return { + major: maj, + minor: min, + patch: pat + } + } + + var vMinMet = function(vmin, vcurrent) { + minimum = parseVersionString(vmin); + running = parseVersionString(vcurrent); + if (running.major != minimum.major) + return (running.major > minimum.major); + else { + if (running.minor != minimum.minor) + return (running.minor > minimum.minor); + else { + if (running.patch != minimum.patch) + return (running.patch > minimum.patch); + else + return true; + } + } + }; + + var result = []; + if (!version) { + return modules; + } + for (var i = 0; i < modules.length; i++) { + if (vMinMet(modules[i].version_added, version)) { + result[result.length] = modules[i]; + } + } + + return result; + }; +}).filter('uniqueVersion', function() { + return function(modules) { + var result = []; + var inArray = function (needle, haystack) { + var length = haystack.length; + for(var i = 0; i < length; i++) { + if(haystack[i] == needle) return true; + } + return false; + } + + var parseVersionString = function (str) { + if (typeof(str) != 'string') { return false; } + var x = str.split('.'); + // parse from string or default to 0 if can't parse + var maj = parseInt(x[0]) || 0; + var min = parseInt(x[1]) || 0; + var pat = parseInt(x[2]) || 0; + return { + major: maj, + minor: min, + patch: pat + } + } + + for (var i = 0; i < modules.length; i++) { + if (!inArray(modules[i].version_added, result)) { + // Some module do not define version + if (modules[i].version_added) { + result[result.length] = "" + modules[i].version_added; + } + } + } + + result.sort( + function (a, b) { + ao = parseVersionString(a); + bo = parseVersionString(b); + if (ao.major == bo.major) { + if (ao.minor == bo.minor) { + if (ao.patch == bo.patch) { + return 0; + } + else { + return (ao.patch > bo.patch) ? 1 : -1; + } + } + else { + return (ao.minor > bo.minor) ? 1 : -1; + } + } + else { + return (ao.major > bo.major) ? 
1 : -1; + } + }); + + return result; + }; +}); + diff --git a/docs/docsite/keyword_desc.yml b/docs/docsite/keyword_desc.yml new file mode 100644 index 00000000..8d5a0801 --- /dev/null +++ b/docs/docsite/keyword_desc.yml @@ -0,0 +1,79 @@ +accelerate: "*DEPRECATED*, set to True to use accelerate connection plugin." +accelerate_ipv6: "*DEPRECATED*, set to True to force accelerate plugin to use ipv6 for its connection." +accelerate_port: "*DEPRECATED*, set to override default port use for accelerate connection." +action: "The 'action' to execute for a task, it normally translates into a C(module) or action plugin." +args: "A secondary way to add arguments into a task. Takes a dictionary in which keys map to options and values." +always: List of tasks, in a block, that execute no matter if there is an error in the block or not. +any_errors_fatal: Force any un-handled task errors on any host to propagate to all hosts and end the play. +async: Run a task asynchronously if the C(action) supports this; value is maximum runtime in seconds. +become: Boolean that controls if privilege escalation is used or not on :term:`Task` execution. Implemented by the become plugin. See :ref:`become_plugins`. +become_exe: Path to the executable used to elevate privileges. Implemented by the become plugin. See :ref:`become_plugins`. +become_flags: A string of flag(s) to pass to the privilege escalation program when :term:`become` is True. +become_method: Which method of privilege escalation to use (such as sudo or su). +become_user: "User that you 'become' after using privilege escalation. The remote/login user must have permissions to become this user." +block: List of tasks in a block. +changed_when: "Conditional expression that overrides the task's normal 'changed' status." +check_mode: A boolean that controls if a task is executed in 'check' mode. See :ref:`check_mode_dry`. +collections: | + + List of collection namespaces to search for modules, plugins, and roles. 
See :ref:`collections_using_playbook` + + .. note:: + + Tasks within a role do not inherit the value of ``collections`` from the play. To have a role search a list of collections, use the ``collections`` keyword in ``meta/main.yml`` within a role. + + +connection: Allows you to change the connection plugin used for tasks to execute on the target. See :ref:`using_connection`. +debugger: Enable debugging tasks based on state of the task result. See :ref:`playbook_debugger`. +delay: Number of seconds to delay between retries. This setting is only used in combination with :term:`until`. +delegate_facts: Boolean that allows you to apply facts to a delegated host instead of inventory_hostname. +delegate_to: Host to execute task instead of the target (inventory_hostname). Connection vars from the delegated host will also be used for the task. +diff: "Toggle to make tasks return 'diff' information or not." +environment: A dictionary that gets converted into environment vars to be provided for the task upon execution. This can ONLY be used with modules. This isn't supported for any other type of plugins nor Ansible itself nor its configuration, it just sets the variables for the code responsible for executing the task. This is not a recommended way to pass in confidential data. +fact_path: Set the fact path option for the fact gathering plugin controlled by :term:`gather_facts`. +failed_when: "Conditional expression that overrides the task's normal 'failed' status." +force_handlers: Will force notified handler execution for hosts even if they failed during the play. Will not trigger if the play itself fails. +gather_facts: "A boolean that controls if the play will automatically run the 'setup' task to gather facts for the hosts." +gather_subset: Allows you to pass subset options to the fact gathering plugin controlled by :term:`gather_facts`. +gather_timeout: Allows you to set the timeout for the fact gathering plugin controlled by :term:`gather_facts`. 
+handlers: "A section with tasks that are treated as handlers, these won't get executed normally, only when notified after each section of tasks is complete. A handler's `listen` field is not templatable." +hosts: "A list of groups, hosts or host pattern that translates into a list of hosts that are the play's target." +ignore_errors: Boolean that allows you to ignore task failures and continue with play. It does not affect connection errors. +ignore_unreachable: Boolean that allows you to ignore task failures due to an unreachable host and continue with the play. This does not affect other task errors (see :term:`ignore_errors`) but is useful for groups of volatile/ephemeral hosts. +loop: "Takes a list for the task to iterate over, saving each list element into the ``item`` variable (configurable via loop_control)" +loop_control: | + Several keys here allow you to modify/set loop behaviour in a task. + + .. seealso:: :ref:`loop_control` + +max_fail_percentage: can be used to abort the run after a given percentage of hosts in the current batch has failed. +module_defaults: Specifies default parameter values for modules. +name: "Identifier. Can be used for documentation, or in tasks/handlers." +no_log: Boolean that controls information disclosure. +notify: "List of handlers to notify when the task returns a 'changed=True' status." +order: Controls the sorting of hosts as they are used for executing the play. Possible values are inventory (default), sorted, reverse_sorted, reverse_inventory and shuffle. +poll: Sets the polling interval in seconds for async tasks (default 10s). +port: Used to override the default port used in a connection. +post_tasks: A list of tasks to execute after the :term:`tasks` section. +pre_tasks: A list of tasks to execute before :term:`roles`. +remote_user: User used to log into the target via the connection plugin. +register: Name of variable that will contain task status and module return data. 
+rescue: List of tasks in a :term:`block` that run if there is a task error in the main :term:`block` list. +retries: "Number of retries before giving up in a :term:`until` loop. This setting is only used in combination with :term:`until`." +roles: List of roles to be imported into the play. +run_once: Boolean that will bypass the host loop, forcing the task to attempt to execute on the first host available and afterwards apply any results and facts to all active hosts in the same batch. +serial: | + Explicitly define how Ansible batches the execution of the current play on the play's target + + .. seealso:: :ref:`rolling_update_batch_size` + +strategy: Allows you to choose the strategy plugin to use for the play. +tags: Tags applied to the task or included tasks, this allows selecting subsets of tasks from the command line. +tasks: Main list of tasks to execute in the play, they run after :term:`roles` and before :term:`post_tasks`. +timeout: Time limit for task to execute in, if exceeded Ansible will interrupt and fail the task. +throttle: Limit number of concurrent task runs on task, block and playbook level. This is independent of the forks and serial settings, but cannot be set higher than those limits. For example, if forks is set to 10 and the throttle is set to 15, at most 10 hosts will be operated on in parallel. +until: "This keyword implies a ':term:`retries` loop' that will go on until the condition supplied here is met or we hit the :term:`retries` limit." +vars: Dictionary/map of variables +vars_files: List of files that contain vars to include in the play. +vars_prompt: list of variables to prompt for. +when: Conditional expression, determines if an iteration of a task is run or not. diff --git a/docs/docsite/modules.js b/docs/docsite/modules.js new file mode 100644 index 00000000..103bc2ca --- /dev/null +++ b/docs/docsite/modules.js @@ -0,0 +1,5 @@ +function AnsibleModules($scope) { + $scope.modules = []; + + $scope.orderProp = "module"; +}
\ No newline at end of file diff --git a/docs/docsite/python2.inv b/docs/docsite/python2.inv Binary files differnew file mode 100644 index 00000000..7ea2dc1d --- /dev/null +++ b/docs/docsite/python2.inv diff --git a/docs/docsite/python3.inv b/docs/docsite/python3.inv Binary files differnew file mode 100644 index 00000000..19216788 --- /dev/null +++ b/docs/docsite/python3.inv diff --git a/docs/docsite/requirements.txt b/docs/docsite/requirements.txt new file mode 100644 index 00000000..1290ac1d --- /dev/null +++ b/docs/docsite/requirements.txt @@ -0,0 +1,9 @@ +#pip packages required to build docsite +jinja2 +PyYAML +rstcheck +sphinx==2.1.2 +sphinx-notfound-page +Pygments >= 2.4.0 +straight.plugin # Needed for hacking/build-ansible.py which is the backend build script +antsibull >= 0.15.0 diff --git a/docs/docsite/rst/2.10_index.rst b/docs/docsite/rst/2.10_index.rst new file mode 100644 index 00000000..1ff97d0b --- /dev/null +++ b/docs/docsite/rst/2.10_index.rst @@ -0,0 +1,106 @@ +.. _ansible_documentation: + +Ansible Documentation +===================== + +About Ansible +````````````` + +Ansible is an IT automation tool. It can configure systems, deploy software, and orchestrate more advanced IT tasks such as continuous deployments or zero downtime rolling updates. + +Ansible's main goals are simplicity and ease-of-use. It also has a strong focus on security and reliability, featuring a minimum of moving parts, usage of OpenSSH for transport (with other transports and pull modes as alternatives), and a language that is designed around auditability by humans--even those not familiar with the program. + +We believe simplicity is relevant to all sizes of environments, so we design for busy users of all types: developers, sysadmins, release engineers, IT managers, and everyone in between. Ansible is appropriate for managing all environments, from small setups with a handful of instances to enterprise environments with many thousands of instances. 
+ +You can learn more at `AnsibleFest <https://www.ansible.com/ansiblefest>`_, the annual event for all Ansible contributors, users, and customers hosted by Red Hat. AnsibleFest is the place to connect with others, learn new skills, and find a new friend to automate with. + +Ansible manages machines in an agent-less manner. There is never a question of how to upgrade remote daemons or the problem of not being able to manage systems because daemons are uninstalled. Because OpenSSH is one of the most peer-reviewed open source components, security exposure is greatly reduced. Ansible is decentralized--it relies on your existing OS credentials to control access to remote machines. If needed, Ansible can easily connect with Kerberos, LDAP, and other centralized authentication management systems. + +This documentation covers the version of Ansible noted in the upper left corner of this page. We maintain multiple versions of Ansible and of the documentation, so please be sure you are using the version of the documentation that covers the version of Ansible you're using. For recent features, we note the version of Ansible where the feature was added. + +Ansible releases a new major release approximately twice a year. The core application evolves somewhat conservatively, valuing simplicity in language design and setup. Contributors develop and change modules and plugins, hosted in collections since version 2.10, much more quickly. + +.. toctree:: + :maxdepth: 2 + :caption: Installation, Upgrade & Configuration + + installation_guide/index + porting_guides/porting_guides + +.. toctree:: + :maxdepth: 2 + :caption: Using Ansible + + user_guide/index + +.. toctree:: + :maxdepth: 2 + :caption: Contributing to Ansible + + community/index + +.. toctree:: + :maxdepth: 2 + :caption: Extending Ansible + + dev_guide/index + +.. 
toctree:: + :glob: + :maxdepth: 1 + :caption: Common Ansible Scenarios + + scenario_guides/cloud_guides + scenario_guides/network_guides + scenario_guides/virt_guides + +.. toctree:: + :maxdepth: 2 + :caption: Network Automation + + network/getting_started/index + network/user_guide/index + network/dev_guide/index + +.. toctree:: + :maxdepth: 2 + :caption: Ansible Galaxy + + galaxy/user_guide.rst + galaxy/dev_guide.rst + + +.. toctree:: + :maxdepth: 1 + :caption: Reference & Appendices + + collections/index + collections/all_plugins + reference_appendices/playbooks_keywords + reference_appendices/common_return_values + reference_appendices/config + reference_appendices/general_precedence + reference_appendices/YAMLSyntax + reference_appendices/python_3_support + reference_appendices/interpreter_discovery + reference_appendices/release_and_maintenance + reference_appendices/test_strategies + dev_guide/testing/sanity/index + reference_appendices/faq + reference_appendices/glossary + reference_appendices/module_utils + reference_appendices/special_variables + reference_appendices/tower + reference_appendices/automationhub + reference_appendices/logging + + +.. toctree:: + :maxdepth: 2 + :caption: Release Notes + +.. toctree:: + :maxdepth: 2 + :caption: Roadmaps + + roadmap/index.rst diff --git a/docs/docsite/rst/404.rst b/docs/docsite/rst/404.rst new file mode 100644 index 00000000..4a869d22 --- /dev/null +++ b/docs/docsite/rst/404.rst @@ -0,0 +1,12 @@ +:orphan: + +***** +Oops! +***** + +The version of the Ansible documentation you were looking at doesn't contain that page. + +.. image:: images/cow.png + :alt: Cowsay 404 + +Use the back button to return to the version you were browsing, or use the navigation at left to explore our latest release. Once you're on a non-404 page, you can use the version-changer to select a version. 
diff --git a/docs/docsite/rst/ansible_index.rst b/docs/docsite/rst/ansible_index.rst new file mode 100644 index 00000000..4d3a6011 --- /dev/null +++ b/docs/docsite/rst/ansible_index.rst @@ -0,0 +1,104 @@ +.. _ansible_documentation: +.. + This is the index file for Ansible the package. It gets symlinked to index.rst by the Makefile + +Ansible Documentation +===================== + +About Ansible +````````````` + +Ansible is an IT automation tool. It can configure systems, deploy software, and orchestrate more advanced IT tasks such as continuous deployments or zero downtime rolling updates. + +Ansible's main goals are simplicity and ease-of-use. It also has a strong focus on security and reliability, featuring a minimum of moving parts, usage of OpenSSH for transport (with other transports and pull modes as alternatives), and a language that is designed around auditability by humans--even those not familiar with the program. + +We believe simplicity is relevant to all sizes of environments, so we design for busy users of all types: developers, sysadmins, release engineers, IT managers, and everyone in between. Ansible is appropriate for managing all environments, from small setups with a handful of instances to enterprise environments with many thousands of instances. + +You can learn more at `AnsibleFest <https://www.ansible.com/ansiblefest>`_, the annual event for all Ansible contributors, users, and customers hosted by Red Hat. AnsibleFest is the place to connect with others, learn new skills, and find a new friend to automate with. + +Ansible manages machines in an agent-less manner. There is never a question of how to upgrade remote daemons or the problem of not being able to manage systems because daemons are uninstalled. Because OpenSSH is one of the most peer-reviewed open source components, security exposure is greatly reduced. Ansible is decentralized--it relies on your existing OS credentials to control access to remote machines. 
If needed, Ansible can easily connect with Kerberos, LDAP, and other centralized authentication management systems. + +This documentation covers the version of Ansible noted in the upper left corner of this page. We maintain multiple versions of Ansible and of the documentation, so please be sure you are using the version of the documentation that covers the version of Ansible you're using. For recent features, we note the version of Ansible where the feature was added. + +Ansible releases a new major release approximately twice a year. The core application evolves somewhat conservatively, valuing simplicity in language design and setup. Contributors develop and change modules and plugins, hosted in collections since version 2.10, much more quickly. + +.. toctree:: + :maxdepth: 2 + :caption: Installation, Upgrade & Configuration + + installation_guide/index + porting_guides/porting_guides + +.. toctree:: + :maxdepth: 2 + :caption: Using Ansible + + user_guide/index + +.. toctree:: + :maxdepth: 2 + :caption: Contributing to Ansible + + community/index + +.. toctree:: + :maxdepth: 2 + :caption: Extending Ansible + + dev_guide/index + +.. toctree:: + :glob: + :maxdepth: 1 + :caption: Common Ansible Scenarios + + scenario_guides/cloud_guides + scenario_guides/network_guides + scenario_guides/virt_guides + +.. toctree:: + :maxdepth: 2 + :caption: Network Automation + + network/getting_started/index + network/user_guide/index + network/dev_guide/index + +.. toctree:: + :maxdepth: 2 + :caption: Ansible Galaxy + + galaxy/user_guide.rst + galaxy/dev_guide.rst + + +.. 
toctree:: + :maxdepth: 1 + :caption: Reference & Appendices + + collections/index + collections/all_plugins + reference_appendices/playbooks_keywords + reference_appendices/common_return_values + reference_appendices/config + reference_appendices/general_precedence + reference_appendices/YAMLSyntax + reference_appendices/python_3_support + reference_appendices/interpreter_discovery + reference_appendices/release_and_maintenance + reference_appendices/test_strategies + dev_guide/testing/sanity/index + reference_appendices/faq + reference_appendices/glossary + reference_appendices/module_utils + reference_appendices/special_variables + reference_appendices/tower + reference_appendices/automationhub + reference_appendices/logging + + +.. toctree:: + :maxdepth: 2 + :caption: Roadmaps + + roadmap/ansible_roadmap_index.rst diff --git a/docs/docsite/rst/api/index.rst b/docs/docsite/rst/api/index.rst new file mode 100644 index 00000000..27afbe42 --- /dev/null +++ b/docs/docsite/rst/api/index.rst @@ -0,0 +1,107 @@ +:orphan: + +************************* +Ansible API Documentation +************************* + +The Ansible API is under construction. These stub references for attributes, classes, functions, methods, and modules will be documented in future. +The :ref:`module utilities <ansible.module_utils>` included in ``ansible.module_utils.basic`` and ``AnsibleModule`` are documented under Reference & Appendices. + +.. contents:: + :local: + +Attributes +========== + +.. py:attribute:: AnsibleModule.params + +The parameters accepted by the module. + +.. py:attribute:: ansible.module_utils.basic.ANSIBLE_VERSION + +.. py:attribute:: ansible.module_utils.basic.SELINUX_SPECIAL_FS + +Deprecated in favor of ansibleModule._selinux_special_fs. + +.. py:attribute:: AnsibleModule.ansible_version + +.. py:attribute:: AnsibleModule._debug + +.. py:attribute:: AnsibleModule._diff + +.. py:attribute:: AnsibleModule.no_log + +.. 
py:attribute:: AnsibleModule._selinux_special_fs + +(formerly ansible.module_utils.basic.SELINUX_SPECIAL_FS) + +.. py:attribute:: AnsibleModule._syslog_facility + +.. py:attribute:: self.playbook + +.. py:attribute:: self.play + +.. py:attribute:: self.task + +.. py:attribute:: sys.path + + +Classes +======= + +.. py:class:: ``ansible.module_utils.basic.AnsibleModule`` + :noindex: + +The basic utilities for AnsibleModule. + +.. py:class:: AnsibleModule + +The main class for an Ansible module. + + +Functions +========= + +.. py:function:: ansible.module_utils.basic._load_params() + +Load parameters. + + +Methods +======= + +.. py:method:: AnsibleModule.log() + +Logs the output of Ansible. + +.. py:method:: AnsibleModule.debug() + +Debugs Ansible. + +.. py:method:: Ansible.get_bin_path() + +Retrieves the path for executables. + +.. py:method:: AnsibleModule.run_command() + +Runs a command within an Ansible module. + +.. py:method:: module.fail_json() + +Exits and returns a failure. + +.. py:method:: module.exit_json() + +Exits and returns output. + + +Modules +======= + +.. py:module:: ansible.module_utils + +.. py:module:: ansible.module_utils.basic + :noindex: + + +.. py:module:: ansible.module_utils.url diff --git a/docs/docsite/rst/collections/all_plugins.rst b/docs/docsite/rst/collections/all_plugins.rst new file mode 100644 index 00000000..35232f7d --- /dev/null +++ b/docs/docsite/rst/collections/all_plugins.rst @@ -0,0 +1,11 @@ +.. _all_modules_and_plugins: + +Indexes of all modules and plugins +---------------------------------- + +.. toctree:: + :maxdepth: 1 + :caption: Plugin indexes + :glob: + + index_* diff --git a/docs/docsite/rst/community/code_of_conduct.rst b/docs/docsite/rst/community/code_of_conduct.rst new file mode 100644 index 00000000..9462618d --- /dev/null +++ b/docs/docsite/rst/community/code_of_conduct.rst @@ -0,0 +1,146 @@ +.. _code_of_conduct: + +************************* +Community Code of Conduct +************************* + +.. 
contents:: Topics + +Every community can be strengthened by a diverse variety of viewpoints, insights, +opinions, skillsets, and skill levels. However, with diversity comes the potential for +disagreement and miscommunication. The purpose of this Code of Conduct is to ensure that +disagreements and differences of opinion are conducted respectfully and on their own +merits, without personal attacks or other behavior that might create an unsafe or +unwelcoming environment. + +These policies are not designed to be a comprehensive set of Things You Cannot Do. We ask +that you treat your fellow community members with respect and courtesy, and in general, +Don't Be A Jerk. This Code of Conduct is meant to be followed in spirit as much as in +letter and is not exhaustive. + +All Ansible events and participants therein are governed by this Code of Conduct and +anti-harassment policy. We expect organizers to enforce these guidelines throughout all events, +and we expect attendees, speakers, sponsors, and volunteers to help ensure a safe +environment for our whole community. Specifically, this Code of Conduct covers +participation in all Ansible-related forums and mailing lists, code and documentation +contributions, public IRC channels, private correspondence, and public meetings. + +Ansible community members are... + +**Considerate** + +Contributions of every kind have far-ranging consequences. Just as your work depends on +the work of others, decisions you make surrounding your contributions to the Ansible +community will affect your fellow community members. You are strongly encouraged to take +those consequences into account while making decisions. + +**Patient** + +Asynchronous communication can come with its own frustrations, even in the most responsive +of communities. Please remember that our community is largely built on volunteered time, +and that questions, contributions, and requests for support may take some time to receive +a response. 
Repeated "bumps" or "reminders" in rapid succession are not good displays of +patience. Additionally, it is considered poor manners to ping a specific person with +general questions. Pose your question to the community as a whole, and wait patiently for +a response. + +**Respectful** + +Every community inevitably has disagreements, but remember that it is +possible to disagree respectfully and courteously. Disagreements are never an excuse for +rudeness, hostility, threatening behavior, abuse (verbal or physical), or personal attacks. + +**Kind** + +Everyone should feel welcome in the Ansible community, regardless of their background. +Please be courteous, respectful and polite to fellow community members. Do not make or +post offensive comments related to skill level, gender, gender identity or expression, +sexual orientation, disability, physical appearance, body size, race, or religion. +Sexualized images or imagery, real or implied violence, intimidation, oppression, +stalking, sustained disruption of activities, publishing the personal information of +others without explicit permission to do so, unwanted physical contact, and unwelcome +sexual attention are all strictly prohibited. Additionally, you are encouraged not to +make assumptions about the background or identity of your fellow community members. + +**Inquisitive** + +The only stupid question is the one that does not get asked. We +encourage our users to ask early and ask often. Rather than asking whether you can ask a +question (the answer is always yes!), instead, simply ask your question. You are +encouraged to provide as many specifics as possible. Code snippets in the form of Gists or +other paste site links are almost always needed in order to get the most helpful answers. +Refrain from pasting multiple lines of code directly into the IRC channels - instead use +gist.github.com or another paste site to provide code snippets. 
+ +**Helpful** + +The Ansible community is committed to being a welcoming environment for all users, +regardless of skill level. We were all beginners once upon a time, and our community +cannot grow without an environment where new users feel safe and comfortable asking questions. +It can become frustrating to answer the same questions repeatedly; however, community +members are expected to remain courteous and helpful to all users equally, regardless of +skill or knowledge level. Avoid providing responses that prioritize snideness and snark over +useful information. At the same time, everyone is expected to read the provided +documentation thoroughly. We are happy to answer questions, provide strategic guidance, +and suggest effective workflows, but we are not here to do your job for you. + +Anti-harassment policy +====================== + +Harassment includes (but is not limited to) all of the following behaviors: + +- Offensive comments related to gender (including gender expression and identity), age, sexual orientation, disability, physical appearance, body size, race, and religion +- Derogatory terminology including words commonly known to be slurs +- Posting sexualized images or imagery in public spaces +- Deliberate intimidation +- Stalking +- Posting others' personal information without explicit permission +- Sustained disruption of talks or other events +- Inappropriate physical contact +- Unwelcome sexual attention + +Participants asked to stop any harassing behavior are expected to comply immediately. +Sponsors are also subject to the anti-harassment policy. In particular, sponsors should +not use sexualized images, activities, or other material. Meetup organizing staff and +other volunteer organizers should not use sexualized attire or otherwise create a +sexualized environment at community events. 
+ +In addition to the behaviors outlined above, continuing to behave a certain way after you +have been asked to stop also constitutes harassment, even if that behavior is not +specifically outlined in this policy. It is considerate and respectful to stop doing +something after you have been asked to stop, and all community members are expected to +comply with such requests immediately. + +Policy violations +================= + +Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by +contacting `codeofconduct@ansible.com <mailto:codeofconduct@ansible.com>`_, to any channel +operator in the community IRC channels, or to the local organizers of an event. Meetup +organizers are encouraged to prominently display points of contact for reporting unacceptable +behavior at local events. + +If a participant engages in harassing behavior, the meetup organizers may take any action +they deem appropriate. These actions may include but are not limited to warning the +offender, expelling the offender from the event, and barring the offender from future +community events. + +Organizers will be happy to help participants contact security or local law enforcement, +provide escorts to an alternate location, or otherwise assist those experiencing +harassment to feel safe for the duration of the meetup. We value the safety and well-being +of our community members and want everyone to feel welcome at our events, both online and +offline. + +We expect all participants, organizers, speakers, and attendees to follow these policies at +all of our event venues and event-related social events. + +The Ansible Community Code of Conduct is licensed under the Creative Commons +Attribution-Share Alike 3.0 license. 
Our Code of Conduct was adapted from Codes of Conduct +of other open source projects, including: + +* Contributor Covenant +* Elastic +* The Fedora Project +* OpenStack +* Puppet Labs +* Ubuntu diff --git a/docs/docsite/rst/community/committer_guidelines.rst b/docs/docsite/rst/community/committer_guidelines.rst new file mode 100644 index 00000000..2caa3a56 --- /dev/null +++ b/docs/docsite/rst/community/committer_guidelines.rst @@ -0,0 +1,156 @@ +.. _community_committer_guidelines: + +********************* +Committers Guidelines +********************* + +These are the guidelines for people with commit privileges on the Ansible GitHub repository. Committers are essentially acting as members of the Ansible Core team, although not necessarily as employees of Ansible and Red Hat. Please read the guidelines before you commit. + +These guidelines apply to everyone. At the same time, this ISN'T a process document. So just use good judgment. You've been given commit access because we trust your judgment. + +That said, use the trust wisely. + +If you abuse the trust and break components and builds, and so on, the trust level falls and you may be asked not to commit or you may lose your commit privileges. + +Features, high-level design, and roadmap +======================================== + +As a core team member, you are an integral part of the team that develops the :ref:`roadmap <roadmaps>`. Please be engaged, and push for the features and fixes that you want to see. Also keep in mind that Red Hat, as a company, will commit to certain features, fixes, APIs, and so on, for various releases. Red Hat, the company, and the Ansible team must get these changes completed and released as scheduled. Obligations to users, the community, and customers must come first. Because of these commitments, a feature you want to develop yourself may not get into a release if it affects a lot of other parts within Ansible. 
+ +Any other new features and changes to high level design should go through the proposal process (TBD), to ensure the community and core team have had a chance to review the idea and approve it. The core team has sole responsibility for merging new features based on proposals. + +Our workflow on GitHub +====================== + +As a committer, you may already know this, but our workflow forms a lot of our team policies. Please ensure you're aware of the following workflow steps: + +* Fork the repository upon which you want to do some work to your own personal repository +* Work on the specific branch upon which you need to commit +* Create a Pull Request back to the Ansible repository and tag the people you would like to review; assign someone as the primary "owner" of your request +* Adjust code as necessary based on the Comments provided +* Ask someone on the Core Team to do a final review and merge + +Addendum to workflow for committers: +------------------------------------ + +The Core Team is aware that this can be a difficult process at times. Sometimes, the team breaks the rules by making direct commits or merging their own PRs. This section is a set of guidelines. If you're changing a comma in a doc, or making a very minor change, you can use your best judgement. This is another trust thing. The process is critical for any major change, but for little things or getting something done quickly, use your best judgement and make sure people on the team are aware of your work. + +Roles on Core +============= +* Core committers: Fine to do PRs for most things, but we should have a timebox. Hanging PRs may merge on the judgement of these devs. +* :ref:`Module maintainers <maintainers>`: Module maintainers own specific modules and have indirect commit access through the current module PR mechanisms. 
+ +General rules +============= +Individuals with direct commit access to ansible/ansible are entrusted with powers that allow them to do a broad variety of things--probably more than we can write down. Rather than rules, treat these as general *guidelines*; individuals with this power are expected to use their best judgement. + +* Don't + + - Commit directly. + - Merge your own PRs. Someone else should have a chance to review and approve the PR merge. If you are a Core Committer, you have a small amount of leeway here for very minor changes. + - Forget about alternate environments. Consider the alternatives--yes, people have bad environments, but they are the ones who need us the most. + - Drag your community team members down. Always discuss the technical merits, but you should never address the person's limitations (you can later go for beers and call them idiots, but not in IRC/GitHub/and so on). + - Forget about the maintenance burden. Some things are really cool to have, but they might not be worth shoehorning in if the maintenance burden is too great. + - Break playbooks. Always keep backwards compatibility in mind. + - Forget to keep it simple. Complexity breeds all kinds of problems. + +* Do + + - Squash, avoid merges whenever possible, use GitHub's squash commits or cherry pick if needed (bisect thanks you). + - Be active. Committers who have no activity on the project (through merges, triage, commits, and so on) will have their permissions suspended. + - Consider backwards compatibility (goes back to "don't break existing playbooks"). + - Write tests. PRs with tests are looked at with more priority than PRs without tests that should have them included. While not all changes require tests, be sure to add them for bug fixes or functionality changes. + - Discuss with other committers, especially when you are unsure of something. + - Document! 
If your PR is a new feature or a change to behavior, make sure you've updated all associated documentation or have notified the right people to do so. It also helps to add the version of ``ansible-base`` against which this documentation is compatible (to avoid confusion between stable and devel docs, for backwards compatibility, and so on). + - Consider scope, sometimes a fix can be generalized + - Keep it simple, then things are maintainable, debuggable and intelligible. + +Committers are expected to continue to follow the same community and contribution guidelines followed by the rest of the Ansible community. + + +People +====== + +Individuals who've been asked to become a part of this group have generally been contributing in significant ways to the Ansible community for some time. Should they agree, they are requested to add their names and GitHub IDs to this file, in the section below, through a pull request. Doing so indicates that these individuals agree to act in the ways that their fellow committers trust that they will act. 
+ ++---------------------+----------------------+--------------------+----------------------+ +| Name | GitHub ID | IRC Nick | Other | ++=====================+======================+====================+======================+ +| James Cammarata | jimi-c | jimi | | ++---------------------+----------------------+--------------------+----------------------+ +| Brian Coca | bcoca | bcoca | | ++---------------------+----------------------+--------------------+----------------------+ +| Matt Davis | nitzmahone | nitzmahone | | ++---------------------+----------------------+--------------------+----------------------+ +| Toshio Kuratomi | abadger | abadger1999 | | ++---------------------+----------------------+--------------------+----------------------+ +| Jason McKerr | mckerrj | newtMcKerr | | ++---------------------+----------------------+--------------------+----------------------+ +| Robyn Bergeron | robynbergeron | rbergeron | | ++---------------------+----------------------+--------------------+----------------------+ +| Greg DeKoenigsberg | gregdek | gregdek | | ++---------------------+----------------------+--------------------+----------------------+ +| Monty Taylor | emonty | mordred | | ++---------------------+----------------------+--------------------+----------------------+ +| Matt Martz | sivel | sivel | | ++---------------------+----------------------+--------------------+----------------------+ +| Nate Case | qalthos | Qalthos | | ++---------------------+----------------------+--------------------+----------------------+ +| James Tanner | jctanner | jtanner | | ++---------------------+----------------------+--------------------+----------------------+ +| Peter Sprygada | privateip | privateip | | ++---------------------+----------------------+--------------------+----------------------+ +| Abhijit Menon-Sen | amenonsen | crab | | ++---------------------+----------------------+--------------------+----------------------+ +| Michael Scherer | mscherer | 
misc | | ++---------------------+----------------------+--------------------+----------------------+ +| René Moser | resmo | resmo | | ++---------------------+----------------------+--------------------+----------------------+ +| David Shrewsbury | Shrews | Shrews | | ++---------------------+----------------------+--------------------+----------------------+ +| Sandra Wills | docschick | docschick | | ++---------------------+----------------------+--------------------+----------------------+ +| Graham Mainwaring | ghjm | | | ++---------------------+----------------------+--------------------+----------------------+ +| Chris Houseknecht | chouseknecht | | | ++---------------------+----------------------+--------------------+----------------------+ +| Trond Hindenes | trondhindenes | | | ++---------------------+----------------------+--------------------+----------------------+ +| Jon Hawkesworth | jhawkesworth | jhawkesworth | | ++---------------------+----------------------+--------------------+----------------------+ +| Will Thames | willthames | willthames | | ++---------------------+----------------------+--------------------+----------------------+ +| Adrian Likins | alikins | alikins | | ++---------------------+----------------------+--------------------+----------------------+ +| Dag Wieers | dagwieers | dagwieers | dag@wieers.com | ++---------------------+----------------------+--------------------+----------------------+ +| Tim Rupp | caphrim007 | caphrim007 | | ++---------------------+----------------------+--------------------+----------------------+ +| Sloane Hertel | s-hertel | shertel | | ++---------------------+----------------------+--------------------+----------------------+ +| Sam Doran | samdoran | samdoran | | ++---------------------+----------------------+--------------------+----------------------+ +| Matt Clay | mattclay | mattclay | | ++---------------------+----------------------+--------------------+----------------------+ +| Martin Krizek 
| mkrizek | mkrizek | | ++---------------------+----------------------+--------------------+----------------------+ +| Ganesh Nalawade | ganeshrn | ganeshrn | | ++---------------------+----------------------+--------------------+----------------------+ +| Trishna Guha | trishnaguha | trishnag | | ++---------------------+----------------------+--------------------+----------------------+ +| Andrew Gaffney | agaffney | agaffney | | ++---------------------+----------------------+--------------------+----------------------+ +| Jordan Borean | jborean93 | jborean93 | | ++---------------------+----------------------+--------------------+----------------------+ +| Abhijeet Kasurde | Akasurde | akasurde | | ++---------------------+----------------------+--------------------+----------------------+ +| Adam Miller | maxamillion | maxamillion | | ++---------------------+----------------------+--------------------+----------------------+ +| Sviatoslav Sydorenko| webknjaz | webknjaz | | ++---------------------+----------------------+--------------------+----------------------+ +| Alicia Cozine | acozine | acozine | | ++---------------------+----------------------+--------------------+----------------------+ +| Sandra McCann | samccann | samccann | | ++---------------------+----------------------+--------------------+----------------------+ +| Felix Fontein | felixfontein | felixfontein | felix@fontein.de | ++---------------------+----------------------+--------------------+----------------------+ diff --git a/docs/docsite/rst/community/communication.rst b/docs/docsite/rst/community/communication.rst new file mode 100644 index 00000000..29f8898f --- /dev/null +++ b/docs/docsite/rst/community/communication.rst @@ -0,0 +1,103 @@ +.. _communication: + +************* +Communicating +************* + +.. contents:: + :local: + +Code of Conduct +=============== + +Please read and understand the :ref:`code_of_conduct`. 
+ +Mailing list information +======================== + +Ansible has several mailing lists. Your first post to the mailing list will be moderated (to reduce spam), so please allow up to a day or so for your first post to appear. + +* `Ansible Announce list <https://groups.google.com/forum/#!forum/ansible-announce>`_ is a read-only list that shares information about new releases of Ansible, and also infrequent event information, such as announcements about an upcoming AnsibleFest, which is our official conference series. Worth subscribing to! +* `Ansible AWX List <https://groups.google.com/forum/#!forum/awx-project>`_ is for `Ansible AWX <https://github.com/ansible/awx>`_, the upstream version of `Red Hat Ansible Tower <https://www.ansible.com/products/tower>`_ +* `Ansible Container List <https://groups.google.com/forum/#!forum/ansible-container>`_ is for users and developers of the Ansible Container project. +* `Ansible Development List <https://groups.google.com/forum/#!forum/ansible-devel>`_ is for learning how to develop on Ansible, asking about prospective feature design, or discussions about extending Ansible or features in progress. +* `Ansible Lockdown List <https://groups.google.com/forum/#!forum/ansible-lockdown>`_ is for all things related to Ansible Lockdown projects, including DISA STIG automation and CIS Benchmarks. +* `Ansible Outreach List <https://groups.google.com/forum/#!forum/ansible-outreach>`_ helps with promoting Ansible and `Ansible Meetups <https://ansible.meetup.com/>`_ +* `Ansible Project List <https://groups.google.com/forum/#!forum/ansible-project>`_ is for sharing Ansible tips, answering questions, and general user discussion. +* `Molecule Discussions <https://github.com/ansible-community/molecule/discussions>`_ is designed to aid with the development and testing of Ansible roles with Molecule. + +To subscribe to a group from a non-Google account, you can send an email to the subscription address requesting the subscription. 
For example: ``ansible-devel+subscribe@googlegroups.com`` + +.. _communication_irc: + +IRC channels +============ + +Ansible has several IRC channels on `Freenode <https://freenode.net/>`_. + +Our IRC channels may require you to register your nickname. If you receive an error when you connect, see `Freenode's Nickname Registration guide <https://freenode.net/kb/answer/registration>`_ for instructions. + +To find all ``ansible`` specific channels on a freenode network, use the following command in your IRC client:: + + /msg alis LIST #ansible* -min 5 + +as described in `freenode docs <https://freenode.net/kb/answer/findingchannels>`_. + +General channels +---------------- + +- ``#ansible`` - For general use questions and support. +- ``#ansible-devel`` - For discussions on developer topics and code related to features or bugs. +- ``#ansible-meeting`` - For public community meetings. We will generally announce these on one or more of the above mailing lists. See the `meeting schedule and agenda page <https://github.com/ansible/community/blob/master/meetings/README.md>`_ + +.. _working_group_list: + +Working groups +-------------- + +Many of our community `Working Groups <https://github.com/ansible/community/wiki#working-groups>`_ meet on Freenode IRC channels. If you want to get involved in a working group, join the channel where it meets or comment on the agenda. 
+ +- `Amazon (AWS) Working Group <https://github.com/ansible/community/wiki/AWS>`_ - ``#ansible-aws`` +- `Ansible Lockdown Working Group <https://github.com/ansible/community/wiki/Lockdown>`_ | `gh/ansible/ansible-lockdown <https://github.com/ansible/ansible-lockdown>`_ - ``#ansible-lockdown`` - Security playbooks/roles +- `AWX Working Group <https://github.com/ansible/awx>`_ - ``#ansible-awx`` - Upstream for Ansible Tower +- `Azure Working Group <https://github.com/ansible/community/wiki/Azure>`_ - ``#ansible-azure`` +- `Community Working Group <https://github.com/ansible/community/wiki/Community>`_ - ``#ansible-community`` - Including Meetups +- `Container Working Group <https://github.com/ansible/community/wiki/Container>`_ - ``#ansible-container`` +- `Contributor Experience Working Group <https://github.com/ansible/community/wiki/Contributor-Experience>`_ - ``#ansible-community`` +- `Docker Working Group <https://github.com/ansible/community/wiki/Docker>`_ - ``#ansible-devel`` +- `Documentation Working Group <https://github.com/ansible/community/wiki/Docs>`_ - ``#ansible-docs`` +- `Galaxy Working Group <https://github.com/ansible/community/wiki/Galaxy>`_ - ``#ansible-galaxy`` +- `JBoss Working Group <https://github.com/ansible/community/wiki/JBoss>`_ - ``#ansible-jboss`` +- `Kubernetes Working Group <https://github.com/ansible/community/wiki/Kubernetes>`_ - ``#ansible-kubernetes`` +- `Lightbulb Training <https://github.com/ansible/lightbulb>`_ - ``#ansible-lightbulb`` - Ansible training +- `Linode Working Group <https://github.com/ansible/community/wiki/Linode>`_ - ``#ansible-linode`` +- `Molecule Working Group <https://github.com/ansible/community/wiki/Molecule>`_ | `molecule.io <https://molecule.readthedocs.io>`_ - ``#ansible-molecule`` - testing platform for Ansible playbooks and roles +- `Network Working Group <https://github.com/ansible/community/wiki/Network>`_ - ``#ansible-network`` +- `Remote Management Working Group 
<https://github.com/ansible/community/issues/409>`_ - ``#ansible-devel`` +- `Testing Working Group <https://github.com/ansible/community/wiki/Testing>`_ - ``#ansible-devel`` +- `VMware Working Group <https://github.com/ansible/community/wiki/VMware>`_ - ``#ansible-vmware`` +- `Windows Working Group <https://github.com/ansible/community/wiki/Windows>`_ - ``#ansible-windows`` + +Want to `form a new Working Group <https://github.com/ansible/community/blob/master/WORKING-GROUPS.md>`_? + +Regional and Language-specific channels +--------------------------------------- + +- ``#ansible-es`` - Channel for Spanish speaking Ansible community. +- ``#ansible-eu`` - Channel for the European Ansible Community. +- ``#ansible-fr`` - Channel for French speaking Ansible community. +- ``#ansiblezh`` - Channel for Zurich/Swiss Ansible community. + +IRC meetings +------------ + +The Ansible community holds regular IRC meetings on various topics, and anyone who is interested is invited to +participate. For more information about Ansible meetings, consult the `meeting schedule and agenda page <https://github.com/ansible/community/blob/master/meetings/README.md>`_. + +Ansible Tower support questions +=============================== + +Red Hat Ansible `Tower <https://www.ansible.com/products/tower>`_ is a UI, Server, and REST endpoint for Ansible. +The Red Hat Ansible Automation subscription contains support for Ansible, Ansible Tower, Ansible Automation for Networking, and more. + +If you have a question about Ansible Tower, visit `Red Hat support <https://access.redhat.com/products/ansible-tower-red-hat/>`_ rather than using the IRC channel or the general project mailing list. 
diff --git a/docs/docsite/rst/community/community.rst b/docs/docsite/rst/community/community.rst new file mode 100644 index 00000000..5dadb7bc --- /dev/null +++ b/docs/docsite/rst/community/community.rst @@ -0,0 +1,6 @@ +:orphan: + +Community Information & Contributing +```````````````````````````````````` + +This page is deprecated. Please see the updated :ref:`Ansible Community Guide <ansible_community_guide>`. diff --git a/docs/docsite/rst/community/contributing_maintained_collections.rst b/docs/docsite/rst/community/contributing_maintained_collections.rst new file mode 100644 index 00000000..f508d145 --- /dev/null +++ b/docs/docsite/rst/community/contributing_maintained_collections.rst @@ -0,0 +1,271 @@ + +.. _contributing_maintained_collections: + +*********************************************** +Contributing to Ansible-maintained Collections +*********************************************** + +The Ansible team welcomes community contributions to the collections maintained by Red Hat Ansible Engineering. This section describes how you can open issues and create PRs with the required testing before your PR can be merged. + +.. contents:: + :local: + +Ansible-maintained collections +================================= + +The following table shows: + +* **Ansible-maintained collection** - Click the link to the collection on Galaxy, then click the ``repo`` button in Galaxy to find the GitHub repository for this collection. +* **Related community collection** - Collection that holds community-created content (modules, roles, and so on) that may also be of interest to a user of the Ansible-maintained collection. You can, for example, add new modules to the community collection as a technical preview before the content is moved to the Ansible-maintained collection. +* **Sponsor** - Working group that manages the collections. You can join the meetings to discuss important proposed changes and enhancements to the collections. 
+* **Test requirements** - Testing required for any new or changed content for the Ansible-maintained collection. +* **Developer details** - Describes whether the Ansible-maintained collection accepts direct community issues and PRs for existing collection content, as well as more specific developer guidelines based on the collection type. + + +.. _ansible-collection-table: + +.. raw:: html + + <style> + /* Style for this single table. Add delimiters between header columns */ + table#ansible-collection-table th { + border-width: 1px; + border-color: #dddddd /*rgb(225, 228, 229)*/; + border-style: solid; + text-align: center; + padding: 5px; + background-color: #eeeeee; + } + tr, td { + border-width: 1px; + border-color: rgb(225, 228, 229); + border-style: solid; + text-align: center; + padding: 5px; + + } + </style> + + <table id="ansible-collection-table"> + <tr> + <th colspan="3">Collection details</th> + <th colspan="4">Test requirements: Ansible collections</th> + <th colspan="2">Developer details</th> + </tr> + <tr> + <th>Ansible collection</th> + <th>Related community collection</th> + <th>Sponsor</th> + <th>Sanity</th> + <th>Unit</th> + <th>Integration</th> + <th>CI Platform</th> + <th>Open to PRs*</th> + <th>Guidelines</th> + </tr> + <tr> + <td><a href="https://galaxy.ansible.com/amazon/aws">amazon.aws</a></td> + <td><a href="https://galaxy.ansible.com/community/aws">community.aws</a></td> + <td><a href="https://github.com/ansible/community/tree/master/group-aws">Cloud</a></td> + <td>✓**</td> + <td>**</td> + <td>✓</td> + <td>Shippable</td> + <td>✓</td> + <td><a href="https://docs.ansible.com/ansible/devel/dev_guide/platforms/aws_guidelines.html">AWS guide</a></td> + </tr> + <tr> + <td><a href="https://galaxy.ansible.com/ansible/netcommon">ansible.netcommon***</a></td> + <td><a href="https://galaxy.ansible.com/community/network">community.network</a></td> + <td><a href="https://github.com/ansible/community/wiki/Network">Network</a></td> + <td>✓</td> + 
<td>✓</td> + <td>✓</td> + <td>Zuul</td> + <td>✓</td> + <td><a href="https://docs.ansible.com/ansible/devel/network/dev_guide/index.html">Network guide</a></td> + </tr> + <tr> + <td><a href="https://galaxy.ansible.com/ansible/posix">ansible.posix</a></td> + <td><a href="https://galaxy.ansible.com/community/general">community.general</a></td> + <td>Linux</a></td> + <td>✓</td> + <td></td> + <td></td> + <td>Shippable</td> + <td>✓</td> + <td><a href="https://docs.ansible.com/ansible/latest/dev_guide/index.html">Developer guide</a></td> + </tr> + <tr> + <td><a href="https://galaxy.ansible.com/ansible/windows">ansible.windows</a></td> + <td><a href="https://galaxy.ansible.com/community/windows">community.windows</a></td> + <td><a href="https://github.com/ansible/community/wiki/Windows">Windows</a></td> + <td>✓</td> + <td>✓****</td> + <td>✓</td> + <td>Shippable</td> + <td>✓</td> + <td><a href="https://docs.ansible.com/ansible/devel/dev_guide/developing_modules_general_windows.html#developing-modules-general-windows">Windows guide</a></td> + </tr> + <tr> + <td><a href="https://galaxy.ansible.com/arista/eos">arista.eos</a></td> + <td><a href="https://galaxy.ansible.com/community/network">community.network</a></td> + <td><a href="https://github.com/ansible/community/wiki/Network">Network</a></td> + <td>✓</td> + <td>✓</td> + <td>✓</td> + <td>Zuul</td> + <td>✓</td> + <td><a href="https://docs.ansible.com/ansible/devel/network/dev_guide/index.html">Network guide</a></td> + </tr> + <tr> + <td><a href="https://galaxy.ansible.com/cisco/asa">cisco.asa</a></td> + <td><a href="https://github.com/ansible-collections/community.asa">community.asa</a></td> + <td><a href="https://github.com/ansible/community/wiki/Security-Automation">Security</a></td> + <td>✓</td> + <td>✓</td> + <td>✓</td> + <td>Zuul</td> + <td>✓</td> + <td><a href="https://docs.ansible.com/ansible/latest/dev_guide/index.html">Developer guide</a></td> + </tr> + <tr> + <td><a 
href="https://galaxy.ansible.com/cisco/ios">cisco.ios</a></td> + <td><a href="https://galaxy.ansible.com/community/network">community.network</a></td> + <td><a href="https://github.com/ansible/community/wiki/Network">Network</a></td> + <td>✓</td> + <td>✓</td> + <td>✓</td> + <td>Zuul</td> + <td>✓</td> + <td><a href="https://docs.ansible.com/ansible/devel/network/dev_guide/index.html">Network guide</a></td> + </tr> + <tr> + <td><a href="https://galaxy.ansible.com/cisco/iosxr">cisco.iosxr</a></td> + <td><a href="https://galaxy.ansible.com/community/network">community.network</a></td> + <td><a href="https://github.com/ansible/community/wiki/Network">Network</a></td> + <td>✓</td> + <td>✓</td> + <td>✓</td> + <td>Zuul</td> + <td>✓</td> + <td><a href="https://docs.ansible.com/ansible/devel/network/dev_guide/index.html">Network guide</a></td> + </tr> + <tr> + <td><a href="https://galaxy.ansible.com/cisco/nxos">cisco.nxos</a></td> + <td><a href="https://galaxy.ansible.com/community/network">community.network</a></td> + <td><a href="https://github.com/ansible/community/wiki/Network">Network</a></td> + <td>✓</td> + <td>✓</td> + <td>✓</td> + <td>Zuul</td> + <td>✓</td> + <td><a href="https://docs.ansible.com/ansible/devel/network/dev_guide/index.html">Network guide</a></td> + </tr> + <tr> + <td><a href="https://galaxy.ansible.com/ibm/qradar">ibm.qradar</a></td> + <td><a href="https://github.com/ansible-collections/community.qradar">community.qradar</a></td> + <td><a href="https://github.com/ansible/community/wiki/Security-Automation">Security</a></td> + <td>✓</td> + <td></td> + <td>✓</td> + <td>Zuul</td> + <td>✓</td> + <td><a href="https://docs.ansible.com/ansible/latest/dev_guide/index.html">Developer guide</a></td> + </tr> + <tr> + <td><a href="https://galaxy.ansible.com/junipernetworks/junos">junipernetworks.junos</a></td> + <td><a href="https://galaxy.ansible.com/community/network">community.network</a></td> + <td><a 
href="https://github.com/ansible/community/wiki/Network">Network</a></td> + <td>✓</td> + <td>✓</td> + <td>✓</td> + <td>Zuul</td> + <td>✓</td> + <td><a href="https://docs.ansible.com/ansible/devel/network/dev_guide/index.html">Network guide</a></td> + </tr> + <tr> + <td><a href="https://galaxy.ansible.com/openvswitch/openvswitch">openvswitch.openvswitch</a></td> + <td><a href="https://galaxy.ansible.com/community/network">community.network</a></td> + <td><a href="https://github.com/ansible/community/wiki/Network">Network</a></td> + <td>✓</td> + <td>✓</td> + <td>✓</td> + <td>Zuul</td> + <td>✓</td> + <td><a href="https://docs.ansible.com/ansible/devel/network/dev_guide/index.html">Network guide</a></td> + </tr> + <tr> + <td><a href="https://github.com/ansible-collections/splunk.es">splunk.es</a></td> + <td><a href="https://github.com/ansible-collections/community.es">community.es</a></td> + <td><a href="https://github.com/ansible/community/wiki/Security-Automation">Security</a></td> + <td>✓</td> + <td></td> + <td>✓</td> + <td>Zuul</td> + <td>✓</td> + <td><a href="https://docs.ansible.com/ansible/latest/dev_guide/index.html">Developer guide</a></td> + </tr> + <tr> + <td><a href="https://galaxy.ansible.com/vyos/vyos">vyos.vyos</a></td> + <td><a href="https://galaxy.ansible.com/community/network">community.network</a></td> + <td><a href="https://github.com/ansible/community/wiki/Network">Network</a></td> + <td>✓</td> + <td>✓</td> + <td>✓</td> + <td>Zuul</td> + <td>✓</td> + <td><a href="https://docs.ansible.com/ansible/devel/network/dev_guide/index.html">Network guide</a></td> + </tr> + </table> + + +.. note:: + + \* A ✓ under **Open to PRs** means the collection welcomes GitHub issues and PRs for any changes to existing collection content (plugins, roles, and so on). + + \*\* Integration tests are required and unit tests are welcomed but not required for the AWS collections. 
An exception to this is made in cases where integration tests are logistically not feasible due to external requirements. An example of this is AWS Direct Connect, as this service cannot be functionally tested without the establishment of network peering connections. Unit tests are therefore required for modules that interact with AWS Direct Connect. Exceptions to ``amazon.aws`` must be approved by Red Hat, and exceptions to ``community.aws`` must be approved by the AWS community. + + \*\*\* ``ansible.netcommon`` contains all foundational components for enabling many network and security :ref:`platform <platform_options>` collections. It contains all connection and filter plugins required, and installs as a dependency when you install the platform collection. + + \*\*\*\* Unit tests for Windows PowerShell modules are an exception to testing, but unit tests are valid and required for the remainder of the collection, including Ansible-side plugins. + + +.. _which_collection: + +Deciding where your contribution belongs +========================================= + +We welcome contributions to Ansible-maintained collections. Because these collections are part of a downstream supported Red Hat product, the criteria for contribution, testing, and release may be higher than other community collections. The related community collections (such as ``community.general`` and ``community.network``) have less-stringent requirements and are a great place for new functionality that may become part of the Ansible-maintained collection in a future release. + +The following scenarios use the ``arista.eos`` collection to help explain when to contribute to the Ansible-maintained collection, and when to propose your change or idea to the related community collection: + + +1. You want to fix a problem in the ``arista.eos`` Ansible-maintained collection. Create the PR directly in the `arista.eos collection GitHub repository <https://github.com/ansible-collections/arista.eos>`_. 
Apply all the :ref:`merge requirements <ansible_collection_merge_requirements>`. + +2. You want to add a new Ansible module for Arista. Your options are one of the following: + + * Propose a new module in the ``arista.eos`` collection (requires approval from Arista and Red Hat). + * Propose a new collection in the ``arista`` namespace (requires approval from Arista and Red Hat). + * Propose a new module in the ``community.network`` collection (requires network community approval). + * Place your new module in a collection in your own namespace (no approvals required). + + +Most new content should go into either a related community collection or your own collection first so that is well established in the community before you can propose adding it to the ``arista`` namespace, where inclusion and maintenance criteria are much higher. + + +.. _ansible_collection_merge_requirements: + +Requirements to merge your PR +============================== + +Your PR must meet the following requirements before it can merge into an Ansible-maintained collection: + + +#. The PR is in the intended scope of the collection. Communicate with the appropriate Ansible sponsor listed in the :ref:`Ansible-maintained collection table <ansible-collection-table>` for help. +#. For network and security domains, the PR follows the :ref:`resource module development principles <developing_resource_modules>`. +#. Passes :ref:`sanity tests and tox <tox_resource_modules>`. +#. Passes unit, and integration tests, as listed in the :ref:`Ansible-maintained collection table <ansible-collection-table>` and described in :ref:`testing_resource_modules`. +#. Follows Ansible guidelines. See :ref:`developing_modules` and :ref:`developing_collections`. +#. Addresses all review comments. +#. Includes an appropriate :ref:`changelog <community_changelogs>`. 
diff --git a/docs/docsite/rst/community/contributor_license_agreement.rst b/docs/docsite/rst/community/contributor_license_agreement.rst new file mode 100644 index 00000000..b0a0f117 --- /dev/null +++ b/docs/docsite/rst/community/contributor_license_agreement.rst @@ -0,0 +1,7 @@ +.. _contributor_license_agreement: + +****************************** +Contributors License Agreement +****************************** + +By contributing you agree that these contributions are your own (or approved by your employer) and you grant a full, complete, irrevocable copyright license to all users and developers of the project, present and future, pursuant to the license of the project. diff --git a/docs/docsite/rst/community/development_process.rst b/docs/docsite/rst/community/development_process.rst new file mode 100644 index 00000000..fc3c987a --- /dev/null +++ b/docs/docsite/rst/community/development_process.rst @@ -0,0 +1,277 @@ +.. _community_development_process: + +***************************** +The Ansible Development Cycle +***************************** + +Ansible developers (including community contributors) add new features, fix bugs, and update code in many different repositories. The `ansible/ansible repository <https://github.com/ansible/ansible>`_ contains the code for basic features and functions, such as copying module code to managed nodes. This code is also known as ``ansible-base``. Other repositories contain plugins and modules that enable Ansible to execute specific tasks, like adding a user to a particular database or configuring a particular network device. These repositories contain the source code for collections. + +Development on ``ansible-base`` occurs on two levels. At the macro level, the ``ansible-base`` developers and maintainers plan releases and track progress with roadmaps and projects. At the micro level, each PR has its own lifecycle. + +Development on collections also occurs at the macro and micro levels. 
Each collection has its own macro development cycle. For more information on the collections development cycle, see :ref:`contributing_maintained_collections`. The micro-level lifecycle of a PR is similar in collections and in ``ansible-base``. + +.. contents:: + :local: + +Macro development: ``ansible-base`` roadmaps, releases, and projects +===================================================================== + +If you want to follow the conversation about what features will be added to ``ansible-base`` for upcoming releases and what bugs are being fixed, you can watch these resources: + +* the :ref:`roadmaps` +* the :ref:`Ansible Release Schedule <release_and_maintenance>` +* various GitHub `projects <https://github.com/ansible/ansible/projects>`_ - for example: + + * the `2.10 release project <https://github.com/ansible/ansible/projects/39>`_ + * the `network bugs project <https://github.com/ansible/ansible/projects/20>`_ + * the `core documentation project <https://github.com/ansible/ansible/projects/27>`_ + +.. _community_pull_requests: + +Micro development: the lifecycle of a PR +======================================== + +If you want to contribute a feature or fix a bug in ``ansible-base`` or in a collection, you must open a **pull request** ("PR" for short). GitHub provides a great overview of `how the pull request process works <https://help.github.com/articles/about-pull-requests/>`_ in general. The ultimate goal of any pull request is to get merged and become part of a collection or ``ansible-base``. 
+Here's an overview of the PR lifecycle: + +* Contributor opens a PR +* Ansibot reviews the PR +* Ansibot assigns labels +* Ansibot pings maintainers +* Shippable runs the test suite +* Developers, maintainers, community review the PR +* Contributor addresses any feedback from reviewers +* Developers, maintainers, community re-review +* PR merged or closed + +Automated PR review: ansibullbot +-------------------------------- + +Because Ansible receives many pull requests, and because we love automating things, we have automated several steps of the process of reviewing and merging pull requests with a tool called Ansibullbot, or Ansibot for short. + +`Ansibullbot <https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md>`_ serves many functions: + +- Responds quickly to PR submitters to thank them for submitting their PR +- Identifies the community maintainer responsible for reviewing PRs for any files affected +- Tracks the current status of PRs +- Pings responsible parties to remind them of any PR actions for which they may be responsible +- Provides maintainers with the ability to move PRs through the workflow +- Identifies PRs abandoned by their submitters so that we can close them +- Identifies modules abandoned by their maintainers so that we can find new maintainers + +Ansibot workflow +^^^^^^^^^^^^^^^^ + +Ansibullbot runs continuously. You can generally expect to see changes to your issue or pull request within thirty minutes. Ansibullbot examines every open pull request in the repositories, and enforces state roughly according to the following workflow: + +- If a pull request has no workflow labels, it's considered **new**. Files in the pull request are identified, and the maintainers of those files are pinged by the bot, along with instructions on how to review the pull request. (Note: sometimes we strip labels from a pull request to "reboot" this process.) 
+- If the module maintainer is not ``$team_ansible``, the pull request then goes into the **community_review** state. +- If the module maintainer is ``$team_ansible``, the pull request then goes into the **core_review** state (and probably sits for a while). +- If the pull request is in **community_review** and has received comments from the maintainer: + + - If the maintainer says ``shipit``, the pull request is labeled **shipit**, whereupon the Core team assesses it for final merge. + - If the maintainer says ``needs_info``, the pull request is labeled **needs_info** and the submitter is asked for more info. + - If the maintainer says **needs_revision**, the pull request is labeled **needs_revision** and the submitter is asked to fix some things. + +- If the submitter says ``ready_for_review``, the pull request is put back into **community_review** or **core_review** and the maintainer is notified that the pull request is ready to be reviewed again. +- If the pull request is labeled **needs_revision** or **needs_info** and the submitter has not responded lately: + + - The submitter is first politely pinged after two weeks, pinged again after two more weeks and labeled **pending action**, and the issue or pull request will be closed two weeks after that. + - If the submitter responds at all, the clock is reset. +- If the pull request is labeled **community_review** and the reviewer has not responded lately: + + - The reviewer is first politely pinged after two weeks, pinged again after two more weeks and labeled **pending_action**, and then may be reassigned to ``$team_ansible`` or labeled **core_review**, or often the submitter of the pull request is asked to step up as a maintainer. +- If Shippable tests fail, or if the code is not able to be merged, the pull request is automatically put into **needs_revision** along with a message to the submitter explaining why. + +There are corner cases and frequent refinements, but this is the workflow in general. 
+ +PR labels +^^^^^^^^^ + +There are two types of PR Labels generally: **workflow** labels and **information** labels. + +Workflow labels +""""""""""""""" + +- **community_review**: Pull requests for modules that are currently awaiting review by their maintainers in the Ansible community. +- **core_review**: Pull requests for modules that are currently awaiting review by their maintainers on the Ansible Core team. +- **needs_info**: Waiting on info from the submitter. +- **needs_rebase**: Waiting on the submitter to rebase. +- **needs_revision**: Waiting on the submitter to make changes. +- **shipit**: Waiting for final review by the core team for potential merge. + +Information labels +"""""""""""""""""" + +- **backport**: this is applied automatically if the PR is requested against any branch that is not devel. The bot immediately assigns the labels backport and ``core_review``. +- **bugfix_pull_request**: applied by the bot based on the templatized description of the PR. +- **cloud**: applied by the bot based on the paths of the modified files. +- **docs_pull_request**: applied by the bot based on the templatized description of the PR. +- **easyfix**: applied manually, inconsistently used but sometimes useful. +- **feature_pull_request**: applied by the bot based on the templatized description of the PR. +- **networking**: applied by the bot based on the paths of the modified files. +- **owner_pr**: largely deprecated. Formerly workflow, now informational. Originally, PRs submitted by the maintainer would automatically go to **shipit** based on this label. If the submitter is also a maintainer, we notify the other maintainers and still require one of the maintainers (including the submitter) to give a **shipit**. +- **pending_action**: applied by the bot to PRs that are not moving. Reviewed every couple of weeks by the community team, who tries to figure out the appropriate action (closure, asking for new maintainers, and so on). 
+ + +Special Labels +"""""""""""""" + +- **new_plugin**: this is for new modules or plugins that are not yet in Ansible. + +**Note:** `new_plugin` kicks off a completely separate process, and frankly it doesn't work very well at present. We're working our best to improve this process. + +Human PR review +--------------- + +After Ansibot reviews the PR and applies labels, the PR is ready for human review. The most likely reviewers for any PR are the maintainers for the module that PR modifies. + +Each module has at least one assigned :ref:`maintainer <maintainers>`, listed in the `BOTMETA.yml <https://github.com/ansible/ansible/blob/devel/.github/BOTMETA.yml>`_ file. + +The maintainer's job is to review PRs that affect that module and decide whether they should be merged (``shipit``) or revised (``needs_revision``). We'd like to have at least one community maintainer for every module. If a module has no community maintainers assigned, the maintainer is listed as ``$team_ansible``. + +Once a human applies the ``shipit`` label, the :ref:`committers <community_committer_guidelines>` decide whether the PR is ready to be merged. Not every PR that gets the ``shipit`` label is actually ready to be merged, but the better our reviewers are, and the better our guidelines are, the more likely it will be that a PR that reaches **shipit** will be mergeable. + + +Making your PR merge-worthy +=========================== + +We do not merge every PR. Here are some tips for making your PR useful, attractive, and merge-worthy. + +.. _community_changelogs: + +Changelogs +---------- + +Changelogs help users and developers keep up with changes to Ansible. Ansible builds a changelog for each release from fragments. You **must** add a changelog fragment to any PR that changes functionality or fixes a bug in ansible-base. You do not have to add a changelog fragment for PRs that add new modules and plugins, because our tooling does that for you automatically. 
+ +We build short summary changelogs for minor releases as well as for major releases. If you backport a bugfix, include a changelog fragment with the backport PR. + +.. _changelogs_how_to: + +Creating a changelog fragment +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +A basic changelog fragment is a ``.yaml`` file placed in the ``changelogs/fragments/`` directory. Each file contains a yaml dict with keys like ``bugfixes`` or ``major_changes`` followed by a list of changelog entries of bugfixes or features. Each changelog entry is rst embedded inside of the yaml file which means that certain constructs would need to be escaped so they can be interpreted by rst and not by yaml (or escaped for both yaml and rst if you prefer). Each PR **must** use a new fragment file rather than adding to an existing one, so we can trace the change back to the PR that introduced it. + +To create a changelog entry, create a new file with a unique name in the ``changelogs/fragments/`` directory of the corresponding repository. The file name should include the PR number and a description of the change. It must end with the file extension ``.yaml``. For example: ``40696-user-backup-shadow-file.yaml`` + +A single changelog fragment may contain multiple sections but most will only contain one section. The toplevel keys (bugfixes, major_changes, and so on) are defined in the `config file <https://github.com/ansible/ansible/blob/devel/changelogs/config.yaml>`_ for our `release note tool <https://github.com/ansible-community/antsibull-changelog/blob/main/docs/changelogs.rst>`_. Here are the valid sections and a description of each: + +**breaking_changes** + Changes that break existing playbooks or roles. This includes any change to existing behavior that forces users to update tasks. Displayed in both the changelogs and the :ref:`Porting Guides <porting_guides>`. + +**major_changes** + Major changes to Ansible itself. Generally does not include module or plugin changes. 
Displayed in both the changelogs and the :ref:`Porting Guides <porting_guides>`. + +**minor_changes** + Minor changes to Ansible, modules, or plugins. This includes new features, new parameters added to modules, or behavior changes to existing parameters. + +**deprecated_features** + Features that have been deprecated and are scheduled for removal in a future release. Displayed in both the changelogs and the :ref:`Porting Guides <porting_guides>`. + +**removed_features** + Features that were previously deprecated and are now removed. Displayed in both the changelogs and the :ref:`Porting Guides <porting_guides>`. + +**security_fixes** + Fixes that address CVEs or resolve security concerns. Include links to CVE information. + +**bugfixes** + Fixes that resolve issues. + +**known_issues** + Known issues that are currently not fixed or will not be fixed. + +Each changelog entry must contain a link to its issue between parentheses at the end. If there is no corresponding issue, the entry must contain a link to the PR itself. + +Most changelog entries will be ``bugfixes`` or ``minor_changes``. When writing a changelog entry that pertains to a particular module, start the entry with ``- [module name] -`` and the following sentence with a lowercase letter. + +Here are some examples: + +.. code-block:: yaml + + bugfixes: + - apt_repository - fix crash caused by ``cache.update()`` raising an ``IOError`` + due to a timeout in ``apt update`` (https://github.com/ansible/ansible/issues/51995). + +.. code-block:: yaml + + minor_changes: + - lineinfile - add warning when using an empty regexp (https://github.com/ansible/ansible/issues/29443). + +.. code-block:: yaml + + bugfixes: + - copy - the module was attempting to change the mode of files for + remote_src=True even if mode was not set as a parameter. This failed on + filesystems which do not have permission bits (https://github.com/ansible/ansible/issues/29444). 
+ +You can find more example changelog fragments in the `changelog directory <https://github.com/ansible/ansible/tree/stable-2.10/changelogs/fragments>`_ for the 2.10 release. + +After you have written the changelog fragment for your PR, commit the file and include it with the pull request. + +.. _backport_process: + +Backporting merged PRs in ``ansible-base`` +=========================================== + +All ``ansible-base`` PRs must be merged to the ``devel`` branch first. After a pull request has been accepted and merged to the ``devel`` branch, the following instructions will help you create a pull request to backport the change to a previous stable branch. + +We do **not** backport features. + +.. note:: + + These instructions assume that: + + * ``stable-2.10`` is the targeted release branch for the backport + * ``https://github.com/ansible/ansible.git`` is configured as a + ``git remote`` named ``upstream``. If you do not use + a ``git remote`` named ``upstream``, adjust the instructions accordingly. + * ``https://github.com/<yourgithubaccount>/ansible.git`` + is configured as a ``git remote`` named ``origin``. If you do not use + a ``git remote`` named ``origin``, adjust the instructions accordingly. + +#. Prepare your devel, stable, and feature branches: + + :: + + git fetch upstream + git checkout -b backport/2.10/[PR_NUMBER_FROM_DEVEL] upstream/stable-2.10 + +#. Cherry pick the relevant commit SHA from the devel branch into your feature + branch, handling merge conflicts as necessary: + + :: + + git cherry-pick -x [SHA_FROM_DEVEL] + +#. Add a :ref:`changelog fragment <changelogs_how_to>` for the change, and commit it. + +#. Push your feature branch to your fork on GitHub: + + :: + + git push origin backport/2.10/[PR_NUMBER_FROM_DEVEL] + +#. Submit the pull request for ``backport/2.10/[PR_NUMBER_FROM_DEVEL]`` + against the ``stable-2.10`` branch + +#. The Release Manager will decide whether to merge the backport PR before + the next minor release. 
There isn't any need to follow up. Just ensure that the automated + tests (CI) are green. + +.. note:: + + The choice to use ``backport/2.10/[PR_NUMBER_FROM_DEVEL]`` as the + name for the feature branch is somewhat arbitrary, but conveys meaning + about the purpose of that branch. It is not required to use this format, + but it can be helpful, especially when making multiple backport PRs for + multiple stable branches. + +.. note:: + + If you prefer, you can use CPython's cherry-picker tool + (``pip install --user 'cherry-picker >= 1.3.2'``) to backport commits + from devel to stable branches in Ansible. Take a look at the `cherry-picker + documentation <https://pypi.org/p/cherry-picker#cherry-picking>`_ for + details on installing, configuring, and using it. diff --git a/docs/docsite/rst/community/documentation_contributions.rst b/docs/docsite/rst/community/documentation_contributions.rst new file mode 100644 index 00000000..7b135580 --- /dev/null +++ b/docs/docsite/rst/community/documentation_contributions.rst @@ -0,0 +1,214 @@ +.. _community_documentation_contributions: + +***************************************** +Contributing to the Ansible Documentation +***************************************** + +Ansible has a lot of documentation and a small team of writers. Community support helps us keep up with new features, fixes, and changes. + +Improving the documentation is an easy way to make your first contribution to the Ansible project. You do not have to be a programmer, since most of our documentation is written in YAML (module documentation) or `reStructuredText <http://docutils.sourceforge.net/rst.html>`_ (rST). Some collection-level documentation is written in a subset of `Markdown <https://github.com/ansible/ansible/issues/68119#issuecomment-596723053>`_. If you are using Ansible, you already use YAML in your playbooks. rST and Markdown are mostly just text. You do not even need git experience, if you use the ``Edit on GitHub`` option. 
+ +If you find a typo, a broken example, a missing topic, or any other error or omission on this documentation website, let us know. Here are some ways to support Ansible documentation: + +.. contents:: + :local: + +Editing docs directly on GitHub +=============================== + +For typos and other quick fixes, you can edit most of the documentation right from the site. Look at the top right corner of this page. That ``Edit on GitHub`` link is available on all the guide pages in the documentation. If you have a GitHub account, you can submit a quick and easy pull request this way. + +.. note:: + + The source files for individual collection plugins exist in their respective repositories. Follow the link to the collection on Galaxy to find where the repository is located and any guidelines on how to contribute to that collection. + +To submit a documentation PR from docs.ansible.com with ``Edit on GitHub``: + +#. Click on ``Edit on GitHub``. +#. If you don't already have a fork of the ansible repo on your GitHub account, you'll be prompted to create one. +#. Fix the typo, update the example, or make whatever other change you have in mind. +#. Enter a commit message in the first rectangle under the heading ``Propose file change`` at the bottom of the GitHub page. The more specific, the better. For example, "fixes typo in my_module description". You can put more detail in the second rectangle if you like. Leave the ``+label: docsite_pr`` there. +#. Submit the suggested change by clicking on the green "Propose file change" button. GitHub will handle branching and committing for you, and open a page with the heading "Comparing Changes". +#. Click on ``Create pull request`` to open the PR template. +#. Fill out the PR template, including as much detail as appropriate for your change. You can change the title of your PR if you like (by default it's the same as your commit message). In the ``Issue Type`` section, delete all lines except the ``Docs Pull Request`` line. 
+#. Submit your change by clicking on ``Create pull request`` button. +#. Be patient while Ansibot, our automated script, adds labels, pings the docs maintainers, and kicks off a CI testing run. +#. Keep an eye on your PR - the docs team may ask you for changes. + +Reviewing open PRs and issues +============================= + +You can also contribute by reviewing open documentation `issues <https://github.com/ansible/ansible/issues?utf8=%E2%9C%93&q=is%3Aissue+is%3Aopen+label%3Adocs>`_ and `PRs <https://github.com/ansible/ansible/pulls?utf8=%E2%9C%93&q=is%3Apr+is%3Aopen+label%3Adocs>`_. To add a helpful review, please: + +- Include a comment - "looks good to me" only helps if we know why. +- For issues, reproduce the problem. +- For PRs, test the change. + +Opening a new issue and/or PR +============================= + +If the problem you have noticed is too complex to fix with the ``Edit on GitHub`` option, and no open issue or PR already documents the problem, please open an issue and/or a PR on the correct underlying repo - ``ansible/ansible`` for most pages that are not plugin or module documentation. If the documentation page has no ``Edit on GitHub`` option, check if the page is for a module within a collection. If so, follow the link to the collection on Galaxy and select the ``repo`` button in the upper right corner to find the source repository for that collection and module. The Collection README file should contain information on how to contribute to that collection, or report issues. 
+ +A great documentation GitHub issue or PR includes: + +- a specific title +- a detailed description of the problem (even for a PR - it's hard to evaluate a suggested change unless we know what problem it's meant to solve) +- links to other information (related issues/PRs, external documentation, pages on docs.ansible.com, and so on) + + +Verifying your documentation PR +================================ + +If you make multiple changes to the documentation on ``ansible/ansible``, or add more than a line to it, before you open a pull request, please: + +#. Check that your text follows our :ref:`style_guide`. +#. Test your changes for rST errors. +#. Build the page, and preferably the entire documentation site, locally. + +.. note:: + + The following sections apply to documentation sourced from the ``ansible/ansible`` repo and does not apply to documentation from an individual collection. See the collection README file for details on how to contribute to that collection. + +Setting up your environment to build documentation locally +---------------------------------------------------------- + +To build documentation locally, ensure you have a working :ref:`development environment <environment_setup>`. + +To work with documentation on your local machine, you need to have python-3.5 or greater and the +following packages installed: + +- gcc +- jinja2 +- libyaml +- Pygments >= 2.4.0 +- pyparsing +- PyYAML +- rstcheck +- six +- sphinx +- sphinx-notfound-page +- straight.plugin + +These required packages are listed in two :file:`requirements.txt` files to make installation easier: + +.. code-block:: bash + + pip install --user -r requirements.txt + pip install --user -r docs/docsite/requirements.txt + +You can drop ``--user`` if you have set up a virtual environment (venv/virtenv). + +.. note:: + + On macOS with Xcode, you may need to install ``six`` and ``pyparsing`` with ``--ignore-installed`` to get versions that work with ``sphinx``. + +.. 
note:: + + After checking out ``ansible/ansible``, make sure the ``docs/docsite/rst`` directory has strict enough permissions. It should only be writable by the owner's account. If your default ``umask`` is not 022, you can use ``chmod go-w docs/docsite/rst`` to set the permissions correctly in your new branch. Optionally, you can set your ``umask`` to 022 to make all newly created files on your system (including those created by ``git clone``) have the correct permissions. + +.. _testing_documentation_locally: + +Testing the documentation locally +--------------------------------- + +To test an individual file for rST errors: + +.. code-block:: bash + + rstcheck changed_file.rst + +Building the documentation locally +---------------------------------- + +Building the documentation is the best way to check for errors and review your changes. Once `rstcheck` runs with no errors, navigate to ``ansible/docs/docsite`` and then build the page(s) you want to review. + +Building a single rST page +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +To build a single rST file with the make utility: + +.. code-block:: bash + + make htmlsingle rst=path/to/your_file.rst + +For example: + +.. code-block:: bash + + make htmlsingle rst=community/documentation_contributions.rst + +This process compiles all the links but provides minimal log output. If you're writing a new page or want more detailed log output, refer to the instructions on :ref:`build_with_sphinx-build` + +.. note:: + + ``make htmlsingle`` adds ``rst/`` to the beginning of the path you provide in ``rst=``, so you can't type the filename with autocomplete. Here are the error messages you will see if you get this wrong: + + - If you run ``make htmlsingle`` from the ``docs/docsite/rst/`` directory: ``make: *** No rule to make target `htmlsingle'. 
Stop.`` + - If you run ``make htmlsingle`` from the ``docs/docsite/`` directory with the full path to your rST document: ``sphinx-build: error: cannot find files ['rst/rst/community/documentation_contributions.rst']``. + + +Building all the rST pages +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +To build all the rST files without any module documentation: + +.. code-block:: bash + + MODULES=none make webdocs + +Building module docs and rST pages +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +To build documentation for a few modules included in ``ansible/ansible`` plus all the rST files, use a comma-separated list: + +.. code-block:: bash + + MODULES=one_module,another_module make webdocs + +To build all the module documentation plus all the rST files: + +.. code-block:: bash + + make webdocs + +.. _build_with_sphinx-build: + +Building rST files with ``sphinx-build`` +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Advanced users can build one or more rST files with the sphinx utility directly. ``sphinx-build`` returns misleading ``undefined label`` warnings if you only build a single page, because it does not create internal links. However, ``sphinx-build`` returns more extensive syntax feedback, including warnings about indentation errors and ``x-string without end-string`` warnings. This can be useful, especially if you're creating a new page from scratch. To build a page or pages with ``sphinx-build``: + +.. code-block:: bash + + sphinx-build [options] sourcedir outdir [filenames...] + +You can specify filenames, or ``–a`` for all files, or omit both to compile only new/changed files. + +For example: + +.. code-block:: bash + + sphinx-build -b html -c rst/ rst/dev_guide/ _build/html/dev_guide/ rst/dev_guide/developing_modules_documenting.rst + +Running the final tests +^^^^^^^^^^^^^^^^^^^^^^^ + +When you submit a documentation pull request, automated tests are run. Those same tests can be run locally. To do so, navigate to the repository's top directory and run: + +.. 
code-block:: bash + + make clean && + bin/ansible-test sanity --test docs-build && + bin/ansible-test sanity --test rstcheck + +Unfortunately, leftover rST-files from previous document-generating can occasionally confuse these tests. It is therefore safest to run them on a clean copy of the repository, which is the purpose of ``make clean``. If you type these three lines one at a time and manually check the success of each, you do not need the ``&&``. + +Joining the documentation working group +======================================= + +The Documentation Working Group (DaWGs) meets weekly on Tuesdays on the #ansible-docs channel on freenode IRC. For more information, including links to our agenda and a calendar invite, please visit the `working group page in the community repo <https://github.com/ansible/community/wiki/Docs>`_. + +.. seealso:: + :ref:`More about testing module documentation <testing_module_documentation>` + + :ref:`More about documenting modules <module_documenting>` diff --git a/docs/docsite/rst/community/github_admins.rst b/docs/docsite/rst/community/github_admins.rst new file mode 100644 index 00000000..802b180d --- /dev/null +++ b/docs/docsite/rst/community/github_admins.rst @@ -0,0 +1,32 @@ +.. _github_admins: + +************* +GitHub Admins +************* + +.. contents:: Topics + +GitHub Admins have more permissions on GitHub than normal contributors or even committers. There are +a few responsibilities that come with that increased power. + + +Adding and removing committers +============================== + +The Ansible Team will periodically review who is actively contributing to Ansible to grant or revoke +contributors' ability to commit on their own. GitHub Admins are the people who have the power to +actually manage the GitHub permissions. + + +Changing branch permissions for releases +======================================== + +When we make releases we make people go through a :ref:`release_managers` to push commits to that +branch. 
The GitHub admins are responsible for setting the branch so only the Release Manager can +commit to the branch when the release process reaches that stage and later opening the branch once +the release has been made. The Release manager will let the GitHub Admin know when this needs to be +done. + +.. seealso:: The `GitHub Admin Process Docs + <https://docs.google.com/document/d/1gWPtxNX4J39uIzwqQWLIsTZ1dY_AwEZzAd9bJ4XtZso/edit#heading=h.2wezayw9xsqz>`_ for instructions + on how to change branch permissions. diff --git a/docs/docsite/rst/community/how_can_I_help.rst b/docs/docsite/rst/community/how_can_I_help.rst new file mode 100644 index 00000000..cf0a64c2 --- /dev/null +++ b/docs/docsite/rst/community/how_can_I_help.rst @@ -0,0 +1,86 @@ +.. _how_can_i_help: + +*************** +How can I help? +*************** + +.. contents:: + :local: + +Thanks for being interested in helping the Ansible project! + +There are many ways to help the Ansible project...but first, please read and understand the :ref:`code_of_conduct`. + +Become a power user +=================== + +A great way to help the Ansible project is to become a power user: + +* Use Ansible everywhere you can +* Take tutorials and classes +* Read the :ref:`official documentation <ansible_documentation>` +* Study some of the `many excellent books <https://www.amazon.com/s/ref=nb_sb_ss_c_2_7?url=search-alias%3Dstripbooks&field-keywords=ansible&sprefix=ansible%2Caps%2C260>`_ about Ansible +* `Get certified <https://www.ansible.com/products/training-certification>`_. + +When you become a power user, your ability and opportunities to help the Ansible project in other ways will multiply quickly. + +Ask and answer questions online +=============================== + +There are many forums online where Ansible users ask and answer questions. Reach out and communicate with your fellow Ansible users. + +You can find the official :ref:`Ansible communication channels <communication>`. 
+ +Review, fix, and maintain the documentation +=========================================== + +Typos are everywhere, even in the Ansible documentation. We work hard to keep the documentation up-to-date, but you may also find outdated examples. We offer easy ways to :ref:`report and/or fix documentation errors <community_documentation_contributions>`. + +.. _ansible_community_meetup: + +Participate in your local meetup +================================ + +There are Ansible meetups `all over the world <https://www.meetup.com/topics/ansible/>`_. Join your local meetup. Attend regularly. Ask good questions. Volunteer to give a presentation about how you use Ansible. + +If there is no meetup near you, we are happy to help you `start one <https://www.ansible.com/community/events/ansible-meetups>`_. + +File and verify issues +====================== + +All software has bugs, and Ansible is no exception. When you find a bug, you can help tremendously by :ref:`telling us about it <reporting_bugs_and_features>`. + + +If the bug you found already exists in an issue, you can help by verifying the behavior of the reported bug with a comment in that issue, or by reporting any additional information. + +Review and submit pull requests +=============================== + +As you become more familiar with how Ansible works, you may be able to fix issues or develop new features yourself. If you think you have a fix for a bug in Ansible, or if you have a new feature that you would like to share with millions of Ansible users, read all about the :ref:`Ansible development process <community_development_process>` and and :ref:`how to contribute to collections <contributing_maintained_collections>` to learn how to get your code accepted into Ansible. + +Another good way to help is to review pull requests that other Ansible users have submitted. 
The Ansible community keeps a full list of `open pull requests by file <https://ansible.sivel.net/pr/byfile.html>`_, so if a particular module or plugin interests you, you can easily keep track of all the relevant new pull requests and provide testing or feedback. + +Become a collection maintainer +============================== + +Once you have learned about the development process and have contributed code to a collection, we encourage you to become a maintainer of that collection. There are hundreds of modules in dozens of Ansible collections, and the vast majority of them are written and maintained entirely by members of the Ansible community. + +To learn more about the responsibilities of being an Ansible module maintainer, please read our :ref:`collection maintainer guidelines <maintainers>`. + +.. _community_working_groups: + +Join a working group +==================== + +Working groups are a way for Ansible community members to self-organize around particular topics of interest. We have working groups around various topics. To join or create a working group, please read the :ref:`Ansible Working Groups<working_group_list>`. + + +Teach Ansible to others +======================= + +We are working on a standardized `Ansible workshop <https://ansible.github.io/workshops/>`_ that can provide a good hands-on introduction to Ansible usage and concepts. + +Social media +============ + +If you like Ansible and just want to spread the good word, feel free to share on your social media platform of choice, and let us know by using ``@ansible`` or ``#ansible``. We'll be looking for you. diff --git a/docs/docsite/rst/community/index.rst b/docs/docsite/rst/community/index.rst new file mode 100644 index 00000000..be08228c --- /dev/null +++ b/docs/docsite/rst/community/index.rst @@ -0,0 +1,88 @@ +.. _ansible_community_guide: + +*********************** +Ansible Community Guide +*********************** + +Welcome to the Ansible Community Guide! 
+ +The purpose of this guide is to teach you everything you need to know about being a contributing member of the Ansible community. All types of contributions are welcome and necessary to Ansible's continued success. + +This page outlines the most common situations and questions that bring readers to this section. If you prefer a :ref:`traditional table of contents <community_toc>`, you can find one at the bottom of the page. + + +Getting started +=============== + +* I am new to the community. Where can I find the Ansible :ref:`code_of_conduct`? +* I would like to know what I am agreeing to when I contribute to Ansible. Does Ansible have a :ref:`contributor_license_agreement`? +* I would like to contribute but I am not sure how. Are there :ref:`easy ways to contribute <how_can_i_help>`? +* I want to talk to other Ansible users. How do I find an `Ansible Meetup near me <https://www.meetup.com/topics/ansible/>`_? +* I have a question. Which :ref:`Ansible email lists and IRC channels <communication>` will help me find answers? +* I want to learn more about Ansible. What can I do? + + * `Read books <https://www.ansible.com/resources/ebooks>`_. + * `Get certified <https://www.ansible.com/products/training-certification>`_. + * `Attend events <https://www.ansible.com/community/events>`_. + * `Review getting started guides <https://www.ansible.com/resources/get-started>`_. + * `Watch videos <https://www.ansible.com/resources/videos>`_ - includes Ansible Automates, AnsibleFest & webinar recordings. + +* I would like updates about new Ansible versions. How are `new releases announced <https://groups.google.com/forum/#!forum/ansible-announce>`_? +* I want to use the current release. How do I know which :ref:`releases are current <release_schedule>`? + +Going deeper +============ + +* I think Ansible is broken. How do I :ref:`report a bug <reporting_bugs>`? +* I need functionality that Ansible does not offer. How do I :ref:`request a feature <request_features>`? 
+* How do I :ref:`contribute to an Ansible-maintained collection <contributing_maintained_collections>`? +* I am waiting for a particular feature. How do I see what is :ref:`planned for future Ansible Releases <roadmaps>`? +* I have a specific Ansible interest or expertise (for example, VMware, Linode, and so on). How do I get involved in a :ref:`working group <working_group_list>`? +* I would like to participate in conversations about features and fixes. How do I review GitHub issues and pull requests? +* I found a typo or another problem on docs.ansible.com. How can I :ref:`improve the documentation <community_documentation_contributions>`? + + +Working with the Ansible repo +============================= + +* I want to make my first code changes to a collection or to ``ansible-base``. How do I :ref:`set up my Python development environment <environment_setup>`? +* I would like to get more efficient as a developer. How can I find :ref:`editors, linters, and other tools <other_tools_and_programs>` that will support my Ansible development efforts? +* I want my code to meet Ansible's guidelines. Where can I find guidance on :ref:`coding in Ansible <developer_guide>`? +* I want to learn more about Ansible roadmaps, releases, and projects. How do I find information on :ref:`the development cycle <community_development_process>`? +* I would like to connect Ansible to a new API or other resource. How do I :ref:`create a collection <developing_modules_in_groups>`? +* My pull request is marked ``needs_rebase``. How do I :ref:`rebase my PR <rebase_guide>`? +* I am using an older version of Ansible and want a bug fixed in my version that has already been fixed on the ``devel`` branch. How do I :ref:`backport a bugfix PR <backport_process>`? +* I have an open pull request with a failing test. How do I learn about Ansible's :ref:`testing (CI) process <developing_testing>`? +* I am ready to step up as a collection maintainer. 
What are the :ref:`guidelines for maintainers <maintainers>`? +* A module in a collection I maintain is obsolete. How do I :ref:`deprecate a module <deprecating_modules>`? + +.. _community_toc: + +Traditional Table of Contents +============================= + +If you prefer to read the entire Community Guide, here is a list of the pages in order: + +.. toctree:: + :maxdepth: 2 + + code_of_conduct + how_can_I_help + reporting_bugs_and_features + documentation_contributions + communication + development_process + contributing_maintained_collections + contributor_license_agreement + triage_process + other_tools_and_programs + ../dev_guide/style_guide/index + +.. toctree:: + :caption: Guidelines for specific types of contributors + :maxdepth: 1 + + committer_guidelines + maintainers + release_managers + github_admins diff --git a/docs/docsite/rst/community/maintainers.rst b/docs/docsite/rst/community/maintainers.rst new file mode 100644 index 00000000..ac466d67 --- /dev/null +++ b/docs/docsite/rst/community/maintainers.rst @@ -0,0 +1,34 @@ +.. _maintainers: + +******************************** +Collection maintainer guidelines +******************************** + +Thank you for being a community collection maintainer. This guide offers an overview of your responsibilities as a maintainer along with resources for additional information. The Ansible community hopes that you will find that maintaining a collection is as rewarding for you as having the collection content is for the wider community. + +.. contents:: + :local: + +In addition to the information below, module maintainers should be familiar with: + +* :ref:`General Ansible community development practices <ansible_community_guide>` +* Documentation on :ref:`module development <developing_modules>` + + +Maintainer responsibilities +=========================== + +When you contribute a module to a collection included in the ``ansible`` package, you become a maintainer for that module once it has been merged. 
Maintainership empowers you with the authority to accept, reject, or request revisions to pull requests on your module -- but as they say, "with great power comes great responsibility." + +Maintainers of Ansible collections are expected to provide feedback, responses, or actions on pull requests or issues to the collection(s) they maintain in a reasonably timely manner. You can also update the contributor guidelines for that collection, in collaboration with the Ansible community team and the other maintainers of that collection. + +Resources +========= + +Please see :ref:`communication` for ways to contact the broader Ansible community. For maintainers, following the `ansible-devel <https://groups.google.com/forum/#!forum/ansible-devel>`_ mailing list is a great way to participate in conversations about coding, get assistance when you need it, and influence the overall direction, quality, and goals of Ansible and the collections. If you are not on this relatively low-volume list, please join us here: https://groups.google.com/forum/#!forum/ansible-devel + + +Pull requests, issues, and workflow +=================================== + +Each collection community can set its own rules and workflow for managing pull requests, bug reports, documentation issues, and feature requests, as well as adding and replacing maintainers. diff --git a/docs/docsite/rst/community/other_tools_and_programs.rst b/docs/docsite/rst/community/other_tools_and_programs.rst new file mode 100644 index 00000000..4d7326bc --- /dev/null +++ b/docs/docsite/rst/community/other_tools_and_programs.rst @@ -0,0 +1,123 @@ +.. _other_tools_and_programs: + +######################## +Other Tools And Programs +######################## + +.. contents:: + :local: + +The Ansible community uses a range of tools for working with the Ansible project. This is a list of some of the most popular of these tools. 
+ +If you know of any other tools that should be added, this list can be updated by clicking "Edit on GitHub" on the top right of this page. + +*************** +Popular Editors +*************** + +Atom +==== + +An open-source, free GUI text editor created and maintained by GitHub. You can keep track of git project +changes, commit from the GUI, and see what branch you are on. You can customize the themes for different colors and install syntax highlighting packages for different languages. You can install Atom on Linux, macOS and Windows. Useful Atom plugins include: + +* `language-yaml <https://atom.io/packages/language-yaml>`_ - YAML highlighting for Atom (built-in). +* `linter-js-yaml <https://atom.io/packages/linter-js-yaml>`_ - parses your YAML files in Atom through js-yaml. + + +Emacs +===== + +A free, open-source text editor and IDE that supports auto-indentation, syntax highlighting and built in terminal shell(among other things). + +* `yaml-mode <https://github.com/yoshiki/yaml-mode>`_ - YAML highlighting and syntax checking. +* `jinja2-mode <https://github.com/paradoxxxzero/jinja2-mode>`_ - Jinja2 highlighting and syntax checking. +* `magit-mode <https://github.com/magit/magit>`_ - Git porcelain within Emacs. + + +PyCharm +======= + +A full IDE (integrated development environment) for Python software development. It ships with everything you need to write python scripts and complete software, including support for YAML syntax highlighting. It's a little overkill for writing roles/playbooks, but it can be a very useful tool if you write modules and submit code for Ansible. Can be used to debug the Ansible engine. + + +Sublime +======= + +A closed-source, subscription GUI text editor. You can customize the GUI with themes and install packages for language highlighting and other refinements. You can install Sublime on Linux, macOS and Windows. 
Useful Sublime plugins include: + +* `GitGutter <https://packagecontrol.io/packages/GitGutter>`_ - shows information about files in a git repository. +* `SideBarEnhancements <https://packagecontrol.io/packages/SideBarEnhancements>`_ - provides enhancements to the operations on Sidebar of Files and Folders. +* `Sublime Linter <https://packagecontrol.io/packages/SublimeLinter>`_ - a code-linting framework for Sublime Text 3. +* `Pretty YAML <https://packagecontrol.io/packages/Pretty%20YAML>`_ - prettifies YAML for Sublime Text 2 and 3. +* `Yamllint <https://packagecontrol.io/packages/SublimeLinter-contrib-yamllint>`_ - a Sublime wrapper around yamllint. + + +Visual Studio Code +================== + +An open-source, free GUI text editor created and maintained by Microsoft. Useful Visual Studio Code plugins include: + + +* `YAML Support by Red Hat <https://marketplace.visualstudio.com/items?itemName=redhat.vscode-yaml>`_ - provides YAML support through yaml-language-server with built-in Kubernetes and Kedge syntax support. +* `Ansible Syntax Highlighting Extension <https://marketplace.visualstudio.com/items?itemName=haaaad.ansible>`_ - YAML & Jinja2 support. +* `Visual Studio Code extension for Ansible <https://marketplace.visualstudio.com/items?itemName=vscoss.vscode-ansible>`_ - provides autocompletion, syntax highlighting. + +vim +=== + +An open-source, free command-line text editor. Useful vim plugins include: + +* `Ansible vim <https://github.com/pearofducks/ansible-vim>`_ - vim syntax plugin for Ansible 2.x, it supports YAML playbooks, Jinja2 templates, and Ansible's hosts files. + +JetBrains +========= + +An open-source Community edition and closed-source Enterprise edition, integrated development environments based on IntelliJ's framework including IDEA, AppCode, CLion, GoLand, PhpStorm, PyCharm and others. 
Useful JetBrains platform plugins include: + +* `Ansible Vault Editor <https://plugins.jetbrains.com/plugin/14278-ansible-vault-editor>`_ - Ansible Vault Editor with auto encryption/decryption. + + +***************** +Development Tools +***************** + +Finding related issues and PRs +============================== + +There are various ways to find existing issues and pull requests (PRs) + +- `PR by File <https://ansible.sivel.net/pr/byfile.html>`_ - shows a current list of all open pull requests by individual file. An essential tool for Ansible module maintainers. +- `jctanner's Ansible Tools <https://github.com/jctanner/ansible-tools>`_ - miscellaneous collection of useful helper scripts for Ansible development. + +.. _validate-playbook-tools: + +****************************** +Tools for Validating Playbooks +****************************** + +- `Ansible Lint <https://docs.ansible.com/ansible-lint/index.html>`_ - a highly configurable linter for Ansible playbooks. +- `Ansible Review <https://github.com/willthames/ansible-review>`_ - an extension of Ansible Lint designed for code review. +- `Molecule <https://molecule.readthedocs.io/en/latest/>`_ is a testing framework for Ansible plays and roles. +- `yamllint <https://yamllint.readthedocs.io/en/stable/>`__ is a command-line utility to check syntax validity including key repetition and indentation issues. + + +*********** +Other Tools +*********** + +- `Ansible cmdb <https://github.com/fboender/ansible-cmdb>`_ - takes the output of Ansible's fact gathering and converts it into a static HTML overview page containing system configuration information. +- `Ansible Inventory Grapher <https://github.com/willthames/ansible-inventory-grapher>`_ - visually displays inventory inheritance hierarchies and at what level a variable is defined in inventory. 
+- `Ansible Playbook Grapher <https://github.com/haidaraM/ansible-playbook-grapher>`_ - A command line tool to create a graph representing your Ansible playbook tasks and roles. +- `Ansible Shell <https://github.com/dominis/ansible-shell>`_ - an interactive shell for Ansible with built-in tab completion for all the modules. +- `Ansible Silo <https://github.com/groupon/ansible-silo>`_ - a self-contained Ansible environment by Docker. +- `Ansigenome <https://github.com/nickjj/ansigenome>`_ - a command line tool designed to help you manage your Ansible roles. +- `ARA <https://github.com/openstack/ara>`_ - records Ansible playbook runs and makes the recorded data available and intuitive for users and systems by integrating with Ansible as a callback plugin. +- `Awesome Ansible <https://github.com/jdauphant/awesome-ansible>`_ - a collaboratively curated list of awesome Ansible resources. +- `AWX <https://github.com/ansible/awx>`_ - provides a web-based user interface, REST API, and task engine built on top of Ansible. AWX is the upstream project for Red Hat Ansible Tower, part of the Red Hat Ansible Automation subscription. +- `Mitogen for Ansible <https://mitogen.networkgenomics.com/ansible_detailed.html>`_ - uses the `Mitogen <https://github.com/dw/mitogen/>`_ library to execute Ansible playbooks in a more efficient way (decreases the execution time). +- `nanvault <https://github.com/marcobellaccini/nanvault>`_ - a standalone tool to encrypt and decrypt files in the Ansible Vault format, featuring UNIX-style composability. +- `OpsTools-ansible <https://github.com/centos-opstools/opstools-ansible>`_ - uses Ansible to configure an environment that provides the support of `OpsTools <https://wiki.centos.org/SpecialInterestGroup/OpsTools>`_, namely centralized logging and analysis, availability monitoring, and performance monitoring. +- `TD4A <https://github.com/cidrblock/td4a>`_ - a template designer for automation. 
TD4A is a visual design aid for building and testing jinja2 templates. It will combine data in yaml format with a jinja2 template and render the output. +- `PHP-Ansible <https://github.com/maschmann/php-ansible>`_ - an object oriented Ansible wrapper for PHP. + diff --git a/docs/docsite/rst/community/release_managers.rst b/docs/docsite/rst/community/release_managers.rst new file mode 100644 index 00000000..d7c84cd5 --- /dev/null +++ b/docs/docsite/rst/community/release_managers.rst @@ -0,0 +1,82 @@ +.. _release_managers: + +************************** +Release Manager Guidelines +************************** + +.. contents:: Topics + +The release manager's purpose is to ensure a smooth release. To achieve that goal, they need to +coordinate between: + +* Developers with commit privileges on the `Ansible GitHub repository <https://github.com/ansible/ansible/>`_ +* Contributors without commit privileges +* The community +* Ansible documentation team +* Ansible Tower team + +Pre-releases: what and why +========================== + +Pre-releases exist to draw testers. They give people who don't feel comfortable running from source +control a means to get an early version of the code to test and give us feedback. To ensure we get +good feedback about a release, we need to make sure all major changes in a release are put into +a pre-release. Testers must be given time to test those changes before the final release. Ideally we +want there to be sufficient time between pre-releases for people to install and test one version for +a span of time. Then they can spend more time using the new code than installing the latest +version. + +The right length of time for a tester is probably around two weeks. However, for our three-to-four month +development cycle to work, we compress this down to one week; any less runs the risk +of people spending more time installing the code instead of running it. 
However, if there's a time +crunch (with a release date that cannot slip), it is better to release with new changes than to hold +back those changes to give people time to test between. People cannot test what is not released, so +we have to get those tarballs out there even if people feel they have to install more frequently. + + +Beta releases +------------- + +In a beta release, we know there are still bugs. We will continue to accept fixes for these. +Although we review these fixes, sometimes they can be invasive or potentially destabilize other +areas of the code. + +During the beta, we will no longer accept feature submissions. + + +Release candidates +------------------ + +In a release candidate, we've fixed all known blockers. Any remaining bugfixes are +ones that we are willing to leave out of the release. At this point we need user testing to +determine if there are any other blocker bugs lurking. + +Blocker bugs generally are those that cause significant problems for users. Regressions are +more likely to be considered blockers because they will break present users' usage of Ansible. + +The Release Manager will cherry-pick fixes for new release blockers. The release manager will also +choose whether to accept bugfixes for isolated areas of the code or defer those to the next minor +release. By themselves, non-blocker bugs will not trigger a new release; they will only make it +into the next major release if blocker bugs require that a new release be made. + +The last RC should be as close to the final as possible. The following things may be changed: + + * Version numbers are changed automatically and will differ as the pre-release tags are removed from + the versions. + * Tests and :file:`docs/docsite/` can differ if really needed as they do not break runtime. + However, the release manager may still reject them as they have the potential to cause + breakage that will be visible during the release process. + +.. 
note:: We want to specifically emphasize that code (in :file:`bin/`, :file:`lib/ansible/`, and + :file:`setup.py`) must be the same unless there are extraordinary extenuating circumstances. If + there are extenuating circumstances, the Release Manager is responsible for notifying groups + (like the Tower Team) which would want to test the code. + + +Ansible release process +======================= + +The release process is kept in a `separate document +<https://docs.google.com/document/d/10EWLkMesi9s_CK_GmbZlE_ZLhuQr6TBrdMLKo5dnMAI/edit#heading=h.ooo3izcel3cz>`_ +so that it can be easily updated during a release. If you need access to edit this, please ask one +of the current release managers to add you. diff --git a/docs/docsite/rst/community/reporting_bugs_and_features.rst b/docs/docsite/rst/community/reporting_bugs_and_features.rst new file mode 100644 index 00000000..4cf3ca62 --- /dev/null +++ b/docs/docsite/rst/community/reporting_bugs_and_features.rst @@ -0,0 +1,56 @@ +.. _reporting_bugs_and_features: + +************************************** +Reporting bugs and requesting features +************************************** + +.. contents:: + :local: + +.. _reporting_bugs: + +Reporting a bug +=============== + +Security bugs +------------- + +Ansible practices responsible disclosure - if this is a security-related bug, email `security@ansible.com <mailto:security@ansible.com>`_ instead of filing a ticket or posting to any public groups, and you will receive a prompt response. + +Bugs in ansible-base +-------------------- + +If you find a bug that affects multiple plugins, a plugin that remained in the ansible/ansible repo, or the overall functioning of Ansible, report it to `github.com/ansible/ansible/issues <https://github.com/ansible/ansible/issues>`_. You need a free GitHub account. Before reporting a bug, use the bug/issue search to see if the issue has already been reported. 
If you are not sure if something is a bug yet, you can report the behavior on the :ref:`mailing list or IRC first <communication>`. + +Do not open issues for "how do I do this" type questions. These are great topics for IRC or the mailing list, where things are likely to be more of a discussion. + +If you find a bug, open the issue yourself to ensure we have a record of it. Do not rely on someone else in the community to file the bug report for you. We have created an issue template, which saves time and helps us help everyone with their issues more quickly. Please fill it out as completely and as accurately as possible: + + * Include the Ansible version + * Include any relevant configuration + * Include the exact commands or tasks you are running + * Describe the behavior you expected + * Provide steps to reproduce the bug + * Use minimal well-reduced and well-commented examples, not your entire production playbook + * When sharing YAML in playbooks, preserve the formatting by using `code blocks <https://help.github.com/articles/creating-and-highlighting-code-blocks/>`_. + * Document the behavior you got + * Include output where possible + * For multiple-file content, use gist.github.com, which is more durable than pastebin content + +Bugs in collections +------------------- + +Many bugs only affect a single module or plugin. If you find a bug that affects a module or plugin hosted in a collection, file the bug in the repository of the :ref:`collection <collections>`: + + #. Find the collection on Galaxy. + #. Click on the Issue Tracker link for that collection. + #. Follow the contributor guidelines or instructions in the collection repo. + +If you are not sure whether a bug is in ansible-base or in a collection, you can report the behavior on the :ref:`mailing list or IRC first <communication>`. + +.. 
_request_features: + +Requesting a feature +==================== + +The best way to get a feature into Ansible is to :ref:`submit a pull request <community_pull_requests>`, either against ansible-base or against a collection. See also :ref:`ansible_collection_merge_requirements`. diff --git a/docs/docsite/rst/community/triage_process.rst b/docs/docsite/rst/community/triage_process.rst new file mode 100644 index 00000000..5560f655 --- /dev/null +++ b/docs/docsite/rst/community/triage_process.rst @@ -0,0 +1,8 @@ +************** +Triage Process +************** + +The issue and PR triage processes are driven by the `Ansibot <https://github.com/ansible/ansibullbot>`_. Whenever an issue or PR is filed, the Ansibot examines the issue to ensure that all relevant data is present, and handles the routing of the issue as it works its way to eventual completion. + +For details on how Ansibot manages the triage process, please consult the `Ansibot +Issue Guide <https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md>`_. diff --git a/docs/docsite/rst/core_index.rst b/docs/docsite/rst/core_index.rst new file mode 100644 index 00000000..0ee63ca9 --- /dev/null +++ b/docs/docsite/rst/core_index.rst @@ -0,0 +1,84 @@ +.. _ansible_core_documentation: + +.. + This is the index file for ansible-core. It gets symlinked to index.rst by the Makefile + +************************** +Ansible Core Documentation +************************** + +About ansible-core +=================== + +Ansible is an IT automation tool. It can configure systems, deploy software, and orchestrate more advanced IT tasks such as continuous deployments or zero downtime rolling updates. + +Ansible's main goals are simplicity and ease-of-use. 
It also has a strong focus on security and reliability, featuring a minimum of moving parts, usage of OpenSSH for transport (with other transports and pull modes as alternatives), and a language that is designed around auditability by humans--even those not familiar with the program. + +We believe simplicity is relevant to all sizes of environments, so we design for busy users of all types: developers, sysadmins, release engineers, IT managers, and everyone in between. Ansible is appropriate for managing all environments, from small setups with a handful of instances to enterprise environments with many thousands of instances. + +You can learn more at `AnsibleFest <https://www.ansible.com/ansiblefest>`_, the annual event for all Ansible contributors, users, and customers hosted by Red Hat. AnsibleFest is the place to connect with others, learn new skills, and find a new friend to automate with. + +Ansible manages machines in an agent-less manner. There is never a question of how to upgrade remote daemons or the problem of not being able to manage systems because daemons are uninstalled. Because OpenSSH is one of the most peer-reviewed open source components, security exposure is greatly reduced. Ansible is decentralized--it relies on your existing OS credentials to control access to remote machines. If needed, Ansible can easily connect with Kerberos, LDAP, and other centralized authentication management systems. + +This documentation covers the version of Ansible noted in the upper left corner of this page. We maintain multiple versions of Ansible and of the documentation, so please be sure you are using the version of the documentation that covers the version of Ansible you're using. For recent features, we note the version of Ansible where the feature was added. + + +``ansible-core`` releases a new major release approximately twice a year. The core application evolves somewhat conservatively, valuing simplicity in language design and setup. 
Contributors develop and change modules and plugins, hosted in collections since version 2.10, much more quickly. + + + +.. toctree:: + :maxdepth: 2 + :caption: Installation, Upgrade & Configuration + + installation_guide/index + porting_guides/core_porting_guides + +.. toctree:: + :maxdepth: 2 + :caption: Using Ansible Core + + user_guide/index + +.. toctree:: + :maxdepth: 2 + :caption: Contributing to Ansible Core + + community/index + +.. toctree:: + :maxdepth: 2 + :caption: Extending Ansible + + dev_guide/index + + +.. toctree:: + :maxdepth: 1 + :caption: Reference & Appendices + + collections/index + collections/all_plugins + reference_appendices/playbooks_keywords + reference_appendices/common_return_values + reference_appendices/config + reference_appendices/general_precedence + reference_appendices/YAMLSyntax + reference_appendices/python_3_support + reference_appendices/interpreter_discovery + reference_appendices/release_and_maintenance + reference_appendices/test_strategies + dev_guide/testing/sanity/index + reference_appendices/faq + reference_appendices/glossary + reference_appendices/module_utils + reference_appendices/special_variables + reference_appendices/tower + reference_appendices/automationhub + reference_appendices/logging + +.. toctree:: + :maxdepth: 2 + :caption: Roadmaps + + roadmap/ansible_base_roadmap_index.rst diff --git a/docs/docsite/rst/dev_guide/debugging.rst b/docs/docsite/rst/dev_guide/debugging.rst new file mode 100644 index 00000000..6885b252 --- /dev/null +++ b/docs/docsite/rst/dev_guide/debugging.rst @@ -0,0 +1,112 @@ +.. _debugging_modules: + +***************** +Debugging modules +***************** + +.. contents:: + :local: + +.. _detailed_debugging: + +Detailed debugging steps +======================== + +Ansible modules are put together as a zip file consisting of the module file and the various Python module boilerplate inside of a wrapper script. 
To see what is actually happening in the module, you need to extract the file from the wrapper. The wrapper script provides helper methods that let you do that. + +The following steps use ``localhost`` as the target host, but you can use the same steps to debug against remote hosts as well. For a simpler approach to debugging without using the temporary files, see :ref:`simple debugging <simple_debugging>`. + + +#. Set :envvar:`ANSIBLE_KEEP_REMOTE_FILES` to ``1`` on the control host so Ansible will keep the remote module files instead of deleting them after the module finishes executing. Use the ``-vvv`` option to make Ansible more verbose. This will display the file name of the temporary module file. + + .. code-block:: shell-session + + $ ANSIBLE_KEEP_REMOTE_FILES=1 ansible localhost -m ping -a 'data=debugging_session' -vvv + <127.0.0.1> ESTABLISH LOCAL CONNECTION FOR USER: badger + <127.0.0.1> EXEC /bin/sh -c '( umask 77 && mkdir -p "` echo $HOME/.ansible/tmp/ansible-tmp-1461434734.35-235318071810595 `" && echo "` echo $HOME/.ansible/tmp/ansible-tmp-1461434734.35-235318071810595 `" )' + <127.0.0.1> PUT /var/tmp/tmpjdbJ1w TO /home/badger/.ansible/tmp/ansible-tmp-1461434734.35-235318071810595/AnsiballZ_ping.py + <127.0.0.1> EXEC /bin/sh -c 'LANG=en_US.UTF-8 LC_ALL=en_US.UTF-8 LC_MESSAGES=en_US.UTF-8 /usr/bin/python /home/badger/.ansible/tmp/ansible-tmp-1461434734.35-235318071810595/AnsiballZ_ping.py && sleep 0' + localhost | SUCCESS => { + "changed": false, + "invocation": { + "module_args": { + "data": "debugging_session" + }, + "module_name": "ping" + }, + "ping": "debugging_session" + } + +#. Navigate to the temporary directory from the previous step. If the previous command was run against a remote host, connect to that host first before trying to navigate to the temporary directory. + + .. code-block:: shell-session + + $ ssh remotehost # only if not debugging against localhost + $ cd /home/badger/.ansible/tmp/ansible-tmp-1461434734.35-235318071810595 + +#. 
Run the wrapper's ``explode`` command to turn the string into some Python files that you can work with. + + .. code-block:: shell-session + + $ python AnsiballZ_ping.py explode + Module expanded into: + /home/badger/.ansible/tmp/ansible-tmp-1461434734.35-235318071810595/debug_dir + + If you want to examine the wrapper file you can. It will show a small Python script with a large base64 encoded string. The string contains the module to execute. + +#. When you look into the temporary directory you'll see a structure like this: + + .. code-block:: shell-session + + ├── AnsiballZ_ping.py + └── debug_dir + ├── ansible + │ ├── __init__.py + │ ├── module_utils + │ │ ├── __init__.py + │ │ ├── _text.py + │ │ ├── basic.py + │ │ ├── common + │ │ ├── compat + │ │ ├── distro + │ │ ├── parsing + │ │ ├── pycompat24.py + │ │ └── six + │ └── modules + │ ├── __init__.py + │ └── ping.py + └── args + + * ``AnsiballZ_ping.py`` is the Python script with the module code stored in a base64 encoded string. It contains various helper functions for executing the module. + + * ``ping.py`` is the code for the module itself. You can modify this code to see what effect it would have on your module, or for debugging purposes. + + * The ``args`` file contains a JSON string. The string is a dictionary containing the module arguments and other variables that Ansible passes into the module to change its behavior. Modify this file to change the parameters passed to the module. + + * The ``ansible`` directory contains the module code in ``modules`` as well as code from :mod:`ansible.module_utils` that is used by the module. Ansible includes files for any :mod:`ansible.module_utils` imports in the module but not any files from any other module. If your module uses :mod:`ansible.module_utils.url` Ansible will include it for you. 
But if your module includes `requests <https://requests.readthedocs.io/en/master/api/>`_, then you'll have to make sure that the Python `requests library <https://pypi.org/project/requests/>`_ is installed on the system before running the module. + + You can modify files in this directory if you suspect that the module is having a problem in some of this boilerplate code rather than in the module code you have written. + +#. Once you edit the code or arguments in the exploded tree, use the ``execute`` subcommand to run it: + + .. code-block:: shell-session + + $ python AnsiballZ_ping.py execute + {"invocation": {"module_args": {"data": "debugging_session"}}, "changed": false, "ping": "debugging_session"} + + This subcommand inserts the absolute path to ``debug_dir`` as the first item in ``sys.path`` and invokes the script using the arguments in the ``args`` file. You can continue to run the module like this until you understand the problem. Then you can copy the changes back into your real module file and test that the real module works via ``ansible`` or ``ansible-playbook``. + + +.. _simple_debugging: + +Simple debugging +================ + +The easiest way to run a debugger in a module, either local or remote, is to use `epdb <https://pypi.org/project/epdb/>`_. Add ``import epdb; epdb.serve()`` in the module code on the control node at the desired break point. To connect to the debugger, run ``epdb.connect()``. See the `epdb documentation <https://pypi.org/project/epdb/>`_ for how to specify the ``host`` and ``port``. If connecting to a remote node, make sure to use a port that is allowed by any firewall between the control node and the remote node. + +This technique should work with any remote debugger, but we do not guarantee any particular remote debugging tool will work. + +The `q <https://pypi.org/project/q/>`_ library is another very useful debugging tool. 
+ +Since ``print()`` statements do not work inside modules, raising an exception is a good approach if you just want to see some specific data. Put ``raise Exception(some_value)`` somewhere in the module and run it normally. Ansible will handle this exception, pass the message back to the control node, and display it. + diff --git a/docs/docsite/rst/dev_guide/developing_api.rst b/docs/docsite/rst/dev_guide/developing_api.rst new file mode 100644 index 00000000..eeff4684 --- /dev/null +++ b/docs/docsite/rst/dev_guide/developing_api.rst @@ -0,0 +1,47 @@ +.. _developing_api: + +********** +Python API +********** + +.. contents:: Topics + +.. note:: This API is intended for internal Ansible use. Ansible may make changes to this API at any time that could break backward compatibility with older versions of the API. Because of this, external use is not supported by Ansible. If you want to use Python API only for executing playbooks or modules, consider `ansible-runner <https://ansible-runner.readthedocs.io/en/latest/>`_ first. + +There are several ways to use Ansible from an API perspective. You can use +the Ansible Python API to control nodes, you can extend Ansible to respond to various Python events, you can +write plugins, and you can plug in inventory data from external data sources. This document +gives a basic overview and examples of the Ansible execution and playbook API. + +If you would like to use Ansible programmatically from a language other than Python, trigger events asynchronously, +or have access control and logging demands, please see the `Ansible Tower documentation <https://docs.ansible.com/ansible-tower/>`_. + +.. note:: Because Ansible relies on forking processes, this API is not thread safe. + +.. _python_api_example: + +Python API example +================== + +This example is a simple demonstration that shows how to minimally run a couple of tasks: + +.. literalinclude:: ../../../../examples/scripts/uptime.py + :language: python + +.. 
note:: Ansible emits warnings and errors via the display object, which prints directly to stdout, stderr and the Ansible log. + +The source code for the ``ansible`` +command line tools (``lib/ansible/cli/``) is `available on GitHub <https://github.com/ansible/ansible/tree/devel/lib/ansible/cli>`_. + +.. seealso:: + + :ref:`developing_inventory` + Developing dynamic inventory integrations + :ref:`developing_modules_general` + Getting started on developing a module + :ref:`developing_plugins` + How to develop plugins + `Development Mailing List <https://groups.google.com/group/ansible-devel>`_ + Mailing list for development topics + `irc.freenode.net <http://irc.freenode.net>`_ + #ansible IRC chat channel diff --git a/docs/docsite/rst/dev_guide/developing_collections.rst b/docs/docsite/rst/dev_guide/developing_collections.rst new file mode 100644 index 00000000..3aa25502 --- /dev/null +++ b/docs/docsite/rst/dev_guide/developing_collections.rst @@ -0,0 +1,812 @@ + +.. _developing_collections: + +********************** +Developing collections +********************** + +Collections are a distribution format for Ansible content. You can use collections to package and distribute playbooks, roles, modules, and plugins. +You can publish and use collections through `Ansible Galaxy <https://galaxy.ansible.com>`_. + +* For details on how to *use* collections see :ref:`collections`. +* For the current development status of Collections and FAQ see `Ansible Collections Overview and FAQ <https://github.com/ansible-collections/overview/blob/main/README.rst>`_. + +.. contents:: + :local: + :depth: 2 + +.. _collection_structure: + +Collection structure +==================== + +Collections follow a simple data structure. None of the directories are required unless you have specific content that belongs in one of them. A collection does require a ``galaxy.yml`` file at the root level of the collection. 
This file contains all of the metadata that Galaxy and other tools need in order to package, build and publish the collection:: + + collection/ + ├── docs/ + ├── galaxy.yml + ├── meta/ + │ └── runtime.yml + ├── plugins/ + │ ├── modules/ + │ │ └── module1.py + │ ├── inventory/ + │ └── .../ + ├── README.md + ├── roles/ + │ ├── role1/ + │ ├── role2/ + │ └── .../ + ├── playbooks/ + │ ├── files/ + │ ├── vars/ + │ ├── templates/ + │ └── tasks/ + └── tests/ + + +.. note:: + * Ansible only accepts ``.md`` extensions for the :file:`README` file and any files in the :file:`/docs` folder. + * See the `ansible-collections <https://github.com/ansible-collections/>`_ GitHub Org for examples of collection structure. + * Not all directories are currently in use. Those are placeholders for future features. + +.. _galaxy_yml: + +galaxy.yml +---------- + +A collection must have a ``galaxy.yml`` file that contains the necessary information to build a collection artifact. +See :ref:`collections_galaxy_meta` for details. + +.. _collections_doc_dir: + +docs directory +--------------- + +Put general documentation for the collection here. Keep the specific documentation for plugins and modules embedded as Python docstrings. Use the ``docs`` folder to describe how to use the roles and plugins the collection provides, role requirements, and so on. Use markdown and do not add subfolders. + +Use ``ansible-doc`` to view documentation for plugins inside a collection: + +.. code-block:: bash + + ansible-doc -t lookup my_namespace.my_collection.lookup1 + +The ``ansible-doc`` command requires the fully qualified collection name (FQCN) to display specific plugin documentation. In this example, ``my_namespace`` is the Galaxy namespace and ``my_collection`` is the collection name within that namespace. + +.. note:: The Galaxy namespace of an Ansible collection is defined in the ``galaxy.yml`` file. It can be different from the GitHub organization or repository name. + +.. 
_collections_plugin_dir: + +plugins directory +------------------ + +Add a 'per plugin type' specific subdirectory here, including ``module_utils`` which is usable not only by modules, but by most plugins by using their FQCN. This is a way to distribute modules, lookups, filters, and so on without having to import a role in every play. + +Vars plugins are unsupported in collections. Cache plugins may be used in collections for fact caching, but are not supported for inventory plugins. + +.. _collection_module_utils: + +module_utils +^^^^^^^^^^^^ + +When coding with ``module_utils`` in a collection, the Python ``import`` statement needs to take into account the FQCN along with the ``ansible_collections`` convention. The resulting Python import will look like ``from ansible_collections.{namespace}.{collection}.plugins.module_utils.{util} import {something}`` + +The following example snippets show a Python and PowerShell module using both default Ansible ``module_utils`` and +those provided by a collection. In this example the namespace is ``community``, the collection is ``test_collection``. +In the Python example the ``module_util`` in question is called ``qradar`` such that the FQCN is +``community.test_collection.plugins.module_utils.qradar``: + +.. 
code-block:: python + + from ansible.module_utils.basic import AnsibleModule + from ansible.module_utils._text import to_text + + from ansible.module_utils.six.moves.urllib.parse import urlencode, quote_plus + from ansible.module_utils.six.moves.urllib.error import HTTPError + from ansible_collections.community.test_collection.plugins.module_utils.qradar import QRadarRequest + + argspec = dict( + name=dict(required=True, type='str'), + state=dict(choices=['present', 'absent'], required=True), + ) + + module = AnsibleModule( + argument_spec=argspec, + supports_check_mode=True + ) + + qradar_request = QRadarRequest( + module, + headers={"Content-Type": "application/json"}, + not_rest_data_keys=['state'] + ) + +Note that importing something from an ``__init__.py`` file requires using the file name: + +.. code-block:: python + + from ansible_collections.namespace.collection_name.plugins.callback.__init__ import CustomBaseClass + +In the PowerShell example the ``module_util`` in question is called ``hyperv`` such that the FCQN is +``community.test_collection.plugins.module_utils.hyperv``: + +.. code-block:: powershell + + #!powershell + #AnsibleRequires -CSharpUtil Ansible.Basic + #AnsibleRequires -PowerShell ansible_collections.community.test_collection.plugins.module_utils.hyperv + + $spec = @{ + name = @{ required = $true; type = "str" } + state = @{ required = $true; choices = @("present", "absent") } + } + $module = [Ansible.Basic.AnsibleModule]::Create($args, $spec) + + Invoke-HyperVFunction -Name $module.Params.name + + $module.ExitJson() + +.. _collections_roles_dir: + +roles directory +---------------- + +Collection roles are mostly the same as existing roles, but with a couple of limitations: + + - Role names are now limited to contain only lowercase alphanumeric characters, plus ``_`` and start with an alpha character. + - Roles in a collection cannot contain plugins any more. Plugins must live in the collection ``plugins`` directory tree. 
Each plugin is accessible to all roles in the collection. + +The directory name of the role is used as the role name. Therefore, the directory name must comply with the +above role name rules. +The collection import into Galaxy will fail if a role name does not comply with these rules. + +You can migrate 'traditional roles' into a collection but they must follow the rules above. You may need to rename roles if they don't conform. You will have to move or link any role-based plugins to the collection specific directories. + +.. note:: + + For roles imported into Galaxy directly from a GitHub repository, setting the ``role_name`` value in the role's metadata overrides the role name used by Galaxy. For collections, that value is ignored. When importing a collection, Galaxy uses the role directory as the name of the role and ignores the ``role_name`` metadata value. + +playbooks directory +-------------------- + +TBD. + +.. _developing_collections_tests_directory: + +tests directory +---------------- + +Ansible Collections are tested much like Ansible itself, by using the +`ansible-test` utility which is released as part of Ansible, version 2.9.0 and +newer. Because Ansible Collections are tested using the same tooling as Ansible +itself, via `ansible-test`, all Ansible developer documentation for testing is +applicable for authoring Collections Tests with one key concept to keep in mind. + +See :ref:`testing_collections` for specific information on how to test collections +with ``ansible-test``. + +When reading the :ref:`developing_testing` documentation, there will be content +that applies to running Ansible from source code via a git clone, which is +typical of an Ansible developer. However, it's not always typical for an Ansible +Collection author to be running Ansible from source but instead from a stable +release, and to create Collections it is not necessary to run Ansible from +source. 
Therefore, when references of dealing with `ansible-test` binary paths, +command completion, or environment variables are presented throughout the +:ref:`developing_testing` documentation; keep in mind that it is not needed for +Ansible Collection Testing because the act of installing the stable release of +Ansible containing `ansible-test` is expected to setup those things for you. + +.. _meta_runtime_yml: + +meta directory +-------------- + +A collection can store some additional metadata in a ``runtime.yml`` file in the collection's ``meta`` directory. The ``runtime.yml`` file supports the top level keys: + +- *requires_ansible*: + + The version of Ansible required to use the collection. Multiple versions can be separated with a comma. + + .. code:: yaml + + requires_ansible: ">=2.10,<2.11" + + .. note:: although the version is a `PEP440 Version Specifier <https://www.python.org/dev/peps/pep-0440/#version-specifiers>`_ under the hood, Ansible deviates from PEP440 behavior by truncating prerelease segments from the Ansible version. This means that Ansible 2.11.0b1 is compatible with something that ``requires_ansible: ">=2.11"``. + +- *plugin_routing*: + + Content in a collection that Ansible needs to load from another location or that has been deprecated/removed. + The top level keys of ``plugin_routing`` are types of plugins, with individual plugin names as subkeys. + To define a new location for a plugin, set the ``redirect`` field to another name. + To deprecate a plugin, use the ``deprecation`` field to provide a custom warning message and the removal version or date. If the plugin has been renamed or moved to a new location, the ``redirect`` field should also be provided. If a plugin is being removed entirely, ``tombstone`` can be used for the fatal error message and removal version or date. + + .. 
code:: yaml + + plugin_routing: + inventory: + kubevirt: + redirect: community.general.kubevirt + my_inventory: + tombstone: + removal_version: "2.0.0" + warning_text: my_inventory has been removed. Please use other_inventory instead. + modules: + my_module: + deprecation: + removal_date: "2021-11-30" + warning_text: my_module will be removed in a future release of this collection. Use another.collection.new_module instead. + redirect: another.collection.new_module + podman_image: + redirect: containers.podman.podman_image + module_utils: + ec2: + redirect: amazon.aws.ec2 + util_dir.subdir.my_util: + redirect: namespace.name.my_util + +- *import_redirection* + + A mapping of names for Python import statements and their redirected locations. + + .. code:: yaml + + import_redirection: + ansible.module_utils.old_utility: + redirect: ansible_collections.namespace_name.collection_name.plugins.module_utils.new_location + + +.. _creating_collections_skeleton: + +Creating a collection skeleton +------------------------------ + +To start a new collection: + +.. code-block:: bash + + collection_dir#> ansible-galaxy collection init my_namespace.my_collection + +.. note:: + + Both the namespace and collection names use the same strict set of requirements. See `Galaxy namespaces <https://galaxy.ansible.com/docs/contributing/namespaces.html#galaxy-namespaces>`_ on the Galaxy docsite for those requirements. + +Once the skeleton exists, you can populate the directories with the content you want inside the collection. See `ansible-collections <https://github.com/ansible-collections/>`_ GitHub Org to get a better idea of what you can place inside a collection. + +.. _creating_collections: + +Creating collections +====================== + +To create a collection: + +#. Create a collection skeleton with the ``collection init`` command. See :ref:`creating_collections_skeleton` above. +#. Add your content to the collection. +#. 
Build the collection into a collection artifact with :ref:`ansible-galaxy collection build<building_collections>`. +#. Publish the collection artifact to Galaxy with :ref:`ansible-galaxy collection publish<publishing_collections>`. + +A user can then install your collection on their systems. + +Currently the ``ansible-galaxy collection`` command implements the following sub commands: + +* ``init``: Create a basic collection skeleton based on the default template included with Ansible or your own template. +* ``build``: Create a collection artifact that can be uploaded to Galaxy or your own repository. +* ``publish``: Publish a built collection artifact to Galaxy. +* ``install``: Install one or more collections. + +To learn more about the ``ansible-galaxy`` command-line tool, see the :ref:`ansible-galaxy` man page. + + +.. _docfragments_collections: + +Using documentation fragments in collections +-------------------------------------------- + +To include documentation fragments in your collection: + +#. Create the documentation fragment: ``plugins/doc_fragments/fragment_name``. + +#. Refer to the documentation fragment with its FQCN. + +.. code-block:: yaml + + extends_documentation_fragment: + - community.kubernetes.k8s_name_options + - community.kubernetes.k8s_auth_options + - community.kubernetes.k8s_resource_options + - community.kubernetes.k8s_scale_options + +:ref:`module_docs_fragments` covers the basics for documentation fragments. The `kubernetes <https://github.com/ansible-collections/kubernetes>`_ collection includes a complete example. + +You can also share documentation fragments across collections with the FQCN. + +.. _building_collections: + +Building collections +-------------------- + +To build a collection, run ``ansible-galaxy collection build`` from inside the root directory of the collection: + +.. 
code-block:: bash + + collection_dir#> ansible-galaxy collection build + +This creates a tarball of the built collection in the current directory which can be uploaded to Galaxy.:: + + my_collection/ + ├── galaxy.yml + ├── ... + ├── my_namespace-my_collection-1.0.0.tar.gz + └── ... + +.. note:: + * Certain files and folders are excluded when building the collection artifact. See :ref:`ignoring_files_and_folders_collections` to exclude other files you would not want to distribute. + * If you used the now-deprecated ``Mazer`` tool for any of your collections, delete any and all files it added to your :file:`releases/` directory before you build your collection with ``ansible-galaxy``. + * The current Galaxy maximum tarball size is 2 MB. + + +This tarball is mainly intended to upload to Galaxy +as a distribution method, but you can use it directly to install the collection on target systems. + +.. _ignoring_files_and_folders_collections: + +Ignoring files and folders +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +By default the build step will include all the files in the collection directory in the final build artifact except for the following: + +* ``galaxy.yml`` +* ``*.pyc`` +* ``*.retry`` +* ``tests/output`` +* previously built artifacts in the root directory +* various version control directories like ``.git/`` + +To exclude other files and folders when building the collection, you can set a list of file glob-like patterns in the +``build_ignore`` key in the collection's ``galaxy.yml`` file. These patterns use the following special characters for +wildcard matching: + +* ``*``: Matches everything +* ``?``: Matches any single character +* ``[seq]``: Matches any character in seq +* ``[!seq]``: Matches any character not in seq + +For example, if you wanted to exclude the :file:`sensitive` folder within the ``playbooks`` folder as well as any ``.tar.gz`` archives you +can set the following in your ``galaxy.yml`` file: + +.. 
code-block:: yaml + + build_ignore: + - playbooks/sensitive + - '*.tar.gz' + +.. note:: + This feature is only supported when running ``ansible-galaxy collection build`` with Ansible 2.10 or newer. + + +.. _trying_collection_locally: + +Trying collections locally +-------------------------- + +You can try your collection locally by installing it from the tarball. The following will enable an adjacent playbook to +access the collection: + +.. code-block:: bash + + ansible-galaxy collection install my_namespace-my_collection-1.0.0.tar.gz -p ./collections + + +You should use one of the values configured in :ref:`COLLECTIONS_PATHS` for your path. This is also where Ansible itself will +expect to find collections when attempting to use them. If you don't specify a path value, ``ansible-galaxy collection install`` +installs the collection in the first path defined in :ref:`COLLECTIONS_PATHS`, which by default is ``~/.ansible/collections``. + +Next, try using the local collection inside a playbook. For examples and more details see :ref:`Using collections <using_collections>` + +.. _collections_scm_install: + +Installing collections from a git repository +-------------------------------------------- + +You can also test a version of your collection in development by installing it from a git repository. + +.. code-block:: bash + + ansible-galaxy collection install git+https://github.com/org/repo.git,devel + +.. include:: ../shared_snippets/installing_collections_git_repo.txt + +.. _publishing_collections: + +Publishing collections +---------------------- + +You can publish collections to Galaxy using the ``ansible-galaxy collection publish`` command or the Galaxy UI itself. You need a namespace on Galaxy to upload your collection. See `Galaxy namespaces <https://galaxy.ansible.com/docs/contributing/namespaces.html#galaxy-namespaces>`_ on the Galaxy docsite for details. + +.. note:: Once you upload a version of a collection, you cannot delete or modify that version. 
Ensure that everything looks okay before you upload it. + +.. _galaxy_get_token: + +Getting your API token +^^^^^^^^^^^^^^^^^^^^^^ + +To upload your collection to Galaxy, you must first obtain an API token (``--token`` in the ``ansible-galaxy`` CLI command or ``token`` in the :file:`ansible.cfg` file under the ``galaxy_server`` section). The API token is a secret token used to protect your content. + +To get your API token: + +* For Galaxy, go to the `Galaxy profile preferences <https://galaxy.ansible.com/me/preferences>`_ page and click :guilabel:`API Key`. +* For Automation Hub, go to https://cloud.redhat.com/ansible/automation-hub/token/ and click :guilabel:`Load token` from the version dropdown. + +Storing or using your API token +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Once you have retrieved your API token, you can store or use the token for collections in two ways: + +* Pass the token to the ``ansible-galaxy`` command using the ``--token``. +* Specify the token within a Galaxy server list in your :file:`ansible.cfg` file. + +Using the ``token`` argument +............................ + +You can use the ``--token`` argument with the ``ansible-galaxy`` command (in conjunction with the ``--server`` argument or :ref:`GALAXY_SERVER` setting in your :file:`ansible.cfg` file). You cannot use ``--token`` with any servers defined in your :ref:`Galaxy server list <galaxy_server_config>`. + +.. code-block:: text + + ansible-galaxy collection publish ./geerlingguy-collection-1.2.3.tar.gz --token=<key goes here> + + +Specify the token within a Galaxy server list +............................................. + +With this option, you configure one or more servers for Galaxy in your :file:`ansible.cfg` file under the ``galaxy_server_list`` section. For each server, you also configure the token. + + +.. 
code-block:: ini + + [galaxy] + server_list = release_galaxy + + [galaxy_server.release_galaxy] + url=https://galaxy.ansible.com/ + token=my_token + +See :ref:`galaxy_server_config` for complete details. + +.. _upload_collection_ansible_galaxy: + +Upload using ansible-galaxy +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. note:: + By default, ``ansible-galaxy`` uses https://galaxy.ansible.com as the Galaxy server (as listed in the :file:`ansible.cfg` file under :ref:`galaxy_server`). If you are only publishing your collection to Ansible Galaxy, you do not need any further configuration. If you are using Red Hat Automation Hub or any other Galaxy server, see :ref:`Configuring the ansible-galaxy client <galaxy_server_config>`. + +To upload the collection artifact with the ``ansible-galaxy`` command: + +.. code-block:: bash + + ansible-galaxy collection publish path/to/my_namespace-my_collection-1.0.0.tar.gz + +.. note:: + + The above command assumes you have retrieved and stored your API token as part of a Galaxy server list. See :ref:`galaxy_get_token` for details. + +The ``ansible-galaxy collection publish`` command triggers an import process, just as if you uploaded the collection through the Galaxy website. +The command waits until the import process completes before reporting the status back. If you want to continue +without waiting for the import result, use the ``--no-wait`` argument and manually look at the import progress in your +`My Imports <https://galaxy.ansible.com/my-imports/>`_ page. + + +.. _upload_collection_galaxy: + +Upload a collection from the Galaxy website +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +To upload your collection artifact directly on Galaxy: + +#. Go to the `My Content <https://galaxy.ansible.com/my-content/namespaces>`_ page, and click the **Add Content** button on one of your namespaces. +#. From the **Add Content** dialogue, click **Upload New Collection**, and select the collection archive file from your local filesystem. 
+ +When uploading collections it doesn't matter which namespace you select. The collection will be uploaded to the +namespace specified in the collection metadata in the ``galaxy.yml`` file. If you're not an owner of the +namespace, the upload request will fail. + +Once Galaxy uploads and accepts a collection, you will be redirected to the **My Imports** page, which displays output from the +import process, including any errors or warnings about the metadata and content contained in the collection. + +.. _collection_versions: + +Collection versions +------------------- + +Once you upload a version of a collection, you cannot delete or modify that version. Ensure that everything looks okay before +uploading. The only way to change a collection is to release a new version. The latest version of a collection (by highest version number) +will be the version displayed everywhere in Galaxy; however, users will still be able to download older versions. + +Collection versions use `Semantic Versioning <https://semver.org/>`_ for version numbers. Please read the official documentation for details and examples. In summary: + +* Increment major (for example: x in `x.y.z`) version number for an incompatible API change. +* Increment minor (for example: y in `x.y.z`) version number for new functionality in a backwards compatible manner (for example new modules/plugins, parameters, return values). +* Increment patch (for example: z in `x.y.z`) version number for backwards compatible bug fixes. + +.. _migrate_to_collection: + +Migrating Ansible content to a different collection +==================================================== + +First, look at `Ansible Collection Checklist <https://github.com/ansible-collections/overview/blob/main/collection_requirements.rst>`_. + +To migrate content from one collection to another, if the collections are parts of `Ansible distribution <https://github.com/ansible-community/ansible-build-data/blob/main/2.10/ansible.in>`_: + +#. 
Copy content from the source (old) collection to the target (new) collection.
+#. Deprecate the module/plugin with ``removal_version`` scheduled for the next major version in ``meta/runtime.yml`` of the old collection. The deprecation must be released after the copied content has been included in a release of the new collection.
+#. When the next major release of the old collection is prepared:
+
+   * remove the module/plugin from the old collection
+   * remove the symlink stored in ``plugins/modules`` directory if appropriate (mainly when removing from ``community.general`` and ``community.network``)
+   * remove related unit and integration tests
+   * remove specific module utils
+   * remove specific documentation fragments if there are any in the old collection
+   * add a changelog fragment containing entries for ``removed_features`` and ``breaking_changes``; you can see an example of a changelog fragment in this `pull request <https://github.com/ansible-collections/community.general/pull/1304>`_
+   * change ``meta/runtime.yml`` in the old collection:
+
+   * add ``redirect`` to the corresponding module/plugin's entry
+   * in particular, add ``redirect`` for the removed module utils and documentation fragments if applicable
+   * remove ``removal_version`` from there
+   * remove related entries from ``tests/sanity/ignore.txt`` files if they exist
+   * remove changelog fragments for removed content that are not yet part of the changelog (in other words, do not modify `changelogs/changelog.yaml` and do not delete files mentioned in it)
+   * remove requirements that are no longer required in ``tests/unit/requirements.txt``, ``tests/requirements.yml`` and ``galaxy.yml``
+
+According to the above, you need to create at least three PRs as follows:
+
+#. Create a PR against the new collection to copy the content.
+#. Deprecate the module/plugin in the old collection.
+#. Later create a PR against the old collection to remove the content according to the schedule. 
+
+
+Adding the content to the new collection
+----------------------------------------
+
+Create a PR in the new collection to:
+
+#. Copy ALL the related files from the old collection.
+#. If it is an action plugin, include the corresponding module with documentation.
+#. If it is a module, check if it has a corresponding action plugin that should move with it.
+#. Check ``meta/`` for relevant updates to ``runtime.yml`` if it exists.
+#. Carefully check the moved ``tests/integration`` and ``tests/unit`` and update for FQCN.
+#. Review ``tests/sanity/ignore-*.txt`` entries in the old collection.
+#. Update ``meta/runtime.yml`` in the old collection.
+
+
+Removing the content from the old collection
+--------------------------------------------
+
+Create a PR against the source collection repository to remove the modules, module_utils, plugins, and docs_fragments related to this migration:
+
+#. If you are removing an action plugin, remove the corresponding module that contains the documentation.
+#. If you are removing a module, remove any corresponding action plugin that should stay with it.
+#. Remove any entries about removed plugins from ``meta/runtime.yml``. Ensure they are added into the new repo.
+#. Remove sanity ignore lines from ``tests/sanity/ignore-*.txt``.
+#. Remove associated integration tests from ``tests/integration/targets/`` and unit tests from ``tests/unit/plugins/``.
+#. If you are removing content from ``community.general`` or ``community.network``, remove entries from ``.github/BOTMETA.yml``.
+#. Carefully review ``meta/runtime.yml`` for any entries you may need to remove or update, in particular deprecated entries.
+#. Update ``meta/runtime.yml`` to contain redirects for EVERY PLUGIN, pointing to the new collection name.
+
+.. warning::
+
+   Maintainers for the old collection have to make sure that the PR is merged in a way that it does not break user experience and semantic versioning:
+
+   #. A new version containing the merged PR must not be released before the collection the content has been moved to has been released again, with that content contained in it. Otherwise the redirects cannot work and users relying on that content will experience breakage.
+   #. Once 1.0.0 of the collection from which the content has been removed has been released, such PRs can only be merged for a new **major** version (in other words, 2.0.0, 3.0.0, and so on).
+
+
+BOTMETA.yml
+-----------
+
+The ``BOTMETA.yml``, for example in `community.general collection repository <https://github.com/ansible-collections/community.general/blob/main/.github/BOTMETA.yml>`_, is the source of truth for:
+
+* ansibullbot
+
+If the old and/or new collection has ``ansibullbot``, its ``BOTMETA.yml`` must be updated correspondingly.
+
+Ansibullbot will know how to redirect existing issues and PRs to the new repo.
+The build process for docs.ansible.com will know where to find the module docs.
+
+.. code-block:: yaml
+
+   $modules/monitoring/grafana/grafana_plugin.py:
+     migrated_to: community.grafana
+   $modules/monitoring/grafana/grafana_dashboard.py:
+     migrated_to: community.grafana
+   $modules/monitoring/grafana/grafana_datasource.py:
+     migrated_to: community.grafana
+   $plugins/callback/grafana_annotations.py:
+     maintainers: $team_grafana
+     labels: monitoring grafana
+     migrated_to: community.grafana
+   $plugins/doc_fragments/grafana.py:
+     maintainers: $team_grafana
+     labels: monitoring grafana
+     migrated_to: community.grafana
+
+`Example PR <https://github.com/ansible/ansible/pull/66981/files>`_
+
+* The ``migrated_to:`` key must be added explicitly for every *file*. You cannot add ``migrated_to`` at the directory level. This is to allow module and plugin webdocs to be redirected to the new collection docs. 
+* ``migrated_to:`` MUST be added for every:
+
+  * module
+  * plugin
+  * module_utils
+  * contrib/inventory script
+
+* You do NOT need to add ``migrated_to`` for:
+
+  * Unit tests
+  * Integration tests
+  * ReStructured Text docs (anything under ``docs/docsite/rst/``)
+  * Files that never existed in ``ansible/ansible:devel``
+
+.. _testing_collections:
+
+Testing collections
+===================
+
+The main tool for testing collections is ``ansible-test``, Ansible's testing tool described in :ref:`developing_testing`. You can run several compile and sanity checks, as well as run unit and integration tests for plugins using ``ansible-test``. When you test collections, test against the ansible-base version(s) you are targeting.
+
+You must always execute ``ansible-test`` from the root directory of a collection. You can run ``ansible-test`` in Docker containers without installing any special requirements. The Ansible team uses this approach in Shippable both in the ansible/ansible GitHub repository and in the large community collections such as `community.general <https://github.com/ansible-collections/community.general/>`_ and `community.network <https://github.com/ansible-collections/community.network/>`_. The examples below demonstrate running tests in Docker containers.
+
+Compile and sanity tests
+------------------------
+
+To run all compile and sanity tests::
+
+   ansible-test sanity --docker default -v
+
+See :ref:`testing_compile` and :ref:`testing_sanity` for more information. See the :ref:`full list of sanity tests <all_sanity_tests>` for details on the sanity tests and how to fix identified issues.
+
+Unit tests
+----------
+
+You must place unit tests in the appropriate ``tests/unit/plugins/`` directory. For example, you would place tests for ``plugins/module_utils/foo/bar.py`` in ``tests/unit/plugins/module_utils/foo/test_bar.py`` or ``tests/unit/plugins/module_utils/foo/bar/test_bar.py``. For examples, see the `unit tests in community.general <https://github.com/ansible-collections/community.general/tree/master/tests/unit/>`_.
+
+To run all unit tests for all supported Python versions::
+
+   ansible-test units --docker default -v
+
+To run all unit tests only for a specific Python version::
+
+   ansible-test units --docker default -v --python 3.6
+
+To run only a specific unit test::
+
+   ansible-test units --docker default -v --python 3.6 tests/unit/plugins/module_utils/foo/test_bar.py
+
+You can specify Python requirements in the ``tests/unit/requirements.txt`` file. See :ref:`testing_units` for more information, especially on fixture files.
+
+Integration tests
+-----------------
+
+You must place integration tests in the appropriate ``tests/integration/targets/`` directory. For module integration tests, you can use the module name alone. For example, you would place integration tests for ``plugins/modules/foo.py`` in a directory called ``tests/integration/targets/foo/``. For non-module plugin integration tests, you must add the plugin type to the directory name. For example, you would place integration tests for ``plugins/connections/bar.py`` in a directory called ``tests/integration/targets/connection_bar/``. For lookup plugins, the directory must be called ``lookup_foo``, for inventory plugins, ``inventory_foo``, and so on.
+
+You can write two different kinds of integration tests:
+
+* Ansible role tests run with ``ansible-playbook`` and validate various aspects of the module. They can depend on other integration tests (usually named ``prepare_bar`` or ``setup_bar``, which prepare a service or install a requirement named ``bar`` in order to test module ``foo``) to set up required resources, such as installing required libraries or setting up server services.
+* ``runme.sh`` tests run directly as scripts. They can set up inventory files, and execute ``ansible-playbook`` or ``ansible-inventory`` with various settings. 
+
+For examples, see the `integration tests in community.general <https://github.com/ansible-collections/community.general/tree/master/tests/integration/targets/>`_. See also :ref:`testing_integration` for more details.
+
+Since integration tests can install requirements, and set up, start and stop services, we recommend running them in docker containers or otherwise restricted environments whenever possible. By default, ``ansible-test`` supports Docker images for several operating systems. See the `list of supported docker images <https://github.com/ansible/ansible/blob/devel/test/lib/ansible_test/_data/completion/docker.txt>`_ for all options. Use the ``default`` image mainly for platform-independent integration tests, such as those for cloud modules. The following examples use the ``centos8`` image.
+
+To execute all integration tests for a collection::
+
+   ansible-test integration --docker centos8 -v
+
+If you want more detailed output, run the command with ``-vvv`` instead of ``-v``. Alternatively, specify ``--retry-on-error`` to automatically re-run failed tests with higher verbosity levels.
+
+To execute only the integration tests in a specific directory::
+
+   ansible-test integration --docker centos8 -v connection_bar
+
+You can specify multiple target names. Each target name is the name of a directory in ``tests/integration/targets/``.
+
+.. _hacking_collections:
+
+Contributing to collections
+===========================
+
+If you want to add functionality to an existing collection, modify a collection you are using to fix a bug, or change the behavior of a module in a collection, clone the git repository for that collection and make changes on a branch. You can combine changes to a collection with a local checkout of Ansible (``source hacking/env-setup``).
+
+This section describes the process for `community.general <https://github.com/ansible-collections/community.general/>`_. 
To contribute to other collections, replace the folder names ``community`` and ``general`` with the namespace and collection name of a different collection. + +We assume that you have included ``~/dev/ansible/collections/`` in :ref:`COLLECTIONS_PATHS`, and if that path mentions multiple directories, that you made sure that no other directory earlier in the search path contains a copy of ``community.general``. Create the directory ``~/dev/ansible/collections/ansible_collections/community``, and in it clone `the community.general Git repository <https://github.com/ansible-collections/community.general/>`_ or a fork of it into the folder ``general``:: + + mkdir -p ~/dev/ansible/collections/ansible_collections/community + cd ~/dev/ansible/collections/ansible_collections/community + git clone git@github.com:ansible-collections/community.general.git general + +If you clone a fork, add the original repository as a remote ``upstream``:: + + cd ~/dev/ansible/collections/ansible_collections/community/general + git remote add upstream git@github.com:ansible-collections/community.general.git + +Now you can use this checkout of ``community.general`` in playbooks and roles with whichever version of Ansible you have installed locally, including a local checkout of ``ansible/ansible``'s ``devel`` branch. + +For collections hosted in the ``ansible_collections`` GitHub org, create a branch and commit your changes on the branch. When you are done (remember to add tests, see :ref:`testing_collections`), push your changes to your fork of the collection and create a Pull Request. For other collections, especially for collections not hosted on GitHub, check the ``README.md`` of the collection for information on contributing to it. + +.. 
_collection_changelogs: + +Generating changelogs for a collection +====================================== + +We recommend that you use the `antsibull-changelog <https://github.com/ansible-community/antsibull-changelog>`_ tool to generate Ansible-compatible changelogs for your collection. The Ansible changelog uses the output of this tool to collate all the collections included in an Ansible release into one combined changelog for the release. + +.. note:: + + Ansible here refers to the Ansible 2.10 or later release that includes a curated set of collections. + +Understanding antsibull-changelog +--------------------------------- + +The ``antsibull-changelog`` tool allows you to create and update changelogs for Ansible collections that are compatible with the combined Ansible changelogs. This is an update to the changelog generator used in prior Ansible releases. The tool adds three new changelog fragment categories: ``breaking_changes``, ``security_fixes`` and ``trivial``. The tool also generates the ``changelog.yaml`` file that Ansible uses to create the combined ``CHANGELOG.rst`` file and Porting Guide for the release. + +See :ref:`changelogs_how_to` and the `antsibull-changelog documentation <https://github.com/ansible-community/antsibull-changelog/tree/main/docs>`_ for complete details. + +.. note:: + + The collection maintainers set the changelog policy for their collections. See the individual collection contributing guidelines for complete details. + +Generating changelogs +--------------------- + +To initialize changelog generation: + +#. Install ``antsibull-changelog``: :code:`pip install antsibull-changelog`. +#. Initialize changelogs for your repository: :code:`antsibull-changelog init <path/to/your/collection>`. +#. Optionally, edit the ``changelogs/config.yaml`` file to customize the location of the generated changelog ``.rst`` file or other options. 
See `Bootstrapping changelogs for collections <https://github.com/ansible-community/antsibull-changelog/blob/main/docs/changelogs.rst#bootstrapping-changelogs-for-collections>`_ for details.
+
+To generate changelogs from the changelog fragments you created:
+
+#. Optionally, validate your changelog fragments: :code:`antsibull-changelog lint`.
+#. Generate the changelog for your release: :code:`antsibull-changelog release [--version version_number]`.
+
+.. note::
+
+   Add the ``--reload-plugins`` option if you ran the ``antsibull-changelog release`` command previously and the version of the collection has not changed. ``antsibull-changelog`` caches the information on all plugins and does not update its cache until the collection version changes.
+
+
+Porting Guide entries
+----------------------
+
+The following changelog fragment categories are consumed by the Ansible changelog generator into the Ansible Porting Guide:
+
+* ``major_changes``
+* ``breaking_changes``
+* ``deprecated_features``
+* ``removed_features``
+
+Including collection changelogs into Ansible
+=============================================
+
+
+If your collection is part of Ansible, use one of the following three options to include your changelog into the Ansible release changelog:
+
+* Use the ``antsibull-changelog`` tool.
+
+* If you are not using this tool, include the properly formatted ``changelog.yaml`` file into your collection. See the `changelog.yaml format <https://github.com/ansible-community/antsibull-changelog/blob/main/docs/changelog.yaml-format.md>`_ for details.
+
+* Add a link to your own changelogs or release notes in any format by opening an issue at https://github.com/ansible-community/ansible-build-data/ with the HTML link to that information.
+
+.. note::
+
+   For the first two options, Ansible pulls the changelog details from Galaxy so your changelogs must be included in the collection version on Galaxy that is included in the upcoming Ansible release.
+
+.. 
seealso:: + + :ref:`collections` + Learn how to install and use collections. + :ref:`collections_galaxy_meta` + Understand the collections metadata structure. + :ref:`developing_modules_general` + Learn about how to write Ansible modules + `Mailing List <https://groups.google.com/group/ansible-devel>`_ + The development mailing list + `irc.freenode.net <http://irc.freenode.net>`_ + #ansible IRC chat channel diff --git a/docs/docsite/rst/dev_guide/developing_core.rst b/docs/docsite/rst/dev_guide/developing_core.rst new file mode 100644 index 00000000..602f9aaf --- /dev/null +++ b/docs/docsite/rst/dev_guide/developing_core.rst @@ -0,0 +1,21 @@ +*************************** +Developing ``ansible-base`` +*************************** + +Although ``ansible-base`` (the code hosted in the `ansible/ansible repository <https://github.com/ansible/ansible>`_ on GitHub) includes a few plugins that can be swapped out via playbook directives or configuration, much of the code there is not modular. The documents here give insight into how the parts of ``ansible-base`` work together. + +.. toctree:: + :maxdepth: 1 + + developing_program_flow_modules + +.. seealso:: + + :ref:`developing_api` + Learn about the Python API for task execution + :ref:`developing_plugins` + Learn about developing plugins + `Mailing List <https://groups.google.com/group/ansible-devel>`_ + The development mailing list + `irc.freenode.net <http://irc.freenode.net>`_ + #ansible-devel IRC chat channel diff --git a/docs/docsite/rst/dev_guide/developing_inventory.rst b/docs/docsite/rst/dev_guide/developing_inventory.rst new file mode 100644 index 00000000..26a56a36 --- /dev/null +++ b/docs/docsite/rst/dev_guide/developing_inventory.rst @@ -0,0 +1,422 @@ +.. 
_developing_inventory: + +**************************** +Developing dynamic inventory +**************************** + +Ansible can pull inventory information from dynamic sources, including cloud sources, by using the supplied :ref:`inventory plugins <inventory_plugins>`. For details about how to pull inventory information, see :ref:`dynamic_inventory`. If the source you want is not currently covered by existing plugins, you can create your own inventory plugin as with any other plugin type. + +In previous versions, you had to create a script or program that could output JSON in the correct format when invoked with the proper arguments. +You can still use and write inventory scripts, as we ensured backwards compatibility via the :ref:`script inventory plugin <script_inventory>` +and there is no restriction on the programming language used. +If you choose to write a script, however, you will need to implement some features yourself such as caching, configuration management, dynamic variable and group composition, and so on. +If you use :ref:`inventory plugins <inventory_plugins>` instead, you can leverage the Ansible codebase and add these common features automatically. + +.. contents:: Topics + :local: + + +.. _inventory_sources: + +Inventory sources +================= + +Inventory sources are the input strings that inventory plugins work with. +An inventory source can be a path to a file or to a script, or it can be raw data that the plugin can interpret. + +The table below shows some examples of inventory plugins and the source types that you can pass to them with ``-i`` on the command line. 
+ ++--------------------------------------------+-----------------------------------------+ +| Plugin | Source | ++--------------------------------------------+-----------------------------------------+ +| :ref:`host list <host_list_inventory>` | A comma-separated list of hosts | ++--------------------------------------------+-----------------------------------------+ +| :ref:`yaml <yaml_inventory>` | Path to a YAML format data file | ++--------------------------------------------+-----------------------------------------+ +| :ref:`constructed <constructed_inventory>` | Path to a YAML configuration file | ++--------------------------------------------+-----------------------------------------+ +| :ref:`ini <ini_inventory>` | Path to an INI formatted data file | ++--------------------------------------------+-----------------------------------------+ +| :ref:`virtualbox <virtualbox_inventory>` | Path to a YAML configuration file | ++--------------------------------------------+-----------------------------------------+ +| :ref:`script plugin <script_inventory>` | Path to an executable that outputs JSON | ++--------------------------------------------+-----------------------------------------+ + + +.. _developing_inventory_inventory_plugins: + +Inventory plugins +================= + +Like most plugin types (except modules), inventory plugins must be developed in Python. They execute on the controller and should therefore adhere to the :ref:`control_node_requirements`. + +Most of the documentation in :ref:`developing_plugins` also applies here. You should read that document first for a general understanding and then come back to this document for specifics on inventory plugins. + +Normally, inventory plugins are executed at the start of a run, and before the playbooks, plays, or roles are loaded. 
+However, you can use the ``meta: refresh_inventory`` task to clear the current inventory and execute the inventory plugins again, and this task will generate a new inventory. + +If you use the persistent cache, inventory plugins can also use the configured cache plugin to store and retrieve data. Caching inventory avoids making repeated and costly external calls. + +.. _developing_an_inventory_plugin: + +Developing an inventory plugin +------------------------------ + +The first thing you want to do is use the base class: + +.. code-block:: python + + from ansible.plugins.inventory import BaseInventoryPlugin + + class InventoryModule(BaseInventoryPlugin): + + NAME = 'myplugin' # used internally by Ansible, it should match the file name but not required + +If the inventory plugin is in a collection, the NAME should be in the 'namespace.collection_name.myplugin' format. The base class has a couple of methods that each plugin should implement and a few helpers for parsing the inventory source and updating the inventory. + +After you have the basic plugin working, you can incorporate other features by adding more base classes: + +.. code-block:: python + + from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable + + class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): + + NAME = 'myplugin' + +For the bulk of the work in a plugin, we mostly want to deal with 2 methods ``verify_file`` and ``parse``. + +.. _inventory_plugin_verify_file: + +verify_file method +^^^^^^^^^^^^^^^^^^ + +Ansible uses this method to quickly determine if the inventory source is usable by the plugin. The determination does not need to be 100% accurate, as there might be an overlap in what plugins can handle and by default Ansible will try the enabled plugins as per their sequence. + +.. 
code-block:: python + + def verify_file(self, path): + ''' return true/false if this is possibly a valid file for this plugin to consume ''' + valid = False + if super(InventoryModule, self).verify_file(path): + # base class verifies that file exists and is readable by current user + if path.endswith(('virtualbox.yaml', 'virtualbox.yml', 'vbox.yaml', 'vbox.yml')): + valid = True + return valid + +In the above example, from the :ref:`virtualbox inventory plugin <virtualbox_inventory>`, we screen for specific file name patterns to avoid attempting to consume any valid YAML file. You can add any type of condition here, but the most common one is 'extension matching'. If you implement extension matching for YAML configuration files, the path suffix <plugin_name>.<yml|yaml> should be accepted. All valid extensions should be documented in the plugin description. + +The following is another example that does not use a 'file' but the inventory source string itself, +from the :ref:`host list <host_list_inventory>` plugin: + +.. code-block:: python + + def verify_file(self, path): + ''' don't call base class as we don't expect a path, but a host list ''' + host_list = path + valid = False + b_path = to_bytes(host_list, errors='surrogate_or_strict') + if not os.path.exists(b_path) and ',' in host_list: + # the path does NOT exist and there is a comma to indicate this is a 'host list' + valid = True + return valid + +This method is just to expedite the inventory process and avoid unnecessary parsing of sources that are easy to filter out before causing a parse error. + +.. _inventory_plugin_parse: + +parse method +^^^^^^^^^^^^ + +This method does the bulk of the work in the plugin. +It takes the following parameters: + + * inventory: inventory object with existing data and the methods to add hosts/groups/variables to inventory + * loader: Ansible's DataLoader. The DataLoader can read files, auto load JSON/YAML and decrypt vaulted data, and cache read files. 
+ * path: string with inventory source (this is usually a path, but is not required) + * cache: indicates whether the plugin should use or avoid caches (cache plugin and/or loader) + + +The base class does some minimal assignment for reuse in other methods. + +.. code-block:: python + + def parse(self, inventory, loader, path, cache=True): + + self.loader = loader + self.inventory = inventory + self.templar = Templar(loader=loader) + +It is up to the plugin now to parse the provided inventory source and translate it into Ansible inventory. +To facilitate this, the example below uses a few helper functions: + +.. code-block:: python + + NAME = 'myplugin' + + def parse(self, inventory, loader, path, cache=True): + + # call base method to ensure properties are available for use with other helper methods + super(InventoryModule, self).parse(inventory, loader, path, cache) + + # this method will parse 'common format' inventory sources and + # update any options declared in DOCUMENTATION as needed + config = self._read_config_data(path) + + # if NOT using _read_config_data you should call set_options directly, + # to process any defined configuration for this plugin, + # if you don't define any options you can skip + #self.set_options() + + # example consuming options from inventory source + mysession = apilib.session(user=self.get_option('api_user'), + password=self.get_option('api_pass'), + server=self.get_option('api_server') + ) + + + # make requests to get data to feed into inventory + mydata = mysession.getitall() + + #parse data and create inventory objects: + for colo in mydata: + for server in mydata[colo]['servers']: + self.inventory.add_host(server['name']) + self.inventory.set_variable(server['name'], 'ansible_host', server['external_ip']) + +The specifics will vary depending on API and structure returned. 
Remember that if you get an inventory source error or any other issue, you should ``raise AnsibleParserError`` to let Ansible know that the source was invalid or the process failed. + +For examples on how to implement an inventory plugin, see the source code here: +`lib/ansible/plugins/inventory <https://github.com/ansible/ansible/tree/devel/lib/ansible/plugins/inventory>`_. + +.. _inventory_plugin_caching: + +inventory cache +^^^^^^^^^^^^^^^ + +To cache the inventory, extend the inventory plugin documentation with the inventory_cache documentation fragment and use the Cacheable base class. + +.. code-block:: yaml + + extends_documentation_fragment: + - inventory_cache + +.. code-block:: python + + class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): + + NAME = 'myplugin' + +Next, load the cache plugin specified by the user to read from and update the cache. If your inventory plugin uses YAML-based configuration files and the ``_read_config_data`` method, the cache plugin is loaded within that method. If your inventory plugin does not use ``_read_config_data``, you must load the cache explicitly with ``load_cache_plugin``. + +.. code-block:: python + + NAME = 'myplugin' + + def parse(self, inventory, loader, path, cache=True): + super(InventoryModule, self).parse(inventory, loader, path) + + self.load_cache_plugin() + +Before using the cache plugin, you must retrieve a unique cache key by using the ``get_cache_key`` method. This task needs to be done by all inventory modules using the cache, so that you don't use/overwrite other parts of the cache. + +.. 
code-block:: python + + def parse(self, inventory, loader, path, cache=True): + super(InventoryModule, self).parse(inventory, loader, path) + + self.load_cache_plugin() + cache_key = self.get_cache_key(path) + +Now that you've enabled caching, loaded the correct plugin, and retrieved a unique cache key, you can set up the flow of data between the cache and your inventory using the ``cache`` parameter of the ``parse`` method. This value comes from the inventory manager and indicates whether the inventory is being refreshed (such as via ``--flush-cache`` or the meta task ``refresh_inventory``). Although the cache shouldn't be used to populate the inventory when being refreshed, the cache should be updated with the new inventory if the user has enabled caching. You can use ``self._cache`` like a dictionary. The following pattern allows refreshing the inventory to work in conjunction with caching. + +.. code-block:: python + + def parse(self, inventory, loader, path, cache=True): + super(InventoryModule, self).parse(inventory, loader, path) + + self.load_cache_plugin() + cache_key = self.get_cache_key(path) + + # cache may be True or False at this point to indicate if the inventory is being refreshed + # get the user's cache option too to see if we should save the cache if it is changing + user_cache_setting = self.get_option('cache') + + # read if the user has caching enabled and the cache isn't being refreshed + attempt_to_read_cache = user_cache_setting and cache + # update if the user has caching enabled and the cache is being refreshed; update this value to True if the cache has expired below + cache_needs_update = user_cache_setting and not cache + + # attempt to read the cache if inventory isn't being refreshed and the user has caching enabled + if attempt_to_read_cache: + try: + results = self._cache[cache_key] + except KeyError: + # This occurs if the cache_key is not in the cache or if the cache_key expired, so the cache needs to be updated + 
cache_needs_update = True + + if cache_needs_update: + results = self.get_inventory() + + # set the cache + self._cache[cache_key] = results + + self.populate(results) + +After the ``parse`` method is complete, the contents of ``self._cache`` is used to set the cache plugin if the contents of the cache have changed. + +You have three other cache methods available: + - ``set_cache_plugin`` forces the cache plugin to be set with the contents of ``self._cache``, before the ``parse`` method completes + - ``update_cache_if_changed`` sets the cache plugin only if ``self._cache`` has been modified, before the ``parse`` method completes + - ``clear_cache`` flushes the cache, ultimately by calling the cache plugin's ``flush()`` method, whose implementation is dependent upon the particular cache plugin in use. Note that if the user is using the same cache backend for facts and inventory, both will get flushed. To avoid this, the user can specify a distinct cache backend in their inventory plugin configuration. + +.. _inventory_source_common_format: + +Common format for inventory sources +----------------------------------- + +To simplify development, most plugins use a standard YAML-based configuration file as the inventory source. The file has only one required field ``plugin``, which should contain the name of the plugin that is expected to consume the file. +Depending on other common features used, you might need other fields, and you can add custom options in each plugin as required. +For example, if you use the integrated caching, ``cache_plugin``, ``cache_timeout`` and other cache-related fields could be present. + +.. _inventory_development_auto: + +The 'auto' plugin +----------------- + +From Ansible 2.5 onwards, we include the :ref:`auto inventory plugin <auto_inventory>` and enable it by default. If the ``plugin`` field in your standard configuration file matches the name of your inventory plugin, the ``auto`` inventory plugin will load your plugin. 
The 'auto' plugin makes it easier to use your plugin without having to update configurations.
+
+
+.. _inventory_scripts:
+.. _developing_inventory_scripts:
+
+Inventory scripts
+=================
+
+Even though we now have inventory plugins, we still support inventory scripts, not only for backwards compatibility but also to allow users to leverage other programming languages.
+
+
+.. _inventory_script_conventions:
+
+Inventory script conventions
+----------------------------
+
+Inventory scripts must accept the ``--list`` and ``--host <hostname>`` arguments. Although other arguments are allowed, Ansible will not use them.
+Such arguments might still be useful for executing the scripts directly.
+
+When the script is called with the single argument ``--list``, the script must output to stdout a JSON-encoded hash or
+dictionary that contains all the groups to be managed. Each group's value should be either a hash or dictionary containing a list of each host, any child groups, and potential group variables, or simply a list of hosts::
+
+
+    {
+        "group001": {
+            "hosts": ["host001", "host002"],
+            "vars": {
+                "var1": true
+            },
+            "children": ["group002"]
+        },
+        "group002": {
+            "hosts": ["host003","host004"],
+            "vars": {
+                "var2": 500
+            },
+            "children":[]
+        }
+
+    }
+
+If any of the elements of a group are empty, they may be omitted from the output.
+
+When called with the argument ``--host <hostname>`` (where <hostname> is a host from above), the script must print either an empty JSON hash/dictionary, or a hash/dictionary of variables to make them available to templates and playbooks. For example::
+
+
+    {
+        "VAR001": "VALUE",
+        "VAR002": "VALUE"
+    }
+
+Printing variables is optional. If the script does not print variables, it should print an empty hash or dictionary.
+
+.. _inventory_script_tuning:
+
+Tuning the external inventory script
+------------------------------------
+
+.. 
versionadded:: 1.3
+
+The stock inventory script system mentioned above works for all versions of Ansible, but calling ``--host`` for every host can be rather inefficient, especially if it involves API calls to a remote subsystem.
+
+To avoid this inefficiency, if the inventory script returns a top-level element called "_meta", it is possible to return all the host variables in a single script execution. When this meta element contains a value for "hostvars", the inventory script will not be invoked with ``--host`` for each host. This behavior results in a significant performance increase for large numbers of hosts.
+
+The data to be added to the top-level JSON dictionary looks like this::
+
+    {
+
+        # results of inventory script as above go here
+        # ...
+
+        "_meta": {
+            "hostvars": {
+                "host001": {
+                    "var001" : "value"
+                },
+                "host002": {
+                    "var002": "value"
+                }
+            }
+        }
+    }
+
+To satisfy the requirements of using ``_meta`` and to prevent Ansible from calling your inventory with ``--host``, you must at least populate ``_meta`` with an empty ``hostvars`` dictionary.
+For example::
+
+    {
+
+        # results of inventory script as above go here
+        # ...
+
+        "_meta": {
+            "hostvars": {}
+        }
+    }
+
+
+.. _replacing_inventory_ini_with_dynamic_provider:
+
+If you intend to replace an existing static inventory file with an inventory script, it must return a JSON object which contains an 'all' group that includes every host in the inventory as a member and every group in the inventory as a child. It should also include an 'ungrouped' group which contains all hosts which are not members of any other group.
+A skeleton example of this JSON object is:
+
+.. code-block:: json
+
+    {
+        "_meta": {
+            "hostvars": {}
+        },
+        "all": {
+            "children": [
+                "ungrouped"
+            ]
+        },
+        "ungrouped": {
+            "children": [
+            ]
+        }
+    }
+
+An easy way to see how this should look is using :ref:`ansible-inventory`, which also supports ``--list`` and ``--host`` parameters like an inventory script would.
+
+.. 
seealso:: + + :ref:`developing_api` + Python API to Playbooks and Ad Hoc Task Execution + :ref:`developing_modules_general` + Get started with developing a module + :ref:`developing_plugins` + How to develop plugins + `Ansible Tower <https://www.ansible.com/products/tower>`_ + REST API endpoint and GUI for Ansible, syncs with dynamic inventory + `Development Mailing List <https://groups.google.com/group/ansible-devel>`_ + Mailing list for development topics + `irc.freenode.net <http://irc.freenode.net>`_ + #ansible IRC chat channel diff --git a/docs/docsite/rst/dev_guide/developing_locally.rst b/docs/docsite/rst/dev_guide/developing_locally.rst new file mode 100644 index 00000000..4c7f6b71 --- /dev/null +++ b/docs/docsite/rst/dev_guide/developing_locally.rst @@ -0,0 +1,105 @@ +.. _using_local_modules_and_plugins: +.. _developing_locally: + +********************************** +Adding modules and plugins locally +********************************** + +The easiest, quickest, and the most popular way to extend Ansible is to use a local module or a plugin. You can create them or copy existing ones for local use. You can store a local module or plugin on your Ansible control node and share it with your team or organization. You can also share a local plugin or module by including it in a collection or embedding it in a role, then publishing the collection or role on Ansible Galaxy. If you are using roles on Ansible Galaxy, then you are already using local modules and plugins without realizing it. + +If you are using an existing module or plugin but Ansible can't find it, this page is all you need. However, if you want to create a plugin or a module, go to :ref:`developing_plugins` and :ref:`developing_modules_general` topics and then return to this page to know how to add it locally. + +Extending Ansible with local modules and plugins offers lots of shortcuts such as: + +* You can copy other people's modules and plugins. 
+* When writing a new module, you can choose any programming language you like. +* You do not have to clone any repositories. +* You do not have to open a pull request. +* You do not have to add tests (though we recommend that you do!). + +To save a local module or plugin such that Ansible can find and use it, add the module or plugin in the appropriate directory (the directories are specified in later parts of this topic). + +.. contents:: + :local: + +.. _modules_vs_plugins: + +Modules and plugins: what is the difference? +============================================ +If you are looking to add local functionality to Ansible, you might wonder whether you need a module or a plugin. Here is a quick overview to help you decide between the two: + +* Modules are reusable, standalone scripts that can be used by the Ansible API, the :command:`ansible` command, or the :command:`ansible-playbook` command. Modules provide a defined interface. Each module accepts arguments and returns information to Ansible by printing a JSON string to stdout before exiting. Modules execute on the target system (usually that means on a remote system) in separate processes. +* :ref:`Plugins <plugins_lookup>` augment Ansible's core functionality and execute on the control node within the ``/usr/bin/ansible`` process. Plugins offer options and extensions for the core features of Ansible - transforming data, logging output, connecting to inventory, and more. + +.. _local_modules: + +Adding a module locally +======================= +Ansible automatically loads all executable files found in certain directories as modules. + +For local modules, use the name of the file as the module name: for example, if the module file is ``~/.ansible/plugins/modules/local_users.py``, use ``local_users`` as the module name. 
+ +To load your local modules automatically and make them available to all playbooks and roles, add them in any of these locations: + +* any directory added to the ``ANSIBLE_LIBRARY`` environment variable (``$ANSIBLE_LIBRARY`` takes a colon-separated list like ``$PATH``) +* ``~/.ansible/plugins/modules/`` +* ``/usr/share/ansible/plugins/modules/`` + +After you save your module file in one of these locations, Ansible loads it and you can use it in any local task, playbook, or role. + +To confirm that ``my_custom_module`` is available: + +* type ``ansible localhost -m my_custom_module``. You should see the output for that module. + +or + +* type ``ansible-doc -t module my_custom_module``. You should see the documentation for that module. + +.. note:: + + Currently, the ``ansible-doc`` command can parse module documentation only from modules written in Python. If you have a module written in a programming language other than Python, please write the documentation in a Python file adjacent to the module file. + +You can limit the availability of your local module. If you want to use a local module only with selected playbooks or only with a single role, load it in one of the following locations: + +* In a selected playbook or playbooks: Store the module in a subdirectory called ``library`` in the directory that contains those playbooks. +* In a single role: Store the module in a subdirectory called ``library`` within that role. + +.. _distributing_plugins: +.. _local_plugins: + +Adding a plugin locally +======================= +Ansible loads plugins automatically too, and loads each type of plugin separately from a directory named for the type of plugin. Here's the full list of plugin directory names: + + * action_plugins* + * cache_plugins + * callback_plugins + * connection_plugins + * filter_plugins* + * inventory_plugins + * lookup_plugins + * shell_plugins + * strategy_plugins + * test_plugins* + * vars_plugins + +.. 
note:: + + After you add the plugins and verify that they are available for use, you can see the documentation for all the plugins except for the ones marked with an asterisk (*) above. + +To load your local plugins automatically, add them in any of these locations: + +* any directory added to the relevant ``ANSIBLE_plugin_type_PLUGINS`` environment variable (these variables, such as ``$ANSIBLE_INVENTORY_PLUGINS`` and ``$ANSIBLE_VARS_PLUGINS`` take colon-separated lists like ``$PATH``) +* the directory named for the correct ``plugin_type`` within ``~/.ansible/plugins/`` - for example, ``~/.ansible/plugins/callback`` +* the directory named for the correct ``plugin_type`` within ``/usr/share/ansible/plugins/`` - for example, ``/usr/share/ansible/plugins/action`` + +After your plugin file is in one of these locations, Ansible loads it and you can use it in any local module, task, playbook, or role. Alternatively, you can edit your ``ansible.cfg`` file to add directories that contain local plugins. For details about adding directories of local plugins, see :ref:`ansible_configuration_settings`. + +To confirm that ``plugins/plugin_type/my_custom_plugin`` is available: + +* type ``ansible-doc -t <plugin_type> my_custom_lookup_plugin``. For example, ``ansible-doc -t lookup my_custom_lookup_plugin``. You should see the documentation for that plugin. This works for all plugin types except the ones marked with ``*`` in the list above - see :ref:`ansible-doc` for more details. + +You can limit the availability of your local plugin. If you want to use a local plugin only with selected playbooks or only with a single role, load it in one of the following locations: + +* In a selected playbook or playbooks: Store the plugin in a subdirectory for the correct ``plugin_type`` (for example, ``callback_plugins`` or ``inventory_plugins``) in the directory that contains the playbooks. 
+* In a single role: Store the plugin in a subdirectory for the correct ``plugin_type`` (for example, ``cache_plugins`` or ``strategy_plugins``) within that role. When shipped as part of a role, the plugin is available as soon as the role is executed. diff --git a/docs/docsite/rst/dev_guide/developing_module_utilities.rst b/docs/docsite/rst/dev_guide/developing_module_utilities.rst new file mode 100644 index 00000000..dfeaef55 --- /dev/null +++ b/docs/docsite/rst/dev_guide/developing_module_utilities.rst @@ -0,0 +1,69 @@ +.. _developing_module_utilities: + +************************************* +Using and developing module utilities +************************************* + +Ansible provides a number of module utilities, or snippets of shared code, that +provide helper functions you can use when developing your own modules. The +``basic.py`` module utility provides the main entry point for accessing the +Ansible library, and all Python Ansible modules must import something from +``ansible.module_utils``. A common option is to import ``AnsibleModule``:: + + from ansible.module_utils.basic import AnsibleModule + +The ``ansible.module_utils`` namespace is not a plain Python package: it is +constructed dynamically for each task invocation, by extracting imports and +resolving those matching the namespace against a :ref:`search path <ansible_search_path>` derived from the +active configuration. + +To reduce the maintenance burden in a collection or in local modules, you can extract +duplicated code into one or more module utilities and import them into your modules. 
For example, if you have your own custom modules that import a ``my_shared_code`` library, you can place that into a ``./module_utils/my_shared_code.py`` file like this:: + + from ansible.module_utils.my_shared_code import MySharedCodeClient + +When you run ``ansible-playbook``, Ansible will merge any files in your local ``module_utils`` directories into the ``ansible.module_utils`` namespace in the order defined by the :ref:`Ansible search path <ansible_search_path>`. + +Naming and finding module utilities +=================================== + +You can generally tell what a module utility does from its name and/or its location. Generic utilities (shared code used by many different kinds of modules) live in the main ansible/ansible codebase, in the ``common`` subdirectory or in the root directory of ``lib/ansible/module_utils``. Utilities used by a particular set of modules generally live in the same collection as those modules. For example: + +* ``lib/ansible/module_utils/urls.py`` contains shared code for parsing URLs +* ``openstack.cloud.plugins.module_utils.openstack.py`` contains utilities for modules that work with OpenStack instances +* ``ansible.netcommon.plugins.module_utils.network.common.config.py`` contains utility functions for use by networking modules + +Following this pattern with your own module utilities makes everything easy to find and use. + +.. _standard_mod_utils: + +Standard module utilities +========================= + +Ansible ships with an extensive library of ``module_utils`` files. You can find the module utility source code in the ``lib/ansible/module_utils`` directory under your main Ansible path. We describe the most widely used utilities below. For more details on any specific module utility, please see the `source code for module_utils <https://github.com/ansible/ansible/tree/devel/lib/ansible/module_utils>`_. + +.. 
include:: shared_snippets/licensing.txt + +- ``api.py`` - Supports generic API modules +- ``basic.py`` - General definitions and helper utilities for Ansible modules +- ``common/dict_transformations.py`` - Helper functions for dictionary transformations +- ``common/file.py`` - Helper functions for working with files +- ``common/text/`` - Helper functions for converting and formatting text +- ``common/parameters.py`` - Helper functions for dealing with module parameters +- ``common/sys_info.py`` - Functions for getting distribution and platform information +- ``common/validation.py`` - Helper functions for validating module parameters against a module argument spec +- ``facts/`` - Directory of utilities for modules that return facts. See `PR 23012 <https://github.com/ansible/ansible/pull/23012>`_ for more information +- ``json_utils.py`` - Utilities for filtering unrelated output around module JSON output, like leading and trailing lines +- ``powershell/`` - Directory of definitions and helper functions for Windows PowerShell modules +- ``pycompat24.py`` - Exception workaround for Python 2.4 +- ``service.py`` - Utilities to enable modules to work with Linux services (placeholder, not in use) +- ``six/__init__.py`` - Bundled copy of the `Six Python library <https://pypi.org/project/six/>`_ to aid in writing code compatible with both Python 2 and Python 3 +- ``splitter.py`` - String splitting and manipulation utilities for working with Jinja2 templates +- ``urls.py`` - Utilities for working with http and https requests + +Several commonly-used utilities migrated to collections in Ansible 2.10, including: + +- ``ismount.py`` migrated to ``ansible.posix.plugins.module_utils.mount.py`` - Single helper function that fixes os.path.ismount +- ``known_hosts.py`` migrated to ``community.general.plugins.module_utils.known_hosts.py`` - utilities for working with known_hosts file + +For a list of migrated content with destination collections, see 
https://github.com/ansible/ansible/blob/devel/lib/ansible/config/ansible_builtin_runtime.yml. diff --git a/docs/docsite/rst/dev_guide/developing_modules.rst b/docs/docsite/rst/dev_guide/developing_modules.rst new file mode 100644 index 00000000..5cfcf15c --- /dev/null +++ b/docs/docsite/rst/dev_guide/developing_modules.rst @@ -0,0 +1,51 @@ +.. _developing_modules: +.. _module_dev_should_you: + +**************************** +Should you develop a module? +**************************** + +Developing Ansible modules is easy, but often it is not necessary. Before you start writing a new module, ask: + +1. Does a similar module already exist? + +An existing module may cover the functionality you want. Ansible collections include thousands of modules. Search our :ref:`list of included collections <list_of_collections>` or `Ansible Galaxy <https://galaxy.ansible.com>`_ to see if an existing module does what you need. + +2. Should you use or develop an action plugin instead of a module? + +An action plugin may be the best way to get the functionality you want. Action plugins run on the control node instead of on the managed node, and their functionality is available to all modules. For more information about developing plugins, read the :ref:`developing plugins page <developing_plugins>`. + +3. Should you use a role instead of a module? + +A combination of existing modules may cover the functionality you want. You can write a role for this type of use case. Check out the :ref:`roles documentation<playbooks_reuse_roles>`. + +4. Should you create a collection instead of a single module? + +The functionality you want may be too large for a single module. If you want to connect Ansible to a new cloud provider, database, or network platform, you may need to :ref:`develop a new collection<developing_modules_in_groups>`. + +* Each module should have a concise and well defined functionality. Basically, follow the UNIX philosophy of doing one thing well. 
+ +* A module should not require that a user know all the underlying options of an API/tool to be used. For instance, if the legal values for a required module parameter cannot be documented, that's a sign that the module would be rejected. + +* Modules should typically encompass much of the logic for interacting with a resource. A lightweight wrapper around an API that does not contain much logic would likely cause users to offload too much logic into a playbook, and for this reason the module would be rejected. Instead try creating multiple modules for interacting with smaller individual pieces of the API. + +If your use case isn't covered by an existing module, an action plugin, or a role, and you don't need to create multiple modules, then you're ready to start developing a new module. Choose from the topics below for next steps: + +* I want to :ref:`get started on a new module <developing_modules_general>`. +* I want to review :ref:`tips and conventions for developing good modules <developing_modules_best_practices>`. +* I want to :ref:`write a Windows module <developing_modules_general_windows>`. +* I want :ref:`an overview of Ansible's architecture <developing_program_flow_modules>`. +* I want to :ref:`document my module <developing_modules_documenting>`. +* I want to :ref:`contribute my module back to Ansible Core <developing_modules_checklist>`. +* I want to :ref:`add unit and integration tests to my module <developing_testing>`. +* I want to :ref:`add Python 3 support to my module <developing_python_3>`. +* I want to :ref:`write multiple modules <developing_modules_in_groups>`. + +.. 
seealso:: + + :ref:`list_of_collections` + Browse existing collections, modules, and plugins + `Mailing List <https://groups.google.com/group/ansible-devel>`_ + Development mailing list + `irc.freenode.net <http://irc.freenode.net>`_ + #ansible IRC chat channel diff --git a/docs/docsite/rst/dev_guide/developing_modules_best_practices.rst b/docs/docsite/rst/dev_guide/developing_modules_best_practices.rst new file mode 100644 index 00000000..19787f69 --- /dev/null +++ b/docs/docsite/rst/dev_guide/developing_modules_best_practices.rst @@ -0,0 +1,177 @@ +.. _developing_modules_best_practices: +.. _module_dev_conventions: + +******************************* +Conventions, tips, and pitfalls +******************************* + +.. contents:: Topics + :local: + +As you design and develop modules, follow these basic conventions and tips for clean, usable code: + +Scoping your module(s) +====================== + +Especially if you want to contribute your module(s) to an existing Ansible Collection, make sure each module includes enough logic and functionality, but not too much. If these guidelines seem confusing, consider :ref:`whether you really need to write a module <module_dev_should_you>` at all. + +* Each module should have a concise and well-defined functionality. Basically, follow the UNIX philosophy of doing one thing well. +* Do not add ``get``, ``list`` or ``info`` state options to an existing module - create a new ``_info`` or ``_facts`` module. +* Modules should not require that a user know all the underlying options of an API/tool to be used. For instance, if the legal values for a required module option cannot be documented, the module does not belong in Ansible Core. +* Modules should encompass much of the logic for interacting with a resource. A lightweight wrapper around a complex API forces users to offload too much logic into their playbooks. 
If you want to connect Ansible to a complex API, :ref:`create multiple modules <developing_modules_in_groups>` that interact with smaller individual pieces of the API. +* Avoid creating a module that does the work of other modules; this leads to code duplication and divergence, and makes things less uniform, unpredictable and harder to maintain. Modules should be the building blocks. If you are asking 'how can I have a module execute other modules' ... you want to write a role. + +Designing module interfaces +=========================== + +* If your module is addressing an object, the option for that object should be called ``name`` whenever possible, or accept ``name`` as an alias. +* Modules accepting boolean status should accept ``yes``, ``no``, ``true``, ``false``, or anything else a user may likely throw at them. The AnsibleModule common code supports this with ``type='bool'``. +* Avoid ``action``/``command``, they are imperative and not declarative, there are other ways to express the same thing. + +General guidelines & tips +========================= + +* Each module should be self-contained in one file, so it can be auto-transferred by ``ansible-base``. +* Module name MUST use underscores instead of hyphens or spaces as a word separator. Using hyphens and spaces will prevent ``ansible-base`` from importing your module. +* Always use the ``hacking/test-module.py`` script when developing modules - it will warn you about common pitfalls. +* If you have a local module that returns information specific to your installations, a good name for this module is ``site_info``. +* Eliminate or minimize dependencies. If your module has dependencies, document them at the top of the module file and raise JSON error messages when dependency import fails. +* Don't write to files directly; use a temporary file and then use the ``atomic_move`` function from ``ansible.module_utils.basic`` to move the updated temporary file into place. 
This prevents data corruption and ensures that the correct context for the file is kept. +* Avoid creating caches. Ansible is designed without a central server or authority, so you cannot guarantee it will not run with different permissions, options or locations. If you need a central authority, have it on top of Ansible (for example, using bastion/cm/ci server or tower); do not try to build it into modules. +* If you package your module(s) in an RPM, install the modules on the control machine in ``/usr/share/ansible``. Packaging modules in RPMs is optional. + +Functions and Methods +===================== + +* Each function should be concise and should describe a meaningful amount of work. +* "Don't repeat yourself" is generally a good philosophy. +* Function names should use underscores: ``my_function_name``. +* The name of each function should describe what the function does. +* Each function should have a docstring. +* If your code is too nested, that's usually a sign the loop body could benefit from being a function. Parts of our existing code are not the best examples of this at times. + +Python tips +=========== + +* When fetching URLs, use ``fetch_url`` or ``open_url`` from ``ansible.module_utils.urls``. Do not use ``urllib2``, which does not natively verify TLS certificates and so is insecure for https. +* Include a ``main`` function that wraps the normal execution. +* Call your ``main`` function from a conditional so you can import it into unit tests - for example: + +.. code-block:: python + + if __name__ == '__main__': + main() + +.. _shared_code: + +Importing and using shared code +=============================== + +* Use shared code whenever possible - don't reinvent the wheel. Ansible offers the ``AnsibleModule`` common Python code, plus :ref:`utilities <developing_module_utilities>` for many common use cases and patterns. You can also create documentation fragments for docs that apply to multiple modules. 
+* Import ``ansible.module_utils`` code in the same place as you import other libraries.
+* Do NOT use wildcards (*) for importing other python modules; instead, list the function(s) you are importing (for example, ``from some.other_python_module.basic import otherFunction``).
+* Import custom packages in ``try``/``except``, capture any import errors, and handle them with ``fail_json()`` in ``main()``. For example:
+
+.. code-block:: python
+
+    import traceback
+
+    from ansible.module_utils.basic import missing_required_lib
+
+    LIB_IMP_ERR = None
+    try:
+        import foo
+        HAS_LIB = True
+    except ImportError:
+        HAS_LIB = False
+        LIB_IMP_ERR = traceback.format_exc()
+
+
+Then in ``main()``, just after the argspec, do
+
+.. code-block:: python
+
+    if not HAS_LIB:
+        module.fail_json(msg=missing_required_lib("foo"),
+                         exception=LIB_IMP_ERR)
+
+
+And document the dependency in the ``requirements`` section of your module's :ref:`documentation_block`.
+
+.. _module_failures:
+
+Handling module failures
+========================
+
+When your module fails, help users understand what went wrong. If you are using the ``AnsibleModule`` common Python code, the ``failed`` element will be included for you automatically when you call ``fail_json``. For polite module failure behavior:
+
+* Include a key of ``failed`` along with a string explanation in ``msg``. If you don't do this, Ansible will use standard return codes: 0=success and non-zero=failure.
+* Don't raise a traceback (stacktrace). Ansible can deal with stacktraces and automatically converts anything unparseable into a failed result, but raising a stacktrace on module failure is not user-friendly.
+* Do not use ``sys.exit()``. Use ``fail_json()`` from the module object.
+
+Handling exceptions (bugs) gracefully
+=====================================
+
+* Validate upfront--fail fast and return useful and clear error messages. 
+* Use defensive programming--use a simple design for your module, handle errors gracefully, and avoid direct stacktraces.
+* Fail predictably--if we must fail, do it in a way that is the most expected. Either mimic the underlying tool or the general way the system works.
+* Give out a useful message on what you were doing and add exception messages to that.
+* Avoid catchall exceptions, they are not very useful unless the underlying API gives very good error messages pertaining to the attempted action.
+
+.. _module_output:
+
+Creating correct and informative module output
+==============================================
+
+Modules must output valid JSON only. Follow these guidelines for creating correct, useful module output:
+
+* Make your top-level return type a hash (dictionary).
+* Nest complex return values within the top-level hash.
+* Incorporate any lists or simple scalar values within the top-level return hash.
+* Do not send module output to standard error, because the system will merge standard out with standard error and prevent the JSON from parsing.
+* Capture standard error and return it as a variable in the JSON on standard out. This is how the command module is implemented.
+* Never do ``print("some status message")`` in a module, because it will not produce valid JSON output.
+* Always return useful data, even when there is no change.
+* Be consistent about returns (some modules are too random), unless it is detrimental to the state/action.
+* Make returns reusable--most of the time you don't want to read it, but you do want to process it and re-purpose it.
+* Return diff if in diff mode. This is not required for all modules, as it won't make sense for certain ones, but please include it when applicable.
+* Enable your return values to be serialized as JSON with Python's standard `JSON encoder and decoder <https://docs.python.org/3/library/json.html>`_ library. Basic Python types (strings, int, dicts, lists, and so on) are serializable. 
+* Do not return an object using exit_json(). Instead, convert the fields you need from the object into the fields of a dictionary and return the dictionary. +* Results from many hosts will be aggregated at once, so your module should return only relevant output. Returning the entire contents of a log file is generally bad form. + +If a module returns stderr or otherwise fails to produce valid JSON, the actual output will still be shown in Ansible, but the command will not succeed. + +.. _module_conventions: + +Following Ansible conventions +============================= + +Ansible conventions offer a predictable user interface across all modules, playbooks, and roles. To follow Ansible conventions in your module development: + +* Use consistent names across modules (yes, we have many legacy deviations - don't make the problem worse!). +* Use consistent options (arguments) within your module(s). +* Do not use 'message' or 'syslog_facility' as an option name, because this is used internally by Ansible. +* Normalize options with other modules - if Ansible and the API your module connects to use different names for the same option, add aliases to your options so the user can choose which names to use in tasks and playbooks. +* Return facts from ``*_facts`` modules in the ``ansible_facts`` field of the :ref:`result dictionary<common_return_values>` so other modules can access them. +* Implement ``check_mode`` in all ``*_info`` and ``*_facts`` modules. Playbooks which conditionalize based on fact information will only conditionalize correctly in ``check_mode`` if the facts are returned in ``check_mode``. Usually you can add ``supports_check_mode=True`` when instantiating ``AnsibleModule``. +* Use module-specific environment variables. 
For example, if you use the helpers in ``module_utils.api`` for basic authentication with ``module_utils.urls.fetch_url()`` and you fall back on environment variables for default values, use a module-specific environment variable like :code:`API_<MODULENAME>_USERNAME` to avoid conflicts between modules. +* Keep module options simple and focused - if you're loading a lot of choices/states on an existing option, consider adding a new, simple option instead. +* Keep options small when possible. Passing a large data structure to an option might save us a few tasks, but it adds a complex requirement that we cannot easily validate before passing on to the module. +* If you want to pass complex data to an option, write an expert module that allows this, along with several smaller modules that provide a more 'atomic' operation against the underlying APIs and services. Complex operations require complex data. Let the user choose whether to reflect that complexity in tasks and plays or in vars files. +* Implement declarative operations (not CRUD) so the user can ignore existing state and focus on final state. For example, use ``started/stopped``, ``present/absent``. +* Strive for a consistent final state (aka idempotency). If running your module twice in a row against the same system would result in two different states, see if you can redesign or rewrite to achieve consistent final state. If you can't, document the behavior and the reasons for it. +* Provide consistent return values within the standard Ansible return structure, even if NA/None are used for keys normally returned under other options. +* Follow additional guidelines that apply to families of modules if applicable. For example, AWS modules should follow the :ref:`Amazon development checklist <AWS_module_development>`. + + +Module Security +=============== + +* Avoid passing user input from the shell. +* Always check return codes. 
+* You must always use ``module.run_command``, not ``subprocess`` or ``Popen`` or ``os.system``. +* Avoid using the shell unless absolutely necessary. +* If you must use the shell, you must pass ``use_unsafe_shell=True`` to ``module.run_command``. +* If any variables in your module can come from user input with ``use_unsafe_shell=True``, you must wrap them with ``pipes.quote(x)``. +* When fetching URLs, use ``fetch_url`` or ``open_url`` from ``ansible.module_utils.urls``. Do not use ``urllib2``, which does not natively verify TLS certificates and so is insecure for https. +* Sensitive values marked with ``no_log=True`` will automatically have that value stripped from module return values. If your module could return these sensitive values as part of a dictionary key name, you should call the ``ansible.module_utils.basic.sanitize_keys()`` function to strip the values from the keys. See the ``uri`` module for an example. diff --git a/docs/docsite/rst/dev_guide/developing_modules_checklist.rst b/docs/docsite/rst/dev_guide/developing_modules_checklist.rst new file mode 100644 index 00000000..492b6015 --- /dev/null +++ b/docs/docsite/rst/dev_guide/developing_modules_checklist.rst @@ -0,0 +1,46 @@ +.. _developing_modules_checklist: +.. _module_contribution: + +********************************************************** +Contributing your module to an existing Ansible collection +********************************************************** + +If you want to contribute a module to an existing collection, you must meet the community's objective and subjective requirements. Please read the details below, and also review our :ref:`tips for module development <developing_modules_best_practices>`. + +Modules accepted into certain collections are included in every Ansible release on PyPI. 
However, contributing to one of these collections is not the only way to distribute a module - you can :ref:`create your own collection <developing_collections>`, embed modules in roles on Galaxy or simply share copies of your module code for :ref:`local use <developing_locally>`. + +Contributing modules: objective requirements +=============================================== + +To contribute a module to most Ansible collections, you must: + +* write your module in either Python or Powershell for Windows +* use the ``AnsibleModule`` common code +* support Python 2.6 and Python 3.5 - if your module cannot support Python 2.6, explain the required minimum Python version and rationale in the requirements section in ``DOCUMENTATION`` +* use proper :ref:`Python 3 syntax <developing_python_3>` +* follow `PEP 8 <https://www.python.org/dev/peps/pep-0008/>`_ Python style conventions - see :ref:`testing_pep8` for more information +* license your module under the GPL license (GPLv3 or later) +* understand the :ref:`license agreement <contributor_license_agreement>`, which applies to all contributions +* conform to Ansible's :ref:`formatting and documentation <developing_modules_documenting>` standards +* include comprehensive :ref:`tests <developing_testing>` for your module +* minimize module dependencies +* support :ref:`check_mode <check_mode_dry>` if possible +* ensure your code is readable +* if a module is named ``<something>_facts``, it should be because its main purpose is returning ``ansible_facts``. Do not name modules that do not do this with ``_facts``. Only use ``ansible_facts`` for information that is specific to the host machine, for example network interfaces and their configuration, which operating system and which programs are installed. +* Modules that query/return general information (and not ``ansible_facts``) should be named ``_info``. 
General information is non-host specific information, for example information on online/cloud services (you can access different accounts for the same online service from the same host), or information on VMs and containers accessible from the machine. + +Additional requirements may apply for certain collections. Review the individual collection repositories for more information. + +Please make sure your module meets these requirements before you submit your PR/proposal. If you have questions, reach out via `Ansible's IRC chat channel <http://irc.freenode.net>`_ or the `Ansible development mailing list <https://groups.google.com/group/ansible-devel>`_. + +Contributing to Ansible: subjective requirements +================================================ + +If your module meets these objective requirements, collection maintainers will review your code to see if they think it's clear, concise, secure, and maintainable. They will consider whether your module provides a good user experience, helpful error messages, reasonable defaults, and more. This process is subjective, with no exact standards for acceptance. For the best chance of getting your module accepted, follow our :ref:`tips for module development <developing_modules_best_practices>`. + +Other checklists +================ + +* :ref:`Tips for module development <developing_modules_best_practices>`. +* :ref:`Amazon development checklist <AWS_module_development>`. +* :ref:`Windows development checklist <developing_modules_general_windows>`. diff --git a/docs/docsite/rst/dev_guide/developing_modules_documenting.rst b/docs/docsite/rst/dev_guide/developing_modules_documenting.rst new file mode 100644 index 00000000..096e9f17 --- /dev/null +++ b/docs/docsite/rst/dev_guide/developing_modules_documenting.rst @@ -0,0 +1,442 @@ +.. _developing_modules_documenting: +.. 
_module_documenting: + +******************************* +Module format and documentation +******************************* + +If you want to contribute your module to most Ansible collections, you must write your module in Python and follow the standard format described below. (Unless you're writing a Windows module, in which case the :ref:`Windows guidelines <developing_modules_general_windows>` apply.) In addition to following this format, you should review our :ref:`submission checklist <developing_modules_checklist>`, :ref:`programming tips <developing_modules_best_practices>`, and :ref:`strategy for maintaining Python 2 and Python 3 compatibility <developing_python_3>`, as well as information about :ref:`testing <developing_testing>` before you open a pull request. + +Every Ansible module written in Python must begin with seven standard sections in a particular order, followed by the code. The sections in order are: + +.. contents:: + :depth: 1 + :local: + +.. note:: Why don't the imports go first? + + Keen Python programmers may notice that contrary to PEP 8's advice we don't put ``imports`` at the top of the file. This is because the ``DOCUMENTATION`` through ``RETURN`` sections are not used by the module code itself; they are essentially extra docstrings for the file. The imports are placed after these special variables for the same reason as PEP 8 puts the imports after the introductory comments and docstrings. This keeps the active parts of the code together and the pieces which are purely informational apart. The decision to exclude E402 is based on readability (which is what PEP 8 is about). Documentation strings in a module are much more similar to module level docstrings, than code, and are never utilized by the module itself. Placing the imports below this documentation and closer to the code, consolidates and groups all related code in a congruent manner to improve readability, debugging and understanding. + +.. 
warning:: **Copy old modules with care!** + + Some older Ansible modules have ``imports`` at the bottom of the file, ``Copyright`` notices with the full GPL prefix, and/or ``DOCUMENTATION`` fields in the wrong order. These are legacy files that need updating - do not copy them into new modules. Over time we are updating and correcting older modules. Please follow the guidelines on this page! + +.. _shebang: + +Python shebang & UTF-8 coding +=============================== + +Begin your Ansible module with ``#!/usr/bin/python`` - this "shebang" allows ``ansible_python_interpreter`` to work. Follow the shebang immediately with ``# -*- coding: utf-8 -*-`` to clarify that the file is UTF-8 encoded. + +.. _copyright: + +Copyright and license +===================== + +After the shebang and UTF-8 coding, add a `copyright line <https://www.gnu.org/licenses/gpl-howto.en.html>`_ with the original copyright holder and a license declaration. The license declaration should be ONLY one line, not the full GPL prefix.: + +.. code-block:: python + + #!/usr/bin/python + # -*- coding: utf-8 -*- + + # Copyright: (c) 2018, Terry Jones <terry.jones@example.org> + # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +Major additions to the module (for instance, rewrites) may add additional copyright lines. Any legal review will include the source control history, so an exhaustive copyright header is not necessary. +Please do not edit the existing copyright year. This simplifies project administration and is unlikely to cause any interesting legal issues. +When adding a second copyright line for a significant feature or rewrite, add the newer line above the older one: + +.. code-block:: python + + #!/usr/bin/python + # -*- coding: utf-8 -*- + + # Copyright: (c) 2017, [New Contributor(s)] + # Copyright: (c) 2015, [Original Contributor(s)] + # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +.. 
_ansible_metadata_block: + +ANSIBLE_METADATA block +====================== + +Since we moved to collections we have deprecated the METADATA functionality, it is no longer required for modules, but it will not break anything if present. + + +.. _documentation_block: + +DOCUMENTATION block +=================== + +After the shebang, the UTF-8 coding, the copyright line, and the license section comes the ``DOCUMENTATION`` block. Ansible's online module documentation is generated from the ``DOCUMENTATION`` blocks in each module's source code. The ``DOCUMENTATION`` block must be valid YAML. You may find it easier to start writing your ``DOCUMENTATION`` string in an :ref:`editor with YAML syntax highlighting <other_tools_and_programs>` before you include it in your Python file. You can start by copying our `example documentation string <https://github.com/ansible/ansible/blob/devel/examples/DOCUMENTATION.yml>`_ into your module file and modifying it. If you run into syntax issues in your YAML, you can validate it on the `YAML Lint <http://www.yamllint.com/>`_ website. + +Module documentation should briefly and accurately define what each module and option does, and how it works with others in the underlying system. Documentation should be written for broad audience--readable both by experts and non-experts. + * Descriptions should always start with a capital letter and end with a full stop. Consistency always helps. + * Verify that arguments in doc and module spec dict are identical. + * For password / secret arguments ``no_log=True`` should be set. + * For arguments that seem to contain sensitive information but **do not** contain secrets, such as "password_length", set ``no_log=False`` to disable the warning message. + * If an option is only sometimes required, describe the conditions. For example, "Required when I(state=present)." + * If your module allows ``check_mode``, reflect this fact in the documentation. 
+ +To create clear, concise, consistent, and useful documentation, follow the :ref:`style guide <style_guide>`. + +Each documentation field is described below. Before committing your module documentation, please test it at the command line and as HTML: + +* As long as your module file is :ref:`available locally <local_modules>`, you can use ``ansible-doc -t module my_module_name`` to view your module documentation at the command line. Any parsing errors will be obvious - you can view details by adding ``-vvv`` to the command. +* You should also :ref:`test the HTML output <testing_module_documentation>` of your module documentation. + +Documentation fields +-------------------- + +All fields in the ``DOCUMENTATION`` block are lower-case. All fields are required unless specified otherwise: + +:module: + + * The name of the module. + * Must be the same as the filename, without the ``.py`` extension. + +:short_description: + + * A short description which is displayed on the :ref:`list_of_collections` page and ``ansible-doc -l``. + * The ``short_description`` is displayed by ``ansible-doc -l`` without any category grouping, + so it needs enough detail to explain the module's purpose without the context of the directory structure in which it lives. + * Unlike ``description:``, ``short_description`` should not have a trailing period/full stop. + +:description: + + * A detailed description (generally two or more sentences). + * Must be written in full sentences, in other words, with capital letters and periods/full stops. + * Shouldn't mention the module name. + * Make use of multiple entries rather than using one long paragraph. + * Don't quote complete values unless it is required by YAML. + +:version_added: + + * The version of Ansible when the module was added. + * This is a string, and not a float, for example, ``version_added: '2.1'``. + * In collections, this must be the collection version the module was added to, not the Ansible version. 
For example, ``version_added: 1.0.0``. + +:author: + + * Name of the module author in the form ``First Last (@GitHubID)``. + * Use a multi-line list if there is more than one author. + * Don't use quotes as it should not be required by YAML. + +:deprecated: + + * Marks modules that will be removed in future releases. See also :ref:`module_lifecycle`. + +:options: + + * Options are often called `parameters` or `arguments`. Because the documentation field is called `options`, we will use that term. + * If the module has no options (for example, it's a ``_facts`` module), all you need is one line: ``options: {}``. + * If your module has options (in other words, accepts arguments), each option should be documented thoroughly. For each module option, include: + + :option-name: + + * Declarative operation (not CRUD), to focus on the final state, for example `online:`, rather than `is_online:`. + * The name of the option should be consistent with the rest of the module, as well as other modules in the same category. + * When in doubt, look for other modules to find option names that are used for the same purpose, we like to offer consistency to our users. + + :description: + + * Detailed explanation of what this option does. It should be written in full sentences. + * The first entry is a description of the option itself; subsequent entries detail its use, dependencies, or format of possible values. + * Should not list the possible values (that's what ``choices:`` is for, though it should explain what the values do if they aren't obvious). + * If an option is only sometimes required, describe the conditions. For example, "Required when I(state=present)." + * Mutually exclusive options must be documented as the final sentence on each of the options. + + :required: + + * Only needed if ``true``. + * If missing, we assume the option is not required. + + :default: + + * If ``required`` is false/missing, ``default`` may be specified (assumed 'null' if missing). 
+ * Ensure that the default value in the docs matches the default value in the code. + * The default field must not be listed as part of the description, unless it requires additional information or conditions. + * If the option is a boolean value, you can use any of the boolean values recognized by Ansible: + (such as true/false or yes/no). Choose the one that reads better in the context of the option. + + :choices: + + * List of option values. + * Should be absent if empty. + + :type: + + * Specifies the data type that option accepts, must match the ``argspec``. + * If an argument is ``type='bool'``, this field should be set to ``type: bool`` and no ``choices`` should be specified. + * If an argument is ``type='list'``, ``elements`` should be specified. + + :elements: + + * Specifies the data type for list elements in case ``type='list'``. + + :aliases: + * List of optional name aliases. + * Generally not needed. + + :version_added: + + * Only needed if this option was extended after initial Ansible release, in other words, this is greater than the top level `version_added` field. + * This is a string, and not a float, for example, ``version_added: '2.3'``. + * In collections, this must be the collection version the option was added to, not the Ansible version. For example, ``version_added: 1.0.0``. + + :suboptions: + + * If this option takes a dict or list of dicts, you can define the structure here. + * See :ref:`ansible_collections.azure.azcollection.azure_rm_securitygroup_module`, :ref:`ansible_collections.azure.azcollection.azure_rm_azurefirewall_module`, and :ref:`ansible_collections.openstack.cloud.baremetal_node_action_module` for examples. + +:requirements: + + * List of requirements (if applicable). + * Include minimum versions. + +:seealso: + + * A list of references to other modules, documentation or Internet resources + * In Ansible 2.10 and later, references to modules must use the FQCN or ``ansible.builtin`` for modules in ``ansible-base``. 
+ * A reference can be one of the following formats: + + + .. code-block:: yaml+jinja + + seealso: + + # Reference by module name + - module: cisco.aci.aci_tenant + + # Reference by module name, including description + - module: cisco.aci.aci_tenant + description: ACI module to create tenants on a Cisco ACI fabric. + + # Reference by rST documentation anchor + - ref: aci_guide + description: Detailed information on how to manage your ACI infrastructure using Ansible. + + # Reference by Internet resource + - name: APIC Management Information Model reference + description: Complete reference of the APIC object model. + link: https://developer.cisco.com/docs/apic-mim-ref/ + +:notes: + + * Details of any important information that doesn't fit in one of the above sections. + * For example, whether ``check_mode`` is or is not supported. + + +Linking and other format macros within module documentation +----------------------------------------------------------- + +You can link from your module documentation to other module docs, other resources on docs.ansible.com, and resources elsewhere on the internet with the help of some pre-defined macros. The correct formats for these macros are: + +* ``L()`` for links with a heading. For example: ``See L(Ansible Tower,https://www.ansible.com/products/tower).`` As of Ansible 2.10, do not use ``L()`` for relative links between Ansible documentation and collection documentation. +* ``U()`` for URLs. For example: ``See U(https://www.ansible.com/products/tower) for an overview.`` +* ``R()`` for cross-references with a heading (added in Ansible 2.10). For example: ``See R(Cisco IOS Platform Guide,ios_platform_options)``. Use the RST anchor for the cross-reference. See :ref:`adding_anchors_rst` for details. +* ``M()`` for module names. For example: ``See also M(ansible.builtin.yum) or M(community.general.apt_rpm)``. 
+ +There are also some macros which do not create links but we use them to display certain types of +content in a uniform way: + +* ``I()`` for option names. For example: ``Required if I(state=present).`` This is italicized in + the documentation. +* ``C()`` for files, option values, and inline code. For example: ``If not set the environment variable C(ACME_PASSWORD) will be used.`` or ``Use C(var | foo.bar.my_filter) to transform C(var) into the required format.`` This displays with a mono-space font in the documentation. +* ``B()`` currently has no standardized usage. It is displayed in boldface in the documentation. +* ``HORIZONTALLINE`` is used sparingly as a separator in long descriptions. It becomes a horizontal rule (the ``<hr>`` html tag) in the documentation. + +.. note:: + + For links between modules and documentation within a collection, you can use any of the options above. For links outside of your collection, use ``R()`` if available. Otherwise, use ``U()`` or ``L()`` with full URLs (not relative links). For modules, use ``M()`` with the FQCN or ``ansible.builtin`` as shown in the example. If you are creating your own documentation site, you will need to use the `intersphinx extension <https://www.sphinx-doc.org/en/master/usage/extensions/intersphinx.html>`_ to convert ``R()`` and ``M()`` to the correct links. + + +.. note:: + - To refer to a group of modules in a collection, use ``R()``. When a collection is not the right granularity, use ``C(..)``: + + -``Refer to the R(community.kubernetes collection, plugins_in_community.kubernetes) for information on managing kubernetes clusters.`` + -``The C(win_*) modules (spread across several collections) allow you to manage various aspects of windows hosts.`` + + +.. note:: + + Because it stands out better, use ``seealso`` for general references over the use of notes or adding links to the description. + +.. 
_module_docs_fragments: + +Documentation fragments +----------------------- + +If you are writing multiple related modules, they may share common documentation, such as authentication details, file mode settings, ``notes:`` or ``seealso:`` entries. Rather than duplicate that information in each module's ``DOCUMENTATION`` block, you can save it once as a doc_fragment plugin and use it in each module's documentation. In Ansible, shared documentation fragments are contained in a ``ModuleDocFragment`` class in `lib/ansible/plugins/doc_fragments/ <https://github.com/ansible/ansible/tree/devel/lib/ansible/plugins/doc_fragments>`_ or the equivalent directory in a collection. To include a documentation fragment, add ``extends_documentation_fragment: FRAGMENT_NAME`` in your module documentation. Use the fully qualified collection name for the FRAGMENT_NAME (for example, ``community.kubernetes.k8s_auth_options``). + +Modules should only use items from a doc fragment if the module will implement all of the interface documented there in a manner that behaves the same as the existing modules which import that fragment. The goal is that items imported from the doc fragment will behave identically when used in another module that imports the doc fragment. + +By default, only the ``DOCUMENTATION`` property from a doc fragment is inserted into the module documentation. It is possible to define additional properties in the doc fragment in order to import only certain parts of a doc fragment or mix and match as appropriate. If a property is defined in both the doc fragment and the module, the module value overrides the doc fragment. + +Here is an example doc fragment named ``example_fragment.py``: + +.. code-block:: python + + class ModuleDocFragment(object): + # Standard documentation + DOCUMENTATION = r''' + options: + # options here + ''' + + # Additional section + OTHER = r''' + options: + # other options here + ''' + + +To insert the contents of ``OTHER`` in a module: + +.. 
code-block:: yaml+jinja + + extends_documentation_fragment: example_fragment.other + +Or use both : + +.. code-block:: yaml+jinja + + extends_documentation_fragment: + - example_fragment + - example_fragment.other + +.. _note: + * Prior to Ansible 2.8, documentation fragments were kept in ``lib/ansible/utils/module_docs_fragments``. + +.. versionadded:: 2.8 + +Since Ansible 2.8, you can have user-supplied doc_fragments by using a ``doc_fragments`` directory adjacent to play or role, just like any other plugin. + +For example, all AWS modules should include: + +.. code-block:: yaml+jinja + + extends_documentation_fragment: + - aws + - ec2 + +:ref:`docfragments_collections` describes how to incorporate documentation fragments in a collection. + +.. _examples_block: + +EXAMPLES block +============== + +After the shebang, the UTF-8 coding, the copyright line, the license section, and the ``DOCUMENTATION`` block comes the ``EXAMPLES`` block. Here you show users how your module works with real-world examples in multi-line plain-text YAML format. The best examples are ready for the user to copy and paste into a playbook. Review and update your examples with every change to your module. + +Per playbook best practices, each example should include a ``name:`` line:: + + EXAMPLES = r''' + - name: Ensure foo is installed + namespace.collection.modulename: + name: foo + state: present + ''' + +The ``name:`` line should be capitalized and not include a trailing dot. + +Use a fully qualified collection name (FQCN) as a part of the module's name like in the example above. For modules in ``ansible-base``, use the ``ansible.builtin.`` identifier, for example ``ansible.builtin.debug``. + +If your examples use boolean options, use yes/no values. Since the documentation generates boolean values as yes/no, having the examples use these values as well makes the module documentation more consistent. 
+ +If your module returns facts that are often needed, an example of how to use them can be helpful. + +.. _return_block: + +RETURN block +============ + +After the shebang, the UTF-8 coding, the copyright line, the license section, ``DOCUMENTATION`` and ``EXAMPLES`` blocks comes the ``RETURN`` block. This section documents the information the module returns for use by other modules. + +If your module doesn't return anything (apart from the standard returns), this section of your module should read: ``RETURN = r''' # '''`` +Otherwise, for each value returned, provide the following fields. All fields are required unless specified otherwise. + +:return name: + Name of the returned field. + + :description: + Detailed description of what this value represents. Capitalized and with trailing dot. + :returned: + When this value is returned, such as ``always``, ``changed`` or ``success``. This is a string and can contain any human-readable content. + :type: + Data type. + :elements: + If ``type='list'``, specifies the data type of the list's elements. + :sample: + One or more examples. + :version_added: + Only needed if this return was extended after initial Ansible release, in other words, this is greater than the top level `version_added` field. + This is a string, and not a float, for example, ``version_added: '2.3'``. + :contains: + Optional. To describe nested return values, set ``type: dict``, or ``type: list``/``elements: dict``, or if you really have to, ``type: complex``, and repeat the elements above for each sub-field. + +Here are two example ``RETURN`` sections, one with three simple fields and one with a complex nested field:: + + RETURN = r''' + dest: + description: Destination file/path. + returned: success + type: str + sample: /path/to/file.txt + src: + description: Source file used for the copy on the target machine. 
+ returned: changed + type: str + sample: /home/httpd/.ansible/tmp/ansible-tmp-1423796390.97-147729857856000/source + md5sum: + description: MD5 checksum of the file after running copy. + returned: when supported + type: str + sample: 2a5aeecc61dc98c4d780b14b330e3282 + ''' + + RETURN = r''' + packages: + description: Information about package requirements. + returned: success + type: dict + contains: + missing: + description: Packages that are missing from the system. + returned: success + type: list + elements: str + sample: + - libmysqlclient-dev + - libxml2-dev + badversion: + description: Packages that are installed but at bad versions. + returned: success + type: list + elements: dict + sample: + - package: libxml2-dev + version: 2.9.4+dfsg1-2 + constraint: ">= 3.0" + ''' + +.. _python_imports: + +Python imports +============== + +After the shebang, the UTF-8 coding, the copyright line, the license, and the sections for ``DOCUMENTATION``, ``EXAMPLES``, and ``RETURN``, you can finally add the python imports. All modules must use Python imports in the form: + +.. code-block:: python + + from module_utils.basic import AnsibleModule + +The use of "wildcard" imports such as ``from module_utils.basic import *`` is no longer allowed. + +.. _dev_testing_module_documentation: + +Testing module documentation +============================ + +To test Ansible documentation locally please :ref:`follow instruction<testing_module_documentation>`. diff --git a/docs/docsite/rst/dev_guide/developing_modules_general.rst b/docs/docsite/rst/dev_guide/developing_modules_general.rst new file mode 100644 index 00000000..cb183b70 --- /dev/null +++ b/docs/docsite/rst/dev_guide/developing_modules_general.rst @@ -0,0 +1,221 @@ +.. _developing_modules_general: +.. 
_module_dev_tutorial_sample: + +******************************************* +Ansible module development: getting started +******************************************* + +A module is a reusable, standalone script that Ansible runs on your behalf, either locally or remotely. Modules interact with your local machine, an API, or a remote system to perform specific tasks like changing a database password or spinning up a cloud instance. Each module can be used by the Ansible API, or by the :command:`ansible` or :command:`ansible-playbook` programs. A module provides a defined interface, accepts arguments, and returns information to Ansible by printing a JSON string to stdout before exiting. + +If you need functionality that is not available in any of the thousands of Ansible modules found in collections, you can easily write your own custom module. When you write a module for local use, you can choose any programming language and follow your own rules. Use this topic to learn how to create an Ansible module in Python. After you create a module, you must add it locally to the appropriate directory so that Ansible can find and execute it. For details about adding a module locally, see :ref:`developing_locally`. + +.. contents:: + :local: + +.. _environment_setup: + +Environment setup +================= + +Prerequisites via apt (Ubuntu) +------------------------------ + +Due to dependencies (for example ansible -> paramiko -> pynacl -> libffi): + +.. code:: bash + + sudo apt update + sudo apt install build-essential libssl-dev libffi-dev python-dev + +Common environment setup +------------------------------ + +1. Clone the Ansible repository: + ``$ git clone https://github.com/ansible/ansible.git`` +2. Change directory into the repository root dir: ``$ cd ansible`` +3. Create a virtual environment: ``$ python3 -m venv venv`` (or for + Python 2 ``$ virtualenv venv``. Note, this requires you to install + the virtualenv package: ``$ pip install virtualenv``) +4. 
Activate the virtual environment: ``$ . venv/bin/activate`` +5. Install development requirements: + ``$ pip install -r requirements.txt`` +6. Run the environment setup script for each new dev shell process: + ``$ . hacking/env-setup`` + +.. note:: After the initial setup above, every time you are ready to start + developing Ansible you should be able to just run the following from the + root of the Ansible repo: + ``$ . venv/bin/activate && . hacking/env-setup`` + + +Creating an info or a facts module +================================== + +Ansible gathers information about the target machines using facts modules, and gathers information on other objects or files using info modules. +If you find yourself trying to add ``state: info`` or ``state: list`` to an existing module, that is often a sign that a new dedicated ``_facts`` or ``_info`` module is needed. + +In Ansible 2.8 and onwards, we have two types of information modules: ``*_info`` and ``*_facts``. + +If a module is named ``<something>_facts``, it should be because its main purpose is returning ``ansible_facts``. Do not name modules that do not do this with ``_facts``. +Only use ``ansible_facts`` for information that is specific to the host machine, for example network interfaces and their configuration, which operating system and which programs are installed. + +Modules that query/return general information (and not ``ansible_facts``) should be named ``_info``. +General information is non-host specific information, for example information on online/cloud services (you can access different accounts for the same online service from the same host), or information on VMs and containers accessible from the machine, or information on individual files or programs. + +Info and facts modules are just like any other Ansible module, with a few minor requirements: + +1. They MUST be named ``<something>_info`` or ``<something>_facts``, where <something> is singular. +2. 
Info ``*_info`` modules MUST return in the form of the :ref:`result dictionary<common_return_values>` so other modules can access them. +3. Fact ``*_facts`` modules MUST return in the ``ansible_facts`` field of the :ref:`result dictionary<common_return_values>` so other modules can access them. +4. They MUST support :ref:`check_mode <check_mode_dry>`. +5. They MUST NOT make any changes to the system. +6. They MUST document the :ref:`return fields<return_block>` and :ref:`examples<examples_block>`. + +To create an info module: + +1. Navigate to the correct directory for your new module: ``$ cd lib/ansible/modules/``. If you are developing module using collection, ``$ cd plugins/modules/`` inside your collection development tree. +2. Create your new module file: ``$ touch my_test_info.py``. +3. Paste the content below into your new info module file. It includes the :ref:`required Ansible format and documentation <developing_modules_documenting>`, a simple :ref:`argument spec for declaring the module options <argument_spec>`, and some example code. +4. Modify and extend the code to do what you want your new info module to do. See the :ref:`programming tips <developing_modules_best_practices>` and :ref:`Python 3 compatibility <developing_python_3>` pages for pointers on writing clean and concise module code. + +.. literalinclude:: ../../../../examples/scripts/my_test_info.py + :language: python + +Use the same process to create a facts module. + +.. literalinclude:: ../../../../examples/scripts/my_test_facts.py + :language: python + +Creating a module +================= + +To create a new module: + +1. Navigate to the correct directory for your new module: ``$ cd lib/ansible/modules/``. If you are developing a module in a :ref:`collection <developing_collections>`, ``$ cd plugins/modules/`` inside your collection development tree. +2. Create your new module file: ``$ touch my_test.py``. +3. Paste the content below into your new module file. 
It includes the :ref:`required Ansible format and documentation <developing_modules_documenting>`, a simple :ref:`argument spec for declaring the module options <argument_spec>`, and some example code. +4. Modify and extend the code to do what you want your new module to do. See the :ref:`programming tips <developing_modules_best_practices>` and :ref:`Python 3 compatibility <developing_python_3>` pages for pointers on writing clean and concise module code. + +.. literalinclude:: ../../../../examples/scripts/my_test.py + :language: python + +Exercising your module code +=========================== + +After you modify the sample code above to do what you want, you can try out your module. +Our :ref:`debugging tips <debugging_modules>` will help if you run into bugs as you verify your module code. + + +Exercising module code locally +------------------------------ + +If your module does not need to target a remote host, you can quickly and easily exercise your code locally like this: + +- Create an arguments file, a basic JSON config file that passes parameters to your module so you can run it. Name the arguments file ``/tmp/args.json`` and add the following content: + +.. code:: json + + { + "ANSIBLE_MODULE_ARGS": { + "name": "hello", + "new": true + } + } + +- If you are using a virtual environment (highly recommended for + development) activate it: ``$ . venv/bin/activate`` +- Setup the environment for development: ``$ . hacking/env-setup`` +- Run your test module locally and directly: + ``$ python -m ansible.modules.my_test /tmp/args.json`` + +This should return output like this: + +.. code:: json + + {"changed": true, "state": {"original_message": "hello", "new_message": "goodbye"}, "invocation": {"module_args": {"name": "hello", "new": true}}} + + +Exercising module code in a playbook +------------------------------------ + +The next step in testing your new module is to consume it with an Ansible playbook. 
+ +- Create a playbook in any directory: ``$ touch testmod.yml`` +- Add the following to the new playbook file:: + + - name: test my new module + hosts: localhost + tasks: + - name: run the new module + my_test: + name: 'hello' + new: true + register: testout + - name: dump test output + debug: + msg: '{{ testout }}' + +- Run the playbook and analyze the output: ``$ ansible-playbook ./testmod.yml`` + +Testing basics +==================== + +These two examples will get you started with testing your module code. Please review our :ref:`testing <developing_testing>` section for more detailed +information, including instructions for :ref:`testing module documentation <testing_module_documentation>`, adding :ref:`integration tests <testing_integration>`, and more. + +.. note:: + Every new module and plugin should have integration tests, even if the tests cannot be run on Ansible CI infrastructure. + In this case, the tests should be marked with the ``unsupported`` alias in `aliases file <https://docs.ansible.com/ansible/latest/dev_guide/testing/sanity/integration-aliases.html>`_. + +Performing sanity tests +----------------------- + +You can run through Ansible's sanity checks in a container: + +``$ ansible-test sanity -v --docker --python 2.7 MODULE_NAME`` + +.. note:: + Note that this example requires Docker to be installed and running. If you'd rather not use a container for this, you can choose to use ``--venv`` instead of ``--docker``. + +Unit tests +---------- + +You can add unit tests for your module in ``./test/units/modules``. You must first setup your testing environment. In this example, we're using Python 3.5. + +- Install the requirements (outside of your virtual environment): ``$ pip3 install -r ./test/lib/ansible_test/_data/requirements/units.txt`` +- Run ``. hacking/env-setup`` +- To run all tests do the following: ``$ ansible-test units --python 3.5``. If you are using a CI environment, these tests will run automatically. + +.. 
note:: Ansible uses pytest for unit testing. + +To run pytest against a single test module, you can do the following (provide the path to the test module appropriately): + +``$ pytest -r a --cov=. --cov-report=html --fulltrace --color yes +test/units/modules/.../test/my_test.py`` + +Contributing back to Ansible +============================ + +If you would like to contribute to ``ansible-base`` by adding a new feature or fixing a bug, `create a fork <https://help.github.com/articles/fork-a-repo/>`_ of the ansible/ansible repository and develop against a new feature branch using the ``devel`` branch as a starting point. When you have a good working code change, you can submit a pull request to the Ansible repository by selecting your feature branch as a source and the Ansible devel branch as a target. + +If you want to contribute a module to an :ref:`Ansible collection <contributing_maintained_collections>`, review our :ref:`submission checklist <developing_modules_checklist>`, :ref:`programming tips <developing_modules_best_practices>`, and :ref:`strategy for maintaining Python 2 and Python 3 compatibility <developing_python_3>`, as well as information about :ref:`testing <developing_testing>` before you open a pull request. + +The :ref:`Community Guide <ansible_community_guide>` covers how to open a pull request and what happens next. + + +Communication and development support +===================================== + +Join the IRC channel ``#ansible-devel`` on freenode for discussions +surrounding Ansible development. + +For questions and discussions pertaining to using the Ansible product, +use the ``#ansible`` channel. + +For more specific IRC channels look at :ref:`Community Guide, Communicating <communication_irc>`. + +Credit +====== + +Thank you to Thomas Stringer (`@trstringer <https://github.com/trstringer>`_) for contributing source +material for this topic. 
diff --git a/docs/docsite/rst/dev_guide/developing_modules_general_aci.rst b/docs/docsite/rst/dev_guide/developing_modules_general_aci.rst new file mode 100644 index 00000000..97ee2b42 --- /dev/null +++ b/docs/docsite/rst/dev_guide/developing_modules_general_aci.rst @@ -0,0 +1,443 @@ +.. _aci_dev_guide: + +**************************** +Developing Cisco ACI modules +**************************** +This is a brief walk-through of how to create new Cisco ACI modules for Ansible. + +For more information about Cisco ACI, look at the :ref:`Cisco ACI user guide <aci_guide>`. + +What's covered in this section: + +.. contents:: + :depth: 3 + :local: + + +.. _aci_dev_guide_intro: + +Introduction +============ +The `cisco.aci collection <https://galaxy.ansible.com/cisco/aci>`_ already includes a large number of Cisco ACI modules, however the ACI object model is huge and covering all possible functionality would easily cover more than 1500 individual modules. + +If you need specific functionality, you have 2 options: + +- Learn the ACI object model and use the low-level APIC REST API using the :ref:`aci_rest <aci_rest_module>` module +- Write your own dedicated modules, which is actually quite easy + +.. seealso:: + + `ACI Fundamentals: ACI Policy Model <https://www.cisco.com/c/en/us/td/docs/switches/datacenter/aci/apic/sw/1-x/aci-fundamentals/b_ACI-Fundamentals/b_ACI-Fundamentals_chapter_010001.html>`_ + A good introduction to the ACI object model. + `APIC Management Information Model reference <https://developer.cisco.com/docs/apic-mim-ref/>`_ + Complete reference of the APIC object model. + `APIC REST API Configuration Guide <https://www.cisco.com/c/en/us/td/docs/switches/datacenter/aci/apic/sw/2-x/rest_cfg/2_1_x/b_Cisco_APIC_REST_API_Configuration_Guide.html>`_ + Detailed guide on how the APIC REST API is designed and used, incl. many examples. + + +So let's look at how a typical ACI module is built up. + + +.. 
_aci_dev_guide_module_structure: + +ACI module structure +==================== + +Importing objects from Python libraries +--------------------------------------- +The following imports are standard across ACI modules: + +.. code-block:: python + + from ansible.module_utils.aci import ACIModule, aci_argument_spec + from ansible.module_utils.basic import AnsibleModule + + +Defining the argument spec +-------------------------- +The first line adds the standard connection parameters to the module. After that, the next section will update the ``argument_spec`` dictionary with module-specific parameters. The module-specific parameters should include: + +* the object_id (usually the name) +* the configurable properties of the object +* the parent object IDs (all parents up to the root) +* only child classes that are a 1-to-1 relationship (1-to-many/many-to-many require their own module to properly manage) +* the state + + + ``state: absent`` to ensure object does not exist + + ``state: present`` to ensure the object and configs exist; this is also the default + + ``state: query`` to retrieve information about objects in the class + +.. code-block:: python + + def main(): + argument_spec = aci_argument_spec() + argument_spec.update( + object_id=dict(type='str', aliases=['name']), + object_prop1=dict(type='str'), + object_prop2=dict(type='str', choices=['choice1', 'choice2', 'choice3']), + object_prop3=dict(type='int'), + parent_id=dict(type='str'), + child_object_id=dict(type='str'), + child_object_prop=dict(type='str'), + state=dict(type='str', default='present', choices=['absent', 'present', 'query']), + ) + + +.. hint:: Do not provide default values for configuration arguments. Default values could cause unintended changes to the object. + +Using the AnsibleModule object +------------------------------ +The following section creates an AnsibleModule instance. The module should support check-mode, so we pass the ``argument_spec`` and ``supports_check_mode`` arguments. 
Since these modules support querying the APIC for all objects of the module's class, the object/parent IDs should only be required if ``state: absent`` or ``state: present``. + +.. code-block:: python + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_if=[ + ['state', 'absent', ['object_id', 'parent_id']], + ['state', 'present', ['object_id', 'parent_id']], + ], + ) + + +Mapping variable definition +--------------------------- +Once the AnsibleModule object has been initiated, the necessary parameter values should be extracted from ``params`` and any data validation should be done. Usually the only params that need to be extracted are those related to the ACI object configuration and its child configuration. If you have integer objects that you would like to validate, then the validation should be done here, and the ``ACIModule.payload()`` method will handle the string conversion. + +.. code-block:: python + + object_id = module.params['object_id'] + object_prop1 = module.params['object_prop1'] + object_prop2 = module.params['object_prop2'] + object_prop3 = module.params['object_prop3'] + if object_prop3 is not None and object_prop3 not in range(x, y): + module.fail_json(msg='Valid object_prop3 values are between x and (y-1)') + child_object_id = module.params['child_object_id'] + child_object_prop = module.params['child_object_prop'] + state = module.params['state'] + + +Using the ACIModule object +-------------------------- +The ACIModule class handles most of the logic for the ACI modules. The ACIModule extends functionality to the AnsibleModule object, so the module instance must be passed into the class instantiation. + +.. code-block:: python + + aci = ACIModule(module) + +The ACIModule has six main methods that are used by the modules: + +* construct_url +* get_existing +* payload +* get_diff +* post_config +* delete_config + +The first two methods are used regardless of what value is passed to the ``state`` parameter. 
+ +Constructing URLs +^^^^^^^^^^^^^^^^^ +The ``construct_url()`` method is used to dynamically build the appropriate URL to interact with the object, and the appropriate filter string that should be appended to the URL to filter the results. + +* When the ``state`` is not ``query``, the URL is the base URL to access the APIC plus the distinguished name to access the object. The filter string will restrict the returned data to just the configuration data. +* When ``state`` is ``query``, the URL and filter string used depends on what parameters are passed to the object. This method handles the complexity so that it is easier to add new modules and so that all modules are consistent in what type of data is returned. + +.. note:: Our design goal is to take all ID parameters that have values, and return the most specific data possible. If you do not supply any ID parameters to the task, then all objects of the class will be returned. If your task supplies all of the ID parameters, then the data for the specific object is returned. If a partial set of ID parameters are passed, then the module will use the IDs that are passed to build the URL and filter strings appropriately. + +The ``construct_url()`` method takes 2 required arguments: + +* **self** - passed automatically with the class instance +* **root_class** - A dictionary consisting of ``aci_class``, ``aci_rn``, ``target_filter``, and ``module_object`` keys + + + **aci_class**: The name of the class used by the APIC, for example ``fvTenant`` + + + **aci_rn**: The relative name of the object, for example ``tn-ACME`` + + + **target_filter**: A dictionary with key-value pairs that make up the query string for selecting a subset of entries, for example ``{'name': 'ACME'}`` + + + **module_object**: The particular object for this class, for example ``ACME`` + +Example: + +.. 
code-block:: python + + aci.construct_url( + root_class=dict( + aci_class='fvTenant', + aci_rn='tn-{0}'.format(tenant), + target_filter={'name': tenant}, + module_object=tenant, + ), + ) + +Some modules, like ``aci_tenant``, are the root class and so they would not need to pass any additional arguments to the method. + +The ``construct_url()`` method takes 4 optional arguments, the first three imitate the root class as described above, but are for child objects: + +* subclass_1 - A dictionary consisting of ``aci_class``, ``aci_rn``, ``target_filter``, and ``module_object`` keys + + + Example: Application Profile Class (AP) + +* subclass_2 - A dictionary consisting of ``aci_class``, ``aci_rn``, ``target_filter``, and ``module_object`` keys + + + Example: End Point Group (EPG) + +* subclass_3 - A dictionary consisting of ``aci_class``, ``aci_rn``, ``target_filter``, and ``module_object`` keys + + + Example: Binding a Contract to an EPG + +* child_classes - The list of APIC names for the child classes supported by the modules. + + + This is a list, even if it is a list of one + + These are the unfriendly names used by the APIC + + These are used to limit the returned child_classes when possible + + Example: ``child_classes=['fvRsBDSubnetToProfile', 'fvRsNdPfxPol']`` + +.. note:: Sometimes the APIC will require special characters ([, ], and -) or will use object metadata in the name ("vlanns" for VLAN pools); the module should handle adding special characters or joining of multiple parameters in order to keep expected inputs simple. + +Getting the existing configuration +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Once the URL and filter string have been built, the module is ready to retrieve the existing configuration for the object: + +* ``state: present`` retrieves the configuration to use as a comparison against what was entered in the task. All values that are different than the existing values will be updated. 
+* ``state: absent`` uses the existing configuration to see if the item exists and needs to be deleted. +* ``state: query`` uses this to perform the query for the task and report back the existing data. + +.. code-block:: python + + aci.get_existing() + + +When state is present +^^^^^^^^^^^^^^^^^^^^^ +When ``state: present``, the module needs to perform a diff against the existing configuration and the task entries. If any value needs to be updated, then the module will make a POST request with only the items that need to be updated. Some modules have children that are in a 1-to-1 relationship with another object; for these cases, the module can be used to manage the child objects. + +Building the ACI payload +"""""""""""""""""""""""" +The ``aci.payload()`` method is used to build a dictionary of the proposed object configuration. All parameters that were not provided a value in the task will be removed from the dictionary (both for the object and its children). Any parameter that does have a value will be converted to a string and added to the final dictionary object that will be used for comparison against the existing configuration. + +The ``aci.payload()`` method takes two required arguments and 1 optional argument, depending on if the module manages child objects. + +* ``aci_class`` is the APIC name for the object's class, for example ``aci_class='fvBD'`` +* ``class_config`` is the appropriate dictionary to be used as the payload for the POST request + + + The keys should match the names used by the APIC. + + The values should be the corresponding value in ``module.params``; these are the variables defined above + +* ``child_configs`` is optional, and is a list of child config dictionaries. + + + The child configs include the full child object dictionary, not just the attributes configuration portion. + + The configuration portion is built the same way as the object. + +.. 
code-block:: python + + aci.payload( + aci_class=aci_class, + class_config=dict( + name=bd, + descr=description, + type=bd_type, + ), + child_configs=[ + dict( + fvRsCtx=dict( + attributes=dict( + tnFvCtxName=vrf + ), + ), + ), + ], + ) + + +Performing the request +"""""""""""""""""""""" +The ``get_diff()`` method is used to perform the diff, and takes only one required argument, ``aci_class``. +Example: ``aci.get_diff(aci_class='fvBD')`` + +The ``post_config()`` method is used to make the POST request to the APIC if needed. This method doesn't take any arguments and handles check mode. +Example: ``aci.post_config()`` + + +Example code +"""""""""""" +.. code-block:: text + + if state == 'present': + aci.payload( + aci_class='<object APIC class>', + class_config=dict( + name=object_id, + prop1=object_prop1, + prop2=object_prop2, + prop3=object_prop3, + ), + child_configs=[ + dict( + '<child APIC class>'=dict( + attributes=dict( + child_key=child_object_id, + child_prop=child_object_prop + ), + ), + ), + ], + ) + + aci.get_diff(aci_class='<object APIC class>') + + aci.post_config() + + +When state is absent +^^^^^^^^^^^^^^^^^^^^ +If the task sets the state to absent, then the ``delete_config()`` method is all that is needed. This method does not take any arguments, and handles check mode. + +.. code-block:: text + + elif state == 'absent': + aci.delete_config() + + +Exiting the module +^^^^^^^^^^^^^^^^^^ +To have the module exit, call the ACIModule method ``exit_json()``. This method automatically takes care of returning the common return values for you. + +.. code-block:: text + + aci.exit_json() + + if __name__ == '__main__': + main() + + +.. _aci_dev_guide_testing: + +Testing ACI library functions +============================= +You can test your ``construct_url()`` and ``payload()`` arguments without accessing APIC hardware by using the following python script: + +.. 
code-block:: text + + #!/usr/bin/python + import json + from ansible.module_utils.network.aci.aci import ACIModule + + # Just another class mimicing a bare AnsibleModule class for construct_url() and payload() methods + class AltModule(): + params = dict( + host='dummy', + port=123, + protocol='https', + state='present', + output_level='debug', + ) + + # A sub-class of ACIModule to overload __init__ (we don't need to log into APIC) + class AltACIModule(ACIModule): + def __init__(self): + self.result = dict(changed=False) + self.module = AltModule() + self.params = self.module.params + + # Instantiate our version of the ACI module + aci = AltACIModule() + + # Define the variables you need below + aep = 'AEP' + aep_domain = 'uni/phys-DOMAIN' + + # Below test the construct_url() arguments to see if it produced correct results + aci.construct_url( + root_class=dict( + aci_class='infraAttEntityP', + aci_rn='infra/attentp-{}'.format(aep), + target_filter={'name': aep}, + module_object=aep, + ), + subclass_1=dict( + aci_class='infraRsDomP', + aci_rn='rsdomP-[{}]'.format(aep_domain), + target_filter={'tDn': aep_domain}, + module_object=aep_domain, + ), + ) + + # Below test the payload arguments to see if it produced correct results + aci.payload( + aci_class='infraRsDomP', + class_config=dict(tDn=aep_domain), + ) + + # Print the URL and proposed payload + print 'URL:', json.dumps(aci.url, indent=4) + print 'PAYLOAD:', json.dumps(aci.proposed, indent=4) + + +This will result in: + +.. code-block:: yaml + + URL: "https://dummy/api/mo/uni/infra/attentp-AEP/rsdomP-[phys-DOMAIN].json" + PAYLOAD: { + "infraRsDomP": { + "attributes": { + "tDn": "phys-DOMAIN" + } + } + } + +Testing for sanity checks +------------------------- +You can run from your fork something like: + +.. code-block:: bash + + $ ansible-test sanity --python 2.7 lib/ansible/modules/network/aci/aci_tenant.py + +.. seealso:: + + :ref:`testing_sanity` + Information on how to build sanity tests. 
+ + +Testing ACI integration tests +----------------------------- +You can run this: + +.. code-block:: bash + + $ ansible-test network-integration --continue-on-error --allow-unsupported --diff -v aci_tenant + +.. note:: You may need to add ``--python 2.7`` or ``--python 3.6`` in order to use the correct python version for performing tests. + +You may want to edit the used inventory at *test/integration/inventory.networking* and add something like: + +.. code-block:: ini + + [aci:vars] + aci_hostname=my-apic-1 + aci_username=admin + aci_password=my-password + aci_use_ssl=yes + aci_use_proxy=no + + [aci] + localhost ansible_ssh_host=127.0.0.1 ansible_connection=local + +.. seealso:: + + :ref:`testing_integration` + Information on how to build integration tests. + + +Testing for test coverage +------------------------- +You can run this: + +.. code-block:: bash + + $ ansible-test network-integration --python 2.7 --allow-unsupported --coverage aci_tenant + $ ansible-test coverage report diff --git a/docs/docsite/rst/dev_guide/developing_modules_general_windows.rst b/docs/docsite/rst/dev_guide/developing_modules_general_windows.rst new file mode 100644 index 00000000..3dd66c2e --- /dev/null +++ b/docs/docsite/rst/dev_guide/developing_modules_general_windows.rst @@ -0,0 +1,696 @@ +.. _developing_modules_general_windows: + +************************************** +Windows module development walkthrough +************************************** + +In this section, we will walk through developing, testing, and debugging an +Ansible Windows module. + +Because Windows modules are written in Powershell and need to be run on a +Windows host, this guide differs from the usual development walkthrough guide. + +What's covered in this section: + +.. contents:: + :local: + + +Windows environment setup +========================= + +Unlike Python module development which can be run on the host that runs +Ansible, Windows modules need to be written and tested for Windows hosts. 
+While evaluation editions of Windows can be downloaded from +Microsoft, these images are usually not ready to be used by Ansible without +further modification. The easiest way to set up a Windows host so that it is +ready to be used by Ansible is to set up a virtual machine using Vagrant. +Vagrant can be used to download existing OS images called *boxes* that are then +deployed to a hypervisor like VirtualBox. These boxes can either be created and +stored offline or they can be downloaded from a central repository called +Vagrant Cloud. + +This guide will use the Vagrant boxes created by the `packer-windoze <https://github.com/jborean93/packer-windoze>`_ +repository which have also been uploaded to `Vagrant Cloud <https://app.vagrantup.com/boxes/search?utf8=%E2%9C%93&sort=downloads&provider=&q=jborean93>`_. +To find out more info on how these images are created, please go to the GitHub +repo and look at the ``README`` file. + +Before you can get started, the following programs must be installed (please consult the Vagrant and +VirtualBox documentation for installation instructions): + +- Vagrant +- VirtualBox + +Create a Windows server in a VM +=============================== + +To create a single Windows Server 2016 instance, run the following: + +.. code-block:: shell + + vagrant init jborean93/WindowsServer2016 + vagrant up + +This will download the Vagrant box from Vagrant Cloud and add it to the local +boxes on your host and then start up that instance in VirtualBox. When starting +for the first time, the Windows VM will run through the sysprep process and +then create a HTTP and HTTPS WinRM listener automatically. Vagrant will finish +its process once the listeners are online, after which the VM can be used by Ansible. + +Create an Ansible inventory +=========================== + +The following Ansible inventory file can be used to connect to the newly +created Windows VM: + +.. 
code-block:: ini + + [windows] + WindowsServer ansible_host=127.0.0.1 + + [windows:vars] + ansible_user=vagrant + ansible_password=vagrant + ansible_port=55986 + ansible_connection=winrm + ansible_winrm_transport=ntlm + ansible_winrm_server_cert_validation=ignore + +.. note:: The port ``55986`` is automatically forwarded by Vagrant to the + Windows host that was created, if this conflicts with an existing local + port then Vagrant will automatically use another one at random and +display that in the output. + +The OS that is created is based on the image set. The following +images can be used: + +- `jborean93/WindowsServer2008-x86 <https://app.vagrantup.com/jborean93/boxes/WindowsServer2008-x86>`_ +- `jborean93/WindowsServer2008-x64 <https://app.vagrantup.com/jborean93/boxes/WindowsServer2008-x64>`_ +- `jborean93/WindowsServer2008R2 <https://app.vagrantup.com/jborean93/boxes/WindowsServer2008R2>`_ +- `jborean93/WindowsServer2012 <https://app.vagrantup.com/jborean93/boxes/WindowsServer2012>`_ +- `jborean93/WindowsServer2012R2 <https://app.vagrantup.com/jborean93/boxes/WindowsServer2012R2>`_ +- `jborean93/WindowsServer2016 <https://app.vagrantup.com/jborean93/boxes/WindowsServer2016>`_ + +When the host is online, it is accessible by RDP on ``127.0.0.1:3389`` but the +port may differ depending if there was a conflict. To get rid of the host, run +``vagrant destroy --force`` and Vagrant will automatically remove the VM and +any other files associated with that VM. + +While this is useful when testing modules on a single Windows instance, these +hosts won't work without modification with domain based modules. The Vagrantfile +at `ansible-windows <https://github.com/jborean93/ansible-windows/tree/master/vagrant>`_ +can be used to create a test domain environment to be used in Ansible. This +repo contains three files which are used by both Ansible and Vagrant to create +multiple Windows hosts in a domain environment. 
These files are: + +- ``Vagrantfile``: The Vagrant file that reads the inventory setup of ``inventory.yml`` and provisions the hosts that are required +- ``inventory.yml``: Contains the hosts that are required and other connection information such as IP addresses and forwarded ports +- ``main.yml``: Ansible playbook called by Vagrant to provision the domain controller and join the child hosts to the domain + +By default, these files will create the following environment: + +- A single domain controller running on Windows Server 2016 +- Five child hosts for each major Windows Server version joined to that domain +- A domain with the DNS name ``domain.local`` +- A local administrator account on each host with the username ``vagrant`` and password ``vagrant`` +- A domain admin account ``vagrant-domain@domain.local`` with the password ``VagrantPass1`` + +The domain name and accounts can be modified by changing the variables +``domain_*`` in the ``inventory.yml`` file if it is required. The inventory +file can also be modified to provision more or less servers by changing the +hosts that are defined under the ``domain_children`` key. The host variable +``ansible_host`` is the private IP that will be assigned to the VirtualBox host +only network adapter while ``vagrant_box`` is the box that will be used to +create the VM. + +Provisioning the environment +============================ + +To provision the environment as is, run the following: + +.. code-block:: shell + + git clone https://github.com/jborean93/ansible-windows.git + cd vagrant + vagrant up + +.. note:: Vagrant provisions each host sequentially so this can take some time + to complete. If any errors occur during the Ansible phase of setting up the + domain, run ``vagrant provision`` to rerun just that step. + +Unlike setting up a single Windows instance with Vagrant, these hosts can also +be accessed using the IP address directly as well as through the forwarded +ports. 
It is easier to access it over the host only network adapter as the
+normal protocol ports are used, for example RDP is still over ``3389``. In cases where
+the host cannot be resolved using the host only network IP, the following
+protocols can be accessed over ``127.0.0.1`` using these forwarded ports:
+
+- ``RDP``: 295xx
+- ``SSH``: 296xx
+- ``WinRM HTTP``: 297xx
+- ``WinRM HTTPS``: 298xx
+- ``SMB``: 299xx
+
+Replace ``xx`` with the entry number in the inventory file where the domain
+controller started with ``00`` and is incremented from there. For example, in
+the default ``inventory.yml`` file, WinRM over HTTPS for ``SERVER2012R2`` is
+forwarded over port ``29804`` as it's the fourth entry in ``domain_children``.
+
+.. note:: While an SSH server is available on all Windows hosts but Server
+    2008 (non R2), it is not a supported connection for Ansible managing Windows
+    hosts and should not be used with Ansible.
+
+Windows new module development
+==============================
+
+When creating a new module there are a few things to keep in mind:
+
+- Module code is in Powershell (.ps1) files while the documentation is contained in Python (.py) files of the same name
+- Avoid using ``Write-Host/Debug/Verbose/Error`` in the module and add what needs to be returned to the ``$module.Result`` variable
+- To fail a module, call ``$module.FailJson("failure message here")``, an Exception or ErrorRecord can be set to the second argument for a more descriptive error message
+- You can pass in the exception or ErrorRecord as a second argument to ``FailJson("failure", $_)`` to get a more detailed output
+- Most new modules require check mode and integration tests before they are merged into the main Ansible codebase
+- Avoid using try/catch statements over a large code block, rather use them for individual calls so the error message can be more descriptive
+- Try and catch specific exceptions when using try/catch statements
+- Avoid using PSCustomObjects unless necessary
+- 
Look for common functions in ``./lib/ansible/module_utils/powershell/`` and use the code there instead of duplicating work. These can be imported by adding the line ``#Requires -Module *`` where * is the filename to import, and will be automatically included with the module code sent to the Windows target when run via Ansible +- As well as PowerShell module utils, C# module utils are stored in ``./lib/ansible/module_utils/csharp/`` and are automatically imported in a module execution if the line ``#AnsibleRequires -CSharpUtil *`` is present +- C# and PowerShell module utils achieve the same goal but C# allows a developer to implement low level tasks, such as calling the Win32 API, and can be faster in some cases +- Ensure the code runs under Powershell v3 and higher on Windows Server 2008 and higher; if higher minimum Powershell or OS versions are required, ensure the documentation reflects this clearly +- Ansible runs modules under strictmode version 2.0. Be sure to test with that enabled by putting ``Set-StrictMode -Version 2.0`` at the top of your dev script +- Favor native Powershell cmdlets over executable calls if possible +- Use the full cmdlet name instead of aliases, for example ``Remove-Item`` over ``rm`` +- Use named parameters with cmdlets, for example ``Remove-Item -Path C:\temp`` over ``Remove-Item C:\temp`` + +A very basic Powershell module `win_environment <https://github.com/ansible-collections/ansible.windows/blob/master/plugins/modules/win_environment.ps1>`_ incorporates best practices for Powershell modules. It demonstrates how to implement check-mode and diff-support, and also shows a warning to the user when a specific condition is met. 
+ +A slightly more advanced module is `win_uri <https://github.com/ansible-collections/ansible.windows/blob/master/plugins/modules/win_uri.ps1>`_ which additionally shows how to use different parameter types (bool, str, int, list, dict, path) and a selection of choices for parameters, how to fail a module and how to handle exceptions. + +As part of the new ``AnsibleModule`` wrapper, the input parameters are defined and validated based on an argument +spec. The following options can be set at the root level of the argument spec: + +- ``mutually_exclusive``: A list of lists, where the inner list contains module options that cannot be set together +- ``no_log``: Stops the module from emitting any logs to the Windows Event log +- ``options``: A dictionary where the key is the module option and the value is the spec for that option +- ``required_by``: A dictionary where the option(s) specified by the value must be set if the option specified by the key is also set +- ``required_if``: A list of lists where the inner list contains 3 or 4 elements; + * The first element is the module option to check the value against + * The second element is the value of the option specified by the first element, if matched then the required if check is run + * The third element is a list of required module options when the above is matched + * An optional fourth element is a boolean that states whether all module options in the third elements are required (default: ``$false``) or only one (``$true``) +- ``required_one_of``: A list of lists, where the inner list contains module options where at least one must be set +- ``required_together``: A list of lists, where the inner list contains module options that must be set together +- ``supports_check_mode``: Whether the module supports check mode, by default this is ``$false`` + +The actual input options for a module are set within the ``options`` value as a dictionary. 
The keys of this dictionary +are the module option names while the values are the spec of that module option. Each spec can have the following +options set: + +- ``aliases``: A list of aliases for the module option +- ``choices``: A list of valid values for the module option, if ``type=list`` then each list value is validated against the choices and not the list itself +- ``default``: The default value for the module option if not set +- ``deprecated_aliases``: A list of hashtables that define aliases that are deprecated and the versions they will be removed in. Each entry must contain the keys ``name`` and ``collection_name`` with either ``version`` or ``date`` +- ``elements``: When ``type=list``, this sets the type of each list value, the values are the same as ``type`` +- ``no_log``: Will sanitise the input value before being returned in the ``module_invocation`` return value +- ``removed_in_version``: States when a deprecated module option is to be removed, a warning is displayed to the end user if set +- ``removed_at_date``: States the date (YYYY-MM-DD) when a deprecated module option will be removed, a warning is displayed to the end user if set +- ``removed_from_collection``: States from which collection the deprecated module option will be removed; must be specified if one of ``removed_in_version`` and ``removed_at_date`` is specified +- ``required``: Will fail when the module option is not set +- ``type``: The type of the module option, if not set then it defaults to ``str``. 
The valid types are;
+  * ``bool``: A boolean value
+  * ``dict``: A dictionary value, if the input is a JSON or key=value string then it is converted to dictionary
+  * ``float``: A float or `Single <https://docs.microsoft.com/en-us/dotnet/api/system.single?view=netframework-4.7.2>`_ value
+  * ``int``: An Int32 value
+  * ``json``: A string where the value is converted to a JSON string if the input is a dictionary
+  * ``list``: A list of values, ``elements=<type>`` can convert the individual list value types if set. If ``elements=dict`` and ``options`` is defined, the values will be validated against the argument spec. When the input is a string then the string is split by ``,`` and any whitespace is trimmed
+  * ``path``: A string where values like ``%TEMP%`` are expanded based on environment values. If the input value starts with ``\\?\`` then no expansion is run
+  * ``raw``: No conversions occur on the value passed in by Ansible
+  * ``sid``: Will convert Windows security identifier values or Windows account names to a `SecurityIdentifier <https://docs.microsoft.com/en-us/dotnet/api/system.security.principal.securityidentifier?view=netframework-4.7.2>`_ value
+  * ``str``: The value is converted to a string
+
+When ``type=dict``, or ``type=list`` and ``elements=dict``, the following keys can also be set for that module option:
+
+- ``apply_defaults``: The value is based on the ``options`` spec defaults for that key if ``True`` and null if ``False``. Only valid when the module option is not defined by the user and ``type=dict``. 
+- ``mutually_exclusive``: Same as the root level ``mutually_exclusive`` but validated against the values in the sub dict +- ``options``: Same as the root level ``options`` but contains the valid options for the sub option +- ``required_if``: Same as the root level ``required_if`` but validated against the values in the sub dict +- ``required_by``: Same as the root level ``required_by`` but validated against the values in the sub dict +- ``required_together``: Same as the root level ``required_together`` but validated against the values in the sub dict +- ``required_one_of``: Same as the root level ``required_one_of`` but validated against the values in the sub dict + +A module type can also be a delegate function that converts the value to whatever is required by the module option. For +example the following snippet shows how to create a custom type that creates a ``UInt64`` value: + +.. code-block:: powershell + + $spec = @{ + uint64_type = @{ type = [Func[[Object], [UInt64]]]{ [System.UInt64]::Parse($args[0]) } } + } + $uint64_type = $module.Params.uint64_type + +When in doubt, look at some of the other core modules and see how things have been +implemented there. + +Sometimes there are multiple ways that Windows offers to complete a task; this +is the order to favor when writing modules: + +- Native Powershell cmdlets like ``Remove-Item -Path C:\temp -Recurse`` +- .NET classes like ``[System.IO.Path]::GetRandomFileName()`` +- WMI objects through the ``New-CimInstance`` cmdlet +- COM objects through ``New-Object -ComObject`` cmdlet +- Calls to native executables like ``Secedit.exe`` + +PowerShell modules support a small subset of the ``#Requires`` options built +into PowerShell as well as some Ansible-specific requirements specified by +``#AnsibleRequires``. These statements can be placed at any point in the script, +but are most commonly near the top. They are used to make it easier to state the +requirements of the module without writing any of the checks. 
Each ``requires`` +statement must be on its own line, but there can be multiple requires statements +in one script. + +These are the checks that can be used within Ansible modules: + +- ``#Requires -Module Ansible.ModuleUtils.<module_util>``: Added in Ansible 2.4, specifies a module_util to load in for the module execution. +- ``#Requires -Version x.y``: Added in Ansible 2.5, specifies the version of PowerShell that is required by the module. The module will fail if this requirement is not met. +- ``#AnsibleRequires -OSVersion x.y``: Added in Ansible 2.5, specifies the OS build version that is required by the module and will fail if this requirement is not met. The actual OS version is derived from ``[Environment]::OSVersion.Version``. +- ``#AnsibleRequires -Become``: Added in Ansible 2.5, forces the exec runner to run the module with ``become``, which is primarily used to bypass WinRM restrictions. If ``ansible_become_user`` is not specified then the ``SYSTEM`` account is used instead. +- ``#AnsibleRequires -CSharpUtil Ansible.<module_util>``: Added in Ansible 2.8, specifies a C# module_util to load in for the module execution. + +C# module utils can reference other C# utils by adding the line +``using Ansible.<module_util>;`` to the top of the script with all the other +using statements. + + +Windows module utilities +======================== + +Like Python modules, PowerShell modules also provide a number of module +utilities that provide helper functions within PowerShell. These module_utils +can be imported by adding the following line to a PowerShell module: + +.. code-block:: powershell + + #Requires -Module Ansible.ModuleUtils.Legacy + +This will import the module_util at ``./lib/ansible/module_utils/powershell/Ansible.ModuleUtils.Legacy.psm1`` +and enable calling all of its functions. As of Ansible 2.8, Windows module +utils can also be written in C# and stored at ``lib/ansible/module_utils/csharp``. 
+These module_utils can be imported by adding the following line to a PowerShell +module: + +.. code-block:: powershell + + #AnsibleRequires -CSharpUtil Ansible.Basic + +This will import the module_util at ``./lib/ansible/module_utils/csharp/Ansible.Basic.cs`` +and automatically load the types in the executing process. C# module utils can +reference each other and be loaded together by adding the following line to the +using statements at the top of the util: + +.. code-block:: csharp + + using Ansible.Become; + +There are special comments that can be set in a C# file for controlling the +compilation parameters. The following comments can be added to the script; + +- ``//AssemblyReference -Name <assembly dll> [-CLR [Core|Framework]]``: The assembly DLL to reference during compilation, the optional ``-CLR`` flag can also be used to state whether to reference when running under .NET Core, Framework, or both (if omitted) +- ``//NoWarn -Name <error id> [-CLR [Core|Framework]]``: A compiler warning ID to ignore when compiling the code, the optional ``-CLR`` works the same as above. A list of warnings can be found at `Compiler errors <https://docs.microsoft.com/en-us/dotnet/csharp/language-reference/compiler-messages/index>`_ + +As well as this, the following pre-processor symbols are defined; + +- ``CORECLR``: This symbol is present when PowerShell is running through .NET Core +- ``WINDOWS``: This symbol is present when PowerShell is running on Windows +- ``UNIX``: This symbol is present when PowerShell is running on Unix + +A combination of these flags help to make a module util interoperable on both +.NET Framework and .NET Core, here is an example of them in action: + +.. 
code-block:: csharp

+    #if CORECLR
+    using Newtonsoft.Json;
+    #else
+    using System.Web.Script.Serialization;
+    #endif
+
+    //AssemblyReference -Name Newtonsoft.Json.dll -CLR Core
+    //AssemblyReference -Name System.Web.Extensions.dll -CLR Framework
+
+    // Ignore error CS1702 for all .NET types
+    //NoWarn -Name CS1702
+
+    // Ignore error CS1956 only for .NET Framework
+    //NoWarn -Name CS1956 -CLR Framework
+
+
+The following is a list of module_utils that are packaged with Ansible and a general description of what
+they do:
+
+- ArgvParser: Utility used to convert a list of arguments to an escaped string compliant with the Windows argument parsing rules.
+- CamelConversion: Utility used to convert camelCase strings/lists/dicts to snake_case.
+- CommandUtil: Utility used to execute a Windows process and return the stdout/stderr and rc as separate objects.
+- FileUtil: Utility that expands on the ``Get-ChildItem`` and ``Test-Path`` to work with special files like ``C:\pagefile.sys``.
+- Legacy: General definitions and helper utilities for Ansible modules.
+- LinkUtil: Utility to create, remove, and get information about symbolic links, junction points and hard links.
+- SID: Utilities used to convert a user or group to a Windows SID and vice versa.
+
+For more details on any specific module utility and their requirements, please see the `Ansible
+module utilities source code <https://github.com/ansible/ansible/tree/devel/lib/ansible/module_utils/powershell>`_.
+
+PowerShell module utilities can be stored outside of the standard Ansible
+distribution for use with custom modules. Custom module_utils are placed in a
+folder called ``module_utils`` located in the root folder of the playbook or role
+directory.
+
+C# module utilities can also be stored outside of the standard Ansible distribution for use with custom modules. 
Like
+PowerShell utils, these are stored in a folder called ``module_utils`` and the filename must end in the extension
+``.cs``, start with ``Ansible.`` and be named after the namespace defined in the util.
+
+The below example is a role structure that contains two PowerShell custom module_utils called
+``Ansible.ModuleUtils.ModuleUtil1``, ``Ansible.ModuleUtils.ModuleUtil2``, and a C# util containing the namespace
+``Ansible.CustomUtil``::
+
+    meta/
+      main.yml
+    defaults/
+      main.yml
+    module_utils/
+      Ansible.ModuleUtils.ModuleUtil1.psm1
+      Ansible.ModuleUtils.ModuleUtil2.psm1
+      Ansible.CustomUtil.cs
+    tasks/
+      main.yml
+
+Each PowerShell module_util must contain at least one function that has been exported with ``Export-ModuleMember``
+at the end of the file. For example
+
+.. code-block:: powershell
+
+    Export-ModuleMember -Function Invoke-CustomUtil, Get-CustomInfo
+
+
+Exposing shared module options
+++++++++++++++++++++++++++++++
+
+PowerShell module utils can easily expose common module options that a module can use when building its argument spec.
+This allows common features to be stored and maintained in one location and have those features used by multiple
+modules with minimal effort. Any new features or bugfixes added to one of these utils are then automatically used by
+the various modules that call that util.
+
+An example of this would be to have a module util that handles authentication and communication against an API. This
+util can be used by multiple modules to expose a common set of module options like the API endpoint, username,
+password, timeout, cert validation, and so on without having to add those options to each module spec. 
+
+The standard convention for a module util that has a shared argument spec would have:
+
+- A ``Get-<namespace.name.util name>Spec`` function that outputs the common spec for a module
+  * It is highly recommended to make this function name be unique to the module to avoid any conflicts with other utils that can be loaded
+  * The format of the output spec is a Hashtable in the same format as the ``$spec`` used for normal modules
+- A function that takes in an ``AnsibleModule`` object called under the ``-Module`` parameter which it can use to get the shared options
+
+Because these options can be shared across various modules it is highly recommended to keep the module option names and
+aliases in the shared spec as specific as they can be. For example do not have a util option called ``password``,
+rather you should prefix it with a unique name like ``acme_password``.
+
+.. warning::
+    Failure to have a unique option name or alias can prevent the util being used by modules that also use those names or
+    aliases for their own options.
+
+The following is an example module util called ``ServiceAuth.psm1`` in a collection that implements a common way for
+modules to authenticate with a service.
+
+.. 
code-block:: powershell

+    Function Invoke-MyServiceResource {
+        [CmdletBinding()]
+        param (
+            [Parameter(Mandatory=$true)]
+            [ValidateScript({ $_.GetType().FullName -eq 'Ansible.Basic.AnsibleModule' })]
+            $Module,
+
+            [Parameter(Mandatory=$true)]
+            [String]
+            $ResourceId,
+
+            [String]
+            $State = 'present'
+        )
+
+        # Process the common module options known to the util
+        $params = @{
+            ServerUri = $Module.Params.my_service_url
+        }
+        if ($Module.Params.my_service_username) {
+            $params.Credential = Get-MyServiceCredential
+        }
+
+        if ($State -eq 'absent') {
+            Remove-MyService @params -ResourceId $ResourceId
+        } else {
+            New-MyService @params -ResourceId $ResourceId
+        }
+    }
+
+    Function Get-MyNamespaceMyCollectionServiceAuthSpec {
+        # Output the util spec
+        @{
+            options = @{
+                my_service_url = @{ type = 'str'; required = $true }
+                my_service_username = @{ type = 'str' }
+                my_service_password = @{ type = 'str'; no_log = $true }
+            }
+
+            required_together = @(
+                ,@('my_service_username', 'my_service_password')
+            )
+        }
+    }
+
+    $exportMembers = @{
+        Function = 'Get-MyNamespaceMyCollectionServiceAuthSpec', 'Invoke-MyServiceResource'
+    }
+    Export-ModuleMember @exportMembers
+
+
+For a module to take advantage of this common argument spec it can be set out like
+
+.. code-block:: powershell
+
+    #!powershell
+
+    # Include the module util ServiceAuth.psm1 from the my_namespace.my_collection collection
+    #AnsibleRequires -PowerShell ansible_collections.my_namespace.my_collection.plugins.module_utils.ServiceAuth
+
+    # Create the module spec like normal
+    $spec = @{
+        options = @{
+            resource_id = @{ type = 'str'; required = $true }
+            state = @{ type = 'str'; choices = 'absent', 'present' }
+        }
+    }
+
+    # Create the module from the module spec but also include the util spec to merge into our own. 
+ $module = [Ansible.Basic.AnsibleModule]::Create($args, $spec, @(Get-MyNamespaceMyCollectionServiceAuthSpec)) + + # Call the ServiceAuth module util and pass in the module object so it can access the module options. + Invoke-MyServiceResource -Module $module -ResourceId $module.Params.resource_id -State $module.params.state + + $module.ExitJson() + + +.. note:: + Options defined in the module spec will always have precedence over a util spec. Any list values under the same key + in a util spec will be appended to the module spec for that same key. Dictionary values will add any keys that are + missing from the module spec and merge any values that are lists or dictionaries. This is similar to how the doc + fragment plugins work when extending module documentation. + +To document these shared util options for a module, create a doc fragment plugin that documents the options implemented +by the module util and extend the module docs for every module that implements the util to include that fragment in +its docs. + + +Windows playbook module testing +=============================== + +You can test a module with an Ansible playbook. For example: + +- Create a playbook in any directory ``touch testmodule.yml``. +- Create an inventory file in the same directory ``touch hosts``. +- Populate the inventory file with the variables required to connect to a Windows host(s). +- Add the following to the new playbook file:: + + --- + - name: test out windows module + hosts: windows + tasks: + - name: test out module + win_module: + name: test name + +- Run the playbook ``ansible-playbook -i hosts testmodule.yml`` + +This can be useful for seeing how Ansible runs with +the new module end to end. Other possible ways to test the module are +shown below. + + +Windows debugging +================= + +Debugging a module currently can only be done on a Windows host. This can be +useful when developing a new module or implementing bug fixes. 
These
+are some steps that need to be followed to set this up:
+
+- Copy the module script to the Windows server
+- Copy the folders ``./lib/ansible/module_utils/powershell`` and ``./lib/ansible/module_utils/csharp`` to the same directory as the script above
+- Add an extra ``#`` to the start of any ``#Requires -Module`` lines in the module code, this is only required for any lines starting with ``#Requires -Module``
+- Add the following to the start of the module script that was copied to the server:
+
+.. code-block:: powershell
+
+    # Set $ErrorActionPreference to what's set during Ansible execution
+    $ErrorActionPreference = "Stop"
+
+    # Set the first argument as the path to a JSON file that contains the module args
+    $args = @("$($pwd.Path)\args.json")
+
+    # Or instead of an args file, set $complex_args to the pre-processed module args
+    $complex_args = @{
+        _ansible_check_mode = $false
+        _ansible_diff = $false
+        path = "C:\temp"
+        state = "present"
+    }
+
+    # Import any C# utils referenced with '#AnsibleRequires -CSharpUtil' or 'using Ansible.<namespace>;'
+    # The $_csharp_utils entries should be the contents of the C# util files and not the path
+    Import-Module -Name "$($pwd.Path)\powershell\Ansible.ModuleUtils.AddType.psm1"
+    $_csharp_utils = @(
+        [System.IO.File]::ReadAllText("$($pwd.Path)\csharp\Ansible.Basic.cs")
+    )
+    Add-CSharpType -References $_csharp_utils -IncludeDebugInfo
+
+    # Import any PowerShell modules referenced with '#Requires -Module'
+    Import-Module -Name "$($pwd.Path)\powershell\Ansible.ModuleUtils.Legacy.psm1"
+
+    # End of the setup code and start of the module code
+    #!powershell
+
+You can add more args to ``$complex_args`` as required by the module or define the module options through a JSON file
+with the structure::
+
+    {
+        "ANSIBLE_MODULE_ARGS": {
+            "_ansible_check_mode": false,
+            "_ansible_diff": false,
+            "path": "C:\\temp",
+            "state": "present"
+        }
+    }
+
+There are multiple IDEs that can be used to debug a Powershell script, two of
+the most 
popular ones are + +- `Powershell ISE`_ +- `Visual Studio Code`_ + +.. _Powershell ISE: https://docs.microsoft.com/en-us/powershell/scripting/core-powershell/ise/how-to-debug-scripts-in-windows-powershell-ise +.. _Visual Studio Code: https://blogs.technet.microsoft.com/heyscriptingguy/2017/02/06/debugging-powershell-script-in-visual-studio-code-part-1/ + +To be able to view the arguments as passed by Ansible to the module follow +these steps. + +- Prefix the Ansible command with :envvar:`ANSIBLE_KEEP_REMOTE_FILES=1<ANSIBLE_KEEP_REMOTE_FILES>` to specify that Ansible should keep the exec files on the server. +- Log onto the Windows server using the same user account that Ansible used to execute the module. +- Navigate to ``%TEMP%\..``. It should contain a folder starting with ``ansible-tmp-``. +- Inside this folder, open the PowerShell script for the module. +- In this script is a raw JSON script under ``$json_raw`` which contains the module arguments under ``module_args``. These args can be assigned manually to the ``$complex_args`` variable that is defined on your debug script or put in the ``args.json`` file. + + +Windows unit testing +==================== + +Currently there is no mechanism to run unit tests for Powershell modules under Ansible CI. + + +Windows integration testing +=========================== + +Integration tests for Ansible modules are typically written as Ansible roles. These test +roles are located in ``./test/integration/targets``. You must first set up your testing +environment, and configure a test inventory for Ansible to connect to. + +In this example we will set up a test inventory to connect to two hosts and run the integration +tests for win_stat: + +- Run the command ``source ./hacking/env-setup`` to prepare environment. +- Create a copy of ``./test/integration/inventory.winrm.template`` and name it ``inventory.winrm``. +- Fill in entries under ``[windows]`` and set the required variables that are needed to connect to the host. 
+- :ref:`Install the required Python modules <windows_winrm>` to support WinRM and a configured authentication method. +- To execute the integration tests, run ``ansible-test windows-integration win_stat``; you can replace ``win_stat`` with the role you want to test. + +This will execute all the tests currently defined for that role. You can set +the verbosity level using the ``-v`` argument just as you would with +ansible-playbook. + +When developing tests for a new module, it is recommended to test a scenario once in +check mode and twice not in check mode. This ensures that check mode +does not make any changes but reports a change, as well as that the second run is +idempotent and does not report changes. For example: + +.. code-block:: yaml + + - name: remove a file (check mode) + win_file: + path: C:\temp + state: absent + register: remove_file_check + check_mode: yes + + - name: get result of remove a file (check mode) + win_command: powershell.exe "if (Test-Path -Path 'C:\temp') { 'true' } else { 'false' }" + register: remove_file_actual_check + + - name: assert remove a file (check mode) + assert: + that: + - remove_file_check is changed + - remove_file_actual_check.stdout == 'true\r\n' + + - name: remove a file + win_file: + path: C:\temp + state: absent + register: remove_file + + - name: get result of remove a file + win_command: powershell.exe "if (Test-Path -Path 'C:\temp') { 'true' } else { 'false' }" + register: remove_file_actual + + - name: assert remove a file + assert: + that: + - remove_file is changed + - remove_file_actual.stdout == 'false\r\n' + + - name: remove a file (idempotent) + win_file: + path: C:\temp + state: absent + register: remove_file_again + + - name: assert remove a file (idempotent) + assert: + that: + - not remove_file_again is changed + + +Windows communication and development support +============================================= + +Join the IRC channel ``#ansible-devel`` or ``#ansible-windows`` on freenode for 
+
+discussions about Ansible development for Windows.
+
+For questions and discussions pertaining to using the Ansible product,
+use the ``#ansible`` channel.
diff --git a/docs/docsite/rst/dev_guide/developing_modules_in_groups.rst b/docs/docsite/rst/dev_guide/developing_modules_in_groups.rst
new file mode 100644
index 00000000..31a9ec9d
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/developing_modules_in_groups.rst
@@ -0,0 +1,80 @@
+.. _developing_modules_in_groups:
+
+*************************
+Creating a new collection
+*************************
+
+Starting with Ansible 2.10, related modules should be developed in a collection. The Ansible core team and community compiled these module development tips and tricks to help companies developing Ansible modules for their products and users developing Ansible modules for third-party products. See :ref:`developing_collections` for a more detailed description of the collections format and additional development guidelines.
+
+.. contents::
+   :local:
+
+.. include:: shared_snippets/licensing.txt
+
+Before you start coding
+=======================
+
+This list of prerequisites is designed to help ensure that you develop high-quality modules that work well with ansible-base and provide a seamless user experience.
+
+* Read through all the pages linked off :ref:`developing_modules_general`; paying particular attention to the :ref:`developing_modules_checklist`.
+* We encourage PEP 8 compliance. See :ref:`testing_pep8` for more information.
+* We encourage supporting :ref:`Python 2.6+ and Python 3.5+ <developing_python_3>`.
+* Look at Ansible Galaxy and review the naming conventions in your functional area (such as cloud, networking, databases).
+* With great power comes great responsibility: Ansible collection maintainers have a duty to help keep content up to date and release collections they are responsible for regularly. 
As with all successful community projects, collection maintainers should keep a watchful eye for reported issues and contributions. +* We strongly recommend unit and/or integration tests. Unit tests are especially valuable when external resources (such as cloud or network devices) are required. For more information see :ref:`developing_testing` and the `Testing Working Group <https://github.com/ansible/community/blob/master/meetings/README.md>`_. + + +Naming conventions +================== + +Fully Qualified Collection Names (FQCNs) for plugins and modules include three elements: + + * the Galaxy namespace, which generally represents the company or group + * the collection name, which generally represents the product or OS + * the plugin or module name + * always in lower case + * words separated with an underscore (``_``) character + * singular, rather than plural, for example, ``command`` not ``commands`` + +For example, ``community.mongodb.mongodb_linux`` or ``cisco.meraki.meraki_device``. + +It is convenient if the organization and repository names on GitHub (or elsewhere) match your namespace and collection names on Ansible Galaxy, but it is not required. The plugin names you select, however, are always the same in your code repository and in your collection artifact on Galaxy. + +Speak to us +=========== + +Circulating your ideas before coding helps you adopt good practices and avoid common mistakes. After reading the "Before you start coding" section you should have a reasonable idea of the structure of your modules. Write a list of your proposed plugin and/or module names, with a short description of what each one does. Circulate that list on IRC or a mailing list so the Ansible community can review your ideas for consistency and familiarity. Names and functionality that are consistent, predictable, and familiar make your collection easier to use. 
+ +Where to get support +==================== + +Ansible has a thriving and knowledgeable community of module developers that is a great resource for getting your questions answered. + +In the :ref:`ansible_community_guide` you can find how to: + +* Subscribe to the Mailing Lists - We suggest "Ansible Development List" and "Ansible Announce list" +* ``#ansible-devel`` - We have found that IRC ``#ansible-devel`` on FreeNode's IRC network works best for developers so we can have an interactive dialogue. +* IRC meetings - Join the various weekly IRC meetings `meeting schedule and agenda page <https://github.com/ansible/community/blob/master/meetings/README.md>`_ + +Required files +============== + +Your collection should include the following files to be usable: + +* an ``__init__.py`` file - An empty file to initialize namespace and allow Python to import the files. *Required* +* at least one plugin, for example, ``/plugins/modules/$your_first_module.py``. *Required* +* if needed, one or more ``/plugins/doc_fragments/$topic.py`` files - Code documentation, such as details regarding common arguments. *Optional* +* if needed, one or more ``/plugins/module_utils/$topic.py`` files - Code shared between more than one module, such as common arguments. *Optional* +
+When you have these files ready, review the :ref:`developing_modules_checklist` again. If you are creating a new collection, you are responsible for all procedures related to your repository, including setting rules for contributions, finding reviewers, and testing and maintaining the code in your collection. + +If you need help or advice, consider joining the ``#ansible-devel`` IRC channel (see how in the "Where to get support" section above). + +New to git or GitHub +==================== + +We realize this may be your first use of Git or GitHub.
The following guides may be of use: + +* `How to create a fork of ansible/ansible <https://help.github.com/articles/fork-a-repo/>`_ +* `How to sync (update) your fork <https://help.github.com/articles/syncing-a-fork/>`_ +* `How to create a Pull Request (PR) <https://help.github.com/articles/about-pull-requests/>`_ diff --git a/docs/docsite/rst/dev_guide/developing_plugins.rst b/docs/docsite/rst/dev_guide/developing_plugins.rst new file mode 100644 index 00000000..e40a3281 --- /dev/null +++ b/docs/docsite/rst/dev_guide/developing_plugins.rst @@ -0,0 +1,495 @@ +.. _developing_plugins: +.. _plugin_guidelines: + +****************** +Developing plugins +****************** + +.. contents:: + :local: + +Plugins augment Ansible's core functionality with logic and features that are accessible to all modules. Ansible collections include a number of handy plugins, and you can easily write your own. All plugins must: + +* be written in Python +* raise errors +* return strings in unicode +* conform to Ansible's configuration and documentation standards + +Once you've reviewed these general guidelines, you can skip to the particular type of plugin you want to develop. + +Writing plugins in Python +========================= + +You must write your plugin in Python so it can be loaded by the ``PluginLoader`` and returned as a Python object that any module can use. Since your plugin will execute on the controller, you must write it in a :ref:`compatible version of Python <control_node_requirements>`. + +Raising errors +============== + +You should return errors encountered during plugin execution by raising ``AnsibleError()`` or a similar class with a message describing the error. When wrapping other exceptions into error messages, you should always use the ``to_native`` Ansible function to ensure proper string compatibility across Python versions: + +.. 
code-block:: python + + from ansible.module_utils._text import to_native + + try: + cause_an_exception() + except Exception as e: + raise AnsibleError('Something happened, this was original exception: %s' % to_native(e)) + +Check the different `AnsibleError objects <https://github.com/ansible/ansible/blob/devel/lib/ansible/errors/__init__.py>`_ and see which one applies best to your situation. + +String encoding +=============== + +You must convert any strings returned by your plugin into Python's unicode type. Converting to unicode ensures that these strings can run through Jinja2. To convert strings: + +.. code-block:: python + + from ansible.module_utils._text import to_text + result_string = to_text(result_string) + +Plugin configuration & documentation standards +============================================== + +To define configurable options for your plugin, describe them in the ``DOCUMENTATION`` section of the python file. Callback and connection plugins have declared configuration requirements this way since Ansible version 2.4; most plugin types now do the same. This approach ensures that the documentation of your plugin's options will always be correct and up-to-date. To add a configurable option to your plugin, define it in this format: + +.. code-block:: yaml + + options: + option_name: + description: describe this config option + default: default value for this config option + env: + - name: NAME_OF_ENV_VAR + ini: + - section: section_of_ansible.cfg_where_this_config_option_is_defined + key: key_used_in_ansible.cfg + required: True/False + type: boolean/float/integer/list/none/path/pathlist/pathspec/string/tmppath + version_added: X.x + +To access the configuration settings in your plugin, use ``self.get_option(<option_name>)``. For most plugin types, the controller pre-populates the settings. If you need to populate settings explicitly, use a ``self.set_options()`` call. 
+ +Plugins that support embedded documentation (see :ref:`ansible-doc` for the list) should include well-formed doc strings. If you inherit from a plugin, you must document the options it takes, either via a documentation fragment or as a copy. See :ref:`module_documenting` for more information on correct documentation. Thorough documentation is a good idea even if you're developing a plugin for local use. + +Developing particular plugin types +================================== + +.. _developing_actions: + +Action plugins +-------------- + +Action plugins let you integrate local processing and local data with module functionality. + +To create an action plugin, create a new class with the Base(ActionBase) class as the parent: + +.. code-block:: python + + from ansible.plugins.action import ActionBase + + class ActionModule(ActionBase): + pass + +From there, execute the module using the ``_execute_module`` method to call the original module. +After successful execution of the module, you can modify the module return data. + +.. code-block:: python + + module_return = self._execute_module(module_name='<NAME_OF_MODULE>', + module_args=module_args, + task_vars=task_vars, tmp=tmp) + + +For example, if you wanted to check the time difference between your Ansible controller and your target machine(s), you could write an action plugin to check the local time and compare it to the return data from Ansible's ``setup`` module: + +.. 
code-block:: python + + #!/usr/bin/python + # Make coding more python3-ish, this is required for contributions to Ansible + from __future__ import (absolute_import, division, print_function) + __metaclass__ = type + + from ansible.plugins.action import ActionBase + from datetime import datetime + + + class ActionModule(ActionBase): + def run(self, tmp=None, task_vars=None): + super(ActionModule, self).run(tmp, task_vars) + module_args = self._task.args.copy() + module_return = self._execute_module(module_name='setup', + module_args=module_args, + task_vars=task_vars, tmp=tmp) + ret = dict() + remote_date = None + if not module_return.get('failed'): + for key, value in module_return['ansible_facts'].items(): + if key == 'ansible_date_time': + remote_date = value['iso8601'] + + if remote_date: + remote_date_obj = datetime.strptime(remote_date, '%Y-%m-%dT%H:%M:%SZ') + time_delta = datetime.now() - remote_date_obj + ret['delta_seconds'] = time_delta.seconds + ret['delta_days'] = time_delta.days + ret['delta_microseconds'] = time_delta.microseconds + + return dict(ansible_facts=dict(ret)) + + +This code checks the time on the controller, captures the date and time for the remote machine using the ``setup`` module, and calculates the difference between the captured time and +the local time, returning the time delta in days, seconds and microseconds. + +For practical examples of action plugins, +see the source code for the `action plugins included with Ansible Core <https://github.com/ansible/ansible/tree/devel/lib/ansible/plugins/action>`_ + +.. _developing_cache_plugins: + +Cache plugins +------------- + +Cache plugins store gathered facts and data retrieved by inventory plugins. + +Import cache plugins using the cache_loader so you can use ``self.set_options()`` and ``self.get_option(<option_name>)``. 
If you import a cache plugin directly in the code base, you can only access options via ``ansible.constants``, and you break the cache plugin's ability to be used by an inventory plugin. + +.. code-block:: python + + from ansible.plugins.loader import cache_loader + [...] + plugin = cache_loader.get('custom_cache', **cache_kwargs) + +There are two base classes for cache plugins, ``BaseCacheModule`` for database-backed caches, and ``BaseCacheFileModule`` for file-backed caches. + +To create a cache plugin, start by creating a new ``CacheModule`` class with the appropriate base class. If you're creating a plugin using an ``__init__`` method you should initialize the base class with any provided args and kwargs to be compatible with inventory plugin cache options. The base class calls ``self.set_options(direct=kwargs)``. After the base class ``__init__`` method is called ``self.get_option(<option_name>)`` should be used to access cache options. + +New cache plugins should take the options ``_uri``, ``_prefix``, and ``_timeout`` to be consistent with existing cache plugins. + +.. code-block:: python + + from ansible.plugins.cache import BaseCacheModule + + class CacheModule(BaseCacheModule): + def __init__(self, *args, **kwargs): + super(CacheModule, self).__init__(*args, **kwargs) + self._connection = self.get_option('_uri') + self._prefix = self.get_option('_prefix') + self._timeout = self.get_option('_timeout') + +If you use the ``BaseCacheModule``, you must implement the methods ``get``, ``contains``, ``keys``, ``set``, ``delete``, ``flush``, and ``copy``. The ``contains`` method should return a boolean that indicates if the key exists and has not expired. Unlike file-based caches, the ``get`` method does not raise a KeyError if the cache has expired. + +If you use the ``BaseFileCacheModule``, you must implement ``_load`` and ``_dump`` methods that will be called from the base class methods ``get`` and ``set``. 
+ +If your cache plugin stores JSON, use ``AnsibleJSONEncoder`` in the ``_dump`` or ``set`` method and ``AnsibleJSONDecoder`` in the ``_load`` or ``get`` method. + +For example cache plugins, see the source code for the `cache plugins included with Ansible Core <https://github.com/ansible/ansible/tree/devel/lib/ansible/plugins/cache>`_. + +.. _developing_callbacks: + +Callback plugins +---------------- + +Callback plugins add new behaviors to Ansible when responding to events. By default, callback plugins control most of the output you see when running the command line programs. + +To create a callback plugin, create a new class with the Base(Callbacks) class as the parent: + +.. code-block:: python + + from ansible.plugins.callback import CallbackBase + + class CallbackModule(CallbackBase): + pass + +From there, override the specific methods from the CallbackBase that you want to provide a callback for. +For plugins intended for use with Ansible version 2.0 and later, you should only override methods that start with ``v2``. +For a complete list of methods that you can override, please see ``__init__.py`` in the +`lib/ansible/plugins/callback <https://github.com/ansible/ansible/tree/devel/lib/ansible/plugins/callback>`_ directory. + +The following is a modified example of how Ansible's timer plugin is implemented, +but with an extra option so you can see how configuration works in Ansible version 2.4 and later: + +.. code-block:: python + + # Make coding more python3-ish, this is required for contributions to Ansible + from __future__ import (absolute_import, division, print_function) + __metaclass__ = type + + # not only visible to ansible-doc, it also 'declares' the options the plugin requires and how to configure them. 
+ DOCUMENTATION = ''' + callback: timer + callback_type: aggregate + requirements: + - whitelist in configuration + short_description: Adds time to play stats + version_added: "2.0" # for collections, use the collection version, not the Ansible version + description: + - This callback just adds total play duration to the play stats. + options: + format_string: + description: format of the string shown to user at play end + ini: + - section: callback_timer + key: format_string + env: + - name: ANSIBLE_CALLBACK_TIMER_FORMAT + default: "Playbook run took %s days, %s hours, %s minutes, %s seconds" + ''' + from datetime import datetime + + from ansible.plugins.callback import CallbackBase + + + class CallbackModule(CallbackBase): + """ + This callback module tells you how long your plays ran for. + """ + CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = 'aggregate' + CALLBACK_NAME = 'namespace.collection_name.timer' + + # only needed if you ship it and don't want to enable by default + CALLBACK_NEEDS_WHITELIST = True + + def __init__(self): + + # make sure the expected objects are present, calling the base's __init__ + super(CallbackModule, self).__init__() + + # start the timer when the plugin is loaded, the first play should start a few milliseconds after. + self.start_time = datetime.now() + + def _days_hours_minutes_seconds(self, runtime): + ''' internal helper method for this callback ''' + minutes = (runtime.seconds // 60) % 60 + r_seconds = runtime.seconds - (minutes * 60) + return runtime.days, runtime.seconds // 3600, minutes, r_seconds + + # this is only event we care about for display, when the play shows its summary stats; the rest are ignored by the base class + def v2_playbook_on_stats(self, stats): + end_time = datetime.now() + runtime = end_time - self.start_time + + # Shows the usage of a config option declared in the DOCUMENTATION variable. Ansible will have set it when it loads the plugin. + # Also note the use of the display object to print to screen. 
This is available to all callbacks, and you should use this over printing yourself + self._display.display(self._plugin_options['format_string'] % (self._days_hours_minutes_seconds(runtime))) + +Note that the ``CALLBACK_VERSION`` and ``CALLBACK_NAME`` definitions are required for properly functioning plugins for Ansible version 2.0 and later. ``CALLBACK_TYPE`` is mostly needed to distinguish 'stdout' plugins from the rest, since you can only load one plugin that writes to stdout. + +For example callback plugins, see the source code for the `callback plugins included with Ansible Core <https://github.com/ansible/ansible/tree/devel/lib/ansible/plugins/callback>`_ + +.. _developing_connection_plugins: + +Connection plugins +------------------ + +Connection plugins allow Ansible to connect to the target hosts so it can execute tasks on them. Ansible ships with many connection plugins, but only one can be used per host at a time. The most commonly used connection plugins are the ``paramiko`` SSH, native ssh (just called ``ssh``), and ``local`` connection types. All of these can be used in playbooks and with ``/usr/bin/ansible`` to connect to remote machines. + +Ansible version 2.1 introduced the ``smart`` connection plugin. The ``smart`` connection type allows Ansible to automatically select either the ``paramiko`` or ``openssh`` connection plugin based on system capabilities, or the ``ssh`` connection plugin if OpenSSH supports ControlPersist. + +To create a new connection plugin (for example, to support SNMP, Message bus, or other transports), copy the format of one of the existing connection plugins and drop it into ``connection`` directory on your :ref:`local plugin path <local_plugins>`. + +Connection plugins can support common options (such as the ``--timeout`` flag) by defining an entry in the documentation for the attribute name (in this case ``timeout``). 
If the common option has a non-null default, the plugin should define the same default since a different default would be ignored. + +For example connection plugins, see the source code for the `connection plugins included with Ansible Core <https://github.com/ansible/ansible/tree/devel/lib/ansible/plugins/connection>`_. + +.. _developing_filter_plugins: + +Filter plugins +-------------- + +Filter plugins manipulate data. They are a feature of Jinja2 and are also available in Jinja2 templates used by the ``template`` module. As with all plugins, they can be easily extended, but instead of having a file for each one you can have several per file. Most of the filter plugins shipped with Ansible reside in a ``core.py``. + +Filter plugins do not use the standard configuration and documentation system described above. + +For example filter plugins, see the source code for the `filter plugins included with Ansible Core <https://github.com/ansible/ansible/tree/devel/lib/ansible/plugins/filter>`_. + +.. _developing_inventory_plugins: + +Inventory plugins +----------------- + +Inventory plugins parse inventory sources and form an in-memory representation of the inventory. Inventory plugins were added in Ansible version 2.4. + +You can see the details for inventory plugins in the :ref:`developing_inventory` page. + +.. _developing_lookup_plugins: + +Lookup plugins +-------------- + +Lookup plugins pull in data from external data stores. Lookup plugins can be used within playbooks both for looping --- playbook language constructs like ``with_fileglob`` and ``with_items`` are implemented via lookup plugins --- and to return values into a variable or parameter. + +Lookup plugins are very flexible, allowing you to retrieve and return any type of data. When writing lookup plugins, always return data of a consistent type that can be easily consumed in a playbook. Avoid parameters that change the returned data type. 
If there is a need to return a single value sometimes and a complex dictionary other times, write two different lookup plugins. + +Ansible includes many :ref:`filters <playbooks_filters>` which can be used to manipulate the data returned by a lookup plugin. Sometimes it makes sense to do the filtering inside the lookup plugin, other times it is better to return results that can be filtered in the playbook. Keep in mind how the data will be referenced when determining the appropriate level of filtering to be done inside the lookup plugin. + +Here's a simple lookup plugin implementation --- this lookup returns the contents of a text file as a variable: + +.. code-block:: python + + # python 3 headers, required if submitting to Ansible + from __future__ import (absolute_import, division, print_function) + __metaclass__ = type + + DOCUMENTATION = """ + lookup: file + author: Daniel Hokka Zakrisson <daniel@hozac.com> + version_added: "0.9" # for collections, use the collection version, not the Ansible version + short_description: read file contents + description: + - This lookup returns the contents from a file on the Ansible controller's file system. + options: + _terms: + description: path(s) of files to read + required: True + notes: + - if read in variable context, the file can be interpreted as YAML if the content is valid to the parser. + - this lookup does not understand globing --- use the fileglob lookup instead. + """ + from ansible.errors import AnsibleError, AnsibleParserError + from ansible.plugins.lookup import LookupBase + from ansible.utils.display import Display + + display = Display() + + + class LookupModule(LookupBase): + + def run(self, terms, variables=None, **kwargs): + + + # lookups in general are expected to both take a list as input and output a list + # this is done so they work with the looping construct 'with_'. 
+ ret = [] + for term in terms: + display.debug("File lookup term: %s" % term) + + # Find the file in the expected search path, using a class method + # that implements the 'expected' search path for Ansible plugins. + lookupfile = self.find_file_in_search_path(variables, 'files', term) + + # Don't use print or your own logging, the display class + # takes care of it in a unified way. + display.vvvv(u"File lookup using %s as file" % lookupfile) + try: + if lookupfile: + contents, show_data = self._loader._get_file_contents(lookupfile) + ret.append(contents.rstrip()) + else: + # Always use ansible error classes to throw 'final' exceptions, + # so the Ansible engine will know how to deal with them. + # The Parser error indicates invalid options passed + raise AnsibleParserError() + except AnsibleParserError: + raise AnsibleError("could not locate file in lookup: %s" % term) + + return ret + +The following is an example of how this lookup is called:: + + --- + - hosts: all + vars: + contents: "{{ lookup('namespace.collection_name.file', '/etc/foo.txt') }}" + + tasks: + + - debug: + msg: the value of foo.txt is {{ contents }} as seen today {{ lookup('pipe', 'date +"%Y-%m-%d"') }} + +For example lookup plugins, see the source code for the `lookup plugins included with Ansible Core <https://github.com/ansible/ansible/tree/devel/lib/ansible/plugins/lookup>`_. + +For more usage examples of lookup plugins, see :ref:`Using Lookups<playbooks_lookups>`. + +.. _developing_test_plugins: + +Test plugins +------------ + +Test plugins verify data. They are a feature of Jinja2 and are also available in Jinja2 templates used by the ``template`` module. As with all plugins, they can be easily extended, but instead of having a file for each one you can have several per file. Most of the test plugins shipped with Ansible reside in a ``core.py``. 
These are especially useful in conjunction with some filter plugins like ``map`` and ``select``; they are also available for conditional directives like ``when:``. + +Test plugins do not use the standard configuration and documentation system described above. + +For example test plugins, see the source code for the `test plugins included with Ansible Core <https://github.com/ansible/ansible/tree/devel/lib/ansible/plugins/test>`_. + +.. _developing_vars_plugins: + +Vars plugins +------------ + +Vars plugins inject additional variable data into Ansible runs that did not come from an inventory source, playbook, or command line. Playbook constructs like 'host_vars' and 'group_vars' work using vars plugins. + +Vars plugins were partially implemented in Ansible 2.0 and rewritten to be fully implemented starting with Ansible 2.4. Prior to Ansible 2.10, vars plugins were unsupported by collections. + +Older plugins used a ``run`` method as their main body/work: + +.. code-block:: python + + def run(self, name, vault_password=None): + pass # your code goes here + + +Ansible 2.0 did not pass passwords to older plugins, so vaults were unavailable. +Most of the work now happens in the ``get_vars`` method which is called from the VariableManager when needed. + +.. code-block:: python + + def get_vars(self, loader, path, entities): + pass # your code goes here + +The parameters are: + + * loader: Ansible's DataLoader. The DataLoader can read files, auto-load JSON/YAML and decrypt vaulted data, and cache read files. + * path: this is 'directory data' for every inventory source and the current play's playbook directory, so they can search for data in reference to them. ``get_vars`` will be called at least once per available path. + * entities: these are host or group names that are pertinent to the variables needed. The plugin will get called once for hosts and again for groups. + +This ``get_vars`` method just needs to return a dictionary structure with the variables.
+ +Since Ansible version 2.4, vars plugins only execute as needed when preparing to execute a task. This avoids the costly 'always execute' behavior that occurred during inventory construction in older versions of Ansible. Since Ansible version 2.10, vars plugin execution can be toggled by the user to run when preparing to execute a task or after importing an inventory source. + +Since Ansible 2.10, vars plugins can require whitelisting. Vars plugins that don't require whitelisting will run by default. To require whitelisting for your plugin set the class variable ``REQUIRES_WHITELIST``: + +.. code-block:: python + + class VarsModule(BaseVarsPlugin): + REQUIRES_WHITELIST = True + +Include the ``vars_plugin_staging`` documentation fragment to allow users to determine when vars plugins run. + +.. code-block:: python + + DOCUMENTATION = ''' + vars: custom_hostvars + version_added: "2.10" # for collections, use the collection version, not the Ansible version + short_description: Load custom host vars + description: Load custom host vars + options: + stage: + ini: + - key: stage + section: vars_custom_hostvars + env: + - name: ANSIBLE_VARS_PLUGIN_STAGE + extends_documentation_fragment: + - vars_plugin_staging + ''' + +Also since Ansible 2.10, vars plugins can reside in collections. Vars plugins in collections must require whitelisting to be functional. + +For example vars plugins, see the source code for the `vars plugins included with Ansible Core +<https://github.com/ansible/ansible/tree/devel/lib/ansible/plugins/vars>`_. + +.. 
seealso:: + + :ref:`list_of_collections` + Browse existing collections, modules, and plugins + :ref:`developing_api` + Learn about the Python API for task execution + :ref:`developing_inventory` + Learn about how to develop dynamic inventory sources + :ref:`developing_modules_general` + Learn about how to write Ansible modules + `Mailing List <https://groups.google.com/group/ansible-devel>`_ + The development mailing list + `irc.freenode.net <http://irc.freenode.net>`_ + #ansible IRC chat channel diff --git a/docs/docsite/rst/dev_guide/developing_program_flow_modules.rst b/docs/docsite/rst/dev_guide/developing_program_flow_modules.rst new file mode 100644 index 00000000..5300fb55 --- /dev/null +++ b/docs/docsite/rst/dev_guide/developing_program_flow_modules.rst @@ -0,0 +1,880 @@ +.. _flow_modules: +.. _developing_program_flow_modules: + +*************************** +Ansible module architecture +*************************** + +If you are working on the ``ansible-base`` code, writing an Ansible module, or developing an action plugin, you may need to understand how Ansible's program flow executes. If you are just using Ansible Modules in playbooks, you can skip this section. + +.. contents:: + :local: + +.. _flow_types_of_modules: + +Types of modules +================ + +Ansible supports several different types of modules in its code base. Some of +these are for backwards compatibility and others are to enable flexibility. + +.. _flow_action_plugins: + +Action plugins +-------------- + +Action plugins look like modules to anyone writing a playbook. Usage documentation for most action plugins lives inside a module of the same name. Some action plugins do all the work, with the module providing only documentation. Some action plugins execute modules. The ``normal`` action plugin executes modules that don't have special action plugins. Action plugins always execute on the controller. + +Some action plugins do all their work on the controller. 
For +example, the :ref:`debug <debug_module>` action plugin (which prints text for +the user to see) and the :ref:`assert <assert_module>` action plugin (which +tests whether values in a playbook satisfy certain criteria) execute entirely on the controller. + +Most action plugins set up some values on the controller, then invoke an +actual module on the managed node that does something with these values. For example, the :ref:`template <template_module>` action plugin takes values from +the user to construct a file in a temporary location on the controller using +variables from the playbook environment. It then transfers the temporary file +to a temporary file on the remote system. After that, it invokes the +:ref:`copy module <copy_module>` which operates on the remote system to move the file +into its final location, sets file permissions, and so on. + +.. _flow_new_style_modules: + +New-style modules +----------------- + +All of the modules that ship with Ansible fall into this category. While you can write modules in any language, all official modules (shipped with Ansible) use either Python or PowerShell. + +New-style modules have the arguments to the module embedded inside of them in +some manner. Old-style modules must copy a separate file over to the +managed node, which is less efficient as it requires two over-the-wire +connections instead of only one. + +.. _flow_python_modules: + +Python +^^^^^^ + +New-style Python modules use the :ref:`Ansiballz` framework for constructing +modules. These modules use imports from :code:`ansible.module_utils` to pull in +boilerplate module code, such as argument parsing, formatting of return +values as :term:`JSON`, and various file operations. + +.. note:: In Ansible, up to version 2.0.x, the official Python modules used the + :ref:`module_replacer` framework. 
For module authors, :ref:`Ansiballz` is + largely a superset of :ref:`module_replacer` functionality, so you usually + do not need to understand the differences between them. + +.. _flow_powershell_modules: + +PowerShell +^^^^^^^^^^ + +New-style PowerShell modules use the :ref:`module_replacer` framework for +constructing modules. These modules get a library of PowerShell code embedded +in them before being sent to the managed node. + +.. _flow_jsonargs_modules: + +JSONARGS modules +---------------- + +These modules are scripts that include the string +``<<INCLUDE_ANSIBLE_MODULE_JSON_ARGS>>`` in their body. +This string is replaced with the JSON-formatted argument string. These modules typically set a variable to that value like this: + +.. code-block:: python + + json_arguments = """<<INCLUDE_ANSIBLE_MODULE_JSON_ARGS>>""" + +Which is expanded as: + +.. code-block:: python + + json_arguments = """{"param1": "test's quotes", "param2": "\"To be or not to be\" - Hamlet"}""" + +.. note:: Ansible outputs a :term:`JSON` string with bare quotes. Double quotes are + used to quote string values, double quotes inside of string values are + backslash escaped, and single quotes may appear unescaped inside of + a string value. To use JSONARGS, your scripting language must have a way + to handle this type of string. The example uses Python's triple quoted + strings to do this. Other scripting languages may have a similar quote + character that won't be confused by any quotes in the JSON or it may + allow you to define your own start-of-quote and end-of-quote characters. + If the language doesn't give you any of these then you'll need to write + a :ref:`non-native JSON module <flow_want_json_modules>` or + :ref:`Old-style module <flow_old_style_modules>` instead. + +These modules typically parse the contents of ``json_arguments`` using a JSON +library and then use them as native variables throughout the code. + +.. 
_flow_want_json_modules: + +Non-native want JSON modules +---------------------------- + +If a module has the string ``WANT_JSON`` in it anywhere, Ansible treats +it as a non-native module that accepts a filename as its only command line +parameter. The filename is for a temporary file containing a :term:`JSON` +string containing the module's parameters. The module needs to open the file, +read and parse the parameters, operate on the data, and print its return data +as a JSON encoded dictionary to stdout before exiting. + +These types of modules are self-contained entities. As of Ansible 2.1, Ansible +only modifies them to change a shebang line if present. + +.. seealso:: Examples of Non-native modules written in ruby are in the `Ansible + for Rubyists <https://github.com/ansible/ansible-for-rubyists>`_ repository. + +.. _flow_binary_modules: + +Binary modules +-------------- + +From Ansible 2.2 onwards, modules may also be small binary programs. Ansible +doesn't perform any magic to make these portable to different systems so they +may be specific to the system on which they were compiled or require other +binary runtime dependencies. Despite these drawbacks, you may have +to compile a custom module against a specific binary +library if that's the only way to get access to certain resources. + +Binary modules take their arguments and return data to Ansible in the same +way as :ref:`want JSON modules <flow_want_json_modules>`. + +.. seealso:: One example of a `binary module + <https://github.com/ansible/ansible/blob/devel/test/integration/targets/binary_modules/library/helloworld.go>`_ + written in go. + +.. _flow_old_style_modules: + +Old-style modules +----------------- + +Old-style modules are similar to +:ref:`want JSON modules <flow_want_json_modules>`, except that the file that +they take contains ``key=value`` pairs for their parameters instead of +:term:`JSON`. 
Ansible decides that a module is old-style when it doesn't have +any of the markers that would show that it is one of the other types. + +.. _flow_how_modules_are_executed: + +How modules are executed +======================== + +When a user uses :program:`ansible` or :program:`ansible-playbook`, they +specify a task to execute. The task is usually the name of a module along +with several parameters to be passed to the module. Ansible takes these +values and processes them in various ways before they are finally executed on +the remote machine. + +.. _flow_executor_task_executor: + +Executor/task_executor +---------------------- + +The TaskExecutor receives the module name and parameters that were parsed from +the :term:`playbook <playbooks>` (or from the command line in the case of +:command:`/usr/bin/ansible`). It uses the name to decide whether it's looking +at a module or an :ref:`Action Plugin <flow_action_plugins>`. If it's +a module, it loads the :ref:`Normal Action Plugin <flow_normal_action_plugin>` +and passes the name, variables, and other information about the task and play +to that Action Plugin for further processing. + +.. _flow_normal_action_plugin: + +The ``normal`` action plugin +---------------------------- + +The ``normal`` action plugin executes the module on the remote host. It is +the primary coordinator of much of the work to actually execute the module on +the managed machine. + +* It loads the appropriate connection plugin for the task, which then transfers + or executes as needed to create a connection to that host. +* It adds any internal Ansible properties to the module's parameters (for + instance, the ones that pass along ``no_log`` to the module). +* It works with other plugins (connection, shell, become, other action plugins) + to create any temporary files on the remote machine and + cleans up afterwards. 
+* It pushes the module and module parameters to the + remote host, although the :ref:`module_common <flow_executor_module_common>` + code described in the next section decides which format + those will take. +* It handles any special cases regarding modules (for instance, async + execution, or complications around Windows modules that must have the same names as Python modules, so that internal calling of modules from other Action Plugins work.) + +Much of this functionality comes from the `BaseAction` class, +which lives in :file:`plugins/action/__init__.py`. It uses the +``Connection`` and ``Shell`` objects to do its work. + +.. note:: + When :term:`tasks <tasks>` are run with the ``async:`` parameter, Ansible + uses the ``async`` Action Plugin instead of the ``normal`` Action Plugin + to invoke it. That program flow is currently not documented. Read the + source for information on how that works. + +.. _flow_executor_module_common: + +Executor/module_common.py +------------------------- + +Code in :file:`executor/module_common.py` assembles the module +to be shipped to the managed node. The module is first read in, then examined +to determine its type: + +* :ref:`PowerShell <flow_powershell_modules>` and :ref:`JSON-args modules <flow_jsonargs_modules>` are passed through :ref:`Module Replacer <module_replacer>`. +* New-style :ref:`Python modules <flow_python_modules>` are assembled by :ref:`Ansiballz`. +* :ref:`Non-native-want-JSON <flow_want_json_modules>`, :ref:`Binary modules <flow_binary_modules>`, and :ref:`Old-Style modules <flow_old_style_modules>` aren't touched by either of these and pass through unchanged. + +After the assembling step, one final +modification is made to all modules that have a shebang line. Ansible checks +whether the interpreter in the shebang line has a specific path configured via +an ``ansible_$X_interpreter`` inventory variable. If it does, Ansible +substitutes that path for the interpreter path given in the module. 
After +this, Ansible returns the complete module data and the module type to the +:ref:`Normal Action <flow_normal_action_plugin>` which continues execution of +the module. + +Assembler frameworks +-------------------- + +Ansible supports two assembler frameworks: Ansiballz and the older Module Replacer. + +.. _module_replacer: + +Module Replacer framework +^^^^^^^^^^^^^^^^^^^^^^^^^ + +The Module Replacer framework is the original framework implementing new-style +modules, and is still used for PowerShell modules. It is essentially a preprocessor (like the C Preprocessor for those +familiar with that programming language). It does straight substitutions of +specific substring patterns in the module file. There are two types of +substitutions: + +* Replacements that only happen in the module file. These are public + replacement strings that modules can utilize to get helpful boilerplate or + access to arguments. + + - :code:`from ansible.module_utils.MOD_LIB_NAME import *` is replaced with the + contents of the :file:`ansible/module_utils/MOD_LIB_NAME.py` These should + only be used with :ref:`new-style Python modules <flow_python_modules>`. + - :code:`#<<INCLUDE_ANSIBLE_MODULE_COMMON>>` is equivalent to + :code:`from ansible.module_utils.basic import *` and should also only apply + to new-style Python modules. + - :code:`# POWERSHELL_COMMON` substitutes the contents of + :file:`ansible/module_utils/powershell.ps1`. It should only be used with + :ref:`new-style Powershell modules <flow_powershell_modules>`. + +* Replacements that are used by ``ansible.module_utils`` code. These are internal replacement patterns. They may be used internally, in the above public replacements, but shouldn't be used directly by modules. + + - :code:`"<<ANSIBLE_VERSION>>"` is substituted with the Ansible version. 
In + :ref:`new-style Python modules <flow_python_modules>` under the + :ref:`Ansiballz` framework the proper way is to instead instantiate an + `AnsibleModule` and then access the version from + :attr:``AnsibleModule.ansible_version``. + - :code:`"<<INCLUDE_ANSIBLE_MODULE_COMPLEX_ARGS>>"` is substituted with + a string which is the Python ``repr`` of the :term:`JSON` encoded module + parameters. Using ``repr`` on the JSON string makes it safe to embed in + a Python file. In new-style Python modules under the Ansiballz framework + this is better accessed by instantiating an `AnsibleModule` and + then using :attr:`AnsibleModule.params`. + - :code:`<<SELINUX_SPECIAL_FILESYSTEMS>>` substitutes a string which is + a comma separated list of file systems which have a file system dependent + security context in SELinux. In new-style Python modules, if you really + need this you should instantiate an `AnsibleModule` and then use + :attr:`AnsibleModule._selinux_special_fs`. The variable has also changed + from a comma separated string of file system names to an actual python + list of filesystem names. + - :code:`<<INCLUDE_ANSIBLE_MODULE_JSON_ARGS>>` substitutes the module + parameters as a JSON string. Care must be taken to properly quote the + string as JSON data may contain quotes. This pattern is not substituted + in new-style Python modules as they can get the module parameters another + way. + - The string :code:`syslog.LOG_USER` is replaced wherever it occurs with the + ``syslog_facility`` which was named in :file:`ansible.cfg` or any + ``ansible_syslog_facility`` inventory variable that applies to this host. In + new-style Python modules this has changed slightly. If you really need to + access it, you should instantiate an `AnsibleModule` and then use + :attr:`AnsibleModule._syslog_facility` to access it. It is no longer the + actual syslog facility and is now the name of the syslog facility. 
See + the :ref:`documentation on internal arguments <flow_internal_arguments>` + for details. + +.. _Ansiballz: + +Ansiballz framework +^^^^^^^^^^^^^^^^^^^ + +The Ansiballz framework was adopted in Ansible 2.1 and is used for all new-style Python modules. Unlike the Module Replacer, Ansiballz uses real Python imports of things in +:file:`ansible/module_utils` instead of merely preprocessing the module. It +does this by constructing a zipfile -- which includes the module file, files +in :file:`ansible/module_utils` that are imported by the module, and some +boilerplate to pass in the module's parameters. The zipfile is then Base64 +encoded and wrapped in a small Python script which decodes the Base64 encoding +and places the zipfile into a temp directory on the managed node. It then +extracts just the Ansible module script from the zip file and places that in +the temporary directory as well. Then it sets the PYTHONPATH to find Python +modules inside of the zip file and imports the Ansible module as the special name, ``__main__``. +Importing it as ``__main__`` causes Python to think that it is executing a script rather than simply +importing a module. This lets Ansible run both the wrapper script and the module code in a single copy of Python on the remote machine. + +.. note:: + * Ansible wraps the zipfile in the Python script for two reasons: + + * for compatibility with Python 2.6 which has a less + functional version of Python's ``-m`` command line switch. + + * so that pipelining will function properly. Pipelining needs to pipe the + Python module into the Python interpreter on the remote node. Python + understands scripts on stdin but does not understand zip files. + + * Prior to Ansible 2.7, the module was executed via a second Python interpreter instead of being + executed inside of the same process. This change was made once Python-2.4 support was dropped + to speed up module execution. 
+ +In Ansiballz, any imports of Python modules from the +:py:mod:`ansible.module_utils` package trigger inclusion of that Python file +into the zipfile. Instances of :code:`#<<INCLUDE_ANSIBLE_MODULE_COMMON>>` in +the module are turned into :code:`from ansible.module_utils.basic import *` +and :file:`ansible/module-utils/basic.py` is then included in the zipfile. +Files that are included from :file:`module_utils` are themselves scanned for +imports of other Python modules from :file:`module_utils` to be included in +the zipfile as well. + +.. warning:: + At present, the Ansiballz Framework cannot determine whether an import + should be included if it is a relative import. Always use an absolute + import that has :py:mod:`ansible.module_utils` in it to allow Ansiballz to + determine that the file should be included. + + +.. _flow_passing_module_args: + +Passing args +------------ + +Arguments are passed differently by the two frameworks: + +* In :ref:`module_replacer`, module arguments are turned into a JSON-ified string and substituted into the combined module file. +* In :ref:`Ansiballz`, the JSON-ified string is part of the script which wraps the zipfile. Just before the wrapper script imports the Ansible module as ``__main__``, it monkey-patches the private, ``_ANSIBLE_ARGS`` variable in ``basic.py`` with the variable values. When a :class:`ansible.module_utils.basic.AnsibleModule` is instantiated, it parses this string and places the args into :attr:`AnsibleModule.params` where it can be accessed by the module's other code. + +.. warning:: + If you are writing modules, remember that the way we pass arguments is an internal implementation detail: it has changed in the past and will change again as soon as changes to the common module_utils + code allow Ansible modules to forgo using :class:`ansible.module_utils.basic.AnsibleModule`. Do not rely on the internal global ``_ANSIBLE_ARGS`` variable. 
+ + Very dynamic custom modules which need to parse arguments before they + instantiate an ``AnsibleModule`` may use ``_load_params`` to retrieve those parameters. + Although ``_load_params`` may change in breaking ways if necessary to support + changes in the code, it is likely to be more stable than either the way we pass parameters or the internal global variable. + +.. note:: + Prior to Ansible 2.7, the Ansible module was invoked in a second Python interpreter and the + arguments were then passed to the script over the script's stdin. + + +.. _flow_internal_arguments: + +Internal arguments +------------------ + +Both :ref:`module_replacer` and :ref:`Ansiballz` send additional arguments to +the module beyond those which the user specified in the playbook. These +additional arguments are internal parameters that help implement global +Ansible features. Modules often do not need to know about these explicitly as +the features are implemented in :py:mod:`ansible.module_utils.basic` but certain +features need support from the module so it's good to know about them. + +The internal arguments listed here are global. If you need to add a local internal argument to a custom module, create an action plugin for that specific module - see ``_original_basename`` in the `copy action plugin <https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/action/copy.py#L329>`_ for an example. + +_ansible_no_log +^^^^^^^^^^^^^^^ + +Boolean. Set to True whenever a parameter in a task or play specifies ``no_log``. Any module that calls :py:meth:`AnsibleModule.log` handles this automatically. If a module implements its own logging then +it needs to check this value. To access in a module, instantiate an +``AnsibleModule`` and then check the value of :attr:`AnsibleModule.no_log`. + +.. note:: + ``no_log`` specified in a module's argument_spec is handled by a different mechanism. + +_ansible_debug +^^^^^^^^^^^^^^^ + +Boolean. 
Turns more verbose logging on or off and turns on logging of +external commands that the module executes. If a module uses +:py:meth:`AnsibleModule.debug` rather than :py:meth:`AnsibleModule.log` then +the messages are only logged if ``_ansible_debug`` is set to ``True``. +To set, add ``debug: True`` to :file:`ansible.cfg` or set the environment +variable :envvar:`ANSIBLE_DEBUG`. To access in a module, instantiate an +``AnsibleModule`` and access :attr:`AnsibleModule._debug`. + +_ansible_diff +^^^^^^^^^^^^^^^ + +Boolean. If a module supports it, tells the module to show a unified diff of +changes to be made to templated files. To set, pass the ``--diff`` command line +option. To access in a module, instantiate an `AnsibleModule` and access +:attr:`AnsibleModule._diff`. + +_ansible_verbosity +^^^^^^^^^^^^^^^^^^ + +Unused. This value could be used for finer grained control over logging. + +_ansible_selinux_special_fs +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +List. Names of filesystems which should have a special SELinux +context. They are used by the `AnsibleModule` methods which operate on +files (changing attributes, moving, and copying). To set, add a comma separated string of filesystem names in :file:`ansible.cfg`:: + + # ansible.cfg + [selinux] + special_context_filesystems=nfs,vboxsf,fuse,ramfs,vfat + +Most modules can use the built-in ``AnsibleModule`` methods to manipulate +files. To access in a module that needs to know about these special context filesystems, instantiate an ``AnsibleModule`` and examine the list in +:attr:`AnsibleModule._selinux_special_fs`. + +This replaces :attr:`ansible.module_utils.basic.SELINUX_SPECIAL_FS` from +:ref:`module_replacer`. In module replacer it was a comma separated string of +filesystem names. Under Ansiballz it's an actual list. + +.. versionadded:: 2.1 + +_ansible_syslog_facility +^^^^^^^^^^^^^^^^^^^^^^^^ + +This parameter controls which syslog facility Ansible module logs to. 
To set, change the ``syslog_facility`` value in :file:`ansible.cfg`. Most +modules should just use :meth:`AnsibleModule.log` which will then make use of +this. If a module has to use this on its own, it should instantiate an +`AnsibleModule` and then retrieve the name of the syslog facility from +:attr:`AnsibleModule._syslog_facility`. The Ansiballz code is less hacky than the old :ref:`module_replacer` code: + +.. code-block:: python + + # Old module_replacer way + import syslog + syslog.openlog(NAME, 0, syslog.LOG_USER) + + # New Ansiballz way + import syslog + facility_name = module._syslog_facility + facility = getattr(syslog, facility_name, syslog.LOG_USER) + syslog.openlog(NAME, 0, facility) + +.. versionadded:: 2.1 + +_ansible_version +^^^^^^^^^^^^^^^^ + +This parameter passes the version of Ansible that runs the module. To access +it, a module should instantiate an `AnsibleModule` and then retrieve it +from :attr:`AnsibleModule.ansible_version`. This replaces +:attr:`ansible.module_utils.basic.ANSIBLE_VERSION` from +:ref:`module_replacer`. + +.. versionadded:: 2.1 + + +.. _flow_module_return_values: + +Module return values & Unsafe strings +------------------------------------- + +At the end of a module's execution, it formats the data that it wants to return as a JSON string and prints the string to its stdout. The normal action plugin receives the JSON string, parses it into a Python dictionary, and returns it to the executor. + +If Ansible templated every string return value, it would be vulnerable to an attack from users with access to managed nodes. If an unscrupulous user disguised malicious code as Ansible return value strings, and if those strings were then templated on the controller, Ansible could execute arbitrary code. To prevent this scenario, Ansible marks all strings inside returned data as ``Unsafe``, emitting any Jinja2 templates in the strings verbatim, not expanded by Jinja2. 
+ +Strings returned by invoking a module through ``ActionPlugin._execute_module()`` are automatically marked as ``Unsafe`` by the normal action plugin. If another action plugin retrieves information from a module through some other means, it must mark its return data as ``Unsafe`` on its own. + +In case a poorly-coded action plugin fails to mark its results as "Unsafe," Ansible audits the results again when they are returned to the executor, +marking all strings as ``Unsafe``. The normal action plugin protects itself and any other code that it calls with the result data as a parameter. The check inside the executor protects the output of all other action plugins, ensuring that subsequent tasks run by Ansible will not template anything from those results either. + +.. _flow_special_considerations: + +Special considerations +---------------------- + +.. _flow_pipelining: + +Pipelining +^^^^^^^^^^ + +Ansible can transfer a module to a remote machine in one of two ways: + +* it can write out the module to a temporary file on the remote host and then + use a second connection to the remote host to execute it with the + interpreter that the module needs +* or it can use what's known as pipelining to execute the module by piping it + into the remote interpreter's stdin. + +Pipelining only works with modules written in Python at this time because +Ansible only knows that Python supports this mode of operation. Supporting +pipelining means that whatever format the module payload takes before being +sent over the wire must be executable by Python via stdin. + +.. _flow_args_over_stdin: + +Why pass args over stdin? +^^^^^^^^^^^^^^^^^^^^^^^^^ + +Passing arguments via stdin was chosen for the following reasons: + +* When combined with :ref:`ANSIBLE_PIPELINING`, this keeps the module's arguments from + temporarily being saved onto disk on the remote machine. 
This makes it + harder (but not impossible) for a malicious user on the remote machine to + steal any sensitive information that may be present in the arguments. +* Command line arguments would be insecure as most systems allow unprivileged + users to read the full commandline of a process. +* Environment variables are usually more secure than the commandline but some + systems limit the total size of the environment. This could lead to + truncation of the parameters if we hit that limit. + + +.. _flow_ansiblemodule: + +AnsibleModule +------------- + +.. _argument_spec: + +Argument spec +^^^^^^^^^^^^^ + +The ``argument_spec`` provided to ``AnsibleModule`` defines the supported arguments for a module, as well as their type, defaults and more. + +Example ``argument_spec``: + +.. code-block:: python + + module = AnsibleModule(argument_spec=dict( + top_level=dict( + type='dict', + options=dict( + second_level=dict( + default=True, + type='bool', + ) + ) + ) + )) + +This section will discuss the behavioral attributes for arguments: + +:type: + + ``type`` allows you to define the type of the value accepted for the argument. The default value for ``type`` is ``str``. Possible values are: + + * str + * list + * dict + * bool + * int + * float + * path + * raw + * jsonarg + * json + * bytes + * bits + + The ``raw`` type, performs no type validation or type casting, and maintains the type of the passed value. + +:elements: + + ``elements`` works in combination with ``type`` when ``type='list'``. ``elements`` can then be defined as ``elements='int'`` or any other type, indicating that each element of the specified list should be of that type. + +:default: + + The ``default`` option allows sets a default value for the argument for the scenario when the argument is not provided to the module. When not specified, the default value is ``None``. 
+ +:fallback: + + ``fallback`` accepts a ``tuple`` where the first argument is a callable (function) that will be used to perform the lookup, based on the second argument. The second argument is a list of values to be accepted by the callable. + + The most common callable used is ``env_fallback`` which will allow an argument to optionally use an environment variable when the argument is not supplied. + + Example: + + .. code-block:: python + + username=dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])) + +:choices: + + ``choices`` accepts a list of choices that the argument will accept. The types of ``choices`` should match the ``type``. + +:required: + + ``required`` accepts a boolean, either ``True`` or ``False`` that indicates that the argument is required. When not specified, ``required`` defaults to ``False``. This should not be used in combination with ``default``. + +:no_log: + + ``no_log`` accepts a boolean, either ``True`` or ``False``, that indicates explicitly whether or not the argument value should be masked in logs and output. + + .. note:: + In the absence of ``no_log``, if the parameter name appears to indicate that the argument value is a password or passphrase (such as "admin_password"), a warning will be shown and the value will be masked in logs but **not** output. To disable the warning and masking for parameters that do not contain sensitive information, set ``no_log`` to ``False``. + +:aliases: + + ``aliases`` accepts a list of alternative argument names for the argument, such as the case where the argument is ``name`` but the module accepts ``aliases=['pkg']`` to allow ``pkg`` to be interchangeably with ``name`` + +:options: + + ``options`` implements the ability to create a sub-argument_spec, where the sub options of the top level argument are also validated using the attributes discussed in this section. The example at the top of this section demonstrates use of ``options``. ``type`` or ``elements`` should be ``dict`` is this case. 
+ +:apply_defaults: + + ``apply_defaults`` works alongside ``options`` and allows the ``default`` of the sub-options to be applied even when the top-level argument is not supplied. + + In the example of the ``argument_spec`` at the top of this section, it would allow ``module.params['top_level']['second_level']`` to be defined, even if the user does not provide ``top_level`` when calling the module. + +:removed_in_version: + + ``removed_in_version`` indicates which version of ansible-base or a collection a deprecated argument will be removed in. Mutually exclusive with ``removed_at_date``, and must be used with ``removed_from_collection``. + + Example: + + .. code-block:: python + + 'option': { + 'type': 'str', + 'removed_in_version': '2.0.0', + 'collection_name': 'testns.testcol', + }, + +:removed_at_date: + + ``removed_at_date`` indicates that a deprecated argument will be removed in a minor ansible-base release or major collection release after this date. Mutually exclusive with ``removed_in_version``, and must be used with ``removed_from_collection``. + + Example: + + .. code-block:: python + + 'option': { + 'type': 'str', + 'removed_at_date': '2020-12-31', + 'collection_name': 'testns.testcol', + }, + +:removed_from_collection: + + Specifies which collection (or ansible-base) deprecates this deprecated argument. Specify ``ansible.builtin`` for ansible-base, or the collection's name (format ``foo.bar``). Must be used with ``removed_in_version`` or ``removed_at_date``. + +:deprecated_aliases: + + Deprecates aliases of this argument. Must contain a list or tuple of dictionaries having some the following keys: + + :name: + + The name of the alias to deprecate. (Required.) + + :version: + + The version of ansible-base or the collection this alias will be removed in. Either ``version`` or ``date`` must be specified. + + :date: + + The a date after which a minor release of ansible-base or a major collection release will no longer contain this alias.. 
Either ``version`` or ``date`` must be specified. + + :collection_name: + + Specifies which collection (or ansible-base) deprecates this deprecated alias. Specify ``ansible.builtin`` for ansible-base, or the collection's name (format ``foo.bar``). Must be used with ``version`` or ``date``. + + Examples: + + .. code-block:: python + + 'option': { + 'type': 'str', + 'aliases': ['foo', 'bar'], + 'depecated_aliases': [ + { + 'name': 'foo', + 'version': '2.0.0', + 'collection_name': 'testns.testcol', + }, + { + 'name': 'foo', + 'date': '2020-12-31', + 'collection_name': 'testns.testcol', + }, + ], + }, + + +:mutually_exclusive: + + If ``options`` is specified, ``mutually_exclusive`` refers to the sub-options described in ``options`` and behaves as in :ref:`argument_spec_dependencies`. + +:required_together: + + If ``options`` is specified, ``required_together`` refers to the sub-options described in ``options`` and behaves as in :ref:`argument_spec_dependencies`. + +:required_one_of: + + If ``options`` is specified, ``required_one_of`` refers to the sub-options described in ``options`` and behaves as in :ref:`argument_spec_dependencies`. + +:required_if: + + If ``options`` is specified, ``required_if`` refers to the sub-options described in ``options`` and behaves as in :ref:`argument_spec_dependencies`. + +:required_by: + + If ``options`` is specified, ``required_by`` refers to the sub-options described in ``options`` and behaves as in :ref:`argument_spec_dependencies`. + + +.. _argument_spec_dependencies: + +Dependencies between module options +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The following are optional arguments for ``AnsibleModule()``: + +.. code-block:: python + + module = AnsibleModule( + argument_spec, + mutually_exclusive=[ + ('path', 'content'), + ], + required_one_of=[ + ('path', 'content'), + ], + ) + +:mutually_exclusive: + + Must be a sequence (list or tuple) of sequences of strings. 
Every sequence of strings is a list of option names which are mutually exclusive. If more than one options of a list are specified together, Ansible will fail the module with an error. + + Example: + + .. code-block:: python + + mutually_exclusive=[ + ('path', 'content'), + ('repository_url', 'repository_filename'), + ], + + In this example, the options ``path`` and ``content`` must not specified at the same time. Also the options ``repository_url`` and ``repository_filename`` must not be specified at the same time. But specifying ``path`` and ``repository_url`` is accepted. + + To ensure that precisely one of two (or more) options is specified, combine ``mutually_exclusive`` with ``required_one_of``. + +:required_together: + + Must be a sequence (list or tuple) of sequences of strings. Every sequence of strings is a list of option names which are must be specified together. If at least one of these options are specified, the other ones from the same sequence must all be present. + + Example: + + .. code-block:: python + + required_together=[ + ('file_path', 'file_hash'), + ], + + In this example, if one of the options ``file_path`` or ``file_hash`` is specified, Ansible will fail the module with an error if the other one is not specified. + +:required_one_of: + + Must be a sequence (list or tuple) of sequences of strings. Every sequence of strings is a list of option names from which at least one must be specified. If none one of these options are specified, Ansible will fail module execution. + + Example: + + .. code-block:: python + + required_one_of=[ + ('path', 'content'), + ], + + In this example, at least one of ``path`` and ``content`` must be specified. If none are specified, execution will fail. Specifying both is explicitly allowed; to prevent this, combine ``required_one_of`` with ``mutually_exclusive``. + +:required_if: + + Must be a sequence of sequences. Every inner sequence describes one conditional dependency. 
Every sequence must have three or four values. The first two values are the option's name and the option's value which describes the condition. The further elements of the sequence are only needed if the option of that name has precisely this value. + + If you want that all options in a list of option names are specified if the condition is met, use one of the following forms: + + .. code-block:: python + + ('option_name', option_value, ('option_a', 'option_b', ...)), + ('option_name', option_value, ('option_a', 'option_b', ...), False), + + If you want that at least one option of a list of option names is specified if the condition is met, use the following form: + + .. code-block:: python + + ('option_name', option_value, ('option_a', 'option_b', ...), True), + + Example: + + .. code-block:: python + + required_if=[ + ('state', 'present', ('path', 'content'), True), + ('force', True, ('force_reason', 'force_code')), + ], + + In this example, if the user specifies ``state=present``, at least one of the options ``path`` and ``content`` must be supplied (or both). To make sure that precisely one can be specified, combine ``required_if`` with ``mutually_exclusive``. + + On the other hand, if ``force`` (a boolean parameter) is set to ``true``, ``yes`` etc., both ``force_reason`` and ``force_code`` must be specified. + +:required_by: + + Must be a dictionary mapping option names to sequences of option names. If the option name in a dictionary key is specified, the option names it maps to must all also be specified. Note that instead of a sequence of option names, you can also specify one single option name. + + Example: + + .. code-block:: python + + required_by={ + 'force': 'force_reason', + 'path': ('mode', 'owner', 'group'), + }, + + In the example, if ``force`` is specified, ``force_reason`` must also be specified. Also, if ``path`` is specified, then three three options ``mode``, ``owner`` and ``group`` also must be specified. 
+ +Declaring check mode support +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +To declare that a module supports check mode, supply ``supports_check_mode=True`` to the ``AnsibleModule()`` call: + +.. code-block:: python + + module = AnsibleModule(argument_spec, supports_check_mode=True) + +The module can determine whether it is called in check mode by checking the boolean value ``module.check_mode``. If it evaluates to ``True``, the module must take care not to do any modification. + +If ``supports_check_mode=False`` is specified, which is the default value, the module will exit in check mode with ``skipped=True`` and message ``remote module (<insert module name here>) does not support check mode``. + +Adding file options +^^^^^^^^^^^^^^^^^^^ + +To declare that a module should add support for all common file options, supply ``add_file_common_args=True`` to the ``AnsibleModule()`` call: + +.. code-block:: python + + module = AnsibleModule(argument_spec, add_file_common_args=True) + +You can find `a list of all file options here <https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/doc_fragments/files.py>`_. It is recommended that you make your ``DOCUMENTATION`` extend the doc fragment ``ansible.builtin.files`` (see :ref:`module_docs_fragments`) in this case, to make sure that all these fields are correctly documented. + +The helper functions ``module.load_file_common_arguments()`` and ``module.set_fs_attributes_if_different()`` can be used to handle these arguments for you: + +.. 
code-block:: python + + argument_spec = { + 'path': { + 'type': 'str', + 'required': True, + }, + } + + module = AnsibleModule(argument_spec, add_file_common_args=True) + changed = False + + # TODO do something with module.params['path'], like update its contents + + # Ensure that module.params['path'] satisfies the file options supplied by the user + file_args = module.load_file_common_arguments(module.params) + changed = module.set_fs_attributes_if_different(file_args, changed) + + module.exit_json(changed=changed) diff --git a/docs/docsite/rst/dev_guide/developing_python_3.rst b/docs/docsite/rst/dev_guide/developing_python_3.rst new file mode 100644 index 00000000..3713e412 --- /dev/null +++ b/docs/docsite/rst/dev_guide/developing_python_3.rst @@ -0,0 +1,404 @@ +.. _developing_python_3: + +******************** +Ansible and Python 3 +******************** + +The ``ansible-base`` code runs on both Python 2 and Python 3 because we want Ansible to be able to manage a wide +variety of machines. Contributors to ansible-base and to Ansible Collections should be aware of the tips in this document so that they can write code that will run on the same versions of Python as the rest of Ansible. + +.. contents:: + :local: + +To ensure that your code runs on Python 3 as well as on Python 2, learn the tips and tricks and idioms +described here. Most of these considerations apply to all three types of Ansible code: + +1. controller-side code - code that runs on the machine where you invoke :command:`/usr/bin/ansible` +2. modules - the code which Ansible transmits to and invokes on the managed machine. +3. shared ``module_utils`` code - the common code that's used by modules to perform tasks and sometimes used by controller-side code as well + +However, the three types of code do not use the same string strategy. If you're developing a module or some ``module_utils`` code, be sure to read the section on string strategy carefully.
+ +Minimum version of Python 3.x and Python 2.x +============================================ + +On the controller we support Python 3.5 or greater and Python 2.7 or greater. Module-side, we +support Python 3.5 or greater and Python 2.6 or greater. + +Python 3.5 was chosen as a minimum because it is the earliest Python 3 version adopted as the +default Python by a Long Term Support (LTS) Linux distribution (in this case, Ubuntu-16.04). +Previous LTS Linux distributions shipped with a Python 2 version which users can rely upon instead +of the Python 3 version. + +For Python 2, the default is for modules to run on at least Python 2.6. This allows +users with older distributions that are stuck on Python 2.6 to manage their +machines. Modules are allowed to drop support for Python 2.6 when one of +their dependent libraries requires a higher version of Python. This is not an +invitation to add unnecessary dependent libraries in order to force your +module to be usable only with a newer version of Python; instead it is an +acknowledgment that some libraries (for instance, boto3 and docker-py) will +only function with a newer version of Python. + +.. note:: Python 2.4 Module-side Support: + + Support for Python 2.4 and Python 2.5 was dropped in Ansible-2.4. RHEL-5 + (and its rebuilds like CentOS-5) were supported until April of 2017. + Ansible-2.3 was released in April of 2017 and was the last Ansible release + to support Python 2.4 on the module-side. + +Developing Ansible code that supports Python 2 and Python 3 +=========================================================== + +The best place to start learning about writing code that supports both Python 2 and Python 3 +is `Lennart Regebro's book: Porting to Python 3 <http://python3porting.com/>`_. +The book describes several strategies for porting to Python 3. 
The one we're +using is `to support Python 2 and Python 3 from a single code base +<http://python3porting.com/strategies.html#python-2-and-python-3-without-conversion>`_ + +Understanding strings in Python 2 and Python 3 +---------------------------------------------- + +Python 2 and Python 3 handle strings differently, so when you write code that supports Python 3 +you must decide what string model to use. Strings can be an array of bytes (like in C) or +they can be an array of text. Text is what we think of as letters, digits, +numbers, other printable symbols, and a small number of unprintable "symbols" +(control codes). + +In Python 2, the two types for these (:class:`str <python:str>` for bytes and +:func:`unicode <python:unicode>` for text) are often used interchangeably. When dealing only +with ASCII characters, the strings can be combined, compared, and converted +from one type to another automatically. When non-ASCII characters are +introduced, Python 2 starts throwing exceptions due to not knowing what encoding +the non-ASCII characters should be in. + +Python 3 changes this behavior by making the separation between bytes (:class:`bytes <python3:bytes>`) +and text (:class:`str <python3:str>`) more strict. Python 3 will throw an exception when +trying to combine and compare the two types. The programmer has to explicitly +convert from one type to the other to mix values from each. + +In Python 3 it's immediately apparent to the programmer when code is +mixing the byte and text types inappropriately, whereas in Python 2, code that mixes those types +may work until a user causes an exception by entering non-ASCII input. +Python 3 forces programmers to proactively define a strategy for +working with strings in their program so that they don't mix text and byte strings unintentionally. 
+ +Ansible uses different strategies for working with strings in controller-side code, in +:ref:`modules <module_string_strategy>`, and in :ref:`module_utils <module_utils_string_strategy>` code. + +.. _controller_string_strategy: + +Controller string strategy: the Unicode Sandwich +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +In controller-side code we use a strategy known as the Unicode Sandwich (named +after Python 2's :func:`unicode <python:unicode>` text type). For Unicode Sandwich we know that +at the border of our code and the outside world (for example, file and network IO, +environment variables, and some library calls) we are going to receive bytes. +We need to transform these bytes into text and use that throughout the +internal portions of our code. When we have to send those strings back out to +the outside world we first convert the text back into bytes. +To visualize this, imagine a 'sandwich' consisting of a top and bottom layer +of bytes, a layer of conversion between, and all text type in the center. + +Unicode Sandwich common borders: places to convert bytes to text in controller code +----------------------------------------------------------------------------------- + +This is a partial list of places where we have to convert to and from bytes +when using the Unicode Sandwich string strategy. It's not exhaustive but +it gives you an idea of where to watch for problems. + +Reading and writing to files +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +In Python 2, reading from files yields bytes. In Python 3, it can yield text. +To make code that's portable to both we don't make use of Python 3's ability +to yield text but instead do the conversion explicitly ourselves. For example: + +..
code-block:: python + + from ansible.module_utils._text import to_text + + with open('filename-with-utf8-data.txt', 'rb') as my_file: + b_data = my_file.read() + try: + data = to_text(b_data, errors='surrogate_or_strict') + except UnicodeError: + # Handle the exception gracefully -- usually by displaying a good + # user-centric error message that can be traced back to this piece + # of code. + pass + +.. note:: Much of Ansible assumes that all encoded text is UTF-8. At some + point, if there is demand for other encodings we may change that, but for + now it is safe to assume that bytes are UTF-8. + +Writing to files is the opposite process: + +.. code-block:: python + + from ansible.module_utils._text import to_bytes + + with open('filename.txt', 'wb') as my_file: + my_file.write(to_bytes(some_text_string)) + +Note that we don't have to catch :exc:`UnicodeError` here because we're +transforming to UTF-8 and all text strings in Python can be transformed back +to UTF-8. + +Filesystem interaction +^^^^^^^^^^^^^^^^^^^^^^ + +Dealing with filenames often involves dropping back to bytes because on UNIX-like +systems filenames are bytes. On Python 2, if we pass a text string to these +functions, the text string will be converted to a byte string inside of the +function and a traceback will occur if non-ASCII characters are present. In +Python 3, a traceback will only occur if the text string can't be decoded in +the current locale, but it's still good to be explicit and have code which +works on both versions: + +.. 
code-block:: python + + import os.path + + from ansible.module_utils._text import to_bytes + + filename = u'/var/tmp/くらとみ.txt' + f = open(to_bytes(filename), 'wb') + mtime = os.path.getmtime(to_bytes(filename)) + b_filename = os.path.expandvars(to_bytes(filename)) + if os.path.exists(to_bytes(filename)): + pass + +When you are only manipulating a filename as a string without talking to the +filesystem (or a C library which talks to the filesystem) you can often get +away without converting to bytes: + +.. code-block:: python + + import os.path + + os.path.join(u'/var/tmp/café', u'くらとみ') + os.path.split(u'/var/tmp/café/くらとみ') + +On the other hand, if the code needs to manipulate the filename and also talk +to the filesystem, it can be more convenient to transform to bytes right away +and manipulate in bytes. + +.. warning:: Make sure all variables passed to a function are the same type. + If you're working with something like :func:`python3:os.path.join` which takes + multiple strings and uses them in combination, you need to make sure that + all the types are the same (either all bytes or all text). Mixing + bytes and text will cause tracebacks. + +Interacting with other programs +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Interacting with other programs goes through the operating system and +C libraries and operates on things that the UNIX kernel defines. These +interfaces are all byte-oriented so the Python interface is byte oriented as +well. On both Python 2 and Python 3, byte strings should be given to Python's +subprocess library and byte strings should be expected back from it. + +One of the main places in Ansible's controller code that we interact with +other programs is the connection plugins' ``exec_command`` methods. 
These +methods transform any text strings they receive in the command (and arguments +to the command) to execute into bytes and return stdout and stderr as byte strings. +Higher level functions (like action plugins' ``_low_level_execute_command``) +transform the output into text strings. + +.. _module_string_strategy: + +Module string strategy: Native String +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +In modules we use a strategy known as Native Strings. This makes things +easier on the community members who maintain so many of Ansible's +modules, by not breaking backwards compatibility by +mandating that all strings inside of modules are text and converting between +text and bytes at the borders. + +Native strings refer to the type that Python uses when you specify a bare +string literal: + +.. code-block:: python + + "This is a native string" + +In Python 2, these are byte strings. In Python 3 these are text strings. Modules should be +coded to expect bytes on Python 2 and text on Python 3. + +.. _module_utils_string_strategy: + +Module_utils string strategy: hybrid +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +In ``module_utils`` code we use a hybrid string strategy. Although Ansible's +``module_utils`` code is largely like module code, some pieces of it are +used by the controller as well. So it needs to be compatible with modules +and with the controller's assumptions, particularly the string strategy. +The module_utils code attempts to accept native strings as input +to its functions and emit native strings as their output. + +In ``module_utils`` code: + +* Functions **must** accept string parameters as either text strings or byte strings. +* Functions may return either the same type of string as they were given or the native string type for the Python version they are run on. +* Functions that return strings **must** document whether they return strings of the same type as they were given or native strings.
+ +Module-utils functions are therefore often very defensive in nature. +They convert their string parameters into text (using ``ansible.module_utils._text.to_text``) +at the beginning of the function, do their work, and then convert +the return values into the native string type (using ``ansible.module_utils._text.to_native``) +or back to the string type that their parameters received. + +Tips, tricks, and idioms for Python 2/Python 3 compatibility +------------------------------------------------------------ + +Use forward-compatibility boilerplate +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Use the following boilerplate code at the top of all python files +to make certain constructs act the same way on Python 2 and Python 3: + +.. code-block:: python + + # Make coding more python3-ish + from __future__ import (absolute_import, division, print_function) + __metaclass__ = type + +``__metaclass__ = type`` makes all classes defined in the file into new-style +classes without explicitly inheriting from :class:`object <python3:object>`. + +The ``__future__`` imports do the following: + +:absolute_import: Makes imports look in :data:`sys.path <python3:sys.path>` for the modules being + imported, skipping the directory in which the module doing the importing + lives. If the code wants to use the directory in which the module doing + the importing, there's a new dot notation to do so. +:division: Makes division of integers always return a float. If you need to + find the quotient use ``x // y`` instead of ``x / y``. +:print_function: Changes :func:`print <python3:print>` from a keyword into a function. + +.. 
seealso:: + * `PEP 0328: Absolute Imports <https://www.python.org/dev/peps/pep-0328/#guido-s-decision>`_ + * `PEP 0238: Division <https://www.python.org/dev/peps/pep-0238>`_ + * `PEP 3105: Print function <https://www.python.org/dev/peps/pep-3105>`_ + +Prefix byte strings with ``b_`` +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Since mixing text and bytes types leads to tracebacks we want to be clear +about what variables hold text and what variables hold bytes. We do this by +prefixing any variable holding bytes with ``b_``. For instance: + +.. code-block:: python + + filename = u'/var/tmp/café.txt' + b_filename = to_bytes(filename) + with open(b_filename) as f: + data = f.read() + +We do not prefix the text strings instead because we only operate +on byte strings at the borders, so there are fewer variables that need bytes +than text. + +Import Ansible's bundled Python ``six`` library +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The third-party Python `six <https://pypi.org/project/six/>`_ library exists +to help projects create code that runs on both Python 2 and Python 3. Ansible +includes a version of the library in module_utils so that other modules can use it +without requiring that it is installed on the remote system. To make use of +it, import it like this: + +.. code-block:: python + + from ansible.module_utils import six + +.. note:: Ansible can also use a system copy of six + + Ansible will use a system copy of six if the system copy is a later + version than the one Ansible bundles. + +Handle exceptions with ``as`` +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +In order for code to function on Python 2.6+ and Python 3, use the +new exception-catching syntax which uses the ``as`` keyword: + +.. code-block:: python + + try: + a = 2/0 + except ValueError as e: + module.fail_json(msg="Tried to divide by zero: %s" % e) + +Do **not** use the following syntax as it will fail on every version of Python 3: + +.. 
This code block won't highlight because python2 isn't recognized. This is necessary to pass tests under python 3. +.. code-block:: none + + try: + a = 2/0 + except ValueError, e: + module.fail_json(msg="Tried to divide by zero: %s" % e) + +Update octal numbers +^^^^^^^^^^^^^^^^^^^^ + +In Python 2.x, octal literals could be specified as ``0755``. In Python 3, +octals must be specified as ``0o755``. + +String formatting for controller code +------------------------------------- + +Use ``str.format()`` for Python 2.6 compatibility +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Starting in Python 2.6, strings gained a method called ``format()`` to put +strings together. However, one commonly used feature of ``format()`` wasn't +added until Python 2.7, so you need to remember not to use it in Ansible code: + +.. code-block:: python + + # Does not work in Python 2.6! + new_string = "Dear {}, Welcome to {}".format(username, location) + + # Use this instead + new_string = "Dear {0}, Welcome to {1}".format(username, location) + +Both of the format strings above map positional arguments of the ``format()`` +method into the string. However, the first version doesn't work in +Python 2.6. Always remember to put numbers into the placeholders so the code +is compatible with Python 2.6. + +.. seealso:: + Python documentation on `format strings <https://docs.python.org/2/library/string.html#formatstrings>`_ + +Use percent format with byte strings +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +In Python 3.x, byte strings do not have a ``format()`` method. However, it +does have support for the older, percent-formatting. + +.. code-block:: python + + b_command_line = b'ansible-playbook --become-user %s -K %s' % (user, playbook_file) + +.. note:: Percent formatting added in Python 3.5 + + Percent formatting of byte strings was added back into Python 3 in 3.5. + This isn't a problem for us because Python 3.5 is our minimum version. 
+ + However, if you happen to be testing Ansible code with Python 3.4 or + earlier, you will find that the byte string formatting here won't work. + Upgrade to Python 3.5 to test. + +.. seealso:: + Python documentation on `percent formatting <https://docs.python.org/2/library/stdtypes.html#string-formatting>`_ + +.. _testing_modules_python_3: + +Testing modules on Python 3 +=================================== + +Ansible modules are slightly harder to code to support Python 3 than normal code from other projects. A lot of mocking has to go into unit testing an Ansible module, so it's harder to test that your changes have fixed everything or to make sure that later commits haven't regressed the Python 3 support. Review our :ref:`testing <developing_testing>` pages for more information. diff --git a/docs/docsite/rst/dev_guide/developing_rebasing.rst b/docs/docsite/rst/dev_guide/developing_rebasing.rst new file mode 100644 index 00000000..81936be1 --- /dev/null +++ b/docs/docsite/rst/dev_guide/developing_rebasing.rst @@ -0,0 +1,83 @@ +.. _rebase_guide: + +*********************** +Rebasing a pull request +*********************** + +You may find that your pull request (PR) is out-of-date and needs to be rebased. This can happen for several reasons: + +- Files modified in your PR are in conflict with changes which have already been merged. +- Your PR is old enough that significant changes to automated test infrastructure have occurred. + +Rebasing the branch used to create your PR will resolve both of these issues. + +Configuring your remotes +======================== + +Before you can rebase your PR, you need to make sure you have the proper remotes configured. These instructions apply to any repository on GitHub, including collections repositories. On other platforms (bitbucket, gitlab), the same principles and commands apply but the syntax may be different. We use the ansible/ansible repository here as an example.
In other repositories, the branch names may be different. Assuming you cloned your fork in the usual fashion, the ``origin`` remote will point to your fork:: + + $ git remote -v + origin git@github.com:YOUR_GITHUB_USERNAME/ansible.git (fetch) + origin git@github.com:YOUR_GITHUB_USERNAME/ansible.git (push) + +However, you also need to add a remote which points to the upstream repository:: + + $ git remote add upstream https://github.com/ansible/ansible.git + +Which should leave you with the following remotes:: + + $ git remote -v + origin git@github.com:YOUR_GITHUB_USERNAME/ansible.git (fetch) + origin git@github.com:YOUR_GITHUB_USERNAME/ansible.git (push) + upstream https://github.com/ansible/ansible.git (fetch) + upstream https://github.com/ansible/ansible.git (push) + +Checking the status of your branch should show your fork is up-to-date with the ``origin`` remote:: + + $ git status + On branch YOUR_BRANCH + Your branch is up-to-date with 'origin/YOUR_BRANCH'. + nothing to commit, working tree clean + +Rebasing your branch +==================== + +Once you have an ``upstream`` remote configured, you can rebase the branch for your PR:: + + $ git pull --rebase upstream devel + +This will replay the changes in your branch on top of the changes made in the upstream ``devel`` branch. +If there are merge conflicts, you will be prompted to resolve those before you can continue. + +After you rebase, the status of your branch changes:: + + $ git status + On branch YOUR_BRANCH + Your branch and 'origin/YOUR_BRANCH' have diverged, + and have 4 and 1 different commits each, respectively. + (use "git pull" to merge the remote branch into yours) + nothing to commit, working tree clean + +Don't worry, this is normal after a rebase. You should ignore the ``git status`` instructions to use ``git pull``. We'll cover what to do next in the following section. 
+ +Updating your pull request +========================== + +Now that you've rebased your branch, you need to push your changes to GitHub to update your PR. + +Since rebasing re-writes git history, you will need to use a force push:: + + $ git push --force-with-lease + +Your PR on GitHub has now been updated. This will automatically trigger testing of your changes. +You should check in on the status of your PR after tests have completed to see if further changes are required. + +Getting help rebasing +===================== + +For help with rebasing your PR, or other development related questions, join us on our #ansible-devel IRC chat channel on `freenode.net <https://freenode.net>`_. + +.. seealso:: + + :ref:`community_development_process` + Information on roadmaps, opening PRs, Ansibullbot, and more diff --git a/docs/docsite/rst/dev_guide/index.rst b/docs/docsite/rst/dev_guide/index.rst new file mode 100644 index 00000000..fb5b7f4a --- /dev/null +++ b/docs/docsite/rst/dev_guide/index.rst @@ -0,0 +1,92 @@ +.. _developer_guide: + +*************** +Developer Guide +*************** + +Welcome to the Ansible Developer Guide! + +**Who should use this guide?** + +If you want to extend Ansible by using a custom module or plugin locally, creating a module or plugin, adding functionality to an existing module, or expanding test coverage, this guide is for you. We've included detailed information for developers on how to test and document modules, as well as the prerequisites for getting your module or plugin accepted into the main Ansible repository. + +Find the task that best describes what you want to do: + +* I'm looking for a way to address a use case: + + * I want to :ref:`add a custom plugin or module locally <developing_locally>`. + * I want to figure out if :ref:`developing a module is the right approach <module_dev_should_you>` for my use case. + * I want to :ref:`develop a collection <developing_collections>`. 
+ * I want to :ref:`contribute to an Ansible-maintained collection <contributing_maintained_collections>`. + * I want to :ref:`contribute to a community-maintained collection <hacking_collections>`. + * I want to :ref:`migrate a role to a collection <migrating_roles>`. + +* I've read the info above, and I'm sure I want to develop a module: + + * What do I need to know before I start coding? + * I want to :ref:`set up my Python development environment <environment_setup>`. + * I want to :ref:`get started writing a module <developing_modules_general>`. + * I want to write a specific kind of module: + * a :ref:`network module <developing_modules_network>` + * a :ref:`Windows module <developing_modules_general_windows>`. + * an :ref:`Amazon module <AWS_module_development>`. + * an :ref:`OpenStack module <OpenStack_module_development>`. + * an :ref:`oVirt/RHV module <oVirt_module_development>`. + * a :ref:`VMware module <VMware_module_development>`. + * I want to :ref:`write a series of related modules <developing_modules_in_groups>` that integrate Ansible with a new product (for example, a database, cloud provider, network platform, and so on). + +* I want to refine my code: + + * I want to :ref:`debug my module code <debugging_modules>`. + * I want to :ref:`add tests <developing_testing>`. + * I want to :ref:`document my module <module_documenting>`. + * I want to :ref:`document my set of modules for a network platform <documenting_modules_network>`. + * I want to follow :ref:`conventions and tips for clean, usable module code <developing_modules_best_practices>`. + * I want to :ref:`make sure my code runs on Python 2 and Python 3 <developing_python_3>`. + +* I want to work on other development projects: + + * I want to :ref:`write a plugin <developing_plugins>`. + * I want to :ref:`connect Ansible to a new source of inventory <developing_inventory>`. + * I want to :ref:`deprecate an outdated module <deprecating_modules>`. 
+ +* I want to contribute back to the Ansible project: + + * I want to :ref:`understand how to contribute to Ansible <ansible_community_guide>`. + * I want to :ref:`contribute my module or plugin <developing_modules_checklist>`. + * I want to :ref:`understand the license agreement <contributor_license_agreement>` for contributions to Ansible. + +If you prefer to read the entire guide, here's a list of the pages in order. + +.. toctree:: + :maxdepth: 2 + + developing_locally + developing_modules + developing_modules_general + developing_modules_checklist + developing_modules_best_practices + developing_python_3 + debugging + developing_modules_documenting + developing_modules_general_windows + developing_modules_general_aci + platforms/aws_guidelines + platforms/openstack_guidelines + platforms/ovirt_dev_guide + platforms/vmware_guidelines + developing_modules_in_groups + testing + module_lifecycle + developing_plugins + developing_inventory + developing_core + developing_program_flow_modules + developing_api + developing_rebasing + developing_module_utilities + developing_collections + migrating_roles + collections_galaxy_meta + overview_architecture diff --git a/docs/docsite/rst/dev_guide/migrating_roles.rst b/docs/docsite/rst/dev_guide/migrating_roles.rst new file mode 100644 index 00000000..a32fa242 --- /dev/null +++ b/docs/docsite/rst/dev_guide/migrating_roles.rst @@ -0,0 +1,410 @@ + +.. _migrating_roles: + +************************************************* +Migrating Roles to Roles in Collections on Galaxy +************************************************* + +You can migrate any existing standalone role into a collection and host the collection on Galaxy. With Ansible collections, you can distribute many roles in a single cohesive unit of re-usable automation. Inside a collection, you can share custom plugins across all roles in the collection instead of duplicating them in each role's :file:`library/` directory.
+ +You must migrate roles to collections if you want to distribute them as certified Ansible content. + +.. note:: + + If you want to import your collection to Galaxy, you need a `Galaxy namespace <https://galaxy.ansible.com/docs/contributing/namespaces.html>`_. + +See :ref:`developing_collections` for details on collections. + + +.. contents:: + :local: + :depth: 1 + +Comparing standalone roles to collection roles +=============================================== + +:ref:`Standalone roles <playbooks_reuse_roles>` have the following directory structure: + +.. code-block:: bash + :emphasize-lines: 5,7,8 + + role/ + ├── defaults + ├── files + ├── handlers + ├── library + ├── meta + ├── module_utils + ├── [*_plugins] + ├── tasks + ├── templates + ├── tests + └── vars + + +The highlighted directories above will change when you migrate to a collection-based role. The collection directory structure includes a :file:`roles/` directory: + +.. code-block:: bash + + mynamespace/ + └── mycollection/ + ├── docs/ + ├── galaxy.yml + ├── plugins/ + │ ├── modules/ + │ │ └── module1.py + │ ├── inventory/ + │ └── .../ + ├── README.md + ├── roles/ + │ ├── role1/ + │ ├── role2/ + │ └── .../ + ├── playbooks/ + │ ├── files/ + │ ├── vars/ + │ ├── templates/ + │ └── tasks/ + └── tests/ + +You will need to use the Fully Qualified Collection Name (FQCN) to use the roles and plugins when you migrate your role into a collection. The FQCN is the combination of the collection ``namespace``, collection ``name``, and the content item you are referring to. + +So for example, in the above collection, the FQCN to access ``role1`` would be: + +.. code-block:: Python + + mynamespace.mycollection.role1 + + +A collection can contain one or more roles in the :file:`roles/` directory and these are almost identical to standalone roles, except you need to move plugins out of the individual roles, and use the :abbr:`FQCN (Fully Qualified Collection Name)` in some places, as detailed in the next section. + +.. 
note:: + + In standalone roles, some of the plugin directories referenced their plugin types in the plural sense; this is not the case in collections. + +.. _simple_roles_in_collections: + +Migrating a role to a collection +================================= + +To migrate from a standalone role that contains no plugins to a collection role: + +1. Create a local :file:`ansible_collections` directory and ``cd`` to this new directory. + +2. Create a collection. If you want to import this collection to Ansible Galaxy, you need a `Galaxy namespace <https://galaxy.ansible.com/docs/contributing/namespaces.html>`_. + +.. code-block:: bash + + $ ansible-galaxy collection init mynamespace.mycollection + +This creates the collection directory structure. + +3. Copy the standalone role directory into the :file:`roles/` subdirectory of the collection. Roles in collections cannot have hyphens in the role name. Rename any such roles to use underscores instead. + +.. code-block:: bash + + $ mkdir mynamespace/mycollection/roles/my_role/ + $ cp -r /path/to/standalone/role/mynamespace/my_role/\* mynamespace/mycollection/roles/my_role/ + +4. Update ``galaxy.yml`` to include any role dependencies. + +5. Update the collection README.md file to add links to any role README.md files. + + +.. _complex_roles_in_collections: + +Migrating a role with plugins to a collection +============================================== + +To migrate from a standalone role that has plugins to a collection role: + +1. Create a local :file:`ansible_collections` directory and ``cd`` to this new directory. + +2. Create a collection. If you want to import this collection to Ansible Galaxy, you need a `Galaxy namespace <https://galaxy.ansible.com/docs/contributing/namespaces.html>`_. + +.. code-block:: bash + + $ ansible-galaxy collection init mynamespace.mycollection + +This creates the collection directory structure. + +3. Copy the standalone role directory into the :file:`roles/` subdirectory of the collection.
   $ mv mynamespace/mycollection/roles/my_role/library/\* mynamespace/mycollection/plugins/modules/
3. Update relative imports. In collections, relative imports must start with a period. For example, :file:`./filename` and :file:`../asdfu/filestuff` work, but a plain :file:`filename` reference to a file in the same directory must be updated to :file:`./filename`.
+ +The following example code snippets show a Python and a PowerShell module using both default Ansible ``module_utils`` and those provided by a collection. In this example the namespace is ``ansible_example`` and the collection is ``community``. + +In the Python example the ``module_utils`` is ``helper`` and the :abbr:`FQCN (Fully Qualified Collection Name)` is ``ansible_example.community.plugins.module_utils.helper``: + +.. code-block:: text + + from ansible.module_utils.basic import AnsibleModule + from ansible.module_utils._text import to_text + from ansible.module_utils.six.moves.urllib.parse import urlencode + from ansible.module_utils.six.moves.urllib.error import HTTPError + from ansible_collections.ansible_example.community.plugins.module_utils.helper import HelperRequest + + argspec = dict( + name=dict(required=True, type='str'), + state=dict(choices=['present', 'absent'], required=True), + ) + + module = AnsibleModule( + argument_spec=argspec, + supports_check_mode=True + ) + + _request = HelperRequest( + module, + headers={"Content-Type": "application/json"}, + data=data + ) + +In the PowerShell example the ``module_utils`` is ``hyperv`` and the :abbr:`FQCN (Fully Qualified Collection Name)` is ``ansible_example.community.plugins.module_utils.hyperv``: + +.. code-block:: powershell + + #!powershell + #AnsibleRequires -CSharpUtil Ansible.Basic + #AnsibleRequires -PowerShell ansible_collections.ansible_example.community.plugins.module_utils.hyperv + + $spec = @{ + name = @{ required = $true; type = "str" } + state = @{ required = $true; choices = @("present", "absent") } + } + $module = [Ansible.Basic.AnsibleModule]::Create($args, $spec) + + Invoke-HyperVFunction -Name $module.Params.name + + $module.ExitJson() + + +.. 
   $ cp -r my-standalone-role.webapp/* acme/webserver/roles/webapp/
4. Change ``manage_webserver`` to ``acme.webserver.manage`` in :file:`tasks/` files in the role (for example, ``my-standalone-role.webapp/tasks/main.yml``) and any use of the original module name.
All previously bundled modules and plugins used in the standalone role are now referenced by :abbr:`FQCN (Fully Qualified Collection Name)` so even though they are no longer embedded, they can be found from the collection contents. This is an example of how the content inside the collection is a unique entity and does not have to be bound to a role or otherwise. You could alternately create two separate collections: one for the modules and plugins and another for the standalone role to migrate to. The role must use the modules and plugins as :abbr:`FQCN (Fully Qualified Collection Name)`.
Modules in the main Ansible repo have a defined life cycle, from first introduction to final removal. The module life cycle is tied to the :ref:`Ansible release cycle <release_cycle>`.
   :removed_in: A ``string``, such as ``"2.10"``; the version of Ansible where the module will be replaced with a docs-only module stub. Usually current release +4. Mutually exclusive with :remove_by_date:.
   :remove_by_date: (Added in Ansible 2.10). An ISO 8601 formatted date when the module will be removed. Usually 2 years from the date the module is deprecated. Mutually exclusive with :removed_in:.
   :why: Optional string that details why the module has been deprecated.
   :alternative: Inform users what they should do instead, for example, ``Use M(whatmoduletouseinstead) instead.``.
+ Some of the elements in the PR might now be out of date. + +Changing a module name +====================== + +You can also rename a module and keep an alias to the old name by using a symlink that starts with _. +This example allows the ``stat`` module to be called with ``fileinfo``, making the following examples equivalent:: + + EXAMPLES = ''' + ln -s stat.py _fileinfo.py + ansible -m stat -a "path=/tmp" localhost + ansible -m fileinfo -a "path=/tmp" localhost + ''' diff --git a/docs/docsite/rst/dev_guide/overview_architecture.rst b/docs/docsite/rst/dev_guide/overview_architecture.rst new file mode 100644 index 00000000..fdd90625 --- /dev/null +++ b/docs/docsite/rst/dev_guide/overview_architecture.rst @@ -0,0 +1,149 @@ +******************** +Ansible architecture +******************** + +Ansible is a radically simple IT automation engine that automates cloud provisioning, configuration management, application deployment, intra-service orchestration, and many other IT needs. + +Being designed for multi-tier deployments since day one, Ansible models your IT infrastructure by describing how all of your systems inter-relate, rather than just managing one system at a time. + +It uses no agents and no additional custom security infrastructure, so it's easy to deploy - and most importantly, it uses a very simple language (YAML, in the form of Ansible Playbooks) that allow you to describe your automation jobs in a way that approaches plain English. + +In this section, we'll give you a really quick overview of how Ansible works so you can see how the pieces fit together. + +.. contents:: + :local: + +Modules +======= + +Ansible works by connecting to your nodes and pushing out scripts called "Ansible modules" to them. Most modules accept parameters that describe the desired state of the system. +Ansible then executes these modules (over SSH by default), and removes them when finished. 
Your library of modules can reside on any machine, and there are no servers, daemons, or databases required. + +You can :ref:`write your own modules <developing_modules_general>`, though you should first consider :ref:`whether you should <developing_modules>`. Typically you'll work with your favorite terminal program, a text editor, and probably a version control system to keep track of changes to your content. You may write specialized modules in any language that can return JSON (Ruby, Python, bash, and so on). + +Module utilities +================ + +When multiple modules use the same code, Ansible stores those functions as module utilities to minimize duplication and maintenance. For example, the code that parses URLs is ``lib/ansible/module_utils/url.py``. You can :ref:`write your own module utilities <developing_module_utilities>` as well. Module utilities may only be written in Python or in PowerShell. + +Plugins +======= + +:ref:`Plugins <plugins_lookup>` augment Ansible's core functionality. While modules execute on the target system in separate processes (usually that means on a remote system), plugins execute on the control node within the ``/usr/bin/ansible`` process. Plugins offer options and extensions for the core features of Ansible - transforming data, logging output, connecting to inventory, and more. Ansible ships with a number of handy plugins, and you can easily :ref:`write your own <developing_plugins>`. For example, you can write an :ref:`inventory plugin <developing_inventory>` to connect to any datasource that returns JSON. Plugins must be written in Python. + +Inventory +========= + +By default, Ansible represents the machines it manages in a file (INI, YAML, and so on) that puts all of your managed machines in groups of your own choosing. + +To add new machines, there is no additional SSL signing server involved, so there's never any hassle deciding why a particular machine didn't get linked up due to obscure NTP or DNS issues. 
Here's what a plain text inventory file looks like::

    [webservers]
    www1.example.com
    www2.example.com

    [dbservers]
    db0.example.com
    db1.example.com

Once inventory hosts are listed, variables can be assigned to them in simple text files (in a subdirectory called 'group_vars/' or 'host_vars/') or directly in the inventory file.
As +Ansible finds each playbook and role included in a given run, it appends +any directories related to that playbook or role to the search path. Those +directories remain in scope for the duration of the run, even after the playbook or role +has finished executing. Ansible loads modules, module utilities, and plugins in this order: + +1. Directories adjacent to a playbook specified on the command line. If you run Ansible with ``ansible-playbook /path/to/play.yml``, Ansible appends these directories if they exist: + + .. code-block:: bash + + /path/to/modules + /path/to/module_utils + /path/to/plugins + +2. Directories adjacent to a playbook that is statically imported by a + playbook specified on the command line. If ``play.yml`` includes + ``- import_playbook: /path/to/subdir/play1.yml``, Ansible appends these directories if they exist: + + .. code-block:: bash + + /path/to/subdir/modules + /path/to/subdir/module_utils + /path/to/subdir/plugins + +3. Subdirectories of a role directory referenced by a playbook. If + ``play.yml`` runs ``myrole``, Ansible appends these directories if they exist: + + .. code-block:: bash + + /path/to/roles/myrole/modules + /path/to/roles/myrole/module_utils + /path/to/roles/myrole/plugins + +4. Directories specified as default paths in ``ansible.cfg`` or by the related + environment variables, including the paths for the various plugin types. See :ref:`ansible_configuration_settings` for more information. + Sample ``ansible.cfg`` fields: + + .. code-block:: bash + + DEFAULT_MODULE_PATH + DEFAULT_MODULE_UTILS_PATH + DEFAULT_CACHE_PLUGIN_PATH + DEFAULT_FILTER_PLUGIN_PATH + + Sample environment variables: + + .. code-block:: bash + + ANSIBLE_LIBRARY + ANSIBLE_MODULE_UTILS + ANSIBLE_CACHE_PLUGINS + ANSIBLE_FILTER_PLUGINS + +5. The standard directories that ship as part of the Ansible distribution. + +.. caution:: + + Modules, module utilities, and plugins in user-specified directories will + override the standard versions. 
This includes some files with generic names. + For example, if you have a file named ``basic.py`` in a user-specified + directory, it will override the standard ``ansible.module_utils.basic``. + + If you have more than one module, module utility, or plugin with the same name in different user-specified directories, the order of commands at the command line and the order of includes and roles in each play will affect which one is found and used on that particular play. diff --git a/docs/docsite/rst/dev_guide/platforms/aws_guidelines.rst b/docs/docsite/rst/dev_guide/platforms/aws_guidelines.rst new file mode 100644 index 00000000..acce3de4 --- /dev/null +++ b/docs/docsite/rst/dev_guide/platforms/aws_guidelines.rst @@ -0,0 +1,754 @@ +.. _AWS_module_development: + +**************************************************** +Guidelines for Ansible Amazon AWS module development +**************************************************** + +The Ansible AWS collection (on `Galaxy <https://galaxy.ansible.com/community/aws>`_, source code `repository <https://github.com/ansible-collections/community.aws>`_) is maintained by the Ansible AWS Working Group. For further information see the `AWS working group community page <https://github.com/ansible/community/wiki/aws>`_. If you are planning to contribute AWS modules to Ansible then getting in touch with the working group is a good way to start, especially because a similar module may already be under development. + +.. contents:: + :local: + +Maintaining existing modules +============================ + +Fixing bugs +----------- + +Bug fixes to code that relies on boto will still be accepted. When possible, +the code should be ported to use boto3. + +Adding new features +------------------- + +Try to keep backward compatibility with relatively recent versions of boto3. 
That means that if you +want to implement some functionality that uses a new feature of boto3, it should only fail if that +feature actually needs to be run, with a message stating the missing feature and minimum required +version of boto3. + +Use feature testing (for example, ``hasattr('boto3.module', 'shiny_new_method')``) to check whether boto3 +supports a feature rather than version checking. For example, from the ``ec2`` module: + +.. code-block:: python + + if boto_supports_profile_name_arg(ec2): + params['instance_profile_name'] = instance_profile_name + else: + if instance_profile_name is not None: + module.fail_json(msg="instance_profile_name parameter requires boto version 2.5.0 or higher") + +Migrating to boto3 +------------------ + +Prior to Ansible 2.0, modules were written in either boto3 or boto. We are +still porting some modules to boto3. Modules that still require boto should be ported to use boto3 rather than using both libraries (boto and boto3). We would like to remove the boto dependency from all modules. + +Porting code to AnsibleAWSModule +--------------------------------- + +Some old AWS modules use the generic ``AnsibleModule`` as a base rather than the more efficient ``AnsibleAWSModule``. To port an old module to ``AnsibleAWSModule``, change: + +.. code-block:: python + + from ansible.module_utils.basic import AnsibleModule + ... + module = AnsibleModule(...) + +to: + +.. code-block:: python + + from ansible.module_utils.aws.core import AnsibleAWSModule + ... + module = AnsibleAWSModule(...) + +Few other changes are required. AnsibleAWSModule +does not inherit methods from AnsibleModule by default, but most useful methods +are included. If you do find an issue, please raise a bug report. + +When porting, keep in mind that AnsibleAWSModule also will add the default ec2 +argument spec by default. In pre-port modules, you should see common arguments +specified with: + +.. 
code-block:: python + + def main(): + argument_spec = ec2_argument_spec() + argument_spec.update(dict( + state=dict(default='present', choices=['present', 'absent', 'enabled', 'disabled']), + name=dict(default='default'), + # ... and so on ... + )) + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True,) + +These can be replaced with: + +.. code-block:: python + + def main(): + argument_spec = dict( + state=dict(default='present', choices=['present', 'absent', 'enabled', 'disabled']), + name=dict(default='default'), + # ... and so on ... + ) + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True,) + +Creating new AWS modules +======================== + +Use boto3 and AnsibleAWSModule +------------------------------- + +All new AWS modules must use boto3 and ``AnsibleAWSModule``. + +``AnsibleAWSModule`` greatly simplifies exception handling and library +management, reducing the amount of boilerplate code. If you cannot +use ``AnsibleAWSModule`` as a base, you must document the reason and request an exception to this rule. + +Naming your module +------------------ + +Base the name of the module on the part of AWS that you actually use. (A good rule of thumb is to +take whatever module you use with boto as a starting point). Don't further abbreviate names - if +something is a well known abbreviation of a major component of AWS (for example, VPC or ELB), that's fine, but +don't create new ones independently. + +Unless the name of your service is quite unique, please consider using ``aws_`` as a prefix. For example ``aws_lambda``. + +Importing botocore and boto3 +---------------------------- + +The ``ansible.module_utils.ec2`` module and ``ansible.module_utils.core.aws`` modules both +automatically import boto3 and botocore. If boto3 is missing from the system then the variable +``HAS_BOTO3`` will be set to false. Normally, this means that modules don't need to import +boto3 directly. 
These helpers also check for missing profiles or a region not set when it needs to be, so you don't have to.
An example of the older style connection used for modules based on AnsibleModule rather than AnsibleAWSModule:

.. code-block:: python

   region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
   connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_params)
+* Use ``aws_module.fail_json_aws()`` to report the module failure in a standard way +* Retry using AWSRetry +* Use ``fail_json()`` to report the failure without using ``ansible.module_utils.aws.core`` +* Do something custom in the case where you know how to handle the exception + +For more information on botocore exception handling see the `botocore error documentation <https://botocore.readthedocs.io/en/latest/client_upgrades.html#error-handling>`_. + +Using is_boto3_error_code +------------------------- + +To use ``ansible.module_utils.aws.core.is_boto3_error_code`` to catch a single +AWS error code, call it in place of ``ClientError`` in your except clauses. In +this case, *only* the ``InvalidGroup.NotFound`` error code will be caught here, +and any other error will be raised for handling elsewhere in the program. + +.. code-block:: python + + try: + info = connection.describe_security_groups(**kwargs) + except is_boto3_error_code('InvalidGroup.NotFound'): + pass + do_something(info) # do something with the info that was successfully returned + +Using fail_json_aws() +--------------------- + +In the AnsibleAWSModule there is a special method, ``module.fail_json_aws()`` for nice reporting of +exceptions. Call this on your exception and it will report the error together with a traceback for +use in Ansible verbose mode. + +You should use the AnsibleAWSModule for all new modules, unless not possible. If adding significant +amounts of exception handling to existing modules, we recommend migrating the module to use AnsibleAWSModule +(there are very few changes required to do this) + +.. 
code-block:: python
+
+    from ansible.module_utils.aws.core import AnsibleAWSModule
+
+    # Set up module parameters
+    # module params code here
+
+    # Connect to AWS
+    # connection code here
+
+    # Make a call to AWS
+    name = module.params.get('name')
+    try:
+        result = connection.describe_frooble(FroobleName=name)
+    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+        module.fail_json_aws(e, msg="Couldn't obtain frooble %s" % name)
+
+Note that it should normally be acceptable to catch all normal exceptions here; however, if you
+expect anything other than botocore exceptions you should test everything works as expected.
+
+If you need to perform an action based on the error boto3 returned, use the error code.
+
+.. code-block:: python
+
+    # Make a call to AWS
+    name = module.params.get('name')
+    try:
+        result = connection.describe_frooble(FroobleName=name)
+    except botocore.exceptions.ClientError as e:
+        if e.response['Error']['Code'] == 'FroobleNotFound':
+            workaround_failure()  # This is an error that we can work around
+        else:
+            module.fail_json_aws(e, msg="Couldn't obtain frooble %s" % name)
+    except botocore.exceptions.BotoCoreError as e:
+        module.fail_json_aws(e, msg="Couldn't obtain frooble %s" % name)
+
+Using fail_json() and avoiding ansible.module_utils.aws.core
+------------------------------------------------------------
+
+Boto3 provides lots of useful information when an exception is thrown so pass this to the user
+along with the message.
+
+.. 
code-block:: python
+
+    from ansible.module_utils.ec2 import HAS_BOTO3
+    try:
+        import botocore
+    except ImportError:
+        pass  # caught by imported HAS_BOTO3
+
+    # Connect to AWS
+    # connection code here
+
+    # Make a call to AWS
+    name = module.params.get('name')
+    try:
+        result = connection.describe_frooble(FroobleName=name)
+    except botocore.exceptions.ClientError as e:
+        module.fail_json(msg="Couldn't obtain frooble %s: %s" % (name, str(e)),
+                         exception=traceback.format_exc(),
+                         **camel_dict_to_snake_dict(e.response))
+
+Note: we use `str(e)` rather than `e.message` as the latter doesn't
+work with Python 3.
+
+If you need to perform an action based on the error boto3 returned, use the error code.
+
+.. code-block:: python
+
+    # Make a call to AWS
+    name = module.params.get('name')
+    try:
+        result = connection.describe_frooble(FroobleName=name)
+    except botocore.exceptions.ClientError as e:
+        if e.response['Error']['Code'] == 'FroobleNotFound':
+            workaround_failure()  # This is an error that we can work around
+        else:
+            module.fail_json(msg="Couldn't obtain frooble %s: %s" % (name, str(e)),
+                             exception=traceback.format_exc(),
+                             **camel_dict_to_snake_dict(e.response))
+    except botocore.exceptions.BotoCoreError as e:
+        module.fail_json(msg="Couldn't obtain frooble %s: %s" % (name, str(e)),
+                         exception=traceback.format_exc())
+
+
+API throttling (rate limiting) and pagination
+=============================================
+
+For methods that return a lot of results, boto3 often provides
+`paginators <https://boto3.readthedocs.io/en/latest/guide/paginators.html>`_. If the method
+you're calling has ``NextToken`` or ``Marker`` parameters, you should probably
+check whether a paginator exists (the top of each boto3 service reference page has a link
+to Paginators, if the service has any). To use paginators, obtain a paginator object,
+call ``paginator.paginate`` with the appropriate arguments and then call ``build_full_result``.
+
+Any time that you are calling the AWS API a lot, you may experience API throttling,
+and there is an ``AWSRetry`` decorator that can be used to ensure backoff. Because
+exception handling could interfere with the retry working properly (as AWSRetry needs to
+catch throttling exceptions to work correctly), you'd need to provide a backoff function
+and then put exception handling around the backoff function.
+
+You can use ``exponential_backoff`` or ``jittered_backoff`` strategies - see
+the cloud ``module_utils`` (/lib/ansible/module_utils/cloud.py)
+and `AWS Architecture blog <https://www.awsarchitectureblog.com/2015/03/backoff.html>`_ for more details.
+
+The combination of these two approaches is then:
+
+.. code-block:: python
+
+    @AWSRetry.exponential_backoff(retries=5, delay=5)
+    def describe_some_resource_with_backoff(client, **kwargs):
+        paginator = client.get_paginator('describe_some_resource')
+        return paginator.paginate(**kwargs).build_full_result()['SomeResource']
+
+    def describe_some_resource(client, module):
+        filters = ansible_dict_to_boto3_filter_list(module.params['filters'])
+        try:
+            return describe_some_resource_with_backoff(client, Filters=filters)
+        except botocore.exceptions.ClientError as e:
+            module.fail_json_aws(e, msg="Could not describe some resource")
+
+
+If the underlying ``describe_some_resources`` API call throws a ``ResourceNotFound``
+exception, ``AWSRetry`` takes this as a cue to retry until it's not thrown (this
+is so that when creating a resource, we can just retry until it exists).
+
+To handle authorization failures or parameter validation errors in
+``describe_some_resource_with_backoff``, where we just want to return ``None`` if
+the resource doesn't exist and not retry, we need:
+
+.. 
code-block:: python
+
+    @AWSRetry.exponential_backoff(retries=5, delay=5)
+    def describe_some_resource_with_backoff(client, **kwargs):
+        try:
+            return client.describe_some_resource(ResourceName=kwargs['name'])['Resources']
+        except botocore.exceptions.ClientError as e:
+            if e.response['Error']['Code'] == 'ResourceNotFound':
+                return None
+            else:
+                raise
+        except botocore.exceptions.BotoCoreError as e:
+            raise
+
+    def describe_some_resource(client, module):
+        name = module.params.get('name')
+        try:
+            return describe_some_resource_with_backoff(client, name=name)
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+            module.fail_json_aws(e, msg="Could not describe resource %s" % name)
+
+
+To make use of AWSRetry easier, it can now be wrapped around any call from a
+client returned by ``AnsibleAWSModule``. To add retries to a client,
+create a client:
+
+.. code-block:: python
+
+    module.client('ec2', retry_decorator=AWSRetry.jittered_backoff(retries=10))
+
+Any calls from that client can be made to use the decorator passed at call-time
+using the `aws_retry` argument. By default, no retries are used.
+
+.. code-block:: python
+
+    ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff(retries=10))
+    ec2.describe_instances(InstanceIds=['i-123456789'], aws_retry=True)
+
+    # equivalent with normal AWSRetry
+    @AWSRetry.jittered_backoff(retries=10)
+    def describe_instances(client, **kwargs):
+        return ec2.describe_instances(**kwargs)
+
+    describe_instances(module.client('ec2'), InstanceIds=['i-123456789'])
+
+The call will be retried the specified number of times, so the calling functions
+don't need to be wrapped in the backoff decorator.
+
+You can also use customization for ``retries``, ``delay`` and ``max_delay`` parameters used by
+``AWSRetry.jittered_backoff`` API using module params. You can take a look at
+the `cloudformation <cloudformation_module>` module for example.
+
+To make all Amazon modules uniform, prefix the module param with ``backoff_``, so ``retries`` becomes ``backoff_retries``
+and likewise with ``backoff_delay`` and ``backoff_max_delay``.
+
+Returning Values
+================
+
+When you make a call using boto3, you will probably get back some useful information that you
+should return in the module. As well as information related to the call itself, you will also have
+some response metadata. It is OK to return this to the user as well, since they may find it useful.
+
+Boto3 returns all values CamelCased. Ansible follows Python standards for variable names and uses
+snake_case. There is a helper function in module_utils/ec2.py called `camel_dict_to_snake_dict`
+that allows you to easily convert the boto3 response to snake_case.
+
+You should use this helper function and avoid changing the names of values returned by Boto3.
+E.g. if boto3 returns a value called 'SecretAccessKey' do not change it to 'AccessKey'.
+
+.. code-block:: python
+
+    # Make a call to AWS
+    result = connection.aws_call()
+
+    # Return the result to the user
+    module.exit_json(changed=True, **camel_dict_to_snake_dict(result))
+
+Dealing with IAM JSON policy
+============================
+
+If your module accepts IAM JSON policies then set the type to 'json' in the module spec. For
+example:
+
+.. code-block:: python
+
+    argument_spec.update(
+        dict(
+            policy=dict(required=False, default=None, type='json'),
+        )
+    )
+
+Note that AWS is unlikely to return the policy in the same order that it was submitted. Therefore,
+use the `compare_policies` helper function which handles this variance.
+
+`compare_policies` takes two dictionaries, recursively sorts and makes them hashable for comparison
+and returns True if they are different.
+
+.. 
code-block:: python + + from ansible.module_utils.ec2 import compare_policies + + import json + + # some lines skipped here + + # Get the policy from AWS + current_policy = json.loads(aws_object.get_policy()) + user_policy = json.loads(module.params.get('policy')) + + # Compare the user submitted policy to the current policy ignoring order + if compare_policies(user_policy, current_policy): + # Update the policy + aws_object.set_policy(user_policy) + else: + # Nothing to do + pass + +Dealing with tags +================= + +AWS has a concept of resource tags. Usually the boto3 API has separate calls for tagging and +untagging a resource. For example, the ec2 API has a create_tags and delete_tags call. + +It is common practice in Ansible AWS modules to have a `purge_tags` parameter that defaults to +true. + +The `purge_tags` parameter means that existing tags will be deleted if they are not specified by +the Ansible task. + +There is a helper function `compare_aws_tags` to ease dealing with tags. It can compare two dicts +and return the tags to set and the tags to delete. See the Helper function section below for more +detail. + +Helper functions +================ + +Along with the connection functions in Ansible ec2.py module_utils, there are some other useful +functions detailed below. + +camel_dict_to_snake_dict +------------------------ + +boto3 returns results in a dict. The keys of the dict are in CamelCase format. In keeping with +Ansible format, this function will convert the keys to snake_case. + +``camel_dict_to_snake_dict`` takes an optional parameter called ``ignore_list`` which is a list of +keys not to convert (this is usually useful for the ``tags`` dict, whose child keys should remain with +case preserved) + +Another optional parameter is ``reversible``. By default, ``HTTPEndpoint`` is converted to ``http_endpoint``, +which would then be converted by ``snake_dict_to_camel_dict`` to ``HttpEndpoint``. 
+Passing ``reversible=True`` converts ``HTTPEndpoint`` to ``h_t_t_p_endpoint`` which converts back to ``HTTPEndpoint``.
+
+snake_dict_to_camel_dict
+------------------------
+
+`snake_dict_to_camel_dict` converts snake cased keys to camel case. By default, because it was
+first introduced for ECS purposes, this converts to dromedaryCase. An optional
+parameter called `capitalize_first`, which defaults to `False`, can be used to convert to CamelCase.
+
+ansible_dict_to_boto3_filter_list
+---------------------------------
+
+Converts an Ansible list of filters to a boto3 friendly list of dicts. This is useful for any
+boto3 `_facts` modules.
+
+boto_exception
+--------------
+
+Pass an exception returned from boto or boto3, and this function will consistently get the message from the exception.
+
+Deprecated: use `AnsibleAWSModule`'s `fail_json_aws` instead.
+
+
+boto3_tag_list_to_ansible_dict
+------------------------------
+
+Converts a boto3 tag list to an Ansible dict. Boto3 returns tags as a list of dicts containing keys
+called 'Key' and 'Value' by default. These key names can be overridden when calling the function.
+For example, if you have already camel_cased your list of tags you may want to pass lowercase key
+names instead, in other words, 'key' and 'value'.
+
+This function converts the list into a single dict where the dict key is the tag key and the dict
+value is the tag value.
+
+ansible_dict_to_boto3_tag_list
+------------------------------
+
+Opposite of above. Converts an Ansible dict to a boto3 tag list of dicts. You can again override
+the key names used if 'Key' and 'Value' are not suitable.
+
+get_ec2_security_group_ids_from_names
+-------------------------------------
+
+Pass this function a list of security group names or combination of security group names and IDs
+and this function will return a list of IDs. You should also pass the VPC ID if known because
+security group names are not necessarily unique across VPCs.
+
+compare_policies
+----------------
+
+Pass two dicts of policies to check if there are any meaningful differences; it returns true
+if there are. This recursively sorts the dicts and makes them hashable before comparison.
+
+This method should be used any time policies are being compared so that a change in order
+doesn't result in unnecessary changes.
+
+compare_aws_tags
+----------------
+
+Pass two dicts of tags and an optional purge parameter and this function will return a dict
+containing key pairs you need to modify and a list of tag key names that you need to remove. Purge
+is True by default. If purge is False then any existing tags will not be modified.
+
+This function is useful when using boto3 'add_tags' and 'remove_tags' functions. Be sure to use the
+other helper function `boto3_tag_list_to_ansible_dict` to get an appropriate tag dict before
+calling this function. Since the AWS APIs are not uniform (for example, EC2 is different from Lambda) this will work
+without modification for some (Lambda) and others may need modification before using these values
+(such as EC2, which requires the tags to unset to be in the form `[{'Key': key1}, {'Key': key2}]`).
+
+Integration Tests for AWS Modules
+=================================
+
+All new AWS modules should include integration tests to ensure that any changes in AWS APIs that
+affect the module are detected. At a minimum this should cover the key API calls and check the
+documented return values are present in the module result.
+
+For general information on running the integration tests see the :ref:`Integration Tests page of the
+Module Development Guide <testing_integration>`, especially the section on configuration for cloud tests.
+
+The integration tests for your module should be added in `test/integration/targets/MODULE_NAME`.
+
+You must also have an aliases file in `test/integration/targets/MODULE_NAME/aliases`. This file serves
+two purposes. 
First, it indicates it's an AWS test, causing the test framework to make AWS credentials
+available during the test run. Second, it puts the test in a test group, causing it to be run in the
+continuous integration build.
+
+Tests for new modules should be added to the same group as existing AWS tests. In general just copy
+an existing aliases file such as the `aws_s3 tests aliases file <https://github.com/ansible-collections/amazon.aws/blob/master/tests/integration/targets/aws_s3/aliases>`_.
+
+AWS Credentials for Integration Tests
+-------------------------------------
+
+The testing framework handles running the test with appropriate AWS credentials; these are made available
+to your test in the following variables:
+
+* `aws_region`
+* `aws_access_key`
+* `aws_secret_key`
+* `security_token`
+
+So all invocations of AWS modules in the test should set these parameters. To avoid duplicating these
+for every call, it's preferable to use :ref:`module_defaults <module_defaults>`. For example:
+
+.. code-block:: yaml
+
+  - name: set connection information for aws modules and run tasks
+    module_defaults:
+      group/aws:
+        aws_access_key: "{{ aws_access_key }}"
+        aws_secret_key: "{{ aws_secret_key }}"
+        security_token: "{{ security_token | default(omit) }}"
+        region: "{{ aws_region }}"
+
+    block:
+
+      - name: Do Something
+        ec2_instance:
+          ... params ...
+
+      - name: Do Something Else
+        ec2_instance:
+          ... params ...
+
+AWS Permissions for Integration Tests
+-------------------------------------
+
+As explained in the :ref:`Integration Test guide <testing_integration>`
+there are defined IAM policies in `mattclay/aws-terminator <https://github.com/mattclay/aws-terminator>`_ that contain the necessary permissions
+to run the AWS integration test. 
+
+If your module interacts with a new service or otherwise requires new permissions, tests will fail when you submit a pull request and the
+`Ansibullbot <https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md>`_ will tag your PR as needing revision.
+We do not automatically grant additional permissions to the roles used by the continuous integration builds.
+You will need to raise a Pull Request against `mattclay/aws-terminator <https://github.com/mattclay/aws-terminator>`_ to add them.
+
+If your PR has test failures, check carefully to be certain the failure is only due to the missing permissions. If you've ruled out other sources of failure, add a comment with the `ready_for_review`
+tag and explain that it's due to missing permissions.
+
+Your pull request cannot be merged until the tests are passing. If your pull request is failing due to missing permissions,
+you must collect the minimum IAM permissions required to
+run the tests.
+
+There are two ways to figure out which IAM permissions you need for your PR to pass:
+
+* Start with the most permissive IAM policy, run the tests to collect information about which resources your tests actually use, then construct a policy based on that output. This approach only works on modules that use `AnsibleAWSModule`.
+* Start with the least permissive IAM policy, run the tests to discover a failure, add permissions for the resource that addresses that failure, then repeat. If your module uses `AnsibleModule` instead of `AnsibleAWSModule`, you must use this approach.
+
+To start with the most permissive IAM policy:
+
+1) `Create an IAM policy <https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_create.html#access_policies_create-start>`_ that allows all actions (set ``Action`` and ``Resource`` to ``*``).
+2) Run your tests locally with this policy. 
On AnsibleAWSModule-based modules, the ``debug_botocore_endpoint_logs`` option is automatically set to ``yes``, so you should see a list of AWS ACTIONS after the PLAY RECAP showing all the permissions used. If your tests use a boto/AnsibleModule module, you must start with the least permissive policy (see below). +3) Modify your policy to allow only the actions your tests use. Restrict account, region, and prefix where possible. Wait a few minutes for your policy to update. +4) Run the tests again with a user or role that allows only the new policy. +5) If the tests fail, troubleshoot (see tips below), modify the policy, run the tests again, and repeat the process until the tests pass with a restrictive policy. +6) Open a pull request proposing the minimum required policy to the `CI policies <https://github.com/mattclay/aws-terminator/tree/master/aws/policy>`_. + +To start from the least permissive IAM policy: + +1) Run the integration tests locally with no IAM permissions. +2) Examine the error when the tests reach a failure. + a) If the error message indicates the action used in the request, add the action to your policy. + b) If the error message does not indicate the action used in the request: + - Usually the action is a CamelCase version of the method name - for example, for an ec2 client the method `describe_security_groups` correlates to the action `ec2:DescribeSecurityGroups`. + - Refer to the documentation to identify the action. + c) If the error message indicates the resource ARN used in the request, limit the action to that resource. + d) If the error message does not indicate the resource ARN used: + - Determine if the action can be restricted to a resource by examining the documentation. + - If the action can be restricted, use the documentation to construct the ARN and add it to the policy. 
+3) Add the action or resource that caused the failure to `an IAM policy <https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_create.html#access_policies_create-start>`_. Wait a few minutes for your policy to update. +4) Run the tests again with this policy attached to your user or role. +5) If the tests still fail at the same place with the same error you will need to troubleshoot (see tips below). If the first test passes, repeat steps 2 and 3 for the next error. Repeat the process until the tests pass with a restrictive policy. +6) Open a pull request proposing the minimum required policy to the `CI policies <https://github.com/mattclay/aws-terminator/tree/master/aws/policy>`_. + +Troubleshooting IAM policies +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +- When you make changes to a policy, wait a few minutes for the policy to update before re-running the tests. +- Use the `policy simulator <https://policysim.aws.amazon.com/>`_ to verify that each action (limited by resource when applicable) in your policy is allowed. +- If you're restricting actions to certain resources, replace resources temporarily with `*`. If the tests pass with wildcard resources, there is a problem with the resource definition in your policy. +- If the initial troubleshooting above doesn't provide any more insight, AWS may be using additional undisclosed resources and actions. +- Examine the AWS FullAccess policy for the service for clues. +- Re-read the AWS documentation, especially the list of `Actions, Resources and Condition Keys <https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_actions-resources-contextkeys.html>`_ for the various AWS services. +- Look at the `cloudonaut <https://iam.cloudonaut.io>`_ documentation as a troubleshooting cross-reference. +- Use a search engine. +- Ask in the Ansible IRC channel #ansible-aws (on freenode IRC). 
+
+Unsupported Integration tests
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+There are a limited number of reasons why it may not be practical to run integration
+tests for a module within CI. Where these apply you should add the keyword
+`unsupported` to the aliases file in `test/integration/targets/MODULE_NAME/aliases`.
+
+Some cases where tests should be marked as unsupported:
+1) The tests take longer than 10 or 15 minutes to complete
+2) The tests create expensive resources
+3) The tests create inline policies
+4) The tests require the existence of external resources
+5) The tests manage Account level security policies such as the password policy or AWS Organizations.
+
+Where one of these reasons applies, you should open a pull request proposing the minimum required policy to the
+`unsupported test policies <https://github.com/mattclay/aws-terminator/tree/master/hacking/aws_config/test_policies>`_.
+
+Unsupported integration tests will not be automatically run by CI. However, the
+necessary policies should be available so that the tests can be manually run by
+someone performing a PR review or writing a patch.
diff --git a/docs/docsite/rst/dev_guide/platforms/openstack_guidelines.rst b/docs/docsite/rst/dev_guide/platforms/openstack_guidelines.rst
new file mode 100644
index 00000000..8827cefb
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/platforms/openstack_guidelines.rst
@@ -0,0 +1,57 @@
+.. _OpenStack_module_development:
+
+OpenStack Ansible Modules
+=========================
+
+The OpenStack collection (on `Galaxy <https://galaxy.ansible.com/openstack/cloud>`_, source code `repository <https://opendev.org/openstack/ansible-collections-openstack.git>`_) contains modules for interacting with OpenStack as either an admin or an end user. If the module does not begin with ``os_``, it is either deprecated or soon to be deprecated. This document serves as developer coding guidelines for modules in this collection.
+
+.. 
contents:: + :local: + +Naming +------ + +* All module names should start with ``os_`` +* Name any module that a cloud consumer would expect to use after the logical resource it manages: ``os_server`` not ``os_nova``. This naming convention acknowledges that the end user does not care which service manages the resource - that is a deployment detail. For example cloud consumers may not know whether their floating IPs are managed by Nova or Neutron. +* Name any module that a cloud admin would expect to use with the service and the resource: ``os_keystone_domain``. +* If the module is one that a cloud admin and a cloud consumer could both use, + the cloud consumer rules apply. + +Interface +--------- + +* If the resource being managed has an id, it should be returned. +* If the resource being managed has an associated object more complex than + an id, it should also be returned. + +Interoperability +---------------- + +* It should be assumed that the cloud consumer does not know a bazillion + details about the deployment choices their cloud provider made, and a best + effort should be made to present one sane interface to the Ansible user + regardless of deployer insanity. +* All modules should work appropriately against all existing known public + OpenStack clouds. +* It should be assumed that a user may have more than one cloud account that + they wish to combine as part of a single Ansible-managed infrastructure. + +Libraries +--------- + +* All modules should use ``openstack_full_argument_spec`` to pick up the + standard input such as auth and ssl support. +* All modules should include ``extends_documentation_fragment: openstack``. +* All complex cloud interaction or interoperability code should be housed in + the `openstacksdk <https://git.openstack.org/cgit/openstack/openstacksdk>`_ + library. +* All OpenStack API interactions should happen via the openstacksdk and not via + OpenStack Client libraries. 
The OpenStack Client libraries do not have end
+  users as a primary audience; they are for intra-server communication.
+
+Testing
+-------
+
+* Integration testing is currently done in `OpenStack's CI system <https://git.openstack.org/cgit/openstack/openstacksdk/tree/openstack/tests/ansible>`_
+* Testing in openstacksdk produces an obvious chicken-and-egg scenario. Work is under
+  way to trigger from and report on PRs directly.
diff --git a/docs/docsite/rst/dev_guide/platforms/ovirt_dev_guide.rst b/docs/docsite/rst/dev_guide/platforms/ovirt_dev_guide.rst
new file mode 100644
index 00000000..bf461d40
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/platforms/ovirt_dev_guide.rst
@@ -0,0 +1,220 @@
+.. _oVirt_module_development:
+
+oVirt Ansible Modules
+=====================
+
+The set of modules for interacting with oVirt/RHV are currently part of the community.general collection (on `Galaxy <https://galaxy.ansible.com/community/general>`_, source code `repository <https://github.com/ansible-collections/community.general/tree/main/plugins/modules/cloud/ovirt>`_). This document serves as developer coding guidelines for creating oVirt/RHV modules.
+
+.. contents::
+   :local:
+
+Naming
+------
+
+- All modules should start with an ``ovirt_`` prefix.
+- All modules should be named after the resource it manages in singular
+  form.
+- All modules that gather information should have a ``_info``
+  suffix.
+
+Interface
+---------
+
+- Every module should return the ID of the resource it manages.
+- Every module should return the dictionary of the resource it manages.
+- Never change the name of the parameter, as we guarantee backward
+  compatibility. Use aliases instead.
+- If a parameter can't achieve idempotency for any reason, please
+  document it.
+
+Interoperability
+----------------
+
+- All modules should work against all minor versions of
+  version 4 of the API. Version 3 of the API is not supported.
+ +Libraries +--------- + +- All modules should use ``ovirt_full_argument_spec`` or + ``ovirt_info_full_argument_spec`` to pick up the standard input (such + as auth and ``fetch_nested``). +- All modules should use ``extends_documentation_fragment``: ovirt to go + along with ``ovirt_full_argument_spec``. +- All info modules should use ``extends_documentation_fragment``: + ``ovirt_info`` to go along with ``ovirt_info_full_argument_spec``. +- Functions that are common to all modules should be implemented in the + ``module_utils/ovirt.py`` file, so they can be reused. +- Python SDK version 4 must be used. + +New module development +---------------------- + +Please read :ref:`developing_modules`, +first to know what common properties, functions and features every module must +have. + +In order to achieve idempotency of oVirt entity attributes, a helper class +was created. The first thing you need to do is to extend this class and override a few +methods: + +.. code:: python + + try: + import ovirtsdk4.types as otypes + except ImportError: + pass + + from ansible.module_utils.ovirt import ( + BaseModule, + equal + ) + + class ClustersModule(BaseModule): + + # The build method builds the entity we want to create. + # Always be sure to build only the parameters the user specified + # in their yaml file, so we don't change the values which we shouldn't + # change. If you set the parameter to None, nothing will be changed. + def build_entity(self): + return otypes.Cluster( + name=self.param('name'), + comment=self.param('comment'), + description=self.param('description'), + ) + + # The update_check method checks if the update is needed to be done on + # the entity. The equal method doesn't check the values which are None, + # which means it doesn't check the values which user didn't set in yaml. + # All other values are checked and if there is found some mismatch, + # the update method is run on the entity, the entity is build by + # 'build_entity' method. 
You don't have to care about calling the update, + # it's called behind the scene by the 'BaseModule' class. + def update_check(self, entity): + return ( + equal(self.param('comment'), entity.comment) + and equal(self.param('description'), entity.description) + ) + +The code above handle the check if the entity should be updated, so we +don't update the entity if not needed and also it construct the needed +entity of the SDK. + +.. code:: python + + from ansible.module_utils.basic import AnsibleModule + from ansible.module_utils.ovirt import ( + check_sdk, + create_connection, + ovirt_full_argument_spec, + ) + + # This module will support two states of the cluster, + # either it will be present or absent. The user can + # specify three parameters: name, comment and description, + # The 'ovirt_full_argument_spec' function, will merge the + # parameters created here with some common one like 'auth': + argument_spec = ovirt_full_argument_spec( + state=dict( + choices=['present', 'absent'], + default='present', + ), + name=dict(default=None, required=True), + description=dict(default=None), + comment=dict(default=None), + ) + + # Create the Ansible module, please always implement the + # feautre called 'check_mode', for 'create', 'update' and + # 'delete' operations it's implemented by default in BaseModule: + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + # Check if the user has Python SDK installed: + check_sdk(module) + + try: + auth = module.params.pop('auth') + + # Create the connection to the oVirt engine: + connection = create_connection(auth) + + # Create the service which manages the entity: + clusters_service = connection.system_service().clusters_service() + + # Create the module which will handle create, update and delete flow: + clusters_module = ClustersModule( + connection=connection, + module=module, + service=clusters_service, + ) + + # Check the state and call the appropriate method: + state = 
module.params['state'] + if state == 'present': + ret = clusters_module.create() + elif state == 'absent': + ret = clusters_module.remove() + + # The return value of the 'create' and 'remove' method is dictionary + # with the 'id' of the entity we manage and the type of the entity + # with filled in attributes of the entity. The 'change' status is + # also returned by those methods: + module.exit_json(**ret) + except Exception as e: + # Modules can't raises exception, it always must exit with + # 'module.fail_json' in case of exception. Always use + # 'exception=traceback.format_exc' for debugging purposes: + module.fail_json(msg=str(e), exception=traceback.format_exc()) + finally: + # Logout only in case the user passed the 'token' in 'auth' + # parameter: + connection.close(logout=auth.get('token') is None) + +If your module must support action handling (for example, +virtual machine start) you must ensure that you handle the states of the +virtual machine correctly, and document the behavior of the +module: + +.. code:: python + + if state == 'running': + ret = vms_module.action( + action='start', + post_action=vms_module._post_start_action, + action_condition=lambda vm: ( + vm.status not in [ + otypes.VmStatus.MIGRATING, + otypes.VmStatus.POWERING_UP, + otypes.VmStatus.REBOOT_IN_PROGRESS, + otypes.VmStatus.WAIT_FOR_LAUNCH, + otypes.VmStatus.UP, + otypes.VmStatus.RESTORING_STATE, + ] + ), + wait_condition=lambda vm: vm.status == otypes.VmStatus.UP, + # Start action kwargs: + use_cloud_init=use_cloud_init, + use_sysprep=use_sysprep, + # ... + ) + +As you can see from the preceding example, the ``action`` method accepts the ``action_condition`` and +``wait_condition``, which are methods which accept the virtual machine +object as a parameter, so you can check whether the virtual +machine is in a proper state before the action. The rest of the +parameters are for the ``start`` action. 
You may also handle pre- +or post- action tasks by defining ``pre_action`` and ``post_action`` +parameters. + +Testing +------- + +- Integration testing is currently done in oVirt's CI system + `on Jenkins <https://jenkins.ovirt.org/view/All/job/ovirt-system-tests_ansible-suite-master/>`__ + and + `on GitHub <https://github.com/oVirt/ovirt-system-tests/tree/master/ansible-suite-master/>`__. +- Please consider using these integration tests if you create a new module or add a new feature to an existing + module. diff --git a/docs/docsite/rst/dev_guide/platforms/vmware_guidelines.rst b/docs/docsite/rst/dev_guide/platforms/vmware_guidelines.rst new file mode 100644 index 00000000..7a5c8410 --- /dev/null +++ b/docs/docsite/rst/dev_guide/platforms/vmware_guidelines.rst @@ -0,0 +1,270 @@ +.. _VMware_module_development: + +**************************************** +Guidelines for VMware module development +**************************************** + +The Ansible VMware collection (on `Galaxy <https://galaxy.ansible.com/community/vmware>`_, source code `repository <https://github.com/ansible-collections/vmware>`_) is maintained by the VMware Working Group. For further information see the `team community page <https://github.com/ansible/community/wiki/VMware>`_. + +.. contents:: + :local: + +Testing with govcsim +==================== + +Most of the existing modules are covered by functional tests. The tests are located in the :file:`test/integration/targets/`. + +By default, the tests run against a vCenter API simulator called `govcsim <https://github.com/vmware/govmomi/tree/master/vcsim>`_. ``ansible-test`` will automatically pull a `govcsim container <https://quay.io/repository/ansible/vcenter-test-container>` and use it to set-up the test environment. + +You can trigger the test of a module manually with the ``ansible-test`` command. For example, to trigger ``vcenter_folder`` tests: + +.. 
code-block:: shell + + source hacking/env-setup + ansible-test integration --python 3.7 vcenter_folder + +``govcsim`` is handy because it's much more fast that than a regular test environment. However, it does not +support all the ESXi or vCenter features. + +.. note:: + + Do not confuse ``govcsim`` with ``vcsim``. It's old outdated version of vCenter simulator whereas govcsim is new and written in go lang + +Testing with your own infrastructure +==================================== + +You can also target a regular VMware environment. This paragraph explains step by step how you can run the test-suite yourself. + +Requirements +------------ + +- 2 ESXi hosts (6.5 or 6.7) + - with 2 NIC, the second ones should be available for the test +- a VCSA host +- a NFS server +- Python dependencies: + - `pyvmomi <https://github.com/vmware/pyvmomi/tree/master/pyVmomi>` + - `requests <https://2.python-requests.org/en/master/>`. + +If you want to deploy your test environment in a hypervisor, both VMware or Libvirt <https://github.com/goneri/vmware-on-libvirt> work well. + +NFS server configuration +~~~~~~~~~~~~~~~~~~~~~~~~ + +Your NFS server must expose the following directory structure: + +.. code-block:: shell + + $ tree /srv/share/ + /srv/share/ + ├── isos + │ ├── base.iso + │ ├── centos.iso + │ └── fedora.iso + └── vms + 2 directories, 3 files + +On a Linux system, you can expose the directory over NFS with the following export file: + +.. code-block:: shell + + $ cat /etc/exports + /srv/share 192.168.122.0/255.255.255.0(rw,anonuid=1000,anongid=1000) + +.. note:: + + With this configuration all the new files will be owned by the user with the UID and GID 1000/1000. + Adjust the configuration to match your user's UID/GID. + +The service can be enabled with: + +.. code-block:: shell + + $ sudo systemctl enable --now nfs-server + + +Configure your installation +--------------------------- + +Prepare a configuration file that describes your set-up. 
The file +should be called :file:`test/integration/cloud-config-vcenter.ini` and based on +:file:`test/lib/ansible_test/config/cloud-config-vcenter.ini.template`. For instance, if you've deployed your lab with +`vmware-on-libvirt <https://github.com/goneri/vmware-on-libvirt>`: + +.. code-block:: ini + + [DEFAULT] + vcenter_username: administrator@vsphere.local + vcenter_password: !234AaAa56 + vcenter_hostname: vcenter.test + vmware_validate_certs: false + esxi1_username: root + esxi1_hostname: esxi1.test + esxi1_password: root + esxi2_username: root + esxi2_hostname: test2.test + esxi2_password: root + +If you use an HTTP proxy +------------------------- +Support for hosting test infrastructure behind an HTTP proxy is currently in development. See the following pull requests for more information: + +- ansible-test: vcenter behind an HTTP proxy <https://github.com/ansible/ansible/pull/58208> +- pyvmomi: proxy support <https://github.com/vmware/pyvmomi/pull/799> +- VMware: add support for HTTP proxy in connection API <https://github.com/ansible/ansible/pull/52936> + +Once you have incorporated the code from those PRs, specify the location of the proxy server with the two extra keys: + +.. code-block:: ini + + vmware_proxy_host: esxi1-gw.ws.testing.ansible.com + vmware_proxy_port: 11153 + +In addition, you may need to adjust the variables of the following file to match the configuration of your lab: +:file:`test/integration/targets/prepare_vmware_tests/vars/real_lab.yml`. If you use `vmware-on-libvirt <https://github.com/goneri/vmware-on-libvirt>` to prepare you lab, you don't have anything to change. + +Run the test-suite +------------------ + +Once your configuration is ready, you can trigger a run with the following command: + +.. code-block:: shell + + source hacking/env-setup + VMWARE_TEST_PLATFORM=static ansible-test integration --python 3.7 vmware_host_firewall_manager + +``vmware_host_firewall_manager`` is the name of the module to test. 
+ +``vmware_guest`` is much larger than any other test role and is rather slow. You can enable or disable some of its test playbooks in +:file:`test/integration/targets/vmware_guest/defaults/main.yml`. + + +Unit-test +========= + +The VMware modules have limited unit-test coverage. You can run the test suite with the +following commands: + +.. code-block:: shell + + source hacking/env-setup + ansible-test units --venv --python 3.7 '.*vmware.*' + +Code style and best practice +============================ + +datacenter argument with ESXi +----------------------------- + +The ``datacenter`` parameter should not use ``ha-datacenter`` by default. This is because the user may +not realize that Ansible silently targets the wrong data center. + +esxi_hostname should not be mandatory +------------------------------------- + +Depending upon the functionality provided by ESXi or vCenter, some modules can seamlessly work with both. In this case, +``esxi_hostname`` parameter should be optional. + +.. code-block:: python + + if self.is_vcenter(): + esxi_hostname = module.params.get('esxi_hostname') + if not esxi_hostname: + self.module.fail_json("esxi_hostname parameter is mandatory") + self.host = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_hostname)[0] + else: + self.host = find_obj(self.content, [vim.HostSystem], None) + if self.host is None: + self.module.fail_json(msg="Failed to find host system.") + +Example should use the fully qualified collection name (FQCN) +------------------------------------------------------------- + +Use FQCN for examples within module documentation For instance, you should use ``community.vmware.vmware_guest`` instead of just +``vmware_guest``. + +This way, the examples don't depend on the ``collections`` directive of the +playbook. 
+ +Functional tests +---------------- + +Writing new tests +~~~~~~~~~~~~~~~~~ + +If you are writing a new collection of integration tests, there are a few VMware-specific things to note beyond +the standard Ansible :ref:`integration testing<testing_integration>` process. + +The test-suite uses a set of common, pre-defined vars located in the :file:`test/integration/targets/prepare_vmware_tests/` role. +The resources defined there are automatically created by importing that role at the start of your test: + +.. code-block:: yaml + + - import_role: + name: prepare_vmware_tests + vars: + setup_datacenter: true + +This will give you a ready to use cluster, datacenter, datastores, folder, switch, dvswitch, ESXi hosts, and VMs. + +No need to create too much resources +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Most of the time, it's not necessary to use ``with_items`` to create multiple resources. By avoiding it, +you speed up the test execution and you simplify the clean up afterwards. + +VM names should be predictable +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +If you need to create a new VM during your test, you can use ``test_vm1``, ``test_vm2`` or ``test_vm3``. This +way it will be automatically clean up for you. + +Avoid the common boiler plate code in your test playbook +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +From Ansible 2.10, the test suite uses `modules_defaults`. This module +allow us to preinitialize the following default keys of the VMware modules: + +- hostname +- username +- password +- validate_certs + +For example, the following block: + +.. code-block:: yaml + + - name: Add a VMware vSwitch + vmware_vswitch: + hostname: '{{ vcenter_hostname }}' + username: '{{ vcenter_username }}' + password: '{{ vcenter_password }}' + validate_certs: 'no' + esxi_hostname: 'esxi1' + switch_name: "boby" + state: present + +should be simplified to just: + +.. 
code-block:: yaml + + - name: Add a VMware vSwitch + vmware_vswitch: + esxi_hostname: 'esxi1' + switch_name: "boby" + state: present + + +Typographic convention +====================== + +Nomenclature +------------ + +We try to enforce the following rules in our documentation: + +- VMware, not VMWare or vmware +- ESXi, not esxi or ESXI +- vCenter, not vcenter or VCenter + +We also refer to vcsim's Go implementation with ``govcsim``. This to avoid any confusion with the outdated implementation. diff --git a/docs/docsite/rst/dev_guide/shared_snippets/licensing.txt b/docs/docsite/rst/dev_guide/shared_snippets/licensing.txt new file mode 100644 index 00000000..2802c420 --- /dev/null +++ b/docs/docsite/rst/dev_guide/shared_snippets/licensing.txt @@ -0,0 +1,9 @@ +.. note:: + **LICENSING REQUIREMENTS** Ansible enforces the following licensing requirements: + + * Utilities (files in ``lib/ansible/module_utils/``) may have one of two licenses: + * A file in ``module_utils`` used **only** for a specific vendor's hardware, provider, or service may be licensed under GPLv3+. + Adding a new file under ``module_utils`` with GPLv3+ needs to be approved by the core team. + * All other ``module_utils`` must be licensed under BSD, so GPL-licensed third-party and Galaxy modules can use them. + * If there's doubt about the appropriate license for a file in ``module_utils``, the Ansible Core Team will decide during an Ansible Core Community Meeting. + * All other files shipped with Ansible, including all modules, must be licensed under the GPL license (GPLv3 or later). diff --git a/docs/docsite/rst/dev_guide/style_guide/basic_rules.rst b/docs/docsite/rst/dev_guide/style_guide/basic_rules.rst new file mode 100644 index 00000000..034aece5 --- /dev/null +++ b/docs/docsite/rst/dev_guide/style_guide/basic_rules.rst @@ -0,0 +1,69 @@ +.. _styleguide_basic: + +Basic rules +=========== +.. 
contents:: + :local: + +Use standard American English +----------------------------- +Ansible uses Standard American English. Watch for common words that are spelled differently in American English (color vs colour, organize vs organise, and so on). + +Write for a global audience +--------------------------- +Everything you say should be understandable by people of different backgrounds and cultures. Avoid idioms and regionalism and maintain a neutral tone that cannot be misinterpreted. Avoid attempts at humor. + +Follow naming conventions +------------------------- +Always follow naming conventions and trademarks. + +.. good place to link to an Ansible terminology page + +Use clear sentence structure +---------------------------- +Clear sentence structure means: + +- Start with the important information first. +- Avoid padding/adding extra words that make the sentence harder to understand. +- Keep it short - Longer sentences are harder to understand. + +Some examples of improving sentences: + +Bad: + The unwise walking about upon the area near the cliff edge may result in a dangerous fall and therefore it is recommended that one remains a safe distance to maintain personal safety. + +Better: + Danger! Stay away from the cliff. + +Bad: + Furthermore, large volumes of water are also required for the process of extraction. + +Better: + Extraction also requires large volumes of water. + +Avoid verbosity +--------------- +Write short, succinct sentences. Avoid terms like: + +- "...as has been said before," +- "..each and every," +- "...point in time," +- "...in order to," + +Highlight menu items and commands +--------------------------------- +When documenting menus or commands, it helps to **bold** what is important. + +For menu procedures, bold the menu names, button names, and so on to help the user find them on the GUI: + +1. On the **File** menu, click **Open**. +2. Type a name in the **User Name** field. +3. In the **Open** dialog box, click **Save**. +4. 
On the toolbar, click the **Open File** icon. + +For code or command snippets, use the RST `code-block directive <https://www.sphinx-doc.org/en/1.5/markup/code.html#directive-code-block>`_:: + + .. code-block:: bash + + ssh my_vyos_user@vyos.example.net + show config diff --git a/docs/docsite/rst/dev_guide/style_guide/grammar_punctuation.rst b/docs/docsite/rst/dev_guide/style_guide/grammar_punctuation.rst new file mode 100644 index 00000000..4505e2d0 --- /dev/null +++ b/docs/docsite/rst/dev_guide/style_guide/grammar_punctuation.rst @@ -0,0 +1,201 @@ + +Grammar and Punctuation +`````````````````````````````````````` + +Common Styles and Usage, and Common Mistakes +---------------------------------------------------- + +Ansible +~~~~~~~~~ +* Write "Ansible." Not "Ansible, Inc." or "AnsibleWorks The only exceptions to this rule are when we're writing legal or financial statements. + +* Never use the logotype by itself in body text. Always keep the same font you are using the rest of the sentence. + +* A company is singular in the US. In other words, Ansible is an "it," not a "they." + + +Capitalization +~~~~~~~~~~~~~~ +If it's not a real product, service, or department at Ansible, don't capitalize it. Not even if it seems important. Capitalize only the first letter of the first word in headlines. + +Colon +~~~~~~~~~~~~~~~~~ +A colon is generally used before a list or series: +- The Triangle Area consists of three cities: Raleigh, Durham, and Chapel Hill. + +But not if the list is a complement or object of an element in the sentence: +- Before going on vacation, be sure to (1) set the alarm, (2) cancel the newspaper, and (3) ask a neighbor to collect your mail. + +Use a colon after "as follows" and "the following" if the related list comes immediately after: +wedge The steps for changing directories are as follows: + + 1. Open a terminal. + 2. Type cd... 
+ +Use a colon to introduce a bullet list (or dash, or icon/symbol of your choice): + + In the Properties dialog box, you'll find the following entries: + + - Connection name + - Count + - Cost per item + + +Commas +~~~~~~~~~~~ +Use serial commas, the comma before the "and" in a series of three or more items: + +- "Item 1, item 2, and item 3." + + +It's easier to read that way and helps avoid confusion. The primary exception to this you will see is in PR, where it is traditional not to use serial commas because it is often the style of journalists. + +Commas are always important, considering the vast difference in meanings of the following two statements. + +- Let's eat, Grandma +- Let's eat Grandma. + +Correct punctuation could save Grandma's life. + +If that does not convince you, maybe this will: + +.. image:: images/commas-matter.jpg + + +Contractions +~~~~~~~~~~~~~ +Do not use contractions in Ansible documents. + +Em dashes +~~~~~~~~~~ +When possible, use em-dashes with no space on either side. When full em-dashes aren't available, use double-dashes with no spaces on either side--like this. + +A pair of em dashes can be used in place of commas to enhance readability. Note, however, that dashes are always more emphatic than commas. + +A pair of em dashes can replace a pair of parentheses. Dashes are considered less formal than parentheses; they are also more intrusive. If you want to draw attention to the parenthetical content, use dashes. If you want to include the parenthetical content more subtly, use parentheses. + +.. note:: + When dashes are used in place of parentheses, surrounding punctuation should be omitted. Compare the following examples. + +:: + + Upon discovering the errors (all 124 of them), the publisher immediately recalled the books. + + Upon discovering the errors—all 124 of them—the publisher immediately recalled the books. + + +When used in place of parentheses at the end of a sentence, only a single dash is used. 
+ +:: + + After three weeks on set, the cast was fed up with his direction (or, rather, lack of direction). + + After three weeks on set, the cast was fed up with his direction—or, rather, lack of direction. + + +Exclamation points (!) +~~~~~~~~~~~~~~~~~~~~~~~ +Do not use them at the end of sentences. An exclamation point can be used when referring to a command, such as the bang (!) command. + +Gender References +~~~~~~~~~~~~~~~~~~ +Do not use gender-specific pronouns in documentation. It is far less awkward to read a sentence that uses "they" and "their" rather than "he/she" and "his/hers." + +It is fine to use "you" when giving instructions and "the user," "new users," and so on. in more general explanations. + +Never use "one" in place of "you" when writing technical documentation. Using "one" is far too formal. + +Never use "we" when writing. "We" aren't doing anything on the user side. Ansible's products are doing the work as requested by the user. + + +Hyphen +~~~~~~~~~~~~~~ +The hyphen's primary function is the formation of certain compound terms. Do not use a hyphen unless it serves a purpose. If a compound adjective cannot be misread or, as with many psychological terms, its meaning is established, a hyphen is not necessary. + +Use hyphens to avoid ambiguity or confusion: + +:: + + a little-used car + a little used-car + + cross complaint + cross-complaint + + high-school girl + high schoolgirl + + fine-tooth comb (most people do not comb their teeth) + + third-world war + third world war + +.. image:: images/hyphen-funny.jpg + +In professionally printed material (particularly books, magazines, and newspapers), the hyphen is used to divide words between the end of one line and the beginning of the next. This allows for an evenly aligned right margin without highly variable (and distracting) word spacing. + + +Lists +~~~~~~~ +Keep the structure of bulleted lists equivalent and consistent. If one bullet is a verb phrase, they should all be verb phrases. 
If one is a complete sentence, they should all be complete sentences, and so on. + +Capitalize the first word of each bullet. Unless it is obvious that it is just a list of items, such as a list of items like: +* computer +* monitor +* keyboard +* mouse + +When the bulleted list appears within the context of other copy, (unless it's a straight list like the previous example) add periods, even if the bullets are sentence fragments. Part of the reason behind this is that each bullet is said to complete the original sentence. + +In some cases where the bullets are appearing independently, such as in a poster or a homepage promotion, they do not need periods. + +When giving instructional steps, use numbered lists instead of bulleted lists. + + +Months and States +~~~~~~~~~~~~~~~~~~~~ +Abbreviate months and states according to AP. Months are only abbreviated if they are used in conjunction with a day. Example: "The President visited in January 1999." or "The President visited Jan. 12." + +Months: Jan., Feb., March, April, May, June, July, Aug., Sept., Nov., Dec. + +States: Ala., Ariz., Ark., Calif., Colo., Conn., Del., Fla., Ga., Ill., Ind., Kan., Ky., La., Md., Mass., Mich., Minn., Miss., Mo., Mont., Neb., Nev., NH, NJ, NM, NY, NC, ND, Okla., Ore., Pa., RI, SC, SD, Tenn., Vt., Va., Wash., W.Va., Wis., Wyo. + +Numbers +~~~~~~~~~ +Numbers between one and nine are written out. 10 and above are numerals. The exception to this is writing "4 million" or "4 GB." It's also acceptable to use numerals in tables and charts. + +Phone Numbers ++++++++++++++++ + +Phone number style: 1 (919) 555-0123 x002 and 1 888-GOTTEXT + + +Quotations (Using Quotation Marks and Writing Quotes) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + "Place the punctuation inside the quotes," the editor said. + +Except in rare instances, use only "said" or "says" because anything else just gets in the way of the quote itself, and also tends to editorialize. 
+ +Place the name first right after the quote: + "I like to write first-person because I like to become the character I'm writing," Wally Lamb said. + +Not: + "I like to write first-person because I like to become the character I'm writing," said Wally Lamb. + + +Semicolon +~~~~~~~~~~~~~~~ +Use a semicolon to separate items in a series if the items contain commas: + +- Everyday I have coffee, toast, and fruit for breakfast; a salad for lunch; and a peanut butter sandwich, cookies, ice cream, and chocolate cake for dinner. + +Use a semicolon before a conjunctive adverb (however, therefore, otherwise, namely, for example, and so on): +- I think; therefore, I am. + +Spacing after sentences +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Use only a single space after a sentence. + +Time +~~~~~~~~ +* Time of day is written as "4 p.m." diff --git a/docs/docsite/rst/dev_guide/style_guide/images/commas-matter-2.jpg b/docs/docsite/rst/dev_guide/style_guide/images/commas-matter-2.jpg Binary files differnew file mode 100644 index 00000000..2dec81c4 --- /dev/null +++ b/docs/docsite/rst/dev_guide/style_guide/images/commas-matter-2.jpg diff --git a/docs/docsite/rst/dev_guide/style_guide/images/commas-matter.jpg b/docs/docsite/rst/dev_guide/style_guide/images/commas-matter.jpg Binary files differnew file mode 100644 index 00000000..1699a31a --- /dev/null +++ b/docs/docsite/rst/dev_guide/style_guide/images/commas-matter.jpg diff --git a/docs/docsite/rst/dev_guide/style_guide/images/hyphen-funny.jpg b/docs/docsite/rst/dev_guide/style_guide/images/hyphen-funny.jpg Binary files differnew file mode 100644 index 00000000..d642703f --- /dev/null +++ b/docs/docsite/rst/dev_guide/style_guide/images/hyphen-funny.jpg diff --git a/docs/docsite/rst/dev_guide/style_guide/images/thenvsthan.jpg b/docs/docsite/rst/dev_guide/style_guide/images/thenvsthan.jpg Binary files differnew file mode 100644 index 00000000..f4851b07 --- /dev/null +++ b/docs/docsite/rst/dev_guide/style_guide/images/thenvsthan.jpg diff --git 
a/docs/docsite/rst/dev_guide/style_guide/index.rst b/docs/docsite/rst/dev_guide/style_guide/index.rst new file mode 100644 index 00000000..a50a3180 --- /dev/null +++ b/docs/docsite/rst/dev_guide/style_guide/index.rst @@ -0,0 +1,244 @@ +.. _style_guide: + +******************* +Ansible style guide +******************* + +Welcome to the Ansible style guide! +To create clear, concise, consistent, useful materials on docs.ansible.com, follow these guidelines: + +.. contents:: + :local: + +Linguistic guidelines +===================== + +We want the Ansible documentation to be: + +* clear +* direct +* conversational +* easy to translate + +We want reading the docs to feel like having an experienced, friendly colleague +explain how Ansible works. + +Stylistic cheat-sheet +--------------------- + +This cheat-sheet illustrates a few rules that help achieve the "Ansible tone": + ++-------------------------------+------------------------------+----------------------------------------+ +| Rule | Good example | Bad example | ++===============================+==============================+========================================+ +| Use active voice | You can run a task by | A task can be run by | ++-------------------------------+------------------------------+----------------------------------------+ +| Use the present tense | This command creates a | This command will create a | ++-------------------------------+------------------------------+----------------------------------------+ +| Address the reader | As you expand your inventory | When the number of managed nodes grows | ++-------------------------------+------------------------------+----------------------------------------+ +| Use standard English | Return to this page | Hop back to this page | ++-------------------------------+------------------------------+----------------------------------------+ +| Use American English | The color of the output | The colour of the output | 
++-------------------------------+------------------------------+----------------------------------------+ + +Header case +----------- + +Headers should be written in sentence case. For example, this section's title is +``Header case``, not ``Header Case`` or ``HEADER CASE``. + + +Avoid using Latin phrases +------------------------- + +Latin words and phrases like ``e.g.`` or ``etc.`` +are easily understood by English speakers. +They may be harder to understand for others and are also tricky for automated translation. + +Use the following English terms in place of Latin terms or abbreviations: + ++-------------------------------+------------------------------+ +| Latin | English | ++===============================+==============================+ +| i.e | in other words | ++-------------------------------+------------------------------+ +| e.g. | for example | ++-------------------------------+------------------------------+ +| etc | and so on | ++-------------------------------+------------------------------+ +| via | by/ through | ++-------------------------------+------------------------------+ +| vs./versus | rather than/against | ++-------------------------------+------------------------------+ + + +reStructuredText guidelines +=========================== + +The Ansible documentation is written in reStructuredText and processed by Sphinx. +We follow these technical or mechanical guidelines on all rST pages: + +Header notation +--------------- + +`Section headers in reStructuredText <https://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html#sections>`_ +can use a variety of notations. +Sphinx will 'learn on the fly' when creating a hierarchy of headers. +To make our documents easy to read and to edit, we follow a standard set of header notations. +We use: + +* ``###`` with overline, for parts: + +.. code-block:: rst + + ############### + Developer guide + ############### + +* ``***`` with overline, for chapters: + +.. 
code-block:: rst + + ******************* + Ansible style guide + ******************* + +* ``===`` for sections: + +.. code-block:: rst + + Mechanical guidelines + ===================== + +* ``---`` for subsections: + +.. code-block:: rst + + Internal navigation + ------------------- + +* ``^^^`` for sub-subsections: + +.. code-block:: rst + + Adding anchors + ^^^^^^^^^^^^^^ + +* ``"""`` for paragraphs: + +.. code-block:: rst + + Paragraph that needs a title + """""""""""""""""""""""""""" + + +Internal navigation +------------------- + +`Anchors (also called labels) and links <https://www.sphinx-doc.org/en/master/usage/restructuredtext/roles.html#ref-role>`_ +work together to help users find related content. +Local tables of contents also help users navigate quickly to the information they need. +All internal links should use the ``:ref:`` syntax. +Every page should have at least one anchor to support internal ``:ref:`` links. +Long pages, or pages with multiple levels of headers, can also include a local TOC. + +.. _adding_anchors_rst: + +Adding anchors +^^^^^^^^^^^^^^ + +* Include at least one anchor on every page +* Place the main anchor above the main header +* If the file has a unique title, use that for the main page anchor:: + + .. _unique_page:: + +* You may also add anchors elsewhere on the page + +Adding internal links +^^^^^^^^^^^^^^^^^^^^^ + +* All internal links must use ``:ref:`` syntax. These links both point to the anchor defined above: + +.. code-block:: rst + + :ref:`unique_page` + :ref:`this page <unique_page>` + +The second example adds custom text for the link. + +Adding links to modules and plugins +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Ansible 2.10 and later require the extended Fully Qualified Collection Name (FQCN) as part of the links: + +.. code-block:: text + + ansible_collections. + FQCN + _module + +For example: + + .. 
code-block:: rst + + :ref:`ansible.builtin.first_found lookup plugin <ansible_collections.ansible.builtin.first_found_lookup>` + +displays as :ref:`ansible.builtin.first_found lookup plugin <ansible_collections.ansible.builtin.first_found_lookup>`. + +Modules require different suffixes from other plugins: + +* Module links use this extended FQCN module name with ``_module`` for the anchor. +* Plugin links use this extended FQCN plugin name with the plugin type (``_connection`` for example). + +.. code-block:: rst + + :ref:`arista.eos.eos_config <ansible_collections.arista.eos.eos_config_module>` + :ref:`community.kubernetes.kubectl connection plugin <ansible_collections.community.kubernetes.kubectl_connection>` + +.. note:: + + ``ansible.builtin`` is the FQCN for modules included in ``ansible.base``. Documentation links are the only place you prepend ``ansible_collections`` to the FQCN. This is used by the documentation build scripts to correctly fetch documentation from collections on Ansible Galaxy. + +.. _local_toc: + +Adding local TOCs +^^^^^^^^^^^^^^^^^ + +The page you're reading includes a `local TOC <http://docutils.sourceforge.net/docs/ref/rst/directives.html#table-of-contents>`_. +If you include a local TOC: + +* place it below, not above, the main heading and (optionally) introductory text +* use the ``:local:`` directive so the page's main header is not included +* do not include a title + +The syntax is: + +.. code-block:: rst + + .. contents:: + :local: + +More resources +============== + +These pages offer more help with grammatical, stylistic, and technical rules for documentation. + +.. toctree:: + :maxdepth: 1 + + basic_rules + voice_style + trademarks + grammar_punctuation + spelling_word_choice + search_hints + resources + +.. 
seealso:: + + :ref:`community_documentation_contributions` + How to contribute to the Ansible documentation + :ref:`testing_documentation_locally` + How to build the Ansible documentation + `irc.freenode.net <http://irc.freenode.net>`_ + #ansible-docs IRC chat channel diff --git a/docs/docsite/rst/dev_guide/style_guide/resources.rst b/docs/docsite/rst/dev_guide/style_guide/resources.rst new file mode 100644 index 00000000..c624b12e --- /dev/null +++ b/docs/docsite/rst/dev_guide/style_guide/resources.rst @@ -0,0 +1,10 @@ +Resources +```````````````` +* Follow the style of the :ref:`Ansible Documentation<ansible_documentation>` +* Ask for advice on IRC, on the ``#ansible-devel`` Freenode channel +* Review these online style guides: + + * `AP Stylebook <https://www.apstylebook.com>`_ + * `Chicago Manual of Style <https://www.chicagomanualofstyle.org/home.html>`_ + * `Strunk and White's Elements of Style <https://www.crockford.com/wrrrld/style.html>`_ + diff --git a/docs/docsite/rst/dev_guide/style_guide/search_hints.rst b/docs/docsite/rst/dev_guide/style_guide/search_hints.rst new file mode 100644 index 00000000..d9bf3f66 --- /dev/null +++ b/docs/docsite/rst/dev_guide/style_guide/search_hints.rst @@ -0,0 +1,48 @@ + +.. _search_hints: + +Writing documentation so search can find it +------------------------------------------- + +One of the keys to writing good documentation is to make it findable. Readers use a combination of internal site search and external search engines such as Google or duckduckgo. + +To ensure Ansible documentation is findable, you should: + +#. Use headings that clearly reflect what you are documenting. +#. Use numbered lists for procedures or high-level steps where possible. +#. Avoid linking to github blobs where possible. + + +Using clear headings in documentation +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +We all use simple English when we want to find something. 
For example, the title of this page could have been any one of the following: + +* Search optimization +* Findable documentation +* Writing for findability + +What we are really trying to describe is - how do I write documentation so search engines can find my content? That simple phrase is what drove the title of this section. When you are creating your headings for documentation, spend some time to think about what you would type in a search box to find it, or more importantly, how someone less familiar with Ansible would try to find that information. Your heading should be the answer to that question. + +One word of caution - you do want to limit the size of your headings. A full heading such as `How do I write documentation so search engines can find my content?` is too long. Search engines would truncate anything over 50 - 60 characters. Long headings would also wrap on smaller devices such as a smart phone. + +Using numbered lists for `zero position` snippets +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Google can optimize the search results by adding a `feature snippet <https://support.google.com/websearch/answer/9351707>`_ at the top of the search results. This snippet provides a small window into the documentation on that first search result that adds more detail than the rest of the search results, and can occasionally answer the reader's questions right there, or at least verify that the linked page is what the reader is looking for. + +Google returns the feature snippet in the form of numbered steps. Where possible, you should add a numbered list near the top of your documentation page, where appropriate. The steps can be the exact procedure a reader would follow, or could be a high level introduction to the documentation topic, such as the numbered list at the top of this page. 
+ +Problems with github blobs on search results +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Search engines do not typically return github blobs in search results, at least not in higher ranked positions. While it is possible and sometimes necessary to link to github blobs from documentation, the better approach would be to copy that information into an .rst page in Ansible documentation. + +Other search hints +^^^^^^^^^^^^^^^^^^ + +While it may not be possible to adapt your documentation to all search optimizations, keep the following in mind as you write your documentation: + +* **Search engines don't parse beyond the `#` in an html page.** So for example, all the subheadings on this page are appended to the main page URL. As such, when I search for 'Using number lists for zero position snippets', the search result would be a link to the top of this page, not a link directly to the subheading I searched for. Using :ref:`local TOCs <local_toc>` helps alleviate this problem as the reader can scan for the header at top of the page and click to the section they are looking for. For critical documentation, consider creating a new page that can be a direct search result page. + +* **Make your first few sentences clearly describe your page topic.** Search engines return not just the URL, but a short description of the information at the URL. For Ansible documentation, we do not have description metadata embedded on each page. Instead, the search engines return the first couple of sentences (140 characters) on the page. That makes your first sentence or two very important to the reader who is searching for something in Ansible. 
diff --git a/docs/docsite/rst/dev_guide/style_guide/spelling_word_choice.rst b/docs/docsite/rst/dev_guide/style_guide/spelling_word_choice.rst new file mode 100644 index 00000000..3f6d8d7b --- /dev/null +++ b/docs/docsite/rst/dev_guide/style_guide/spelling_word_choice.rst @@ -0,0 +1,327 @@ +Spelling - Word Usage - Common Words and Phrases to Use and Avoid +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Acronyms +++++++++++++++++ + +Always uppercase. An acronym is a word formed from the initial letters of a name, such as ROM for Read-only memory, +SaaS for Software as a Service, or by combining initial letters or part of a series of words, such as LILO for LInux +LOader. + +Spell out the acronym before using it in alone text, such as "The Embedded DevKit (EDK)..." + +Applications ++++++++++++++++++++ +When used as a proper name, use the capitalization of the product, such as GNUPro, Source-Navigator, and Ansible Tower. When used as a command, use lowercase as appropriate, such as "To start GCC, type ``gcc``." + +.. note:: + + "vi" is always lowercase. + +As +++++++++ +This is often used to mean "because", but has other connotations, for example, parallel or simultaneous actions. If you mean "because", say "because". + +Asks for +++++++++++++++++ +Use "requests" instead. + +Assure/Ensure/Insure +++++++++++++++++++++++++++++ +Assure implies a sort of mental comfort. As in "I assured my husband that I would eventually bring home beer." + +Ensure means "to make sure." + +Insure relates to monetary insurance. + + +Back up +++++++++++++++ +This is a verb. You "back up" files; you do not "backup" files. + +Backup +++++++++++ +This is a noun. You create "backup" files; you do not create "back up" files. + +Backward +++++++++++++++ +Correct. Avoid using backwards unless you are stating that something has "backwards compatibility." + +Backwards compatibility +++++++++++++++++++++++++ +Correct as is. + +By way of +++++++++++++++++++ +Use "using" instead. 
+ +Can/May +++++++++++++++ +Use "can" to describe actions or conditions that are possible. Use "may" only to describe situations where permission is being given. If either "can," "could," or "may" apply, use "can" because it's less tentative. + +CD or cd ++++++++++++++++ +When referring to a compact disk, use CD, such as "Insert the CD into the CD-ROM drive." When referring to the change directory command, use cd. + +CD-ROM ++++++++++++++ +Correct. Do not use "cdrom," "CD-Rom," "CDROM," "cd-rom" or any other variation. When referring to the drive, use CD-ROM drive, such as "Insert the CD into the CD-ROM drive." The plural is "CD-ROMs." + + +Command line ++++++++++++++++++++ +Correct. Do not use "command-line" or "commandline" as a noun. If used as an adjective, "command-line" is appropriate, for example "command-line arguments". + +Use "command line" to describes where to place options for a command, but not where to type the command. Use "shell prompt" instead to describe where to type commands. The line on the display screen where a command is expected. Generally, the command line is the line that contains the most recently displayed command prompt. + + +Daylight saving time (DST) ++++++++++++++++++++++++++++++++ + +Correct. Do not use daylight savings time. Daylight Saving Time (DST) is often misspelled "Daylight Savings", with an "s" at the end. Other common variations are "Summer Time"and "Daylight-Saving Time". (https://www.timeanddate.com/time/dst/daylight-savings-time.html) + + +Download +++++++++++++++++ +Correct. Do not use "down load" or "down-load." + +e.g. +++++++++++ +Spell it out: "For example." + +Failover ++++++++++++++++ +When used as a noun, a failover is a backup operation that automatically switches to a standby database, server or network if the primary system fails or is temporarily shut down for servicing. Failover is an important fault tolerance function of mission-critical systems that rely on constant accessibility. 
Failover automatically and transparently to the user redirects requests from the failed or down system to the backup system that mimics the operations of the primary system. + +Fail over +++++++++++++ +When used as a verb, fail over is two words since there can be different tenses such as failed over. + +Fewer ++++++++++++++++++++ +Fewer is used with plural nouns. Think things you could count. Time, money, distance, and weight are often listed as exceptions to the traditional "can you count it" rule, often thought of a singular amounts (the work will take less than 5 hours, for example). + +File name ++++++++++++++ +Correct. Do not use "filename." + +File system ++++++++++++++++++++ +Correct. Do not use "filesystem." The system that an operating system or program uses to organize and keep track of files. For example, a hierarchical file system is one that uses directories to organize files into a tree structure. Although the operating system provides its own file management system, you can buy separate file management systems. These systems interact smoothly with the operating system but provide more features, such as improved backup procedures and stricter file protection. + +For instance +++++++++++++++ +For example," instead. + +For further/additional/whatever information +++++++++++++++++++++++++++++++++++++++++++++++ +Use "For more information" + +For this reason +++++++++++++++++++ +Use "therefore". + +Forward +++++++++++++++ +Correct. Avoid using "forwards." + +Gigabyte (GB) +++++++++++++++ +2 to the 30th power (1,073,741,824) bytes. One gigabyte is equal to 1,024 megabytes. Gigabyte is often abbreviated as G or GB. + +Got +++++++++++++++ +Avoid. Use "must" instead. + +High-availability +++++++++++++++++++ +Correct. Do not use "high availability." + +Highly available +++++++++++++++++++ +Correct. Do not use highly-available." + +Hostname ++++++++++++++++++ +Correct. Do not use host name. + +i.e. +++++++++++++++ +Spell it out: "That is." 
+ +Installer +++++++++++++++ +Avoid. Use "installation program" instead. + +It's and its +++++++++++++++ +"It's" is a contraction for "it is;" use "it is" instead of "it's." Use "its" as a possessive pronoun (for example, "the store is known for its low prices"). + +Less +++++++++++++ +Less is used with singular nouns. For example "View less details" wouldn't be correct but "View less detail" works. Use fewer when you have plural nouns (things you can count). + +Linux +++++++++++++++ +Correct. Do not use "LINUX" or "linux" unless referring to a command, such as "To start Linux, type linux." Linux is a registered trademark of Linus Torvalds. + +Login +++++++++++++++ +A noun used to refer to the login prompt, such as "At the login prompt, enter your username." + +Log in +++++++++++++++ +A verb used to refer to the act of logging in. Do not use "login," "loggin," "logon," and other variants. For example, "When starting your computer, you are requested to log in..." + +Log on +++++++++++++++ +To make a computer system or network recognize you so that you can begin a computer session. Most personal computers have no log-on procedure -- you just turn the machine on and begin working. For larger systems and networks, however, you usually need to enter a username and password before the computer system will allow you to execute programs. + +Lots of +++++++++++++++ +Use "Several" or something equivalent instead. + +Make sure +++++++++++++++ +This means "be careful to remember, attend to, or find out something." For example, "...make sure that the rhedk group is listed in the output." +Try to use verify or ensure instead. + +Manual/man page +++++++++++++++++++ +Correct. Two words. Do not use "manpage" + +MB +++++++++ +(1) When spelled MB, short for megabyte (1,000,000 or 1,048,576 bytes, depending on the context). +(2) When spelled Mb, short for megabit. + +MBps +++++++++++++++ +Short for megabytes per second, a measure of data transfer speed. 
Mass storage devices are generally measured in MBps. + +MySQL +++++++++++++++ +Common open source database server and client package. Do not use "MYSQL" or "mySQL." + +Need to +++++++++++++++ +Avoid. Use "must" instead. + +Read-only +++++++++++++ +Correct. Use when referring to the access permissions of files or directories. + +Real time/real-time +++++++++++++++++++++++ +Depends. If used as a noun, it is the actual time during which something takes place. For example, "The computer may partly analyze the data in real time (as it comes in) -- R. H. March." If used as an adjective, "real-time" is appropriate. For example, "XEmacs is a self-documenting, customizable, extensible, real-time display editor." + +Refer to +++++++++++++++ +Use to indicate a reference (within a manual or website) or a cross-reference (to another manual or documentation source). + +See +++++++++++++++ +Don't use. Use "Refer to" instead. + +Since +++++++++ +This is often used to mean "because", but "since" has connotations of time, so be careful. If you mean "because", say "because". + +Tells +++++++++++++++ +Use "Instructs" instead. + +That/which +++++++++++++++ +"That" introduces a restrictive clause-a clause that must be there for the sentence to make sense. A restrictive clause often defines the noun or phrase preceding it. "Which" introduces a non-restrictive, parenthetical clause-a clause that could be omitted without affecting the meaning of the sentence. For example: The car was travelling at a speed that would endanger lives. The car, which was traveling at a speed that would endanger lives, swerved onto the sidewalk. Use "who" or "whom," rather than "that" or "which," when referring to a person. + +Then/than +++++++++++++++ + "Then" refers to a time in the past or the next step in a sequence. "Than" is used for comparisons. + +.. image:: images/thenvsthan.jpg + +Third-party +++++++++++++++ +Correct. Do not use "third party". + +Troubleshoot +++++++++++++++ +Correct. 
Do not use "trouble shoot" or "trouble-shoot." To isolate the source of a problem and fix it. In the case of computer systems, the term troubleshoot is usually used when the problem is suspected to be hardware -related. If the problem is known to be in software, the term debug is more commonly used. + +UK +++++++++++++++ +Correct as is, no periods. + +UNIX® +++++++++++++++ +Correct. Do not use "Unix" or "unix." UNIX® is a registered trademark of The Open Group. + +Unset +++++++++++++++ +Don't use. Use Clear. + +US +++++++++++++++ +Correct as is, no periods. + +User +++++++++++++++ +When referring to the reader, use "you" instead of "user." For example, "The user must..." is incorrect. Use "You must..." instead. If referring to more than one user, calling the collection "users" is acceptable, such as "Other users may wish to access your database." + +Username +++++++++++++++ +Correct. Do not use "user name." + +View +++++++++++++++ +When using as a reference ("View the documentation available online."), do not use View. Use "Refer to" instead. + +Within +++++++++++++++ +Don't use to refer to a file that exists in a directory. Use "In". + +World Wide Web +++++++++++++++ +Correct. Capitalize each word. Abbreviate as "WWW" or "Web." + +Webpage +++++++++++++++ +Correct. Do not use "web page" or "Web page." + +Web server +++++++++++++++ +Correct. Do not use "webserver". For example, "The Apache HTTP Server is the default Web server..." + +Website +++++++++++++++ +Correct. Do not use "web site" or "Web site." For example, "The Ansible website contains ..." + +Who/whom +++++++++++++++ +Use the pronoun "who" as a subject. Use the pronoun "whom" as a direct object, an indirect object, or the object of a preposition. For example: Who owns this? To whom does this belong? + +Will +++++++++++++++ +Do not use future tense unless it is absolutely necessary. For instance, do not use the sentence, "The next section will describe the process in more detail." 
Instead, use the sentence, "The next section describes the process in more detail." + +Wish +++++++++++++++ +Use "need" instead of "desire" and "wish." Use "want" when the reader's actions are optional (that is, they may not "need" something but may still "want" something). + +x86 +++++++++++++++ +Correct. Do not capitalize the "x." + +x86_64 +++++++++++++++ +Do not use. Do not use "Hammer". Always use "AMD64 and Intel® EM64T" when referring to this architecture. + +You +++++++++++++++ +Correct. Do not use "I," "he," or "she." + +You may +++++++++++++++ +Try to avoid using this. For example, "you may" can be eliminated from this sentence "You may double-click on the desktop..." + diff --git a/docs/docsite/rst/dev_guide/style_guide/trademarks.rst b/docs/docsite/rst/dev_guide/style_guide/trademarks.rst new file mode 100644 index 00000000..266f16bd --- /dev/null +++ b/docs/docsite/rst/dev_guide/style_guide/trademarks.rst @@ -0,0 +1,96 @@ + +Trademark Usage +`````````````````````````````````````` +Why is it important to use the TM, SM, and ® for our registered marks? + +Before a trademark is registered with the United States Patent and Trademark Office it is appropriate to use the TM or SM symbol depending whether the product is for goods or services. It is important to use the TM or SM as it is notification to the public that Ansible claims rights to the mark even though it has not yet been registered. + +Once the trademark is registered, it is appropriate to use the symbol in place of the TM or SM. The symbol designation must be used in conjunction with the trademark if Ansible is to fully protect its rights. If we don't protect these marks, we run the risk of losing them in the way of Aspirin or Trampoline or Escalator. + +General Rules: ++++++++++++++++ + +Trademarks should be used on 1st references on a page or within a section. + +Use Red Hat® Ansible Tower® or Ansible®, on first reference when referring to products. 
+ +Use "Ansible" alone as the company name, as in "Ansible announced quarterly results," which is not marked. + +Also add the trademark disclaimer. +* When using Ansible trademarks in the body of written text, you should use the following credit line in a prominent place, usually a footnote. + + For Registered Trademarks: + - [Name of Trademark] is a registered trademark of Red Hat, Inc. in the United States and other countries. + + For Unregistered Trademarks (TMs/SMs): + - [Name of Trademark] is a trademark of Red Hat, Inc. in the United States and other countries. + + For registered and unregistered trademarks: + - [Name of Trademark] is a registered trademark and [Name of Trademark] is a trademark of Red Hat, Inc. in the United States and other countries. + +Guidelines for the proper use of trademarks: ++++++++++++++++++++++++++++++++++++++++++++++ + + Always distinguish trademarks from surround text with at least initial capital letters or in all capital letters. + +Always use proper trademark form and spelling. + +Never use a trademark as a noun. Always use a trademark as an adjective modifying the noun. + + Correct: + Red Hat® Ansible Tower® system performance is incredible. + + Incorrect: + Ansible's performance is incredible. + +Never use a trademark as a verb. Trademarks are products or services, never actions. + + Correct: + "Orchestrate your entire network using Red Hat® Ansible Tower®." + + Incorrect: + "Ansible your entire network." + +Never modify a trademark to a plural form. Instead, change the generic word from the singular to the plural. + + Correct: + "Corporate demand for Red Hat® Ansible Tower® configuration software is surging." + + Incorrect: + "Corporate demand for Ansible is surging." + +Never modify a trademark from its possessive form, or make a trademark possessive. Always use it in the form it has been registered. + +Never translate a trademark into another language. + +Never use trademarks to coin new words or names. 
+ +Never use trademarks to create a play on words. + +Never alter a trademark in any way including through unapproved fonts or visual identifiers. + +Never abbreviate or use any Ansible trademarks as an acronym. + +The importance of Ansible trademarks +++++++++++++++++++++++++++++++++++++++++++++++++ + +The Ansible trademark and the "A" logo in a shaded circle are our most valuable assets. The value of these trademarks encompass the Ansible Brand. Effective trademark use is more than just a name, it defines the level of quality the customer will receive and it ties a product or service to a corporate image. A trademark may serve as the basis for many of our everyday decisions and choices. The Ansible Brand is about how we treat customers and each other. In order to continue to build a stronger more valuable Brand we must use it in a clear and consistent manner. + +The mark consists of the letter "A" in a shaded circle. As of 5/11/15, this was a pending trademark (registration in process). + +Common Ansible Trademarks ++++++++++++++++++++++++++++++++++++++++ +* Ansible® +* Ansible Tower® + +Other Common Trademarks and Resource Sites: +++++++++++++++++++++++++++++++++++++++++++++++++ +- Linux is a registered trademark of Linus Torvalds. +- UNIX® is a registered trademark of The Open Group. +- Microsoft, Windows, Vista, XP, and NT are registered trademarks or trademarks of Microsoft Corporation in the United States and/or other countries. https://www.microsoft.com/en-us/legal/intellectualproperty/trademarks/en-us.aspx +- Apple, Mac, Mac OS, Macintosh, Pages and TrueType are either registered trademarks or trademarks of Apple Computer, Inc. in the United States and/or other countries. 
https://www.apple.com/legal/intellectual-property/trademark/appletmlist.html +- Adobe, Acrobat, GoLive, InDesign, Illustrator, PostScript , PhotoShop and the OpenType logo are either registered trademarks or trademarks of Adobe Systems Incorporated in the United States and/or other countries. https://www.adobe.com/legal/permissions/trademarks.html +- Macromedia and Macromedia Flash are trademarks of Macromedia, Inc. https://www.adobe.com/legal/permissions/trademarks.html +- IBM is a registered trademark of International Business Machines Corporation. https://www.ibm.com/legal/us/en/copytrade.shtml +- Celeron, Celeron Inside, Centrino, Centrino logo, Core Inside, Intel Core, Intel Inside, Intel Inside logo, Itanium, Itanium Inside, Pentium, Pentium Inside,VTune, Xeon, and Xeon Inside are trademarks or registered trademarks of Intel Corporation or its subsidiaries in the United States and other countries. https://www.intel.com/content/www/us/en/legal/trademarks.html + diff --git a/docs/docsite/rst/dev_guide/style_guide/voice_style.rst b/docs/docsite/rst/dev_guide/style_guide/voice_style.rst new file mode 100644 index 00000000..0dff7a87 --- /dev/null +++ b/docs/docsite/rst/dev_guide/style_guide/voice_style.rst @@ -0,0 +1,20 @@ + +Voice Style +````````````````````` +The essence of the Ansible writing style is short sentences that flow naturally together. Mix up sentence structures. Vary sentence subjects. Address the reader directly. Ask a question. And when the reader adjusts to the pace of shorter sentences, write a longer one. + +- Write how real people speak... +- ...but try to avoid slang and colloquialisms that might not translate well into other languages. +- Say big things with small words. +- Be direct. Tell the reader exactly what you want them to do. +- Be honest. +- Short sentences show confidence. +- Grammar rules are meant to be bent, but only if the reader knows you are doing this. 
+- Choose words with fewer syllables for faster reading and better understanding. +- Think of copy as one-on-one conversations rather than as a speech. It's more difficult to ignore someone who is speaking to you directly. +- When possible, start task-oriented sentences (those that direct a user to do something) with action words. For example: Find software... Contact support... Install the media.... and so forth. + +Active Voice +------------------ +Use the active voice ("Start Linuxconf by typing...") rather than passive ("Linuxconf can be started by typing...") whenever possible. Active voice makes for more lively, interesting reading. +Also avoid future tense (or using the term "will") whenever possible For example, future tense ("The screen will display...") does not read as well as an active voice ("The screen displays"). Remember, the users you are writing for most often refer to the documentation while they are using the system, not after or in advance of using the system. diff --git a/docs/docsite/rst/dev_guide/style_guide/why_use.rst b/docs/docsite/rst/dev_guide/style_guide/why_use.rst new file mode 100644 index 00000000..0c1bf51a --- /dev/null +++ b/docs/docsite/rst/dev_guide/style_guide/why_use.rst @@ -0,0 +1,23 @@ +:orphan: + +Why Use a Style Guide? +````````````````````````````````` + +Style guides are important because they ensure consistency in the content, look, and feel of a book or a website. + +Remember, a style guide is only useful if it is used, updated, and enforced. Style Guides are useful for engineering-related documentation, sales and marketing materials, support docs, community contributions, and more. + +As changes are made to the overall Ansible site design, be sure to update this style guide with those changes. Or, should other resources listed below have major revisions, consider including company information here for ease of reference. 
+ +This style guide incorporates current Ansible resources and information so that overall site and documentation consistency can be met. + +.. raw:: html + + <blockquote class="note info"> + + "If you don't find it in the index, look very carefully through the entire catalogue." + ― Sears, Roebuck and Co., 1897 Sears Roebuck & Co. Catalogue + +.. raw:: html + + </blockquote> diff --git a/docs/docsite/rst/dev_guide/testing.rst b/docs/docsite/rst/dev_guide/testing.rst new file mode 100644 index 00000000..763f1672 --- /dev/null +++ b/docs/docsite/rst/dev_guide/testing.rst @@ -0,0 +1,243 @@ +.. _developing_testing: + +*************** +Testing Ansible +*************** + +.. contents:: + :local: + + +Why test your Ansible contributions? +==================================== + +If you're a developer, one of the most valuable things you can do is to look at GitHub issues and help fix bugs, since bug-fixing is almost always prioritized over feature development. Even for non-developers, helping to test pull requests for bug fixes and features is still immensely valuable. + +Ansible users who understand how to write playbooks and roles should be able to test their work. GitHub pull requests will automatically run a variety of tests (for example, Shippable) that show bugs in action. However, contributors must also test their work outside of the automated GitHub checks and show evidence of these tests in the PR to ensure that their work will be more likely to be reviewed and merged. + +Read on to learn how Ansible is tested, how to test your contributions locally, and how to extend testing capabilities. + +If you want to learn about testing collections, read :ref:`testing_collections` + + + +Types of tests +============== + +At a high level we have the following classifications of tests: + +:compile: + * :ref:`testing_compile` + * Test python code against a variety of Python versions. 
+:sanity: + * :ref:`testing_sanity` + * Sanity tests are made up of scripts and tools used to perform static code analysis. + * The primary purpose of these tests is to enforce Ansible coding standards and requirements. +:integration: + * :ref:`testing_integration` + * Functional tests of modules and Ansible core functionality. +:units: + * :ref:`testing_units` + * Tests directly against individual parts of the code base. + + +If you're a developer, one of the most valuable things you can do is look at the GitHub +issues list and help fix bugs. We almost always prioritize bug fixing over feature +development. + +Even for non developers, helping to test pull requests for bug fixes and features is still +immensely valuable. Ansible users who understand writing playbooks and roles should be +able to add integration tests and so GitHub pull requests with integration tests that show +bugs in action will also be a great way to help. + + +Testing within GitHub & Shippable +================================= + + +Organization +------------ + +When Pull Requests (PRs) are created they are tested using Shippable, a Continuous Integration (CI) tool. Results are shown at the end of every PR. + +When Shippable detects an error and it can be linked back to a file that has been modified in the PR then the relevant lines will be added as a GitHub comment. For example:: + + The test `ansible-test sanity --test pep8` failed with the following errors: + + lib/ansible/modules/network/foo/bar.py:509:17: E265 block comment should start with '# ' + + The test `ansible-test sanity --test validate-modules` failed with the following error: + lib/ansible/modules/network/foo/bar.py:0:0: E307 version_added should be 2.4. Currently 2.3 + +From the above example we can see that ``--test pep8`` and ``--test validate-modules`` have identified an issue. 
The commands given allow you to run the same tests locally to ensure you've fixed all issues without having to push your changes to GitHub and wait for Shippable, for example: + +If you haven't already got Ansible available, use the local checkout by running:: + + source hacking/env-setup + +Then run the tests detailed in the GitHub comment:: + + ansible-test sanity --test pep8 + ansible-test sanity --test validate-modules + +If there isn't a GitHub comment stating what's failed you can inspect the results by clicking on the "Details" button under the "checks have failed" message at the end of the PR. + +Rerunning a failing CI job +-------------------------- + +Occasionally you may find your PR fails due to a reason unrelated to your change. This could happen for several reasons, including: + +* a temporary issue accessing an external resource, such as a yum or git repo +* a timeout creating a virtual machine to run the tests on + +If either of these issues appear to be the case, you can rerun the Shippable test by: + +* adding a comment with ``/rebuild`` (full rebuild) or ``/rebuild_failed`` (rebuild only failed CI nodes) to the PR +* closing and re-opening the PR (full rebuild) +* making another change to the PR and pushing to GitHub + +If the issue persists, please contact us in ``#ansible-devel`` on Freenode IRC. + + +How to test a PR +================ + +Ideally, code should add tests that prove that the code works. That's not always possible and tests are not always comprehensive, especially when a user doesn't have access to a wide variety of platforms, or is using an API or web service. In these cases, live testing against real equipment can be more valuable than automation that runs against simulated interfaces. In any case, things should always be tested manually the first time as well. + +Thankfully, helping to test Ansible is pretty straightforward, assuming you are familiar with how Ansible works. 
+ +Setup: Checking out a Pull Request +---------------------------------- + +You can do this by: + +* checking out Ansible +* fetching the proposed changes into a test branch +* testing +* commenting on that particular issue on GitHub + +Here's how: + +.. warning:: + Testing source code from GitHub pull requests sent to us does have some inherent risk, as the source code + sent may have mistakes or malicious code that could have a negative impact on your system. We recommend + doing all testing on a virtual machine, whether a cloud instance, or locally. Some users like Vagrant + or Docker for this, but they are optional. It is also useful to have virtual machines of different Linux or + other flavors, since some features (for example, package managers such as apt or yum) are specific to those OS versions. + + +Create a fresh area to work:: + + + git clone https://github.com/ansible/ansible.git ansible-pr-testing + cd ansible-pr-testing + +Next, find the pull request you'd like to test and make note of its number. It will look something like this:: + + Use os.path.sep instead of hardcoding / #65381 + +.. note:: Only test ``ansible:devel`` + + It is important that the PR request target be ``ansible:devel``, as we do not accept pull requests into any other branch. Dot releases are cherry-picked manually by Ansible staff. + +Use the pull request number when you fetch the proposed changes and create your branch for testing:: + + git fetch origin refs/pull/XXXX/head:testing_PRXXXX + git checkout testing_PRXXXX + +The first command fetches the proposed changes from the pull request and creates a new branch named ``testing_PRXXXX``, where the XXXX is the actual number associated with the pull request (for example, 65381). The second command checks out the newly created branch. + +.. 
note:: + If the GitHub user interface shows that the pull request will not merge cleanly, we do not recommend proceeding if you are not somewhat familiar with git and coding, as you will have to resolve a merge conflict. This is the responsibility of the original pull request contributor. + +.. note:: + Some users do not create feature branches, which can cause problems when they have multiple, unrelated commits in their version of ``devel``. If the source looks like ``someuser:devel``, make sure there is only one commit listed on the pull request. + +The Ansible source includes a script that allows you to use Ansible directly from source without requiring a +full installation that is frequently used by developers on Ansible. + +Simply source it (to use the Linux/Unix terminology) to begin using it immediately:: + + source ./hacking/env-setup + +This script modifies the ``PYTHONPATH`` environment variables (along with a few other things), which will be temporarily +set as long as your shell session is open. + +Testing the Pull Request +------------------------ + +At this point, you should be ready to begin testing! + +Some ideas of what to test are: + +* Create a test Playbook with the examples in and check if they function correctly +* Test to see if any Python backtraces returned (that's a bug) +* Test on different operating systems, or against different library versions + +Run sanity tests +```````````````` + +.. code:: shell + + ansible-test sanity + +More information: :ref:`testing_sanity` + +Run unit tests +`````````````` + +.. code:: shell + + ansible-test units + +More information: :ref:`testing_units` + +Run integration tests +````````````````````` + +.. 
code:: shell + + ansible-test integration -v ping + +More information: :ref:`testing_integration` + +Any potential issues should be added as comments on the pull request (and it's acceptable to comment if the feature works as well), remembering to include the output of ``ansible --version`` + +Example:: + + Works for me! Tested on `Ansible 2.3.0`. I verified this on CentOS 6.5 and also Ubuntu 14.04. + +If the PR does not resolve the issue, or if you see any failures from the unit/integration tests, just include that output instead: + + | This change causes errors for me. + | + | When I ran this Ubuntu 16.04 it failed with the following: + | + | \``` + | some output + | StackTrace + | some other output + | \``` + +Code Coverage Online +```````````````````` + +`The online code coverage reports <https://codecov.io/gh/ansible/ansible>`_ are a good way +to identify areas for testing improvement in Ansible. By following red colors you can +drill down through the reports to find files which have no tests at all. Adding both +integration and unit tests which show clearly how code should work, verify important +Ansible functions and increase testing coverage in areas where there is none is a valuable +way to help improve Ansible. + +The code coverage reports only cover the ``devel`` branch of Ansible where new feature +development takes place. Pull requests and new code will be missing from the codecov.io +coverage reports so local reporting is needed. Most ``ansible-test`` commands allow you +to collect code coverage, this is particularly useful to indicate where to extend +testing. See :ref:`testing_running_locally` for more information. + + +Want to know more about testing? +================================ + +If you'd like to know more about the plans for improving testing Ansible then why not join the +`Testing Working Group <https://github.com/ansible/community/blob/master/meetings/README.md>`_. 
diff --git a/docs/docsite/rst/dev_guide/testing/sanity/action-plugin-docs.rst b/docs/docsite/rst/dev_guide/testing/sanity/action-plugin-docs.rst new file mode 100644 index 00000000..e3a5d8b8 --- /dev/null +++ b/docs/docsite/rst/dev_guide/testing/sanity/action-plugin-docs.rst @@ -0,0 +1,4 @@ +action-plugin-docs +================== + +Each action plugin should have a matching module of the same name to provide documentation. diff --git a/docs/docsite/rst/dev_guide/testing/sanity/ansible-doc.rst b/docs/docsite/rst/dev_guide/testing/sanity/ansible-doc.rst new file mode 100644 index 00000000..9f2c4f5f --- /dev/null +++ b/docs/docsite/rst/dev_guide/testing/sanity/ansible-doc.rst @@ -0,0 +1,4 @@ +ansible-doc +=========== + +Verifies that ``ansible-doc`` can parse module documentation on all supported Python versions. diff --git a/docs/docsite/rst/dev_guide/testing/sanity/ansible-var-precedence-check.rst b/docs/docsite/rst/dev_guide/testing/sanity/ansible-var-precedence-check.rst new file mode 100644 index 00000000..1906886f --- /dev/null +++ b/docs/docsite/rst/dev_guide/testing/sanity/ansible-var-precedence-check.rst @@ -0,0 +1,6 @@ +:orphan: + +ansible-var-precedence-check +============================ + +Check the order of precedence for Ansible variables against :ref:`ansible_variable_precedence`. diff --git a/docs/docsite/rst/dev_guide/testing/sanity/azure-requirements.rst b/docs/docsite/rst/dev_guide/testing/sanity/azure-requirements.rst new file mode 100644 index 00000000..5e0cc044 --- /dev/null +++ b/docs/docsite/rst/dev_guide/testing/sanity/azure-requirements.rst @@ -0,0 +1,10 @@ +:orphan: + +azure-requirements +================== + +Update the Azure integration test requirements file when changes are made to the Azure packaging requirements file: + +.. 
code-block:: bash + + cp packaging/requirements/requirements-azure.txt test/lib/ansible_test/_data/requirements/integration.cloud.azure.txt diff --git a/docs/docsite/rst/dev_guide/testing/sanity/bin-symlinks.rst b/docs/docsite/rst/dev_guide/testing/sanity/bin-symlinks.rst new file mode 100644 index 00000000..dcec7ed3 --- /dev/null +++ b/docs/docsite/rst/dev_guide/testing/sanity/bin-symlinks.rst @@ -0,0 +1,11 @@ +bin-symlinks +============ + +The ``bin/`` directory in Ansible must contain only symbolic links to executable files. +These files must reside in the ``lib/ansible/`` or ``test/lib/ansible_test/`` directories. + +This is required to allow ``ansible-test`` to work with containers and remote hosts when running from an installed version of Ansible. + +Symlinks for each entry point in ``bin/`` must also be present in ``test/lib/ansible_test/_data/injector/``. +Each symlink should point to the ``python.py`` script in the same directory. +This facilitates running with the correct Python interpreter and enabling code coverage. diff --git a/docs/docsite/rst/dev_guide/testing/sanity/boilerplate.rst b/docs/docsite/rst/dev_guide/testing/sanity/boilerplate.rst new file mode 100644 index 00000000..51c0c089 --- /dev/null +++ b/docs/docsite/rst/dev_guide/testing/sanity/boilerplate.rst @@ -0,0 +1,11 @@ +:orphan: + +boilerplate +=========== + +Most Python files should include the following boilerplate: + +.. code-block:: python + + from __future__ import (absolute_import, division, print_function) + __metaclass__ = type diff --git a/docs/docsite/rst/dev_guide/testing/sanity/botmeta.rst b/docs/docsite/rst/dev_guide/testing/sanity/botmeta.rst new file mode 100644 index 00000000..639bb0bf --- /dev/null +++ b/docs/docsite/rst/dev_guide/testing/sanity/botmeta.rst @@ -0,0 +1,4 @@ +botmeta +======= + +Verifies that ``./github/BOTMETA.yml`` is valid. 
diff --git a/docs/docsite/rst/dev_guide/testing/sanity/changelog.rst b/docs/docsite/rst/dev_guide/testing/sanity/changelog.rst new file mode 100644 index 00000000..8cb53329 --- /dev/null +++ b/docs/docsite/rst/dev_guide/testing/sanity/changelog.rst @@ -0,0 +1,17 @@ +changelog +========= + +Basic linting of changelog fragments with `antsibull-changelog lint <https://pypi.org/project/antsibull-changelog/>`_. + +One or more of the following sections are required: + +- major_changes +- minor_changes +- breaking_changes +- deprecated_features +- removed_features +- security_fixes +- bugfixes +- known_issues + +New modules and plugins must not be included in changelog fragments. diff --git a/docs/docsite/rst/dev_guide/testing/sanity/compile.rst b/docs/docsite/rst/dev_guide/testing/sanity/compile.rst new file mode 100644 index 00000000..222f94e4 --- /dev/null +++ b/docs/docsite/rst/dev_guide/testing/sanity/compile.rst @@ -0,0 +1,4 @@ +compile +======= + +See :ref:`testing_compile` for more information. diff --git a/docs/docsite/rst/dev_guide/testing/sanity/configure-remoting-ps1.rst b/docs/docsite/rst/dev_guide/testing/sanity/configure-remoting-ps1.rst new file mode 100644 index 00000000..e83bc78d --- /dev/null +++ b/docs/docsite/rst/dev_guide/testing/sanity/configure-remoting-ps1.rst @@ -0,0 +1,5 @@ +configure-remoting-ps1 +====================== + +The file ``examples/scripts/ConfigureRemotingForAnsible.ps1`` is required and must be a regular file. +It is used by external automated processes and cannot be moved, renamed or replaced with a symbolic link. 
diff --git a/docs/docsite/rst/dev_guide/testing/sanity/deprecated-config.rst b/docs/docsite/rst/dev_guide/testing/sanity/deprecated-config.rst new file mode 100644 index 00000000..950805a2 --- /dev/null +++ b/docs/docsite/rst/dev_guide/testing/sanity/deprecated-config.rst @@ -0,0 +1,6 @@ +:orphan: + +deprecated-config +================= + +``DOCUMENTATION`` config is scheduled for removal diff --git a/docs/docsite/rst/dev_guide/testing/sanity/docs-build.rst b/docs/docsite/rst/dev_guide/testing/sanity/docs-build.rst new file mode 100644 index 00000000..23f3c552 --- /dev/null +++ b/docs/docsite/rst/dev_guide/testing/sanity/docs-build.rst @@ -0,0 +1,4 @@ +docs-build +========== + +Verifies that ``make singlehtmldocs`` in ``docs/docsite/`` completes without errors. diff --git a/docs/docsite/rst/dev_guide/testing/sanity/empty-init.rst b/docs/docsite/rst/dev_guide/testing/sanity/empty-init.rst new file mode 100644 index 00000000..e87bb71e --- /dev/null +++ b/docs/docsite/rst/dev_guide/testing/sanity/empty-init.rst @@ -0,0 +1,10 @@ +empty-init +========== + +The ``__init__.py`` files under the following directories must be empty. For some of these (modules +and tests), ``__init__.py`` files with code won't be used. For others (module_utils), we want the +possibility of using Python namespaces which an empty ``__init__.py`` will allow for. + +- ``lib/ansible/modules/`` +- ``lib/ansible/module_utils/`` +- ``test/units/`` diff --git a/docs/docsite/rst/dev_guide/testing/sanity/future-import-boilerplate.rst b/docs/docsite/rst/dev_guide/testing/sanity/future-import-boilerplate.rst new file mode 100644 index 00000000..9d150e1f --- /dev/null +++ b/docs/docsite/rst/dev_guide/testing/sanity/future-import-boilerplate.rst @@ -0,0 +1,51 @@ +future-import-boilerplate +========================= + +Most Python files should include the following boilerplate at the top of the file, right after the +comment header: + +.. 
code-block:: python + + from __future__ import (absolute_import, division, print_function) + +This uses Python 3 semantics for absolute vs relative imports, division, and print. By doing this, +we can write code which is portable between Python 2 and Python 3 by following the Python 3 semantics. + + +absolute_import +--------------- + +When Python 2 encounters an import of a name in a file like ``import copy`` it attempts to load +``copy.py`` from the same directory as the file is in. This can cause problems if there is a python +file of that name in the directory and also a python module in ``sys.path`` with that same name. In +that case, Python 2 would load the one in the same directory and there would be no way to load the +one on ``sys.path``. Python 3 fixes this by making imports absolute by default. ``import copy`` +will find ``copy.py`` from ``sys.path``. If you want to import ``copy.py`` from the same directory, +the code needs to be changed to perform a relative import: ``from . import copy``. + +.. seealso:: + + * `Absolute and relative imports <https://www.python.org/dev/peps/pep-0328>`_ + +division +-------- + +In Python 2, the division operator (``/``) returns integer values when used with integers. If there +was a remainder, this part would be left off (aka, `floor division`). In Python 3, the division +operator (``/``) always returns a floating point number. Code that needs to calculate the integer +portion of the quotient needs to switch to using the floor division operator (`//`) instead. + +.. seealso:: + + * `Changing the division operator <https://www.python.org/dev/peps/pep-0238>`_ + +print_function +-------------- + +In Python 2, :func:`python:print` is a keyword. In Python 3, :func:`python3:print` is a function with different +parameters. Using this ``__future__`` allows using the Python 3 print semantics everywhere. + +.. 
seealso:: + + * `Make print a function <https://www.python.org/dev/peps/pep-3105>`_ + diff --git a/docs/docsite/rst/dev_guide/testing/sanity/ignores.rst b/docs/docsite/rst/dev_guide/testing/sanity/ignores.rst new file mode 100644 index 00000000..9d7a94c0 --- /dev/null +++ b/docs/docsite/rst/dev_guide/testing/sanity/ignores.rst @@ -0,0 +1,99 @@ +ignores +======= + +Sanity tests for individual files can be skipped, and specific errors can be ignored. + +When to Ignore Errors +--------------------- + +Sanity tests are designed to improve code quality and identify common issues with content. +When issues are identified during development, those issues should be corrected. + +As development of Ansible continues, sanity tests are expanded to detect issues that previous releases could not. +To allow time for existing content to be updated to pass newer tests, ignore entries can be added. +New content should not use ignores for existing sanity tests. + +When code is fixed to resolve sanity test errors, any relevant ignores must also be removed. +If the ignores are not removed, this will be reported as an unnecessary ignore error. +This is intended to prevent future regressions due to the same error recurring after being fixed. + +When to Skip Tests +------------------ + +Although rare, there are reasons for skipping a sanity test instead of ignoring the errors it reports. + +If a sanity test results in a traceback when processing content, that error cannot be ignored. +If this occurs, open a new `bug report <https://github.com/ansible/ansible/issues/new?template=bug_report.md>`_ for the issue so it can be fixed. +If the traceback occurs due to an issue with the content, that issue should be fixed. +If the content is correct, the test will need to be skipped until the bug in the sanity test is fixed. + + Caution should be used when skipping sanity tests instead of ignoring them. 
+ Since the test is skipped entirely, resolution of the issue will not be automatically detected. + This will prevent prevent regression detection from working once the issue has been resolved. + For this reason it is a good idea to periodically review skipped entries manually to verify they are required. + +Ignore File Location +-------------------- + +The location of the ignore file depends on the type of content being tested. + +Ansible Collections +~~~~~~~~~~~~~~~~~~~ + +Since sanity tests change between Ansible releases, a separate ignore file is needed for each Ansible major release. + +The filename is ``tests/sanity/ignore-X.Y.txt`` where ``X.Y`` is the Ansible release being used to test the collection. + +Maintaining a separate file for each Ansible release allows a collection to pass tests for multiple versions of Ansible. + +Ansible +~~~~~~~ + +When testing Ansible, all ignores are placed in the ``test/sanity/ignore.txt`` file. + +Only a single file is needed because ``ansible-test`` is developed and released as a part of Ansible itself. + +Ignore File Format +------------------ + +The ignore file contains one entry per line. +Each line consists of two columns, separated by a single space. +Comments may be added at the end of an entry, started with a hash (``#``) character, which can be proceeded by zero or more spaces. +Blank and comment only lines are not allowed. + +The first column specifies the file path that the entry applies to. +File paths must be relative to the root of the content being tested. +This is either the Ansible source or an Ansible collection. +File paths cannot contain a space or the hash (``#``) character. + +The second column specifies the sanity test that the entry applies to. +This will be the name of the sanity test. +If the sanity test is specific to a version of Python, the name will include a dash (``-``) and the relevant Python version. 
+If the named test uses error codes then the error code to ignore must be appended to the name of the test, separated by a colon (``:``). + +Below are some example ignore entries for an Ansible collection:: + + roles/my_role/files/my_script.sh shellcheck:SC2154 # ignore undefined variable + plugins/modules/my_module.py validate-modules:E105 # ignore license check + plugins/modules/my_module.py import-3.8 # needs update to support collections.abc on Python 3.8+ + +It is also possible to skip a sanity test for a specific file. +This is done by adding ``!skip`` after the sanity test name in the second column. +When this is done, no error code is included, even if the sanity test uses error codes. + +Below are some example skip entries for an Ansible collection:: + + plugins/module_utils/my_util.py validate-modules!skip # waiting for bug fix in module validator + plugins/lookup/my_plugin.py compile-2.6!skip # Python 2.6 is not supported on the controller + +Ignore File Errors +------------------ + +There are various errors that can be reported for the ignore file itself: + +- syntax errors parsing the ignore file +- references a file path that does not exist +- references to a sanity test that does not exist +- ignoring an error that does not occur +- ignoring a file which is skipped +- duplicate entries diff --git a/docs/docsite/rst/dev_guide/testing/sanity/import.rst b/docs/docsite/rst/dev_guide/testing/sanity/import.rst new file mode 100644 index 00000000..4b29636a --- /dev/null +++ b/docs/docsite/rst/dev_guide/testing/sanity/import.rst @@ -0,0 +1,5 @@ +import +====== + +All Python imports in ``lib/ansible/modules/`` and ``lib/ansible/module_utils/`` which are not from the Python standard library +must be imported in a try/except ImportError block. 
diff --git a/docs/docsite/rst/dev_guide/testing/sanity/integration-aliases.rst b/docs/docsite/rst/dev_guide/testing/sanity/integration-aliases.rst new file mode 100644 index 00000000..e6cc1e91 --- /dev/null +++ b/docs/docsite/rst/dev_guide/testing/sanity/integration-aliases.rst @@ -0,0 +1,182 @@ +integration-aliases +=================== + +Integration tests are executed by ``ansible-test`` and reside in directories under ``test/integration/targets/``. +Each test MUST have an ``aliases`` file to control test execution. + +Aliases are explained in the following sections. Each alias must be on a separate line in an ``aliases`` file. + +Groups +------ + +Tests must be configured to run in exactly one group. This is done by adding the appropriate group to the ``aliases`` file. + +The following are examples of some of the available groups: + +- ``shippable/posix/group1`` +- ``shippable/windows/group2`` +- ``shippable/azure/group3`` +- ``shippable/aws/group1`` +- ``shippable/cloud/group1`` + +Groups are used to balance tests across multiple CI jobs to minimize test run time. +They also improve efficiency by keeping tests with similar requirements running together. + +When selecting a group for a new test, use the same group as existing tests similar to the one being added. +If more than one group is available, select one randomly. + +Setup +----- + +Aliases can be used to execute setup targets before running tests: + +- ``setup/once/TARGET`` - Run the target ``TARGET`` before the first target that requires it. +- ``setup/always/TARGET`` - Run the target ``TARGET`` before each target that requires it. + +Requirements +------------ + +Aliases can be used to express some test requirements: + +- ``needs/privileged`` - Requires ``--docker-privileged`` when running tests with ``--docker``. +- ``needs/root`` - Requires running tests as ``root`` or with ``--docker``. 
+- ``needs/ssh`` - Requires SSH connections to localhost (or the test container with ``--docker``) without a password. +- ``needs/httptester`` - Requires use of the http-test-container to run tests. + +Dependencies +------------ + +Some test dependencies are automatically discovered: + +- Ansible role dependencies defined in ``meta/main.yml`` files. +- Setup targets defined with ``setup/*`` aliases. +- Symbolic links from one target to a file in another target. + +Aliases can be used to declare dependencies that are not handled automatically: + +- ``needs/target/TARGET`` - Requires use of the test target ``TARGET``. +- ``needs/file/PATH`` - Requires use of the file ``PATH`` relative to the git root. + +Skipping +-------- + +Aliases can be used to skip platforms using one of the following: + +- ``skip/freebsd`` - Skip tests on FreeBSD. +- ``skip/osx`` - Skip tests on macOS. +- ``skip/rhel`` - Skip tests on RHEL. +- ``skip/docker`` - Skip tests when running in a Docker container. + +Platform versions, as specified using the ``--remote`` option with ``/`` removed, can also be skipped: + +- ``skip/freebsd11.1`` - Skip tests on FreeBSD 11.1. +- ``skip/rhel7.6`` - Skip tests on RHEL 7.6. + +Windows versions, as specified using the ``--windows`` option can also be skipped: + +- ``skip/windows/2008`` - Skip tests on Windows Server 2008. +- ``skip/windows/2012-R2`` - Skip tests on Windows Server 2012 R2. + +Aliases can be used to skip Python major versions using one of the following: + +- ``skip/python2`` - Skip tests on Python 2.x. +- ``skip/python3`` - Skip tests on Python 3.x. + +For more fine grained skipping, use conditionals in integration test playbooks, such as: + +.. code-block:: yaml + + when: ansible_distribution in ('Ubuntu') + + +Miscellaneous +------------- + +There are several other aliases available as well: + +- ``destructive`` - Requires ``--allow-destructive`` to run without ``--docker`` or ``--remote``. +- ``hidden`` - Target is ignored. 
Usable as a dependency. Automatic for ``setup_`` and ``prepare_`` prefixed targets. + +Unstable +-------- + +Tests which fail sometimes should be marked with the ``unstable`` alias until the instability has been fixed. +These tests will continue to run for pull requests which modify the test or the module under test. + +This avoids unnecessary test failures for other pull requests, as well as tests on merge runs and nightly CI jobs. + +There are two ways to run unstable tests manually: + +- Use the ``--allow-unstable`` option for ``ansible-test`` +- Prefix the test name with ``unstable/`` when passing it to ``ansible-test``. + +Tests will be marked as unstable by a member of the Ansible Core Team. +GitHub issues_ will be created to track each unstable test. + +Disabled +-------- + +Tests which always fail should be marked with the ``disabled`` alias until they can be fixed. + +Disabled tests are automatically skipped. + +There are two ways to run disabled tests manually: + +- Use the ``--allow-disabled`` option for ``ansible-test`` +- Prefix the test name with ``disabled/`` when passing it to ``ansible-test``. + +Tests will be marked as disabled by a member of the Ansible Core Team. +GitHub issues_ will be created to track each disabled test. + +Unsupported +----------- + +Tests which cannot be run in CI should be marked with the ``unsupported`` alias. +Most tests can be supported through the use of simulators and/or cloud plugins. + +However, if that is not possible then marking a test as unsupported will prevent it from running in CI. + +There are two ways to run unsupported tests manually: + +* Use the ``--allow-unsupported`` option for ``ansible-test`` +* Prefix the test name with ``unsupported/`` when passing it to ``ansible-test``. + +Tests will be marked as unsupported by the contributor of the test. + +Cloud +----- + +Tests for cloud services and other modules that require access to external APIs usually require special support for testing in CI. 
+ +These require an additional alias to indicate the required test plugin. + +Some of the available aliases are: + +- ``cloud/aws`` +- ``cloud/azure`` +- ``cloud/cs`` +- ``cloud/foreman`` +- ``cloud/openshift`` +- ``cloud/tower`` +- ``cloud/vcenter`` + +Untested +-------- + +Every module and plugin should have integration tests, even if the tests cannot be run in CI. + +Issues +------ + +Tests that are marked as unstable_ or disabled_ will have an issue created to track the status of the test. +Each issue will be assigned to one of the following projects: + +- `AWS <https://github.com/ansible/ansible/projects/21>`_ +- `Azure <https://github.com/ansible/ansible/projects/22>`_ +- `Windows <https://github.com/ansible/ansible/projects/23>`_ +- `General <https://github.com/ansible/ansible/projects/25>`_ + +Questions +--------- + +For questions about integration tests reach out to @mattclay or @gundalow on GitHub or ``#ansible-devel`` on IRC. diff --git a/docs/docsite/rst/dev_guide/testing/sanity/line-endings.rst b/docs/docsite/rst/dev_guide/testing/sanity/line-endings.rst new file mode 100644 index 00000000..d56cfc12 --- /dev/null +++ b/docs/docsite/rst/dev_guide/testing/sanity/line-endings.rst @@ -0,0 +1,4 @@ +line-endings +============ + +All files must use ``\n`` for line endings instead of ``\r\n``. diff --git a/docs/docsite/rst/dev_guide/testing/sanity/metaclass-boilerplate.rst b/docs/docsite/rst/dev_guide/testing/sanity/metaclass-boilerplate.rst new file mode 100644 index 00000000..c7327b39 --- /dev/null +++ b/docs/docsite/rst/dev_guide/testing/sanity/metaclass-boilerplate.rst @@ -0,0 +1,23 @@ +metaclass-boilerplate +===================== + +Most Python files should include the following boilerplate at the top of the file, right after the +comment header and ``from __future__ import``: + +.. code-block:: python + + __metaclass__ = type + + +Python 2 has "new-style classes" and "old-style classes" whereas Python 3 only has new-style classes. 
+Adding the ``__metaclass__ = type`` boilerplate makes every class defined in that file into +a new-style class as well. + +.. code-block:: python + + from __future__ import absolute_import, division, print_function + __metaclass__ = type + + class Foo: + # This is a new-style class even on Python 2 because of the __metaclass__ + pass diff --git a/docs/docsite/rst/dev_guide/testing/sanity/no-assert.rst b/docs/docsite/rst/dev_guide/testing/sanity/no-assert.rst new file mode 100644 index 00000000..489f917f --- /dev/null +++ b/docs/docsite/rst/dev_guide/testing/sanity/no-assert.rst @@ -0,0 +1,16 @@ +no-assert +========= + +Do not use ``assert`` in production Ansible python code. When running Python +with optimizations, Python will remove ``assert`` statements, potentially +allowing for unexpected behavior throughout the Ansible code base. + +Instead of using ``assert`` you should utilize simple ``if`` statements, +that result in raising an exception. There is a new exception called +``AnsibleAssertionError`` that inherits from ``AnsibleError`` and +``AssertionError``. When possible, utilize a more specific exception +than ``AnsibleAssertionError``. + +Modules will not have access to ``AnsibleAssertionError`` and should instead +raise ``AssertionError``, a more specific exception, or just use +``module.fail_json`` at the failure point. diff --git a/docs/docsite/rst/dev_guide/testing/sanity/no-basestring.rst b/docs/docsite/rst/dev_guide/testing/sanity/no-basestring.rst new file mode 100644 index 00000000..f1b6ba92 --- /dev/null +++ b/docs/docsite/rst/dev_guide/testing/sanity/no-basestring.rst @@ -0,0 +1,11 @@ +no-basestring +============= + +Do not use ``isinstance(s, basestring)`` as basestring has been removed in +Python3. You can import ``string_types``, ``binary_type``, or ``text_type`` +from ``ansible.module_utils.six`` and then use ``isinstance(s, string_types)`` +or ``isinstance(s, (binary_type, text_type))`` instead. 
+ +If this is part of code to convert a string to a particular type, +``ansible.module_utils._text`` contains several functions that may be even +better for you: ``to_text``, ``to_bytes``, and ``to_native``. diff --git a/docs/docsite/rst/dev_guide/testing/sanity/no-dict-iteritems.rst b/docs/docsite/rst/dev_guide/testing/sanity/no-dict-iteritems.rst new file mode 100644 index 00000000..e231c796 --- /dev/null +++ b/docs/docsite/rst/dev_guide/testing/sanity/no-dict-iteritems.rst @@ -0,0 +1,16 @@ +no-dict-iteritems +================= + +The ``dict.iteritems`` method has been removed in Python 3. There are two recommended alternatives: + +.. code-block:: python + + for KEY, VALUE in DICT.items(): + pass + +.. code-block:: python + + from ansible.module_utils.six import iteritems + + for KEY, VALUE in iteritems(DICT): + pass diff --git a/docs/docsite/rst/dev_guide/testing/sanity/no-dict-iterkeys.rst b/docs/docsite/rst/dev_guide/testing/sanity/no-dict-iterkeys.rst new file mode 100644 index 00000000..9dc4a978 --- /dev/null +++ b/docs/docsite/rst/dev_guide/testing/sanity/no-dict-iterkeys.rst @@ -0,0 +1,9 @@ +no-dict-iterkeys +================ + +The ``dict.iterkeys`` method has been removed in Python 3. Use the following instead: + +.. code-block:: python + + for KEY in DICT: + pass diff --git a/docs/docsite/rst/dev_guide/testing/sanity/no-dict-itervalues.rst b/docs/docsite/rst/dev_guide/testing/sanity/no-dict-itervalues.rst new file mode 100644 index 00000000..979450e4 --- /dev/null +++ b/docs/docsite/rst/dev_guide/testing/sanity/no-dict-itervalues.rst @@ -0,0 +1,16 @@ +no-dict-itervalues +================== + +The ``dict.itervalues`` method has been removed in Python 3. There are two recommended alternatives: + +.. code-block:: python + + for VALUE in DICT.values(): + pass + +.. 
code-block:: python + + from ansible.module_utils.six import itervalues + + for VALUE in itervalues(DICT): + pass diff --git a/docs/docsite/rst/dev_guide/testing/sanity/no-get-exception.rst b/docs/docsite/rst/dev_guide/testing/sanity/no-get-exception.rst new file mode 100644 index 00000000..584fbc86 --- /dev/null +++ b/docs/docsite/rst/dev_guide/testing/sanity/no-get-exception.rst @@ -0,0 +1,28 @@ +no-get-exception +================ + +We created a function, ``ansible.module_utils.pycompat24.get_exception`` to +help retrieve exceptions in a manner compatible with Python 2.4 through +Python 3.6. We no longer support Python 2.4 and Python 2.5 so this is +extraneous and we want to deprecate the function. Porting code should look +something like this: + +.. code-block:: python + + # Unfixed code: + try: + raise IOError('test') + except IOError: + e = get_excetion() + do_something(e) + except: + e = get_exception() + do_something_else(e) + + # After fixing: + try: + raise IOError('test') + except IOErrors as e: + do_something(e) + except Exception as e: + do_something_else(e) diff --git a/docs/docsite/rst/dev_guide/testing/sanity/no-illegal-filenames.rst b/docs/docsite/rst/dev_guide/testing/sanity/no-illegal-filenames.rst new file mode 100644 index 00000000..6e6f565e --- /dev/null +++ b/docs/docsite/rst/dev_guide/testing/sanity/no-illegal-filenames.rst @@ -0,0 +1,61 @@ +no-illegal-filenames +==================== + +Files and directories should not contain illegal characters or names so that +Ansible can be checked out on any Operating System. 
+ +Illegal Characters +------------------ + +The following characters are not allowed to be used in any part of the file or +directory name; + +* ``<`` +* ``>`` +* ``:`` +* ``"`` +* ``/`` +* ``\`` +* ``|`` +* ``?`` +* ``*`` +* Any characters whose integer representations are in the range from 0 through to 31 like ``\n`` + +The following characters are not allowed to be used as the last character of a +file or directory; + +* ``.`` +* ``" "`` (just the space character) + +Illegal Names +------------- + +The following names are not allowed to be used as the name of a file or +directory excluding the extension; + +* ``CON`` +* ``PRN`` +* ``AUX`` +* ``NUL`` +* ``COM1`` +* ``COM2`` +* ``COM3`` +* ``COM4`` +* ``COM5`` +* ``COM6`` +* ``COM7`` +* ``COM8`` +* ``COM9`` +* ``LPT1`` +* ``LPT2`` +* ``LPT3`` +* ``LPT4`` +* ``LPT5`` +* ``LPT6`` +* ``LPT7`` +* ``LPT8`` +* ``LPT9`` + +For example, the file ``folder/COM1``, ``folder/COM1.txt`` are illegal but +``folder/COM1-file`` or ``folder/COM1-file.txt`` is allowed. + diff --git a/docs/docsite/rst/dev_guide/testing/sanity/no-main-display.rst b/docs/docsite/rst/dev_guide/testing/sanity/no-main-display.rst new file mode 100644 index 00000000..7ccf0dc7 --- /dev/null +++ b/docs/docsite/rst/dev_guide/testing/sanity/no-main-display.rst @@ -0,0 +1,12 @@ +no-main-display +=============== + +As of Ansible 2.8, ``Display`` should no longer be imported from ``__main__``. + +``Display`` is now a singleton and should be utilized like the following:: + + from ansible.utils.display import Display + display = Display() + +There is no longer a need to attempt ``from __main__ import display`` inside +a ``try/except`` block. 
diff --git a/docs/docsite/rst/dev_guide/testing/sanity/no-smart-quotes.rst b/docs/docsite/rst/dev_guide/testing/sanity/no-smart-quotes.rst new file mode 100644 index 00000000..50dc7baf --- /dev/null +++ b/docs/docsite/rst/dev_guide/testing/sanity/no-smart-quotes.rst @@ -0,0 +1,4 @@ +no-smart-quotes +=============== + +Smart quotes (``”“‘’``) should not be used. Use plain ascii quotes (``"'``) instead. diff --git a/docs/docsite/rst/dev_guide/testing/sanity/no-tests-as-filters.rst b/docs/docsite/rst/dev_guide/testing/sanity/no-tests-as-filters.rst new file mode 100644 index 00000000..0c1f99ac --- /dev/null +++ b/docs/docsite/rst/dev_guide/testing/sanity/no-tests-as-filters.rst @@ -0,0 +1,12 @@ +:orphan: + +no-tests-as-filters +=================== + +Using Ansible provided Jinja2 tests as filters will be removed in Ansible 2.9. + +Prior to Ansible 2.5, Jinja2 tests included within Ansible were most often used as filters. The large difference in use is that filters are referenced as ``variable | filter_name`` while Jinja2 tests are referenced as ``variable is test_name``. + +Jinja2 tests are used for comparisons, whereas filters are used for data manipulation, and have different applications in Jinja2. This change is to help differentiate the concepts for a better understanding of Jinja2, and where each can be appropriately used. + +As of Ansible 2.5 using an Ansible provided Jinja2 test with filter syntax will display a deprecation error. diff --git a/docs/docsite/rst/dev_guide/testing/sanity/no-underscore-variable.rst b/docs/docsite/rst/dev_guide/testing/sanity/no-underscore-variable.rst new file mode 100644 index 00000000..5174a43a --- /dev/null +++ b/docs/docsite/rst/dev_guide/testing/sanity/no-underscore-variable.rst @@ -0,0 +1,30 @@ +:orphan: + +no-underscore-variable +====================== + +In the future, Ansible may use the identifier ``_`` to internationalize its +message strings. 
To be ready for that, we need to make sure that there are +no conflicting identifiers defined in the code base. + +In common practice, ``_`` is frequently used as a dummy variable (a variable +to receive a value from a function where the value is useless and never used). +In Ansible, we're using the identifier ``dummy`` for this purpose instead. + +Example of unfixed code: + +.. code-block:: python + + for _ in range(0, retries): + success = retry_thing() + if success: + break + +Example of fixed code: + +.. code-block:: python + + for dummy in range(0, retries): + success = retry_thing() + if success: + break diff --git a/docs/docsite/rst/dev_guide/testing/sanity/no-unicode-literals.rst b/docs/docsite/rst/dev_guide/testing/sanity/no-unicode-literals.rst new file mode 100644 index 00000000..c4f3586a --- /dev/null +++ b/docs/docsite/rst/dev_guide/testing/sanity/no-unicode-literals.rst @@ -0,0 +1,16 @@ +no-unicode_literals +=================== + +The use of :code:`from __future__ import unicode_literals` has been deemed an anti-pattern. The +problems with it are: + +* It makes it so one can't jump into the middle of a file and know whether a bare literal string is + a byte string or text string. The programmer has to first check the top of the file to see if the + import is there. +* It removes the ability to define native strings (a string which should be a byte string on python2 + and a text string on python3) via a string literal. +* It makes for more context switching. A programmer could be reading one file which has + `unicode_literals` and know that bare string literals are text strings but then switch to another + file (perhaps tracing program execution into a third party library) and have to switch their + understanding of what bare string literals are. 
+ diff --git a/docs/docsite/rst/dev_guide/testing/sanity/no-unwanted-files.rst b/docs/docsite/rst/dev_guide/testing/sanity/no-unwanted-files.rst new file mode 100644 index 00000000..3d76324e --- /dev/null +++ b/docs/docsite/rst/dev_guide/testing/sanity/no-unwanted-files.rst @@ -0,0 +1,13 @@ +no-unwanted-files +================= + +Specific file types are allowed in certain directories: + +- ``lib`` - All content must reside in the ``lib/ansible`` directory. + +- ``lib/ansible`` - Only source code with one of the following extensions is allowed: + + - ``*.cs`` - C# + - ``*.ps1`` - PowerShell + - ``*.psm1`` - PowerShell + - ``*.py`` - Python diff --git a/docs/docsite/rst/dev_guide/testing/sanity/no-wildcard-import.rst b/docs/docsite/rst/dev_guide/testing/sanity/no-wildcard-import.rst new file mode 100644 index 00000000..fdaf07b0 --- /dev/null +++ b/docs/docsite/rst/dev_guide/testing/sanity/no-wildcard-import.rst @@ -0,0 +1,31 @@ +:orphan: + +no-wildcard-import +================== + +Using :code:`import *` is a bad habit which pollutes your namespace, hinders +debugging, and interferes with static analysis of code. For those reasons, we +do want to limit the use of :code:`import *` in the ansible code. Change our +code to import the specific names that you need instead. + +Examples of unfixed code: + +.. code-block:: python + + from ansible.module_utils.six import * + if isinstance(variable, string_types): + do_something(variable) + + from ansible.module_utils.basic import * + module = AnsibleModule() + +Examples of fixed code: + +.. 
code-block:: python + + from ansible.module_utils import six + if isinstance(variable, six.string_types): + do_something(variable) + + from ansible.module_utils.basic import AnsibleModule + module = AnsibleModule() diff --git a/docs/docsite/rst/dev_guide/testing/sanity/obsolete-files.rst b/docs/docsite/rst/dev_guide/testing/sanity/obsolete-files.rst new file mode 100644 index 00000000..6e2fb2a5 --- /dev/null +++ b/docs/docsite/rst/dev_guide/testing/sanity/obsolete-files.rst @@ -0,0 +1,14 @@ +obsolete-files +============== + +Directories in the Ansible source tree are sometimes made obsolete. +Files should not exist in these directories. +The new location (if any) is dependent on which directory has been made obsolete. + +Below are some of the obsolete directories and their new locations: + +- All of ``test/runner/`` is now under ``test/lib/ansible_test/`` instead. The organization of files in the new directory has changed. +- Most subdirectories of ``test/sanity/`` (with some exceptions) are now under ``test/lib/ansible_test/_data/sanity/`` instead. + +This error occurs most frequently for open pull requests which add or modify files in directories which are now obsolete. +Make sure the branch you are working from is current so that changes can be made in the correct location. 
diff --git a/docs/docsite/rst/dev_guide/testing/sanity/package-data.rst b/docs/docsite/rst/dev_guide/testing/sanity/package-data.rst new file mode 100644 index 00000000..220872dd --- /dev/null +++ b/docs/docsite/rst/dev_guide/testing/sanity/package-data.rst @@ -0,0 +1,5 @@ +package-data +============ + +Verifies that the combination of ``MANIFEST.in`` and ``package_data`` from ``setup.py`` +properly installs data files from within ``lib/ansible`` diff --git a/docs/docsite/rst/dev_guide/testing/sanity/pep8.rst b/docs/docsite/rst/dev_guide/testing/sanity/pep8.rst new file mode 100644 index 00000000..8595d986 --- /dev/null +++ b/docs/docsite/rst/dev_guide/testing/sanity/pep8.rst @@ -0,0 +1,6 @@ +pep8 +==== + +Python static analysis for PEP 8 style guideline compliance. + +See :ref:`testing_pep8` for more information. diff --git a/docs/docsite/rst/dev_guide/testing/sanity/pslint.rst b/docs/docsite/rst/dev_guide/testing/sanity/pslint.rst new file mode 100644 index 00000000..baa4fa03 --- /dev/null +++ b/docs/docsite/rst/dev_guide/testing/sanity/pslint.rst @@ -0,0 +1,4 @@ +pslint +====== + +PowerShell static analysis for common programming errors using `PSScriptAnalyzer <https://github.com/PowerShell/PSScriptAnalyzer/>`_. diff --git a/docs/docsite/rst/dev_guide/testing/sanity/pylint-ansible-test.rst b/docs/docsite/rst/dev_guide/testing/sanity/pylint-ansible-test.rst new file mode 100644 index 00000000..a80ddc1e --- /dev/null +++ b/docs/docsite/rst/dev_guide/testing/sanity/pylint-ansible-test.rst @@ -0,0 +1,8 @@ +:orphan: + +pylint-ansible-test +=================== + +Python static analysis for common programming errors. + +A more strict set of rules applied to ``ansible-test``. 
diff --git a/docs/docsite/rst/dev_guide/testing/sanity/pylint.rst b/docs/docsite/rst/dev_guide/testing/sanity/pylint.rst new file mode 100644 index 00000000..2b2ef9e5 --- /dev/null +++ b/docs/docsite/rst/dev_guide/testing/sanity/pylint.rst @@ -0,0 +1,4 @@ +pylint +====== + +Python static analysis for common programming errors. diff --git a/docs/docsite/rst/dev_guide/testing/sanity/release-names.rst b/docs/docsite/rst/dev_guide/testing/sanity/release-names.rst new file mode 100644 index 00000000..359f7ecb --- /dev/null +++ b/docs/docsite/rst/dev_guide/testing/sanity/release-names.rst @@ -0,0 +1,4 @@ +Release names +============= + +Verifies that the most recent release name has been added to ``./github/RELEASE_NAMES.yml`` diff --git a/docs/docsite/rst/dev_guide/testing/sanity/replace-urlopen.rst b/docs/docsite/rst/dev_guide/testing/sanity/replace-urlopen.rst new file mode 100644 index 00000000..705195c9 --- /dev/null +++ b/docs/docsite/rst/dev_guide/testing/sanity/replace-urlopen.rst @@ -0,0 +1,4 @@ +replace-urlopen +=============== + +Use ``open_url`` from ``module_utils`` instead of ``urlopen``. diff --git a/docs/docsite/rst/dev_guide/testing/sanity/required-and-default-attributes.rst b/docs/docsite/rst/dev_guide/testing/sanity/required-and-default-attributes.rst new file mode 100644 index 00000000..573c3615 --- /dev/null +++ b/docs/docsite/rst/dev_guide/testing/sanity/required-and-default-attributes.rst @@ -0,0 +1,5 @@ +required-and-default-attributes +=============================== + +Use only one of ``default`` or ``required`` with ``FieldAttribute``. + diff --git a/docs/docsite/rst/dev_guide/testing/sanity/rstcheck.rst b/docs/docsite/rst/dev_guide/testing/sanity/rstcheck.rst new file mode 100644 index 00000000..8fcbbce3 --- /dev/null +++ b/docs/docsite/rst/dev_guide/testing/sanity/rstcheck.rst @@ -0,0 +1,4 @@ +rstcheck +======== + +Check reStructuredText files for syntax and formatting issues. 
diff --git a/docs/docsite/rst/dev_guide/testing/sanity/runtime-metadata.rst b/docs/docsite/rst/dev_guide/testing/sanity/runtime-metadata.rst new file mode 100644 index 00000000..cf6d9272 --- /dev/null +++ b/docs/docsite/rst/dev_guide/testing/sanity/runtime-metadata.rst @@ -0,0 +1,7 @@ +runtime-metadata.yml +==================== + +Validates the schema for: + +* ansible-base's ``lib/ansible/config/ansible_builtin_runtime.yml`` +* collection's ``meta/runtime.yml`` diff --git a/docs/docsite/rst/dev_guide/testing/sanity/sanity-docs.rst b/docs/docsite/rst/dev_guide/testing/sanity/sanity-docs.rst new file mode 100644 index 00000000..34265c34 --- /dev/null +++ b/docs/docsite/rst/dev_guide/testing/sanity/sanity-docs.rst @@ -0,0 +1,4 @@ +sanity-docs +=========== + +Documentation for each ``ansible-test sanity`` test is required. diff --git a/docs/docsite/rst/dev_guide/testing/sanity/shebang.rst b/docs/docsite/rst/dev_guide/testing/sanity/shebang.rst new file mode 100644 index 00000000..cff2aa09 --- /dev/null +++ b/docs/docsite/rst/dev_guide/testing/sanity/shebang.rst @@ -0,0 +1,16 @@ +shebang +======= + +Most executable files should only use one of the following shebangs: + +- ``#!/bin/sh`` +- ``#!/bin/bash`` +- ``#!/usr/bin/make`` +- ``#!/usr/bin/env python`` +- ``#!/usr/bin/env bash`` + +NOTE: For ``#!/bin/bash``, any of the options ``eux`` may also be used, such as ``#!/bin/bash -eux``. + +This does not apply to Ansible modules, which should not be executable and must always use ``#!/usr/bin/python``. + +Some exceptions are permitted. Ask if you have questions. diff --git a/docs/docsite/rst/dev_guide/testing/sanity/shellcheck.rst b/docs/docsite/rst/dev_guide/testing/sanity/shellcheck.rst new file mode 100644 index 00000000..446ee1ee --- /dev/null +++ b/docs/docsite/rst/dev_guide/testing/sanity/shellcheck.rst @@ -0,0 +1,4 @@ +shellcheck +========== + +Static code analysis for shell scripts using the excellent `shellcheck <https://www.shellcheck.net/>`_ tool. 
diff --git a/docs/docsite/rst/dev_guide/testing/sanity/symlinks.rst b/docs/docsite/rst/dev_guide/testing/sanity/symlinks.rst new file mode 100644 index 00000000..017209bd --- /dev/null +++ b/docs/docsite/rst/dev_guide/testing/sanity/symlinks.rst @@ -0,0 +1,6 @@ +symlinks +======== + +Symbolic links are only permitted for files that exist to ensure proper tarball generation during a release. + +If other types of symlinks are needed for tests they must be created as part of the test. diff --git a/docs/docsite/rst/dev_guide/testing/sanity/test-constraints.rst b/docs/docsite/rst/dev_guide/testing/sanity/test-constraints.rst new file mode 100644 index 00000000..36ceb361 --- /dev/null +++ b/docs/docsite/rst/dev_guide/testing/sanity/test-constraints.rst @@ -0,0 +1,4 @@ +test-constraints +================ + +Constraints for test requirements should be in ``test/lib/ansible_test/_data/requirements/constraints.txt``. diff --git a/docs/docsite/rst/dev_guide/testing/sanity/update-bundled.rst b/docs/docsite/rst/dev_guide/testing/sanity/update-bundled.rst new file mode 100644 index 00000000..d8f19385 --- /dev/null +++ b/docs/docsite/rst/dev_guide/testing/sanity/update-bundled.rst @@ -0,0 +1,31 @@ +:orphan: + +update-bundled +============== + +Check whether any of our known bundled code needs to be updated for a new upstream release. + +This test can error in the following ways: + +* The bundled code is out of date with regard to the latest release on pypi. Update the code + to the new version and update the version in _BUNDLED_METADATA to solve this. + +* The code is lacking a _BUNDLED_METADATA variable. This typically happens when a bundled version + is updated and we forget to add a _BUNDLED_METADATA variable to the updated file. Once that is + added, this error should go away. + +* A file has a _BUNDLED_METADATA variable but the file isn't specified in + :file:`test/sanity/code-smell/update-bundled.py`. This typically happens when a new bundled + library is added. 
Add the file to the `get_bundled_libs()` function in the `update-bundled.py` + test script to solve this error. + +_BUNDLED_METADATA has the following fields: + +:pypi_name: Name of the bundled package on pypi + +:version: Version of the package that we are including here + +:version_constraints: Optional PEP440 specifier for the version range that we are bundling. + Currently, the only valid use of this is to follow a version that is + compatible with the Python stdlib when newer versions of the pypi package + implement a new API. diff --git a/docs/docsite/rst/dev_guide/testing/sanity/use-argspec-type-path.rst b/docs/docsite/rst/dev_guide/testing/sanity/use-argspec-type-path.rst new file mode 100644 index 00000000..e06d83dd --- /dev/null +++ b/docs/docsite/rst/dev_guide/testing/sanity/use-argspec-type-path.rst @@ -0,0 +1,10 @@ +use-argspec-type-path +===================== + +The AnsibleModule argument_spec knows of several types beyond the standard python types. One of +these is ``path``. When used, type ``path`` ensures that an argument is a string and expands any +shell variables and tilde characters. + +This test looks for use of :func:`os.path.expanduser <python:os.path.expanduser>` in modules. When found, it tells the user to +replace it with ``type='path'`` in the module's argument_spec or list it as a false positive in the +test. diff --git a/docs/docsite/rst/dev_guide/testing/sanity/use-compat-six.rst b/docs/docsite/rst/dev_guide/testing/sanity/use-compat-six.rst new file mode 100644 index 00000000..1f415005 --- /dev/null +++ b/docs/docsite/rst/dev_guide/testing/sanity/use-compat-six.rst @@ -0,0 +1,4 @@ +use-compat-six +============== + +Use ``six`` from ``module_utils`` instead of ``six``. 
diff --git a/docs/docsite/rst/dev_guide/testing/sanity/validate-modules.rst b/docs/docsite/rst/dev_guide/testing/sanity/validate-modules.rst new file mode 100644 index 00000000..efb58f20 --- /dev/null +++ b/docs/docsite/rst/dev_guide/testing/sanity/validate-modules.rst @@ -0,0 +1,6 @@ +validate-modules +================ + +Analyze modules for common issues in code and documentation. + +See :ref:`testing_validate-modules` for more information. diff --git a/docs/docsite/rst/dev_guide/testing/sanity/yamllint.rst b/docs/docsite/rst/dev_guide/testing/sanity/yamllint.rst new file mode 100644 index 00000000..5822bb7c --- /dev/null +++ b/docs/docsite/rst/dev_guide/testing/sanity/yamllint.rst @@ -0,0 +1,4 @@ +yamllint +======== + +Check YAML files for syntax and formatting issues. diff --git a/docs/docsite/rst/dev_guide/testing_compile.rst b/docs/docsite/rst/dev_guide/testing_compile.rst new file mode 100644 index 00000000..5c22194d --- /dev/null +++ b/docs/docsite/rst/dev_guide/testing_compile.rst @@ -0,0 +1,76 @@ +:orphan: + +.. _testing_compile: + +************* +Compile Tests +************* + +.. contents:: Topics + +Overview +======== + +Compile tests check source files for valid syntax on all supported python versions: + +- 2.4 (Ansible 2.3 only) +- 2.6 +- 2.7 +- 3.5 +- 3.6 +- 3.7 +- 3.8 +- 3.9 + +NOTE: In Ansible 2.4 and earlier the compile test was provided by a dedicated sub-command ``ansible-test compile`` instead of a sanity test using ``ansible-test sanity --test compile``. + +Running compile tests locally +============================= + +Compile tests can be run across the whole code base by doing: + +.. code:: shell + + cd /path/to/ansible/source + source hacking/env-setup + ansible-test sanity --test compile + +Against a single file by doing: + +.. code:: shell + + ansible-test sanity --test compile lineinfile + +Or against a specific Python version by doing: + +.. 
code:: shell + + ansible-test sanity --test compile --python 2.7 lineinfile + +For advanced usage see the help: + +.. code:: shell + + ansible-test sanity --help + + +Installing dependencies +======================= + +``ansible-test`` has a number of dependencies , for ``compile`` tests we suggest running the tests with ``--local``, which is the default + +The dependencies can be installed using the ``--requirements`` argument. For example: + +.. code:: shell + + ansible-test sanity --test compile --requirements lineinfile + + + +The full list of requirements can be found at `test/lib/ansible_test/_data/requirements <https://github.com/ansible/ansible/tree/devel/test/lib/ansible_test/_data/requirements>`_. Requirements files are named after their respective commands. See also the `constraints <https://github.com/ansible/ansible/blob/devel/test/lib/ansible_test/_data/requirements/constraints.txt>`_ applicable to all commands. + + +Extending compile tests +======================= + +If you believe changes are needed to the compile tests please add a comment on the `Testing Working Group Agenda <https://github.com/ansible/community/blob/master/meetings/README.md>`_ so it can be discussed. diff --git a/docs/docsite/rst/dev_guide/testing_documentation.rst b/docs/docsite/rst/dev_guide/testing_documentation.rst new file mode 100644 index 00000000..f9989395 --- /dev/null +++ b/docs/docsite/rst/dev_guide/testing_documentation.rst @@ -0,0 +1,36 @@ +:orphan: + +.. _testing_module_documentation: + +**************************** +Testing module documentation +**************************** + +Before you submit a module for inclusion in the main Ansible repo, you must test your module documentation for correct HTML rendering and to ensure that the argspec matches the documentation in your Python file. The community pages offer more information on :ref:`testing reStructuredText documentation <testing_documentation_locally>`. 
+ +To check the HTML output of your module documentation: + +#. Ensure working :ref:`development environment <environment_setup>`. +#. Install required Python packages (drop '--user' in venv/virtualenv): + + .. code-block:: bash + + pip install --user -r requirements.txt + pip install --user -r docs/docsite/requirements.txt + +#. Ensure your module is in the correct directory: ``lib/ansible/modules/$CATEGORY/mymodule.py``. +#. Build HTML from your module documentation: ``MODULES=mymodule make webdocs``. +#. To build the HTML documentation for multiple modules, use a comma-separated list of module names: ``MODULES=mymodule,mymodule2 make webdocs``. +#. View the HTML page at ``file:///path/to/docs/docsite/_build/html/modules/mymodule_module.html``. + +To ensure that your module documentation matches your ``argument_spec``: + +#. Install required Python packages (drop '--user' in venv/virtualenv): + + .. code-block:: bash + + pip install --user -r test/lib/ansible_test/_data/requirements/sanity.txt + +#. run the ``validate-modules`` test:: + + ansible-test sanity --test validate-modules mymodule diff --git a/docs/docsite/rst/dev_guide/testing_httptester.rst b/docs/docsite/rst/dev_guide/testing_httptester.rst new file mode 100644 index 00000000..a8806371 --- /dev/null +++ b/docs/docsite/rst/dev_guide/testing_httptester.rst @@ -0,0 +1,27 @@ +:orphan: + +********** +httptester +********** + +.. contents:: Topics + +Overview +======== + +``httptester`` is a docker container used to host certain resources required by :ref:`testing_integration`. This is to avoid CI tests requiring external resources (such as git or package repos) which, if temporarily unavailable, would cause tests to fail. + +HTTP Testing endpoint which provides the following capabilities: + +* httpbin +* nginx +* SSL +* SNI + + +Source files can be found in the `http-test-container <https://github.com/ansible/http-test-container>`_ repository. 
+ +Extending httptester +==================== + +If you have sometime to improve ``httptester`` please add a comment on the `Testing Working Group Agenda <https://github.com/ansible/community/blob/master/meetings/README.md>`_ to avoid duplicated effort. diff --git a/docs/docsite/rst/dev_guide/testing_integration.rst b/docs/docsite/rst/dev_guide/testing_integration.rst new file mode 100644 index 00000000..0880e5b1 --- /dev/null +++ b/docs/docsite/rst/dev_guide/testing_integration.rst @@ -0,0 +1,236 @@ +:orphan: + +.. _testing_integration: + +***************** +Integration tests +***************** + +.. contents:: Topics + +The Ansible integration Test system. + +Tests for playbooks, by playbooks. + +Some tests may require credentials. Credentials may be specified with `credentials.yml`. + +Some tests may require root. + +.. note:: + Every new module and plugin should have integration tests, even if the tests cannot be run on Ansible CI infrastructure. + In this case, the tests should be marked with the ``unsupported`` alias in `aliases file <https://docs.ansible.com/ansible/latest/dev_guide/testing/sanity/integration-aliases.html>`_. + +Quick Start +=========== + +It is highly recommended that you install and activate the ``argcomplete`` python package. +It provides tab completion in ``bash`` for the ``ansible-test`` test runner. + +Configuration +============= + +ansible-test command +-------------------- + +The example below assumes ``bin/`` is in your ``$PATH``. An easy way to achieve that +is to initialize your environment with the ``env-setup`` command:: + + source hacking/env-setup + ansible-test --help + +You can also call ``ansible-test`` with the full path:: + + bin/ansible-test --help + +integration_config.yml +---------------------- + +Making your own version of ``integration_config.yml`` can allow for setting some +tunable parameters to help run the tests better in your environment. 
Some +tests (for example, cloud tests) will only run when access credentials are provided. For more +information about supported credentials, refer to the various ``cloud-config-*.template`` +files in the ``test/integration/`` directory. + +Prerequisites +============= + +Some tests assume things like hg, svn, and git are installed, and in path. Some tests +(such as those for Amazon Web Services) need separate definitions, which will be covered +later in this document. + +(Complete list pending) + +Non-destructive Tests +===================== + +These tests will modify files in subdirectories, but will not do things that install or remove packages or things +outside of those test subdirectories. They will also not reconfigure or bounce system services. + +.. note:: Running integration tests within Docker + + To protect your system from any potential changes caused by integration tests, and to ensure a sensible set of dependencies are available we recommend that you always run integration tests with the ``--docker`` option, for example ``--docker centos8``. See the `list of supported docker images <https://github.com/ansible/ansible/blob/devel/test/lib/ansible_test/_data/completion/docker.txt>`_ for options (the ``default`` image is used for sanity and unit tests, as well as for platform independent integration tests such as those for cloud modules). + +.. note:: Avoiding pulling new Docker images + + Use the ``--docker-no-pull`` option to avoid pulling the latest container image. This is required when using custom local images that are not available for download. 
+ +Run as follows for all POSIX platform tests executed by our CI system in a fedora32 docker container:: + + ansible-test integration shippable/ --docker fedora32 + +You can target a specific tests as well, such as for individual modules:: + + ansible-test integration ping + +You can use the ``-v`` option to make the output more verbose:: + + ansible-test integration lineinfile -vvv + +Use the following command to list all the available targets:: + + ansible-test integration --list-targets + +.. note:: Bash users + + If you use ``bash`` with ``argcomplete``, obtain a full list by doing: ``ansible-test integration <tab><tab>`` + +Destructive Tests +================= + +These tests are allowed to install and remove some trivial packages. You will likely want to devote these +to a virtual environment, such as Docker. They won't reformat your filesystem:: + + ansible-test integration destructive/ --docker fedora32 + +Windows Tests +============= + +These tests exercise the ``winrm`` connection plugin and Windows modules. You'll +need to define an inventory with a remote Windows 2008 or 2012 Server to use +for testing, and enable PowerShell Remoting to continue. + +Running these tests may result in changes to your Windows host, so don't run +them against a production/critical Windows environment. + +Enable PowerShell Remoting (run on the Windows host via Remote Desktop):: + + Enable-PSRemoting -Force + +Define Windows inventory:: + + cp inventory.winrm.template inventory.winrm + ${EDITOR:-vi} inventory.winrm + +Run the Windows tests executed by our CI system:: + + ansible-test windows-integration -v shippable/ + +Tests in Docker containers +========================== + +If you have a Linux system with Docker installed, running integration tests using the same Docker containers used by +the Ansible continuous integration (CI) system is recommended. + +.. 
note:: Docker on non-Linux + + Using Docker Engine to run Docker on a non-Linux host (such as macOS) is not recommended. + Some tests may fail, depending on the image used for testing. + Using the ``--docker-privileged`` option when running ``integration`` (not ``network-integration`` or ``windows-integration``) may resolve the issue. + +Running Integration Tests +------------------------- + +To run all CI integration test targets for POSIX platforms in a Ubuntu 18.04 container:: + + ansible-test integration shippable/ --docker ubuntu1804 + +You can also run specific tests or select a different Linux distribution. +For example, to run tests for the ``ping`` module on a Ubuntu 18.04 container:: + + ansible-test integration ping --docker ubuntu1804 + +Container Images +---------------- + +Python 2 +```````` + +Most container images are for testing with Python 2: + + - centos6 + - centos7 + - fedora28 + - opensuse15py2 + - ubuntu1404 + - ubuntu1604 + +Python 3 +```````` + +To test with Python 3 use the following images: + + - centos8 + - fedora32 + - opensuse15 + - ubuntu1804 + + +Legacy Cloud Tests +================== + +Some of the cloud tests run as normal integration tests, and others run as legacy tests; see the +:ref:`testing_integration_legacy` page for more information. + + +Other configuration for Cloud Tests +=================================== + +In order to run some tests, you must provide access credentials in a file named +``cloud-config-aws.yml`` or ``cloud-config-cs.ini`` in the test/integration +directory. Corresponding .template files are available for for syntax help. The newer AWS +tests now use the file test/integration/cloud-config-aws.yml + +IAM policies for AWS +==================== + +Ansible needs fairly wide ranging powers to run the tests in an AWS account. This rights can be provided to a dedicated user. These need to be configured before running the test. 
+ +testing-policies +---------------- + +The GitHub repository `mattclay/aws-terminator <https://github.com/mattclay/aws-terminator/>`_ +contains two sets of policies used for all existing AWS module integratoin tests. +The `hacking/aws_config/setup_iam.yml` playbook can be used to setup two groups: + + - `ansible-integration-ci` will have the policies applied necessary to run any + integration tests not marked as `unsupported` and are designed to mirror those + used by Ansible's CI. + - `ansible-integration-unsupported` will have the additional policies applied + necessary to run the integraion tests marked as `unsupported` including tests + for managing IAM roles, users and groups. + +Once the groups have been created, you'll need to create a user and make the user a member of these +groups. The policies are designed to minimize the rights of that user. Please note that while this policy does limit +the user to one region, this does not fully restrict the user (primarily due to the limitations of the Amazon ARN +notation). The user will still have wide privileges for viewing account definitions, and will also able to manage +some resources that are not related to testing (for example, AWS lambdas with different names). Tests should not +be run in a primary production account in any case. + +Other Definitions required +-------------------------- + +Apart from installing the policy and giving it to the user identity running the tests, a +lambda role `ansible_integration_tests` has to be created which has lambda basic execution +privileges. + + +Network Tests +============= + +For guidance on writing network test see :ref:`testing_resource_modules`. + + +Where to find out more +====================== + +If you'd like to know more about the plans for improving testing Ansible, join the `Testing Working Group <https://github.com/ansible/community/blob/master/meetings/README.md>`_. 
diff --git a/docs/docsite/rst/dev_guide/testing_integration_legacy.rst b/docs/docsite/rst/dev_guide/testing_integration_legacy.rst new file mode 100644 index 00000000..759285e3 --- /dev/null +++ b/docs/docsite/rst/dev_guide/testing_integration_legacy.rst @@ -0,0 +1,108 @@ +:orphan: + +.. _testing_integration_legacy: + +******************************************* +Testing using the Legacy Integration system +******************************************* + +.. contents:: Topics + +This page details how to run the integration tests that haven't been ported to the new ``ansible-test`` framework. + +The following areas are still tested using the legacy ``make tests`` command: + +* amazon (some) +* azure +* cloudflare +* cloudscale +* cloudstack +* consul +* exoscale +* gce +* jenkins +* rackspace + +Over time the above list will be reduced as tests are ported to the ``ansible-test`` framework. + + +Running Cloud Tests +==================== + +Cloud tests exercise capabilities of cloud modules (for example, ec2_key). These are +not 'tests run in the cloud' so much as tests that leverage the cloud modules +and are organized by cloud provider. + +Some AWS tests may use environment variables. It is recommended to either unset any AWS environment variables (such as ``AWS_DEFAULT_PROFILE``, ``AWS_SECRET_ACCESS_KEY``, and so on) or be sure that the environment variables match the credentials provided in ``credentials.yml`` to ensure the tests run with consistency to their full capability on the expected account. See `AWS CLI docs <https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html>`_ for information on creating a profile. + +Subsets of tests may be run by ``#commenting`` out unnecessary roles in the appropriate playbook, such as ``test/integration/amazon.yml``. + +In order to run cloud tests, you must provide access credentials in a file +named ``credentials.yml``. A sample credentials file named +``credentials.template`` is available for syntax help. 
+ +Provide cloud credentials:: + + cp credentials.template credentials.yml + ${EDITOR:-vi} credentials.yml + + +Other configuration +=================== + +In order to run some tests, you must provide access credentials in a file named +``credentials.yml``. A sample credentials file named ``credentials.template`` is available +for syntax help. + +IAM policies for AWS +==================== + +In order to run the tests in an AWS account ansible needs fairly wide ranging powers which +can be provided to a dedicated user or temporary credentials using a specific policy +configured in the AWS account. + +testing-iam-policy.json.j2 +-------------------------- + +The testing-iam-policy.json.j2 file contains a policy which can be given to the user +running the tests to give close to minimum rights required to run the tests. Please note +that this does not fully restrict the user; The user has wide privileges for viewing +account definitions and is also able to manage some resources that are not related to +testing (for example, AWS lambdas with different names) primarily due to the limitations of the +Amazon ARN notation. At the very least the policy limits the user to one region, however +tests should not be run in a primary production account in any case. + +Other Definitions required +-------------------------- + +Apart from installing the policy and giving it to the user identity running +the tests, a lambda role `ansible_integration_tests` has to be created which +has lambda basic execution privileges. + + +Running Tests +============= + +The tests are invoked via a ``Makefile``. + +If you haven't already got Ansible available use the local checkout by doing:: + + source hacking/env-setup + +Run the tests by doing:: + + cd test/integration/ + # TARGET is the name of the test from the list at the top of this page + #make TARGET + # for example + make amazon + # To run all cloud tests you can do: + make cloud + +.. 
warning:: Possible cost of running cloud tests + + Running cloud integration tests will create and destroy cloud + resources. Running these tests may result in additional fees associated with + your cloud account. Care is taken to ensure that created resources are + removed. However, it is advisable to inspect your AWS console to ensure no + unexpected resources are running. diff --git a/docs/docsite/rst/dev_guide/testing_pep8.rst b/docs/docsite/rst/dev_guide/testing_pep8.rst new file mode 100644 index 00000000..92630995 --- /dev/null +++ b/docs/docsite/rst/dev_guide/testing_pep8.rst @@ -0,0 +1,24 @@ +:orphan: + +.. _testing_pep8: + +***** +PEP 8 +***** + +.. contents:: Topics + +`PEP 8`_ style guidelines are enforced by `pycodestyle`_ on all python files in the repository by default. + +Running Locally +=============== + +The `PEP 8`_ check can be run locally with:: + + + ansible-test sanity --test pep8 [file-or-directory-path-to-check] ... + + + +.. _PEP 8: https://www.python.org/dev/peps/pep-0008/ +.. _pycodestyle: https://pypi.org/project/pycodestyle/ diff --git a/docs/docsite/rst/dev_guide/testing_running_locally.rst b/docs/docsite/rst/dev_guide/testing_running_locally.rst new file mode 100644 index 00000000..964a9e8d --- /dev/null +++ b/docs/docsite/rst/dev_guide/testing_running_locally.rst @@ -0,0 +1,89 @@ +:orphan: + +.. _testing_running_locally: + +*************** +Testing Ansible +*************** + +This document describes how to: + +* Run tests locally using ``ansible-test`` +* Extend + +.. contents:: + :local: + +Requirements +============ + +There are no special requirements for running ``ansible-test`` on Python 2.7 or later. +The ``argparse`` package is required for Python 2.6. +The requirements for each ``ansible-test`` command are covered later. + + +Test Environments +================= + +Most ``ansible-test`` commands support running in one or more isolated test environments to simplify testing. 
+ + +Remote +------ + +The ``--remote`` option runs tests in a cloud hosted environment. +An API key is required to use this feature. + + Recommended for integration tests. + +See the `list of supported platforms and versions <https://github.com/ansible/ansible/blob/devel/test/lib/ansible_test/_data/completion/remote.txt>`_ for additional details. + +Environment Variables +--------------------- + +When using environment variables to manipulate tests there are some limitations to keep in mind. Environment variables are: + +* Not propagated from the host to the test environment when using the ``--docker`` or ``--remote`` options. +* Not exposed to the test environment unless whitelisted in ``test/lib/ansible_test/_internal/util.py`` in the ``common_environment`` function. + + Example: ``ANSIBLE_KEEP_REMOTE_FILES=1`` can be set when running ``ansible-test integration --venv``. However, using the ``--docker`` option would + require running ``ansible-test shell`` to gain access to the Docker environment. Once at the shell prompt, the environment variable could be set + and the tests executed. This is useful for debugging tests inside a container by following the + :ref:`Debugging AnsibleModule-based modules <debugging_modules>` instructions. + +Interactive Shell +================= + +Use the ``ansible-test shell`` command to get an interactive shell in the same environment used to run tests. Examples: + +* ``ansible-test shell --docker`` - Open a shell in the default docker container. + +* ``ansible-test shell --venv --python 3.6`` - Open a shell in a Python 3.6 virtual environment. + + +Code Coverage +============= + +Code coverage reports make it easy to identify untested code for which more tests should +be written. Online reports are available but only cover the ``devel`` branch (see +:ref:`developing_testing`). For new code local reports are needed. + +Add the ``--coverage`` option to any test command to collect code coverage data. 
If you +aren't using the ``--venv`` or ``--docker`` options which create an isolated python +environment then you may have to use the ``--requirements`` option to ensure that the +correct version of the coverage module is installed:: + + ansible-test coverage erase + ansible-test units --coverage apt + ansible-test integration --coverage aws_lambda + ansible-test coverage html + + +Reports can be generated in several different formats: + +* ``ansible-test coverage report`` - Console report. +* ``ansible-test coverage html`` - HTML report. +* ``ansible-test coverage xml`` - XML report. + +To clear data between test runs, use the ``ansible-test coverage erase`` command. For a full list of features see the online help:: + + ansible-test coverage --help diff --git a/docs/docsite/rst/dev_guide/testing_sanity.rst b/docs/docsite/rst/dev_guide/testing_sanity.rst new file mode 100644 index 00000000..a4f99edd --- /dev/null +++ b/docs/docsite/rst/dev_guide/testing_sanity.rst @@ -0,0 +1,53 @@ +:orphan: + +.. _testing_sanity: + +************ +Sanity Tests +************ + +.. contents:: Topics + +Sanity tests are made up of scripts and tools used to perform static code analysis. +The primary purpose of these tests is to enforce Ansible coding standards and requirements. + +Tests are run with ``ansible-test sanity``. +All available tests are run unless the ``--test`` option is used. + + +How to run +========== + +.. note:: + To run sanity tests using docker, always use the default docker image + by passing the ``--docker`` or ``--docker default`` argument. + +.. note:: + When using docker and the ``--base-branch`` argument, + also use the ``--docker-keep-git`` argument to avoid git related errors. + +.. 
code:: shell + + source hacking/env-setup + + # Run all sanity tests + ansible-test sanity + + # Run all sanity tests including disabled ones + ansible-test sanity --allow-disabled + + # Run all sanity tests against against certain files + ansible-test sanity lib/ansible/modules/files/template.py + + # Run all tests inside docker (good if you don't have dependencies installed) + ansible-test sanity --docker default + + # Run validate-modules against a specific file + ansible-test sanity --test validate-modules lib/ansible/modules/files/template.py + +Available Tests +=============== + +Tests can be listed with ``ansible-test sanity --list-tests``. + +See the full list of :ref:`sanity tests <all_sanity_tests>`, which details the various tests and details how to fix identified issues. diff --git a/docs/docsite/rst/dev_guide/testing_units.rst b/docs/docsite/rst/dev_guide/testing_units.rst new file mode 100644 index 00000000..7573da6f --- /dev/null +++ b/docs/docsite/rst/dev_guide/testing_units.rst @@ -0,0 +1,213 @@ +:orphan: + +.. _testing_units: + +********** +Unit Tests +********** + +Unit tests are small isolated tests that target a specific library or module. Unit tests +in Ansible are currently the only way of driving tests from python within Ansible's +continuous integration process. This means that in some circumstances the tests may be a +bit wider than just units. + +.. contents:: Topics + +Available Tests +=============== + +Unit tests can be found in `test/units +<https://github.com/ansible/ansible/tree/devel/test/units>`_. Notice that the directory +structure of the tests matches that of ``lib/ansible/``. + +Running Tests +============= + +.. note:: + To run unit tests using docker, always use the default docker image + by passing the ``--docker`` or ``--docker default`` argument. + +The Ansible unit tests can be run across the whole code base by doing: + +.. 
code:: shell + + cd /path/to/ansible/source + source hacking/env-setup + ansible-test units --docker -v + +Against a single file by doing: + +.. code:: shell + + ansible-test units --docker -v apt + +Or against a specific Python version by doing: + +.. code:: shell + + ansible-test units --docker -v --python 2.7 apt + +If you are running unit tests against things other than modules, such as module utilities, specify the whole file path: + +.. code:: shell + + ansible-test units --docker -v test/units/module_utils/basic/test_imports.py + +For advanced usage see the online help:: + + ansible-test units --help + +You can also run tests in Ansible's continuous integration system by opening a pull +request. This will automatically determine which tests to run based on the changes made +in your pull request. + + +Installing dependencies +======================= + +If you are running ``ansible-test`` with the ``--docker`` or ``--venv`` option you do not need to install dependencies manually. + +Otherwise you can install dependencies using the ``--requirements`` option, which will +install all the required dependencies needed for unit tests. For example: + +.. code:: shell + + ansible-test units --python 2.7 --requirements apache2_module + + +The list of unit test requirements can be found at `test/units/requirements.txt +<https://github.com/ansible/ansible/tree/devel/test/units/requirements.txt>`_. + +This does not include the list of unit test requirements for ``ansible-test`` itself, +which can be found at `test/lib/ansible_test/_data/requirements/units.txt +<https://github.com/ansible/ansible/tree/devel/test/lib/ansible_test/_data/requirements/units.txt>`_. + +See also the `constraints +<https://github.com/ansible/ansible/blob/devel/test/lib/ansible_test/_data/requirements/constraints.txt>`_ +applicable to all test commands. + + +Extending unit tests +==================== + + +.. 
warning:: What a unit test isn't + + If you start writing a test that requires external services then + you may be writing an integration test, rather than a unit test. + + +Structuring Unit Tests +`````````````````````` + +Ansible drives unit tests through `pytest <https://docs.pytest.org/en/latest/>`_. This +means that tests can either be written as simple functions which are included in any file +named like ``test_<something>.py`` or as classes. + +Here is an example of a function:: + + #this function will be called simply because it is called test_*() + + def test_add(): + a = 10 + b = 23 + c = 33 + assert a + b == c + +Here is an example of a class:: + + import unittest + + class AddTester(unittest.TestCase): + + def setUp(self): + self.a = 10 + self.b = 23 + + # this function will be run as a test because its name starts with test_ + def test_add(self): + c = 33 + assert self.a + self.b == c + + # this function will be run as a test because its name starts with test_ + def test_subtract(self): + c = -13 + assert self.a - self.b == c + +Both methods work fine in most circumstances; the function-based interface is simpler and +quicker and so that's probably where you should start when you are just trying to add a +few basic tests for a module. The class-based test allows more tidy set up and tear down +of pre-requisites, so if you have many test cases for your module you may want to refactor +to use that. + +Assertions using the simple ``assert`` function inside the tests will give full +information on the cause of the failure with a trace-back of functions called during the +assertion. This means that plain asserts are recommended over other external assertion +libraries. + +A number of the unit test suites include functions that are shared between several +modules, especially in the networking arena. In these cases a file is created in the same +directory, which is then included directly. + + +Module test case common code +```````````````````````````` + +Keep common code as specific as possible within the `test/units/` directory structure. 
+ +Don't import common unit test code from directories outside the current or parent directories. + +Don't import other unit tests from a unit test. Any common code should be in dedicated +files that aren't themselves tests. + + +Fixtures files +`````````````` + +To mock out fetching results from devices, or provide other complex data structures that +come from external libraries, you can use ``fixtures`` to read in pre-generated data. + +You can check how `fixtures <https://github.com/ansible/ansible/tree/devel/test/units/module_utils/facts/fixtures/cpuinfo>`_ +are used in `cpuinfo fact tests <https://github.com/ansible/ansible/blob/9f72ff80e3fe173baac83d74748ad87cb6e20e64/test/units/module_utils/facts/hardware/linux_data.py#L384>`_. + +If you are simulating APIs you may find that Python placebo is useful. See +:ref:`testing_units_modules` for more information. + + +Code Coverage For New or Updated Unit Tests +``````````````````````````````````````````` +New code will be missing from the codecov.io coverage reports (see :ref:`developing_testing`), so +local reporting is needed. Most ``ansible-test`` commands allow you to collect code +coverage; this is particularly useful to indicate where to extend testing. + +To collect coverage data add the ``--coverage`` argument to your ``ansible-test`` command line: + +.. code:: shell + + ansible-test units --coverage apt + ansible-test coverage html + +Results will be written to ``test/results/reports/coverage/index.html`` + +Reports can be generated in several different formats: + +* ``ansible-test coverage report`` - Console report. +* ``ansible-test coverage html`` - HTML report. +* ``ansible-test coverage xml`` - XML report. + +To clear data between test runs, use the ``ansible-test coverage erase`` command. See +:ref:`testing_running_locally` for more information about generating coverage +reports. + + +.. 
seealso:: + + :ref:`testing_units_modules` + Special considerations for unit testing modules + :ref:`testing_running_locally` + Running tests locally including gathering and reporting coverage data + `Python 3 documentation - 26.4. unittest — Unit testing framework <https://docs.python.org/3/library/unittest.html>`_ + The documentation of the unittest framework in python 3 + `Python 2 documentation - 25.3. unittest — Unit testing framework <https://docs.python.org/3/library/unittest.html>`_ + The documentation of the earliest supported unittest framework - from Python 2.6 + `pytest: helps you write better programs <https://docs.pytest.org/en/latest/>`_ + The documentation of pytest - the framework actually used to run Ansible unit tests diff --git a/docs/docsite/rst/dev_guide/testing_units_modules.rst b/docs/docsite/rst/dev_guide/testing_units_modules.rst new file mode 100644 index 00000000..88763eb0 --- /dev/null +++ b/docs/docsite/rst/dev_guide/testing_units_modules.rst @@ -0,0 +1,563 @@ +:orphan: + +.. _testing_units_modules: + +**************************** +Unit Testing Ansible Modules +**************************** + +.. highlight:: python + +.. contents:: Topics + +Introduction +============ + +This document explains why, how and when you should use unit tests for Ansible modules. +The document doesn't apply to other parts of Ansible for which the recommendations are +normally closer to the Python standard. There is basic documentation for Ansible unit +tests in the developer guide :ref:`testing_units`. This document should +be readable for a new Ansible module author. If you find it incomplete or confusing, +please open a bug or ask for help on Ansible IRC. + +What Are Unit Tests? +==================== + +Ansible includes a set of unit tests in the :file:`test/units` directory. These tests primarily cover the +internals but can also cover Ansible modules. 
The structure of the unit tests matches +the structure of the code base, so the tests that reside in the :file:`test/units/modules/` directory +are organized by module groups. + +Integration tests can be used for most modules, but there are situations where +cases cannot be verified using integration tests. This means that Ansible unit test cases +may extend beyond testing only minimal units and in some cases will include some +level of functional testing. + + +Why Use Unit Tests? +=================== + +Ansible unit tests have advantages and disadvantages. It is important to understand these. +Advantages include: + +* Most unit tests are much faster than most Ansible integration tests. The complete suite + of unit tests can be run regularly by a developer on their local system. +* Unit tests can be run by developers who don't have access to the system which the module is + designed to work on, allowing a level of verification that changes to core functions + haven't broken module expectations. +* Unit tests can easily substitute system functions allowing testing of software that + would be impractical. For example, the ``sleep()`` function can be replaced and we check + that a ten minute sleep was called without actually waiting ten minutes. +* Unit tests are run on different Python versions. This allows us to + ensure that the code behaves in the same way on different Python versions. + +There are also some potential disadvantages of unit tests. 
Unit tests don't normally +directly test actual useful valuable features of software, instead just internal +implementation + +* Unit tests that test the internal, non-visible features of software may make + refactoring difficult if those internal features have to change (see also naming in How + below) +* Even if the internal feature is working correctly it is possible that there will be a + problem between the internal code tested and the actual result delivered to the user + +Normally the Ansible integration tests (which are written in Ansible YAML) provide better +testing for most module functionality. If those tests already test a feature and perform +well there may be little point in providing a unit test covering the same area as well. + +When To Use Unit Tests +====================== + +There are a number of situations where unit tests are a better choice than integration +tests. For example, testing things which are impossible, slow or very difficult to test +with integration tests, such as: + +* Forcing rare / strange / random situations that can't be forced, such as specific network + failures and exceptions +* Extensive testing of slow configuration APIs +* Situations where the integration tests cannot be run as part of the main Ansible + continuous integration running in Shippable. + + + +Providing quick feedback +------------------------ + +Example: + A single step of the rds_instance test cases can take up to 20 + minutes (the time to create an RDS instance in Amazon). The entire + test run can last for well over an hour. All 16 of the unit tests + complete execution in less than 2 seconds. + +The time saving provided by being able to run the code in a unit test makes it worth +creating a unit test when bug fixing a module, even if those tests do not often identify +problems later. 
As a basic goal, every module should have at least one unit test which +will give quick feedback in easy cases without having to wait for the integration tests to +complete. + +Ensuring correct use of external interfaces +------------------------------------------- + +Unit tests can check the way in which external services are run to ensure that they match +specifications or are as efficient as possible *even when the final output will not be changed*. + +Example: + Package managers are often far more efficient when installing multiple packages at once + rather than each package separately. The final result is the + same: the packages are all installed, so the efficiency is difficult to verify through + integration tests. By providing a mock package manager and verifying that it is called + once, we can build a valuable test for module efficiency. + +Another related use is in the situation where an API has versions which behave +differently. A programmer working on a new version may change the module to work with the +new API version and unintentionally break the old version. A test case +which checks that the call happens properly for the old version can help avoid the +problem. In this situation it is very important to include version numbering in the test case +name (see `Naming unit tests`_ below). + +Providing specific design tests +-------------------------------- + +By building a requirement for a particular part of the +code and then coding to that requirement, unit tests _can_ sometimes improve the code and +help future developers understand that code. + +Unit tests that test internal implementation details of code, on the other hand, almost +always do more harm than good. Testing that your packages to install are stored in a list +would slow down and confuse a future developer who might need to change that list into a +dictionary for efficiency. 
This problem can be reduced somewhat with clear test naming so +that the future developer immediately knows to delete the test case, but it is often +better to simply leave out the test case altogether and test for a real valuable feature +of the code, such as installing all of the packages supplied as arguments to the module. + + +How to unit test Ansible modules +================================ + +There are a number of techniques for unit testing modules. Beware that most +modules without unit tests are structured in a way that makes testing quite difficult and +can lead to very complicated tests which need more work than the code. Effectively using unit +tests may lead you to restructure your code. This is often a good thing and leads +to better code overall. Good restructuring can make your code clearer and easier to understand. + + +Naming unit tests +----------------- + +Unit tests should have logical names. If a developer working on the module being tested +breaks the test case, it should be easy to figure what the unit test covers from the name. +If a unit test is designed to verify compatibility with a specific software or API version +then include the version in the name of the unit test. + +As an example, ``test_v2_state_present_should_call_create_server_with_name()`` would be a +good name, ``test_create_server()`` would not be. + + +Use of Mocks +------------ + +Mock objects (from https://docs.python.org/3/library/unittest.mock.html) can be very +useful in building unit tests for special / difficult cases, but they can also +lead to complex and confusing coding situations. One good use for mocks would be in +simulating an API. As for 'six', the 'mock' python package is bundled with Ansible (use +``import units.compat.mock``). + +Ensuring failure cases are visible with mock objects +---------------------------------------------------- + +Functions like :meth:`module.fail_json` are normally expected to terminate execution. 
When you +run with a mock module object this doesn't happen since the mock always returns another mock +from a function call. You can set up the mock to raise an exception as shown above, or you can +assert that these functions have not been called in each test. For example:: + + module = MagicMock() + function_to_test(module, argument) + module.fail_json.assert_not_called() + +This applies not only to calling the main module but almost any other +function in a module which gets the module object. + + +Mocking of the actual module +---------------------------- + +The setup of an actual module is quite complex (see `Passing Arguments`_ below) and often +isn't needed for most functions which use a module. Instead you can use a mock object as +the module and create any module attributes needed by the function you are testing. If +you do this, beware that the module exit functions need special handling as mentioned +above, either by throwing an exception or ensuring that they haven't been called. For example:: + + class AnsibleExitJson(Exception): + """Exception class to be raised by module.exit_json and caught by the test case""" + pass + + # you may also do the same to fail json + module = MagicMock() + module.exit_json.side_effect = AnsibleExitJson(Exception) + with self.assertRaises(AnsibleExitJson) as result: + return = my_module.test_this_function(module, argument) + module.fail_json.assert_not_called() + assert return["changed"] == True + +API definition with unit test cases +----------------------------------- + +API interaction is usually best tested with the function tests defined in Ansible's +integration testing section, which run against the actual API. There are several cases +where the unit tests are likely to work better. 
+ +Defining a module against an API specification +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +This case is especially important for modules interacting with web services, which provide +an API that Ansible uses but which are beyond the control of the user. + +By writing a custom emulation of the calls that return data from the API, we can ensure +that only the features which are clearly defined in the specification of the API are +present in the message. This means that we can check that we use the correct +parameters and nothing else. + + +*Example: in rds_instance unit tests a simple instance state is defined*:: + + def simple_instance_list(status, pending): + return {u'DBInstances': [{u'DBInstanceArn': 'arn:aws:rds:us-east-1:1234567890:db:fakedb', + u'DBInstanceStatus': status, + u'PendingModifiedValues': pending, + u'DBInstanceIdentifier': 'fakedb'}]} + +This is then used to create a list of states:: + + rds_client_double = MagicMock() + rds_client_double.describe_db_instances.side_effect = [ + simple_instance_list('rebooting', {"a": "b", "c": "d"}), + simple_instance_list('available', {"c": "d", "e": "f"}), + simple_instance_list('rebooting', {"a": "b"}), + simple_instance_list('rebooting', {"e": "f", "g": "h"}), + simple_instance_list('rebooting', {}), + simple_instance_list('available', {"g": "h", "i": "j"}), + simple_instance_list('rebooting', {"i": "j", "k": "l"}), + simple_instance_list('available', {}), + simple_instance_list('available', {}), + ] + +These states are then used as returns from a mock object to ensure that the ``await`` function +waits through all of the states that would mean the RDS instance has not yet completed +configuration:: + + rds_i.await_resource(rds_client_double, "some-instance", "available", mod_mock, + await_pending=1) + assert(len(sleeper_double.mock_calls) > 5), "await_pending didn't wait enough" + +By doing this we check that the ``await`` function will keep waiting through +potentially unusual that it would be 
impossible to reliably trigger through the +integration tests but which happen unpredictably in reality. + +Defining a module to work against multiple API versions +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +This case is especially important for modules interacting with many different versions of +software; for example, package installation modules that might be expected to work with +many different operating system versions. + +By using previously stored data from various versions of an API we can ensure that the +code is tested against the actual data which will be sent from that version of the system +even when the version is very obscure and unlikely to be available during testing. + +Ansible special cases for unit testing +====================================== + +There are a number of special cases for unit testing the environment of an Ansible module. +The most common are documented below, and suggestions for others can be found by looking +at the source code of the existing unit tests or asking on the Ansible IRC channel or mailing +lists. + +Module argument processing +-------------------------- + +There are two problems with running the main function of a module: + +* Since the module is supposed to accept arguments on ``STDIN`` it is a bit difficult to + set up the arguments correctly so that the module will get them as parameters. +* All modules should finish by calling either the :meth:`module.fail_json` or + :meth:`module.exit_json`, but these won't work correctly in a testing environment. + +Passing Arguments +----------------- + +.. This section should be updated once https://github.com/ansible/ansible/pull/31456 is + closed since the function below will be provided in a library file. + +To pass arguments to a module correctly, use the ``set_module_args`` method which accepts a dictionary +as its parameter. 
Module creation and argument processing is +handled through the :class:`AnsibleModule` object in the basic section of the utilities. Normally +this accepts input on ``STDIN``, which is not convenient for unit testing. When the special +variable is set it will be treated as if the input came on ``STDIN`` to the module. Simply call that function before setting up your module:: + + import json + from units.modules.utils import set_module_args + from ansible.module_utils._text import to_bytes + + def test_already_registered(self): + set_module_args({ + 'activationkey': 'key', + 'username': 'user', + 'password': 'pass', + }) + +Handling exit correctly +----------------------- + +.. This section should be updated once https://github.com/ansible/ansible/pull/31456 is + closed since the exit and failure functions below will be provided in a library file. + +The :meth:`module.exit_json` function won't work properly in a testing environment since it +writes error information to ``STDOUT`` upon exit, where it +is difficult to examine. This can be mitigated by replacing it (and :meth:`module.fail_json`) with +a function that raises an exception:: + + def exit_json(*args, **kwargs): + if 'changed' not in kwargs: + kwargs['changed'] = False + raise AnsibleExitJson(kwargs) + +Now you can ensure that the first function called is the one you expected simply by +testing for the correct exception:: + + def test_returned_value(self): + set_module_args({ + 'activationkey': 'key', + 'username': 'user', + 'password': 'pass', + }) + + with self.assertRaises(AnsibleExitJson) as result: + my_module.main() + +The same technique can be used to replace :meth:`module.fail_json` (which is used for failure +returns from modules) and for the ``aws_module.fail_json_aws()`` (used in modules for Amazon +Web Services). 
+
+Running the main function
+-------------------------
+
+If you do want to run the actual main function of a module you must import the module, set
+the arguments as above, set up the appropriate exit exception and then run the module::
+
+    # This test is based around pytest's features for individual test functions
+    import pytest
+    import ansible.modules.module.group.my_module as my_module
+
+    def test_main_function(monkeypatch):
+        monkeypatch.setattr(my_module.AnsibleModule, "exit_json", fake_exit_json)
+        set_module_args({
+            'activationkey': 'key',
+            'username': 'user',
+            'password': 'pass',
+        })
+        my_module.main()
+
+
+Handling calls to external executables
+--------------------------------------
+
+Modules must use :meth:`AnsibleModule.run_command` in order to execute an external command. This
+method needs to be mocked:
+
+Here is a simple mock of :meth:`AnsibleModule.run_command` (taken from :file:`test/units/modules/packaging/os/test_rhn_register.py`)::
+
+    with patch.object(basic.AnsibleModule, 'run_command') as run_command:
+        run_command.return_value = 0, '', ''  # successful execution, no output
+        with self.assertRaises(AnsibleExitJson) as result:
+            self.module.main()
+        self.assertFalse(result.exception.args[0]['changed'])
+        # Check that run_command has been called
+        run_command.assert_called_once_with('/usr/bin/command args')
+        self.assertEqual(run_command.call_count, 1)
+        self.assertTrue(run_command.called)
+
+
+A Complete Example
+------------------
+
+The following example is a complete skeleton that reuses the mocks explained above and adds a new
+mock for :meth:`Ansible.get_bin_path`::
+
+    import json
+
+    from units.compat import unittest
+    from units.compat.mock import patch
+    from ansible.module_utils import basic
+    from ansible.module_utils._text import to_bytes
+    from ansible.modules.namespace import my_module
+
+
+    def set_module_args(args):
+        """prepare arguments so that they will be picked up during module creation"""
+        args = 
json.dumps({'ANSIBLE_MODULE_ARGS': args})
+        basic._ANSIBLE_ARGS = to_bytes(args)
+
+
+    class AnsibleExitJson(Exception):
+        """Exception class to be raised by module.exit_json and caught by the test case"""
+        pass
+
+
+    class AnsibleFailJson(Exception):
+        """Exception class to be raised by module.fail_json and caught by the test case"""
+        pass
+
+
+    def exit_json(*args, **kwargs):
+        """function to patch over exit_json; package return data into an exception"""
+        if 'changed' not in kwargs:
+            kwargs['changed'] = False
+        raise AnsibleExitJson(kwargs)
+
+
+    def fail_json(*args, **kwargs):
+        """function to patch over fail_json; package return data into an exception"""
+        kwargs['failed'] = True
+        raise AnsibleFailJson(kwargs)
+
+
+    def get_bin_path(self, arg, required=False):
+        """Mock AnsibleModule.get_bin_path"""
+        if arg.endswith('my_command'):
+            return '/usr/bin/my_command'
+        else:
+            if required:
+                fail_json(msg='%r not found !' % arg)
+
+
+    class TestMyModule(unittest.TestCase):
+
+        def setUp(self):
+            self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+                                                     exit_json=exit_json,
+                                                     fail_json=fail_json,
+                                                     get_bin_path=get_bin_path)
+            self.mock_module_helper.start()
+            self.addCleanup(self.mock_module_helper.stop)
+
+        def test_module_fail_when_required_args_missing(self):
+            with self.assertRaises(AnsibleFailJson):
+                set_module_args({})
+                my_module.main()
+
+
+        def test_ensure_command_called(self):
+            set_module_args({
+                'param1': 10,
+                'param2': 'test',
+            })
+
+            with patch.object(basic.AnsibleModule, 'run_command') as mock_run_command:
+                stdout = 'configuration updated'
+                stderr = ''
+                rc = 0
+                mock_run_command.return_value = rc, stdout, stderr  # successful execution
+
+                with self.assertRaises(AnsibleExitJson) as result:
+                    my_module.main()
+                self.assertFalse(result.exception.args[0]['changed'])  # ensure result is not changed
+
+                mock_run_command.assert_called_once_with('/usr/bin/my_command --value 10 --name test')
+
+
+Restructuring modules to enable testing module 
set up and other processes
+-------------------------------------------------------------------------
+
+Often modules have a ``main()`` function which sets up the module and then performs other
+actions. This can make it difficult to check argument processing. This can be made easier by
+moving module configuration and initialization into a separate function. For example::
+
+    argument_spec = dict(
+        # module function variables
+        state=dict(choices=['absent', 'present', 'rebooted', 'restarted'], default='present'),
+        apply_immediately=dict(type='bool', default=False),
+        wait=dict(type='bool', default=False),
+        wait_timeout=dict(type='int', default=600),
+        allocated_storage=dict(type='int', aliases=['size']),
+        db_instance_identifier=dict(aliases=["id"], required=True),
+    )
+
+    def setup_module_object():
+        module = AnsibleAWSModule(
+            argument_spec=argument_spec,
+            required_if=required_if,
+            mutually_exclusive=[['old_instance_id', 'source_db_instance_identifier',
+                                 'db_snapshot_identifier']],
+        )
+        return module
+
+    def main():
+        module = setup_module_object()
+        validate_parameters(module)
+        conn = setup_client(module)
+        return_dict = run_task(module, conn)
+        module.exit_json(**return_dict)
+
+This now makes it possible to run tests against the module initiation function::
+
+    def test_rds_module_setup_fails_if_db_instance_identifier_parameter_missing():
+        # db_instance_identifier parameter is missing
+        set_module_args({
+            'state': 'absent',
+            'apply_immediately': 'True',
+        })
+
+        with self.assertRaises(AnsibleFailJson) as result:
+            self.module.setup_module_object()
+
+See also ``test/units/module_utils/aws/test_rds.py``
+
+Note that the ``argument_spec`` dictionary is visible in a module variable. This has
+advantages, both in allowing explicit testing of the arguments and in allowing the easy
+creation of module objects for testing. 
+
+The same restructuring technique can be valuable for testing other functionality, such as the part of the module which queries the object that the module configures.
+
+Traps for maintaining Python 2 compatibility
+============================================
+
+If you use the ``mock`` library from the Python 2.6 standard library, a number of the
+assert functions are missing but will return as if successful. This means that test cases should take great care *not* to use
+functions marked as *new* in the Python 3 documentation, since the tests will likely always
+succeed even if the code is broken when run on older versions of Python.
+
+A helpful development approach is to ensure that all of the tests have been
+run under Python 2.6 and that each assertion in the test cases has been checked to work by breaking
+the code in Ansible to trigger that failure.
+
+.. warning:: Maintain Python 2.6 compatibility
+
+   Please remember that modules need to maintain compatibility with Python 2.6 so the unittests for
+   modules should also be compatible with Python 2.6.
+
+
+.. seealso::
+
+   :ref:`testing_units`
+       Ansible unit tests documentation
+   :ref:`testing_running_locally`
+       Running tests locally including gathering and reporting coverage data
+   :ref:`developing_modules_general`
+       Get started developing a module
+   `Python 3 documentation - 26.4. unittest — Unit testing framework <https://docs.python.org/3/library/unittest.html>`_
+       The documentation of the unittest framework in python 3
+   `Python 2 documentation - 25.3. 
unittest — Unit testing framework <https://docs.python.org/2/library/unittest.html>`_
+       The documentation of the earliest supported unittest framework - from Python 2.6
+   `pytest: helps you write better programs <https://docs.pytest.org/en/latest/>`_
+       The documentation of pytest - the framework actually used to run Ansible unit tests
+   `Development Mailing List <https://groups.google.com/group/ansible-devel>`_
+       Mailing list for development topics
+   `Testing Your Code (from The Hitchhiker's Guide to Python!) <https://docs.python-guide.org/writing/tests/>`_
+       General advice on testing Python code
+   `Uncle Bob's many videos on YouTube <https://www.youtube.com/watch?v=QedpQjxBPMA&list=PLlu0CT-JnSasQzGrGzddSczJQQU7295D2>`_
+       Unit testing is a part of various philosophies of software development, including
+       Extreme Programming (XP), Clean Coding. Uncle Bob talks through how to benefit from this
+   `"Why Most Unit Testing is Waste" <https://rbcs-us.com/documents/Why-Most-Unit-Testing-is-Waste.pdf>`_
+       An article warning against the costs of unit testing
+   `'A Response to "Why Most Unit Testing is Waste"' <https://henrikwarne.com/2014/09/04/a-response-to-why-most-unit-testing-is-waste/>`_
+       A response pointing to how to maintain the value of unit tests
diff --git a/docs/docsite/rst/dev_guide/testing_validate-modules.rst b/docs/docsite/rst/dev_guide/testing_validate-modules.rst
new file mode 100644
index 00000000..044a2c29
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/testing_validate-modules.rst
@@ -0,0 +1,165 @@
+:orphan:
+
+.. _testing_validate-modules:
+
+****************
+validate-modules
+****************
+
+.. contents:: Topics
+
+Python program to help test or validate Ansible modules.
+
+``validate-modules`` is one of the ``ansible-test`` Sanity Tests, see :ref:`testing_sanity` for more information.
+
+Originally developed by Matt Martz (@sivel)
+
+
+Usage
+=====
+
+.. 
code:: shell + + cd /path/to/ansible/source + source hacking/env-setup + ansible-test sanity --test validate-modules + +Help +==== + +.. code:: shell + + usage: validate-modules [-h] [-w] [--exclude EXCLUDE] [--arg-spec] + [--base-branch BASE_BRANCH] [--format {json,plain}] + [--output OUTPUT] + modules [modules ...] + + positional arguments: + modules Path to module or module directory + + optional arguments: + -h, --help show this help message and exit + -w, --warnings Show warnings + --exclude EXCLUDE RegEx exclusion pattern + --arg-spec Analyze module argument spec + --base-branch BASE_BRANCH + Used in determining if new options were added + --format {json,plain} + Output format. Default: "plain" + --output OUTPUT Output location, use "-" for stdout. Default "-" + + +Extending validate-modules +========================== + +The ``validate-modules`` tool has a `schema.py <https://github.com/ansible/ansible/blob/devel/test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/schema.py>`_ that is used to validate the YAML blocks, such as ``DOCUMENTATION`` and ``RETURNS``. 
+ + +Codes +===== + +============================================================ ================== ==================== ========================================================================================= + **Error Code** **Type** **Level** **Sample Message** +------------------------------------------------------------ ------------------ -------------------- ----------------------------------------------------------------------------------------- + ansible-deprecated-version Documentation Error A feature is deprecated and supposed to be removed in the current or an earlier Ansible version + ansible-invalid-version Documentation Error The Ansible version at which a feature is supposed to be removed cannot be parsed + ansible-module-not-initialized Syntax Error Execution of the module did not result in initialization of AnsibleModule + collection-deprecated-version Documentation Error A feature is deprecated and supposed to be removed in the current or an earlier collection version + collection-invalid-version Documentation Error The collection version at which a feature is supposed to be removed cannot be parsed (it must be a semantic version, see https://semver.org/) + deprecated-date Documentation Error A date before today appears as ``removed_at_date`` or in ``deprecated_aliases`` + deprecation-mismatch Documentation Error Module marked as deprecated or removed in at least one of the filename, its metadata, or in DOCUMENTATION (setting DOCUMENTATION.deprecated for deprecation or removing all Documentation for removed) but not in all three places. 
+ doc-choices-do-not-match-spec Documentation Error Value for "choices" from the argument_spec does not match the documentation + doc-choices-incompatible-type Documentation Error Choices value from the documentation is not compatible with type defined in the argument_spec + doc-default-does-not-match-spec Documentation Error Value for "default" from the argument_spec does not match the documentation + doc-default-incompatible-type Documentation Error Default value from the documentation is not compatible with type defined in the argument_spec + doc-elements-invalid Documentation Error Documentation specifies elements for argument, when "type" is not ``list``. + doc-elements-mismatch Documentation Error Argument_spec defines elements different than documentation does + doc-missing-type Documentation Error Documentation doesn't specify a type but argument in ``argument_spec`` use default type (``str``) + doc-required-mismatch Documentation Error argument in argument_spec is required but documentation says it is not, or vice versa + doc-type-does-not-match-spec Documentation Error Argument_spec defines type different than documentation does + documentation-error Documentation Error Unknown ``DOCUMENTATION`` error + documentation-syntax-error Documentation Error Invalid ``DOCUMENTATION`` schema + illegal-future-imports Imports Error Only the following ``from __future__`` imports are allowed: ``absolute_import``, ``division``, and ``print_function``. + import-before-documentation Imports Error Import found before documentation variables. 
All imports must appear below ``DOCUMENTATION``/``EXAMPLES``/``RETURN`` + import-error Documentation Error ``Exception`` attempting to import module for ``argument_spec`` introspection + import-placement Locations Warning Imports should be directly below ``DOCUMENTATION``/``EXAMPLES``/``RETURN`` + imports-improper-location Imports Error Imports should be directly below ``DOCUMENTATION``/``EXAMPLES``/``RETURN`` + incompatible-choices Documentation Error Choices value from the argument_spec is not compatible with type defined in the argument_spec + incompatible-default-type Documentation Error Default value from the argument_spec is not compatible with type defined in the argument_spec + invalid-argument-name Documentation Error Argument in argument_spec must not be one of 'message', 'syslog_facility' as it is used internally by Ansible Core Engine + invalid-argument-spec Documentation Error Argument in argument_spec must be a dictionary/hash when used + invalid-argument-spec-options Documentation Error Suboptions in argument_spec are invalid + invalid-documentation Documentation Error ``DOCUMENTATION`` is not valid YAML + invalid-documentation-options Documentation Error ``DOCUMENTATION.options`` must be a dictionary/hash when used + invalid-examples Documentation Error ``EXAMPLES`` is not valid YAML + invalid-extension Naming Error Official Ansible modules must have a ``.py`` extension for python modules or a ``.ps1`` for powershell modules + invalid-module-schema Documentation Error ``AnsibleModule`` schema validation error + invalid-requires-extension Naming Error Module ``#AnsibleRequires -CSharpUtil`` should not end in .cs, Module ``#Requires`` should not end in .psm1 + invalid-tagged-version Documentation Error All version numbers specified in code have to be explicitly tagged with the collection name, in other words, ``community.general:1.2.3`` or ``ansible.builtin:2.10`` + last-line-main-call Syntax Error Call to ``main()`` not the last line (or 
``removed_module()`` in the case of deprecated & docs only modules) + missing-doc-fragment Documentation Error ``DOCUMENTATION`` fragment missing + missing-existing-doc-fragment Documentation Warning Pre-existing ``DOCUMENTATION`` fragment missing + missing-documentation Documentation Error No ``DOCUMENTATION`` provided + missing-examples Documentation Error No ``EXAMPLES`` provided + missing-gplv3-license Documentation Error GPLv3 license header not found + missing-if-name-main Syntax Error Next to last line is not ``if __name__ == "__main__":`` + missing-main-call Syntax Error Did not find a call to ``main()`` (or ``removed_module()`` in the case of deprecated & docs only modules) + missing-module-utils-basic-import Imports Warning Did not find ``ansible.module_utils.basic`` import + missing-module-utils-import-csharp-requirements Imports Error No ``Ansible.ModuleUtils`` or C# Ansible util requirements/imports found + missing-powershell-interpreter Syntax Error Interpreter line is not ``#!powershell`` + missing-python-doc Naming Error Missing python documentation file + missing-python-interpreter Syntax Error Interpreter line is not ``#!/usr/bin/python`` + missing-return Documentation Error No ``RETURN`` documentation provided + missing-return-legacy Documentation Warning No ``RETURN`` documentation provided for legacy module + missing-suboption-docs Documentation Error Argument in argument_spec has sub-options but documentation does not define sub-options + module-incorrect-version-added Documentation Error Module level ``version_added`` is incorrect + module-invalid-version-added Documentation Error Module level ``version_added`` is not a valid version number + module-utils-specific-import Imports Error ``module_utils`` imports should import specific components, not ``*`` + multiple-utils-per-requires Imports Error ``Ansible.ModuleUtils`` requirements do not support multiple modules per statement + multiple-csharp-utils-per-requires Imports Error Ansible C# 
util requirements do not support multiple utils per statement + no-default-for-required-parameter Documentation Error Option is marked as required but specifies a default. Arguments with a default should not be marked as required + nonexistent-parameter-documented Documentation Error Argument is listed in DOCUMENTATION.options, but not accepted by the module + option-incorrect-version-added Documentation Error ``version_added`` for new option is incorrect + option-invalid-version-added Documentation Error ``version_added`` for option is not a valid version number + parameter-invalid Documentation Error Argument in argument_spec is not a valid python identifier + parameter-invalid-elements Documentation Error Value for "elements" is valid only when value of "type" is ``list`` + implied-parameter-type-mismatch Documentation Error Argument_spec implies ``type="str"`` but documentation defines it as different data type + parameter-type-not-in-doc Documentation Error Type value is defined in ``argument_spec`` but documentation doesn't specify a type + parameter-alias-repeated Parameters Error argument in argument_spec has at least one alias specified multiple times in aliases + parameter-alias-self Parameters Error argument in argument_spec is specified as its own alias + parameter-documented-multiple-times Documentation Error argument in argument_spec with aliases is documented multiple times + parameter-list-no-elements Parameters Error argument in argument_spec "type" is specified as ``list`` without defining "elements" + parameter-state-invalid-choice Parameters Error Argument ``state`` includes ``get``, ``list`` or ``info`` as a choice. Functionality should be in an ``_info`` or (if further conditions apply) ``_facts`` module. 
+ python-syntax-error Syntax Error Python ``SyntaxError`` while parsing module + return-syntax-error Documentation Error ``RETURN`` is not valid YAML, ``RETURN`` fragments missing or invalid + return-invalid-version-added Documentation Error ``version_added`` for return value is not a valid version number + subdirectory-missing-init Naming Error Ansible module subdirectories must contain an ``__init__.py`` + try-except-missing-has Imports Warning Try/Except ``HAS_`` expression missing + undocumented-parameter Documentation Error Argument is listed in the argument_spec, but not documented in the module + unidiomatic-typecheck Syntax Error Type comparison using ``type()`` found. Use ``isinstance()`` instead + unknown-doc-fragment Documentation Warning Unknown pre-existing ``DOCUMENTATION`` error + use-boto3 Imports Error ``boto`` import found, new modules should use ``boto3`` + use-fail-json-not-sys-exit Imports Error ``sys.exit()`` call found. Should be ``exit_json``/``fail_json`` + use-module-utils-urls Imports Error ``requests`` import found, should use ``ansible.module_utils.urls`` instead + use-run-command-not-os-call Imports Error ``os.call`` used instead of ``module.run_command`` + use-run-command-not-popen Imports Error ``subprocess.Popen`` used instead of ``module.run_command`` + use-short-gplv3-license Documentation Error GPLv3 license header should be the :ref:`short form <copyright>` for new modules + mutually_exclusive-type Documentation Error mutually_exclusive entry contains non-string value + mutually_exclusive-collision Documentation Error mutually_exclusive entry has repeated terms + mutually_exclusive-unknown Documentation Error mutually_exclusive entry contains option which does not appear in argument_spec (potentially an alias of an option?) 
+ required_one_of-type Documentation Error required_one_of entry contains non-string value + required_one_of-collision Documentation Error required_one_of entry has repeated terms + required_one_of-unknown Documentation Error required_one_of entry contains option which does not appear in argument_spec (potentially an alias of an option?) + required_together-type Documentation Error required_together entry contains non-string value + required_together-collision Documentation Error required_together entry has repeated terms + required_together-unknown Documentation Error required_together entry contains option which does not appear in argument_spec (potentially an alias of an option?) + required_if-is_one_of-type Documentation Error required_if entry has a fourth value which is not a bool + required_if-requirements-type Documentation Error required_if entry has a third value (requirements) which is not a list or tuple + required_if-requirements-collision Documentation Error required_if entry has repeated terms in requirements + required_if-requirements-unknown Documentation Error required_if entry's requirements contains option which does not appear in argument_spec (potentially an alias of an option?) + required_if-unknown-key Documentation Error required_if entry's key does not appear in argument_spec (potentially an alias of an option?) + required_if-key-in-requirements Documentation Error required_if entry contains its key in requirements list/tuple + required_if-value-type Documentation Error required_if entry's value is not of the type specified for its key + required_by-collision Documentation Error required_by entry has repeated terms + required_by-unknown Documentation Error required_by entry contains option which does not appear in argument_spec (potentially an alias of an option?) 
+============================================================ ================== ==================== ========================================================================================= diff --git a/docs/docsite/rst/galaxy/dev_guide.rst b/docs/docsite/rst/galaxy/dev_guide.rst new file mode 100644 index 00000000..62530a5d --- /dev/null +++ b/docs/docsite/rst/galaxy/dev_guide.rst @@ -0,0 +1,246 @@ +.. _developing_galaxy: + +********************** +Galaxy Developer Guide +********************** + +You can host collections and roles on Galaxy to share with the Ansible community. Galaxy content is formatted in pre-packaged units of work such as :ref:`roles <playbooks_reuse_roles>`, and new in Galaxy 3.2, :ref:`collections <collections>`. +You can create roles for provisioning infrastructure, deploying applications, and all of the tasks you do everyday. Taking this a step further, you can create collections which provide a comprehensive package of automation that may include multiple playbooks, roles, modules, and plugins. + +.. contents:: + :local: + :depth: 2 + +.. _creating_collections_galaxy: + +Creating collections for Galaxy +=============================== + +Collections are a distribution format for Ansible content. You can use collections to package and distribute playbooks, roles, modules, and plugins. +You can publish and use collections through `Ansible Galaxy <https://galaxy.ansible.com>`_. + +See :ref:`developing_collections` for details on how to create collections. + +.. _creating_roles_galaxy: + + +Creating roles for Galaxy +========================= + +Use the ``init`` command to initialize the base structure of a new role, saving time on creating the various directories and main.yml files a role requires + +.. code-block:: bash + + $ ansible-galaxy init role_name + +The above will create the following directory structure in the current working directory: + +.. 
code-block:: text + + role_name/ + README.md + .travis.yml + defaults/ + main.yml + files/ + handlers/ + main.yml + meta/ + main.yml + templates/ + tests/ + inventory + test.yml + vars/ + main.yml + +If you want to create a repository for the role the repository root should be `role_name`. + +Force +----- + +If a directory matching the name of the role already exists in the current working directory, the init command will result in an error. To ignore the error +use the ``--force`` option. Force will create the above subdirectories and files, replacing anything that matches. + +Container enabled +----------------- + +If you are creating a Container Enabled role, pass ``--type container`` to ``ansible-galaxy init``. This will create the same directory structure as above, but populate it +with default files appropriate for a Container Enabled role. For instance, the README.md has a slightly different structure, the *.travis.yml* file tests +the role using `Ansible Container <https://github.com/ansible/ansible-container>`_, and the meta directory includes a *container.yml* file. + +Using a custom role skeleton +---------------------------- + +A custom role skeleton directory can be supplied as follows: + +.. code-block:: bash + + $ ansible-galaxy init --role-skeleton=/path/to/skeleton role_name + +When a skeleton is provided, init will: + +- copy all files and directories from the skeleton to the new role +- any .j2 files found outside of a templates folder will be rendered as templates. The only useful variable at the moment is role_name +- The .git folder and any .git_keep files will not be copied + +Alternatively, the role_skeleton and ignoring of files can be configured via ansible.cfg + +.. 
code-block:: text
+
+    [galaxy]
+    role_skeleton = /path/to/skeleton
+    role_skeleton_ignore = ^.git$,^.*/.git_keep$
+
+Authenticate with Galaxy
+------------------------
+
+Using the ``import``, ``delete`` and ``setup`` commands to manage your roles on the Galaxy website requires authentication, and the ``login`` command
+can be used to do just that. Before you can use the ``login`` command, you must create an account on the Galaxy website.
+
+The ``login`` command requires using your GitHub credentials. You can use your username and password, or you can create a `personal access token <https://help.github.com/articles/creating-an-access-token-for-command-line-use/>`_. If you choose to create a token, grant minimal access to the token, as it is used just to verify identity.
+
+The following shows authenticating with the Galaxy website using a GitHub username and password:
+
+.. code-block:: text
+
+    $ ansible-galaxy login
+
+    We need your GitHub login to identify you.
+    This information will not be sent to Galaxy, only to api.github.com.
+    The password will not be displayed.
+
+    Use --github-token if you do not want to enter your password.
+
+    GitHub Username: dsmith
+    Password for dsmith:
+    Successfully logged into Galaxy as dsmith
+
+When you choose to use your username and password, your password is not sent to Galaxy. It is used to authenticate with GitHub and create a personal access token.
+It then sends the token to Galaxy, which in turn verifies your identity and returns a Galaxy access token. After authentication completes the GitHub token is
+destroyed.
+
+If you do not want to use your GitHub password, or if you have two-factor authentication enabled with GitHub, use the ``--github-token`` option to pass a personal access token that you create.
+
+
+Import a role
+-------------
+
+The ``import`` command requires that you first authenticate using the ``login`` command. 
Once authenticated you can import any GitHub repository that you own or have been granted access to.
+
+Use the following to import a role:
+
+.. code-block:: bash
+
+    $ ansible-galaxy import github_user github_repo
+
+By default the command will wait for Galaxy to complete the import process, displaying the results as the import progresses:
+
+.. code-block:: text
+
+    Successfully submitted import request 41
+    Starting import 41: role_name=myrole repo=githubuser/ansible-role-repo ref=
+    Retrieving GitHub repo githubuser/ansible-role-repo
+    Accessing branch: master
+    Parsing and validating meta/main.yml
+    Parsing galaxy_tags
+    Parsing platforms
+    Adding dependencies
+    Parsing and validating README.md
+    Adding repo tags as role versions
+    Import completed
+    Status SUCCESS : warnings=0 errors=0
+
+Branch
+^^^^^^
+
+Use the ``--branch`` option to import a specific branch. If not specified, the default branch for the repo will be used.
+
+Role name
+^^^^^^^^^
+
+By default the name given to the role will be derived from the GitHub repository name. However, you can use the ``--role-name`` option to override this and set the name.
+
+No wait
+^^^^^^^
+
+If the ``--no-wait`` option is present, the command will not wait for results. Results of the most recent import for any of your roles is available on the Galaxy web site by visiting *My Imports*.
+
+Delete a role
+-------------
+
+The ``delete`` command requires that you first authenticate using the ``login`` command. Once authenticated you can remove a role from the Galaxy web site. You are only allowed to remove roles where you have access to the repository in GitHub.
+
+Use the following to delete a role:
+
+.. code-block:: bash
+
+    $ ansible-galaxy delete github_user github_repo
+
+This only removes the role from Galaxy. It does not remove or alter the actual GitHub repository. 
+ + +Travis integrations +------------------- + +You can create an integration or connection between a role in Galaxy and `Travis <https://travis-ci.org>`_. Once the connection is established, a build in Travis will +automatically trigger an import in Galaxy, updating the search index with the latest information about the role. + +You create the integration using the ``setup`` command, but before an integration can be created, you must first authenticate using the ``login`` command; you will +also need an account in Travis, and your Travis token. Once you're ready, use the following command to create the integration: + +.. code-block:: bash + + $ ansible-galaxy setup travis github_user github_repo xxx-travis-token-xxx + +The setup command requires your Travis token, however the token is not stored in Galaxy. It is used along with the GitHub username and repo to create a hash as described +in `the Travis documentation <https://docs.travis-ci.com/user/notifications/>`_. The hash is stored in Galaxy and used to verify notifications received from Travis. + +The setup command enables Galaxy to respond to notifications. To configure Travis to run a build on your repository and send a notification, follow the +`Travis getting started guide <https://docs.travis-ci.com/user/getting-started/>`_. + +To instruct Travis to notify Galaxy when a build completes, add the following to your .travis.yml file: + +.. code-block:: text + + notifications: + webhooks: https://galaxy.ansible.com/api/v1/notifications/ + + +List Travis integrations +^^^^^^^^^^^^^^^^^^^^^^^^ + +Use the ``--list`` option to display your Travis integrations: + +.. code-block:: bash + + $ ansible-galaxy setup --list + + + ID Source Repo + ---------- ---------- ---------- + 2 travis github_user/github_repo + 1 travis github_user/github_repo + + +Remove Travis integrations +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Use the ``--remove`` option to disable and remove a Travis integration: + + .. 
code-block:: bash + + $ ansible-galaxy setup --remove ID + +Provide the ID of the integration to be disabled. You can find the ID by using the ``--list`` option. + + +.. seealso:: + :ref:`collections` + Shareable collections of modules, playbooks and roles + :ref:`playbooks_reuse_roles` + All about ansible roles + `Mailing List <https://groups.google.com/group/ansible-project>`_ + Questions? Help? Ideas? Stop by the list on Google Groups + `irc.freenode.net <http://irc.freenode.net>`_ + #ansible IRC chat channel diff --git a/docs/docsite/rst/galaxy/user_guide.rst b/docs/docsite/rst/galaxy/user_guide.rst new file mode 100644 index 00000000..161839b1 --- /dev/null +++ b/docs/docsite/rst/galaxy/user_guide.rst @@ -0,0 +1,493 @@ +.. _using_galaxy: +.. _ansible_galaxy: + +***************** +Galaxy User Guide +***************** + +:dfn:`Ansible Galaxy` refers to the `Galaxy <https://galaxy.ansible.com>`_ website, a free site for finding, downloading, and sharing community developed roles. + +Use Galaxy to jump-start your automation project with great content from the Ansible community. Galaxy provides pre-packaged units of work such as :ref:`roles <playbooks_reuse_roles>`, and new in Galaxy 3.2, :ref:`collections <collections>`. +You can find roles for provisioning infrastructure, deploying applications, and all of the tasks you do every day. The collection format provides a comprehensive package of automation that may include multiple playbooks, roles, modules, and plugins. + +.. contents:: + :local: + :depth: 2 +.. _finding_galaxy_collections: + +Finding collections on Galaxy +============================= + +To find collections on Galaxy: + +#. Click the :guilabel:`Search` icon in the left-hand navigation. +#. Set the filter to *collection*. +#. Set other filters and press :guilabel:`enter`. + +Galaxy presents a list of collections that match your search criteria. + +.. 
_installing_galaxy_collections: + + +Installing collections +====================== + + +Installing a collection from Galaxy +----------------------------------- + +.. include:: ../shared_snippets/installing_collections.txt + +.. _installing_ah_collection: + +Downloading a collection from Automation Hub +---------------------------------------------------- + +You can download collections from Automation Hub at the command line. Automation Hub content is available to subscribers only, so you must download an API token and configure your local environment to provide it before you can download collections. To download a collection from Automation Hub with the ``ansible-galaxy`` command: + +1. Get your Automation Hub API token. Go to https://cloud.redhat.com/ansible/automation-hub/token/ and click :guilabel:`Get API token` from the version dropdown to copy your API token. +2. Configure Red Hat Automation Hub server in the ``server_list`` option under the ``[galaxy]`` section in your :file:`ansible.cfg` file. + + .. code-block:: ini + + [galaxy] + server_list = automation_hub + + [galaxy_server.automation_hub] + url=https://cloud.redhat.com/api/automation-hub/ + auth_url=https://sso.redhat.com/auth/realms/redhat-external/protocol/openid-connect/token + token=my_ah_token + +3. Download the collection hosted in Automation Hub. + + .. code-block:: bash + + ansible-galaxy collection install my_namespace.my_collection + +.. seealso:: + `Getting started with Automation Hub <https://www.ansible.com/blog/getting-started-with-ansible-hub>`_ + An introduction to Automation Hub + +Installing an older version of a collection +------------------------------------------- + +.. include:: ../shared_snippets/installing_older_collection.txt + +Install multiple collections with a requirements file +----------------------------------------------------- + +.. 
include:: ../shared_snippets/installing_multiple_collections.txt + +Downloading a collection for offline use +----------------------------------------- + +.. include:: ../shared_snippets/download_tarball_collections.txt + +Installing a collection from a git repository +--------------------------------------------- + +.. include:: ../shared_snippets/installing_collections_git_repo.txt + +Listing installed collections +----------------------------- + +To list installed collections, run ``ansible-galaxy collection list``. See :ref:`collections_listing` for more details. + + +Configuring the ``ansible-galaxy`` client +------------------------------------------ + +.. include:: ../shared_snippets/galaxy_server_list.txt + +.. _finding_galaxy_roles: + +Finding roles on Galaxy +======================= + +Search the Galaxy database by tags, platforms, author and multiple keywords. For example: + +.. code-block:: bash + + $ ansible-galaxy search elasticsearch --author geerlingguy + +The search command will return a list of the first 1000 results matching your search: + +.. code-block:: text + + Found 2 roles matching your search: + + Name Description + ---- ----------- + geerlingguy.elasticsearch Elasticsearch for Linux. + geerlingguy.elasticsearch-curator Elasticsearch curator for Linux. + + +Get more information about a role +--------------------------------- + +Use the ``info`` command to view more detail about a specific role: + +.. code-block:: bash + + $ ansible-galaxy info username.role_name + +This returns everything found in Galaxy for the role: + +.. code-block:: text + + Role: username.role_name + description: Installs and configures a thing, a distributed, highly available NoSQL thing. + active: True + commit: c01947b7bc89ebc0b8a2e298b87ab416aed9dd57 + commit_message: Adding travis + commit_url: https://github.com/username/repo_name/commit/c01947b7bc89ebc0b8a2e298b87ab + company: My Company, Inc. 
+ created: 2015-12-08T14:17:52.773Z + download_count: 1 + forks_count: 0 + github_branch: + github_repo: repo_name + github_user: username + id: 6381 + is_valid: True + issue_tracker_url: + license: Apache + min_ansible_version: 1.4 + modified: 2015-12-08T18:43:49.085Z + namespace: username + open_issues_count: 0 + path: /Users/username/projects/roles + scm: None + src: username.repo_name + stargazers_count: 0 + travis_status_url: https://travis-ci.org/username/repo_name.svg?branch=master + version: + watchers_count: 1 + + +.. _installing_galaxy_roles: + +Installing roles from Galaxy +============================ + +The ``ansible-galaxy`` command comes bundled with Ansible, and you can use it to install roles from Galaxy or directly from a git based SCM. You can +also use it to create a new role, remove roles, or perform tasks on the Galaxy website. + +The command line tool by default communicates with the Galaxy website API using the server address *https://galaxy.ansible.com*. If you run your own internal Galaxy server +and want to use it instead of the default one, pass the ``--server`` option followed by the address of this Galaxy server. You can set this option permanently by setting +the Galaxy server value in your ``ansible.cfg`` file. For information on setting the value in *ansible.cfg* see :ref:`galaxy_server`. + + +Installing roles +---------------- + +Use the ``ansible-galaxy`` command to download roles from the `Galaxy website <https://galaxy.ansible.com>`_: + +.. code-block:: bash + + $ ansible-galaxy install namespace.role_name + +Setting where to install roles +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +By default, Ansible downloads roles to the first writable directory in the default list of paths ``~/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles``. This installs roles in the home directory of the user running ``ansible-galaxy``. 
+ +You can override this with one of the following options: + +* Set the environment variable :envvar:`ANSIBLE_ROLES_PATH` in your session. +* Use the ``--roles-path`` option for the ``ansible-galaxy`` command. +* Define ``roles_path`` in an ``ansible.cfg`` file. + +The following provides an example of using ``--roles-path`` to install the role into the current working directory: + +.. code-block:: bash + + $ ansible-galaxy install --roles-path . geerlingguy.apache + +.. seealso:: + + :ref:`intro_configuration` + All about configuration files + +Installing a specific version of a role +--------------------------------------- + +When the Galaxy server imports a role, it imports any git tags matching the `Semantic Version <https://semver.org/>`_ format as versions. +In turn, you can download a specific version of a role by specifying one of the imported tags. + +To see the available versions for a role: + +#. Locate the role on the Galaxy search page. +#. Click on the name to view more details, including the available versions. + +You can also navigate directly to the role using the /<namespace>/<role name>. For example, to view the role geerlingguy.apache, go to `<https://galaxy.ansible.com/geerlingguy/apache>`_. + +To install a specific version of a role from Galaxy, append a comma and the value of a GitHub release tag. For example: + +.. code-block:: bash + + $ ansible-galaxy install geerlingguy.apache,v1.0.0 + +It is also possible to point directly to the git repository and specify a branch name or commit hash as the version. For example, the following will +install a specific commit: + +.. code-block:: bash + + $ ansible-galaxy install git+https://github.com/geerlingguy/ansible-role-apache.git,0b7cd353c0250e87a26e0499e59e7fd265cc2f25 + +Installing multiple roles from a file +------------------------------------- + +You can install multiple roles by including the roles in a :file:`requirements.yml` file. 
The format of the file is YAML, and the +file extension must be either *.yml* or *.yaml*. + +Use the following command to install roles included in :file:`requirements.yml`: + +.. code-block:: bash + + $ ansible-galaxy install -r requirements.yml + +Again, the extension is important. If the *.yml* extension is left off, the ``ansible-galaxy`` CLI assumes the file is in an older, now deprecated, +"basic" format. + +Each role in the file will have one or more of the following attributes: + + src + The source of the role. Use the format *namespace.role_name*, if downloading from Galaxy; otherwise, provide a URL pointing + to a repository within a git based SCM. See the examples below. This is a required attribute. + scm + Specify the SCM. As of this writing only *git* or *hg* are allowed. See the examples below. Defaults to *git*. + version + The version of the role to download. Provide a release tag value, commit hash, or branch name. Defaults to the branch set as a default in the repository, otherwise defaults to *master*. + name + Download the role to a specific name. Defaults to the Galaxy name when downloading from Galaxy, otherwise it defaults + to the name of the repository. + +Use the following example as a guide for specifying roles in *requirements.yml*: + +.. 
code-block:: yaml + + # from galaxy + - name: yatesr.timezone + + # from locally cloned git repository (git+file:// requires full paths) + - src: git+file:///home/bennojoy/nginx + + # from GitHub + - src: https://github.com/bennojoy/nginx + + # from GitHub, overriding the name and specifying a specific tag + - name: nginx_role + src: https://github.com/bennojoy/nginx + version: master + + # from GitHub, specifying a specific commit hash + - src: https://github.com/bennojoy/nginx + version: "ee8aa41" + + # from a webserver, where the role is packaged in a tar.gz + - name: http-role-gz + src: https://some.webserver.example.com/files/master.tar.gz + + # from a webserver, where the role is packaged in a tar.bz2 + - name: http-role-bz2 + src: https://some.webserver.example.com/files/master.tar.bz2 + + # from a webserver, where the role is packaged in a tar.xz (Python 3.x only) + - name: http-role-xz + src: https://some.webserver.example.com/files/master.tar.xz + + # from Bitbucket + - src: git+https://bitbucket.org/willthames/git-ansible-galaxy + version: v1.4 + + # from Bitbucket, alternative syntax and caveats + - src: https://bitbucket.org/willthames/hg-ansible-galaxy + scm: hg + + # from GitLab or other git-based scm, using git+ssh + - src: git@gitlab.company.com:mygroup/ansible-base.git + scm: git + version: "0.1" # quoted, so YAML doesn't parse this as a floating-point value + +.. warning:: + + Embedding credentials into a SCM URL is not secure. Make sure to use safe auth options for security reasons. For example, use `SSH <https://help.github.com/en/github/authenticating-to-github/connecting-to-github-with-ssh>`_, `netrc <https://linux.die.net/man/5/netrc>`_ or `http.extraHeader <https://git-scm.com/docs/git-config#Documentation/git-config.txt-httpextraHeader>`_/`url.<base>.pushInsteadOf <https://git-scm.com/docs/git-config#Documentation/git-config.txt-urlltbasegtpushInsteadOf>`_ in Git config to prevent your creds from being exposed in logs. 
+ +Installing roles and collections from the same requirements.yml file +--------------------------------------------------------------------- + +You can install roles and collections from the same requirements files, with some caveats. + +.. code-block:: yaml + + --- + roles: + # Install a role from Ansible Galaxy. + - name: geerlingguy.java + version: 1.9.6 + + collections: + # Install a collection from Ansible Galaxy. + - name: geerlingguy.php_roles + version: 0.9.3 + source: https://galaxy.ansible.com + +.. note:: + While both roles and collections can be specified in one requirements file, they need to be installed separately. + The ``ansible-galaxy role install -r requirements.yml`` will only install roles and ``ansible-galaxy collection install -r requirements.yml -p ./`` will only install collections. + +Installing multiple roles from multiple files +--------------------------------------------- + +For large projects, the ``include`` directive in a :file:`requirements.yml` file provides the ability to split a large file into multiple smaller files. + +For example, a project may have a :file:`requirements.yml` file, and a :file:`webserver.yml` file. + +Below are the contents of the :file:`webserver.yml` file: + +.. code-block:: yaml + + # from github + - src: https://github.com/bennojoy/nginx + + # from Bitbucket + - src: git+http://bitbucket.org/willthames/git-ansible-galaxy + version: v1.4 + +The following shows the contents of the :file:`requirements.yml` file that now includes the :file:`webserver.yml` file: + +.. code-block:: yaml + + # from galaxy + - name: yatesr.timezone + - include: <path_to_requirements>/webserver.yml + +To install all the roles from both files, pass the root file, in this case :file:`requirements.yml` on the +command line, as follows: + +.. code-block:: bash + + $ ansible-galaxy install -r requirements.yml + +.. 
_galaxy_dependencies: + +Dependencies +------------ + +Roles can also be dependent on other roles, and when you install a role that has dependencies, those dependencies will automatically be installed to the ``roles_path``. + +There are two ways to define the dependencies of a role: + +* using ``meta/requirements.yml`` +* using ``meta/main.yml`` + +Using ``meta/requirements.yml`` +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. versionadded:: 2.10 + +You can create the file ``meta/requirements.yml`` and define dependencies in the same format used for :file:`requirements.yml` described in the `Installing multiple roles from a file`_ section. + +From there, you can import or include the specified roles in your tasks. + +Using ``meta/main.yml`` +^^^^^^^^^^^^^^^^^^^^^^^ + +Alternatively, you can specify role dependencies in the ``meta/main.yml`` file by providing a list of roles under the ``dependencies`` section. If the source of a role is Galaxy, you can simply specify the role in +the format ``namespace.role_name``. You can also use the more complex format in :file:`requirements.yml`, allowing you to provide ``src``, ``scm``, ``version``, and ``name``. + +Dependencies installed that way, depending on other factors described below, will also be executed **before** this role is executed during play execution. +To better understand how dependencies are handled during play execution, see :ref:`playbooks_reuse_roles`. + +The following shows an example ``meta/main.yml`` file with dependent roles: + +.. code-block:: yaml + + --- + dependencies: + - geerlingguy.java + + galaxy_info: + author: geerlingguy + description: Elasticsearch for Linux. 
+ company: "Midwestern Mac, LLC" + license: "license (BSD, MIT)" + min_ansible_version: 2.4 + platforms: + - name: EL + versions: + - all + - name: Debian + versions: + - all + - name: Ubuntu + versions: + - all + galaxy_tags: + - web + - system + - monitoring + - logging + - lucene + - elk + - elasticsearch + +Tags are inherited *down* the dependency chain. In order for tags to be applied to a role and all its dependencies, the tag should be applied to the role, not to all the tasks within a role. + +Roles listed as dependencies are subject to conditionals and tag filtering, and may not execute fully depending on +what tags and conditionals are applied. + +If the source of a role is Galaxy, specify the role in the format *namespace.role_name*: + +.. code-block:: yaml + + dependencies: + - geerlingguy.apache + - geerlingguy.ansible + + +Alternately, you can specify the role dependencies in the complex form used in :file:`requirements.yml` as follows: + +.. code-block:: yaml + + dependencies: + - name: geerlingguy.ansible + - name: composer + src: git+https://github.com/geerlingguy/ansible-role-composer.git + version: 775396299f2da1f519f0d8885022ca2d6ee80ee8 + +.. note:: + + Galaxy expects all role dependencies to exist in Galaxy, and therefore dependencies to be specified in the + ``namespace.role_name`` format. If you import a role with a dependency where the ``src`` value is a URL, the import process will fail. + +List installed roles +-------------------- + +Use ``list`` to show the name and version of each role installed in the *roles_path*. + +.. code-block:: bash + + $ ansible-galaxy list + - ansible-network.network-engine, v2.7.2 + - ansible-network.config_manager, v2.6.2 + - ansible-network.cisco_nxos, v2.7.1 + - ansible-network.vyos, v2.7.3 + - ansible-network.cisco_ios, v2.7.0 + +Remove an installed role +------------------------ + +Use ``remove`` to delete a role from *roles_path*: + +.. 
code-block:: bash + + $ ansible-galaxy remove namespace.role_name + + +.. seealso:: + :ref:`collections` + Shareable collections of modules, playbooks and roles + :ref:`playbooks_reuse_roles` + Reusable tasks, handlers, and other files in a known directory structure diff --git a/docs/docsite/rst/images/cow.png b/docs/docsite/rst/images/cow.png Binary files differnew file mode 100644 index 00000000..9ace4401 --- /dev/null +++ b/docs/docsite/rst/images/cow.png diff --git a/docs/docsite/rst/installation_guide/index.rst b/docs/docsite/rst/installation_guide/index.rst new file mode 100644 index 00000000..8a7f41db --- /dev/null +++ b/docs/docsite/rst/installation_guide/index.rst @@ -0,0 +1,13 @@ +****************** +Installation Guide +****************** + +Welcome to the Ansible Installation Guide! + + +.. toctree:: + :maxdepth: 2 + + intro_installation + intro_configuration + diff --git a/docs/docsite/rst/installation_guide/intro_configuration.rst b/docs/docsite/rst/installation_guide/intro_configuration.rst new file mode 100644 index 00000000..131c6c44 --- /dev/null +++ b/docs/docsite/rst/installation_guide/intro_configuration.rst @@ -0,0 +1,59 @@ +.. _intro_configuration: + +******************* +Configuring Ansible +******************* + +.. contents:: Topics + + +This topic describes how to control Ansible settings. + + +.. _the_configuration_file: + +Configuration file +================== + +Certain settings in Ansible are adjustable via a configuration file (ansible.cfg). +The stock configuration should be sufficient for most users, but there may be reasons you would want to change them. +Paths where configuration file is searched are listed in :ref:`reference documentation<ansible_configuration_settings_locations>`. + +.. 
_getting_the_latest_configuration: + +Getting the latest configuration +-------------------------------- + +If installing Ansible from a package manager, the latest ``ansible.cfg`` file should be present in ``/etc/ansible``, possibly +as a ``.rpmnew`` file (or other) as appropriate in the case of updates. + +If you installed Ansible from pip or from source, you may want to create this file in order to override +default settings in Ansible. + +An `example file is available on GitHub <https://github.com/ansible/ansible/blob/devel/examples/ansible.cfg>`_. + +For more details and a full listing of available configurations go to :ref:`configuration_settings<ansible_configuration_settings>`. Starting with Ansible version 2.4, you can use the :ref:`ansible-config` command line utility to list your available options and inspect the current values. + +For in-depth details, see :ref:`ansible_configuration_settings`. + +.. _environmental_configuration: + +Environmental configuration +=========================== + +Ansible also allows configuration of settings using environment variables. +If these environment variables are set, they will override any setting loaded from the configuration file. + +You can get a full listing of available environment variables from :ref:`ansible_configuration_settings`. + + +.. _command_line_configuration: + +Command line options +==================== + +Not all configuration options are present in the command line, just the ones deemed most useful or common. +Settings in the command line will override those passed through the configuration file and the environment. + +The full list of options available is in :ref:`ansible-playbook` and :ref:`ansible`. + diff --git a/docs/docsite/rst/installation_guide/intro_installation.rst b/docs/docsite/rst/installation_guide/intro_installation.rst new file mode 100644 index 00000000..b5084431 --- /dev/null +++ b/docs/docsite/rst/installation_guide/intro_installation.rst @@ -0,0 +1,629 @@ +.. 
_installation_guide: +.. _intro_installation_guide: + +****************** +Installing Ansible +****************** + +Ansible is an agentless automation tool that you install on a control node. From the control node, Ansible manages machines and other devices remotely (by default, over the SSH protocol). + +To install Ansible for use at the command line, simply install the Ansible package on one machine (which could easily be a laptop). You do not need to install a database or run any daemons. Ansible can manage an entire fleet of remote machines from that one control node. + +.. contents:: + :local: + +Prerequisites +============= + +Before you install Ansible, review the requirements for a control node. Before you use Ansible, review the requirements for managed nodes (those end devices you want to automate). Control nodes and managed nodes have different minimum requirements. + +.. _control_node_requirements: + +Control node requirements +------------------------- + +For your control node (the machine that runs Ansible), you can use any machine with Python 2 (version 2.7) or Python 3 (versions 3.5 and higher) installed. ansible-core 2.11 and Ansible 4.0.0 will make Python 3.8 a soft dependency for the control node, but will function with the aforementioned requirements. ansible-core 2.12 and Ansible 5.0.0 will require Python 3.8 or newer to function on the control node. Starting with ansible-core 2.11, the project will only be packaged for Python 3.8 and newer. +This includes Red Hat, Debian, CentOS, macOS, any of the BSDs, and so on. +Windows is not supported for the control node, read more about this in `Matt Davis's blog post <http://blog.rolpdog.com/2020/03/why-no-ansible-controller-for-windows.html>`_. + +.. warning:: + + Please note that some plugins that run on the control node have additional requirements. These requirements should be listed in the plugin documentation. 
+ +When choosing a control node, remember that any management system benefits from being run near the machines being managed. If you are using Ansible to manage machines in a cloud, consider using a machine inside that cloud as your control node. In most cases Ansible will perform better from a machine on the cloud than from a machine on the open Internet. + +.. warning:: + + Ansible 2.11 will make Python 3.8 a soft dependency for the control node, but will function with the aforementioned requirements. Ansible 2.12 will require Python 3.8 or newer to function on the control node. Starting with Ansible 2.11, the project will only be packaged for Python 3.8 and newer. + + +.. _managed_node_requirements: + +Managed node requirements +------------------------- + +Although you do not need a daemon on your managed nodes, you do need a way for Ansible to communicate with them. For most managed nodes, Ansible makes a connection over SSH and transfers modules using SFTP. If SSH works but SFTP is not available on some of your managed nodes, you can switch to SCP in :ref:`ansible.cfg <ansible_configuration_settings>`. For any machine or device that can run Python, you also need Python 2 (version 2.6 or later) or Python 3 (version 3.5 or later). + +.. warning:: + + Please note that some modules have additional requirements that need to be satisfied on the 'target' machine (the managed node). These requirements should be listed in the module documentation. + +.. note:: + + * If you have SELinux enabled on remote nodes, you will also want to install libselinux-python on them before using any copy/file/template related functions in Ansible. You can use the :ref:`yum module<yum_module>` or :ref:`dnf module<dnf_module>` in Ansible to install this package on remote systems that do not have it. + + * By default, before the first Python module in a playbook runs on a host, Ansible attempts to discover a suitable Python interpreter on that host. 
You can override the discovery behavior by setting the :ref:`ansible_python_interpreter<ansible_python_interpreter>` inventory variable to a specific interpreter, and in other ways. See :ref:`interpreter_discovery` for details. + + * Ansible's :ref:`raw module<raw_module>`, and the :ref:`script module<script_module>`, do not depend on a client side install of Python to run. Technically, you can use Ansible to install a compatible version of Python using the :ref:`raw module<raw_module>`, which then allows you to use everything else. For example, if you need to bootstrap Python 2 onto a RHEL-based system, you can install it as follows: + + .. code-block:: shell + + $ ansible myhost --become -m raw -a "yum install -y python2" + +.. _what_version: + +Selecting an Ansible artifact and version to install +==================================================== + +Starting with version 2.10, Ansible distributes two artifacts: a community package called ``ansible`` and a minimalist language and runtime called ``ansible-core`` (called `ansible-base` in version 2.10). Choose the Ansible artifact and version that matches your particular needs. + +Installing the Ansible community package +---------------------------------------- + +The ``ansible`` package includes the Ansible language and runtime plus a range of community curated Collections. It recreates and expands on the functionality that was included in Ansible 2.9. + +You can choose any of the following ways to install the Ansible community package: + +* Install the latest release with your OS package manager (for Red Hat Enterprise Linux (TM), CentOS, Fedora, Debian, or Ubuntu). +* Install with ``pip`` (the Python package manager). + +Installing `ansible-core` +------------------------- + +Ansible also distributes a minimalist object called ``ansible-core`` (or ``ansible-base`` in version 2.10). It contains the Ansible language, runtime, and a short list of core modules and other plugins. 
You can build functionality on top of ``ansible-core`` by installing collections from Galaxy, Automation Hub, or any other source. + +You can choose any of the following ways to install ``ansible-core``: + +* Install ``ansible-core`` (version 2.11 and greater) or ``ansible-base`` (version 2.10) with ``pip``. +* Install ``ansible-core`` from source from the ansible/ansible GitHub repository to access the development (``devel``) version to develop or test the latest features. + +.. note:: + + You should only run ``ansible-core`` from ``devel`` if you are modifying ``ansible-core``, or trying out features under development. This is a rapidly changing source of code and can become unstable at any point. + +Ansible generally creates new releases twice a year. See :ref:`release_and_maintenance` for information on release timing and maintenance of older releases. + +.. _from_pip: + +Installing and upgrading Ansible with ``pip`` +============================================= + +Ansible can be installed on many systems with ``pip``, the Python package manager. + +Prerequisites: Installing ``pip`` +---------------------------------- + +If ``pip`` is not already available on your system, run the following commands to install it:: + + $ curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py + $ python get-pip.py --user + +You may need to perform some additional configuration before you are able to run Ansible. See the Python documentation on `installing to the user site`_ for more information. + +.. 
_installing to the user site: https://packaging.python.org/tutorials/installing-packages/#installing-to-the-user-site + +Installing Ansible with ``pip`` +------------------------------- + +Once ``pip`` is installed, you can install Ansible [1]_:: + + $ python -m pip install --user ansible + +In order to use the ``paramiko`` connection plugin or modules that require ``paramiko``, install the required module [2]_:: + + $ python -m pip install --user paramiko + +If you wish to install Ansible globally, run the following commands:: + + $ sudo python get-pip.py + $ sudo python -m pip install ansible + +.. note:: + + Running ``pip`` with ``sudo`` will make global changes to the system. Since ``pip`` does not coordinate with system package managers, it could make changes to your system that leaves it in an inconsistent or non-functioning state. This is particularly true for macOS. Installing with ``--user`` is recommended unless you understand fully the implications of modifying global files on the system. + +.. note:: + + Older versions of ``pip`` default to http://pypi.python.org/simple, which no longer works. + Please make sure you have the latest version of ``pip`` before installing Ansible. + If you have an older version of ``pip`` installed, you can upgrade by following `pip's upgrade instructions <https://pip.pypa.io/en/stable/installing/#upgrading-pip>`_ . + +.. _from_pip_venv: + +Installing Ansible in a virtual environment with ``pip`` +-------------------------------------------------------- + +.. note:: + + If you have Ansible 2.9 or older installed, you need to use ``pip uninstall ansible`` first to remove older versions of Ansible before re-installing it. + +Ansible can also be installed inside a new or existing ``virtualenv``:: + + $ python -m virtualenv ansible # Create a virtualenv if one does not already exist + $ source ansible/bin/activate # Activate the virtual environment + $ python -m pip install ansible + +.. 
_pip_upgrade: + +Upgrading Ansible with ``pip`` +------------------------------ + +Upgrading from 2.9 or earlier to 2.10 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Starting in version 2.10, Ansible is made of two packages. When you upgrade from version 2.9 and older to version 2.10 or later, you need to uninstall the old Ansible version (2.9 or earlier) before upgrading. If you do not uninstall the older version of Ansible, you will see the following message, and no change will be performed: + +.. code-block:: console + + Cannot install ansible-base with a pre-existing ansible==2.x installation. + + Installing ansible-base with ansible-2.9 or older currently installed with + pip is known to cause problems. Please uninstall ansible and install the new + version: + + pip uninstall ansible + pip install ansible-base + + ... + +As explained by the message, to upgrade you must first remove the version of Ansible installed and then install it to the latest version. + +.. code-block:: console + + $ pip uninstall ansible + $ pip install ansible + +.. _installing_the_control_node: +.. _from_yum: + +Installing Ansible on specific operating systems +================================================ + +Follow these instructions to install the Ansible community package on a variety of operating systems. + +Installing Ansible on RHEL, CentOS, or Fedora +---------------------------------------------- + +On Fedora: + +.. code-block:: bash + + $ sudo dnf install ansible + +On RHEL and CentOS: + +.. code-block:: bash + + $ sudo yum install ansible + +RPMs for RHEL 7 and RHEL 8 are available from the `Ansible Engine repository <https://access.redhat.com/articles/3174981>`_. + +To enable the Ansible Engine repository for RHEL 8, run the following command: + +.. code-block:: bash + + $ sudo subscription-manager repos --enable ansible-2.9-for-rhel-8-x86_64-rpms + +To enable the Ansible Engine repository for RHEL 7, run the following command: + +.. 
code-block:: bash + + $ sudo subscription-manager repos --enable rhel-7-server-ansible-2.9-rpms + +RPMs for currently supported versions of RHEL and CentOS are also available from `EPEL <https://fedoraproject.org/wiki/EPEL>`_. + +.. note:: + + Since Ansible 2.10 for RHEL is not available at this time, continue to use Ansible 2.9. + +Ansible can manage older operating systems that contain Python 2.6 or higher. + +.. _from_apt: + +Installing Ansible on Ubuntu +---------------------------- + +Ubuntu builds are available `in a PPA here <https://launchpad.net/~ansible/+archive/ubuntu/ansible>`_. + +To configure the PPA on your machine and install Ansible run these commands: + +.. code-block:: bash + + $ sudo apt update + $ sudo apt install software-properties-common + $ sudo apt-add-repository --yes --update ppa:ansible/ansible + $ sudo apt install ansible + +.. note:: On older Ubuntu distributions, "software-properties-common" is called "python-software-properties". You may want to use ``apt-get`` instead of ``apt`` in older versions. Also, be aware that only newer distributions (in other words, 18.04, 18.10, and so on) have a ``-u`` or ``--update`` flag, so adjust your script accordingly. + +Debian/Ubuntu packages can also be built from the source checkout, run: + +.. code-block:: bash + + $ make deb + +Installing Ansible on Debian +---------------------------- + +Debian users may leverage the same source as the Ubuntu PPA. + +Add the following line to /etc/apt/sources.list: + +.. code-block:: bash + + deb http://ppa.launchpad.net/ansible/ansible/ubuntu trusty main + +Then run these commands: + +.. code-block:: bash + + $ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 93C4A3FD7BB9C367 + $ sudo apt update + $ sudo apt install ansible + +.. note:: This method has been verified with the Trusty sources in Debian Jessie and Stretch but may not be supported in earlier versions. You may want to use ``apt-get`` instead of ``apt`` in older versions. 
+ +Installing Ansible on Gentoo with portage +----------------------------------------- + +.. code-block:: bash + + $ emerge -av app-admin/ansible + +To install the newest version, you may need to unmask the Ansible package prior to emerging: + +.. code-block:: bash + + $ echo 'app-admin/ansible' >> /etc/portage/package.accept_keywords + +Installing Ansible on FreeBSD +----------------------------- + +Though Ansible works with both Python 2 and 3 versions, FreeBSD has different packages for each Python version. +So to install you can use: + +.. code-block:: bash + + $ sudo pkg install py27-ansible + +or: + +.. code-block:: bash + + $ sudo pkg install py36-ansible + + +You may also wish to install from ports, run: + +.. code-block:: bash + + $ sudo make -C /usr/ports/sysutils/ansible install + +You can also choose a specific version, for example ``ansible25``. + +Older versions of FreeBSD worked with something like this (substitute for your choice of package manager): + +.. code-block:: bash + + $ sudo pkg install ansible + +.. _on_macos: + +Installing Ansible on macOS +--------------------------- + +The preferred way to install Ansible on a Mac is with ``pip``. + +The instructions can be found in :ref:`from_pip`. If you are running macOS version 10.12 or older, then you should upgrade to the latest ``pip`` to connect to the Python Package Index securely. It should be noted that pip must be run as a module on macOS, and the linked ``pip`` instructions will show you how to do that. + +.. note:: + + To upgrade from Ansible 2.9 or older to Ansible 3 or later, you must ``pip uninstall ansible`` first to remove older versions of Ansible before re-installing it. + +.. note:: + + macOS by default is configured for a small number of file handles, so if you want to use 15 or more forks you'll need to raise the ulimit with ``sudo launchctl limit maxfiles unlimited``. This command can also fix any "Too many open files" errors. 
+ +If you are installing on macOS Mavericks (10.9), you may encounter some noise from your compiler. A workaround is to do the following:: + + $ CFLAGS=-Qunused-arguments CPPFLAGS=-Qunused-arguments pip install --user ansible + + +.. _from_pkgutil: + +Installing Ansible on Solaris +----------------------------- + +Ansible is available for Solaris as `SysV package from OpenCSW <https://www.opencsw.org/packages/ansible/>`_. + +.. code-block:: bash + + # pkgadd -d http://get.opencsw.org/now + # /opt/csw/bin/pkgutil -i ansible + +.. _from_pacman: + +Installing Ansible on Arch Linux +--------------------------------- + +Ansible is available in the Community repository:: + + $ pacman -S ansible + +The AUR has a PKGBUILD for pulling directly from GitHub called `ansible-git <https://aur.archlinux.org/packages/ansible-git>`_. + +Also see the `Ansible <https://wiki.archlinux.org/index.php/Ansible>`_ page on the ArchWiki. + +.. _from_sbopkg: + +Installing Ansible on Slackware Linux +------------------------------------- + +Ansible build script is available in the `SlackBuilds.org <https://slackbuilds.org/apps/ansible/>`_ repository. +Can be built and installed using `sbopkg <https://sbopkg.org/>`_. + +Create queue with Ansible and all dependencies:: + + # sqg -p ansible + +Build and install packages from a created queuefile (answer Q for question if sbopkg should use queue or package):: + + # sbopkg -k -i ansible + +.. _from_swupd: + +Installing Ansible on Clear Linux +--------------------------------- + +Ansible and its dependencies are available as part of the sysadmin host management bundle:: + + $ sudo swupd bundle-add sysadmin-hostmgmt + +Update of the software will be managed by the swupd tool:: + + $ sudo swupd update + +.. _from_pip_devel: +.. 
_getting_ansible: + +Installing and running the ``devel`` branch from source +======================================================= + +In Ansible 2.10 and later, the `ansible/ansible repository <https://github.com/ansible/ansible>`_ contains the code for basic features and functions, such as copying module code to managed nodes. This code is also known as ``ansible-core``. + +New features are added to ``ansible-core`` on a branch called ``devel``. If you are testing new features, fixing bugs, or otherwise working with the development team on changes to the core code, you can install and run ``devel``. + +.. note:: + + You should only install and run the ``devel`` branch if you are modifying ``ansible-core`` or trying out features under development. This is a rapidly changing source of code and can become unstable at any point. + +.. note:: + + If you want to use Ansible Tower as the control node, do not install or run the ``devel`` branch of Ansible. Use an OS package manager (like ``apt`` or ``yum``) or ``pip`` to install a stable version. + +If you are running Ansible from source, you may also wish to follow the `Ansible GitHub project <https://github.com/ansible/ansible>`_. We track issues, document bugs, and share feature ideas in this and other related repositories. + +For more information on getting involved in the Ansible project, see the :ref:`ansible_community_guide`. For more information on creating Ansible modules and Collections, see the :ref:`developer_guide`. + +Installing ``devel`` from GitHub with ``pip`` +--------------------------------------------- + +You can install the ``devel`` branch of ``ansible-core`` directly from GitHub with ``pip``: + +.. code-block:: bash + + $ python -m pip install --user https://github.com/ansible/ansible/archive/devel.tar.gz + +.. note:: + + If you have Ansible 2.9 or older installed, you need to use ``pip uninstall ansible`` first to remove older versions of Ansible before re-installing it. 
See :ref:`pip_upgrade` for more details. + +You can replace ``devel`` in the URL mentioned above, with any other branch or tag on GitHub to install older versions of Ansible (prior to ``ansible-base`` 2.10.), tagged alpha or beta versions, and release candidates. This installs all of Ansible. + +.. code-block:: bash + + $ python -m pip install --user https://github.com/ansible/ansible/archive/stable-2.9.tar.gz + +See :ref:`from_source` for instructions on how to run ``ansible-core`` directly from source. + + +Installing ``devel`` from GitHub by cloning +------------------------------------------- + +You can install the ``devel`` branch of ``ansible-core`` by cloning the GitHub repository: + +.. code-block:: bash + + $ git clone https://github.com/ansible/ansible.git + $ cd ./ansible + +The default branch is ``devel``. + +.. _from_source: + +Running the ``devel`` branch from a clone +----------------------------------------- + +``ansible-core`` is easy to run from source. You do not need ``root`` permissions to use it and there is no software to actually install. No daemons or database setup are required. + +Once you have installed the ``ansible-core`` repository by cloning, setup the Ansible environment: + +Using Bash: + +.. code-block:: bash + + $ source ./hacking/env-setup + +Using Fish:: + + $ source ./hacking/env-setup.fish + +If you want to suppress spurious warnings/errors, use:: + + $ source ./hacking/env-setup -q + +If you do not have ``pip`` installed in your version of Python, install it:: + + $ curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py + $ python get-pip.py --user + +Ansible also uses the following Python modules that need to be installed [1]_: + +.. code-block:: bash + + $ python -m pip install --user -r ./requirements.txt + +To update the ``devel`` branch of ``ansible-core`` on your local machine, use pull-with-rebase so any local changes are replayed. + +.. code-block:: bash + + $ git pull --rebase + +.. 
code-block:: bash + + $ git pull --rebase #same as above + $ git submodule update --init --recursive + +After you run the env-setup script, you will be running from the source code. The default inventory file will be ``/etc/ansible/hosts``. You can optionally specify an inventory file (see :ref:`inventory`) other than ``/etc/ansible/hosts``: + +.. code-block:: bash + + $ echo "127.0.0.1" > ~/ansible_hosts + $ export ANSIBLE_INVENTORY=~/ansible_hosts + +You can read more about the inventory file at :ref:`inventory`. + +Confirming your installation +============================ + +Whatever method of installing Ansible you chose, you can test that it is installed correctly with a ping command: + +.. code-block:: bash + + $ ansible all -m ping --ask-pass + +You can also use "sudo make install". + +.. _tagged_releases: + +Finding tarballs of tagged releases +=================================== + +Packaging Ansible or wanting to build a local package yourself, but don't want to do a git checkout? Tarballs of releases are available from ``pypi`` as https://pypi.python.org/packages/source/a/ansible/ansible-{{VERSION}}.tar.gz. You can make VERSION a variable in your package managing system that you update in one place whenever you package a new version. Alternately, you can download https://pypi.python.org/project/ansible to get the latest stable release. + +.. note:: + + If you are creating your own Ansible package, you must also download or package ``ansible-base`` as part of your Ansible package. You can download it as https://pypi.python.org/packages/source/a/ansible-base/ansible-base-{{VERSION}}.tar.gz. + +These releases are also tagged in the `git repository <https://github.com/ansible/ansible/releases>`_ with the release version. + + +.. 
_shell_completion: + +Adding Ansible command shell completion +======================================= + +As of Ansible 2.9, you can add shell completion of the Ansible command line utilities by installing an optional dependency called ``argcomplete``. ``argcomplete`` supports bash, and has limited support for zsh and tcsh. + +You can install ``python-argcomplete`` from EPEL on Red Hat Enterprise based distributions, and or from the standard OS repositories for many other distributions. + +For more information about installation and configuration, see the `argcomplete documentation <https://argcomplete.readthedocs.io/en/latest/>`_. + +Installing ``argcomplete`` on RHEL, CentOS, or Fedora +----------------------------------------------------- + +On Fedora: + +.. code-block:: bash + + $ sudo dnf install python-argcomplete + +On RHEL and CentOS: + +.. code-block:: bash + + $ sudo yum install epel-release + $ sudo yum install python-argcomplete + + +Installing ``argcomplete`` with ``apt`` +--------------------------------------- + +.. code-block:: bash + + $ sudo apt install python-argcomplete + + +Installing ``argcomplete`` with ``pip`` +--------------------------------------- + +.. code-block:: bash + + $ python -m pip install argcomplete + +Configuring ``argcomplete`` +--------------------------- + +There are 2 ways to configure ``argcomplete`` to allow shell completion of the Ansible command line utilities: globally or per command. + +Global configuration +^^^^^^^^^^^^^^^^^^^^ + +Global completion requires bash 4.2. + +.. code-block:: bash + + $ sudo activate-global-python-argcomplete + +This will write a bash completion file to a global location. Use ``--dest`` to change the location. + +Per command configuration +^^^^^^^^^^^^^^^^^^^^^^^^^ + +If you do not have bash 4.2, you must register each script independently. + +.. 
code-block:: bash + + $ eval $(register-python-argcomplete ansible) + $ eval $(register-python-argcomplete ansible-config) + $ eval $(register-python-argcomplete ansible-console) + $ eval $(register-python-argcomplete ansible-doc) + $ eval $(register-python-argcomplete ansible-galaxy) + $ eval $(register-python-argcomplete ansible-inventory) + $ eval $(register-python-argcomplete ansible-playbook) + $ eval $(register-python-argcomplete ansible-pull) + $ eval $(register-python-argcomplete ansible-vault) + +You should place the above commands into your shell's profile file such as ``~/.profile`` or ``~/.bash_profile``. + +Using ``argcomplete`` with zsh or tcsh +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +See the `argcomplete documentation <https://argcomplete.readthedocs.io/en/latest/>`_. + + +.. seealso:: + + :ref:`intro_adhoc` + Examples of basic commands + :ref:`working_with_playbooks` + Learning ansible's configuration management language + :ref:`installation_faqs` + Ansible Installation related to FAQs + `Mailing List <https://groups.google.com/group/ansible-project>`_ + Questions? Help? Ideas? Stop by the list on Google Groups + `irc.freenode.net <http://irc.freenode.net>`_ + #ansible IRC chat channel + +.. [1] If you have issues with the "pycrypto" package install on macOS, then you may need to try ``CC=clang sudo -E pip install pycrypto``. +.. [2] ``paramiko`` was included in Ansible's ``requirements.txt`` prior to 2.8. diff --git a/docs/docsite/rst/inventory/implicit_localhost.rst b/docs/docsite/rst/inventory/implicit_localhost.rst new file mode 100644 index 00000000..2f065dc7 --- /dev/null +++ b/docs/docsite/rst/inventory/implicit_localhost.rst @@ -0,0 +1,35 @@ +:orphan: + +.. 
_implicit_localhost: + +Implicit 'localhost' +==================== + +When you try to reference a ``localhost`` and you don't have it defined in inventory, Ansible will create an implicit one for you.:: + + - hosts: all + tasks: + - name: check that i have log file for all hosts on my local machine + stat: path=/var/log/hosts/{{inventory_hostname}}.log + delegate_to: localhost + +In a case like this (or ``local_action``) when Ansible needs to contact a 'localhost' but you did not supply one, we create one for you. This host is defined with specific connection variables equivalent to this in an inventory:: + + ... + + hosts: + localhost: + vars: + ansible_connection: local + ansible_python_interpreter: "{{ansible_playbook_python}}" + +This ensures that the proper connection and Python are used to execute your tasks locally. +You can override the built-in implicit version by creating a ``localhost`` host entry in your inventory. At that point, all implicit behaviors are ignored; the ``localhost`` in inventory is treated just like any other host. Group and host vars will apply, including connection vars, which includes the ``ansible_python_interpreter`` setting. This will also affect ``delegate_to: localhost`` and ``local_action``, the latter being an alias to the former. + +.. note:: + - This host is not targetable via any group, however it will use vars from ``host_vars`` and from the 'all' group. + - Implicit localhost does not appear in the ``hostvars`` magic variable unless demanded, such as by ``"{{ hostvars['localhost'] }}"``. + - The ``inventory_file`` and ``inventory_dir`` magic variables are not available for the implicit localhost as they are dependent on **each inventory host**. + - This implicit host also gets triggered by using ``127.0.0.1`` or ``::1`` as they are the IPv4 and IPv6 representations of 'localhost'. + - Even though there are many ways to create it, there will only ever be ONE implicit localhost, using the name first used to create it. 
+ - Having ``connection: local`` does NOT trigger an implicit localhost, you are just changing the connection for the ``inventory_hostname``. diff --git a/docs/docsite/rst/network/dev_guide/developing_plugins_network.rst b/docs/docsite/rst/network/dev_guide/developing_plugins_network.rst new file mode 100644 index 00000000..45aee4b0 --- /dev/null +++ b/docs/docsite/rst/network/dev_guide/developing_plugins_network.rst @@ -0,0 +1,265 @@ + +.. _developing_modules_network: +.. _developing_plugins_network: + +************************** +Developing network plugins +************************** + +You can extend the existing network modules with custom plugins in your collection. + +.. contents:: + :local: + +Network connection plugins +========================== +Each network connection plugin has a set of its own plugins which provide a specification of the +connection for a particular set of devices. The specific plugin used is selected at runtime based +on the value of the ``ansible_network_os`` variable assigned to the host. This variable should be +set to the same value as the name of the plugin to be loaded. Thus, ``ansible_network_os=nxos`` +will try to load a plugin in a file named ``nxos.py``, so it is important to name the plugin in a +way that will be sensible to users. + +Public methods of these plugins may be called from a module or module_utils with the connection +proxy object just as other connection methods can. The following is a very simple example of using +such a call in a module_utils file so it may be shared with other modules. + +.. code-block:: python + + from ansible.module_utils.connection import Connection + + def get_config(module): + # module is your AnsibleModule instance. + connection = Connection(module._socket_path) + + # You can now call any method (that doesn't start with '_') of the connection + # plugin or its platform-specific plugin + return connection.get_config() + +.. contents:: + :local: + +.. 
_developing_plugins_httpapi: + +Developing httpapi plugins +========================== + +:ref:`httpapi plugins <httpapi_plugins>` serve as adapters for various HTTP(S) APIs for use with the ``httpapi`` connection plugin. They should implement a minimal set of convenience methods tailored to the API you are attempting to use. + +Specifically, there are a few methods that the ``httpapi`` connection plugin expects to exist. + +Making requests +--------------- + +The ``httpapi`` connection plugin has a ``send()`` method, but an httpapi plugin needs a ``send_request(self, data, **message_kwargs)`` method as a higher-level wrapper to ``send()``. This method should prepare requests by adding fixed values like common headers or URL root paths. This method may do more complex work such as turning data into formatted payloads, or determining which path or method to request. It may then also unpack responses to be more easily consumed by the caller. + +.. code-block:: python + + from ansible.module_utils.six.moves.urllib.error import HTTPError + + def send_request(self, data, path, method='POST'): + # Fixed headers for requests + headers = {'Content-Type': 'application/json'} + try: + response, response_content = self.connection.send(path, data, method=method, headers=headers) + except HTTPError as exc: + return exc.code, exc.read() + + # handle_response (defined separately) will take the format returned by the device + # and transform it into something more suitable for use by modules. + # This may be JSON text to Python dictionaries, for example. + return handle_response(response_content) + +Authenticating +-------------- + +By default, all requests will authenticate with HTTP Basic authentication. If a request can return some kind of token to stand in place of HTTP Basic, the ``update_auth(self, response, response_text)`` method should be implemented to inspect responses for such tokens. 
If the token is meant to be included with the headers of each request, it is sufficient to return a dictionary which will be merged with the computed headers for each request. The default implementation of this method does exactly this for cookies. If the token is used in another way, say in a query string, you should instead save that token to an instance variable, where the ``send_request()`` method (above) can add it to each request + +.. code-block:: python + + def update_auth(self, response, response_text): + cookie = response.info().get('Set-Cookie') + if cookie: + return {'Cookie': cookie} + + return None + +If instead an explicit login endpoint needs to be requested to receive an authentication token, the ``login(self, username, password)`` method can be implemented to call that endpoint. If implemented, this method will be called once before requesting any other resources of the server. By default, it will also be attempted once when a HTTP 401 is returned from a request. + +.. code-block:: python + + def login(self, username, password): + login_path = '/my/login/path' + data = {'user': username, 'password': password} + + response = self.send_request(data, path=login_path) + try: + # This is still sent as an HTTP header, so we can set our connection's _auth + # variable manually. If the token is returned to the device in another way, + # you will have to keep track of it another way and make sure that it is sent + # with the rest of the request from send_request() + self.connection._auth = {'X-api-token': response['token']} + except KeyError: + raise AnsibleAuthenticationFailure(message="Failed to acquire login token.") + +Similarly, ``logout(self)`` can be implemented to call an endpoint to invalidate and/or release the current token, if such an endpoint exists. This will be automatically called when the connection is closed (and, by extension, when reset). + +.. 
code-block:: python + + def logout(self): + logout_path = '/my/logout/path' + self.send_request(None, path=logout_path) + + # Clean up tokens + self.connection._auth = None + +Error handling +-------------- + +The ``handle_httperror(self, exception)`` method can deal with status codes returned by the server. The return value indicates how the plugin will continue with the request: + +* A value of ``true`` means that the request can be retried. This may be used to indicate a transient error, or one that has been resolved. For example, the default implementation will try to call ``login()`` when presented with a 401, and return ``true`` if successful. + +* A value of ``false`` means that the plugin is unable to recover from this response. The status code will be raised as an exception to the calling module. + +* Any other value will be taken as a nonfatal response from the request. This may be useful if the server returns error messages in the body of the response. Returning the original exception is usually sufficient in this case, as HTTPError objects have the same interface as a successful response. + +For example httpapi plugins, see the `source code for the httpapi plugins <https://github.com/ansible/ansible/tree/devel/lib/ansible/plugins/httpapi>`_ included with Ansible Core. + + + +Developing NETCONF plugins +========================== + +The :ref:`netconf <netconf_connection>` connection plugin provides a connection to remote devices over the ``SSH NETCONF`` subsystem. Network devices typically use this connection plugin to send and receive ``RPC`` calls over ``NETCONF``. + +The ``netconf`` connection plugin uses the ``ncclient`` Python library under the hood to initiate a NETCONF session with a NETCONF-enabled remote network device. ``ncclient`` also executes NETCONF RPC requests and receives responses. You must install the ``ncclient`` on the local Ansible controller. 
+ +To use the ``netconf`` connection plugin for network devices that support standard NETCONF (:RFC:`6241`) operations such as ``get``, ``get-config``, ``edit-config``, set ``ansible_network_os=default``. +You can use :ref:`netconf_get <netconf_get_module>`, :ref:`netconf_config <netconf_config_module>` and :ref:`netconf_rpc <netconf_rpc_module>` modules to talk to a NETCONF enabled remote host. + +As a contributor and user, you should be able to use all the methods under the ``NetconfBase`` class if your device supports standard NETCONF. You can contribute a new plugin if the device you are working with has a vendor specific NETCONF RPC. +To support a vendor specific NETCONF RPC, add the implementation in the network OS specific NETCONF plugin. + +For Junos for example: + +* See the vendor-specific Junos RPC methods implemented in ``plugins/netconf/junos.py``. +* Set the value of ``ansible_network_os`` to the name of the netconf plugin file, that is ``junos`` in this case. + +.. _developing_plugins_network_cli: + +Developing network_cli plugins +============================== + +The :ref:`network_cli <network_cli_connection>` connection type uses ``paramiko_ssh`` under the hood which creates a pseudo terminal to send commands and receive responses. +``network_cli`` loads two platform specific plugins based on the value of ``ansible_network_os``: + +* Terminal plugin (for example ``plugins/terminal/ios.py``) - Controls the parameters related to terminal, such as setting terminal length and width, page disabling and privilege escalation. Also defines regex to identify the command prompt and error prompts. + +* :ref:`cliconf_plugins` (for example, :ref:`ios cliconf <ios_cliconf>`) - Provides an abstraction layer for low level send and receive operations. For example, the ``edit_config()`` method ensures that the prompt is in ``config`` mode before executing configuration commands. 
+ +To contribute a new network operating system to work with the ``network_cli`` connection, implement the ``cliconf`` and ``terminal`` plugins for that network OS. + +The plugins can reside in: + +* Adjacent to playbook in folders + + .. code-block:: bash + + cliconf_plugins/ + terminal_plugins/ + +* Roles + + .. code-block:: bash + + myrole/cliconf_plugins/ + myrole/terminal_plugins/ + +* Collections + + .. code-block:: bash + + myorg/mycollection/plugins/terminal/ + myorg/mycollection/plugins/cliconf/ + +The user can also set the :ref:`DEFAULT_CLICONF_PLUGIN_PATH` to configure the ``cliconf`` plugin path. + +After adding the ``cliconf`` and ``terminal`` plugins in the expected locations, users can: + +* Use the :ref:`cli_command <cli_command_module>` to run an arbitrary command on the network device. +* Use the :ref:`cli_config <cli_config_module>` to implement configuration changes on the remote hosts without platform-specific modules. + + +.. _develop_cli_parse_plugins: + +Developing cli_parser plugins in a collection +=============================================== + +You can use ``cli_parse`` as an entry point for a cli_parser plugin in +your own collection. + +The following sample shows the start of a custom cli_parser plugin: + +.. 
code-block:: python + + from ansible_collections.ansible.netcommon.plugins.module_utils.cli_parser.cli_parserbase import ( + CliParserBase, + ) + + class CliParser(CliParserBase): + """ Sample cli_parser plugin + """ + + # Use the following extension when loading a template + DEFAULT_TEMPLATE_EXTENSION = "txt" + # Provide the contents of the template to the parse function + PROVIDE_TEMPLATE_CONTENTS = True + + def myparser(text, template_contents): + # parse the text using the template contents + return {...} + + def parse(self, *_args, **kwargs): + """ Standard entry point for a cli_parse parse execution + + :return: Errors or parsed text as structured data + :rtype: dict + + :example: + + The parse function of a parser should return a dict: + {"errors": [a list of errors]} + or + {"parsed": obj} + """ + template_contents = kwargs["template_contents"] + text = self._task_args.get("text") + try: + parsed = myparser(text, template_contents) + except Exception as exc: + msg = "Custom parser returned an error while parsing. Error: {err}" + return {"errors": [msg.format(err=to_native(exc))]} + return {"parsed": parsed} + +The following task uses this custom cli_parser plugin: + +.. code-block:: yaml + + - name: Use a custom cli_parser + ansible.netcommon.cli_parse: + command: ls -l + parser: + name: my_organization.my_collection.custom_parser + +To develop a custom plugin: +- Each cli_parser plugin requires a ``CliParser`` class. +- Each cli_parser plugin requires a ``parse`` function. +- Always return a dictionary with ``errors`` or ``parsed``. +- Place the custom cli_parser in plugins/cli_parsers directory of the collection. +- See the `current cli_parsers <https://github.com/ansible-collections/ansible.netcommon/tree/main/plugins/cli_parsers>`_ for examples to follow. + + +.. 
seealso:: + + * :ref:`cli_parsing` diff --git a/docs/docsite/rst/network/dev_guide/developing_resource_modules_network.rst b/docs/docsite/rst/network/dev_guide/developing_resource_modules_network.rst new file mode 100644 index 00000000..f5715b5f --- /dev/null +++ b/docs/docsite/rst/network/dev_guide/developing_resource_modules_network.rst @@ -0,0 +1,819 @@ + +.. _developing_resource_modules: + +*********************************** +Developing network resource modules +*********************************** + +.. contents:: + :local: + :depth: 2 + +Understanding network and security resource modules +=================================================== + +Network and security devices separate configuration into sections (such as interfaces, VLANs, and so on) that apply to a network or security service. Ansible resource modules take advantage of this to allow users to configure subsections or resources within the device configuration. Resource modules provide a consistent experience across different network and security devices. For example, a network resource module may only update the configuration for a specific portion of the network interfaces, VLANs, ACLs, and so on for a network device. The resource module: + +#. Fetches a piece of the configuration (fact gathering), for example, the interfaces configuration. +#. Converts the returned configuration into key-value pairs. +#. Places those key-value pairs into an internal agnostic structured data format. + +Now that the configuration data is normalized, the user can update and modify the data and then use the resource module to send the configuration data back to the device. This results in a full round-trip configuration update without the need for manual parsing, data manipulation, and data model management. + +The resource module has two top-level keys - ``config`` and ``state``: + +* ``config`` defines the resource configuration data model as key-value pairs. 
The type of the ``config`` option can be ``dict`` or ``list of dict`` based on the resource managed. That is, if the device has a single global configuration, it should be a ``dict`` (for example, a global LLDP configuration). If the device has multiple instances of configuration, it should be of type ``list`` with each element in the list of type ``dict`` (for example, interfaces configuration). + + +* ``state`` defines the action the resource module takes on the end device. + +The ``state`` for a new resource module should support the following values (as applicable for the devices that support them): + +merged + Ansible merges the on-device configuration with the provided configuration in the task. + +replaced + Ansible replaces the on-device configuration subsection with the provided configuration subsection in the task. + +overridden + Ansible overrides the on-device configuration for the resource with the provided configuration in the task. Use caution with this state as you could remove your access to the device (for example, by overriding the management interface configuration). + +deleted + Ansible deletes the on-device configuration subsection and restores any default settings. + +gathered + Ansible displays the resource details gathered from the network device and accessed with the ``gathered`` key in the result. + +rendered + Ansible renders the provided configuration in the task in the device-native format (for example, Cisco IOS CLI). Ansible returns this rendered configuration in the ``rendered`` key in the result. Note this state does not communicate with the network device and can be used offline. + +parsed + Ansible parses the configuration from the ``running_configuration`` option into Ansible structured data in the ``parsed`` key in the result. Note this does not gather the configuration from the network device so this state can be used offline. + + +Modules in Ansible-maintained collections must support these state values. 
If you develop a module with only "present" and "absent" for state, you may submit it to a community collection. + +.. note:: + + The states ``rendered``, ``gathered``, and ``parsed`` do not perform any change on the device. + +.. seealso:: + + `Deep Dive on VLANs Resource Modules for Network Automation <https://www.ansible.com/blog/deep-dive-on-vlans-resource-modules-for-network-automation>`_ + Walkthrough of how state values are implemented for VLANs. + + +Developing network and security resource modules +================================================= + +The Ansible Engineering team ensures the module design and code pattern within Ansible-maintained collections is uniform across resources and across platforms to give a vendor-agnostic feel and deliver good quality code. We recommend you use the `resource module builder <https://github.com/ansible-network/resource_module_builder>`_ to develop a resource module. + + +The high-level process for developing a resource module is: + +#. Create and share a resource model design in the `resource module models repository <https://github.com/ansible-network/resource_module_models>`_ as a PR for review. +#. Download the latest version of the `resource module builder <https://github.com/ansible-network/resource_module_builder>`_. +#. Run the ``resource module builder`` to create a collection scaffold from your approved resource model. +#. Write the code to implement your resource module. +#. Develop integration and unit tests to verify your resource module. +#. Create a PR to the appropriate collection that you want to add your new resource module to. See :ref:`contributing_maintained_collections` for details on determining the correct collection for your module. + + +Understanding the model and resource module builder +----------------------------------------------------- + +The resource module builder is an Ansible Playbook that helps developers scaffold and maintain an Ansible resource module.
It uses a model as the single source of truth for the module. This model is a ``yaml`` file that is used for the module DOCUMENTATION section and the argument spec. + +The resource module builder has the following capabilities: + +- Uses a defined model to scaffold a resource module directory layout and initial class files. +- Scaffolds either an Ansible role or a collection. +- Subsequent uses of the resource module builder will only replace the module argspec and file containing the module docstring. +- Allows you to store complex examples alongside the model in the same directory. +- Maintains the model as the source of truth for the module and uses the resource module builder to update the source files as needed. +- Generates working sample modules for both ``<network_os>_<resource>`` and ``<network_os>_facts``. + +Accessing the resource module builder +------------------------------------- + +To access the resource module builder: + +1. Clone the GitHub repository: + + .. code-block:: bash + + git clone https://github.com/ansible-network/resource_module_builder.git + +2. Install the requirements: + + .. code-block:: bash + + pip install -r requirements.txt + +Creating a model +----------------- + +You must create a model for your new resource. The model is the single source of truth for both the argspec and docstring, keeping them in sync. Once your model is approved, you can use the resource module builder to generate three items based on the model: + +* The scaffold for a new module +* The argspec for the new module +* The docstring for the new module + +For any subsequent changes to the functionality, update the model first and use the resource module builder to update the module argspec and docstring. + +For example, the resource model builder includes the ``myos_interfaces.yml`` sample in the :file:`models` directory, as seen below: + +..
code-block:: yaml + + --- + GENERATOR_VERSION: '1.0' + + NETWORK_OS: myos + RESOURCE: interfaces + COPYRIGHT: Copyright 2019 Red Hat + LICENSE: gpl-3.0.txt + + DOCUMENTATION: | + module: myos_interfaces + version_added: 1.0.0 + short_description: 'Manages <xxxx> attributes of <network_os> <resource>' + description: 'Manages <xxxx> attributes of <network_os> <resource>.' + author: Ansible Network Engineer + notes: + - 'Tested against <network_os> <version>' + options: + config: + description: The provided configuration + type: list + elements: dict + suboptions: + name: + type: str + description: The name of the <resource> + some_string: + type: str + description: + - The some_string_01 + choices: + - choice_a + - choice_b + - choice_c + default: choice_a + some_bool: + description: + - The some_bool. + type: bool + some_int: + description: + - The some_int. + type: int + version_added: '1.1.0' + some_dict: + type: dict + description: + - The some_dict. + suboptions: + property_01: + description: + - The property_01 + type: str + state: + description: + - The state of the configuration after module completion. + type: str + choices: + - merged + - replaced + - overridden + - deleted + default: merged + EXAMPLES: + - deleted_example_01.txt + - merged_example_01.txt + - overridden_example_01.txt + - replaced_example_01.txt + +Notice that you should include examples for each of the states that the resource supports. The resource module builder also includes these in the sample model. + +Share this model as a PR for review at `resource module models repository <https://github.com/ansible-network/resource_module_models>`_. You can also see more model examples at that location. + + +Creating a collection scaffold from a resource model +---------------------------------------------------- + +To use the resource module builder to create a collection scaffold from your approved resource model: + +.. 
code-block:: bash + + ansible-playbook -e rm_dest=<destination for modules and module utils> \ + -e structure=collection \ + -e collection_org=<collection_org> \ + -e collection_name=<collection_name> \ + -e model=<model> \ + site.yml + +Where the parameters are as follows: + +- ``rm_dest``: The directory where the resource module builder places the files and directories for the resource module and facts modules. +- ``structure``: The directory layout type (role or collection) + + - ``role``: Generate a role directory layout. + - ``collection``: Generate a collection directory layout. + +- ``collection_org``: The organization of the collection, required when `structure=collection`. +- ``collection_name``: The name of the collection, required when `structure=collection`. +- ``model``: The path to the model file. + +To use the resource module builder to create a role scaffold: + +.. code-block:: bash + + ansible-playbook -e rm_dest=<destination for modules and module utils> \ + -e structure=role \ + -e model=<model> \ + site.yml + +Examples +======== + +Collection directory layout +--------------------------- + +This example shows the directory layout for the following: + +- ``network_os``: myos +- ``resource``: interfaces + +.. code-block:: bash + + ansible-playbook -e rm_dest=~/github/rm_example \ + -e structure=collection \ + -e collection_org=cidrblock \ + -e collection_name=my_collection \ + -e model=models/myos/interfaces/myos_interfaces.yml \ + site.yml + +.. 
code-block:: text + + ├── docs + ├── LICENSE.txt + ├── playbooks + ├── plugins + | ├── action + | ├── filter + | ├── inventory + | ├── modules + | | ├── __init__.py + | | ├── myos_facts.py + | | └── myos_interfaces.py + | └── module_utils + | ├── __init__.py + | └── network + | ├── __init__.py + | └── myos + | ├── argspec + | | ├── facts + | | | ├── facts.py + | | | └── __init__.py + | | ├── __init__.py + | | └── interfaces + | | ├── __init__.py + | | └── interfaces.py + | ├── config + | | ├── __init__.py + | | └── interfaces + | | ├── __init__.py + | | └── interfaces.py + | ├── facts + | | ├── facts.py + | | ├── __init__.py + | | └── interfaces + | | ├── __init__.py + | | └── interfaces.py + | ├── __init__.py + | └── utils + | ├── __init__.py + | └── utils.py + ├── README.md + └── roles + + +Role directory layout +--------------------- + +This example displays the role directory layout for the following: + +- ``network_os``: myos +- ``resource``: interfaces + +.. code-block:: bash + + ansible-playbook -e rm_dest=~/github/rm_example/roles/my_role \ + -e structure=role \ + -e model=models/myos/interfaces/myos_interfaces.yml \ + site.yml + + +.. code-block:: text + + roles + └── my_role + ├── library + │ ├── __init__.py + │ ├── myos_facts.py + │ └── myos_interfaces.py + ├── LICENSE.txt + ├── module_utils + │ ├── __init__.py + │ └── network + │ ├── __init__.py + │ └── myos + │ ├── argspec + │ │ ├── facts + │ │ │ ├── facts.py + │ │ │ └── __init__.py + │ │ ├── __init__.py + │ │ └── interfaces + │ │ ├── __init__.py + │ │ └── interfaces.py + │ ├── config + │ │ ├── __init__.py + │ │ └── interfaces + │ │ ├── __init__.py + │ │ └── interfaces.py + │ ├── facts + │ │ ├── facts.py + │ │ ├── __init__.py + │ │ └── interfaces + │ │ ├── __init__.py + │ │ └── interfaces.py + │ ├── __init__.py + │ └── utils + │ ├── __init__.py + │ └── utils.py + └── README.md + + +Using the collection +-------------------- + +This example shows how to use the generated collection in a playbook: + + .. 
code-block:: yaml + + --- + - hosts: myos101 + gather_facts: False + tasks: + - cidrblock.my_collection.myos_interfaces: + register: result + - debug: + var: result + - cidrblock.my_collection.myos_facts: + - debug: + var: ansible_network_resources + + +Using the role +-------------- + +This example shows how to use the generated role in a playbook: + +.. code-block:: yaml + + - hosts: myos101 + gather_facts: False + roles: + - my_role + + - hosts: myos101 + gather_facts: False + tasks: + - myos_interfaces: + register: result + - debug: + var: result + - myos_facts: + - debug: + var: ansible_network_resources + + +Resource module structure and workflow +====================================== + +The resource module structure includes the following components: + +Module + * ``library/<ansible_network_os>_<resource>.py``. + * Imports the ``module_utils`` resource package and calls ``execute_module`` API: + + .. code-block:: text + + def main(): + result = <resource_package>(module).execute_module() + +Module argspec + * ``module_utils/<ansible_network_os>/argspec/<resource>/``. + * Argspec for the resource. + +Facts + * ``module_utils/<ansible_network_os>/facts/<resource>/``. + * Populate facts for the resource. + * Entry in ``module_utils/<ansible_network_os>/facts/facts.py`` for ``get_facts`` API to keep ``<ansible_network_os>_facts`` module and facts gathered for the resource module in sync for every subset. + * Entry of Resource subset in FACTS_RESOURCE_SUBSETS list in ``module_utils/<ansible_network_os>/facts/facts.py`` to make facts collection work. + +Module package in module_utils + * ``module_utils/<ansible_network_os>/<config>/<resource>/``. + * Implement ``execute_module`` API that loads the configuration to device and generates the result with ``changed``, ``commands``, ``before`` and ``after`` keys. + * Call ``get_facts`` API that returns the ``<resource>`` configuration facts or return the difference if the device has onbox diff support.
+ * Compare facts gathered and given key-values if diff is not supported. + * Generate final configuration. + +Utils + * ``module_utils/<ansible_network_os>/utils``. + * Utilities for the ``<ansible_network_os>`` platform. + +.. _tox_resource_modules: + +Running ``ansible-test sanity`` and ``tox`` on resource modules +================================================================ + +You should run ``ansible-test sanity`` and ``tox -elinters`` from the collection root directory before pushing your PR to an Ansible-maintained collection. The CI runs both and will fail if these tests fail. See :ref:`developing_testing` for details on ``ansible-test sanity``. + +To install the necessary packages: + +#. Ensure you have a valid Ansible development environment configured. See :ref:`environment_setup` for details. +#. Run ``pip install -r requirements.txt`` from the collection root directory. + + + Running ``tox -elinters``: + + * Reads :file:`tox.ini` from the collection root directory and installs required dependencies (such as ``black`` and ``flake8``). + * Runs these with preconfigured options (such as line-length and ignores.) + * Runs ``black`` in check mode to show which files will be formatted without actually formatting them. + +Testing resource modules +======================== + +The tests rely on a role generated by the resource module builder. After changes to the resource module builder, the role should be regenerated and the tests modified and run as needed. To generate the role after changes: + +.. code-block:: bash + + rm -rf rmb_tests/roles/my_role + ansible-playbook -e rm_dest=./rmb_tests/roles/my_role \ + -e structure=role \ + -e model=models/myos/interfaces/myos_interfaces.yml \ + site.yml + + +.. _testing_resource_modules: + +Resource module integration tests +---------------------------------- + +High-level integration test requirements for new resource modules are as follows: + +#. Write a test case for every state. +#. 
Write additional test cases to test the behavior of the module when an empty ``config.yaml`` is given. +#. Add a round trip test case. This involves a ``merge`` operation, followed by ``gather_facts``, a ``merge`` update with additional configuration, and then reverting back to the base configuration using the previously gathered facts with the ``state`` set to ``overridden``. +#. Wherever applicable, assertions should check after and before ``dicts`` against a hard-coded Source of Truth. + +.. _using_zuul_resource_modules: + +We use Zuul as the CI to run the integration test. + +* To view the report, click :guilabel:`Details` on the CI comment in the PR. +* To view a failure report, click :guilabel:`ansible/check` and select the failed test. +* To view logs while the test is running, check for your PR number in the `Zuul status board <https://dashboard.zuul.ansible.com/t/ansible/status>`_. +* To fix static test failure locally, run :command:`tox -e black` **inside the root folder of the collection**. + +To view the Ansible run logs and debug test failures: + +#. Click the failed job to get the summary, and click :guilabel:`Logs` for the log. +#. Click :guilabel:`console` and scroll down to find the failed test. +#. Click :guilabel:`>` next to the failed test for complete details. + + +Integration test structure +........................... + +Each test case should generally follow this pattern: + +* setup —> test —> assert —> test again (for idempotency) —> assert —> tear down (if needed) —> done. This keeps test playbooks from becoming monolithic and difficult to troubleshoot. +* Include a name for each task that is not an assertion. You can add names to assertions as well, but it is easier to identify the broken task within a failed test if you add a name for each task. +* Files containing test cases must end in ``.yaml``. + +Implementation +..............
+ +For platforms that support ``connection: local`` *and* ``connection: network_cli`` use the following guidance: + +* Name the :file:`targets/` directories after the module name. +* The :file:`main.yaml` file should just reference the transport. + +The following example walks through the integration tests for the ``vyos.vyos.vyos_l3_interfaces`` module in the `vyos.vyos <https://github.com/ansible-collections/vyos.vyos/tree/master/tests/integration>`_ collection: + +``test/integration/targets/vyos_l3_interfaces/tasks/main.yaml`` + +.. code-block:: yaml + + --- + - include: cli.yaml + tags: + - cli + +``test/integration/targets/vyos_l3_interfaces/tasks/cli.yaml`` + +.. code-block:: yaml + + --- + - name: collect all cli test cases + find: + paths: "{{ role_path }}/tests/cli" + patterns: "{{ testcase }}.yaml" + register: test_cases + delegate_to: localhost + + - name: set test_items + set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}" + + - name: run test cases (connection=network_cli) + include: "{{ test_case_to_run }} ansible_connection=network_cli" + with_items: "{{ test_items }}" + loop_control: + loop_var: test_case_to_run + + - name: run test case (connection=local) + include: "{{ test_case_to_run }} ansible_connection=local ansible_become=no" + with_first_found: "{{ test_items }}" + loop_control: + loop_var: test_case_to_run + +``test/integration/targets/vyos_l3_interfaces/tests/cli/overridden.yaml`` + +.. 
code-block:: yaml + + --- + - debug: + msg: START vyos_l3_interfaces merged integration tests on connection={{ ansible_connection + }} + + - include_tasks: _remove_config.yaml + + - block: + + - include_tasks: _populate.yaml + + - name: Overrides all device configuration with provided configuration + register: result + vyos.vyos.vyos_l3_interfaces: &id001 + config: + + - name: eth0 + ipv4: + + - address: dhcp + + - name: eth1 + ipv4: + + - address: 192.0.2.15/24 + state: overridden + + - name: Assert that before dicts were correctly generated + assert: + that: + - "{{ populate | symmetric_difference(result['before']) |length == 0 }}" + + - name: Assert that correct commands were generated + assert: + that: + - "{{ overridden['commands'] | symmetric_difference(result['commands'])\ + \ |length == 0 }}" + + - name: Assert that after dicts were correctly generated + assert: + that: + - "{{ overridden['after'] | symmetric_difference(result['after']) |length\ + \ == 0 }}" + + - name: Overrides all device configuration with provided configurations (IDEMPOTENT) + register: result + vyos.vyos.vyos_l3_interfaces: *id001 + + - name: Assert that the previous task was idempotent + assert: + that: + - result['changed'] == false + + - name: Assert that before dicts were correctly generated + assert: + that: + - "{{ overridden['after'] | symmetric_difference(result['before']) |length\ + \ == 0 }}" + always: + + - include_tasks: _remove_config.yaml + + +Detecting test resources at runtime +................................... + +Your tests should detect resources (such as interfaces) at runtime rather than hard-coding them into the test. This allows the test to run on a variety of systems. + +For example: + +.. 
code-block:: yaml + + - name: Collect interface list + connection: ansible.netcommon.network_cli + register: intout + cisco.nxos.nxos_command: + commands: + - show interface brief | json + + - set_fact: + intdataraw: "{{ intout.stdout_lines[0]['TABLE_interface']['ROW_interface'] }}" + + - set_fact: + nxos_int1: '{{ intdataraw[1].interface }}' + + - set_fact: + nxos_int2: '{{ intdataraw[2].interface }}' + + - set_fact: + nxos_int3: '{{ intdataraw[3].interface }}' + + +See the complete test example of this at https://github.com/ansible-collections/cisco.nxos/blob/master/tests/integration/targets/prepare_nxos_tests/tasks/main.yml. + + +Running network integration tests +.................................. + +Ansible uses Zuul to run an integration test suite on every PR, including new tests introduced by that PR. To find and fix problems in network modules, run the network integration test locally before you submit a PR. + + +First, create an inventory file that points to your test machines. The inventory group should match the platform name (for example, ``eos``, ``ios``): + +.. code-block:: bash + + cd test/integration + cp inventory.network.template inventory.networking + ${EDITOR:-vi} inventory.networking + # Add in machines for the platform(s) you wish to test + +To run these network integration tests, use ``ansible-test network-integration --inventory </path/to/inventory> <tests_to_run>``: + +.. code-block:: console + + ansible-test network-integration --inventory ~/myinventory -vvv vyos_facts + ansible-test network-integration --inventory ~/myinventory -vvv vyos_.* + + + +To run all network tests for a particular platform: + +.. code-block:: bash + + ansible-test network-integration --inventory /path/to-collection-module/test/integration/inventory.networking vyos_.* + +This example will run against all ``vyos`` modules. Note that ``vyos_.*`` is a regex match, not a bash wildcard - include the `.` if you modify this example. 
+ +To run integration tests for a specific module: + +.. code-block:: bash + + ansible-test network-integration --inventory /path/to-collection-module/test/integration/inventory.networking vyos_l3_interfaces + +To run a single test case on a specific module: + +.. code-block:: bash + + # Only run vyos_l3_interfaces/tests/cli/gathered.yaml + ansible-test network-integration --inventory /path/to-collection-module/test/integration/inventory.networking vyos_l3_interfaces --testcase gathered + +To run integration tests for a specific transport: + +.. code-block:: bash + + # Only run nxapi test + ansible-test network-integration --inventory /path/to-collection-module/test/integration/inventory.networking --tags="nxapi" nxos_.* + + # Skip any cli tests + ansible-test network-integration --inventory /path/to-collection-module/test/integration/inventory.networking --skip-tags="cli" nxos_.* + +See `test/integration/targets/nxos_bgp/tasks/main.yaml <https://github.com/ansible-collections/cisco.nxos/blob/master/tests/integration/targets/nxos_bgp/tasks/main.yaml>`_ for how this is implemented in the tests. + +For more options: + +.. code-block:: bash + + ansible-test network-integration --help + +If you need additional help or feedback, reach out in ``#ansible-network`` on Freenode. + +Unit test requirements +----------------------- + +High-level unit test requirements that new resource modules should follow: + +#. Write test cases for all the states with all possible combinations of config values. +#. Write test cases to test the error conditions (negative scenarios). +#. Check the value of ``changed`` and ``commands`` keys in every test case. + +We run all unit test cases on our Zuul test suite, on the latest Python version supported by our CI setup. + +Use the :ref:`same procedure <using_zuul_resource_modules>` as the integration tests to view Zuul unit test reports and logs. + +See :ref:`unit module testing <testing_units_modules>` for general unit test details. + +..
end of cut n .. parsed-literal:: + + +Example: Unit testing Ansible network resource modules +====================================================== + + +This section walks through an example of how to develop unit tests for Ansible resource +modules. + +See :ref:`testing_units` and :ref:`testing_units_modules` for general documentation on Ansible unit tests for modules. +Please read those pages first to understand unit tests and why and when you should use them. + + +Using mock objects to unit test Ansible network resource modules +---------------------------------------------------------------- + + +`Mock objects <https://docs.python.org/3/library/unittest.mock.html>`_ can be very +useful in building unit tests for special or difficult cases, but they can also +lead to complex and confusing coding situations. One good use for mocks would be to +simulate an API. The ``mock`` Python package is bundled with Ansible (use +``import units.compat.mock``). + +You can mock the device connection and output from the device as follows: + +.. 
code-block:: python + + self.mock_get_config = patch( "ansible_collections.ansible.netcommon.plugins.module_utils.network.common.network.Config.get_config" + ) + self.get_config = self.mock_get_config.start() + + self.mock_load_config = patch( + "ansible_collections.ansible.netcommon.plugins.module_utils.network.common.network.Config.load_config" + ) + self.load_config = self.mock_load_config.start() + + self.mock_get_resource_connection_config = patch( + "ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base.get_resource_connection" + ) + self.get_resource_connection_config = (self.mock_get_resource_connection_config.start()) + + self.mock_get_resource_connection_facts = patch( + "ansible_collections.ansible.netcommon.plugins.module_utils.network.common.facts.facts.get_resource_connection" + ) + self.get_resource_connection_facts = (self.mock_get_resource_connection_facts.start()) + + self.mock_edit_config = patch( + "ansible_collections.arista.eos.plugins.module_utils.network.eos.providers.providers.CliProvider.edit_config" + ) + self.edit_config = self.mock_edit_config.start() + + self.mock_execute_show_command = patch( + "ansible_collections.arista.eos.plugins.module_utils.network.eos.facts.l2_interfaces.l2_interfaces.L2_interfacesFacts.get_device_data" + ) + self.execute_show_command = self.mock_execute_show_command.start() + + +The facts file of the module now includes a new method, ``get_device_data``. Call ``get_device_data`` here to emulate the device output. + + +Mocking device data +----------------------- + +To mock fetching results from devices or provide other complex data structures that +come from external libraries, you can use ``fixtures`` to read in pre-generated data. The text files for this pre-generated data live in ``test/units/modules/network/PLATFORM/fixtures/``. 
See for example the `eos_l2_interfaces.cfg file <https://github.com/ansible-collections/arista.eos/blob/master/tests/unit/modules/network/eos/fixtures/eos_l2_interfaces_config.cfg>`_. + +Load data using the ``load_fixture`` method and set this data as the return value of the +``get_device_data`` method in the facts file: + +.. code-block:: python + + def load_fixtures(self, commands=None, transport='cli'): + def load_from_file(*args, **kwargs): + return load_fixture('eos_l2_interfaces_config.cfg') + self.execute_show_command.side_effect = load_from_file + +See the unit test file `test_eos_l2_interfaces <https://github.com/ansible-collections/arista.eos/blob/master/tests/unit/modules/network/eos/test_eos_l2_interfaces.py>`_ +for a practical example. + + +.. seealso:: + + :ref:`testing_units` + Deep dive into developing unit tests for Ansible modules + :ref:`testing_running_locally` + Running tests locally including gathering and reporting coverage data + :ref:`developing_modules_general` + Get started developing a module diff --git a/docs/docsite/rst/network/dev_guide/documenting_modules_network.rst b/docs/docsite/rst/network/dev_guide/documenting_modules_network.rst new file mode 100644 index 00000000..78c88b37 --- /dev/null +++ b/docs/docsite/rst/network/dev_guide/documenting_modules_network.rst @@ -0,0 +1,52 @@ + +.. _documenting_modules_network: + +********************************* +Documenting new network platforms +********************************* + +.. contents:: + :local: + +When you create network modules for a new platform, or modify the connections provided by an existing network platform(such as ``network_cli`` and ``httpapi``), you also need to update the :ref:`settings_by_platform` table and add or modify the Platform Options file for your platform. + +You should already have documented each module as described in :ref:`developing_modules_documenting`. 
+ +Modifying the platform options table +==================================== + +The :ref:`settings_by_platform` table is a convenient summary of the connections options provided by each network platform that has modules in Ansible. Add a row for your platform to this table, in alphabetical order. For example: + +.. code-block:: text + + +-------------------+-------------------------+-------------+---------+---------+----------+ + | My OS | ``myos`` | ✓ | ✓ | | ✓ | + +Ensure that the table stays formatted correctly. That is: + +* Each row is inserted in alphabetical order. +* The cell division ``|`` markers line up with the ``+`` markers. +* The check marks appear only for the connection types provided by the network modules. + + + +Adding a platform-specific options section +========================================== + +The platform- specific sections are individual ``.rst`` files that provide more detailed information for the users of your network platform modules. Name your new file ``platform_<name>.rst`` (for example, ``platform_myos.rst``). The platform name should match the module prefix. See `platform_eos.rst <https://github.com/ansible/ansible/blob/devel/docs/docsite/rst/network/user_guide/platform_eos.rst>`_ and :ref:`eos_platform_options` for an example of the details you should provide in your platform-specific options section. + +Your platform-specific section should include the following: + +* **Connections available table** - a deeper dive into each connection type, including details on credentials, indirect access, connections settings, and enable mode. +* **How to use each connection type** - with working examples of each connection type. + +If your network platform supports SSH connections, also include the following at the bottom of your ``.rst`` file: + +.. code-block:: text + + .. 
include:: shared_snippets/SSH_warning.txt + +Adding your new file to the table of contents +============================================= + +As a final step, add your new file in alphabetical order in the ``platform_index.rst`` file. You should then build the documentation to verify your additions. See :ref:`community_documentation_contributions` for more details. diff --git a/docs/docsite/rst/network/dev_guide/index.rst b/docs/docsite/rst/network/dev_guide/index.rst new file mode 100644 index 00000000..5f0e7924 --- /dev/null +++ b/docs/docsite/rst/network/dev_guide/index.rst @@ -0,0 +1,32 @@ +.. _network_developer_guide: + +********************************** +Network Developer Guide +********************************** + +Welcome to the Developer Guide for Ansible Network Automation! + +**Who should use this guide?** + +If you want to extend Ansible for Network Automation by creating a module or plugin, this guide is for you. This guide is specific to networking. You should already be familiar with how to create, test, and document modules and plugins, as well as the prerequisites for getting your module or plugin accepted into the main Ansible repository. See the :ref:`developer_guide` for details. Before you proceed, please read: + +* How to :ref:`add a custom plugin or module locally <developing_locally>`. +* How to figure out if :ref:`developing a module is the right approach <module_dev_should_you>` for my use case. +* How to :ref:`set up my Python development environment <environment_setup>`. +* How to :ref:`get started writing a module <developing_modules_general>`. + + +Find the network developer task that best describes what you want to do: + + * I want to :ref:`develop a network resource module <developing_resource_modules>`. + * I want to :ref:`develop a network connection plugin <developing_plugins_network>`. + * I want to :ref:`document my set of modules for a network platform <documenting_modules_network>`. 
+ +If you prefer to read the entire guide, here's a list of the pages in order. + +.. toctree:: + :maxdepth: 1 + + developing_resource_modules_network + developing_plugins_network + documenting_modules_network diff --git a/docs/docsite/rst/network/getting_started/basic_concepts.rst b/docs/docsite/rst/network/getting_started/basic_concepts.rst new file mode 100644 index 00000000..980b144d --- /dev/null +++ b/docs/docsite/rst/network/getting_started/basic_concepts.rst @@ -0,0 +1,10 @@ +************** +Basic Concepts +************** + +These concepts are common to all uses of Ansible, including network automation. You need to understand them to use Ansible for network automation. This basic introduction provides the background you need to follow the examples in this guide. + +.. contents:: + :local: + +.. include:: ../../shared_snippets/basic_concepts.txt diff --git a/docs/docsite/rst/network/getting_started/first_inventory.rst b/docs/docsite/rst/network/getting_started/first_inventory.rst new file mode 100644 index 00000000..d3d1528e --- /dev/null +++ b/docs/docsite/rst/network/getting_started/first_inventory.rst @@ -0,0 +1,431 @@ +*********************************************** +Build Your Inventory +*********************************************** + +Running a playbook without an inventory requires several command-line flags. Also, running a playbook against a single device is not a huge efficiency gain over making the same change manually. The next step to harnessing the full power of Ansible is to use an inventory file to organize your managed nodes into groups with information like the ``ansible_network_os`` and the SSH user. A fully-featured inventory file can serve as the source of truth for your network. Using an inventory file, a single playbook can maintain hundreds of network devices with a single command. This page shows you how to build an inventory file, step by step. + +.. 
contents:: + :local: + +Basic inventory +================================================== + +First, group your inventory logically. Best practice is to group servers and network devices by their What (application, stack or microservice), Where (datacenter or region), and When (development stage): + +- **What**: db, web, leaf, spine +- **Where**: east, west, floor_19, building_A +- **When**: dev, test, staging, prod + +Avoid spaces, hyphens, and preceding numbers (use ``floor_19``, not ``19th_floor``) in your group names. Group names are case sensitive. + +This tiny example data center illustrates a basic group structure. You can group groups using the syntax ``[metagroupname:children]`` and listing groups as members of the metagroup. Here, the group ``network`` includes all leafs and all spines; the group ``datacenter`` includes all network devices plus all webservers. + +.. code-block:: yaml + + --- + + leafs: + hosts: + leaf01: + ansible_host: 10.16.10.11 + leaf02: + ansible_host: 10.16.10.12 + + spines: + hosts: + spine01: + ansible_host: 10.16.10.13 + spine02: + ansible_host: 10.16.10.14 + + network: + children: + leafs: + spines: + + webservers: + hosts: + webserver01: + ansible_host: 10.16.10.15 + webserver02: + ansible_host: 10.16.10.16 + + datacenter: + children: + network: + webservers: + + + +You can also create this same inventory in INI format. + +.. code-block:: ini + + [leafs] + leaf01 + leaf02 + + [spines] + spine01 + spine02 + + [network:children] + leafs + spines + + [webservers] + webserver01 + webserver02 + + [datacenter:children] + network + webservers + + +Add variables to the inventory +================================================================================ + +Next, you can set values for many of the variables you needed in your first Ansible command in the inventory, so you can skip them in the ``ansible-playbook`` command. In this example, the inventory includes each network device's IP, OS, and SSH user. 
If your network devices are only accessible by IP, you must add the IP to the inventory file. If you access your network devices using hostnames, the IP is not necessary. + +.. code-block:: yaml + + --- + + leafs: + hosts: + leaf01: + ansible_host: 10.16.10.11 + ansible_network_os: vyos.vyos.vyos + ansible_user: my_vyos_user + leaf02: + ansible_host: 10.16.10.12 + ansible_network_os: vyos.vyos.vyos + ansible_user: my_vyos_user + + spines: + hosts: + spine01: + ansible_host: 10.16.10.13 + ansible_network_os: vyos.vyos.vyos + ansible_user: my_vyos_user + spine02: + ansible_host: 10.16.10.14 + ansible_network_os: vyos.vyos.vyos + ansible_user: my_vyos_user + + network: + children: + leafs: + spines: + + webservers: + hosts: + webserver01: + ansible_host: 10.16.10.15 + ansible_user: my_server_user + webserver02: + ansible_host: 10.16.10.16 + ansible_user: my_server_user + + datacenter: + children: + network: + webservers: + + +Group variables within inventory +================================================================================ + +When devices in a group share the same variable values, such as OS or SSH user, you can reduce duplication and simplify maintenance by consolidating these into group variables: + +.. 
code-block:: yaml + + --- + + leafs: + hosts: + leaf01: + ansible_host: 10.16.10.11 + leaf02: + ansible_host: 10.16.10.12 + vars: + ansible_network_os: vyos.vyos.vyos + ansible_user: my_vyos_user + + spines: + hosts: + spine01: + ansible_host: 10.16.10.13 + spine02: + ansible_host: 10.16.10.14 + vars: + ansible_network_os: vyos.vyos.vyos + ansible_user: my_vyos_user + + network: + children: + leafs: + spines: + + webservers: + hosts: + webserver01: + ansible_host: 10.16.10.15 + webserver02: + ansible_host: 10.16.10.16 + vars: + ansible_user: my_server_user + + datacenter: + children: + network: + webservers: + +Variable syntax +================================================================================ + +The syntax for variable values is different in inventory, in playbooks, and in the ``group_vars`` files, which are covered below. Even though playbook and ``group_vars`` files are both written in YAML, you use variables differently in each. + +- In an ini-style inventory file you **must** use the syntax ``key=value`` for variable values: ``ansible_network_os=vyos.vyos.vyos``. +- In any file with the ``.yml`` or ``.yaml`` extension, including playbooks and ``group_vars`` files, you **must** use YAML syntax: ``key: value``. + +- In ``group_vars`` files, use the full ``key`` name: ``ansible_network_os: vyos.vyos.vyos``. +- In playbooks, use the short-form ``key`` name, which drops the ``ansible`` prefix: ``network_os: vyos.vyos.vyos``. + + +Group inventory by platform +================================================================================ + +As your inventory grows, you may want to group devices by platform. This allows you to specify platform-specific variables easily for all devices on that platform: + +.. 
code-block:: yaml + + --- + + leafs: + hosts: + leaf01: + ansible_host: 10.16.10.11 + leaf02: + ansible_host: 10.16.10.12 + + spines: + hosts: + spine01: + ansible_host: 10.16.10.13 + spine02: + ansible_host: 10.16.10.14 + + network: + children: + leafs: + spines: + vars: + ansible_connection: ansible.netcommon.network_cli + ansible_network_os: vyos.vyos.vyos + ansible_user: my_vyos_user + + webservers: + hosts: + webserver01: + ansible_host: 10.16.10.15 + webserver02: + ansible_host: 10.16.10.16 + vars: + ansible_user: my_server_user + + datacenter: + children: + network: + webservers: + +With this setup, you can run ``first_playbook.yml`` with only two flags: + +.. code-block:: console + + ansible-playbook -i inventory.yml -k first_playbook.yml + +With the ``-k`` flag, you provide the SSH password(s) at the prompt. Alternatively, you can store SSH and other secrets and passwords securely in your group_vars files with ``ansible-vault``. See :ref:`network_vault` for details. + +Verifying the inventory +========================= + +You can use the :ref:`ansible-inventory` CLI command to display the inventory as Ansible sees it. + +.. 
code-block:: console + + $ ansible-inventory -i test.yml --list + { + "_meta": { + "hostvars": { + "leaf01": { + "ansible_connection": "ansible.netcommon.network_cli", + "ansible_host": "10.16.10.11", + "ansible_network_os": "vyos.vyos.vyos", + "ansible_user": "my_vyos_user" + }, + "leaf02": { + "ansible_connection": "ansible.netcommon.network_cli", + "ansible_host": "10.16.10.12", + "ansible_network_os": "vyos.vyos.vyos", + "ansible_user": "my_vyos_user" + }, + "spine01": { + "ansible_connection": "ansible.netcommon.network_cli", + "ansible_host": "10.16.10.13", + "ansible_network_os": "vyos.vyos.vyos", + "ansible_user": "my_vyos_user" + }, + "spine02": { + "ansible_connection": "ansible.netcommon.network_cli", + "ansible_host": "10.16.10.14", + "ansible_network_os": "vyos.vyos.vyos", + "ansible_user": "my_vyos_user" + }, + "webserver01": { + "ansible_host": "10.16.10.15", + "ansible_user": "my_server_user" + }, + "webserver02": { + "ansible_host": "10.16.10.16", + "ansible_user": "my_server_user" + } + } + }, + "all": { + "children": [ + "datacenter", + "ungrouped" + ] + }, + "datacenter": { + "children": [ + "network", + "webservers" + ] + }, + "leafs": { + "hosts": [ + "leaf01", + "leaf02" + ] + }, + "network": { + "children": [ + "leafs", + "spines" + ] + }, + "spines": { + "hosts": [ + "spine01", + "spine02" + ] + }, + "webservers": { + "hosts": [ + "webserver01", + "webserver02" + ] + } + } + +.. _network_vault: + +Protecting sensitive variables with ``ansible-vault`` +================================================================================ + +The ``ansible-vault`` command provides encryption for files and/or individual variables like passwords. This tutorial will show you how to encrypt a single SSH password. You can use the commands below to encrypt other sensitive information, such as database passwords, privilege-escalation passwords and more. + +First you must create a password for ansible-vault itself. 
It is used as the encryption key, and with this you can encrypt dozens of different passwords across your Ansible project. You can access all those secrets (encrypted values) with a single password (the ansible-vault password) when you run your playbooks. Here's a simple example. + +1. Create a file and write your password for ansible-vault to it: + +.. code-block:: console + + echo "my-ansible-vault-pw" > ~/my-ansible-vault-pw-file + +2. Create the encrypted ssh password for your VyOS network devices, pulling your ansible-vault password from the file you just created: + +.. code-block:: console + + ansible-vault encrypt_string --vault-id my_user@~/my-ansible-vault-pw-file 'VyOS_SSH_password' --name 'ansible_password' + +If you prefer to type your ansible-vault password rather than store it in a file, you can request a prompt: + +.. code-block:: console + + ansible-vault encrypt_string --vault-id my_user@prompt 'VyOS_SSH_password' --name 'ansible_password' + +and type in the vault password for ``my_user``. + +The :option:`--vault-id <ansible-playbook --vault-id>` flag allows different vault passwords for different users or different levels of access. The output includes the user name ``my_user`` from your ``ansible-vault`` command and uses the YAML syntax ``key: value``: + +.. code-block:: yaml + + ansible_password: !vault | + $ANSIBLE_VAULT;1.2;AES256;my_user + 66386134653765386232383236303063623663343437643766386435663632343266393064373933 + 3661666132363339303639353538316662616638356631650a316338316663666439383138353032 + 63393934343937373637306162366265383461316334383132626462656463363630613832313562 + 3837646266663835640a313164343535316666653031353763613037656362613535633538386539 + 65656439626166666363323435613131643066353762333232326232323565376635 + Encryption successful + +This is an example using an extract from a YAML inventory, as the INI format does not support inline vaults: + +.. code-block:: yaml + + ... 
+ + vyos: # this is a group in yaml inventory, but you can also do under a host + vars: + ansible_connection: ansible.netcommon.network_cli + ansible_network_os: vyos.vyos.vyos + ansible_user: my_vyos_user + ansible_password: !vault | + $ANSIBLE_VAULT;1.2;AES256;my_user + 66386134653765386232383236303063623663343437643766386435663632343266393064373933 + 3661666132363339303639353538316662616638356631650a316338316663666439383138353032 + 63393934343937373637306162366265383461316334383132626462656463363630613832313562 + 3837646266663835640a313164343535316666653031353763613037656362613535633538386539 + 65656439626166666363323435613131643066353762333232326232323565376635 + + ... + +To use inline vaulted variables with an INI inventory, you need to store them in a 'vars' file in YAML format; +it can reside in host_vars/ or group_vars/ to be automatically picked up, or be referenced from a play via ``vars_files`` or ``include_vars``. + +To run a playbook with this setup, drop the ``-k`` flag and add a flag for your ``vault-id``: + +.. code-block:: console + + ansible-playbook -i inventory --vault-id my_user@~/my-ansible-vault-pw-file first_playbook.yml + +Or with a prompt instead of the vault password file: + +.. code-block:: console + + ansible-playbook -i inventory --vault-id my_user@prompt first_playbook.yml + +To see the original value, you can use the debug module. Please note that if your YAML file defines the ``ansible_connection`` variable (as we used in our example), it will take effect when you execute the command below. To prevent this, please make a copy of the file without the ansible_connection variable. + +.. code-block:: console + + cat vyos.yml | grep -v ansible_connection >> vyos_no_connection.yml + + ansible localhost -m debug -a var="ansible_password" -e "@vyos_no_connection.yml" --ask-vault-pass + Vault password: + + localhost | SUCCESS => { + "ansible_password": "VyOS_SSH_password" + } + + +.. 
warning:: + + Vault content can only be decrypted with the password that was used to encrypt it. If you want to stop using one password and move to a new one, you can update and re-encrypt existing vault content with ``ansible-vault rekey myfile``, then provide the old password and the new password. Copies of vault content still encrypted with the old password can still be decrypted with old password. + +For more details on building inventory files, see :ref:`the introduction to inventory<intro_inventory>`; for more details on ansible-vault, see :ref:`the full Ansible Vault documentation<vault>`. + +Now that you understand the basics of commands, playbooks, and inventory, it's time to explore some more complex Ansible Network examples. diff --git a/docs/docsite/rst/network/getting_started/first_playbook.rst b/docs/docsite/rst/network/getting_started/first_playbook.rst new file mode 100644 index 00000000..b09814cd --- /dev/null +++ b/docs/docsite/rst/network/getting_started/first_playbook.rst @@ -0,0 +1,212 @@ + +.. _first_network_playbook: + +*************************************************** +Run Your First Command and Playbook +*************************************************** + +Put the concepts you learned to work with this quick tutorial. Install Ansible, execute a network configuration command manually, execute the same command with Ansible, then create a playbook so you can execute the command any time on multiple network devices. + +.. contents:: + :local: + +Prerequisites +================================================== + +Before you work through this tutorial you need: + +- Ansible 2.10 (or higher) installed +- One or more network devices that are compatible with Ansible +- Basic Linux command line knowledge +- Basic knowledge of network switch & router configuration + +Install Ansible +================================================== + +Install Ansible using your preferred method. See :ref:`installation_guide`. Then return to this tutorial. 
+ +Confirm the version of Ansible (must be >= 2.10): + +.. code-block:: bash + + ansible --version + + +Establish a manual connection to a managed node +================================================== + +To confirm your credentials, connect to a network device manually and retrieve its configuration. Replace the sample user and device name with your real credentials. For example, for a VyOS router: + +.. code-block:: bash + + ssh my_vyos_user@vyos.example.net + show config + exit + +This manual connection also establishes the authenticity of the network device, adding its RSA key fingerprint to your list of known hosts. (If you have connected to the device before, you have already established its authenticity.) + + +Run your first network Ansible command +================================================== + +Instead of manually connecting and running a command on the network device, you can retrieve its configuration with a single, stripped-down Ansible command: + +.. code-block:: bash + + ansible all -i vyos.example.net, -c ansible.netcommon.network_cli -u my_vyos_user -k -m vyos.vyos.vyos_facts -e ansible_network_os=vyos.vyos.vyos + +The flags in this command set seven values: + - the host group(s) to which the command should apply (in this case, all) + - the inventory (-i, the device or devices to target - without the trailing comma -i points to an inventory file) + - the connection method (-c, the method for connecting and executing ansible) + - the user (-u, the username for the SSH connection) + - the SSH connection method (-k, please prompt for the password) + - the module (-m, the Ansible module to run, using the fully qualified collection name (FQCN)) + - an extra variable ( -e, in this case, setting the network OS value) + +NOTE: If you use ``ssh-agent`` with ssh keys, Ansible loads them automatically. You can omit the ``-k`` flag. + +.. 
note:: + + If you are running Ansible in a virtual environment, you will also need to add the variable ``ansible_python_interpreter=/path/to/venv/bin/python`` + + +Create and run your first network Ansible Playbook +================================================== + +If you want to run this command every day, you can save it in a playbook and run it with ``ansible-playbook`` instead of ``ansible``. The playbook can store a lot of the parameters you provided with flags at the command line, leaving less to type at the command line. You need two files for this - a playbook and an inventory file. + +1. Download :download:`first_playbook.yml <sample_files/first_playbook.yml>`, which looks like this: + +.. literalinclude:: sample_files/first_playbook.yml + :language: YAML + +The playbook sets three of the seven values from the command line above: the group (``hosts: all``), the connection method (``connection: ansible.netcommon.network_cli``) and the module (in each task). With those values set in the playbook, you can omit them on the command line. The playbook also adds a second task to show the config output. When a module runs in a playbook, the output is held in memory for use by future tasks instead of written to the console. The debug task here lets you see the results in your shell. + +2. Run the playbook with the command: + +.. code-block:: bash + + ansible-playbook -i vyos.example.net, -u ansible -k -e ansible_network_os=vyos.vyos.vyos first_playbook.yml + +The playbook contains one play with two tasks, and should generate output like this: + +.. 
code-block:: bash + + $ ansible-playbook -i vyos.example.net, -u ansible -k -e ansible_network_os=vyos.vyos.vyos first_playbook.yml + + PLAY [First Playbook] + *************************************************************************************************************************** + + TASK [Get config for VyOS devices] + *************************************************************************************************************************** + ok: [vyos.example.net] + + TASK [Display the config] + *************************************************************************************************************************** + ok: [vyos.example.net] => { + "msg": "The hostname is vyos and the OS is VyOS 1.1.8" + } + +3. Now that you can retrieve the device config, try updating it with Ansible. Download :download:`first_playbook_ext.yml <sample_files/first_playbook_ext.yml>`, which is an extended version of the first playbook: + +.. literalinclude:: sample_files/first_playbook_ext.yml + :language: YAML + +The extended first playbook has four tasks in a single play. Run it with the same command you used above. The output shows you the change Ansible made to the config: + +.. 
code-block:: bash + + $ ansible-playbook -i vyos.example.net, -u ansible -k -e ansible_network_os=vyos.vyos.vyos first_playbook_ext.yml + + PLAY [First Playbook] + ************************************************************************************************************************************ + + TASK [Get config for VyOS devices] + ********************************************************************************************************************************** + ok: [vyos.example.net] + + TASK [Display the config] + ************************************************************************************************************************************* + ok: [vyos.example.net] => { + "msg": "The hostname is vyos and the OS is VyOS 1.1.8" + } + + TASK [Update the hostname] + ************************************************************************************************************************************* + changed: [vyos.example.net] + + TASK [Get changed config for VyOS devices] + ************************************************************************************************************************************* + ok: [vyos.example.net] + + TASK [Display the changed config] + ************************************************************************************************************************************* + ok: [vyos.example.net] => { + "msg": "The new hostname is vyos-changed and the OS is VyOS 1.1.8" + } + + PLAY RECAP + ************************************************************************************************************************************ + vyos.example.net : ok=5 changed=1 unreachable=0 failed=0 + + + +.. _network_gather_facts: + +Gathering facts from network devices +==================================== + +The ``gather_facts`` keyword now supports gathering network device facts in standardized key/value pairs. You can feed these network facts into further tasks to manage the network device. 
+ +You can also use the new ``gather_network_resources`` parameter with the network ``*_facts`` modules (such as :ref:`arista.eos.eos_facts <ansible_collections.arista.eos.eos_facts_module>`) to return just a subset of the device configuration, as shown below. + +.. code-block:: yaml + + - hosts: arista + gather_facts: True + gather_subset: interfaces + module_defaults: + arista.eos.eos_facts: + gather_network_resources: interfaces + +The playbook returns the following interface facts: + +.. code-block:: yaml + + "network_resources": { + "interfaces": [ + { + "description": "test-interface", + "enabled": true, + "mtu": "512", + "name": "Ethernet1" + }, + { + "enabled": true, + "mtu": "3000", + "name": "Ethernet2" + }, + { + "enabled": true, + "name": "Ethernet3" + }, + { + "enabled": true, + "name": "Ethernet4" + }, + { + "enabled": true, + "name": "Ethernet5" + }, + { + "enabled": true, + "name": "Ethernet6" + }, + ] + } + + +Note that this returns a subset of what is returned by just setting ``gather_subset: interfaces``. + +You can store these facts and use them directly in another task, such as with the :ref:`eos_interfaces <ansible_collections.arista.eos.eos_interfaces_module>` resource module. diff --git a/docs/docsite/rst/network/getting_started/index.rst b/docs/docsite/rst/network/getting_started/index.rst new file mode 100644 index 00000000..d9638a5c --- /dev/null +++ b/docs/docsite/rst/network/getting_started/index.rst @@ -0,0 +1,34 @@ +.. _network_getting_started: + +********************************** +Network Getting Started +********************************** + +Ansible collections support a wide range of vendors, device types, and actions, so you can manage your entire network with a single automation tool. 
With Ansible, you can: + +- Automate repetitive tasks to speed routine network changes and free up your time for more strategic work +- Leverage the same simple, powerful, and agentless automation tool for network tasks that operations and development use +- Separate the data model (in a playbook or role) from the execution layer (via Ansible modules) to manage heterogeneous network devices +- Benefit from community and vendor-generated sample playbooks and roles to help accelerate network automation projects +- Communicate securely with network hardware over SSH or HTTPS + +**Who should use this guide?** + +This guide is intended for network engineers using Ansible for the first time. If you understand networks but have never used Ansible, work through the guide from start to finish. + +This guide is also useful for experienced Ansible users automating network tasks for the first time. You can use Ansible commands, playbooks and modules to configure hubs, switches, routers, bridges and other network devices. But network modules are different from Linux/Unix and Windows modules, and you must understand some network-specific concepts to succeed. If you understand Ansible but have never automated a network task, start with the second section. + +This guide introduces basic Ansible concepts and guides you through your first Ansible commands, playbooks and inventory entries. + +.. 
toctree:: + :maxdepth: 2 + :caption: Getting Started Guide + + basic_concepts + network_differences + first_playbook + first_inventory + network_roles + intermediate_concepts + network_connection_options + network_resources diff --git a/docs/docsite/rst/network/getting_started/intermediate_concepts.rst b/docs/docsite/rst/network/getting_started/intermediate_concepts.rst new file mode 100644 index 00000000..3496f22e --- /dev/null +++ b/docs/docsite/rst/network/getting_started/intermediate_concepts.rst @@ -0,0 +1,39 @@ +***************** +Beyond the basics +***************** + +This page introduces some concepts that help you manage your Ansible workflow with directory structure and source control. Like the Basic Concepts at the beginning of this guide, these intermediate concepts are common to all uses of Ansible. + +.. contents:: + :local: + + +A typical Ansible filetree +========================== + +Ansible expects to find certain files in certain places. As you expand your inventory and create and run more network playbooks, keep your files organized in your working Ansible project directory like this: + +.. code-block:: console + + . + ├── backup + │ ├── vyos.example.net_config.2018-02-08@11:10:15 + │ ├── vyos.example.net_config.2018-02-12@08:22:41 + ├── first_playbook.yml + ├── inventory + ├── group_vars + │ ├── vyos.yml + │ └── eos.yml + ├── roles + │ ├── static_route + │ └── system + ├── second_playbook.yml + └── third_playbook.yml + +The ``backup`` directory and the files in it get created when you run modules like ``vyos_config`` with the ``backup: yes`` parameter. + + +Tracking changes to inventory and playbooks: source control with git +==================================================================== + +As you expand your inventory, roles and playbooks, you should place your Ansible projects under source control. We recommend ``git`` for source control. 
``git`` provides an audit trail, letting you track changes, roll back mistakes, view history and share the workload of managing, maintaining and expanding your Ansible ecosystem. There are plenty of tutorials and guides to using ``git`` available. diff --git a/docs/docsite/rst/network/getting_started/network_connection_options.rst b/docs/docsite/rst/network/getting_started/network_connection_options.rst new file mode 100644 index 00000000..c23e7307 --- /dev/null +++ b/docs/docsite/rst/network/getting_started/network_connection_options.rst @@ -0,0 +1,48 @@ +.. _network_connection_options: + +*************************************** +Working with network connection options +*************************************** + +Network modules can support multiple connection protocols, such as ``ansible.netcommon.network_cli``, ``ansible.netcommon.netconf``, and ``ansible.netcommon.httpapi``. These connections include some common options you can set to control how the connection to your network device behaves. + +Common options are: + +* ``become`` and ``become_method`` as described in :ref:`privilege_escalation`. +* ``network_os`` - set to match your network platform you are communicating with. See the :ref:`platform-specific <platform_options>` pages. +* ``remote_user`` as described in :ref:`connection_set_user`. +* Timeout options - ``persistent_command_timeout``, ``persistent_connect_timeout``, and ``timeout``. + +.. _timeout_options: + +Setting timeout options +======================= + +When communicating with a remote device, you have control over how long Ansible maintains the connection to that device, as well as how long Ansible waits for a command to complete on that device. Each of these options can be set as variables in your playbook files, environment variables, or settings in your :ref:`ansible.cfg file <ansible_configuration_settings>`. + +For example, the three options for controlling the connection timeout are as follows. + +Using vars (per task): + +.. 
code-block:: yaml + + - name: save running-config + cisco.ios.ios_command: + commands: copy running-config startup-config + vars: + ansible_command_timeout: 30 + +Using the environment variable: + +.. code-block:: bash + + $ export ANSIBLE_PERSISTENT_COMMAND_TIMEOUT=30 + +Using the global configuration (in :file:`ansible.cfg`): + +.. code-block:: ini + + [persistent_connection] + command_timeout = 30 + +See :ref:`ansible_variable_precedence` for details on the relative precedence of each of these variables. See the individual connection type to understand each option. diff --git a/docs/docsite/rst/network/getting_started/network_differences.rst b/docs/docsite/rst/network/getting_started/network_differences.rst new file mode 100644 index 00000000..76b18aa4 --- /dev/null +++ b/docs/docsite/rst/network/getting_started/network_differences.rst @@ -0,0 +1,68 @@ +************************************************************ +How Network Automation is Different +************************************************************ + +Network automation leverages the basic Ansible concepts, but there are important differences in how the network modules work. This introduction prepares you to understand the exercises in this guide. + +.. contents:: + :local: + +Execution on the control node +================================================================================ + +Unlike most Ansible modules, network modules do not run on the managed nodes. From a user's point of view, network modules work like any other modules. They work with ad-hoc commands, playbooks, and roles. Behind the scenes, however, network modules use a different methodology than the other (Linux/Unix and Windows) modules use. Ansible is written and executed in Python. Because the majority of network devices cannot run Python, the Ansible network modules are executed on the Ansible control node, where ``ansible`` or ``ansible-playbook`` runs. 
+ +Network modules also use the control node as a destination for backup files, for those modules that offer a ``backup`` option. With Linux/Unix modules, where a configuration file already exists on the managed node(s), the backup file gets written by default in the same directory as the new, changed file. Network modules do not update configuration files on the managed nodes, because network configuration is not written in files. Network modules write backup files on the control node, usually in the `backup` directory under the playbook root directory. + +Multiple communication protocols +================================================================================ + +Because network modules execute on the control node instead of on the managed nodes, they can support multiple communication protocols. The communication protocol (XML over SSH, CLI over SSH, API over HTTPS) selected for each network module depends on the platform and the purpose of the module. Some network modules support only one protocol; some offer a choice. The most common protocol is CLI over SSH. You set the communication protocol with the ``ansible_connection`` variable: + +.. csv-table:: + :header: "Value of ansible_connection", "Protocol", "Requires", "Persistent?" + :widths: 30, 10, 10, 10 + + "ansible.netcommon.network_cli", "CLI over SSH", "network_os setting", "yes" + "ansible.netcommon.netconf", "XML over SSH", "network_os setting", "yes" + "ansible.netcommon.httpapi", "API over HTTP/HTTPS", "network_os setting", "yes" + "local", "depends on provider", "provider setting", "no" + +.. note:: + ``ansible.netcommon.httpapi`` deprecates ``eos_eapi`` and ``nxos_nxapi``. See :ref:`httpapi_plugins` for details and an example. + +The ``ansible_connection: local`` has been deprecated. Please use one of the persistent connection types listed above instead. With persistent connections, you can define the hosts and credentials only once, rather than in every task. 
You also need to set the ``network_os`` variable for the specific network platform you are communicating with. For more details on using each connection type on various platforms, see the :ref:`platform-specific <platform_options>` pages. + + +Collections organized by network platform +================================================================================ + +A network platform is a set of network devices with a common operating system that can be managed by an Ansible collection, for example: + +- Arista: `arista.eos <https://galaxy.ansible.com/arista/eos>`_ +- Cisco: `cisco.ios <https://galaxy.ansible.com/cisco/ios>`_, `cisco.iosxr <https://galaxy.ansible.com/cisco/iosxr>`_, `cisco.nxos <https://galaxy.ansible.com/cisco/nxos>`_ +- Juniper: `junipernetworks.junos <https://galaxy.ansible.com/junipernetworks/junos>`_ +- VyOS `vyos.vyos <https://galaxy.ansible.com/vyos/vyos>`_ + +All modules within a network platform share certain requirements. Some network platforms have specific differences - see the :ref:`platform-specific <platform_options>` documentation for details. + +.. _privilege_escalation: + +Privilege Escalation: ``enable`` mode, ``become``, and ``authorize`` +================================================================================ + +Several network platforms support privilege escalation, where certain tasks must be done by a privileged user. On network devices this is called the ``enable`` mode (the equivalent of ``sudo`` in \*nix administration). Ansible network modules offer privilege escalation for those network devices that support it. For details of which platforms support ``enable`` mode, with examples of how to use it, see the :ref:`platform-specific <platform_options>` documentation. 
+ +Using ``become`` for privilege escalation +----------------------------------------- + +Use the top-level Ansible parameter ``become: yes`` with ``become_method: enable`` to run a task, play, or playbook with escalated privileges on any network platform that supports privilege escalation. You must use either ``connection: network_cli`` or ``connection: httpapi`` with ``become: yes`` with ``become_method: enable``. If you are using ``network_cli`` to connect Ansible to your network devices, a ``group_vars`` file would look like: + +.. code-block:: yaml + + ansible_connection: ansible.netcommon.network_cli + ansible_network_os: cisco.ios.ios + ansible_become: yes + ansible_become_method: enable + +For more information, see :ref:`Become and Networks<become_network>` diff --git a/docs/docsite/rst/network/getting_started/network_resources.rst b/docs/docsite/rst/network/getting_started/network_resources.rst new file mode 100644 index 00000000..3451c476 --- /dev/null +++ b/docs/docsite/rst/network/getting_started/network_resources.rst @@ -0,0 +1,46 @@ + +.. _network_resources: + +************************ +Resources and next steps +************************ + +.. contents:: + :local: + +Documents +========= + +Read more about Ansible for Network Automation: + +- Network Automation on the `Ansible website <https://www.ansible.com/overview/networking>`_ +- Ansible Network `Blog posts <https://www.ansible.com/blog/topic/networks>`_ + +Events (on video and in person) +=============================== + +All sessions at Ansible events are recorded and include many Network-related topics (use Filter by Category to view only Network topics). You can also join us for future events in your area. See: + +- `Recorded AnsibleFests <https://www.ansible.com/resources/videos/ansiblefest>`_ +- `Recorded AnsibleAutomates <https://www.ansible.com/resources/webinars-training>`_ +- `Upcoming Ansible Events <https://www.ansible.com/community/events>`_ page. 
+ +GitHub repos +============ + +Ansible hosts module code, examples, demonstrations, and other content on GitHub. Anyone with a GitHub account is able to create Pull Requests (PRs) or issues on these repos: + +- `Network-Automation <https://github.com/network-automation>`_ is an open community for all things network automation. Have an idea, some playbooks, or roles to share? Email ansible-network@redhat.com and we will add you as a contributor to the repository. + +- `Ansible collections <https://github.com/ansible-collections>`_ is the main repository for Ansible-maintained and community collections, including collections for network devices. + + + +IRC and Slack +============= + +Join us on: + +* Freenode IRC - ``#ansible-network`` Freenode channel + +* Slack - `<https://ansiblenetwork.slack.com>`_ diff --git a/docs/docsite/rst/network/getting_started/network_roles.rst b/docs/docsite/rst/network/getting_started/network_roles.rst new file mode 100644 index 00000000..b77d0611 --- /dev/null +++ b/docs/docsite/rst/network/getting_started/network_roles.rst @@ -0,0 +1,267 @@ + +.. _using_network_roles: + +************************* +Use Ansible network roles +************************* + +Roles are sets of Ansible defaults, files, tasks, templates, variables, and other Ansible components that work together. As you saw on :ref:`first_network_playbook`, moving from a command to a playbook makes it easy to run multiple tasks and repeat the same tasks in the same order. Moving from a playbook to a role makes it even easier to reuse and share your ordered tasks. You can look at :ref:`Ansible Galaxy <ansible_galaxy>`, which lets you share your roles and use others' roles, either directly or as inspiration. + +.. contents:: + :local: + +Understanding roles +=================== + +So what exactly is a role, and why should you care? Ansible roles are basically playbooks broken up into a known file structure. 
Moving to roles from a playbook makes sharing, reading, and updating your Ansible workflow easier. Users can write their own roles. So for example, you don't have to write your own DNS playbook. Instead, you specify a DNS server and a role to configure it for you. + +To simplify your workflow even further, the Ansible Network team has written a series of roles for common network use cases. Using these roles means you don't have to reinvent the wheel. Instead of writing and maintaining your own ``create_vlan`` playbooks or roles, you can concentrate on designing, codifying and maintaining the parser templates that describe your network topologies and inventory, and let Ansible's network roles do the work. See the `network-related roles <https://galaxy.ansible.com/ansible-network>`_ on Ansible Galaxy. + +A sample DNS playbook +--------------------- + +To demonstrate the concept of what a role is, the example ``playbook.yml`` below is a single YAML file containing a two-task playbook. This Ansible Playbook configures the hostname on a Cisco IOS XE device, then it configures the DNS (domain name system) servers. + +.. code-block:: yaml + + --- + - name: configure cisco routers + hosts: routers + connection: ansible.netcommon.network_cli + gather_facts: no + vars: + dns: "8.8.8.8 8.8.4.4" + + tasks: + - name: configure hostname + cisco.ios.ios_config: + lines: hostname {{ inventory_hostname }} + + - name: configure DNS + cisco.ios.ios_config: + lines: ip name-server {{dns}} + +If you run this playbook using the ``ansible-playbook`` command, you'll see the output below. This example used ``-l`` option to limit the playbook to only executing on the **rtr1** node. + +.. 
code-block:: bash + + [user@ansible ~]$ ansible-playbook playbook.yml -l rtr1 + + PLAY [configure cisco routers] ************************************************* + + TASK [configure hostname] ****************************************************** + changed: [rtr1] + + TASK [configure DNS] *********************************************************** + changed: [rtr1] + + PLAY RECAP ********************************************************************* + rtr1 : ok=2 changed=2 unreachable=0 failed=0 + + +This playbook configured the hostname and DNS servers. You can verify that configuration on the Cisco IOS XE **rtr1** router: + +.. code-block:: bash + + rtr1#sh run | i name + hostname rtr1 + ip name-server 8.8.8.8 8.8.4.4 + +Convert the playbook into a role +--------------------------------- + +The next step is to convert this playbook into a reusable role. You can create the directory structure manually, or you can use ``ansible-galaxy init`` to create the standard framework for a role. + +.. code-block:: bash + + [user@ansible ~]$ ansible-galaxy init system-demo + [user@ansible ~]$ cd system-demo/ + [user@ansible system-demo]$ tree + . + ├── defaults + │ └── main.yml + ├── files + ├── handlers + │ └── main.yml + ├── meta + │ └── main.yml + ├── README.md + ├── tasks + │ └── main.yml + ├── templates + ├── tests + │ ├── inventory + │ └── test.yml + └── vars + └── main.yml + +This first demonstration uses only the **tasks** and **vars** directories. The directory structure would look as follows: + +.. code-block:: bash + + [user@ansible system-demo]$ tree + . + ├── tasks + │ └── main.yml + └── vars + └── main.yml + +Next, move the content of the ``vars`` and ``tasks`` sections from the original Ansible Playbook into the role. First, move the two tasks into the ``tasks/main.yml`` file: + +.. 
code-block:: bash + + [user@ansible system-demo]$ cat tasks/main.yml + --- + - name: configure hostname + cisco.ios.ios_config: + lines: hostname {{ inventory_hostname }} + + - name: configure DNS + cisco.ios.ios_config: + lines: ip name-server {{dns}} + +Next, move the variables into the ``vars/main.yml`` file: + +.. code-block:: bash + + [user@ansible system-demo]$ cat vars/main.yml + --- + dns: "8.8.8.8 8.8.4.4" + +Finally, modify the original Ansible Playbook to remove the ``tasks`` and ``vars`` sections and add the keyword ``roles`` with the name of the role, in this case ``system-demo``. You'll have this playbook: + +.. code-block:: yaml + + --- + - name: configure cisco routers + hosts: routers + connection: ansible.netcommon.network_cli + gather_facts: no + + roles: + - system-demo + +To summarize, this demonstration now has a total of three directories and three YAML files. There is the ``system-demo`` folder, which represents the role. This ``system-demo`` contains two folders, ``tasks`` and ``vars``. There is a ``main.yml`` in each respective folder. The ``vars/main.yml`` contains the variables from ``playbook.yml``. The ``tasks/main.yml`` contains the tasks from ``playbook.yml``. The ``playbook.yml`` file has been modified to call the role rather than specifying vars and tasks directly. Here is a tree of the current working directory: + +.. code-block:: bash + + [user@ansible ~]$ tree + . + ├── playbook.yml + └── system-demo + ├── tasks + │ └── main.yml + └── vars + └── main.yml + +Running the playbook results in identical behavior with slightly different output: + +.. 
code-block:: bash + + [user@ansible ~]$ ansible-playbook playbook.yml -l rtr1 + + PLAY [configure cisco routers] ************************************************* + + TASK [system-demo : configure hostname] **************************************** + ok: [rtr1] + + TASK [system-demo : configure DNS] ********************************************* + ok: [rtr1] + + PLAY RECAP ********************************************************************* + rtr1 : ok=2 changed=0 unreachable=0 failed=0 + +As seen above each task is now prepended with the role name, in this case ``system-demo``. When running a playbook that contains several roles, this will help pinpoint where a task is being called from. This playbook returned ``ok`` instead of ``changed`` because it has identical behavior for the single file playbook we started from. + +As before, the playbook will generate the following configuration on a Cisco IOS-XE router: + +.. code-block:: bash + + rtr1#sh run | i name + hostname rtr1 + ip name-server 8.8.8.8 8.8.4.4 + + +This is why Ansible roles can be simply thought of as deconstructed playbooks. They are simple, effective and reusable. Now another user can simply include the ``system-demo`` role instead of having to create a custom "hard coded" playbook. + +Variable precedence +------------------- + +What if you want to change the DNS servers? You aren't expected to change the ``vars/main.yml`` within the role structure. Ansible has many places where you can specify variables for a given play. See :ref:`playbooks_variables` for details on variables and precedence. There are actually 21 places to put variables. While this list can seem overwhelming at first glance, the vast majority of use cases only involve knowing the spot for variables of least precedence and how to pass variables with most precedence. See :ref:`ansible_variable_precedence` for more guidance on where you should put variables. 
+ +Lowest precedence +^^^^^^^^^^^^^^^^^ + +The lowest precedence is the ``defaults`` directory within a role. This means all the other 20 locations you could potentially specify the variable will all take higher precedence than ``defaults``, no matter what. To immediately give the vars from the ``system-demo`` role the least precedence, rename the ``vars`` directory to ``defaults``. + +.. code-block:: bash + + [user@ansible system-demo]$ mv vars defaults + [user@ansible system-demo]$ tree + . + ├── defaults + │ └── main.yml + ├── tasks + │ └── main.yml + +Add a new ``vars`` section to the playbook to override the default behavior (where the variable ``dns`` is set to 8.8.8.8 and 8.8.4.4). For this demonstration, set ``dns`` to 1.1.1.1, so ``playbook.yml`` becomes: + +.. code-block:: yaml + + --- + - name: configure cisco routers + hosts: routers + connection: ansible.netcommon.network_cli + gather_facts: no + vars: + dns: 1.1.1.1 + roles: + - system-demo + +Run this updated playbook on **rtr2**: + +.. code-block:: bash + + [user@ansible ~]$ ansible-playbook playbook.yml -l rtr2 + +The configuration on the **rtr2** Cisco router will look as follows: + +.. code-block:: bash + + rtr2#sh run | i name-server + ip name-server 1.1.1.1 + +The variable configured in the playbook now has precedence over the ``defaults`` directory. In fact, any other spot you configure variables would win over the values in the ``defaults`` directory. + +Highest precedence +^^^^^^^^^^^^^^^^^^ + +Specifying variables in the ``defaults`` directory within a role will always take the lowest precedence, while specifying ``vars`` as extra vars with the ``-e`` or ``--extra-vars=`` will always take the highest precedence, no matter what. Re-running the playbook with the ``-e`` option overrides both the ``defaults`` directory (8.8.4.4 and 8.8.8.8) as well as the newly created ``vars`` within the playbook that contains the 1.1.1.1 dns server. + +.. 
code-block:: bash + + [user@ansible ~]$ ansible-playbook playbook.yml -e "dns=192.168.1.1" -l rtr3 + +The result on the Cisco IOS XE router will only contain the highest precedence setting of 192.168.1.1: + +.. code-block:: bash + + rtr3#sh run | i name-server + ip name-server 192.168.1.1 + +How is this useful? Why should you care? Extra vars are commonly used by network operators to override defaults. A powerful example of this is with Red Hat Ansible Tower and the Survey feature. It is possible through the web UI to prompt a network operator to fill out parameters with a Web form. This can be really simple for non-technical playbook writers to execute a playbook using their Web browser. See `Ansible Tower Job Template Surveys <https://docs.ansible.com/ansible-tower/latest/html/userguide/workflow_templates.html#surveys>`_ for more details. + + +Update an installed role +------------------------ + +The Ansible Galaxy page for a role lists all available versions. To update a locally installed role to a new or different version, use the ``ansible-galaxy install`` command with the version and ``--force`` option. You may also need to manually update any dependent roles to support this version. See the role **Read Me** tab in Galaxy for dependent role minimum version requirements. + +.. code-block:: bash + + [user@ansible]$ ansible-galaxy install mynamespace.my_role,v2.7.1 --force + +.. 
seealso:: + + `Ansible Galaxy documentation <https://galaxy.ansible.com/docs/>`_ + Ansible Galaxy user guide diff --git a/docs/docsite/rst/network/getting_started/sample_files/first_playbook.yml b/docs/docsite/rst/network/getting_started/sample_files/first_playbook.yml new file mode 100644 index 00000000..908b89f9 --- /dev/null +++ b/docs/docsite/rst/network/getting_started/sample_files/first_playbook.yml @@ -0,0 +1,15 @@ +--- + +- name: Network Getting Started First Playbook + connection: ansible.netcommon.network_cli + gather_facts: false + hosts: all + tasks: + + - name: Get config for VyOS devices + vyos.vyos.vyos_facts: + gather_subset: all + + - name: Display the config + debug: + msg: "The hostname is {{ ansible_net_hostname }} and the OS is {{ ansible_net_version }}" diff --git a/docs/docsite/rst/network/getting_started/sample_files/first_playbook_ext.yml b/docs/docsite/rst/network/getting_started/sample_files/first_playbook_ext.yml new file mode 100644 index 00000000..2d5f6a5f --- /dev/null +++ b/docs/docsite/rst/network/getting_started/sample_files/first_playbook_ext.yml @@ -0,0 +1,29 @@ +--- + +- name: Network Getting Started First Playbook Extended + connection: ansible.netcommon.network_cli + gather_facts: false + hosts: all + tasks: + + - name: Get config for VyOS devices + vyos.vyos.vyos_facts: + gather_subset: all + + - name: Display the config + debug: + msg: "The hostname is {{ ansible_net_hostname }} and the OS is {{ ansible_net_version }}" + + - name: Update the hostname + vyos.vyos.vyos_config: + backup: yes + lines: + - set system host-name vyos-changed + + - name: Get changed config for VyOS devices + vyos.vyos.vyos_facts: + gather_subset: all + + - name: Display the changed config + debug: + msg: "The new hostname is {{ ansible_net_hostname }} and the OS is {{ ansible_net_version }}" diff --git a/docs/docsite/rst/network/index.rst b/docs/docsite/rst/network/index.rst new file mode 100644 index 00000000..25756391 --- /dev/null +++ 
b/docs/docsite/rst/network/index.rst @@ -0,0 +1,20 @@ +:orphan: + +.. _network_guide: + +****************************** +Ansible for Network Automation +****************************** + +Ansible Network modules extend the benefits of simple, powerful, agentless automation to network administrators and teams. Ansible Network modules can configure your network stack, test and validate existing network state, and discover and correct network configuration drift. + +If you're new to Ansible, or new to using Ansible for network management, start with :ref:`network_getting_started`. If you are already familiar with network automation with Ansible, see :ref:`network_advanced`. + +For documentation on using a particular network module, consult the :ref:`list of all network modules<network_modules>`. Network modules for various hardware are supported by different teams including the hardware vendors themselves, volunteers from the Ansible community, and the Ansible Network Team. + +.. toctree:: + :maxdepth: 3 + + getting_started/index + user_guide/index + dev_guide/index diff --git a/docs/docsite/rst/network/user_guide/cli_parsing.rst b/docs/docsite/rst/network/user_guide/cli_parsing.rst new file mode 100644 index 00000000..dd9443da --- /dev/null +++ b/docs/docsite/rst/network/user_guide/cli_parsing.rst @@ -0,0 +1,719 @@ +.. _cli_parsing: + +***************************************** +Parsing semi-structured text with Ansible +***************************************** + +The :ref:`cli_parse <ansible_collections.ansible.netcommon.cli_parse_module>` module parses semi-structured data such as network configurations into structured data to allow programmatic use of the data from that device. You can pull information from a network device and update a CMDB in one playbook. Use cases include automated troubleshooting, creating dynamic documentation, updating IPAM (IP address management) tools and so on. + + +.. 
contents:: + :local: + + +Understanding the CLI parser +============================= + +The `ansible.netcommon <https://galaxy.ansible.com/ansible/netcommon>`_ collection version 1.2.0 or later includes the :ref:`cli_parse <ansible_collections.ansible.netcommon.cli_parse_module>` module that can run CLI commands and parse the semi-structured text output. You can use the ``cli_parse`` module on a device, host, or platform that only supports a command-line interface and the commands issued return semi-structured text. The ``cli_parse`` module can either run a CLI command on a device and return a parsed result or can simply parse any text document. The ``cli_parse`` module includes cli_parser plugins to interface with a variety of parsing engines. + +Why parse the text? +-------------------- + +Parsing semi-structured data such as network configurations into structured data allows programmatic use of the data from that device. Use cases include automated troubleshooting, creating dynamic documentation, updating IPAM (IP address management) tools and so on. You may prefer to do this with Ansible natively to take advantage of native Ansible constructs such as: + +- The ``when`` clause to conditionally run other tasks or roles +- The ``assert`` module to check configuration and operational state compliance +- The ``template`` module to generate reports about configuration and operational state information +- Templates and ``command`` or ``config`` modules to generate host, device, or platform commands or configuration +- The current platform ``facts`` modules to supplement native facts information + +By parsing semi-structured text into Ansible native data structures, you can take full advantage of Ansible's network modules and plugins. + + +When not to parse the text +--------------------------- + +You should not parse semi-structured text when: + +- The device, host, or platform has a RESTAPI and returns JSON. 
+- Existing Ansible facts modules already return the desired data. +- Ansible network resource modules exist for configuration management of the device and resource. + +Parsing the CLI +========================= + +The ``cli_parse`` module includes the following cli_parsing plugins: + +``native`` + The native parsing engine is built into Ansible and requires no additional Python libraries +``xml`` + Convert XML to an Ansible native data structure +``textfsm`` + A Python module which implements a template-based state machine for parsing semi-formatted text +``ntc_templates`` + Predefined ``textfsm`` template packages supporting a variety of platforms and commands +``ttp`` + A library for semi-structured text parsing using templates, with added capabilities to simplify the process +``pyats`` + Uses the parsers included with the Cisco Test Automation & Validation Solution +``json`` + Converts JSON output at the CLI to an Ansible native data structure + +Although Ansible contains a number of plugins that can convert XML to Ansible native data structures, the ``cli_parse`` module runs the command on devices that return XML and returns the converted data in a single task. + +Because ``cli_parse`` uses a plugin-based architecture, it can use additional parsing engines from any Ansible collection. + +.. note:: + + The ``ansible.netcommon.native`` and ``ansible.netcommon.json`` parsing engines are fully supported with a Red Hat Ansible Automation Platform subscription. Red Hat Ansible Automation Platform subscription support is limited to the use of the ``ntc_templates``, pyATS, ``textfsm``, and ``xmltodict`` public APIs as documented. + +Parsing with the native parsing engine +-------------------------------------- + +The native parsing engine is included with the ``cli_parse`` module. It uses data captured using regular expressions to populate the parsed data structure. The native parsing engine requires a YAML template file to parse the command output. 
+ +Networking example +^^^^^^^^^^^^^^^^^^ + +This example uses the output of a network device command and applies a native template to produce an output in Ansible structured data format. + +The ``show interface`` command output from the network device looks as follows: + +.. code-block:: console + + Ethernet1/1 is up + admin state is up, Dedicated Interface + Hardware: 100/1000/10000 Ethernet, address: 5254.005a.f8bd (bia 5254.005a.f8bd) + MTU 1500 bytes, BW 1000000 Kbit, DLY 10 usec + reliability 255/255, txload 1/255, rxload 1/255 + Encapsulation ARPA, medium is broadcast + Port mode is access + full-duplex, auto-speed + Beacon is turned off + Auto-Negotiation is turned on FEC mode is Auto + Input flow-control is off, output flow-control is off + Auto-mdix is turned off + Switchport monitor is off + EtherType is 0x8100 + EEE (efficient-ethernet) : n/a + Last link flapped 4week(s) 6day(s) + Last clearing of "show interface" counters never + <...> + + +Create the native template to match this output and store it as ``templates/nxos_show_interface.yaml``: + +.. 
code-block:: yaml + + --- + - example: Ethernet1/1 is up + getval: '(?P<name>\S+) is (?P<oper_state>\S+)' + result: + "{{ name }}": + name: "{{ name }}" + state: + operating: "{{ oper_state }}" + shared: true + + - example: admin state is up, Dedicated Interface + getval: 'admin state is (?P<admin_state>\S+),' + result: + "{{ name }}": + name: "{{ name }}" + state: + admin: "{{ admin_state }}" + + - example: " Hardware: Ethernet, address: 5254.005a.f8b5 (bia 5254.005a.f8b5)" + getval: '\s+Hardware: (?P<hardware>.*), address: (?P<mac>\S+)' + result: + "{{ name }}": + hardware: "{{ hardware }}" + mac_address: "{{ mac }}" + + +This native parser template is structured as a list of parsers, each containing the following key-value pairs: + +- ``example`` - An example line of the text line to be parsed +- ``getval`` - A regular expression using named capture groups to store the extracted data +- ``result`` - A data tree, populated as a template, from the parsed data +- ``shared`` - (optional) The shared key makes the parsed values available to the rest of the parser entries until matched again. + +The following example task uses ``cli_parse`` with the native parser and the example template above to parse the ``show interface`` command from a Cisco NXOS device: + +.. code-block:: yaml + + - name: "Run command and parse with native" + ansible.netcommon.cli_parse: + command: show interface + parser: + name: ansible.netcommon.native + set_fact: interfaces + +Taking a deeper dive into this task: + +- The ``command`` option provides the command you want to run on the device or host. Alternately, you can provide text from a previous command with the ``text`` option instead. +- The ``parser`` option provides information specific to the parser engine. +- The ``name`` suboption provides the fully qualified collection name (FQCN) of the parsing engine (``ansible.netcommon.native``). 
+- The ``cli_parse`` module, by default, looks for the template in the templates directory as ``{{ short_os }}_{{ command }}.yaml``. + + - The ``short_os`` in the template filename is derived from either the host ``ansible_network_os`` or ``ansible_distribution``. + - Spaces in the network or host command are replaced with ``_`` in the ``command`` portion of the template filename. In this example, the ``show interfaces`` network CLI command becomes ``show_interfaces`` in the filename. + +.. note:: + + ``ansible.netcommon.native`` parsing engine is fully supported with a Red Hat Ansible Automation Platform subscription. + +Lastly in this task, the ``set_fact`` option sets the following ``interfaces`` fact for the device based on the now-structured data returned from ``cli_parse``: + +.. code-block:: yaml + + Ethernet1/1: + hardware: 100/1000/10000 Ethernet + mac_address: 5254.005a.f8bd + name: Ethernet1/1 + state: + admin: up + operating: up + Ethernet1/10: + hardware: 100/1000/10000 Ethernet + mac_address: 5254.005a.f8c6 + <...> + + +Linux example +^^^^^^^^^^^^^ + +You can also use the native parser to run commands and parse output from Linux hosts. + +The output of a sample Linux command (``ip addr show``) looks as follows: + +.. 
code-block:: bash + + 1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000 + link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 + inet 127.0.0.1/8 scope host lo + valid_lft forever preferred_lft forever + inet6 ::1/128 scope host + valid_lft forever preferred_lft forever + 2: enp0s31f6: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc fq_codel state DOWN group default qlen 1000 + link/ether x2:6a:64:9d:84:19 brd ff:ff:ff:ff:ff:ff + 3: wlp2s0: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN group default qlen 1000 + link/ether x6:c2:44:f7:41:e0 brd ff:ff:ff:ff:ff:ff permaddr d8:f2:ca:99:5c:82 + +Create the native template to match this output and store it as ``templates/fedora_ip_addr_show.yaml``: + +.. code-block:: yaml + + --- + - example: '1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000' + getval: | + (?x) # free-spacing + \d+:\s # the interface index + (?P<name>\S+):\s # the name + <(?P<properties>\S+)> # the properties + \smtu\s(?P<mtu>\d+) # the mtu + .* # gunk + state\s(?P<state>\S+) # the state of the interface + result: + "{{ name }}": + name: "{{ name }}" + loopback: "{{ 'LOOPBACK' in stats.split(',') }}" + up: "{{ 'UP' in properties.split(',') }}" + carrier: "{{ not 'NO-CARRIER' in properties.split(',') }}" + broadcast: "{{ 'BROADCAST' in properties.split(',') }}" + multicast: "{{ 'MULTICAST' in properties.split(',') }}" + state: "{{ state|lower() }}" + mtu: "{{ mtu }}" + shared: True + + - example: 'inet 192.168.122.1/24 brd 192.168.122.255 scope global virbr0' + getval: | + (?x) # free-spacing + \s+inet\s(?P<inet>([0-9]{1,3}\.){3}[0-9]{1,3}) # the ip address + /(?P<bits>\d{1,2}) # the mask bits + result: + "{{ name }}": + ip_address: "{{ inet }}" + mask_bits: "{{ bits }}" + +.. note:: + + The ``shared`` key in the parser template allows the interface name to be used in subsequent parser entries. 
The use of examples and free-spacing mode with the regular expressions makes the template easier to read. + +The following example task uses ``cli_parse`` with the native parser and the example template above to parse the Linux output: + +.. code-block:: yaml + + - name: Run command and parse + ansible.netcommon.cli_parse: + command: ip addr show + parser: + name: ansible.netcommon.native + set_fact: interfaces + +This task assumes you previously gathered facts to determine the ``ansible_distribution`` needed to locate the template. Alternately, you could provide the path in the ``parser/template_path`` option. + + +Lastly in this task, the ``set_fact`` option sets the following ``interfaces`` fact for the host, based on the now-structured data returned from ``cli_parse``: + +.. code-block:: yaml + + lo: + broadcast: false + carrier: true + ip_address: 127.0.0.1 + mask_bits: 8 + mtu: 65536 + multicast: false + name: lo + state: unknown + up: true + enp64s0u1: + broadcast: true + carrier: true + ip_address: 192.168.86.83 + mask_bits: 24 + mtu: 1500 + multicast: true + name: enp64s0u1 + state: up + up: true + <...> + + +Parsing JSON +------------- + +Although Ansible will natively convert serialized JSON to Ansible native data when recognized, you can also use the ``cli_parse`` module for this conversion. + +Example task: + +.. code-block:: yaml + + - name: "Run command and parse as json" + ansible.netcommon.cli_parse: + command: show interface | json + parser: + name: ansible.netcommon.json + register: interfaces + +Taking a deeper dive into this task: + +- The ``show interface | json`` command is issued on the device. +- The output is set as the ``interfaces`` fact for the device. +- JSON support is provided primarily for playbook consistency. + +.. 
note:: + + The use of ``ansible.netcommon.json`` is fully supported with a Red Hat Ansible Automation Platform subscription. + +Parsing with ntc_templates +---------------------------- + +The ``ntc_templates`` Python library includes pre-defined ``textfsm`` templates for parsing a variety of network device command output. + +Example task: + +.. code-block:: yaml + + - name: "Run command and parse with ntc_templates" + ansible.netcommon.cli_parse: + command: show interface + parser: + name: ansible.netcommon.ntc_templates + set_fact: interfaces + +Taking a deeper dive into this task: + +- The ``ansible_network_os`` of the device is converted to the ntc_template format ``cisco_nxos``. Alternately, you can provide the ``os`` with the ``parser/os`` option instead. +- The ``cisco_nxos_show_interface.textfsm`` template, included with the ``ntc_templates`` package, parses the output. +- See `the ntc_templates README <https://github.com/networktocode/ntc-templates/blob/master/README.md>`_ for additional information about the ``ntc_templates`` Python library. + +.. note:: + + Red Hat Ansible Automation Platform subscription support is limited to the use of the ``ntc_templates`` public APIs as documented. + + +This task and the predefined template set the following fact as the ``interfaces`` fact for the host: + +.. 
code-block:: yaml + + interfaces: + - address: 5254.005a.f8b5 + admin_state: up + bandwidth: 1000000 Kbit + bia: 5254.005a.f8b5 + delay: 10 usec + description: '' + duplex: full-duplex + encapsulation: ARPA + hardware_type: Ethernet + input_errors: '' + input_packets: '' + interface: mgmt0 + ip_address: 192.168.101.14/24 + last_link_flapped: '' + link_status: up + mode: '' + mtu: '1500' + output_errors: '' + output_packets: '' + speed: 1000 Mb/s + - address: 5254.005a.f8bd + admin_state: up + bandwidth: 1000000 Kbit + bia: 5254.005a.f8bd + delay: 10 usec + + +Parsing with pyATS +---------------------- + +``pyATS`` is part of the Cisco Test Automation & Validation Solution. It includes many predefined parsers for a number of network platforms and commands. You can use the predefined parsers that are part of the ``pyATS`` package with the ``cli_parse`` module. + +Example task: + +.. code-block:: yaml + + - name: "Run command and parse with pyats" + ansible.netcommon.cli_parse: + command: show interface + parser: + name: ansible.netcommon.pyats + set_fact: interfaces + + +Taking a deeper dive into this task: + +- The ``cli_parse`` module converts the ``ansible_network_os`` automatically (in this example, ``ansible_network_os`` set to ``cisco.nxos.nxos`` converts to ``nxos`` for pyATS). Alternately, you can set the OS with the ``parser/os`` option instead. +- Using a combination of the command and OS, pyATS selects the following parser: https://pubhub.devnetcloud.com/media/genie-feature-browser/docs/#/parsers/show%2520interface. +- The ``cli_parse`` module sets ``cisco.ios.ios`` to ``iosxe`` for pyATS. You can override this with the ``parser/os`` option. +- ``cli_parse`` only uses the predefined parsers in pyATS. See the `pyATS documentation <https://developer.cisco.com/docs/pyats/>`_ and the full list of `pyATS included parsers <https://pubhub.devnetcloud.com/media/genie-feature-browser/docs/#/parsers>`_. + +.. 
note:: + + Red Hat Ansible Automation Platform subscription support is limited to the use of the pyATS public APIs as documented. + + +This task sets the following fact as the ``interfaces`` fact for the host: + +.. code-block:: yaml + + mgmt0: + admin_state: up + auto_mdix: 'off' + auto_negotiate: true + bandwidth: 1000000 + counters: + in_broadcast_pkts: 3 + in_multicast_pkts: 1652395 + in_octets: 556155103 + in_pkts: 2236713 + in_unicast_pkts: 584259 + rate: + in_rate: 320 + in_rate_pkts: 0 + load_interval: 1 + out_rate: 48 + out_rate_pkts: 0 + rx: true + tx: true + delay: 10 + duplex_mode: full + enabled: true + encapsulations: + encapsulation: arpa + ethertype: '0x0000' + ipv4: + 192.168.101.14/24: + ip: 192.168.101.14 + prefix_length: '24' + link_state: up + <...> + + +Parsing with textfsm +--------------------- + +``textfsm`` is a Python module which implements a template-based state machine for parsing semi-formatted text. + +The following sample``textfsm`` template is stored as ``templates/nxos_show_interface.textfsm`` + +.. code-block:: text + + + Value Required INTERFACE (\S+) + Value LINK_STATUS (.+?) + Value ADMIN_STATE (.+?) + Value HARDWARE_TYPE (.\*) + Value ADDRESS ([a-zA-Z0-9]+.[a-zA-Z0-9]+.[a-zA-Z0-9]+) + Value BIA ([a-zA-Z0-9]+.[a-zA-Z0-9]+.[a-zA-Z0-9]+) + Value DESCRIPTION (.\*) + Value IP_ADDRESS (\d+\.\d+\.\d+\.\d+\/\d+) + Value MTU (\d+) + Value MODE (\S+) + Value DUPLEX (.+duplex?) + Value SPEED (.+?) + Value INPUT_PACKETS (\d+) + Value OUTPUT_PACKETS (\d+) + Value INPUT_ERRORS (\d+) + Value OUTPUT_ERRORS (\d+) + Value BANDWIDTH (\d+\s+\w+) + Value DELAY (\d+\s+\w+) + Value ENCAPSULATION (\w+) + Value LAST_LINK_FLAPPED (.+?) 
+ + Start + ^\S+\s+is.+ -> Continue.Record + ^${INTERFACE}\s+is\s+${LINK_STATUS},\sline\sprotocol\sis\s${ADMIN_STATE}$$ + ^${INTERFACE}\s+is\s+${LINK_STATUS}$$ + ^admin\s+state\s+is\s+${ADMIN_STATE}, + ^\s+Hardware(:|\s+is)\s+${HARDWARE_TYPE},\s+address(:|\s+is)\s+${ADDRESS}(.*bia\s+${BIA})* + ^\s+Description:\s+${DESCRIPTION} + ^\s+Internet\s+Address\s+is\s+${IP_ADDRESS} + ^\s+Port\s+mode\s+is\s+${MODE} + ^\s+${DUPLEX}, ${SPEED}(,|$$) + ^\s+MTU\s+${MTU}.\*BW\s+${BANDWIDTH}.\*DLY\s+${DELAY} + ^\s+Encapsulation\s+${ENCAPSULATION} + ^\s+${INPUT_PACKETS}\s+input\s+packets\s+\d+\s+bytes\s\*$$ + ^\s+${INPUT_ERRORS}\s+input\s+error\s+\d+\s+short\s+frame\s+\d+\s+overrun\s+\d+\s+underrun\s+\d+\s+ignored\s\*$$ + ^\s+${OUTPUT_PACKETS}\s+output\s+packets\s+\d+\s+bytes\s\*$$ + ^\s+${OUTPUT_ERRORS}\s+output\s+error\s+\d+\s+collision\s+\d+\s+deferred\s+\d+\s+late\s+collision\s\*$$ + ^\s+Last\s+link\s+flapped\s+${LAST_LINK_FLAPPED}\s\*$$ + +The following task uses the example template for ``textfsm`` with the ``cli_parse`` module. + +.. code-block:: yaml + + - name: "Run command and parse with textfsm" + ansible.netcommon.cli_parse: + command: show interface + parser: + name: ansible.netcommon.textfsm + set_fact: interfaces + +Taking a deeper dive into this task: + +- The ``ansible_network_os`` for the device (``cisco.nxos.nxos``) is converted to ``nxos``. Alternately you can provide the OS in the ``parser/os`` option instead. +- The textfsm template name defaulted to ``templates/nxos_show_interface.textfsm`` using a combination of the OS and command run. Alternately you can override the generated template path with the ``parser/template_path`` option. +- See the `textfsm README <https://github.com/google/textfsm>`_ for details. +- ``textfsm`` was previously made available as a filter plugin. Ansible users should transition to the ``cli_parse`` module. + +.. 
note:: + + Red Hat Ansible Automation Platform subscription support is limited to the use of the ``textfsm`` public APIs as documented. + +This task sets the following fact as the ``interfaces`` fact for the host: + +.. code-block:: yaml + + - ADDRESS: X254.005a.f8b5 + ADMIN_STATE: up + BANDWIDTH: 1000000 Kbit + BIA: X254.005a.f8b5 + DELAY: 10 usec + DESCRIPTION: '' + DUPLEX: full-duplex + ENCAPSULATION: ARPA + HARDWARE_TYPE: Ethernet + INPUT_ERRORS: '' + INPUT_PACKETS: '' + INTERFACE: mgmt0 + IP_ADDRESS: 192.168.101.14/24 + LAST_LINK_FLAPPED: '' + LINK_STATUS: up + MODE: '' + MTU: '1500' + OUTPUT_ERRORS: '' + OUTPUT_PACKETS: '' + SPEED: 1000 Mb/s + - ADDRESS: X254.005a.f8bd + ADMIN_STATE: up + BANDWIDTH: 1000000 Kbit + BIA: X254.005a.f8bd + + +Parsing with TTP +----------------- + +TTP is a Python library for semi-structured text parsing using templates. TTP uses a jinja-like syntax to limit the need for regular expressions. Users familiar with jinja templating may find the TTP template syntax familiar. + +The following is an example TTP template stored as ``templates/nxos_show_interface.ttp``: + +.. code-block:: jinja + + {{ interface }} is {{ state }} + admin state is {{ admin_state }}{{ ignore(".\*") }} + +The following task uses this template to parse the ``show interface`` command output: + +.. code-block:: yaml + + - name: "Run command and parse with ttp" + ansible.netcommon.cli_parse: + command: show interface + parser: + name: ansible.netcommon.ttp + set_fact: interfaces + +Taking a deeper dive in this task: + +- The default template path ``templates/nxos_show_interface.ttp`` was generated using the ``ansible_network_os`` for the host and ``command`` provided. +- TTP supports several additional variables that will be passed to the parser. These include: + + - ``parser/vars/ttp_init`` - Additional parameter passed when the parser is initialized. + - ``parser/vars/ttp_results`` - Additional parameters used to influence the parser output. 
+ + - ``parser/vars/ttp_vars`` - Additional variables made available in the template. + +- See the `TTP documentation <https://ttp.readthedocs.io>`_ for details. + + +The task sets the following fact as the ``interfaces`` fact for the host: + +.. code-block:: yaml + + - admin_state: up, + interface: mgmt0 + state: up + - admin_state: up, + interface: Ethernet1/1 + state: up + - admin_state: up, + interface: Ethernet1/2 + state: up + + +Converting XML +----------------- + +Although Ansible contains a number of plugins that can convert XML to Ansible native data structures, the ``cli_parse`` module runs the command on devices that return XML and returns the converted data in a single task. + +This example task runs the ``show interface`` command and parses the output as XML: + +.. code-block:: yaml + + - name: "Run command and parse as xml" + ansible.netcommon.cli_parse: + command: show interface | xml + parser: + name: ansible.netcommon.xml + set_fact: interfaces + +.. note:: + + Red Hat Ansible Automation Platform subscription support is limited to the use of the ``xmltodict`` public APIs as documented. + +This task sets the ``interfaces`` fact for the host based on this returned output: + +.. code-block:: yaml + + nf:rpc-reply: + '@xmlns': http://www.cisco.com/nxos:1.0:if_manager + '@xmlns:nf': urn:ietf:params:xml:ns:netconf:base:1.0 + nf:data: + show: + interface: + __XML__OPT_Cmd_show_interface_quick: + __XML__OPT_Cmd_show_interface___readonly__: + __readonly__: + TABLE_interface: + ROW_interface: + - admin_state: up + encapsulation: ARPA + eth_autoneg: 'on' + eth_bia_addr: x254.005a.f8b5 + eth_bw: '1000000' + + +Advanced use cases +=================== + +The ``cli_parse`` module supports several features to support more complex use cases. + +Provide a full template path +----------------------------- + +Use the ``template_path`` option to override the default template path in the task: + +.. 
code-block:: yaml + + - name: "Run command and parse with native" + ansible.netcommon.cli_parse: + command: show interface + parser: + name: ansible.netcommon.native + template_path: /home/user/templates/filename.yaml + + +Provide command to parser different than the command run +----------------------------------------------------------- + +Use the ``command`` suboption for the ``parser`` to configure the command the parser expects if it is different from the command ``cli_parse`` runs: + +.. code-block:: yaml + + - name: "Run command and parse with native" + ansible.netcommon.cli_parse: + command: sho int + parser: + name: ansible.netcommon.native + command: show interface + +Provide a custom OS value +-------------------------------- + +Use the ``os`` suboption to the parser to directly set the OS instead of using ``ansible_network_os`` or ``ansible_distribution`` to generate the template path or with the specified parser engine: + +.. code-block:: yaml + + - name: Use ios instead of iosxe for pyats + ansible.netcommon.cli_parse: + command: show something + parser: + name: ansible.netcommon.pyats + os: ios + + - name: Use linux instead of fedora from ansible_distribution + ansible.netcommon.cli_parse: + command: ps -ef + parser: + name: ansible.netcommon.native + os: linux + + +Parse existing text +-------------------- + +Use the ``text`` option instead of ``command`` to parse text collected earlier in the playbook. + +.. 
code-block:: yaml + + # using /home/user/templates/filename.yaml + - name: "Parse text from previous task" + ansible.netcommon.cli_parse: + text: "{{ output['stdout'] }}" + parser: + name: ansible.netcommon.native + template_path: /home/user/templates/filename.yaml + + # using /home/user/templates/filename.yaml + - name: "Parse text from file" + ansible.netcommon.cli_parse: + text: "{{ lookup('file', 'path/to/file.txt') }}" + parser: + name: ansible.netcommon.native + template_path: /home/user/templates/filename.yaml + + # using templates/nxos_show_version.yaml + - name: "Parse text from previous task" + ansible.netcommon.cli_parse: + text: "{{ sho_version['stdout'] }}" + parser: + name: ansible.netcommon.native + os: nxos + command: show version + + +.. seealso:: + + * :ref:`develop_cli_parse_plugins` diff --git a/docs/docsite/rst/network/user_guide/faq.rst b/docs/docsite/rst/network/user_guide/faq.rst new file mode 100644 index 00000000..cb43ac28 --- /dev/null +++ b/docs/docsite/rst/network/user_guide/faq.rst @@ -0,0 +1,76 @@ +.. _network_faq: + +******************* +Ansible Network FAQ +******************* + +.. contents:: Topics + +.. _network_faq_performance: + +How can I improve performance for network playbooks? +==================================================== + +.. _network_faq_strategy_free: + +Consider ``strategy: free`` if you are running on multiple hosts +--------------------------------------------------------------------------------- + +The ``strategy`` plugin tells Ansible how to order multiple tasks on multiple hosts. :ref:`Strategy<strategy_plugins>` is set at the playbook level. + +The default strategy is ``linear``. With strategy set to ``linear``, Ansible waits until the current task has run on all hosts before starting the next task on any host. Ansible may have forks free, but will not use them until all hosts have completed the current task. 
If each task in your playbook must succeed on all hosts before you run the next task, use the ``linear`` strategy. + +Using the ``free`` strategy, Ansible uses available forks to execute tasks on each host as quickly as possible. Even if an earlier task is still running on one host, Ansible executes later tasks on other hosts. The ``free`` strategy uses available forks more efficiently. If your playbook stalls on each task, waiting for one slow host, consider using ``strategy: free`` to boost overall performance. + +.. _network_faq_limit_show_running: + +Execute ``show running`` only if you absolutely must +--------------------------------------------------------------------------------- + +The ``show running`` command is the most resource-intensive command to execute on a network device, because of the way queries are handled by the network OS. Using the command in your Ansible playbook will slow performance significantly, especially on large devices; repeating it will multiply the performance hit. If you have a playbook that checks the running config, then executes changes, then checks the running config again, you should expect that playbook to be very slow. + +.. _network_faq_limit_ProxyCommand: + +Use ``ProxyCommand`` only if you absolutely must +--------------------------------------------------------------------------------- + +Network modules support the use of a :ref:`proxy or jump host<network_delegate_to_vs_ProxyCommand>` with the ``ProxyCommand`` parameter. However, when you use a jump host, Ansible must open a new SSH connection for every task, even if you are using a persistent connection type (``network_cli`` or ``netconf``). To maximize the performance benefits of the persistent connection types introduced in version 2.5, avoid using jump hosts whenever possible. + +.. 
_network_faq_set_forks: + +Set ``--forks`` to match your needs +--------------------------------------------------------------------------------- + +Every time Ansible runs a task, it forks its own process. The ``--forks`` parameter defines the number of concurrent tasks - if you retain the default setting, which is ``--forks=5``, and you are running a playbook on 10 hosts, five of those hosts will have to wait until a fork is available. Of course, the more forks you allow, the more memory and processing power Ansible will use. Since most network tasks are run on the control host, this means your laptop can quickly become cpu- or memory-bound. + +.. _network_faq_redacted_output: + +Why is my output sometimes replaced with ``********``? +====================================================== + +Ansible replaces any string marked ``no_log``, including passwords, with ``********`` in Ansible output. This is done by design, to protect your sensitive data. Most users are happy to have their passwords redacted. However, Ansible replaces every string that matches your password with ``********``. If you use a common word for your password, this can be a problem. For example, if you choose ``Admin`` as your password, Ansible will replace every instance of the word ``Admin`` with ``********`` in your output. This may make your output harder to read. To avoid this problem, select a secure password that will not occur elsewhere in your Ansible output. + +.. _network_faq_no_abbreviations_with_config: + +Why do the ``*_config`` modules always return ``changed=true`` with abbreviated commands? +========================================================================================= + +When you issue commands directly on a network device, you can use abbreviated commands. For example, ``int g1/0/11`` and ``interface GigabitEthernet1/0/11`` do the same thing; ``shut`` and ``shutdown`` do the same thing. 
Ansible Network ``*_command`` modules work with abbreviations, because they run commands through the network OS. + +When committing configuration, however, the network OS converts abbreviations into long-form commands. Whether you use ``shut`` or ``shutdown`` on ``GigabitEthernet1/0/11``, the result in the configuration is the same: ``shutdown``. + +Ansible Network ``*_config`` modules compare the text of the commands you specify in ``lines`` to the text in the configuration. If you use ``shut`` in the ``lines`` section of your task, and the configuration reads ``shutdown``, the module returns ``changed=true`` even though the configuration is already correct. Your task will update the configuration every time it runs. + +To avoid this problem, use long-form commands with the ``*_config`` modules: + + +.. code-block:: yaml + + --- + - hosts: all + gather_facts: no + tasks: + - cisco.ios.ios_config: + lines: + - shutdown + parents: interface GigabitEthernet1/0/11 diff --git a/docs/docsite/rst/network/user_guide/index.rst b/docs/docsite/rst/network/user_guide/index.rst new file mode 100644 index 00000000..f5eff6f4 --- /dev/null +++ b/docs/docsite/rst/network/user_guide/index.rst @@ -0,0 +1,25 @@ +.. _network_advanced: + +********************************** +Network Advanced Topics +********************************** + +Once you have mastered the basics of network automation with Ansible, as presented in :ref:`network_getting_started`, use this guide to understand platform-specific details, optimization, and troubleshooting tips for Ansible for network automation. + +**Who should use this guide?** + +This guide is intended for network engineers using Ansible for automation. It covers advanced topics. If you understand networks and Ansible, this guide is for you. You may read through the entire guide if you choose, or use the links below to find the specific information you need. 
+ +If you're new to Ansible, or new to using Ansible for network automation, start with the :ref:`network_getting_started`. + +.. toctree:: + :maxdepth: 2 + :caption: Advanced Topics + + network_resource_modules + network_best_practices_2.5 + cli_parsing + network_debug_troubleshooting + network_working_with_command_output + faq + platform_index diff --git a/docs/docsite/rst/network/user_guide/network_best_practices_2.5.rst b/docs/docsite/rst/network/user_guide/network_best_practices_2.5.rst new file mode 100644 index 00000000..1101017c --- /dev/null +++ b/docs/docsite/rst/network/user_guide/network_best_practices_2.5.rst @@ -0,0 +1,483 @@ +.. _network-best-practices: + +************************ +Ansible Network Examples +************************ + +This document describes some examples of using Ansible to manage your network infrastructure. + +.. contents:: + :local: + +Prerequisites +============= + +This example requires the following: + +* **Ansible 2.10** (or higher) installed. See :ref:`intro_installation_guide` for more information. +* One or more network devices that are compatible with Ansible. +* Basic understanding of YAML :ref:`yaml_syntax`. +* Basic understanding of Jinja2 templates. See :ref:`playbooks_templating` for more information. +* Basic Linux command line use. +* Basic knowledge of network switch & router configurations. + + +Groups and variables in an inventory file +========================================= + +An ``inventory`` file is a YAML or INI-like configuration file that defines the mapping of hosts into groups. + +In our example, the inventory file defines the groups ``eos``, ``ios``, ``vyos`` and a "group of groups" called ``switches``. Further details about subgroups and inventory files can be found in the :ref:`Ansible inventory Group documentation <subgroups>`. + +Because Ansible is a flexible tool, there are a number of ways to specify connection information and credentials. 
We recommend using the ``[my_group:vars]`` capability in your inventory file. + +.. code-block:: ini + + [all:vars] + # these defaults can be overridden for any group in the [group:vars] section + ansible_connection=ansible.netcommon.network_cli + ansible_user=ansible + + [switches:children] + eos + ios + vyos + + [eos] + veos01 ansible_host=veos-01.example.net + veos02 ansible_host=veos-02.example.net + veos03 ansible_host=veos-03.example.net + veos04 ansible_host=veos-04.example.net + + [eos:vars] + ansible_become=yes + ansible_become_method=enable + ansible_network_os=arista.eos.eos + ansible_user=my_eos_user + ansible_password=my_eos_password + + [ios] + ios01 ansible_host=ios-01.example.net + ios02 ansible_host=ios-02.example.net + ios03 ansible_host=ios-03.example.net + + [ios:vars] + ansible_become=yes + ansible_become_method=enable + ansible_network_os=cisco.ios.ios + ansible_user=my_ios_user + ansible_password=my_ios_password + + [vyos] + vyos01 ansible_host=vyos-01.example.net + vyos02 ansible_host=vyos-02.example.net + vyos03 ansible_host=vyos-03.example.net + + [vyos:vars] + ansible_network_os=vyos.vyos.vyos + ansible_user=my_vyos_user + ansible_password=my_vyos_password + +If you use ssh-agent, you do not need the ``ansible_password`` lines. If you use ssh keys, but not ssh-agent, and you have multiple keys, specify the key to use for each connection in the ``[group:vars]`` section with ``ansible_ssh_private_key_file=/path/to/correct/key``. For more information on ``ansible_ssh_`` options see :ref:`behavioral_parameters`. + +.. FIXME FUTURE Gundalow - Link to network auth & proxy page (to be written) + +.. warning:: Never store passwords in plain text. + +Ansible vault for password encryption +------------------------------------- + +The "Vault" feature of Ansible allows you to keep sensitive data such as passwords or keys in encrypted files, rather than as plain text in your playbooks or roles. 
These vault files can then be distributed or placed in source control. See :ref:`playbooks_vault` for more information. + +Here's what it would look like if you specified your SSH passwords (encrypted with Ansible Vault) among your variables: + +.. code-block:: yaml + + ansible_connection: ansible.netcommon.network_cli + ansible_network_os: vyos.vyos.vyos + ansible_user: my_vyos_user + ansible_ssh_pass: !vault | + $ANSIBLE_VAULT;1.1;AES256 + 39336231636137663964343966653162353431333566633762393034646462353062633264303765 + 6331643066663534383564343537343334633031656538370a333737656236393835383863306466 + 62633364653238323333633337313163616566383836643030336631333431623631396364663533 + 3665626431626532630a353564323566316162613432373738333064366130303637616239396438 + 9853 + +Common inventory variables +-------------------------- + +The following variables are common for all platforms in the inventory, though they can be overwritten for a particular inventory group or host. + +:ansible_connection: + + Ansible uses the ansible-connection setting to determine how to connect to a remote device. When working with Ansible Networking, set this to an appropriate network connection option, such as``ansible.netcommon.network_cli``, so Ansible treats the remote node as a network device with a limited execution environment. Without this setting, Ansible would attempt to use ssh to connect to the remote and execute the Python script on the network device, which would fail because Python generally isn't available on network devices. +:ansible_network_os: + Informs Ansible which Network platform this hosts corresponds to. This is required when using the ``ansible.netcommon.*`` connection options. +:ansible_user: The user to connect to the remote device (switch) as. Without this the user that is running ``ansible-playbook`` would be used. + Specifies which user on the network device the connection +:ansible_password: + The corresponding password for ``ansible_user`` to log in as. 
If not specified SSH key will be used. +:ansible_become: + If enable mode (privilege mode) should be used, see the next section. +:ansible_become_method: + Which type of `become` should be used, for ``network_cli`` the only valid choice is ``enable``. + +Privilege escalation +-------------------- + +Certain network platforms, such as Arista EOS and Cisco IOS, have the concept of different privilege modes. Certain network modules, such as those that modify system state including users, will only work in high privilege states. Ansible supports ``become`` when using ``connection: ansible.netcommon.network_cli``. This allows privileges to be raised for the specific tasks that need them. Adding ``become: yes`` and ``become_method: enable`` informs Ansible to go into privilege mode before executing the task, as shown here: + +.. code-block:: ini + + [eos:vars] + ansible_connection=ansible.netcommon.network_cli + ansible_network_os=arista.eos.eos + ansible_become=yes + ansible_become_method=enable + +For more information, see the :ref:`using become with network modules<become_network>` guide. + + +Jump hosts +---------- + +If the Ansible Controller does not have a direct route to the remote device and you need to use a Jump Host, please see the :ref:`Ansible Network Proxy Command <network_delegate_to_vs_ProxyCommand>` guide for details on how to achieve this. + +Example 1: collecting facts and creating backup files with a playbook +===================================================================== + +Ansible facts modules gather system information 'facts' that are available to the rest of your playbook. + +Ansible Networking ships with a number of network-specific facts modules. 
In this example, we use the ``_facts`` modules :ref:`arista.eos.eos_facts <ansible_collections.arista.eos.eos_facts_module>`, :ref:`cisco.ios.ios_facts <ansible_collections.cisco.ios.ios_facts_module>` and :ref:`vyos.vyos.vyos_facts <ansible_collections.vyos.vyos.vyos_facts_module>` to connect to the remote networking device. As the credentials are not explicitly passed with module arguments, Ansible uses the username and password from the inventory file. + +Ansible's "Network Fact modules" gather information from the system and store the results in facts prefixed with ``ansible_net_``. The data collected by these modules is documented in the `Return Values` section of the module docs, in this case :ref:`arista.eos.eos_facts <ansible_collections.arista.eos.eos_facts_module>` and :ref:`vyos.vyos.vyos_facts <ansible_collections.vyos.vyos.vyos_facts_module>`. We can use the facts, such as ``ansible_net_version`` later on in the "Display some facts" task. + +To ensure we call the correct mode (``*_facts``) the task is conditionally run based on the group defined in the inventory file, for more information on the use of conditionals in Ansible Playbooks see :ref:`the_when_statement`. + +In this example, we will create an inventory file containing some network switches, then run a playbook to connect to the network devices and return some information about them. + +Step 1: Creating the inventory +------------------------------ + +First, create a file called ``inventory``, containing: + +.. code-block:: ini + + [switches:children] + eos + ios + vyos + + [eos] + eos01.example.net + + [ios] + ios01.example.net + + [vyos] + vyos01.example.net + + +Step 2: Creating the playbook +----------------------------- + +Next, create a playbook file called ``facts-demo.yml`` containing the following: + +.. 
code-block:: yaml + + - name: "Demonstrate connecting to switches" + hosts: switches + gather_facts: no + + tasks: + ### + # Collect data + # + - name: Gather facts (eos) + arista.eos.eos_facts: + when: ansible_network_os == 'arista.eos.eos' + + - name: Gather facts (ios) + cisco.ios.ios_facts: + when: ansible_network_os == 'cisco.ios.ios' + + - name: Gather facts (vyos) + vyos.vyos.vyos_facts: + when: ansible_network_os == 'vyos.vyos.vyos' + + ### + # Demonstrate variables + # + - name: Display some facts + debug: + msg: "The hostname is {{ ansible_net_hostname }} and the OS is {{ ansible_net_version }}" + + - name: Facts from a specific host + debug: + var: hostvars['vyos01.example.net'] + + - name: Write facts to disk using a template + copy: + content: | + #jinja2: lstrip_blocks: True + EOS device info: + {% for host in groups['eos'] %} + Hostname: {{ hostvars[host].ansible_net_hostname }} + Version: {{ hostvars[host].ansible_net_version }} + Model: {{ hostvars[host].ansible_net_model }} + Serial: {{ hostvars[host].ansible_net_serialnum }} + {% endfor %} + + IOS device info: + {% for host in groups['ios'] %} + Hostname: {{ hostvars[host].ansible_net_hostname }} + Version: {{ hostvars[host].ansible_net_version }} + Model: {{ hostvars[host].ansible_net_model }} + Serial: {{ hostvars[host].ansible_net_serialnum }} + {% endfor %} + + VyOS device info: + {% for host in groups['vyos'] %} + Hostname: {{ hostvars[host].ansible_net_hostname }} + Version: {{ hostvars[host].ansible_net_version }} + Model: {{ hostvars[host].ansible_net_model }} + Serial: {{ hostvars[host].ansible_net_serialnum }} + {% endfor %} + dest: /tmp/switch-facts + run_once: yes + + ### + # Get running configuration + # + + - name: Backup switch (eos) + arista.eos.eos_config: + backup: yes + register: backup_eos_location + when: ansible_network_os == 'arista.eos.eos' + + - name: backup switch (vyos) + vyos.vyos.vyos_config: + backup: yes + register: backup_vyos_location + when: ansible_network_os == 
'vyos.vyos.vyos' + + - name: Create backup dir + file: + path: "/tmp/backups/{{ inventory_hostname }}" + state: directory + recurse: yes + + - name: Copy backup files into /tmp/backups/ (eos) + copy: + src: "{{ backup_eos_location.backup_path }}" + dest: "/tmp/backups/{{ inventory_hostname }}/{{ inventory_hostname }}.bck" + when: ansible_network_os == 'arista.eos.eos' + + - name: Copy backup files into /tmp/backups/ (vyos) + copy: + src: "{{ backup_vyos_location.backup_path }}" + dest: "/tmp/backups/{{ inventory_hostname }}/{{ inventory_hostname }}.bck" + when: ansible_network_os == 'vyos.vyos.vyos' + +Step 3: Running the playbook +---------------------------- + +To run the playbook, run the following from a console prompt: + +.. code-block:: console + + ansible-playbook -i inventory facts-demo.yml + +This should return output similar to the following: + +.. code-block:: console + + PLAY RECAP + eos01.example.net : ok=7 changed=2 unreachable=0 failed=0 + ios01.example.net : ok=7 changed=2 unreachable=0 failed=0 + vyos01.example.net : ok=6 changed=2 unreachable=0 failed=0 + +Step 4: Examining the playbook results +-------------------------------------- + +Next, look at the contents of the file we created containing the switch facts: + +.. code-block:: console + + cat /tmp/switch-facts + +You can also look at the backup files: + +.. code-block:: console + + find /tmp/backups + + +If `ansible-playbook` fails, please follow the debug steps in :ref:`network_debug_troubleshooting`. + + +.. _network-agnostic-examples: + +Example 2: simplifying playbooks with network agnostic modules +============================================================== + +(This example originally appeared in the `Deep Dive on cli_command for Network Automation <https://www.ansible.com/blog/deep-dive-on-cli-command-for-network-automation>`_ blog post by Sean Cavanaugh -`@IPvSean <https://github.com/IPvSean>`_). 
+ +If you have two or more network platforms in your environment, you can use the network agnostic modules to simplify your playbooks. You can use network agnostic modules such as ``ansible.netcommon.cli_command`` or ``ansible.netcommon.cli_config`` in place of the platform-specific modules such as ``arista.eos.eos_config``, ``cisco.ios.ios_config``, and ``junipernetworks.junos.junos_config``. This reduces the number of tasks and conditionals you need in your playbooks. + +.. note:: + Network agnostic modules require the :ref:`ansible.netcommon.network_cli <ansible_collections.ansible.netcommon.network_cli_connection>` connection plugin. + + +Sample playbook with platform-specific modules +---------------------------------------------- + +This example assumes three platforms, Arista EOS, Cisco NXOS, and Juniper JunOS. Without the network agnostic modules, a sample playbook might contain the following three tasks with platform-specific commands: + +.. code-block:: yaml + + --- + - name: Run Arista command + arista.eos.eos_command: + commands: show ip int br + when: ansible_network_os == 'arista.eos.eos' + + - name: Run Cisco NXOS command + cisco.nxos.nxos_command: + commands: show ip int br + when: ansible_network_os == 'cisco.nxos.nxos' + + - name: Run Vyos command + vyos.vyos.vyos_command: + commands: show interface + when: ansible_network_os == 'vyos.vyos.vyos' + +Simplified playbook with ``cli_command`` network agnostic module +---------------------------------------------------------------- + +You can replace these platform-specific modules with the network agnostic ``ansible.netcommon.cli_command`` module as follows: + +.. 
code-block:: yaml + + --- + - hosts: network + gather_facts: false + connection: ansible.netcommon.network_cli + + tasks: + - name: Run cli_command on Arista and display results + block: + - name: Run cli_command on Arista + ansible.netcommon.cli_command: + command: show ip int br + register: result + + - name: Display result to terminal window + debug: + var: result.stdout_lines + when: ansible_network_os == 'arista.eos.eos' + + - name: Run cli_command on Cisco IOS and display results + block: + - name: Run cli_command on Cisco IOS + ansible.netcommon.cli_command: + command: show ip int br + register: result + + - name: Display result to terminal window + debug: + var: result.stdout_lines + when: ansible_network_os == 'cisco.ios.ios' + + - name: Run cli_command on Vyos and display results + block: + - name: Run cli_command on Vyos + ansible.netcommon.cli_command: + command: show interfaces + register: result + + - name: Display result to terminal window + debug: + var: result.stdout_lines + when: ansible_network_os == 'vyos.vyos.vyos' + + +If you use groups and group_vars by platform type, this playbook can be further simplified to : + +.. code-block:: yaml + + --- + - name: Run command and print to terminal window + hosts: routers + gather_facts: false + + tasks: + - name: Run show command + ansible.netcommon.cli_command: + command: "{{show_interfaces}}" + register: command_output + + +You can see a full example of this using group_vars and also a configuration backup example at `Network agnostic examples <https://github.com/network-automation/agnostic_example>`_. + +Using multiple prompts with the ``ansible.netcommon.cli_command`` +------------------------------------------------------------------- + +The ``ansible.netcommon.cli_command`` also supports multiple prompts. + +.. 
code-block:: yaml + + --- + - name: Change password to default + ansible.netcommon.cli_command: + command: "{{ item }}" + prompt: + - "New password" + - "Retype new password" + answer: + - "mypassword123" + - "mypassword123" + check_all: True + loop: + - "configure" + - "rollback" + - "set system root-authentication plain-text-password" + - "commit" + +See the :ref:`ansible.netcommon.cli_command <cli_command_module>` for full documentation on this command. + + +Implementation Notes +==================== + + +Demo variables +-------------- + +Although these tasks are not needed to write data to disk, they are used in this example to demonstrate some methods of accessing facts about the given devices or a named host. + +Ansible ``hostvars`` allows you to access variables from a named host. Without this we would return the details for the current host, rather than the named host. + +For more information, see :ref:`magic_variables_and_hostvars`. + +Get running configuration +------------------------- + +The :ref:`arista.eos.eos_config <ansible_collections.arista.eos.eos_config_module>` and :ref:`vyos.vyos.vyos_config <ansible_collections.vyos.vyos.vyos_config_module>` modules have a ``backup:`` option that when set will cause the module to create a full backup of the current ``running-config`` from the remote device before any changes are made. The backup file is written to the ``backup`` folder in the playbook root directory. If the directory does not exist, it is created. + +To demonstrate how we can move the backup file to a different location, we register the result and move the file to the path stored in ``backup_path``. + +Note that when using variables from tasks in this way we use double quotes (``"``) and double curly-brackets (``{{...}}`` to tell Ansible that this is a variable. + +Troubleshooting +=============== + +If you receive an connection error please double check the inventory and playbook for typos or missing lines. 
If the issue still occurs follow the debug steps in :ref:`network_debug_troubleshooting`.
+
+.. seealso::
+
+   * :ref:`network_guide`
+   * :ref:`intro_inventory`
+   * :ref:`Keeping vaulted variables visible <tip_for_variables_and_vaults>`
diff --git a/docs/docsite/rst/network/user_guide/network_debug_troubleshooting.rst b/docs/docsite/rst/network/user_guide/network_debug_troubleshooting.rst
new file mode 100644
index 00000000..97f671bb
--- /dev/null
+++ b/docs/docsite/rst/network/user_guide/network_debug_troubleshooting.rst
@@ -0,0 +1,828 @@
+.. _network_debug_troubleshooting:
+
+***************************************
+Network Debug and Troubleshooting Guide
+***************************************
+
+This section discusses how to debug and troubleshoot network modules in Ansible.
+
+.. contents::
+   :local:
+
+
+How to troubleshoot
+===================
+
+Ansible network automation errors generally fall into one of the following categories:
+
+:Authentication issues:
+  * Not correctly specifying credentials
+  * Remote device (network switch/router) not falling back to other authentication methods
+  * SSH key issues
+:Timeout issues:
+  * Can occur when trying to pull a large amount of data
+  * May actually be masking an authentication issue
+:Playbook issues:
+  * Use of ``delegate_to``, instead of ``ProxyCommand``. See :ref:`network proxy guide <network_delegate_to_vs_ProxyCommand>` for more information.
+
+.. warning:: ``unable to open shell``
+
+  The ``unable to open shell`` message means that the ``ansible-connection`` daemon has not been able to successfully
+  talk to the remote network device. This generally means that there is an authentication issue. See the "Authentication and connection issues" section
+  in this document for more information.
+
+.. 
_enable_network_logging: + +Enabling Networking logging and how to read the logfile +------------------------------------------------------- + +**Platforms:** Any + +Ansible includes logging to help diagnose and troubleshoot issues regarding Ansible Networking modules. + +Because logging is very verbose, it is disabled by default. It can be enabled with the :envvar:`ANSIBLE_LOG_PATH` and :envvar:`ANSIBLE_DEBUG` options on the ansible-controller, that is the machine running ``ansible-playbook``. + +Before running ``ansible-playbook``, run the following commands to enable logging:: + + # Specify the location for the log file + export ANSIBLE_LOG_PATH=~/ansible.log + # Enable Debug + export ANSIBLE_DEBUG=True + + # Run with 4*v for connection level verbosity + ansible-playbook -vvvv ... + +After Ansible has finished running you can inspect the log file which has been created on the ansible-controller: + +.. code:: + + less $ANSIBLE_LOG_PATH + + 2017-03-30 13:19:52,740 p=28990 u=fred | creating new control socket for host veos01:22 as user admin + 2017-03-30 13:19:52,741 p=28990 u=fred | control socket path is /home/fred/.ansible/pc/ca5960d27a + 2017-03-30 13:19:52,741 p=28990 u=fred | current working directory is /home/fred/ansible/test/integration + 2017-03-30 13:19:52,741 p=28990 u=fred | using connection plugin network_cli + ... + 2017-03-30 13:20:14,771 paramiko.transport userauth is OK + 2017-03-30 13:20:15,283 paramiko.transport Authentication (keyboard-interactive) successful! 
+ 2017-03-30 13:20:15,302 p=28990 u=fred | ssh connection done, setting terminal + 2017-03-30 13:20:15,321 p=28990 u=fred | ssh connection has completed successfully + 2017-03-30 13:20:15,322 p=28990 u=fred | connection established to veos01 in 0:00:22.580626 + + +From the log notice: + +* ``p=28990`` Is the PID (Process ID) of the ``ansible-connection`` process +* ``u=fred`` Is the user `running` ansible, not the remote-user you are attempting to connect as +* ``creating new control socket for host veos01:22 as user admin`` host:port as user +* ``control socket path is`` location on disk where the persistent connection socket is created +* ``using connection plugin network_cli`` Informs you that persistent connection is being used +* ``connection established to veos01 in 0:00:22.580626`` Time taken to obtain a shell on the remote device + + +.. note: Port None ``creating new control socket for host veos01:None`` + + If the log reports the port as ``None`` this means that the default port is being used. + A future Ansible release will improve this message so that the port is always logged. + +Because the log files are verbose, you can use grep to look for specific information. For example, once you have identified the ``pid`` from the ``creating new control socket for host`` line you can search for other connection log entries:: + + grep "p=28990" $ANSIBLE_LOG_PATH + + +Enabling Networking device interaction logging +---------------------------------------------- + +**Platforms:** Any + +Ansible includes logging of device interaction in the log file to help diagnose and troubleshoot +issues regarding Ansible Networking modules. The messages are logged in the file pointed to by the ``log_path`` configuration +option in the Ansible configuration file or by setting the :envvar:`ANSIBLE_LOG_PATH`. + +.. warning:: + The device interaction messages consist of command executed on the target device and the returned response. 
Since this + log data can contain sensitive information including passwords in plain text it is disabled by default. + Additionally, in order to prevent accidental leakage of data, a warning will be shown on every task with this + setting enabled, specifying which host has it enabled and where the data is being logged. + +Be sure to fully understand the security implications of enabling this option. The device interaction logging can be enabled either globally by setting in configuration file or by setting environment or enabled on per task basis by passing a special variable to the task. + +Before running ``ansible-playbook`` run the following commands to enable logging: + +.. code-block:: text + + # Specify the location for the log file + export ANSIBLE_LOG_PATH=~/ansible.log + + +Enable device interaction logging for a given task + +.. code-block:: yaml + + - name: get version information + cisco.ios.ios_command: + commands: + - show version + vars: + ansible_persistent_log_messages: True + + +To make this a global setting, add the following to your ``ansible.cfg`` file: + +.. code-block:: ini + + [persistent_connection] + log_messages = True + +or enable the environment variable `ANSIBLE_PERSISTENT_LOG_MESSAGES`: + +.. code-block:: text + + # Enable device interaction logging + export ANSIBLE_PERSISTENT_LOG_MESSAGES=True + +If the task is failing on connection initialization itself, you should enable this option +globally. If an individual task is failing intermittently this option can be enabled for that task itself to find the root cause. + +After Ansible has finished running you can inspect the log file which has been created on the ansible-controller + +.. note:: Be sure to fully understand the security implications of enabling this option as it can log sensitive + information in log file thus creating security vulnerability. 
+ + +Isolating an error +------------------ + +**Platforms:** Any + +As with any effort to troubleshoot it's important to simplify the test case as much as possible. + +For Ansible this can be done by ensuring you are only running against one remote device: + +* Using ``ansible-playbook --limit switch1.example.net...`` +* Using an ad-hoc ``ansible`` command + +`ad-hoc` refers to running Ansible to perform some quick command using ``/usr/bin/ansible``, rather than the orchestration language, which is ``/usr/bin/ansible-playbook``. In this case we can ensure connectivity by attempting to execute a single command on the remote device:: + + ansible -m arista.eos.eos_command -a 'commands=?' -i inventory switch1.example.net -e 'ansible_connection=ansible.netcommon.network_cli' -u admin -k + +In the above example, we: + +* connect to ``switch1.example.net`` specified in the inventory file ``inventory`` +* use the module ``arista.eos.eos_command`` +* run the command ``?`` +* connect using the username ``admin`` +* inform the ``ansible`` command to prompt for the SSH password by specifying ``-k`` + +If you have SSH keys configured correctly, you don't need to specify the ``-k`` parameter. + +If the connection still fails you can combine it with the enable_network_logging parameter. For example: + +.. code-block:: text + + # Specify the location for the log file + export ANSIBLE_LOG_PATH=~/ansible.log + # Enable Debug + export ANSIBLE_DEBUG=True + # Run with ``-vvvv`` for connection level verbosity + ansible -m arista.eos.eos_command -a 'commands=?' -i inventory switch1.example.net -e 'ansible_connection=ansible.netcommon.network_cli' -u admin -k + +Then review the log file and find the relevant error message in the rest of this document. + +.. For details on other ways to authenticate, see LINKTOAUTHHOWTODOCS. + +.. 
_socket_path_issue: + +Troubleshooting socket path issues +================================== + +**Platforms:** Any + +The ``Socket path does not exist or cannot be found`` and ``Unable to connect to socket`` messages indicate that the socket used to communicate with the remote network device is unavailable or does not exist. + +For example: + +.. code-block:: none + + fatal: [spine02]: FAILED! => { + "changed": false, + "failed": true, + "module_stderr": "Traceback (most recent call last):\n File \"/tmp/ansible_TSqk5J/ansible_modlib.zip/ansible/module_utils/connection.py\", line 115, in _exec_jsonrpc\nansible.module_utils.connection.ConnectionError: Socket path XX does not exist or cannot be found. See Troubleshooting socket path issues in the Network Debug and Troubleshooting Guide\n", + "module_stdout": "", + "msg": "MODULE FAILURE", + "rc": 1 + } + +or + +.. code-block:: none + + fatal: [spine02]: FAILED! => { + "changed": false, + "failed": true, + "module_stderr": "Traceback (most recent call last):\n File \"/tmp/ansible_TSqk5J/ansible_modlib.zip/ansible/module_utils/connection.py\", line 123, in _exec_jsonrpc\nansible.module_utils.connection.ConnectionError: Unable to connect to socket XX. See Troubleshooting socket path issues in Network Debug and Troubleshooting Guide\n", + "module_stdout": "", + "msg": "MODULE FAILURE", + "rc": 1 + } + +Suggestions to resolve: + +#. Verify that you have write access to the socket path described in the error message. + +#. Follow the steps detailed in :ref:`enable network logging <enable_network_logging>`. + +If the identified error message from the log file is: + +.. code-block:: yaml + + 2017-04-04 12:19:05,670 p=18591 u=fred | command timeout triggered, timeout value is 30 secs + +or + +.. code-block:: yaml + + 2017-04-04 12:19:05,670 p=18591 u=fred | persistent connection idle timeout triggered, timeout value is 30 secs + +Follow the steps detailed in :ref:`timeout issues <timeout_issues>` + + +.. 
_unable_to_open_shell: + +Category "Unable to open shell" +=============================== + + +**Platforms:** Any + +The ``unable to open shell`` message means that the ``ansible-connection`` daemon has not been able to successfully talk to the remote network device. This generally means that there is an authentication issue. It is a "catch all" message, meaning you need to enable :ref:`logging <a_note_about_logging>` to find the underlying issues. + + + +For example: + +.. code-block:: none + + TASK [prepare_eos_tests : enable cli on remote device] ************************************************** + fatal: [veos01]: FAILED! => {"changed": false, "failed": true, "msg": "unable to open shell"} + + +or: + + +.. code-block:: none + + TASK [ios_system : configure name_servers] ************************************************************* + task path: + fatal: [ios-csr1000v]: FAILED! => { + "changed": false, + "failed": true, + "msg": "unable to open shell", + } + +Suggestions to resolve: + +Follow the steps detailed in enable_network_logging_. + +Once you've identified the error message from the log file, the specific solution can be found in the rest of this document. + + + +Error: "[Errno -2] Name or service not known" +--------------------------------------------- + +**Platforms:** Any + +Indicates that the remote host you are trying to connect to can not be reached + +For example: + +.. 
code-block:: yaml + + 2017-04-04 11:39:48,147 p=15299 u=fred | control socket path is /home/fred/.ansible/pc/ca5960d27a + 2017-04-04 11:39:48,147 p=15299 u=fred | current working directory is /home/fred/git/ansible-inc/stable-2.3/test/integration + 2017-04-04 11:39:48,147 p=15299 u=fred | using connection plugin network_cli + 2017-04-04 11:39:48,340 p=15299 u=fred | connecting to host veos01 returned an error + 2017-04-04 11:39:48,340 p=15299 u=fred | [Errno -2] Name or service not known + + +Suggestions to resolve: + +* If you are using the ``provider:`` options ensure that its suboption ``host:`` is set correctly. +* If you are not using ``provider:`` nor top-level arguments ensure your inventory file is correct. + + + + + +Error: "Authentication failed" +------------------------------ + +**Platforms:** Any + +Occurs if the credentials (username, passwords, or ssh keys) passed to ``ansible-connection`` (via ``ansible`` or ``ansible-playbook``) can not be used to connect to the remote device. + + + +For example: + +.. code-block:: yaml + + <ios01> ESTABLISH CONNECTION FOR USER: cisco on PORT 22 TO ios01 + <ios01> Authentication failed. + + +Suggestions to resolve: + +If you are specifying credentials via ``password:`` (either directly or via ``provider:``) or the environment variable `ANSIBLE_NET_PASSWORD` it is possible that ``paramiko`` (the Python SSH library that Ansible uses) is using ssh keys, and therefore the credentials you are specifying are being ignored. To find out if this is the case, disable "look for keys". This can be done like this: + +.. code-block:: yaml + + export ANSIBLE_PARAMIKO_LOOK_FOR_KEYS=False + +To make this a permanent change, add the following to your ``ansible.cfg`` file: + +.. 
code-block:: ini
+
+   [paramiko_connection]
+   look_for_keys = False
+
+
+Error: "connecting to host <hostname> returned an error" or "Bad address"
+-------------------------------------------------------------------------
+
+This may occur if the SSH fingerprint hasn't been added to Paramiko's (the Python SSH library) known hosts file.
+
+When using persistent connections with Paramiko, the connection runs in a background process. If the host doesn't already have a valid SSH key, by default Ansible will prompt to add the host key. This will cause connections running in background processes to fail.
+
+For example:
+
+.. code-block:: yaml
+
+   2017-04-04 12:06:03,486 p=17981 u=fred | using connection plugin network_cli
+   2017-04-04 12:06:04,680 p=17981 u=fred | connecting to host veos01 returned an error
+   2017-04-04 12:06:04,682 p=17981 u=fred | (14, 'Bad address')
+   2017-04-04 12:06:33,519 p=17981 u=fred | number of connection attempts exceeded, unable to connect to control socket
+   2017-04-04 12:06:33,520 p=17981 u=fred | persistent_connect_interval=1, persistent_connect_retries=30
+
+
+Suggestions to resolve:
+
+Use ``ssh-keyscan`` to pre-populate the known_hosts. You need to ensure the keys are correct.
+
+.. code-block:: shell
+
+   ssh-keyscan veos01
+
+
+or
+
+You can tell Ansible to automatically accept the keys.
+
+Environment variable method::
+
+   export ANSIBLE_PARAMIKO_HOST_KEY_AUTO_ADD=True
+   ansible-playbook ...
+
+``ansible.cfg`` method:
+
+ansible.cfg
+
+.. code-block:: ini
+
+   [paramiko_connection]
+   host_key_auto_add = True
+
+
+
+.. warning:: Security warning
+
+   Care should be taken before accepting keys.
+
+Error: "No authentication methods available"
+--------------------------------------------
+
+For example:
+
+.. 
code-block:: yaml
+
+   2017-04-04 12:19:05,670 p=18591 u=fred | creating new control socket for host veos01:None as user admin
+   2017-04-04 12:19:05,670 p=18591 u=fred | control socket path is /home/fred/.ansible/pc/ca5960d27a
+   2017-04-04 12:19:05,670 p=18591 u=fred | current working directory is /home/fred/git/ansible-inc/ansible-workspace-2/test/integration
+   2017-04-04 12:19:05,670 p=18591 u=fred | using connection plugin network_cli
+   2017-04-04 12:19:06,606 p=18591 u=fred | connecting to host veos01 returned an error
+   2017-04-04 12:19:06,606 p=18591 u=fred | No authentication methods available
+   2017-04-04 12:19:35,708 p=18591 u=fred | connect retry timeout expired, unable to connect to control socket
+   2017-04-04 12:19:35,709 p=18591 u=fred | persistent_connect_retry_timeout is 15 secs
+
+
+Suggestions to resolve:
+
+No password or SSH key supplied
+
+Clearing Out Persistent Connections
+-----------------------------------
+
+**Platforms:** Any
+
+In Ansible 2.3, persistent connection sockets are stored in ``~/.ansible/pc`` for all network devices. When an Ansible playbook runs, the persistent socket connection is displayed when verbose output is specified.
+
+``<switch> socket_path: /home/fred/.ansible/pc/f64ddfa760``
+
+To clear out a persistent connection before it times out (the default timeout is 30 seconds
+of inactivity), simply delete the socket file.
+
+
+.. _timeout_issues:
+
+Timeout issues
+==============
+
+Persistent connection idle timeout
+----------------------------------
+
+By default, ``ANSIBLE_PERSISTENT_CONNECT_TIMEOUT`` is set to 30 (seconds). You may see the following error if this value is too low:
+
+.. code-block:: yaml
+
+   2017-04-04 12:19:05,670 p=18591 u=fred | persistent connection idle timeout triggered, timeout value is 30 secs
+
+Suggestions to resolve:
+
+Increase value of persistent connection idle timeout:
+
+.. 
code-block:: sh + + export ANSIBLE_PERSISTENT_CONNECT_TIMEOUT=60 + +To make this a permanent change, add the following to your ``ansible.cfg`` file: + +.. code-block:: ini + + [persistent_connection] + connect_timeout = 60 + +Command timeout +--------------- + +By default, ``ANSIBLE_PERSISTENT_COMMAND_TIMEOUT`` is set to 30 (seconds). Prior versions of Ansible had this value set to 10 seconds by default. +You may see the following error if this value is too low: + + +.. code-block:: yaml + + 2017-04-04 12:19:05,670 p=18591 u=fred | command timeout triggered, timeout value is 30 secs + +Suggestions to resolve: + +* Option 1 (Global command timeout setting): + Increase value of command timeout in configuration file or by setting environment variable. + + .. code-block:: yaml + + export ANSIBLE_PERSISTENT_COMMAND_TIMEOUT=60 + + To make this a permanent change, add the following to your ``ansible.cfg`` file: + + .. code-block:: ini + + [persistent_connection] + command_timeout = 60 + +* Option 2 (Per task command timeout setting): + Increase command timeout per task basis. All network modules support a + timeout value that can be set on a per task basis. + The timeout value controls the amount of time in seconds before the + task will fail if the command has not returned. + + For local connection type: + + .. FIXME: Detail error here + + Suggestions to resolve: + + .. code-block:: yaml + + - name: save running-config + cisco.ios.ios_command: + commands: copy running-config startup-config + provider: "{{ cli }}" + timeout: 30 + + + Suggestions to resolve: + + .. code-block:: yaml + + - name: save running-config + cisco.ios.ios_command: + commands: copy running-config startup-config + vars: + ansible_command_timeout: 60 + +Some operations take longer than the default 30 seconds to complete. One good +example is saving the current running config on IOS devices to startup config. 
+
+In this case, changing the timeout value from the default 30 seconds to 60
+seconds will prevent the task from failing before the command completes
+successfully.
+
+Persistent connection retry timeout
+-----------------------------------
+
+By default, ``ANSIBLE_PERSISTENT_CONNECT_RETRY_TIMEOUT`` is set to 15 (seconds). You may see the following error if this value is too low:
+
+.. code-block:: yaml
+
+   2017-04-04 12:19:35,708 p=18591 u=fred | connect retry timeout expired, unable to connect to control socket
+   2017-04-04 12:19:35,709 p=18591 u=fred | persistent_connect_retry_timeout is 15 secs
+
+Suggestions to resolve:
+
+Increase the value of the persistent connection retry timeout.
+Note: This value should be greater than the SSH timeout value (the timeout value under the defaults
+section in the configuration file) and less than the value of the persistent
+connection idle timeout (connect_timeout).
+
+.. code-block:: yaml
+
+   export ANSIBLE_PERSISTENT_CONNECT_RETRY_TIMEOUT=30
+
+To make this a permanent change, add the following to your ``ansible.cfg`` file:
+
+.. code-block:: ini
+
+   [persistent_connection]
+   connect_retry_timeout = 30
+
+
+Timeout issue due to platform specific login menu with ``network_cli`` connection type
+--------------------------------------------------------------------------------------
+
+In Ansible 2.9 and later, the network_cli connection plugin configuration options are added
+to handle the platform specific login menu. These options can be set as group/host or tasks
+variables.
+
+Example: Handle single login menu prompts with host variables
+
+.. code-block:: console
+
+   $cat host_vars/<hostname>.yaml
+   ---
+   ansible_terminal_initial_prompt:
+   - "Connect to a host"
+   ansible_terminal_initial_answer:
+   - "3"
+
+Example: Handle remote host multiple login menu prompts with host variables
+
+.. 
code-block:: console + + $cat host_vars/<inventory-hostname>.yaml + --- + ansible_terminal_initial_prompt: + - "Press any key to enter main menu" + - "Connect to a host" + ansible_terminal_initial_answer: + - "\\r" + - "3" + ansible_terminal_initial_prompt_checkall: True + +To handle multiple login menu prompts: + +* The values of ``ansible_terminal_initial_prompt`` and ``ansible_terminal_initial_answer`` should be a list. +* The prompt sequence should match the answer sequence. +* The value of ``ansible_terminal_initial_prompt_checkall`` should be set to ``True``. + +.. note:: If all the prompts in sequence are not received from remote host at the time connection initialization it will result in a timeout. + + +Playbook issues +=============== + +This section details issues are caused by issues with the Playbook itself. + +Error: "Unable to enter configuration mode" +------------------------------------------- + +**Platforms:** Arista EOS and Cisco IOS + +This occurs when you attempt to run a task that requires privileged mode in a user mode shell. + +For example: + +.. code-block:: console + + TASK [ios_system : configure name_servers] ***************************************************************************** + task path: + fatal: [ios-csr1000v]: FAILED! => { + "changed": false, + "failed": true, + "msg": "unable to enter configuration mode", + } + +Suggestions to resolve: + + Use ``connection: ansible.netcommon.network_cli`` and ``become: yes`` + + +Proxy Issues +============ + + .. _network_delegate_to_vs_ProxyCommand: + +delegate_to vs ProxyCommand +--------------------------- + +In order to use a bastion or intermediate jump host to connect to network devices over ``cli`` +transport, network modules support the use of ``ProxyCommand``. + +To use ``ProxyCommand``, configure the proxy settings in the Ansible inventory +file to specify the proxy host. + +.. 
code-block:: ini + + [nxos] + nxos01 + nxos02 + + [nxos:vars] + ansible_ssh_common_args='-o ProxyCommand="ssh -W %h:%p -q bastion01"' + + +With the configuration above, simply build and run the playbook as normal with +no additional changes necessary. The network module will now connect to the +network device by first connecting to the host specified in +``ansible_ssh_common_args``, which is ``bastion01`` in the above example. + +You can also set the proxy target for all hosts by using environment variables. + +.. code-block:: sh + + export ANSIBLE_SSH_ARGS='-o ProxyCommand="ssh -W %h:%p -q bastion01"' + +Using bastion/jump host with netconf connection +----------------------------------------------- + +Enabling jump host setting +-------------------------- + + +Bastion/jump host with netconf connection can be enabled by: + - Setting Ansible variable ``ansible_netconf_ssh_config`` either to ``True`` or custom ssh config file path + - Setting environment variable ``ANSIBLE_NETCONF_SSH_CONFIG`` to ``True`` or custom ssh config file path + - Setting ``ssh_config = 1`` or ``ssh_config = <ssh-file-path>`` under ``netconf_connection`` section + +If the configuration variable is set to 1 the proxycommand and other ssh variables are read from +default ssh config file (~/.ssh/config). + +If the configuration variable is set to file path the proxycommand and other ssh variables are read +from the given custom ssh file path + +Example ssh config file (~/.ssh/config) +--------------------------------------- + +.. code-block:: ini + + Host jumphost + HostName jumphost.domain.name.com + User jumphost-user + IdentityFile "/path/to/ssh-key.pem" + Port 22 + + # Note: Due to the way that Paramiko reads the SSH Config file, + # you need to specify the NETCONF port that the host uses. 
+ # In other words, it does not automatically use ansible_port + # As a result you need either: + + Host junos01 + HostName junos01 + ProxyCommand ssh -W %h:22 jumphost + + # OR + + Host junos01 + HostName junos01 + ProxyCommand ssh -W %h:830 jumphost + + # Depending on the netconf port used. + +Example Ansible inventory file + +.. code-block:: ini + + [junos] + junos01 + + [junos:vars] + ansible_connection=ansible.netcommon.netconf + ansible_network_os=junipernetworks.junos.junos + ansible_user=myuser + ansible_password=!vault... + + +.. note:: Using ``ProxyCommand`` with passwords via variables + + By design, SSH doesn't support providing passwords via environment variables. + This is done to prevent secrets from leaking out, for example in ``ps`` output. + + We recommend using SSH Keys, and if needed an ssh-agent, rather than passwords, where ever possible. + +Miscellaneous Issues +==================== + + +Intermittent failure while using ``ansible.netcommon.network_cli`` connection type +------------------------------------------------------------------------------------ + +If the command prompt received in response is not matched correctly within +the ``ansible.netcommon.network_cli`` connection plugin the task might fail intermittently with truncated +response or with the error message ``operation requires privilege escalation``. +Starting in 2.7.1 a new buffer read timer is added to ensure prompts are matched properly +and a complete response is send in output. The timer default value is 0.2 seconds and +can be adjusted on a per task basis or can be set globally in seconds. + +Example Per task timer setting + +.. code-block:: yaml + + - name: gather ios facts + cisco.ios.ios_facts: + gather_subset: all + register: result + vars: + ansible_buffer_read_timeout: 2 + + +To make this a global setting, add the following to your ``ansible.cfg`` file: + +.. 
code-block:: ini + + [persistent_connection] + buffer_read_timeout = 2 + +This timer delay per command executed on remote host can be disabled by setting the value to zero. + + +Task failure due to mismatched error regex within command response using ``ansible.netcommon.network_cli`` connection type +---------------------------------------------------------------------------------------------------------------------------- + +In Ansible 2.9 and later, the ``ansible.netcommon.network_cli`` connection plugin configuration options are added +to handle the stdout and stderr regex to identify if the command execution response consist +of a normal response or an error response. These options can be set group/host variables or as +tasks variables. + +Example: For mismatched error response + +.. code-block:: yaml + + - name: fetch logs from remote host + cisco.ios.ios_command: + commands: + - show logging + + +Playbook run output: + +.. code-block:: console + + TASK [first fetch logs] ******************************************************** + fatal: [ios01]: FAILED! => { + "changed": false, + "msg": "RF Name:\r\n\r\n <--nsip--> + \"IPSEC-3-REPLAY_ERROR: Test log\"\r\n*Aug 1 08:36:18.483: %SYS-7-USERLOG_DEBUG: + Message from tty578(user id: ansible): test\r\nan-ios-02#"} + +Suggestions to resolve: + +Modify the error regex for individual task. + +.. code-block:: yaml + + - name: fetch logs from remote host + cisco.ios.ios_command: + commands: + - show logging + vars: + ansible_terminal_stderr_re: + - pattern: 'connection timed out' + flags: 're.I' + +The terminal plugin regex options ``ansible_terminal_stderr_re`` and ``ansible_terminal_stdout_re`` have +``pattern`` and ``flags`` as keys. The value of the ``flags`` key should be a value that is accepted by +the ``re.compile`` python method. 
+ + +Intermittent failure while using ``ansible.netcommon.network_cli`` connection type due to slower network or remote target host +---------------------------------------------------------------------------------------------------------------------------------- + +In Ansible 2.9 and later, the ``ansible.netcommon.network_cli`` connection plugin configuration option is added to control +the number of attempts to connect to a remote host. The default number of attempts is three. +After every retry attempt the delay between retries is increased by power of 2 in seconds until either the +maximum attempts are exhausted or either the ``persistent_command_timeout`` or ``persistent_connect_timeout`` timers are triggered. + +To make this a global setting, add the following to your ``ansible.cfg`` file: + +.. code-block:: ini + + [persistent_connection] + network_cli_retries = 5 diff --git a/docs/docsite/rst/network/user_guide/network_resource_modules.rst b/docs/docsite/rst/network/user_guide/network_resource_modules.rst new file mode 100644 index 00000000..f319d7cf --- /dev/null +++ b/docs/docsite/rst/network/user_guide/network_resource_modules.rst @@ -0,0 +1,196 @@ +.. _resource_modules: + +************************ +Network Resource Modules +************************ + +Ansible network resource modules simplify and standardize how you manage different network devices. Network devices separate configuration into sections (such as interfaces and VLANs) that apply to a network service. Ansible network resource modules take advantage of this to allow you to configure subsections or *resources* within the network device configuration. Network resource modules provide a consistent experience across different network devices. + + +.. contents:: + :local: + +Network resource module states +=============================== + +You use the network resource modules by assigning a state to what you want the module to do. 
The resource modules support the following states: + +merged + Ansible merges the on-device configuration with the provided configuration in the task. + +replaced + Ansible replaces the on-device configuration subsection with the provided configuration subsection in the task. + +overridden + Ansible overrides the on-device configuration for the resource with the provided configuration in the task. Use caution with this state as you could remove your access to the device (for example, by overriding the management interface configuration). + +deleted + Ansible deletes the on-device configuration subsection and restores any default settings. + +gathered + Ansible displays the resource details gathered from the network device and accessed with the ``gathered`` key in the result. + +rendered + Ansible renders the provided configuration in the task in the device-native format (for example, Cisco IOS CLI). Ansible returns this rendered configuration in the ``rendered`` key in the result. Note this state does not communicate with the network device and can be used offline. + +parsed + Ansible parses the configuration from the ``running_configuration`` option into Ansible structured data in the ``parsed`` key in the result. Note this does not gather the configuration from the network device so this state can be used offline. + +Using network resource modules +============================== + +This example configures the L3 interface resource on a Cisco IOS device, based on different state settings. + + .. code-block:: yaml + + - name: configure l3 interface + cisco.ios.ios_l3_interfaces: + config: "{{ config }}" + state: <state> + +The following table shows an example of how an initial resource configuration changes with this task for different states. 
+ ++-----------------------------------------+------------------------------------+-----------------------------------------+ +| Resource starting configuration | task-provided configuration (YAML) | Final resource configuration on device | ++=========================================+====================================+=========================================+ +| .. code-block:: text | .. code-block:: yaml | *merged* | +| | | .. code-block:: text | +| interface loopback100 | config: | | +| ip address 10.10.1.100 255.255.255.0 | - ipv6: | interface loopback100 | +| ipv6 address FC00:100/64 | - address: fc00::100/64 | ip address 10.10.1.100 255.255.255.0| +| | - address: fc00::101/64 | ipv6 address FC00:100/64 | +| | name: loopback100 | ipv6 address FC00:101/64 | +| | +-----------------------------------------+ +| | | *replaced* | +| | | .. code-block:: text | +| | | | +| | | interface loopback100 | +| | | no ip address | +| | | ipv6 address FC00:100/64 | +| | | ipv6 address FC00:101/64 | +| | +-----------------------------------------+ +| | | *overridden* | +| | | Incorrect use case. This would remove | +| | | all interfaces from the device | +| | | (including the mgmt interface) except | +| | | the configured loopback100 | +| | +-----------------------------------------+ +| | | *deleted* | +| | | .. code-block:: text | +| | | | +| | | interface loopback100 | +| | | no ip address | ++-----------------------------------------+------------------------------------+-----------------------------------------+ + +Network resource modules return the following details: + +* The *before* state - the existing resource configuration before the task was executed. +* The *after* state - the new resource configuration that exists on the network device after the task was executed. +* Commands - any commands configured on the device. + +.. 
code-block:: yaml + + ok: [nxos101] => + result: + after: + contact: IT Support + location: Room E, Building 6, Seattle, WA 98134 + users: + - algorithm: md5 + group: network-admin + localized_key: true + password: '0x73fd9a2cc8c53ed3dd4ed8f4ff157e69' + privacy_password: '0x73fd9a2cc8c53ed3dd4ed8f4ff157e69' + username: admin + before: + contact: IT Support + location: Room E, Building 5, Seattle HQ + users: + - algorithm: md5 + group: network-admin + localized_key: true + password: '0x73fd9a2cc8c53ed3dd4ed8f4ff157e69' + privacy_password: '0x73fd9a2cc8c53ed3dd4ed8f4ff157e69' + username: admin + changed: true + commands: + - snmp-server location Room E, Building 6, Seattle, WA 98134 + failed: false + + +Example: Verifying the network device configuration has not changed +==================================================================== + +The following playbook uses the :ref:`arista.eos.eos_l3_interfaces <ansible_collections.arista.eos.eos_l3_interfaces_module>` module to gather a subset of the network device configuration (Layer 3 interfaces only) and verifies the information is accurate and has not changed. This playbook passes the results of :ref:`arista.eos.eos_facts <ansible_collections.arista.eos.eos_facts_module>` directly to the ``arista.eos.eos_l3_interfaces`` module. + + +.. code-block:: yaml + + - name: Example of facts being pushed right back to device. + hosts: arista + gather_facts: false + tasks: + - name: grab arista eos facts + arista.eos.eos_facts: + gather_subset: min + gather_network_resources: l3_interfaces + + - name: Ensure that the IP address information is accurate. + arista.eos.eos_l3_interfaces: + config: "{{ ansible_network_resources['l3_interfaces'] }}" + register: result + + - name: Ensure config did not change. + assert: + that: not result.changed + +Example: Acquiring and updating VLANs on a network device +========================================================== + +This example shows how you can use resource modules to: + +#. 
Retrieve the current configuration on a network device. +#. Save that configuration locally. +#. Update that configuration and apply it to the network device. + +This example uses the ``cisco.ios.ios_vlans`` resource module to retrieve and update the VLANs on an IOS device. + +1. Retrieve the current IOS VLAN configuration: + +.. code-block:: yaml + + - name: Gather VLAN information as structured data + cisco.ios.ios_facts: + gather_subset: + - '!all' + - '!min' + gather_network_resources: + - 'vlans' + +2. Store the VLAN configuration locally: + +.. code-block:: yaml + + - name: Store VLAN facts to host_vars + copy: + content: "{{ ansible_network_resources | to_nice_yaml }}" + dest: "{{ playbook_dir }}/host_vars/{{ inventory_hostname }}" + +3. Modify the stored file to update the VLAN configuration locally. + +4. Merge the updated VLAN configuration with the existing configuration on the device: + +.. code-block:: yaml + + - name: Make VLAN config changes by updating stored facts on the controller. + cisco.ios.ios_vlans: + config: "{{ vlans }}" + state: merged + tags: update_config + +.. seealso:: + + `Network Features in Ansible 2.9 <https://www.ansible.com/blog/network-features-coming-soon-in-ansible-engine-2.9>`_ + An introductory blog post on network resource modules. + `Deep Dive into Network Resource Modules <https://www.ansible.com/deep-dive-into-ansible-network-resource-module>`_ + A deeper dive presentation into network resource modules. diff --git a/docs/docsite/rst/network/user_guide/network_working_with_command_output.rst b/docs/docsite/rst/network/user_guide/network_working_with_command_output.rst new file mode 100644 index 00000000..12040d4b --- /dev/null +++ b/docs/docsite/rst/network/user_guide/network_working_with_command_output.rst @@ -0,0 +1,122 @@ +.. 
_networking_working_with_command_output: + +********************************************************** +Working with command output and prompts in network modules +********************************************************** + +.. contents:: + :local: + +Conditionals in networking modules +=================================== + +Ansible allows you to use conditionals to control the flow of your playbooks. Ansible networking command modules use the following unique conditional statements. + +* ``eq`` - Equal +* ``neq`` - Not equal +* ``gt`` - Greater than +* ``ge`` - Greater than or equal +* ``lt`` - Less than +* ``le`` - Less than or equal +* ``contains`` - Object contains specified item + + +Conditional statements evaluate the results from the commands that are +executed remotely on the device. Once the task executes the command +set, the ``wait_for`` argument can be used to evaluate the results before +returning control to the Ansible playbook. + +For example:: + + --- + - name: wait for interface to be admin enabled + arista.eos.eos_command: + commands: + - show interface Ethernet4 | json + wait_for: + - "result[0].interfaces.Ethernet4.interfaceStatus eq connected" + +In the above example task, the command :code:`show interface Ethernet4 | json` +is executed on the remote device and the results are evaluated. If +the path +:code:`(result[0].interfaces.Ethernet4.interfaceStatus)` is not equal to +"connected", then the command is retried. This process continues +until either the condition is satisfied or the number of retries has +expired (by default, this is 10 retries at 1 second intervals). + +The commands module can also evaluate more than one set of command +results in an interface. 
For instance:: + + --- + - name: wait for interfaces to be admin enabled + arista.eos.eos_command: + commands: + - show interface Ethernet4 | json + - show interface Ethernet5 | json + wait_for: + - "result[0].interfaces.Ethernet4.interfaceStatus eq connected" + - "result[1].interfaces.Ethernet5.interfaceStatus eq connected" + +In the above example, two commands are executed on the +remote device, and the results are evaluated. By specifying the result +index value (0 or 1), the correct result output is checked against the +conditional. + +The ``wait_for`` argument must always start with result and then the +command index in ``[]``, where ``0`` is the first command in the commands list, +``1`` is the second command, ``2`` is the third and so on. + + +Handling prompts in network modules +=================================== + +Network devices may require that you answer a prompt before performing a change on the device. Individual network modules such as :ref:`cisco.ios.ios_command <ansible_collections.cisco.ios.ios_command_module>` and :ref:`cisco.nxos.nxos_command <ansible_collections.cisco.nxos.nxos_command_module>` can handle this with a ``prompt`` parameter. + +.. note:: + + ``prompt`` is a Python regex. If you add special characters such as ``?`` in the ``prompt`` value, the prompt won't match and you will get a timeout. To avoid this, ensure that the ``prompt`` value is a Python regex that matches the actual device prompt. Any special characters must be handled correctly in the ``prompt`` regex. + +You can also use the :ref:`ansible.netcommon.cli_command <ansible_collections.ansible.netcommon.cli_command_module>` to handle multiple prompts. + +.. 
code-block:: yaml + + --- + - name: multiple prompt, multiple answer (mandatory check for all prompts) + ansible.netcommon.cli_command: + command: "copy sftp sftp://user@host//user/test.img" + check_all: True + prompt: + - "Confirm download operation" + - "Password" + - "Do you want to change that to the standby image" + answer: + - 'y' + - <password> + - 'y' + +You must list the prompt and the answers in the same order (that is, prompt[0] is answered by answer[0]). + +In the above example, ``check_all: True`` ensures that the task gives the matching answer to each prompt. Without that setting, a task with multiple prompts would give the first answer to every prompt. + +In the following example, the second answer would be ignored and ``y`` would be the answer given to both prompts. That is, this task only works because both answers are identical. Also notice again that ``prompt`` must be a Python regex, which is why the ``?`` is escaped in the first prompt. + +.. code-block:: yaml + + --- + - name: reboot ios device + ansible.netcommon.cli_command: + command: reload + prompt: + - Save\? + - confirm + answer: + - y + - y + +.. seealso:: + + `Rebooting network devices with Ansible <https://www.ansible.com/blog/rebooting-network-devices-with-ansible>`_ + Examples using ``wait_for``, ``wait_for_connection``, and ``prompt`` for network devices. + + `Deep dive on cli_command <https://www.ansible.com/blog/deep-dive-on-cli-command-for-network-automation>`_ + Detailed overview of how to use the ``cli_command``. diff --git a/docs/docsite/rst/network/user_guide/platform_ce.rst b/docs/docsite/rst/network/user_guide/platform_ce.rst new file mode 100644 index 00000000..19491748 --- /dev/null +++ b/docs/docsite/rst/network/user_guide/platform_ce.rst @@ -0,0 +1,213 @@ +.. 
_ce_platform_options: + +*************************************** +CloudEngine OS Platform Options +*************************************** + +CloudEngine CE OS is part of the `community.network <https://galaxy.ansible.com/community/network>`_ collection and supports multiple connections. This page offers details on how each connection works in Ansible and how to use it. + +.. contents:: + :local: + +Connections available +================================================================================ + +.. table:: + :class: documentation-table + + ==================== ========================================== ========================= + .. CLI NETCONF + + + ==================== ========================================== ========================= + Protocol SSH XML over SSH + + Credentials uses SSH keys / SSH-agent if present uses SSH keys / SSH-agent if present + + accepts ``-u myuser -k`` if using password accepts ``-u myuser -k`` if using password + + Indirect Access via a bastion (jump host) via a bastion (jump host) + + Connection Settings ``ansible_connection:`` ``ansible_connection:`` + ``ansible.netcommon.network_cli`` ``ansible.netcommon.netconf`` + + |enable_mode| not supported by ce OS not supported by ce OS + + Returned Data Format Refer to individual module documentation Refer to individual module documentation + ==================== ========================================== ========================= + +.. |enable_mode| replace:: Enable Mode |br| (Privilege Escalation) + +The ``ansible_connection: local`` has been deprecated. Please use ``ansible_connection: ansible.netcommon.netconf`` or ``ansible_connection=ansible.netcommon.network_cli`` instead. + +Using CLI in Ansible +==================== + +Example CLI inventory ``[ce:vars]`` +-------------------------------------- + +.. code-block:: yaml + + [ce:vars] + ansible_connection=ansible.netcommon.network_cli + ansible_network_os=community.network.ce + ansible_user=myuser + ansible_password=!vault... 
+ ansible_ssh_common_args='-o ProxyCommand="ssh -W %h:%p -q bastion01"' + + +- If you are using SSH keys (including an ssh-agent) you can remove the ``ansible_password`` configuration. +- If you are accessing your host directly (not through a bastion/jump host) you can remove the ``ansible_ssh_common_args`` configuration. +- If you are accessing your host through a bastion/jump host, you cannot include your SSH password in the ``ProxyCommand`` directive. To prevent secrets from leaking out (for example in ``ps`` output), SSH does not support providing passwords via environment variables. + +Example CLI task +---------------- + +.. code-block:: yaml + + - name: Retrieve CE OS version + community.network.ce_command: + commands: display version + when: ansible_network_os == 'community.network.ce' + + +Using NETCONF in Ansible +======================== + +Enabling NETCONF +---------------- + +Before you can use NETCONF to connect to a switch, you must: + +- install the ``ncclient`` python package on your control node(s) with ``pip install ncclient`` +- enable NETCONF on the CloudEngine OS device(s) + +To enable NETCONF on a new switch using Ansible, use the ``community.network.ce_config`` module with the CLI connection. Set up your platform-level variables just like in the CLI example above, then run a playbook task like this: + +.. code-block:: yaml + + - name: Enable NETCONF + connection: ansible.netcommon.network_cli + community.network.ce_config: + lines: + - snetconf server enable + when: ansible_network_os == 'community.network.ce' + +Once NETCONF is enabled, change your variables to use the NETCONF connection. + +Example NETCONF inventory ``[ce:vars]`` +------------------------------------------ + +.. 
code-block:: yaml + + [ce:vars] + ansible_connection=ansible.netcommon.netconf + ansible_network_os=community.network.ce + ansible_user=myuser + ansible_password=!vault | + ansible_ssh_common_args='-o ProxyCommand="ssh -W %h:%p -q bastion01"' + + +Example NETCONF task +-------------------- + +.. code-block:: yaml + + - name: Create a vlan, id is 50(ce) + community.network.ce_vlan: + vlan_id: 50 + name: WEB + when: ansible_network_os == 'community.network.ce' + + +Notes +======================== + +Modules that work with ``ansible.netcommon.network_cli`` +--------------------------------------------------------- + +.. code-block:: yaml + + community.network.ce_acl_interface + community.network.ce_command + community.network.ce_config + community.network.ce_evpn_bgp + community.network.ce_evpn_bgp_rr + community.network.ce_evpn_global + community.network.ce_facts + community.network.ce_mlag_interface + community.network.ce_mtu + community.network.ce_netstream_aging + community.network.ce_netstream_export + community.network.ce_netstream_global + community.network.ce_netstream_template + community.network.ce_ntp_auth + community.network.ce_rollback + community.network.ce_snmp_contact + community.network.ce_snmp_location + community.network.ce_snmp_traps + community.network.ce_startup + community.network.ce_stp + community.network.ce_vxlan_arp + community.network.ce_vxlan_gateway + community.network.ce_vxlan_global + + +Modules that work with ``ansible.netcommon.netconf`` +----------------------------------------------------- + +.. 
code-block:: yaml + + community.network.ce_aaa_server + community.network.ce_aaa_server_host + community.network.ce_acl + community.network.ce_acl_advance + community.network.ce_bfd_global + community.network.ce_bfd_session + community.network.ce_bfd_view + community.network.ce_bgp + community.network.ce_bgp_af + community.network.ce_bgp_neighbor + community.network.ce_bgp_neighbor_af + community.network.ce_dldp + community.network.ce_dldp_interface + community.network.ce_eth_trunk + community.network.ce_evpn_bd_vni + community.network.ce_file_copy + community.network.ce_info_center_debug + community.network.ce_info_center_global + community.network.ce_info_center_log + community.network.ce_info_center_trap + community.network.ce_interface + community.network.ce_interface_ospf + community.network.ce_ip_interface + community.network.ce_lacp + community.network.ce_link_status + community.network.ce_lldp + community.network.ce_lldp_interface + community.network.ce_mlag_config + community.network.ce_netconf + community.network.ce_ntp + community.network.ce_ospf + community.network.ce_ospf_vrf + community.network.ce_reboot + community.network.ce_sflow + community.network.ce_snmp_community + community.network.ce_snmp_target_host + community.network.ce_snmp_user + community.network.ce_static_route + community.network.ce_static_route_bfd + community.network.ce_switchport + community.network.ce_vlan + community.network.ce_vrf + community.network.ce_vrf_af + community.network.ce_vrf_interface + community.network.ce_vrrp + community.network.ce_vxlan_tunnel + community.network.ce_vxlan_vap + +.. include:: shared_snippets/SSH_warning.txt + +.. seealso:: + + :ref:`timeout_options` diff --git a/docs/docsite/rst/network/user_guide/platform_cnos.rst b/docs/docsite/rst/network/user_guide/platform_cnos.rst new file mode 100644 index 00000000..62e1d549 --- /dev/null +++ b/docs/docsite/rst/network/user_guide/platform_cnos.rst @@ -0,0 +1,78 @@ +.. 
_cnos_platform_options: + +*************************************** +CNOS Platform Options +*************************************** + +CNOS is part of the `community.network <https://galaxy.ansible.com/community/network>`_ collection and supports Enable Mode (Privilege Escalation). This page offers details on how to use Enable Mode on CNOS in Ansible. + +.. contents:: + :local: + +Connections available +================================================================================ + +.. table:: + :class: documentation-table + + ==================== ========================================== + .. CLI + ==================== ========================================== + Protocol SSH + + Credentials uses SSH keys / SSH-agent if present + + accepts ``-u myuser -k`` if using password + + Indirect Access via a bastion (jump host) + + Connection Settings ``ansible_connection: ansible.netcommon.network_cli`` + + |enable_mode| supported: use ``ansible_become: yes`` + with ``ansible_become_method: enable`` + and ``ansible_become_password:`` + + Returned Data Format ``stdout[0].`` + ==================== ========================================== + +.. |enable_mode| replace:: Enable Mode |br| (Privilege Escalation) + +The ``ansible_connection: local`` has been deprecated. Please use ``ansible_connection: ansible.netcommon.network_cli`` instead. + +Using CLI in Ansible +================================================================================ + +Example CLI ``group_vars/cnos.yml`` +-------------------------------------------------------------------------------- + +.. code-block:: yaml + + ansible_connection: ansible.netcommon.network_cli + ansible_network_os: community.network.cnos + ansible_user: myuser + ansible_password: !vault... + ansible_become: yes + ansible_become_method: enable + ansible_become_password: !vault... 
+ ansible_ssh_common_args: '-o ProxyCommand="ssh -W %h:%p -q bastion01"' + + +- If you are using SSH keys (including an ssh-agent) you can remove the ``ansible_password`` configuration. +- If you are accessing your host directly (not through a bastion/jump host) you can remove the ``ansible_ssh_common_args`` configuration. +- If you are accessing your host through a bastion/jump host, you cannot include your SSH password in the ``ProxyCommand`` directive. To prevent secrets from leaking out (for example in ``ps`` output), SSH does not support providing passwords via environment variables. + +Example CLI task +---------------- + +.. code-block:: yaml + + - name: Retrieve CNOS OS version + community.network.cnos_command: + commands: show version + when: ansible_network_os == 'community.network.cnos' + +.. include:: shared_snippets/SSH_warning.txt + +.. seealso:: + + :ref:`timeout_options` diff --git a/docs/docsite/rst/network/user_guide/platform_dellos10.rst b/docs/docsite/rst/network/user_guide/platform_dellos10.rst new file mode 100644 index 00000000..638932a2 --- /dev/null +++ b/docs/docsite/rst/network/user_guide/platform_dellos10.rst @@ -0,0 +1,80 @@ +.. _dellos10_platform_options: + +*************************************** +Dell OS10 Platform Options +*************************************** + +The `dellemc.os10 <https://galaxy.ansible.com/dellemc_networking/os10>`_ collection supports Enable Mode (Privilege Escalation). This page offers details on how to use Enable Mode on OS10 in Ansible. + +.. contents:: + :local: + +Connections available +================================================================================ + +.. table:: + :class: documentation-table + + ==================== ========================================== + .. 
CLI + ==================== ========================================== + Protocol SSH + + Credentials uses SSH keys / SSH-agent if present + + accepts ``-u myuser -k`` if using password + + Indirect Access via a bastion (jump host) + + Connection Settings ``ansible_connection: ansible.netcommon.network_cli`` + + |enable_mode| supported: use ``ansible_become: yes`` + with ``ansible_become_method: enable`` + and ``ansible_become_password:`` + + Returned Data Format ``stdout[0].`` + ==================== ========================================== + +.. |enable_mode| replace:: Enable Mode |br| (Privilege Escalation) + +The ``ansible_connection: local`` has been deprecated. Please use ``ansible_connection: ansible.netcommon.network_cli`` instead. + + +Using CLI in Ansible +================================================================================ + +Example CLI ``group_vars/dellos10.yml`` +--------------------------------------- + +.. code-block:: yaml + + ansible_connection: ansible.netcommon.network_cli + ansible_network_os: dellemc.os10.os10 + ansible_user: myuser + ansible_password: !vault... + ansible_become: yes + ansible_become_method: enable + ansible_become_password: !vault... + ansible_ssh_common_args: '-o ProxyCommand="ssh -W %h:%p -q bastion01"' + + +- If you are using SSH keys (including an ssh-agent) you can remove the ``ansible_password`` configuration. +- If you are accessing your host directly (not through a bastion/jump host) you can remove the ``ansible_ssh_common_args`` configuration. +- If you are accessing your host through a bastion/jump host, you cannot include your SSH password in the ``ProxyCommand`` directive. To prevent secrets from leaking out (for example in ``ps`` output), SSH does not support providing passwords via environment variables. + +Example CLI task +---------------- + +.. 
code-block:: yaml + + - name: Backup current switch config (dellos10) + dellemc.os10.os10_config: + backup: yes + register: backup_dellos10_location + when: ansible_network_os == 'dellemc.os10.os10' + +.. include:: shared_snippets/SSH_warning.txt + +.. seealso:: + + :ref:`timeout_options` diff --git a/docs/docsite/rst/network/user_guide/platform_dellos6.rst b/docs/docsite/rst/network/user_guide/platform_dellos6.rst new file mode 100644 index 00000000..d315c59d --- /dev/null +++ b/docs/docsite/rst/network/user_guide/platform_dellos6.rst @@ -0,0 +1,79 @@ +.. _dellos6_platform_options: + +*************************************** +Dell OS6 Platform Options +*************************************** + +The `dellemc.os6 <https://github.com/ansible-collections/dellemc.os6>`_ collection supports Enable Mode (Privilege Escalation). This page offers details on how to use Enable Mode on OS6 in Ansible. + +.. contents:: + :local: + +Connections available +================================================================================ + +.. table:: + :class: documentation-table + + ==================== ========================================== + .. CLI + ==================== ========================================== + Protocol SSH + + Credentials uses SSH keys / SSH-agent if present + + accepts ``-u myuser -k`` if using password + + Indirect Access via a bastion (jump host) + + Connection Settings ``ansible_connection: ansible.netcommon.network_cli`` + + |enable_mode| supported: use ``ansible_become: yes`` + with ``ansible_become_method: enable`` + and ``ansible_become_password:`` + + Returned Data Format ``stdout[0].`` + ==================== ========================================== + +.. |enable_mode| replace:: Enable Mode |br| (Privilege Escalation) + +The ``ansible_connection: local`` has been deprecated. Please use ``ansible_connection: ansible.netcommon.network_cli`` instead. 
+ +Using CLI in Ansible +================================================================================ + +Example CLI ``group_vars/dellos6.yml`` +-------------------------------------- + +.. code-block:: yaml + + ansible_connection: ansible.netcommon.network_cli + ansible_network_os: dellemc.os6.os6 + ansible_user: myuser + ansible_password: !vault... + ansible_become: yes + ansible_become_method: enable + ansible_become_password: !vault... + ansible_ssh_common_args: '-o ProxyCommand="ssh -W %h:%p -q bastion01"' + + +- If you are using SSH keys (including an ssh-agent) you can remove the ``ansible_password`` configuration. +- If you are accessing your host directly (not through a bastion/jump host) you can remove the ``ansible_ssh_common_args`` configuration. +- If you are accessing your host through a bastion/jump host, you cannot include your SSH password in the ``ProxyCommand`` directive. To prevent secrets from leaking out (for example in ``ps`` output), SSH does not support providing passwords via environment variables. + +Example CLI task +---------------- + +.. code-block:: yaml + + - name: Backup current switch config (dellos6) + dellemc.os6.os6_config: + backup: yes + register: backup_dellso6_location + when: ansible_network_os == 'dellemc.os6.os6' + +.. include:: shared_snippets/SSH_warning.txt + +.. seealso:: + + :ref:`timeout_options` diff --git a/docs/docsite/rst/network/user_guide/platform_dellos9.rst b/docs/docsite/rst/network/user_guide/platform_dellos9.rst new file mode 100644 index 00000000..cadde622 --- /dev/null +++ b/docs/docsite/rst/network/user_guide/platform_dellos9.rst @@ -0,0 +1,79 @@ +.. _dellos9_platform_options: + +*************************************** +Dell OS9 Platform Options +*************************************** + +The `dellemc.os9 <https://github.com/ansible-collections/dellemc.os9>`_ collection supports Enable Mode (Privilege Escalation). This page offers details on how to use Enable Mode on OS9 in Ansible. + +.. 
contents:: + :local: + +Connections available +================================================================================ + +.. table:: + :class: documentation-table + + ==================== ========================================== + .. CLI + ==================== ========================================== + Protocol SSH + + Credentials uses SSH keys / SSH-agent if present + + accepts ``-u myuser -k`` if using password + + Indirect Access via a bastion (jump host) + + Connection Settings ``ansible_connection: ansible.netcommon.network_cli`` + + |enable_mode| supported: use ``ansible_become: yes`` + with ``ansible_become_method: enable`` + and ``ansible_become_password:`` + + Returned Data Format ``stdout[0].`` + ==================== ========================================== + +.. |enable_mode| replace:: Enable Mode |br| (Privilege Escalation) + +The ``ansible_connection: local`` has been deprecated. Please use ``ansible_connection: ansible.netcommon.network_cli`` instead. + +Using CLI in Ansible +================================================================================ + +Example CLI ``group_vars/dellos9.yml`` +-------------------------------------- + +.. code-block:: yaml + + ansible_connection: ansible.netcommon.network_cli + ansible_network_os: dellemc.os9.os9 + ansible_user: myuser + ansible_password: !vault... + ansible_become: yes + ansible_become_method: enable + ansible_become_password: !vault... + ansible_ssh_common_args: '-o ProxyCommand="ssh -W %h:%p -q bastion01"' + + +- If you are using SSH keys (including an ssh-agent) you can remove the ``ansible_password`` configuration. +- If you are accessing your host directly (not through a bastion/jump host) you can remove the ``ansible_ssh_common_args`` configuration. +- If you are accessing your host through a bastion/jump host, you cannot include your SSH password in the ``ProxyCommand`` directive. 
To prevent secrets from leaking out (for example in ``ps`` output), SSH does not support providing passwords via environment variables. + +Example CLI task +---------------- + +.. code-block:: yaml + + - name: Backup current switch config (dellos9) + dellemc.os9.os9_config: + backup: yes + register: backup_dellos9_location + when: ansible_network_os == 'dellemc.os9.os9' + +.. include:: shared_snippets/SSH_warning.txt + +.. seealso:: + + :ref:`timeout_options` diff --git a/docs/docsite/rst/network/user_guide/platform_enos.rst b/docs/docsite/rst/network/user_guide/platform_enos.rst new file mode 100644 index 00000000..58c0b83e --- /dev/null +++ b/docs/docsite/rst/network/user_guide/platform_enos.rst @@ -0,0 +1,80 @@ +.. _enos_platform_options: + +*************************************** +ENOS Platform Options +*************************************** + +ENOS is part of the `community.network <https://galaxy.ansible.com/community/network>`_ collection and supports Enable Mode (Privilege Escalation). This page offers details on how to use Enable Mode on ENOS in Ansible. + +.. contents:: + :local: + +Connections available +================================================================================ + +.. table:: + :class: documentation-table + + ==================== ========================================== + .. CLI + ==================== ========================================== + Protocol SSH + + Credentials uses SSH keys / SSH-agent if present + + accepts ``-u myuser -k`` if using password + + Indirect Access via a bastion (jump host) + + Connection Settings ``ansible_connection: ansible.netcommon.network_cli`` + + |enable_mode| supported: use ``ansible_become: yes`` + with ``ansible_become_method: enable`` + and ``ansible_become_password:`` + + Returned Data Format ``stdout[0].`` + ==================== ========================================== + +.. 
|enable_mode| replace:: Enable Mode |br| (Privilege Escalation)
+
+
+The ``ansible_connection: local`` has been deprecated. Please use ``ansible_connection: ansible.netcommon.network_cli`` instead.
+
+Using CLI in Ansible
+================================================================================
+
+Example CLI ``group_vars/enos.yml``
+--------------------------------------------------------------------------------
+
+.. code-block:: yaml
+
+ ansible_connection: ansible.netcommon.network_cli
+ ansible_network_os: community.network.enos
+ ansible_user: myuser
+ ansible_password: !vault...
+ ansible_become: yes
+ ansible_become_method: enable
+ ansible_become_password: !vault...
+ ansible_ssh_common_args: '-o ProxyCommand="ssh -W %h:%p -q bastion01"'
+
+
+- If you are using SSH keys (including an ssh-agent) you can remove the ``ansible_password`` configuration.
+- If you are accessing your host directly (not through a bastion/jump host) you can remove the ``ansible_ssh_common_args`` configuration.
+- If you are accessing your host through a bastion/jump host, you cannot include your SSH password in the ``ProxyCommand`` directive. To prevent secrets from leaking out (for example in ``ps`` output), SSH does not support providing passwords via environment variables.
+
+Example CLI task
+----------------
+
+.. code-block:: yaml
+
+ - name: Retrieve ENOS OS version
+ community.network.enos_command:
+ commands: show version
+ when: ansible_network_os == 'community.network.enos'
+
+.. include:: shared_snippets/SSH_warning.txt
+
+.. seealso::
+
+ :ref:`timeout_options`
diff --git a/docs/docsite/rst/network/user_guide/platform_eos.rst b/docs/docsite/rst/network/user_guide/platform_eos.rst
new file mode 100644
index 00000000..065a7dc0
--- /dev/null
+++ b/docs/docsite/rst/network/user_guide/platform_eos.rst
@@ -0,0 +1,140 @@
+.. 
_eos_platform_options: + +*************************************** +EOS Platform Options +*************************************** + +The `Arista EOS <https://galaxy.ansible.com/arista/eos>`_ collection supports multiple connections. This page offers details on how each connection works in Ansible and how to use it. + +.. contents:: + :local: + +Connections available +================================================================================ + +.. table:: + :class: documentation-table + + ==================== ========================================== =========================== + .. CLI eAPI + ==================== ========================================== =========================== + Protocol SSH HTTP(S) + + Credentials uses SSH keys / SSH-agent if present uses HTTPS certificates if + present + accepts ``-u myuser -k`` if using password + + Indirect Access via a bastion (jump host) via a web proxy + + Connection Settings ``ansible_connection:`` ``ansible_connection:`` + ``ansible.netcommon.network_cli`` ``ansible.netcommon.httpapi`` + + + |enable_mode| supported: |br| supported: |br| + + * use ``ansible_become: yes`` * ``httpapi`` + with ``ansible_become_method: enable`` uses ``ansible_become: yes`` + with ``ansible_become_method: enable`` + + Returned Data Format ``stdout[0].`` ``stdout[0].messages[0].`` + ==================== ========================================== =========================== + +.. |enable_mode| replace:: Enable Mode |br| (Privilege Escalation) + + +The ``ansible_connection: local`` has been deprecated. Please use ``ansible_connection: ansible.netcommon.network_cli`` or ``ansible_connection: ansible.netcommon.httpapi`` instead. + +Using CLI in Ansible +==================== + +Example CLI ``group_vars/eos.yml`` +---------------------------------- + +.. code-block:: yaml + + ansible_connection: ansible.netcommon.network_cli + ansible_network_os: arista.eos.eos + ansible_user: myuser + ansible_password: !vault... 
+ ansible_become: yes + ansible_become_method: enable + ansible_become_password: !vault... + ansible_ssh_common_args: '-o ProxyCommand="ssh -W %h:%p -q bastion01"' + + +- If you are using SSH keys (including an ssh-agent) you can remove the ``ansible_password`` configuration. +- If you are accessing your host directly (not through a bastion/jump host) you can remove the ``ansible_ssh_common_args`` configuration. +- If you are accessing your host through a bastion/jump host, you cannot include your SSH password in the ``ProxyCommand`` directive. To prevent secrets from leaking out (for example in ``ps`` output), SSH does not support providing passwords via environment variables. + +Example CLI task +---------------- + +.. code-block:: yaml + + - name: Backup current switch config (eos) + arista.eos.eos_config: + backup: yes + register: backup_eos_location + when: ansible_network_os == 'arista.eos.eos' + + + +Using eAPI in Ansible +===================== + +Enabling eAPI +------------- + +Before you can use eAPI to connect to a switch, you must enable eAPI. To enable eAPI on a new switch with Ansible, use the ``arista.eos.eos_eapi`` module through the CLI connection. Set up ``group_vars/eos.yml`` just like in the CLI example above, then run a playbook task like this: + +.. code-block:: yaml + + - name: Enable eAPI + arista.eos.eos_eapi: + enable_http: yes + enable_https: yes + become: true + become_method: enable + when: ansible_network_os == 'arista.eos.eos' + +You can find more options for enabling HTTP/HTTPS connections in the :ref:`arista.eos.eos_eapi <ansible_collections.arista.eos.eos_eapi_module>` module documentation. + +Once eAPI is enabled, change your ``group_vars/eos.yml`` to use the eAPI connection. + +Example eAPI ``group_vars/eos.yml`` +----------------------------------- + +.. code-block:: yaml + + ansible_connection: ansible.netcommon.httpapi + ansible_network_os: arista.eos.eos + ansible_user: myuser + ansible_password: !vault... 
+ ansible_become: yes + ansible_become_method: enable + proxy_env: + http_proxy: http://proxy.example.com:8080 + +- If you are accessing your host directly (not through a web proxy) you can remove the ``proxy_env`` configuration. +- If you are accessing your host through a web proxy using ``https``, change ``http_proxy`` to ``https_proxy``. + + +Example eAPI task +----------------- + +.. code-block:: yaml + + - name: Backup current switch config (eos) + arista.eos.eos_config: + backup: yes + register: backup_eos_location + environment: "{{ proxy_env }}" + when: ansible_network_os == 'arista.eos.eos' + +In this example the ``proxy_env`` variable defined in ``group_vars`` gets passed to the ``environment`` option of the module in the task. + +.. include:: shared_snippets/SSH_warning.txt + +.. seealso:: + + :ref:`timeout_options` diff --git a/docs/docsite/rst/network/user_guide/platform_eric_eccli.rst b/docs/docsite/rst/network/user_guide/platform_eric_eccli.rst new file mode 100644 index 00000000..cdd45779 --- /dev/null +++ b/docs/docsite/rst/network/user_guide/platform_eric_eccli.rst @@ -0,0 +1,73 @@ +.. _eic_eccli_platform_options: + +*************************************** +ERIC_ECCLI Platform Options +*************************************** + +Extreme ERIC_ECCLI is part of the `community.network <https://galaxy.ansible.com/community/network>`_ collection and only supports CLI connections today. This page offers details on how to use ``ansible.netcommon.network_cli`` on ERIC_ECCLI in Ansible. + +.. contents:: + :local: + +Connections available +================================================================================ + +.. table:: + :class: documentation-table + + ==================== ========================================== + .. 
CLI + ==================== ========================================== + Protocol SSH + + Credentials uses SSH keys / SSH-agent if present + + accepts ``-u myuser -k`` if using password + + Indirect Access via a bastion (jump host) + + Connection Settings ``ansible_connection: ansible.netcommon.network_cli`` + + |enable_mode| not supported by ERIC_ECCLI + + Returned Data Format ``stdout[0].`` + ==================== ========================================== + +.. |enable_mode| replace:: Enable Mode |br| (Privilege Escalation) + +ERIC_ECCLI does not support ``ansible_connection: local``. You must use ``ansible_connection: ansible.netcommon.network_cli``. + +Using CLI in Ansible +==================== + +Example CLI ``group_vars/eric_eccli.yml`` +----------------------------------------- + +.. code-block:: yaml + + ansible_connection: ansible.netcommon.network_cli + ansible_network_os: community.network.eric_eccli + ansible_user: myuser + ansible_password: !vault... + ansible_ssh_common_args: '-o ProxyCommand="ssh -W %h:%p -q bastion01"' + + +- If you are using SSH keys (including an ssh-agent) you can remove the ``ansible_password`` configuration. +- If you are accessing your host directly (not through a bastion/jump host) you can remove the ``ansible_ssh_common_args`` configuration. +- If you are accessing your host through a bastion/jump host, you cannot include your SSH password in the ``ProxyCommand`` directive. To prevent secrets from leaking out (for example in ``ps`` output), SSH does not support providing passwords via environment variables. + +Example CLI task +---------------- + +.. code-block:: yaml + + - name: run show version on remote devices (eric_eccli) + community.network.eric_eccli_command: + commands: show version + when: ansible_network_os == 'community.network.eric_eccli' + +.. include:: shared_snippets/SSH_warning.txt + +.. 
seealso:: + + :ref:`timeout_options` diff --git a/docs/docsite/rst/network/user_guide/platform_exos.rst b/docs/docsite/rst/network/user_guide/platform_exos.rst new file mode 100644 index 00000000..e27e9ae4 --- /dev/null +++ b/docs/docsite/rst/network/user_guide/platform_exos.rst @@ -0,0 +1,108 @@ +.. _exos_platform_options: + +*************************************** +EXOS Platform Options +*************************************** + +Extreme EXOS is part of the `community.network <https://galaxy.ansible.com/community/network>`_ collection and supports multiple connections. This page offers details on how each connection works in Ansible and how to use it. + +.. contents:: + :local: + +Connections available +================================================================================ + + +.. table:: + :class: documentation-table + + ==================== ========================================== ========================= + .. CLI EXOS-API + ==================== ========================================== ========================= + Protocol SSH HTTP(S) + + Credentials uses SSH keys / SSH-agent if present uses HTTPS certificates if present + + accepts ``-u myuser -k`` if using password + + Indirect Access via a bastion (jump host) via a web proxy + + Connection Settings ``ansible_connection:`` ``ansible_connection:`` + ``ansible.netcommon.network_cli`` ``ansible.netcommon.httpapi`` + + |enable_mode| not supported by EXOS not supported by EXOS + + Returned Data Format ``stdout[0].`` ``stdout[0].messages[0].`` + ==================== ========================================== ========================= + +.. |enable_mode| replace:: Enable Mode |br| (Privilege Escalation) + +EXOS does not support ``ansible_connection: local``. You must use ``ansible_connection: ansible.netcommon.network_cli`` or ``ansible_connection: ansible.netcommon.httpapi``. + +Using CLI in Ansible +==================== + +Example CLI ``group_vars/exos.yml`` +----------------------------------- + +.. 
code-block:: yaml + + ansible_connection: ansible.netcommon.network_cli + ansible_network_os: community.network.exos + ansible_user: myuser + ansible_password: !vault... + ansible_ssh_common_args: '-o ProxyCommand="ssh -W %h:%p -q bastion01"' + + +- If you are using SSH keys (including an ssh-agent) you can remove the ``ansible_password`` configuration. +- If you are accessing your host directly (not through a bastion/jump host) you can remove the ``ansible_ssh_common_args`` configuration. +- If you are accessing your host through a bastion/jump host, you cannot include your SSH password in the ``ProxyCommand`` directive. To prevent secrets from leaking out (for example in ``ps`` output), SSH does not support providing passwords via environment variables. + +Example CLI task +---------------- + +.. code-block:: yaml + + - name: Retrieve EXOS OS version + community.network.exos_command: + commands: show version + when: ansible_network_os == 'community.network.exos' + + + +Using EXOS-API in Ansible +========================= + +Example EXOS-API ``group_vars/exos.yml`` +---------------------------------------- + +.. code-block:: yaml + + ansible_connection: ansible.netcommon.httpapi + ansible_network_os: community.network.exos + ansible_user: myuser + ansible_password: !vault... + proxy_env: + http_proxy: http://proxy.example.com:8080 + +- If you are accessing your host directly (not through a web proxy) you can remove the ``proxy_env`` configuration. +- If you are accessing your host through a web proxy using ``https``, change ``http_proxy`` to ``https_proxy``. + + +Example EXOS-API task +--------------------- + +.. code-block:: yaml + + - name: Retrieve EXOS OS version + community.network.exos_command: + commands: show version + when: ansible_network_os == 'community.network.exos' + +In this example the ``proxy_env`` variable defined in ``group_vars`` gets passed to the ``environment`` option of the module used in the task. + +.. 
include:: shared_snippets/SSH_warning.txt + +.. seealso:: + + :ref:`timeout_options` diff --git a/docs/docsite/rst/network/user_guide/platform_frr.rst b/docs/docsite/rst/network/user_guide/platform_frr.rst new file mode 100644 index 00000000..0d7bad14 --- /dev/null +++ b/docs/docsite/rst/network/user_guide/platform_frr.rst @@ -0,0 +1,73 @@ +.. _frr_platform_options: + +*************************************** +FRR Platform Options +*************************************** + +The `FRR <https://galaxy.ansible.com/frr/frr>`_ collection supports the ``ansible.netcommon.network_cli`` connection. This section provides details on how to use this connection for Free Range Routing (FRR). + +.. contents:: + :local: + +Connections available +================================================================================ + +.. table:: + :class: documentation-table + + ==================== ========================================== + .. CLI + ==================== ========================================== + Protocol SSH + + Credentials uses SSH keys / SSH-agent if present + + accepts ``-u myuser -k`` if using password + + Indirect Access via a bastion (jump host) + + Connection Settings ``ansible_connection: ansible.netcommon.network_cli`` + + |enable_mode| not supported + + Returned Data Format ``stdout[0].`` + ==================== ========================================== + +.. |enable_mode| replace:: Enable Mode |br| (Privilege Escalation) + + +Using CLI in Ansible +==================== + +Example CLI ``group_vars/frr.yml`` +---------------------------------- + +.. code-block:: yaml + + ansible_connection: ansible.netcommon.network_cli + ansible_network_os: frr.frr.frr + ansible_user: frruser + ansible_password: !vault... + ansible_ssh_common_args: '-o ProxyCommand="ssh -W %h:%p -q bastion01"' + +- The ``ansible_user`` should be a part of the ``frrvty`` group and should have the default shell set to ``/bin/vtysh``. 
+- If you are using SSH keys (including an ssh-agent) you can remove the ``ansible_password`` configuration.
+- If you are accessing your host directly (not through a bastion/jump host) you can remove the ``ansible_ssh_common_args`` configuration.
+- If you are accessing your host through a bastion/jump host, you cannot include your SSH password in the ``ProxyCommand`` directive. To prevent secrets from leaking out (for example in ``ps`` output), SSH does not support providing passwords via environment variables.
+
+Example CLI task
+----------------
+
+.. code-block:: yaml
+
+ - name: Gather FRR facts
+ frr.frr.frr_facts:
+ gather_subset:
+ - config
+ - hardware
+
+.. include:: shared_snippets/SSH_warning.txt
+
+.. seealso::
+
+ :ref:`timeout_options`
diff --git a/docs/docsite/rst/network/user_guide/platform_icx.rst b/docs/docsite/rst/network/user_guide/platform_icx.rst
new file mode 100644
index 00000000..96777f1e
--- /dev/null
+++ b/docs/docsite/rst/network/user_guide/platform_icx.rst
@@ -0,0 +1,77 @@
+.. _icx_platform_options:
+
+***************************************
+ICX Platform Options
+***************************************
+
+ICX is part of the `community.network <https://galaxy.ansible.com/community/network>`_ collection and supports Enable Mode (Privilege Escalation). This page offers details on how to use Enable Mode on ICX in Ansible.
+
+.. contents::
+ :local:
+
+Connections available
+================================================================================
+
+.. table::
+ :class: documentation-table
+
+ ==================== ==========================================
+ .. 
CLI + ==================== ========================================== + Protocol SSH + + Credentials uses SSH keys / SSH-agent if present + + accepts ``-u myuser -k`` if using password + + Indirect Access via a bastion (jump host) + + Connection Settings ``ansible_connection: ansible.netcommon.network_cli`` + + |enable_mode| supported: use ``ansible_become: yes`` with + ``ansible_become_method: enable`` and ``ansible_become_password:`` + + Returned Data Format ``stdout[0].`` + ==================== ========================================== + +.. |enable_mode| replace:: Enable Mode |br| (Privilege Escalation) + + +Using CLI in Ansible +==================== + +Example CLI ``group_vars/icx.yml`` +---------------------------------- + +.. code-block:: yaml + + ansible_connection: ansible.netcommon.network_cli + ansible_network_os: community.network.icx + ansible_user: myuser + ansible_password: !vault... + ansible_become: yes + ansible_become_method: enable + ansible_become_password: !vault... + ansible_ssh_common_args: '-o ProxyCommand="ssh -W %h:%p -q bastion01"' + + +- If you are using SSH keys (including an ssh-agent) you can remove the ``ansible_password`` configuration. +- If you are accessing your host directly (not through a bastion/jump host) you can remove the ``ansible_ssh_common_args`` configuration. +- If you are accessing your host through a bastion/jump host, you cannot include your SSH password in the ``ProxyCommand`` directive. To prevent secrets from leaking out (for example in ``ps`` output), SSH does not support providing passwords via environment variables. + +Example CLI task +---------------- + +.. code-block:: yaml + + - name: Backup current switch config (icx) + community.network.icx_config: + backup: yes + register: backup_icx_location + when: ansible_network_os == 'community.network.icx' + +.. include:: shared_snippets/SSH_warning.txt + +.. 
seealso:: + + :ref:`timeout_options` diff --git a/docs/docsite/rst/network/user_guide/platform_index.rst b/docs/docsite/rst/network/user_guide/platform_index.rst new file mode 100644 index 00000000..ad372de3 --- /dev/null +++ b/docs/docsite/rst/network/user_guide/platform_index.rst @@ -0,0 +1,121 @@ +.. _platform_options: + +**************** +Platform Options +**************** + +Some Ansible Network platforms support multiple connection types, privilege escalation (``enable`` mode), or other options. The pages in this section offer standardized guides to understanding available options on each network platform. We welcome contributions from community-maintained platforms to this section. + +.. toctree:: + :maxdepth: 2 + :caption: Platform Options + + platform_ce + platform_cnos + platform_dellos6 + platform_dellos9 + platform_dellos10 + platform_enos + platform_eos + platform_eric_eccli + platform_exos + platform_frr + platform_icx + platform_ios + platform_iosxr + platform_ironware + platform_junos + platform_meraki + platform_netvisor + platform_nos + platform_nxos + platform_routeros + platform_slxos + platform_voss + platform_vyos + platform_netconf_enabled + +.. _settings_by_platform: + +Settings by Platform +================================ + +.. raw:: html + + <style> + /* Style for this single table. Add delimiters between header columns */ + table#network-platform-table thead tr th.head { + border-left-width: 1px; + border-left-color: rgb(225, 228, 229); + border-left-style: solid; + } + </style> + +.. table:: + :name: network-platform-table + + =============================== ================================ =========== ======= ======= =========== + .. 
``ansible_connection:`` settings available
+ ----------------------------------------------------------------- ------------------------------------------
+ Network OS ``ansible_network_os:`` network_cli netconf httpapi local
+ =============================== ================================ =========== ======= ======= ===========
+ `Arista EOS`_ `[†]`_ ``arista.eos.eos`` ✓ ✓ ✓
+ `Ciena SAOS6`_ ``ciena.saos6.saos6`` ✓ ✓
+ `Cisco ASA`_ `[†]`_ ``cisco.asa.asa`` ✓ ✓
+ `Cisco IOS`_ `[†]`_ ``cisco.ios.ios`` ✓ ✓
+ `Cisco IOS XR`_ `[†]`_ ``cisco.iosxr.iosxr`` ✓ ✓
+ `Cisco NX-OS`_ `[†]`_ ``cisco.nxos.nxos`` ✓ ✓ ✓
+ `Cloudengine OS`_ ``community.network.ce`` ✓ ✓ ✓
+ `Dell OS6`_ ``dellemc.os6.os6`` ✓ ✓
+ `Dell OS9`_ ``dellemc.os9.os9`` ✓ ✓
+ `Dell OS10`_ ``dellemc.os10.os10`` ✓ ✓
+ `Ericsson ECCLI`_ ``community.network.eric_eccli`` ✓ ✓
+ `Extreme EXOS`_ ``community.network.exos`` ✓ ✓
+ `Extreme IronWare`_ ``community.network.ironware`` ✓ ✓
+ `Extreme NOS`_ ``community.network.nos`` ✓
+ `Extreme SLX-OS`_ ``community.network.slxos`` ✓
+ `Extreme VOSS`_ ``community.network.voss`` ✓
+ `F5 BIG-IP`_ ✓
+ `F5 BIG-IQ`_ ✓
+ `Junos OS`_ `[†]`_ ``junipernetworks.junos.junos`` ✓ ✓ ✓
+ `Lenovo CNOS`_ ``community.network.cnos`` ✓ ✓
+ `Lenovo ENOS`_ ``community.network.enos`` ✓ ✓
+ `Meraki`_ ✓
+ `MikroTik RouterOS`_ ``community.network.routeros`` ✓
+ `Nokia SR OS`_ ✓
+ `Pluribus Netvisor`_ ``community.network.netvisor`` ✓
+ `Ruckus ICX`_ ``community.network.icx`` ✓
+ `VyOS`_ `[†]`_ ``vyos.vyos.vyos`` ✓ ✓
+ OS that supports Netconf `[†]`_ ``<network-os>`` ✓ ✓
+ =============================== ================================ =========== ======= ======= ===========
+
+.. _Arista EOS: https://galaxy.ansible.com/arista/eos
+.. _Ciena SAOS6: https://galaxy.ansible.com/ciena/saos6
+.. _Cisco ASA: https://galaxy.ansible.com/cisco/asa
+.. _Cisco IOS: https://galaxy.ansible.com/cisco/ios
+.. _Cisco IOS XR: https://galaxy.ansible.com/cisco/iosxr
+.. 
_Cisco NX-OS: https://galaxy.ansible.com/cisco/nxos +.. _Cloudengine OS: https://galaxy.ansible.com/community/network +.. _Dell OS6: https://github.com/ansible-collections/dellemc.os6 +.. _Dell OS9: https://github.com/ansible-collections/dellemc.os9 +.. _Dell OS10: https://galaxy.ansible.com/dellemc/os10 +.. _Ericsson ECCLI: https://galaxy.ansible.com/community/network +.. _Extreme EXOS: https://galaxy.ansible.com/community/network +.. _Extreme IronWare: https://galaxy.ansible.com/community/network +.. _Extreme NOS: https://galaxy.ansible.com/community/network +.. _Extreme SLX-OS: https://galaxy.ansible.com/community/network +.. _Extreme VOSS: https://galaxy.ansible.com/community/network +.. _F5 BIG-IP: https://galaxy.ansible.com/f5networks/f5_modules +.. _F5 BIG-IQ: https://galaxy.ansible.com/f5networks/f5_modules +.. _Junos OS: https://galaxy.ansible.com/junipernetworks/junos +.. _Lenovo CNOS: https://galaxy.ansible.com/community/network +.. _Lenovo ENOS: https://galaxy.ansible.com/community/network +.. _Meraki: https://galaxy.ansible.com/cisco/meraki +.. _MikroTik RouterOS: https://galaxy.ansible.com/community/network +.. _Nokia SR OS: https://galaxy.ansible.com/community/network +.. _Pluribus Netvisor: https://galaxy.ansible.com/community/network +.. _Ruckus ICX: https://galaxy.ansible.com/community/network +.. _VyOS: https://galaxy.ansible.com/vyos/vyos +.. _`[†]`: + +**[†]** Maintained by Ansible Network Team diff --git a/docs/docsite/rst/network/user_guide/platform_ios.rst b/docs/docsite/rst/network/user_guide/platform_ios.rst new file mode 100644 index 00000000..1c53a5ca --- /dev/null +++ b/docs/docsite/rst/network/user_guide/platform_ios.rst @@ -0,0 +1,79 @@ +.. _ios_platform_options: + +*************************************** +IOS Platform Options +*************************************** + +The `Cisco IOS <https://galaxy.ansible.com/cisco/ios>`_ collection supports Enable Mode (Privilege Escalation). 
This page offers details on how to use Enable Mode on IOS in Ansible. + +.. contents:: + :local: + +Connections available +================================================================================ + +.. table:: + :class: documentation-table + + ==================== ========================================== + .. CLI + ==================== ========================================== + Protocol SSH + + Credentials uses SSH keys / SSH-agent if present + + accepts ``-u myuser -k`` if using password + + Indirect Access via a bastion (jump host) + + Connection Settings ``ansible_connection: ansible.netcommon.network_cli`` + + |enable_mode| supported: use ``ansible_become: yes`` with + ``ansible_become_method: enable`` and ``ansible_become_password:`` + + Returned Data Format ``stdout[0].`` + ==================== ========================================== + +.. |enable_mode| replace:: Enable Mode |br| (Privilege Escalation) + + +The ``ansible_connection: local`` has been deprecated. Please use ``ansible_connection: ansible.netcommon.network_cli`` instead. + +Using CLI in Ansible +==================== + +Example CLI ``group_vars/ios.yml`` +---------------------------------- + +.. code-block:: yaml + + ansible_connection: ansible.netcommon.network_cli + ansible_network_os: cisco.ios.ios + ansible_user: myuser + ansible_password: !vault... + ansible_become: yes + ansible_become_method: enable + ansible_become_password: !vault... + ansible_ssh_common_args: '-o ProxyCommand="ssh -W %h:%p -q bastion01"' + + +- If you are using SSH keys (including an ssh-agent) you can remove the ``ansible_password`` configuration. +- If you are accessing your host directly (not through a bastion/jump host) you can remove the ``ansible_ssh_common_args`` configuration. +- If you are accessing your host through a bastion/jump host, you cannot include your SSH password in the ``ProxyCommand`` directive. 
To prevent secrets from leaking out (for example in ``ps`` output), SSH does not support providing passwords via environment variables. + +Example CLI task +---------------- + +.. code-block:: yaml + + - name: Backup current switch config (ios) + cisco.ios.ios_config: + backup: yes + register: backup_ios_location + when: ansible_network_os == 'cisco.ios.ios' + +.. include:: shared_snippets/SSH_warning.txt + +.. seealso:: + + :ref:`timeout_options` diff --git a/docs/docsite/rst/network/user_guide/platform_iosxr.rst b/docs/docsite/rst/network/user_guide/platform_iosxr.rst new file mode 100644 index 00000000..1e1eab27 --- /dev/null +++ b/docs/docsite/rst/network/user_guide/platform_iosxr.rst @@ -0,0 +1,130 @@ +.. _iosxr_platform_options: + +*************************************** +IOS-XR Platform Options +*************************************** + +The `Cisco IOS-XR collection <https://galaxy.ansible.com/cisco/iosxr>`_ supports multiple connections. This page offers details on how each connection works in Ansible and how to use it. + +.. contents:: + :local: + +Connections available +================================================================================ + +.. table:: + :class: documentation-table + + ==================== ========================================== ========================= + .. 
CLI NETCONF + + only for modules ``iosxr_banner``, + ``iosxr_interface``, ``iosxr_logging``, + ``iosxr_system``, ``iosxr_user`` + ==================== ========================================== ========================= + Protocol SSH XML over SSH + + Credentials uses SSH keys / SSH-agent if present uses SSH keys / SSH-agent if present + + accepts ``-u myuser -k`` if using password accepts ``-u myuser -k`` if using password + + Indirect Access via a bastion (jump host) via a bastion (jump host) + + Connection Settings ``ansible_connection:`` ``ansible_connection:`` + ``ansible.netcommon.network_cli`` ``ansible.netcommon.netconf`` + + |enable_mode| not supported not supported + + Returned Data Format Refer to individual module documentation Refer to individual module documentation + ==================== ========================================== ========================= + +.. |enable_mode| replace:: Enable Mode |br| (Privilege Escalation) + + +The ``ansible_connection: local`` has been deprecated. Please use ``ansible_connection: ansible.netcommon.network_cli`` or ``ansible_connection: ansible.netcommon.netconf`` instead. + +Using CLI in Ansible +==================== + +Example CLI inventory ``[iosxr:vars]`` +-------------------------------------- + +.. code-block:: yaml + + [iosxr:vars] + ansible_connection=ansible.netcommon.network_cli + ansible_network_os=cisco.iosxr.iosxr + ansible_user=myuser + ansible_password=!vault... + ansible_ssh_common_args='-o ProxyCommand="ssh -W %h:%p -q bastion01"' + + +- If you are using SSH keys (including an ssh-agent) you can remove the ``ansible_password`` configuration. +- If you are accessing your host directly (not through a bastion/jump host) you can remove the ``ansible_ssh_common_args`` configuration. +- If you are accessing your host through a bastion/jump host, you cannot include your SSH password in the ``ProxyCommand`` directive. 
To prevent secrets from leaking out (for example in ``ps`` output), SSH does not support providing passwords via environment variables. + +Example CLI task +---------------- + +.. code-block:: yaml + + - name: Retrieve IOS-XR version + cisco.iosxr.iosxr_command: + commands: show version + when: ansible_network_os == 'cisco.iosxr.iosxr' + + +Using NETCONF in Ansible +======================== + +Enabling NETCONF +---------------- + +Before you can use NETCONF to connect to a switch, you must: + +- install the ``ncclient`` python package on your control node(s) with ``pip install ncclient`` +- enable NETCONF on the Cisco IOS-XR device(s) + +To enable NETCONF on a new switch via Ansible, use the ``cisco.iosxr.iosxr_netconf`` module through the CLI connection. Set up your platform-level variables just like in the CLI example above, then run a playbook task like this: + +.. code-block:: yaml + + - name: Enable NETCONF + connection: ansible.netcommon.network_cli + cisco.iosxr.iosxr_netconf: + when: ansible_network_os == 'cisco.iosxr.iosxr' + +Once NETCONF is enabled, change your variables to use the NETCONF connection. + +Example NETCONF inventory ``[iosxr:vars]`` +------------------------------------------ + +.. code-block:: yaml + + [iosxr:vars] + ansible_connection=ansible.netcommon.netconf + ansible_network_os=cisco.iosxr.iosxr + ansible_user=myuser + ansible_password=!vault | + ansible_ssh_common_args='-o ProxyCommand="ssh -W %h:%p -q bastion01"' + + +Example NETCONF task +-------------------- + +.. code-block:: yaml + + - name: Configure hostname and domain-name + cisco.iosxr.iosxr_system: + hostname: iosxr01 + domain_name: test.example.com + domain_search: + - ansible.com + - redhat.com + - cisco.com + +.. include:: shared_snippets/SSH_warning.txt + +.. 
seealso:: + + :ref:`timeout_options` diff --git a/docs/docsite/rst/network/user_guide/platform_ironware.rst b/docs/docsite/rst/network/user_guide/platform_ironware.rst new file mode 100644 index 00000000..a17141c4 --- /dev/null +++ b/docs/docsite/rst/network/user_guide/platform_ironware.rst @@ -0,0 +1,80 @@ +.. _ironware_platform_options: + +*************************************** +IronWare Platform Options +*************************************** + +IronWare is part of the `community.network <https://galaxy.ansible.com/community/network>`_ collection and supports Enable Mode (Privilege Escalation). This page offers details on how to use Enable Mode on IronWare in Ansible. + +.. contents:: + :local: + +Connections available +================================================================================ + +.. table:: + :class: documentation-table + + ==================== ========================================== + .. CLI + ==================== ========================================== + Protocol SSH + + Credentials uses SSH keys / SSH-agent if present + + accepts ``-u myuser -k`` if using password + + Indirect Access via a bastion (jump host) + + Connection Settings ``ansible_connection: ansible.netcommon.network_cli`` + + |enable_mode| supported: use ``ansible_become: yes`` + with ``ansible_become_method: enable`` + and ``ansible_become_password:`` + + Returned Data Format ``stdout[0].`` + ==================== ========================================== + +.. |enable_mode| replace:: Enable Mode |br| (Privilege Escalation) + + +The ``ansible_connection: local`` has been deprecated. Please use ``ansible_connection: ansible.netcommon.network_cli`` instead. + +Using CLI in Ansible +==================== + +Example CLI ``group_vars/mlx.yml`` +---------------------------------- + +.. code-block:: yaml + + ansible_connection: ansible.netcommon.network_cli + ansible_network_os: community.network.ironware + ansible_user: myuser + ansible_password: !vault... 
+ ansible_become: yes + ansible_become_method: enable + ansible_become_password: !vault... + ansible_ssh_common_args: '-o ProxyCommand="ssh -W %h:%p -q bastion01"' + + +- If you are using SSH keys (including an ssh-agent) you can remove the ``ansible_password`` configuration. +- If you are accessing your host directly (not through a bastion/jump host) you can remove the ``ansible_ssh_common_args`` configuration. +- If you are accessing your host through a bastion/jump host, you cannot include your SSH password in the ``ProxyCommand`` directive. To prevent secrets from leaking out (for example in ``ps`` output), SSH does not support providing passwords via environment variables. + +Example CLI task +---------------- + +.. code-block:: yaml + + - name: Backup current switch config (ironware) + community.network.ironware_config: + backup: yes + register: backup_ironware_location + when: ansible_network_os == 'community.network.ironware' + +.. include:: shared_snippets/SSH_warning.txt + +.. seealso:: + + :ref:`timeout_options` diff --git a/docs/docsite/rst/network/user_guide/platform_junos.rst b/docs/docsite/rst/network/user_guide/platform_junos.rst new file mode 100644 index 00000000..3b838103 --- /dev/null +++ b/docs/docsite/rst/network/user_guide/platform_junos.rst @@ -0,0 +1,129 @@ +.. _junos_platform_options: + +*************************************** +Junos OS Platform Options +*************************************** + +The `Juniper Junos OS <https://galaxy.ansible.com/junipernetworks/junos>`_ supports multiple connections. This page offers details on how each connection works in Ansible and how to use it. + +.. contents:: + :local: + +Connections available +================================================================================ + +.. table:: + :class: documentation-table + + ==================== ========================================== ========================= + .. 
CLI NETCONF + + ``junos_netconf`` & ``junos_command`` all modules except ``junos_netconf``, + modules only which enables NETCONF + ==================== ========================================== ========================= + Protocol SSH XML over SSH + + Credentials uses SSH keys / SSH-agent if present uses SSH keys / SSH-agent if present + + accepts ``-u myuser -k`` if using password accepts ``-u myuser -k`` if using password + + Indirect Access via a bastion (jump host) via a bastion (jump host) + + Connection Settings ``ansible_connection: ``ansible_connection: + ``ansible.netcommon.network_cli`` ``ansible.netcommon.netconf`` + + |enable_mode| not supported by Junos OS not supported by Junos OS + + Returned Data Format ``stdout[0].`` * json: ``result[0]['software-information'][0]['host-name'][0]['data'] foo lo0`` + * text: ``result[1].interface-information[0].physical-interface[0].name[0].data foo lo0`` + * xml: ``result[1].rpc-reply.interface-information[0].physical-interface[0].name[0].data foo lo0`` + ==================== ========================================== ========================= + +.. |enable_mode| replace:: Enable Mode |br| (Privilege Escalation) + + +The ``ansible_connection: local`` has been deprecated. Please use ``ansible_connection: ansible.netcommon.network_cli`` or ``ansible_connection: ansible.netcommon.netconf`` instead. + +Using CLI in Ansible +==================== + +Example CLI inventory ``[junos:vars]`` +-------------------------------------- + +.. code-block:: yaml + + [junos:vars] + ansible_connection=ansible.netcommon.network_cli + ansible_network_os=junipernetworks.junos.junos + ansible_user=myuser + ansible_password=!vault... + ansible_ssh_common_args='-o ProxyCommand="ssh -W %h:%p -q bastion01"' + + +- If you are using SSH keys (including an ssh-agent) you can remove the ``ansible_password`` configuration. 
+- If you are accessing your host directly (not through a bastion/jump host) you can remove the ``ansible_ssh_common_args`` configuration. +- If you are accessing your host through a bastion/jump host, you cannot include your SSH password in the ``ProxyCommand`` directive. To prevent secrets from leaking out (for example in ``ps`` output), SSH does not support providing passwords via environment variables. + +Example CLI task +---------------- + +.. code-block:: yaml + + - name: Retrieve Junos OS version + junipernetworks.junos.junos_command: + commands: show version + when: ansible_network_os == 'junipernetworks.junos.junos' + + +Using NETCONF in Ansible +======================== + +Enabling NETCONF +---------------- + +Before you can use NETCONF to connect to a switch, you must: + +- install the ``ncclient`` python package on your control node(s) with ``pip install ncclient`` +- enable NETCONF on the Junos OS device(s) + +To enable NETCONF on a new switch via Ansible, use the ``junipernetworks.junos.junos_netconf`` module through the CLI connection. Set up your platform-level variables just like in the CLI example above, then run a playbook task like this: + +.. code-block:: yaml + + - name: Enable NETCONF + connection: ansible.netcommon.network_cli + junipernetworks.junos.junos_netconf: + when: ansible_network_os == 'junipernetworks.junos.junos' + +Once NETCONF is enabled, change your variables to use the NETCONF connection. + +Example NETCONF inventory ``[junos:vars]`` +------------------------------------------ + +.. code-block:: yaml + + [junos:vars] + ansible_connection=ansible.netcommon.netconf + ansible_network_os=junipernetworks.junos.junos + ansible_user=myuser + ansible_password=!vault | + ansible_ssh_common_args='-o ProxyCommand="ssh -W %h:%p -q bastion01"' + + +Example NETCONF task +-------------------- + +.. 
code-block:: yaml + + - name: Backup current switch config (junos) + junipernetworks.junos.junos_config: + backup: yes + register: backup_junos_location + when: ansible_network_os == 'junipernetworks.junos.junos' + + +.. include:: shared_snippets/SSH_warning.txt + +.. seealso:: + + :ref:`timeout_options` diff --git a/docs/docsite/rst/network/user_guide/platform_meraki.rst b/docs/docsite/rst/network/user_guide/platform_meraki.rst new file mode 100644 index 00000000..e51ca5b9 --- /dev/null +++ b/docs/docsite/rst/network/user_guide/platform_meraki.rst @@ -0,0 +1,44 @@ +.. _meraki_platform_options: + +*************************************** +Meraki Platform Options +*************************************** + +The `cisco.meraki <https://galaxy.ansible.com/cisco/meraki>`_ collection only supports the ``local`` connection type at this time. + +.. contents:: + :local: + +Connections available +================================================================================ + +.. table:: + :class: documentation-table + + ==================== ========================================== + .. Dashboard API + ==================== ========================================== + Protocol HTTP(S) + + Credentials uses API key from Dashboard + + Connection Settings ``ansible_connection: localhost`` + + Returned Data Format ``data.`` + ==================== ========================================== + + +Example Meraki task +------------------- + +.. code-block:: yaml + + cisco.meraki.meraki_organization: + auth_key: abc12345 + org_name: YourOrg + state: present + delegate_to: localhost + +.. seealso:: + + :ref:`timeout_options` diff --git a/docs/docsite/rst/network/user_guide/platform_netconf_enabled.rst b/docs/docsite/rst/network/user_guide/platform_netconf_enabled.rst new file mode 100644 index 00000000..6169b076 --- /dev/null +++ b/docs/docsite/rst/network/user_guide/platform_netconf_enabled.rst @@ -0,0 +1,133 @@ +.. 
_netconf_enabled_platform_options: + +*************************************** +Netconf enabled Platform Options +*************************************** + +This page offers details on how the netconf connection works in Ansible and how to use it. + +.. contents:: + :local: + +Connections available +================================================================================ +.. table:: + :class: documentation-table + + ==================== ========================================== + .. NETCONF + + all modules except ``junos_netconf``, + which enables NETCONF + ==================== ========================================== + Protocol XML over SSH + + Credentials uses SSH keys / SSH-agent if present + + accepts ``-u myuser -k`` if using password + + Indirect Access via a bastion (jump host) + + Connection Settings ``ansible_connection: ansible.netcommon.netconf`` + ==================== ========================================== + + +The ``ansible_connection: local`` has been deprecated. Please use ``ansible_connection: ansible.netcommon.netconf`` instead. + +Using NETCONF in Ansible +======================== + +Enabling NETCONF +---------------- + +Before you can use NETCONF to connect to a switch, you must: + +- install the ``ncclient`` Python package on your control node(s) with ``pip install ncclient`` +- enable NETCONF on the Junos OS device(s) + +To enable NETCONF on a new switch via Ansible, use the platform specific module via the CLI connection or set it manually. +For example set up your platform-level variables just like in the CLI example above, then run a playbook task like this: + +.. code-block:: yaml + + - name: Enable NETCONF + connection: ansible.netcommon.network_cli + junipernetworks.junos.junos_netconf: + when: ansible_network_os == 'junipernetworks.junos.junos' + +Once NETCONF is enabled, change your variables to use the NETCONF connection. + +Example NETCONF inventory ``[junos:vars]`` +------------------------------------------ + +.. 
code-block:: yaml + + [junos:vars] + ansible_connection=ansible.netcommon.netconf + ansible_network_os=junipernetworks.junos.junos + ansible_user=myuser + ansible_password=!vault | + + +Example NETCONF task +-------------------- + +.. code-block:: yaml + + - name: Backup current switch config + junipernetworks.junos.netconf_config: + backup: yes + register: backup_junos_location + +Example NETCONF task with configurable variables +------------------------------------------------ + +.. code-block:: yaml + + - name: configure interface while providing different private key file path + junipernetworks.junos.netconf_config: + backup: yes + register: backup_junos_location + vars: + ansible_private_key_file: /home/admin/.ssh/newprivatekeyfile + +Note: For netconf connection plugin configurable variables see :ref:`ansible.netcommon.netconf <ansible_collections.ansible.netcommon.netconf_connection>`. + +Bastion/Jumphost configuration +------------------------------ +To use a jump host to connect to a NETCONF enabled device you must set the ``ANSIBLE_NETCONF_SSH_CONFIG`` environment variable. + +``ANSIBLE_NETCONF_SSH_CONFIG`` can be set to either: + - 1 or TRUE (to trigger the use of the default SSH config file ~/.ssh/config) + - The absolute path to a custom SSH config file. + +The SSH config file should look something like: + +.. code-block:: ini + + Host * + proxycommand ssh -o StrictHostKeyChecking=no -W %h:%p jumphost-username@jumphost.fqdn.com + StrictHostKeyChecking no + +Authentication for the jump host must use key based authentication. + +You can either specify the private key used in the SSH config file: + +.. code-block:: ini + + IdentityFile "/absolute/path/to/private-key.pem" + +Or you can use an ssh-agent. + +ansible_network_os auto-detection +--------------------------------- + +If ``ansible_network_os`` is not specified for a host, then Ansible will attempt to automatically detect what ``network_os`` plugin to use. 
+ +``ansible_network_os`` auto-detection can also be triggered by using ``auto`` as the ``ansible_network_os``. (Note: Previously ``default`` was used instead of ``auto``). + +.. include:: shared_snippets/SSH_warning.txt + +.. seealso:: + + :ref:`timeout_options` diff --git a/docs/docsite/rst/network/user_guide/platform_netvisor.rst b/docs/docsite/rst/network/user_guide/platform_netvisor.rst new file mode 100644 index 00000000..57748658 --- /dev/null +++ b/docs/docsite/rst/network/user_guide/platform_netvisor.rst @@ -0,0 +1,78 @@ +.. _netvisor_platform_options: + +********************************** +Pluribus NETVISOR Platform Options +********************************** + +Pluribus NETVISOR Ansible is part of the `community.network <https://galaxy.ansible.com/community/network>`_ collection and only supports CLI connections today. ``httpapi`` modules may be added in future. +This page offers details on how to use ``ansible.netcommon.network_cli`` on NETVISOR in Ansible. + +.. contents:: + :local: + +Connections available +================================================================================ + +.. table:: + :class: documentation-table + + ==================== ========================================== + .. CLI + ==================== ========================================== + Protocol SSH + + Credentials uses SSH keys / SSH-agent if present + + accepts ``-u myuser -k`` if using password + + Indirect Access via a bastion (jump host) + + Connection Settings ``ansible_connection: ansible.netcommon.network_cli`` + + |enable_mode| not supported by NETVISOR + + Returned Data Format ``stdout[0].`` + ==================== ========================================== + +.. |enable_mode| replace:: Enable Mode |br| (Privilege Escalation) + +Pluribus NETVISOR does not support ``ansible_connection: local``. You must use ``ansible_connection: ansible.netcommon.network_cli``. 
+ +Using CLI in Ansible +==================== + +Example CLI ``group_vars/netvisor.yml`` +--------------------------------------- + +.. code-block:: yaml + + ansible_connection: ansible.netcommon.network_cli + ansible_network_os: community.netcommon.netvisor + ansible_user: myuser + ansible_password: !vault... + ansible_ssh_common_args: '-o ProxyCommand="ssh -W %h:%p -q bastion01"' + + +- If you are using SSH keys (including an ssh-agent) you can remove the ``ansible_password`` configuration. +- If you are accessing your host directly (not through a bastion/jump host) you can remove the ``ansible_ssh_common_args`` configuration. +- If you are accessing your host through a bastion/jump host, you cannot include your SSH password in the ``ProxyCommand`` directive. To prevent secrets from leaking out (for example in ``ps`` output), SSH does not support providing passwords via environment variables. + +Example CLI task +---------------- + +.. code-block:: yaml + + - name: Create access list + community.network.pn_access_list: + pn_name: "foo" + pn_scope: "local" + state: "present" + register: acc_list + when: ansible_network_os == 'community.network.netvisor' + + +.. include:: shared_snippets/SSH_warning.txt + +.. seealso:: + + :ref:`timeout_options` diff --git a/docs/docsite/rst/network/user_guide/platform_nos.rst b/docs/docsite/rst/network/user_guide/platform_nos.rst new file mode 100644 index 00000000..0ea3f529 --- /dev/null +++ b/docs/docsite/rst/network/user_guide/platform_nos.rst @@ -0,0 +1,76 @@ +.. _nos_platform_options: + +*************************************** +NOS Platform Options +*************************************** + +Extreme NOS is part of the `community.network <https://galaxy.ansible.com/community/network>`_ collection and only supports CLI connections today. ``httpapi`` modules may be added in future. +This page offers details on how to use ``ansible.netcommon.network_cli`` on NOS in Ansible. + +.. 
contents:: + :local: + +Connections available +================================================================================ + +.. table:: + :class: documentation-table + + ==================== ========================================== + .. CLI + ==================== ========================================== + Protocol SSH + + Credentials uses SSH keys / SSH-agent if present + + accepts ``-u myuser -k`` if using password + + Indirect Access via a bastion (jump host) + + Connection Settings ``ansible_connection: community.netcommon.network_cli`` + + |enable_mode| not supported by NOS + + Returned Data Format ``stdout[0].`` + ==================== ========================================== + +.. |enable_mode| replace:: Enable Mode |br| (Privilege Escalation) + +NOS does not support ``ansible_connection: local``. You must use ``ansible_connection: ansible.netcommon.network_cli``. + +Using CLI in Ansible +==================== + +Example CLI ``group_vars/nos.yml`` +---------------------------------- + +.. code-block:: yaml + + ansible_connection: ansible.netcommon.network_cli + ansible_network_os: community.network.nos + ansible_user: myuser + ansible_password: !vault... + ansible_ssh_common_args: '-o ProxyCommand="ssh -W %h:%p -q bastion01"' + + +- If you are using SSH keys (including an ssh-agent) you can remove the ``ansible_password`` configuration. +- If you are accessing your host directly (not through a bastion/jump host) you can remove the ``ansible_ssh_common_args`` configuration. +- If you are accessing your host through a bastion/jump host, you cannot include your SSH password in the ``ProxyCommand`` directive. To prevent secrets from leaking out (for example in ``ps`` output), SSH does not support providing passwords via environment variables. + +Example CLI task +---------------- + +.. 
code-block:: yaml + + - name: Get version information (nos) + community.network.nos_command: + commands: "show version" + register: show_ver + when: ansible_network_os == 'community.network.nos' + + +.. include:: shared_snippets/SSH_warning.txt + +.. seealso:: + + :ref:`timeout_options` diff --git a/docs/docsite/rst/network/user_guide/platform_nxos.rst b/docs/docsite/rst/network/user_guide/platform_nxos.rst new file mode 100644 index 00000000..e698b2d9 --- /dev/null +++ b/docs/docsite/rst/network/user_guide/platform_nxos.rst @@ -0,0 +1,164 @@ +.. _nxos_platform_options: + +*************************************** +NXOS Platform Options +*************************************** + +The `Cisco NXOS <https://galaxy.ansible.com/cisco/nxos>`_ supports multiple connections. This page offers details on how each connection works in Ansible and how to use it. + +.. contents:: + :local: + +Connections available +================================================================================ + +.. table:: + :class: documentation-table + + ==================== ========================================== ========================= + .. CLI NX-API + ==================== ========================================== ========================= + Protocol SSH HTTP(S) + + Credentials uses SSH keys / SSH-agent if present uses HTTPS certificates if + present + accepts ``-u myuser -k`` if using password + + Indirect Access via a bastion (jump host) via a web proxy + + Connection Settings ``ansible_connection:`` ``ansible_connection:`` + ``ansible.netcommon.network_cli`` ``ansible.netcommon.httpapi`` + + |enable_mode| supported: use ``ansible_become: yes`` not supported by NX-API + with ``ansible_become_method: enable`` + and ``ansible_become_password:`` + + Returned Data Format ``stdout[0].`` ``stdout[0].messages[0].`` + ==================== ========================================== ========================= + +.. 
|enable_mode| replace:: Enable Mode |br| (Privilege Escalation) |br| supported as of 2.5.3 + + +The ``ansible_connection: local`` has been deprecated. Please use ``ansible_connection: ansible.netcommon.network_cli`` or ``ansible_connection: ansible.netcommon.httpapi`` instead. + +Using CLI in Ansible +==================== + +Example CLI ``group_vars/nxos.yml`` +----------------------------------- + +.. code-block:: yaml + + ansible_connection: ansible.netcommon.network_cli + ansible_network_os: cisco.nxos.nxos + ansible_user: myuser + ansible_password: !vault... + ansible_become: yes + ansible_become_method: enable + ansible_become_password: !vault... + ansible_ssh_common_args: '-o ProxyCommand="ssh -W %h:%p -q bastion01"' + + +- If you are using SSH keys (including an ssh-agent) you can remove the ``ansible_password`` configuration. +- If you are accessing your host directly (not through a bastion/jump host) you can remove the ``ansible_ssh_common_args`` configuration. +- If you are accessing your host through a bastion/jump host, you cannot include your SSH password in the ``ProxyCommand`` directive. To prevent secrets from leaking out (for example in ``ps`` output), SSH does not support providing passwords via environment variables. + +Example CLI task +---------------- + +.. code-block:: yaml + + - name: Backup current switch config (nxos) + cisco.nxos.nxos_config: + backup: yes + register: backup_nxos_location + when: ansible_network_os == 'cisco.nxos.nxos' + + + +Using NX-API in Ansible +======================= + +Enabling NX-API +--------------- + +Before you can use NX-API to connect to a switch, you must enable NX-API. To enable NX-API on a new switch via Ansible, use the ``nxos_nxapi`` module via the CLI connection. Set up group_vars/nxos.yml just like in the CLI example above, then run a playbook task like this: + +.. 
code-block:: yaml + + - name: Enable NX-API + cisco.nxos.nxos_nxapi: + enable_http: yes + enable_https: yes + when: ansible_network_os == 'cisco.nxos.nxos' + +To find out more about the options for enabling HTTP/HTTPS and local http see the :ref:`nxos_nxapi <nxos_nxapi_module>` module documentation. + +Once NX-API is enabled, change your ``group_vars/nxos.yml`` to use the NX-API connection. + +Example NX-API ``group_vars/nxos.yml`` +-------------------------------------- + +.. code-block:: yaml + + ansible_connection: ansible.netcommon.httpapi + ansible_network_os: cisco.nxos.nxos + ansible_user: myuser + ansible_password: !vault... + proxy_env: + http_proxy: http://proxy.example.com:8080 + +- If you are accessing your host directly (not through a web proxy) you can remove the ``proxy_env`` configuration. +- If you are accessing your host through a web proxy using ``https``, change ``http_proxy`` to ``https_proxy``. + + +Example NX-API task +------------------- + +.. code-block:: yaml + + - name: Backup current switch config (nxos) + cisco.nxos.nxos_config: + backup: yes + register: backup_nxos_location + environment: "{{ proxy_env }}" + when: ansible_network_os == 'cisco.nxos.nxos' + +In this example the ``proxy_env`` variable defined in ``group_vars`` gets passed to the ``environment`` option of the module used in the task. + +.. include:: shared_snippets/SSH_warning.txt + +Cisco Nexus platform support matrix +=================================== + +The following platforms and software versions have been certified by Cisco to work with this version of Ansible. + +.. 
table:: Platform / Software Minimum Requirements + :align: center + + =================== ===================== + Supported Platforms Minimum NX-OS Version + =================== ===================== + Cisco Nexus N3k 7.0(3)I2(5) and later + Cisco Nexus N9k 7.0(3)I2(5) and later + Cisco Nexus N5k 7.3(0)N1(1) and later + Cisco Nexus N6k 7.3(0)N1(1) and later + Cisco Nexus N7k 7.3(0)D1(1) and later + =================== ===================== + +.. table:: Platform Models + :align: center + + ======== ============================================== + Platform Description + ======== ============================================== + N3k Support includes N30xx, N31xx and N35xx models + N5k Support includes all N5xxx models + N6k Support includes all N6xxx models + N7k Support includes all N7xxx models + N9k Support includes all N9xxx models + ======== ============================================== + +.. seealso:: + + :ref:`timeout_options` diff --git a/docs/docsite/rst/network/user_guide/platform_routeros.rst b/docs/docsite/rst/network/user_guide/platform_routeros.rst new file mode 100644 index 00000000..387db92d --- /dev/null +++ b/docs/docsite/rst/network/user_guide/platform_routeros.rst @@ -0,0 +1,80 @@ +.. _routeros_platform_options: + +*************************************** +RouterOS Platform Options +*************************************** + +RouterOS is part of the `community.network <https://galaxy.ansible.com/community/network>`_ collection and only supports CLI connections today. ``httpapi`` modules may be added in future. +This page offers details on how to use ``ansible.netcommon.network_cli`` on RouterOS in Ansible. + +.. contents:: + :local: + +Connections available +================================================================================ + +.. table:: + :class: documentation-table + + ==================== ========================================== + .. 
CLI + ==================== ========================================== + Protocol SSH + + Credentials uses SSH keys / SSH-agent if present + + accepts ``-u myuser -k`` if using password + + Indirect Access via a bastion (jump host) + + Connection Settings ``ansible_connection: ansible.network.network_cli`` + + |enable_mode| not supported by RouterOS + + Returned Data Format ``stdout[0].`` + ==================== ========================================== + +.. |enable_mode| replace:: Enable Mode |br| (Privilege Escalation) + + +RouterOS does not support ``ansible_connection: local``. You must use ``ansible_connection: ansible.netcommon.network_cli``. + +Using CLI in Ansible +==================== + +Example CLI ``group_vars/routeros.yml`` +--------------------------------------- + +.. code-block:: yaml + + ansible_connection: ansible.netcommon.network_cli + ansible_network_os: community.network.routeros + ansible_user: myuser + ansible_password: !vault... + ansible_become: yes + ansible_become_method: enable + ansible_become_password: !vault... + ansible_ssh_common_args: '-o ProxyCommand="ssh -W %h:%p -q bastion01"' + + +- If you are using SSH keys (including an ssh-agent) you can remove the ``ansible_password`` configuration. +- If you are accessing your host directly (not through a bastion/jump host) you can remove the ``ansible_ssh_common_args`` configuration. +- If you are accessing your host through a bastion/jump host, you cannot include your SSH password in the ``ProxyCommand`` directive. To prevent secrets from leaking out (for example in ``ps`` output), SSH does not support providing passwords via environment variables. +- If you are getting timeout errors you may want to add ``+cet1024w`` suffix to your username which will disable console colors, enable "dumb" mode, tell RouterOS not to try detecting terminal capabilities and set terminal width to 1024 columns. 
See article `Console login process <https://wiki.mikrotik.com/wiki/Manual:Console_login_process>`_ in MikroTik wiki for more information. + +Example CLI task +---------------- + +.. code-block:: yaml + + - name: Display resource statistics (routeros) + community.network.routeros_command: + commands: /system resource print + register: routeros_resources + when: ansible_network_os == 'community.network.routeros' + +.. include:: shared_snippets/SSH_warning.txt + +.. seealso:: + + :ref:`timeout_options` diff --git a/docs/docsite/rst/network/user_guide/platform_slxos.rst b/docs/docsite/rst/network/user_guide/platform_slxos.rst new file mode 100644 index 00000000..f433599c --- /dev/null +++ b/docs/docsite/rst/network/user_guide/platform_slxos.rst @@ -0,0 +1,77 @@ +.. _slxos_platform_options: + +*************************************** +SLX-OS Platform Options +*************************************** + +Extreme SLX-OS is part of the `community.network <https://galaxy.ansible.com/community/network>`_ collection and only supports CLI connections today. ``httpapi`` modules may be added in future. +This page offers details on how to use ``ansible.netcommon.network_cli`` on SLX-OS in Ansible. + +.. contents:: + :local: + +Connections available +================================================================================ + +.. table:: + :class: documentation-table + + ==================== ========================================== + .. CLI + ==================== ========================================== + Protocol SSH + + Credentials uses SSH keys / SSH-agent if present + + accepts ``-u myuser -k`` if using password + + Indirect Access via a bastion (jump host) + + Connection Settings ``ansible_connection: ansible.netcommon.network_cli`` + + |enable_mode| not supported by SLX-OS + + Returned Data Format ``stdout[0].`` + ==================== ========================================== + +.. 
|enable_mode| replace:: Enable Mode |br| (Privilege Escalation) + + +SLX-OS does not support ``ansible_connection: local``. You must use ``ansible_connection: ansible.netcommon.network_cli``. + +Using CLI in Ansible +==================== + +Example CLI ``group_vars/slxos.yml`` +------------------------------------ + +.. code-block:: yaml + + ansible_connection: ansible.netcommon.network_cli + ansible_network_os: community.network.slxos + ansible_user: myuser + ansible_password: !vault... + ansible_ssh_common_args: '-o ProxyCommand="ssh -W %h:%p -q bastion01"' + + +- If you are using SSH keys (including an ssh-agent) you can remove the ``ansible_password`` configuration. +- If you are accessing your host directly (not through a bastion/jump host) you can remove the ``ansible_ssh_common_args`` configuration. +- If you are accessing your host through a bastion/jump host, you cannot include your SSH password in the ``ProxyCommand`` directive. To prevent secrets from leaking out (for example in ``ps`` output), SSH does not support providing passwords via environment variables. + +Example CLI task +---------------- + +.. code-block:: yaml + + - name: Backup current switch config (slxos) + community.network.slxos_config: + backup: yes + register: backup_slxos_location + when: ansible_network_os == 'community.network.slxos' + + +.. include:: shared_snippets/SSH_warning.txt + +.. seealso:: + + :ref:`timeout_options` diff --git a/docs/docsite/rst/network/user_guide/platform_voss.rst b/docs/docsite/rst/network/user_guide/platform_voss.rst new file mode 100644 index 00000000..b532e224 --- /dev/null +++ b/docs/docsite/rst/network/user_guide/platform_voss.rst @@ -0,0 +1,78 @@ +.. _voss_platform_options: + +*************************************** +VOSS Platform Options +*************************************** + +Extreme VOSS is part of the `community.network <https://galaxy.ansible.com/community/network>`_ collection and only supports CLI connections today. 
This page offers details on how to +use ``ansible.netcommon.network_cli`` on VOSS in Ansible. + +.. contents:: + :local: + +Connections available +================================================================================ + +.. table:: + :class: documentation-table + + ==================== ========================================== + .. CLI + ==================== ========================================== + Protocol SSH + + Credentials uses SSH keys / SSH-agent if present + + accepts ``-u myuser -k`` if using password + + Indirect Access via a bastion (jump host) + + Connection Settings ``ansible_connection: ansible.netcommon.network_cli`` + + |enable_mode| supported: use ``ansible_become: yes`` + with ``ansible_become_method: enable`` + + Returned Data Format ``stdout[0].`` + ==================== ========================================== + +.. |enable_mode| replace:: Enable Mode |br| (Privilege Escalation) + + +VOSS does not support ``ansible_connection: local``. You must use ``ansible_connection: ansible.netcommon.network_cli``. + +Using CLI in Ansible +==================== + +Example CLI ``group_vars/voss.yml`` +----------------------------------- + +.. code-block:: yaml + + ansible_connection: ansible.netcommon.network_cli + ansible_network_os: community.network.voss + ansible_user: myuser + ansible_become: yes + ansible_become_method: enable + ansible_password: !vault... + ansible_ssh_common_args: '-o ProxyCommand="ssh -W %h:%p -q bastion01"' + + +- If you are using SSH keys (including an ssh-agent) you can remove the ``ansible_password`` configuration. +- If you are accessing your host directly (not through a bastion/jump host) you can remove the ``ansible_ssh_common_args`` configuration. +- If you are accessing your host through a bastion/jump host, you cannot include your SSH password in the ``ProxyCommand`` directive. 
To prevent secrets from leaking out (for example in ``ps`` output), SSH does not support providing passwords via environment variables. + +Example CLI task +---------------- + +.. code-block:: yaml + + - name: Retrieve VOSS info + community.network.voss_command: + commands: show sys-info + when: ansible_network_os == 'community.network.voss' + +.. include:: shared_snippets/SSH_warning.txt + +.. seealso:: + + :ref:`timeout_options` diff --git a/docs/docsite/rst/network/user_guide/platform_vyos.rst b/docs/docsite/rst/network/user_guide/platform_vyos.rst new file mode 100644 index 00000000..5ec00f7b --- /dev/null +++ b/docs/docsite/rst/network/user_guide/platform_vyos.rst @@ -0,0 +1,74 @@ +.. _vyos_platform_options: + +*************************************** +VyOS Platform Options +*************************************** + +The `VyOS <https://galaxy.ansible.com/vyos/vyos>`_ collection supports the ``ansible.netcommon.network_cli`` connection type. This page offers details on connection options to manage VyOS using Ansible. + +.. contents:: + :local: + +Connections available +================================================================================ + +.. table:: + :class: documentation-table + + ==================== ========================================== + .. CLI + ==================== ========================================== + Protocol SSH + + Credentials uses SSH keys / SSH-agent if present + + accepts ``-u myuser -k`` if using password + + Indirect Access via a bastion (jump host) + + Connection Settings ``ansible_connection: ansible.netcommon.network_cli`` + + |enable_mode| not supported + + Returned Data Format Refer to individual module documentation + ==================== ========================================== + +.. |enable_mode| replace:: Enable Mode |br| (Privilege Escalation) + + +The ``ansible_connection: local`` has been deprecated. Please use ``ansible_connection: ansible.netcommon.network_cli`` instead. 
+ +Using CLI in Ansible +==================== + +Example CLI ``group_vars/vyos.yml`` +----------------------------------- + +.. code-block:: yaml + + ansible_connection: ansible.netcommon.network_cli + ansible_network_os: vyos.vyos.vyos + ansible_user: myuser + ansible_password: !vault... + ansible_ssh_common_args: '-o ProxyCommand="ssh -W %h:%p -q bastion01"' + + +- If you are using SSH keys (including an ssh-agent) you can remove the ``ansible_password`` configuration. +- If you are accessing your host directly (not through a bastion/jump host) you can remove the ``ansible_ssh_common_args`` configuration. +- If you are accessing your host through a bastion/jump host, you cannot include your SSH password in the ``ProxyCommand`` directive. To prevent secrets from leaking out (for example in ``ps`` output), SSH does not support providing passwords via environment variables. + +Example CLI task +---------------- + +.. code-block:: yaml + + - name: Retrieve VyOS version info + vyos.vyos.vyos_command: + commands: show version + when: ansible_network_os == 'vyos.vyos.vyos' + +.. include:: shared_snippets/SSH_warning.txt + +.. seealso:: + + :ref:`timeout_options` diff --git a/docs/docsite/rst/network/user_guide/shared_snippets/SSH_warning.txt b/docs/docsite/rst/network/user_guide/shared_snippets/SSH_warning.txt new file mode 100644 index 00000000..27424f57 --- /dev/null +++ b/docs/docsite/rst/network/user_guide/shared_snippets/SSH_warning.txt @@ -0,0 +1,2 @@ +.. warning:: + Never store passwords in plain text. We recommend using SSH keys to authenticate SSH connections. Ansible supports ssh-agent to manage your SSH keys. If you must use passwords to authenticate SSH connections, we recommend encrypting them with :ref:`Ansible Vault <playbooks_vault>`. diff --git a/docs/docsite/rst/plugins/action.rst b/docs/docsite/rst/plugins/action.rst new file mode 100644 index 00000000..93c4e4ba --- /dev/null +++ b/docs/docsite/rst/plugins/action.rst @@ -0,0 +1,56 @@ +.. 
_action_plugins: + +Action Plugins +============== + +.. contents:: + :local: + :depth: 2 + +Action plugins act in conjunction with :ref:`modules <working_with_modules>` to execute the actions required by playbook tasks. +They usually execute automatically in the background doing prerequisite work before modules execute. + +The 'normal' action plugin is used for modules that do not already have an action plugin. + +.. _enabling_action: + +Enabling action plugins +----------------------- + +You can enable a custom action plugin by either dropping it into the ``action_plugins`` directory adjacent to your play, inside a role, or by putting it in one of the action plugin directory sources configured in :ref:`ansible.cfg <ansible_configuration_settings>`. + +.. _using_action: + +Using action plugins +-------------------- + +Action plugins are executed by default when an associated module is used; no action is required. + +Plugin list +----------- + +You cannot list action plugins directly, they show up as their counterpart modules: + +Use ``ansible-doc -l`` to see the list of available modules. +Use ``ansible-doc <name>`` to see specific documentation and examples, this should note if the module has a corresponding action plugin. + +.. seealso:: + + :ref:`cache_plugins` + Ansible Cache plugins + :ref:`callback_plugins` + Ansible callback plugins + :ref:`connection_plugins` + Ansible connection plugins + :ref:`inventory_plugins` + Ansible inventory plugins + :ref:`shell_plugins` + Ansible Shell plugins + :ref:`strategy_plugins` + Ansible Strategy plugins + :ref:`vars_plugins` + Ansible Vars plugins + `User Mailing List <https://groups.google.com/group/ansible-devel>`_ + Have a question? Stop by the google group! 
+ `irc.freenode.net <http://irc.freenode.net>`_ + #ansible IRC chat channel diff --git a/docs/docsite/rst/plugins/become.rst b/docs/docsite/rst/plugins/become.rst new file mode 100644 index 00000000..c710bcf6 --- /dev/null +++ b/docs/docsite/rst/plugins/become.rst @@ -0,0 +1,67 @@ +.. _become_plugins: + +Become Plugins +============== + +.. contents:: + :local: + :depth: 2 + +.. versionadded:: 2.8 + +Become plugins work to ensure that Ansible can use certain privilege escalation systems when running the basic +commands to work with the target machine as well as the modules required to execute the tasks specified in +the play. + +These utilities (``sudo``, ``su``, ``doas``, and so on) generally let you 'become' another user to execute a command +with the permissions of that user. + + +.. _enabling_become: + +Enabling Become Plugins +----------------------- + +The become plugins shipped with Ansible are already enabled. Custom plugins can be added by placing +them into a ``become_plugins`` directory adjacent to your play, inside a role, or by placing them in one of +the become plugin directory sources configured in :ref:`ansible.cfg <ansible_configuration_settings>`. + + +.. _using_become: + +Using Become Plugins +-------------------- + +In addition to the default configuration settings in :ref:`ansible_configuration_settings` or the +``--become-method`` command line option, you can use the ``become_method`` keyword in a play or, if you need +to be 'host specific', the connection variable ``ansible_become_method`` to select the plugin to use. + +You can further control the settings for each plugin via other configuration options detailed in the plugin +themselves (linked below). + +.. _become_plugin_list: + +Plugin List +----------- + +You can use ``ansible-doc -t become -l`` to see the list of available plugins. +Use ``ansible-doc -t become <plugin name>`` to see specific documentation and examples. + +.. 
seealso:: + + :ref:`about_playbooks` + An introduction to playbooks + :ref:`inventory_plugins` + Ansible inventory plugins + :ref:`callback_plugins` + Ansible callback plugins + :ref:`playbooks_filters` + Jinja2 filter plugins + :ref:`playbooks_tests` + Jinja2 test plugins + :ref:`playbooks_lookups` + Jinja2 lookup plugins + `User Mailing List <https://groups.google.com/group/ansible-devel>`_ + Have a question? Stop by the google group! + `irc.freenode.net <http://irc.freenode.net>`_ + #ansible IRC chat channel diff --git a/docs/docsite/rst/plugins/cache.rst b/docs/docsite/rst/plugins/cache.rst new file mode 100644 index 00000000..a13c78db --- /dev/null +++ b/docs/docsite/rst/plugins/cache.rst @@ -0,0 +1,140 @@ +.. _cache_plugins: + +Cache Plugins +============= + +.. contents:: + :local: + :depth: 2 + +Cache plugins allow Ansible to store gathered facts or inventory source data without the performance hit of retrieving them from source. + +The default cache plugin is the :ref:`memory <memory_cache>` plugin, which only caches the data for the current execution of Ansible. Other plugins with persistent storage are available to allow caching the data across runs. Some of these cache plugins write to files, others write to databases. + +You can use different cache plugins for inventory and facts. If you enable inventory caching without setting an inventory-specific cache plugin, Ansible uses the fact cache plugin for both facts and inventory. + +.. _enabling_cache: + +Enabling Fact Cache Plugins +--------------------------- + +Fact caching is always enabled. However, only one fact cache plugin can be active at a time. You can select the cache plugin to use for fact caching in the Ansible configuration, either with an environment variable: + +.. code-block:: shell + + export ANSIBLE_CACHE_PLUGIN=jsonfile + +or in the ``ansible.cfg`` file: + +.. 
code-block:: ini + + [defaults] + fact_caching=redis + +If the cache plugin is in a collection use the fully qualified name: + +.. code-block:: ini + + [defaults] + fact_caching = namespace.collection_name.cache_plugin_name + +To enable a custom cache plugin, save it in a ``cache_plugins`` directory adjacent to your play, inside a role, or in one of the directory sources configured in :ref:`ansible.cfg <ansible_configuration_settings>`. + +You also need to configure other settings specific to each plugin. Consult the individual plugin documentation or the Ansible :ref:`configuration <ansible_configuration_settings>` for more details. + + +Enabling Inventory Cache Plugins +-------------------------------- + +Inventory caching is disabled by default. To cache inventory data, you must enable inventory caching and then select the specific cache plugin you want to use. Not all inventory plugins support caching, so check the documentation for the inventory plugin(s) you want to use. You can enable inventory caching with an environment variable: + +.. code-block:: shell + + export ANSIBLE_INVENTORY_CACHE=True + +or in the ``ansible.cfg`` file: + +.. code-block:: ini + + [inventory] + cache=True + +or if the inventory plugin accepts a YAML configuration source, in the configuration file: + +.. code-block:: yaml + + # dev.aws_ec2.yaml + plugin: aws_ec2 + cache: True + +Only one inventory cache plugin can be active at a time. You can set it with an environment variable: + +.. code-block:: shell + + export ANSIBLE_INVENTORY_CACHE_PLUGIN=jsonfile + +or in the ansible.cfg file: + +.. code-block:: ini + + [inventory] + cache_plugin=jsonfile + +or if the inventory plugin accepts a YAML configuration source, in the configuration file: + +.. code-block:: yaml + + # dev.aws_ec2.yaml + plugin: aws_ec2 + cache_plugin: jsonfile + +To cache inventory with a custom plugin in your plugin path, follow the :ref:`developer guide on cache plugins<developing_cache_plugins>`. 
+ +To cache inventory with a cache plugin in a collection, use the FQCN: + +.. code-block:: ini + + [inventory] + cache_plugin=collection_namespace.collection_name.cache_plugin + +If you enable caching for inventory plugins without selecting an inventory-specific cache plugin, Ansible falls back to caching inventory with the fact cache plugin you configured. Consult the individual inventory plugin documentation or the Ansible :ref:`configuration <ansible_configuration_settings>` for more details. + +.. Note: In Ansible 2.7 and earlier, inventory plugins can only use file-based cache plugins, such as jsonfile, pickle, and yaml. + + +.. _using_cache: + +Using Cache Plugins +------------------- + +Cache plugins are used automatically once they are enabled. + + +.. _cache_plugin_list: + +Plugin List +----------- + +You can use ``ansible-doc -t cache -l`` to see the list of available plugins. +Use ``ansible-doc -t cache <plugin name>`` to see specific documentation and examples. + +.. seealso:: + + :ref:`action_plugins` + Ansible Action plugins + :ref:`callback_plugins` + Ansible callback plugins + :ref:`connection_plugins` + Ansible connection plugins + :ref:`inventory_plugins` + Ansible inventory plugins + :ref:`shell_plugins` + Ansible Shell plugins + :ref:`strategy_plugins` + Ansible Strategy plugins + :ref:`vars_plugins` + Ansible Vars plugins + `User Mailing List <https://groups.google.com/forum/#!forum/ansible-devel>`_ + Have a question? Stop by the google group! + `webchat.freenode.net <https://webchat.freenode.net>`_ + #ansible IRC chat channel diff --git a/docs/docsite/rst/plugins/callback.rst b/docs/docsite/rst/plugins/callback.rst new file mode 100644 index 00000000..a8de3de9 --- /dev/null +++ b/docs/docsite/rst/plugins/callback.rst @@ -0,0 +1,101 @@ +.. _callback_plugins: + +Callback Plugins +================ + +.. contents:: + :local: + :depth: 2 + +Callback plugins enable adding new behaviors to Ansible when responding to events. 
+By default, callback plugins control most of the output you see when running the command line programs, +but can also be used to add additional output, integrate with other tools and marshall the events to a storage backend. + +.. _callback_examples: + +Example callback plugins +------------------------ + +The :ref:`log_plays <log_plays_callback>` callback is an example of how to record playbook events to a log file, +and the :ref:`mail <mail_callback>` callback sends email on playbook failures. + +The :ref:`say <say_callback>` callback responds with computer synthesized speech in relation to playbook events. + +.. _enabling_callbacks: + +Enabling callback plugins +------------------------- + +You can activate a custom callback by either dropping it into a ``callback_plugins`` directory adjacent to your play, inside a role, or by putting it in one of the callback directory sources configured in :ref:`ansible.cfg <ansible_configuration_settings>`. + +Plugins are loaded in alphanumeric order. For example, a plugin implemented in a file named `1_first.py` would run before a plugin file named `2_second.py`. + +Most callbacks shipped with Ansible are disabled by default and need to be whitelisted in your :ref:`ansible.cfg <ansible_configuration_settings>` file in order to function. For example: + +.. code-block:: ini + + #callback_whitelist = timer, mail, profile_roles, collection_namespace.collection_name.custom_callback + +Setting a callback plugin for ``ansible-playbook`` +-------------------------------------------------- + +You can only have one plugin be the main manager of your console output. If you want to replace the default, you should define CALLBACK_TYPE = stdout in the subclass and then configure the stdout plugin in :ref:`ansible.cfg <ansible_configuration_settings>`. For example: + +.. code-block:: ini + + stdout_callback = dense + +or for my custom callback: + +.. 
code-block:: ini + + stdout_callback = mycallback + +This only affects :ref:`ansible-playbook` by default. + +Setting a callback plugin for ad-hoc commands +--------------------------------------------- + +The :ref:`ansible` ad hoc command specifically uses a different callback plugin for stdout, +so there is an extra setting in :ref:`ansible_configuration_settings` you need to add to use the stdout callback defined above: + +.. code-block:: ini + + [defaults] + bin_ansible_callbacks=True + +You can also set this as an environment variable: + +.. code-block:: shell + + export ANSIBLE_LOAD_CALLBACK_PLUGINS=1 + + +.. _callback_plugin_list: + +Plugin list +----------- + +You can use ``ansible-doc -t callback -l`` to see the list of available plugins. +Use ``ansible-doc -t callback <plugin name>`` to see specific documents and examples. + +.. seealso:: + + :ref:`action_plugins` + Ansible Action plugins + :ref:`cache_plugins` + Ansible cache plugins + :ref:`connection_plugins` + Ansible connection plugins + :ref:`inventory_plugins` + Ansible inventory plugins + :ref:`shell_plugins` + Ansible Shell plugins + :ref:`strategy_plugins` + Ansible Strategy plugins + :ref:`vars_plugins` + Ansible Vars plugins + `User Mailing List <https://groups.google.com/forum/#!forum/ansible-devel>`_ + Have a question? Stop by the google group! + `webchat.freenode.net <https://webchat.freenode.net>`_ + #ansible IRC chat channel diff --git a/docs/docsite/rst/plugins/cliconf.rst b/docs/docsite/rst/plugins/cliconf.rst new file mode 100644 index 00000000..2de12dd5 --- /dev/null +++ b/docs/docsite/rst/plugins/cliconf.rst @@ -0,0 +1,47 @@ +.. _cliconf_plugins: + +Cliconf Plugins +=============== + +.. contents:: + :local: + :depth: 2 + +Cliconf plugins are abstractions over the CLI interface to network devices. They provide a standard interface for Ansible to execute tasks on those network devices. + +These plugins generally correspond one-to-one to network device platforms. 
Ansible loads the appropriate cliconf plugin automatically based on the ``ansible_network_os`` variable. + +.. _enabling_cliconf: + +Adding cliconf plugins +------------------------- + +You can extend Ansible to support other network devices by dropping a custom plugin into the ``cliconf_plugins`` directory. + +.. _using_cliconf: + +Using cliconf plugins +------------------------ + +The cliconf plugin to use is determined automatically from the ``ansible_network_os`` variable. There should be no reason to override this functionality. + +Most cliconf plugins can operate without configuration. A few have additional options that can be set to affect how tasks are translated into CLI commands. + +Plugins are self-documenting. Each plugin should document its configuration options. + +.. _cliconf_plugin_list: + +Viewing cliconf plugins +----------------------- + +These plugins have migrated to collections on `Ansible Galaxy <https://galaxy.ansible.com>`_. If you installed Ansible version 2.10 or later using ``pip``, you have access to several cliconf plugins. To list all available cliconf plugins on your control node, type ``ansible-doc -t cliconf -l``. To view plugin-specific documentation and examples, use ``ansible-doc -t cliconf``. + + +.. seealso:: + + :ref:`Ansible for Network Automation<network_guide>` + An overview of using Ansible to automate networking devices. + `User Mailing List <https://groups.google.com/group/ansible-devel>`_ + Have a question? Stop by the google group! + `irc.freenode.net <http://irc.freenode.net>`_ + #ansible-network IRC chat channel diff --git a/docs/docsite/rst/plugins/connection.rst b/docs/docsite/rst/plugins/connection.rst new file mode 100644 index 00000000..0417526f --- /dev/null +++ b/docs/docsite/rst/plugins/connection.rst @@ -0,0 +1,78 @@ +.. _connection_plugins: + +Connection Plugins +================== + +.. 
contents:: + :local: + :depth: 2 + +Connection plugins allow Ansible to connect to the target hosts so it can execute tasks on them. Ansible ships with many connection plugins, but only one can be used per host at a time. + +By default, Ansible ships with several plugins. The most commonly used are the :ref:`paramiko SSH<paramiko_ssh_connection>`, native ssh (just called :ref:`ssh<ssh_connection>`), and :ref:`local<local_connection>` connection types. All of these can be used in playbooks and with :command:`/usr/bin/ansible` to decide how you want to talk to remote machines. + +The basics of these connection types are covered in the :ref:`getting started<intro_getting_started>` section. + +.. _ssh_plugins: + +``ssh`` plugins +--------------- + +Because ssh is the default protocol used in system administration and the protocol most used in Ansible, ssh options are included in the command line tools. See :ref:`ansible-playbook` for more details. + +.. _enabling_connection: + +Adding connection plugins +------------------------- + +You can extend Ansible to support other transports (such as SNMP or message bus) by dropping a custom plugin +into the ``connection_plugins`` directory. + +.. _using_connection: + +Using connection plugins +------------------------ + +You can set the connection plugin globally via :ref:`configuration<ansible_configuration_settings>`, at the command line (``-c``, ``--connection``), as a :ref:`keyword <playbook_keywords>` in your play, or by setting a :ref:`variable<behavioral_parameters>`, most often in your inventory. +For example, for Windows machines you might want to set the :ref:`winrm <winrm_connection>` plugin as an inventory variable. + +Most connection plugins can operate with minimal configuration. By default they use the :ref:`inventory hostname<inventory_hostnames_lookup>` and defaults to find the target host. + +Plugins are self-documenting. Each plugin should document its configuration options. 
The following are connection variables common to most connection plugins: + +:ref:`ansible_host<magic_variables_and_hostvars>` + The name of the host to connect to, if different from the :ref:`inventory <intro_inventory>` hostname. +:ref:`ansible_port<faq_setting_users_and_ports>` + The ssh port number, for :ref:`ssh <ssh_connection>` and :ref:`paramiko_ssh <paramiko_ssh_connection>` it defaults to 22. +:ref:`ansible_user<faq_setting_users_and_ports>` + The default user name to use for log in. Most plugins default to the 'current user running Ansible'. + +Each plugin might also have a specific version of a variable that overrides the general version. For example, ``ansible_ssh_host`` for the :ref:`ssh <ssh_connection>` plugin. + +.. _connection_plugin_list: + +Plugin List +----------- + +You can use ``ansible-doc -t connection -l`` to see the list of available plugins. +Use ``ansible-doc -t connection <plugin name>`` to see detailed documentation and examples. + + +.. seealso:: + + :ref:`Working with Playbooks<working_with_playbooks>` + An introduction to playbooks + :ref:`callback_plugins` + Ansible callback plugins + :ref:`Filters<playbooks_filters>` + Jinja2 filter plugins + :ref:`Tests<playbooks_tests>` + Jinja2 test plugins + :ref:`Lookups<playbooks_lookups>` + Jinja2 lookup plugins + :ref:`vars_plugins` + Ansible vars plugins + `User Mailing List <https://groups.google.com/group/ansible-devel>`_ + Have a question? Stop by the google group! + `irc.freenode.net <http://irc.freenode.net>`_ + #ansible IRC chat channel diff --git a/docs/docsite/rst/plugins/httpapi.rst b/docs/docsite/rst/plugins/httpapi.rst new file mode 100644 index 00000000..cf9b0bbf --- /dev/null +++ b/docs/docsite/rst/plugins/httpapi.rst @@ -0,0 +1,72 @@ +.. _httpapi_plugins: + +Httpapi Plugins +=============== + +.. contents:: + :local: + :depth: 2 + +Httpapi plugins tell Ansible how to interact with a remote device's HTTP-based API and execute tasks on the +device. 
+ +Each plugin represents a particular dialect of API. Some are platform-specific (Arista eAPI, Cisco NXAPI), while others might be usable on a variety of platforms (RESTCONF). Ansible loads the appropriate httpapi plugin automatically based on the ``ansible_network_os`` variable. + + +.. _enabling_httpapi: + +Adding httpapi plugins +------------------------- + +You can extend Ansible to support other APIs by dropping a custom plugin into the ``httpapi_plugins`` directory. See :ref:`developing_plugins_httpapi` for details. + +.. _using_httpapi: + +Using httpapi plugins +------------------------ + +The httpapi plugin to use is determined automatically from the ``ansible_network_os`` variable. + +Most httpapi plugins can operate without configuration. Additional options may be defined by each plugin. + +Plugins are self-documenting. Each plugin should document its configuration options. + + +The following sample playbook shows the httpapi plugin for an Arista network device, assuming an inventory variable set as ``ansible_network_os=eos`` for the httpapi plugin to trigger off: + +.. code-block:: yaml + + - hosts: leaf01 + connection: httpapi + gather_facts: false + tasks: + + - name: type a simple arista command + eos_command: + commands: + - show version | json + register: command_output + + - name: print command output to terminal window + debug: + var: command_output.stdout[0]["version"] + +See the full working example `on GitHub <https://github.com/network-automation/httpapi>`_. + +.. _httpapi_plugin_list: + +Viewing httpapi plugins +----------------------- + +These plugins have migrated to collections on `Ansible Galaxy <https://galaxy.ansible.com>`_. If you installed Ansible version 2.10 or later using ``pip``, you have access to several httpapi plugins. To list all available httpapi plugins on your control node, type ``ansible-doc -t httpapi -l``. To view plugin-specific documentation and examples, use ``ansible-doc -t httpapi``. + +.. 
seealso:: + + :ref:`Ansible for Network Automation<network_guide>` + An overview of using Ansible to automate networking devices. + :ref:`Developing network modules<developing_modules_network>` + How to develop network modules. + `User Mailing List <https://groups.google.com/group/ansible-devel>`_ + Have a question? Stop by the google group! + `irc.freenode.net <http://irc.freenode.net>`_ + #ansible-network IRC chat channel diff --git a/docs/docsite/rst/plugins/index.html b/docs/docsite/rst/plugins/index.html new file mode 100644 index 00000000..a7eac856 --- /dev/null +++ b/docs/docsite/rst/plugins/index.html @@ -0,0 +1,4 @@ +<html> +<head><noscript><meta http-equiv="refresh" content="0; url=plugins.html"></noscript></head> +<body onload="window.location = 'plugins.html'">Redirecting to <a href='plugins.html'>plugins</a> page.</body> +</html> diff --git a/docs/docsite/rst/plugins/inventory.rst b/docs/docsite/rst/plugins/inventory.rst new file mode 100644 index 00000000..562c1303 --- /dev/null +++ b/docs/docsite/rst/plugins/inventory.rst @@ -0,0 +1,162 @@ +.. _inventory_plugins: + +Inventory Plugins +================= + +.. contents:: + :local: + :depth: 2 + +Inventory plugins allow users to point at data sources to compile the inventory of hosts that Ansible uses to target tasks, either using the ``-i /path/to/file`` and/or ``-i 'host1, host2'`` command line parameters or from other configuration sources. + + +.. _enabling_inventory: + +Enabling inventory plugins +-------------------------- + +Most inventory plugins shipped with Ansible are enabled by default or can be used with the ``auto`` plugin. + +In some circumstances, for example, if the inventory plugin does not use a YAML configuration file, you may need to enable the specific plugin. You can do this by setting ``enable_plugins`` in your :ref:`ansible.cfg <ansible_configuration_settings>` file in the ``[inventory]`` section. Modifying this will override the default list of enabled plugins. 
Here is the default list of enabled plugins that ships with Ansible: + +.. code-block:: ini + + [inventory] + enable_plugins = host_list, script, auto, yaml, ini, toml + +If the plugin is in a collection, use the fully qualified name: + +.. code-block:: ini + + [inventory] + enable_plugins = namespace.collection_name.inventory_plugin_name + + +.. _using_inventory: + +Using inventory plugins +----------------------- + +To use an inventory plugin, you must provide an inventory source. Most of the time this is a file containing host information or a YAML configuration file with options for the plugin. You can use the ``-i`` flag to provide inventory sources or configure a default inventory path. + +.. code-block:: bash + + ansible hostname -i inventory_source -m ansible.builtin.ping + +To start using an inventory plugin with a YAML configuration source, create a file with the accepted filename schema documented for the plugin in question, then add ``plugin: plugin_name``. Use the fully qualified name if the plugin is in a collection. + +.. code-block:: yaml + + # demo.aws_ec2.yml + plugin: amazon.aws.aws_ec2 + +Each plugin should document any naming restrictions. In addition, the YAML config file must end with the extension ``yml`` or ``yaml`` to be enabled by default with the ``auto`` plugin (otherwise, see the section above on enabling plugins). + +After providing any required options, you can view the populated inventory with ``ansible-inventory -i demo.aws_ec2.yml --graph``: + +.. code-block:: text + + @all: + |--@aws_ec2: + | |--ec2-12-345-678-901.compute-1.amazonaws.com + | |--ec2-98-765-432-10.compute-1.amazonaws.com + |--@ungrouped: + +If you are using an inventory plugin in a playbook-adjacent collection and want to test your setup with ``ansible-inventory``, use the ``--playbook-dir`` flag. + +Your inventory source might be a directory of inventory configuration files. 
The constructed inventory plugin only operates on those hosts already in inventory, so you may want the constructed inventory configuration parsed at a particular point (such as last). Ansible parses the directory recursively, alphabetically. You cannot configure the parsing approach, so name your files to make it work predictably. Inventory plugins that extend constructed features directly can work around that restriction by adding constructed options in addition to the inventory plugin options. Otherwise, you can use ``-i`` with multiple sources to impose a specific order, for example ``-i demo.aws_ec2.yml -i clouds.yml -i constructed.yml``. + +You can create dynamic groups using host variables with the constructed ``keyed_groups`` option. The option ``groups`` can also be used to create groups and ``compose`` creates and modifies host variables. Here is an aws_ec2 example utilizing constructed features: + +.. code-block:: yaml + + # demo.aws_ec2.yml + plugin: amazon.aws.aws_ec2 + regions: + - us-east-1 + - us-east-2 + keyed_groups: + # add hosts to tag_Name_value groups for each aws_ec2 host's tags.Name variable + - key: tags.Name + prefix: tag_Name_ + separator: "" + groups: + # add hosts to the group development if any of the dictionary's keys or values is the word 'devel' + development: "'devel' in (tags|list)" + compose: + # set the ansible_host variable to connect with the private IP address without changing the hostname + ansible_host: private_ip_address + +Now the output of ``ansible-inventory -i demo.aws_ec2.yml --graph``: + +.. code-block:: text + + @all: + |--@aws_ec2: + | |--ec2-12-345-678-901.compute-1.amazonaws.com + | |--ec2-98-765-432-10.compute-1.amazonaws.com + | |--... 
+ |--@development: + | |--ec2-12-345-678-901.compute-1.amazonaws.com + | |--ec2-98-765-432-10.compute-1.amazonaws.com + |--@tag_Name_ECS_Instance: + | |--ec2-98-765-432-10.compute-1.amazonaws.com + |--@tag_Name_Test_Server: + | |--ec2-12-345-678-901.compute-1.amazonaws.com + |--@ungrouped + +If a host does not have the variables in the configuration above (in other words, ``tags.Name``, ``tags``, ``private_ip_address``), the host will not be added to groups other than those that the inventory plugin creates and the ``ansible_host`` host variable will not be modified. + +Inventory plugins that support caching can use the general settings for the fact cache defined in the ``ansible.cfg`` file's ``[defaults]`` section or define inventory-specific settings in the ``[inventory]`` section. Individual plugins can define plugin-specific cache settings in their config file: + +.. code-block:: yaml + + # demo.aws_ec2.yml + plugin: amazon.aws.aws_ec2 + cache: yes + cache_plugin: ansible.builtin.jsonfile + cache_timeout: 7200 + cache_connection: /tmp/aws_inventory + cache_prefix: aws_ec2 + +Here is an example of setting inventory caching with some fact caching defaults for the cache plugin used and the timeout in an ``ansible.cfg`` file: + +.. code-block:: ini + + [defaults] + fact_caching = ansible.builtin.jsonfile + fact_caching_connection = /tmp/ansible_facts + cache_timeout = 3600 + + [inventory] + cache = yes + cache_connection = /tmp/ansible_inventory + +.. _inventory_plugin_list: + +Plugin List +----------- + +You can use ``ansible-doc -t inventory -l`` to see the list of available plugins. +Use ``ansible-doc -t inventory <plugin name>`` to see plugin-specific documentation and examples. + +.. 
seealso:: + + :ref:`about_playbooks` + An introduction to playbooks + :ref:`callback_plugins` + Ansible callback plugins + :ref:`connection_plugins` + Ansible connection plugins + :ref:`playbooks_filters` + Jinja2 filter plugins + :ref:`playbooks_tests` + Jinja2 test plugins + :ref:`playbooks_lookups` + Jinja2 lookup plugins + :ref:`vars_plugins` + Ansible vars plugins + `User Mailing List <https://groups.google.com/group/ansible-devel>`_ + Have a question? Stop by the google group! + `irc.freenode.net <http://irc.freenode.net>`_ + #ansible IRC chat channel diff --git a/docs/docsite/rst/plugins/lookup.rst b/docs/docsite/rst/plugins/lookup.rst new file mode 100644 index 00000000..31183b15 --- /dev/null +++ b/docs/docsite/rst/plugins/lookup.rst @@ -0,0 +1,158 @@ +.. _lookup_plugins: + +Lookup Plugins +============== + +.. contents:: + :local: + :depth: 2 + +Lookup plugins are an Ansible-specific extension to the Jinja2 templating language. You can use lookup plugins to access data from outside sources (files, databases, key/value stores, APIs, and other services) within your playbooks. Like all :ref:`templating <playbooks_templating>`, lookups execute and are evaluated on the Ansible control machine. Ansible makes the data returned by a lookup plugin available using the standard templating system. You can use lookup plugins to load variables or templates with information from external sources. + +.. note:: + - Lookups are executed with a working directory relative to the role or play, + as opposed to local tasks, which are executed relative the executed script. + - Pass ``wantlist=True`` to lookups to use in Jinja2 template "for" loops. + +.. warning:: + - Some lookups pass arguments to a shell. When using variables from a remote/untrusted source, use the `|quote` filter to ensure safe usage. + + +.. _enabling_lookup: + +Enabling lookup plugins +----------------------- + +Ansible enables all lookup plugins it can find. 
You can activate a custom lookup by either dropping it into a ``lookup_plugins`` directory adjacent to your play, inside the ``plugins/lookup/`` directory of a collection you have installed, inside a standalone role, or in one of the lookup directory sources configured in :ref:`ansible.cfg <ansible_configuration_settings>`. + + +.. _using_lookup: + +Using lookup plugins +-------------------- + +You can use lookup plugins anywhere you can use templating in Ansible: in a play, in variables file, or in a Jinja2 template for the :ref:`template <template_module>` module. + +.. code-block:: YAML+Jinja + + vars: + file_contents: "{{lookup('file', 'path/to/file.txt')}}" + +Lookups are an integral part of loops. Wherever you see ``with_``, the part after the underscore is the name of a lookup. For this reason, most lookups output lists and take lists as input; for example, ``with_items`` uses the :ref:`items <items_lookup>` lookup:: + + tasks: + - name: count to 3 + debug: msg={{item}} + with_items: [1, 2, 3] + +You can combine lookups with :ref:`filters <playbooks_filters>`, :ref:`tests <playbooks_tests>` and even each other to do some complex data generation and manipulation. For example:: + + tasks: + - name: valid but useless and over complicated chained lookups and filters + debug: msg="find the answer here:\n{{ lookup('url', 'https://google.com/search/?q=' + item|urlencode)|join(' ') }}" + with_nested: + - "{{lookup('consul_kv', 'bcs/' + lookup('file', '/the/question') + ', host=localhost, port=2000')|shuffle}}" + - "{{lookup('sequence', 'end=42 start=2 step=2')|map('log', 4)|list)}}" + - ['a', 'c', 'd', 'c'] + +.. versionadded:: 2.6 + +You can control how errors behave in all lookup plugins by setting ``errors`` to ``ignore``, ``warn``, or ``strict``. The default setting is ``strict``, which causes the task to fail if the lookup returns an error. For example: + +To ignore lookup errors:: + + - name: if this file does not exist, I do not care .. 
file plugin itself warns anyway ... + debug: msg="{{ lookup('file', '/nosuchfile', errors='ignore') }}" + +.. code-block:: ansible-output + + [WARNING]: Unable to find '/nosuchfile' in expected paths (use -vvvvv to see paths) + + ok: [localhost] => { + "msg": "" + } + + +To get a warning instead of a failure:: + + - name: if this file does not exist, let me know, but continue + debug: msg="{{ lookup('file', '/nosuchfile', errors='warn') }}" + +.. code-block:: ansible-output + + [WARNING]: Unable to find '/nosuchfile' in expected paths (use -vvvvv to see paths) + + [WARNING]: An unhandled exception occurred while running the lookup plugin 'file'. Error was a <class 'ansible.errors.AnsibleError'>, original message: could not locate file in lookup: /nosuchfile + + ok: [localhost] => { + "msg": "" + } + + +To get a fatal error (the default):: + + - name: if this file does not exist, FAIL (this is the default) + debug: msg="{{ lookup('file', '/nosuchfile', errors='strict') }}" + +.. code-block:: ansible-output + + [WARNING]: Unable to find '/nosuchfile' in expected paths (use -vvvvv to see paths) + + fatal: [localhost]: FAILED! => {"msg": "An unhandled exception occurred while running the lookup plugin 'file'. Error was a <class 'ansible.errors.AnsibleError'>, original message: could not locate file in lookup: /nosuchfile"} + + +.. _query: + +Forcing lookups to return lists: ``query`` and ``wantlist=True`` +---------------------------------------------------------------- + +.. versionadded:: 2.5 + +In Ansible 2.5, a new Jinja2 function called ``query`` was added for invoking lookup plugins. The difference between ``lookup`` and ``query`` is largely that ``query`` will always return a list. +The default behavior of ``lookup`` is to return a string of comma separated values. ``lookup`` can be explicitly configured to return a list using ``wantlist=True``. 
+ +This feature provides an easier and more consistent interface for interacting with the new ``loop`` keyword, while maintaining backwards compatibility with other uses of ``lookup``. + +The following examples are equivalent: + +.. code-block:: jinja + + lookup('dict', dict_variable, wantlist=True) + + query('dict', dict_variable) + +As demonstrated above, the behavior of ``wantlist=True`` is implicit when using ``query``. + +Additionally, ``q`` was introduced as a shortform of ``query``: + +.. code-block:: jinja + + q('dict', dict_variable) + + +.. _lookup_plugins_list: + +Plugin list +----------- + +You can use ``ansible-doc -t lookup -l`` to see the list of available plugins. Use ``ansible-doc -t lookup <plugin name>`` to see specific documents and examples. + + +.. seealso:: + + :ref:`about_playbooks` + An introduction to playbooks + :ref:`inventory_plugins` + Ansible inventory plugins + :ref:`callback_plugins` + Ansible callback plugins + :ref:`playbooks_filters` + Jinja2 filter plugins + :ref:`playbooks_tests` + Jinja2 test plugins + :ref:`playbooks_lookups` + Jinja2 lookup plugins + `User Mailing List <https://groups.google.com/group/ansible-devel>`_ + Have a question? Stop by the google group! + `irc.freenode.net <http://irc.freenode.net>`_ + #ansible IRC chat channel diff --git a/docs/docsite/rst/plugins/netconf.rst b/docs/docsite/rst/plugins/netconf.rst new file mode 100644 index 00000000..fef2aeb3 --- /dev/null +++ b/docs/docsite/rst/plugins/netconf.rst @@ -0,0 +1,47 @@ +.. _netconf_plugins: + +Netconf Plugins +=============== + +.. contents:: + :local: + :depth: 2 + +Netconf plugins are abstractions over the Netconf interface to network devices. They provide a standard interface for Ansible to execute tasks on those network devices. + +These plugins generally correspond one-to-one to network device platforms. Ansible loads the appropriate netconf plugin automatically based on the ``ansible_network_os`` variable. 
If the platform supports standard Netconf implementation as defined in the Netconf RFC specification, Ansible loads the ``default`` netconf plugin. If the platform supports propriety Netconf RPCs, Ansible loads the platform-specific netconf plugin. + +.. _enabling_netconf: + +Adding netconf plugins +------------------------- + +You can extend Ansible to support other network devices by dropping a custom plugin into the ``netconf_plugins`` directory. + +.. _using_netconf: + +Using netconf plugins +------------------------ + +The netconf plugin to use is determined automatically from the ``ansible_network_os`` variable. There should be no reason to override this functionality. + +Most netconf plugins can operate without configuration. A few have additional options that can be set to affect how tasks are translated into netconf commands. A ncclient device specific handler name can be set in the netconf plugin or else the value of ``default`` is used as per ncclient device handler. + +Plugins are self-documenting. Each plugin should document its configuration options. + +.. _netconf_plugin_list: + +Listing netconf plugins +----------------------- + +These plugins have migrated to collections on `Ansible Galaxy <https://galaxy.ansible.com>`_. If you installed Ansible version 2.10 or later using ``pip``, you have access to several netconf plugins. To list all available netconf plugins on your control node, type ``ansible-doc -t netconf -l``. To view plugin-specific documentation and examples, use ``ansible-doc -t netconf``. + + +.. seealso:: + + :ref:`Ansible for Network Automation<network_guide>` + An overview of using Ansible to automate networking devices. + `User Mailing List <https://groups.google.com/group/ansible-devel>`_ + Have a question? Stop by the google group! 
+ `irc.freenode.net <http://irc.freenode.net>`_ + #ansible-network IRC chat channel diff --git a/docs/docsite/rst/plugins/plugins.rst b/docs/docsite/rst/plugins/plugins.rst new file mode 100644 index 00000000..4dee6c6a --- /dev/null +++ b/docs/docsite/rst/plugins/plugins.rst @@ -0,0 +1,44 @@ +.. _plugins_lookup: + +******************** +Working With Plugins +******************** + +Plugins are pieces of code that augment Ansible's core functionality. Ansible uses a plugin architecture to enable a rich, flexible and expandable feature set. + +Ansible ships with a number of handy plugins, and you can easily write your own. + +This section covers the various types of plugins that are included with Ansible: + +.. toctree:: + :maxdepth: 1 + + action + become + cache + callback + cliconf + connection + httpapi + inventory + lookup + netconf + shell + strategy + vars + ../user_guide/playbooks_filters + ../user_guide/playbooks_tests + ../user_guide/plugin_filtering_config + +.. seealso:: + + :ref:`about_playbooks` + An introduction to playbooks + :ref:`ansible_configuration_settings` + Ansible configuration documentation and settings + :ref:`command_line_tools` + Ansible tools, description and options + `User Mailing List <https://groups.google.com/group/ansible-devel>`_ + Have a question? Stop by the google group! + `irc.freenode.net <http://irc.freenode.net>`_ + #ansible IRC chat channel diff --git a/docs/docsite/rst/plugins/shell.rst b/docs/docsite/rst/plugins/shell.rst new file mode 100644 index 00000000..b0846323 --- /dev/null +++ b/docs/docsite/rst/plugins/shell.rst @@ -0,0 +1,53 @@ +.. _shell_plugins: + +Shell Plugins +============= + +.. contents:: + :local: + :depth: 2 + +Shell plugins work to ensure that the basic commands Ansible runs are properly formatted to work with +the target machine and allow the user to configure certain behaviors related to how Ansible executes tasks. + +.. 
_enabling_shell: + +Enabling shell plugins +---------------------- + +You can add a custom shell plugin by dropping it into a ``shell_plugins`` directory adjacent to your play, inside a role, +or by putting it in one of the shell plugin directory sources configured in :ref:`ansible.cfg <ansible_configuration_settings>`. + +.. warning:: You should not alter which plugin is used unless you have a setup in which the default ``/bin/sh`` + is not a POSIX compatible shell or is not available for execution. + +.. _using_shell: + +Using shell plugins +------------------- + +In addition to the default configuration settings in :ref:`ansible_configuration_settings`, you can use +the connection variable :ref:`ansible_shell_type <ansible_shell_type>` to select the plugin to use. +In this case, you will also want to update the :ref:`ansible_shell_executable <ansible_shell_executable>` to match. + +You can further control the settings for each plugin via other configuration options +detailed in the plugin themselves (linked below). + +.. seealso:: + + :ref:`about_playbooks` + An introduction to playbooks + :ref:`inventory_plugins` + Ansible inventory plugins + :ref:`callback_plugins` + Ansible callback plugins + :ref:`playbooks_filters` + Jinja2 filter plugins + :ref:`playbooks_tests` + Jinja2 test plugins + :ref:`playbooks_lookups` + Jinja2 lookup plugins + `User Mailing List <https://groups.google.com/group/ansible-devel>`_ + Have a question? Stop by the google group! + `irc.freenode.net <http://irc.freenode.net>`_ + #ansible IRC chat channel diff --git a/docs/docsite/rst/plugins/strategy.rst b/docs/docsite/rst/plugins/strategy.rst new file mode 100644 index 00000000..e3623329 --- /dev/null +++ b/docs/docsite/rst/plugins/strategy.rst @@ -0,0 +1,79 @@ +.. _strategy_plugins: + +Strategy Plugins +================ + +.. contents:: + :local: + :depth: 2 + +Strategy plugins control the flow of play execution by handling task and host scheduling. + +.. 
_enable_strategy: + +Enabling strategy plugins +------------------------- + +All strategy plugins shipped with Ansible are enabled by default. You can enable a custom strategy plugin by +putting it in one of the lookup directory sources configured in :ref:`ansible.cfg <ansible_configuration_settings>`. + +.. _using_strategy: + +Using strategy plugins +---------------------- + +Only one strategy plugin can be used in a play, but you can use different ones for each play in a playbook or ansible run. +The default is the :ref:`linear <linear_strategy>` plugin. You can change this default in Ansible :ref:`configuration <ansible_configuration_settings>` using an environment variable: + +.. code-block:: shell + + export ANSIBLE_STRATEGY=free + +or in the `ansible.cfg` file: + +.. code-block:: ini + + [defaults] + strategy=linear + +You can also specify the strategy plugin in the play via the :ref:`strategy keyword <playbook_keywords>` in a play:: + + - hosts: all + strategy: debug + tasks: + - copy: src=myhosts dest=/etc/hosts + notify: restart_tomcat + + - package: name=tomcat state=present + + handlers: + - name: restart_tomcat + service: name=tomcat state=restarted + +.. _strategy_plugin_list: + +Plugin list +----------- + +You can use ``ansible-doc -t strategy -l`` to see the list of available plugins. +Use ``ansible-doc -t strategy <plugin name>`` to see plugin-specific specific documentation and examples. + + +.. seealso:: + + :ref:`about_playbooks` + An introduction to playbooks + :ref:`inventory_plugins` + Ansible inventory plugins + :ref:`callback_plugins` + Ansible callback plugins + :ref:`playbooks_filters` + Jinja2 filter plugins + :ref:`playbooks_tests` + Jinja2 test plugins + :ref:`playbooks_lookups` + Jinja2 lookup plugins + `User Mailing List <https://groups.google.com/group/ansible-devel>`_ + Have a question? Stop by the google group! 
+ `irc.freenode.net <http://irc.freenode.net>`_ + #ansible IRC chat channel diff --git a/docs/docsite/rst/plugins/vars.rst b/docs/docsite/rst/plugins/vars.rst new file mode 100644 index 00000000..c24bdb81 --- /dev/null +++ b/docs/docsite/rst/plugins/vars.rst @@ -0,0 +1,79 @@ +.. _vars_plugins: + +Vars Plugins +============ + +.. contents:: + :local: + :depth: 2 + +Vars plugins inject additional variable data into Ansible runs that did not come from an inventory source, playbook, or command line. Playbook constructs like 'host_vars' and 'group_vars' work using vars plugins. + +Vars plugins were partially implemented in Ansible 2.0 and rewritten to be fully implemented starting with Ansible 2.4. + +The :ref:`host_group_vars <host_group_vars_vars>` plugin shipped with Ansible enables reading variables from :ref:`host_variables` and :ref:`group_variables`. + + +.. _enable_vars: + +Enabling vars plugins +--------------------- + +You can activate a custom vars plugin by either dropping it into a ``vars_plugins`` directory adjacent to your play, inside a role, or by putting it in one of the directory sources configured in :ref:`ansible.cfg <ansible_configuration_settings>`. + +Starting in Ansible 2.10, vars plugins can require whitelisting rather than running by default. To enable a plugin that requires whitelisting set ``vars_plugins_enabled`` in the ``defaults`` section of :ref:`ansible.cfg <ansible_configuration_settings>` or set the ``ANSIBLE_VARS_ENABLED`` environment variable to the list of vars plugins you want to execute. By default, the :ref:`host_group_vars <host_group_vars_vars>` plugin shipped with Ansible is whitelisted. + +Starting in Ansible 2.10, you can use vars plugins in collections. All vars plugins in collections require whitelisting and need to use the fully qualified collection name in the format ``namespace.collection_name.vars_plugin_name``. + +.. 
code-block:: ini
+ `irc.freenode.net <http://irc.freenode.net>`_ + #ansible IRC chat channel diff --git a/docs/docsite/rst/porting_guides/core_porting_guides.rst b/docs/docsite/rst/porting_guides/core_porting_guides.rst new file mode 100644 index 00000000..0c8f3d56 --- /dev/null +++ b/docs/docsite/rst/porting_guides/core_porting_guides.rst @@ -0,0 +1,15 @@ +.. _core_porting_guides: + +*************************** +Ansible Core Porting Guides +*************************** + +This section lists porting guides that can help you in updating playbooks, plugins and other parts of your Ansible infrastructure from one version of ``ansible-core`` to the next. + +Please note that this is not a complete list. If you believe any extra information would be useful in these pages, you can edit by clicking `Edit on GitHub` on the top right, or raising an issue. + +.. toctree:: + :maxdepth: 1 + :glob: + + porting_guide_base_2.10 diff --git a/docs/docsite/rst/porting_guides/porting_guide_2.0.rst b/docs/docsite/rst/porting_guides/porting_guide_2.0.rst new file mode 100644 index 00000000..00879a6b --- /dev/null +++ b/docs/docsite/rst/porting_guides/porting_guide_2.0.rst @@ -0,0 +1,13 @@ +:orphan: + +.. _porting_2.0_guide: + +************************* +Ansible 2.0 Porting Guide +************************* + +Ansible Porting Guides are maintained in the ``devel`` branch only. Please go to `the devel Ansible 2.0 Porting guide <https://docs.ansible.com/ansible/devel/porting_guides/porting_guide_2.0.html>`_ for up to date information. + +.. note:: + + This link takes you to a different version of the Ansible documentation. Use the version selection on the left or your browser back button to return to this version of the documentation. diff --git a/docs/docsite/rst/porting_guides/porting_guide_2.10.rst b/docs/docsite/rst/porting_guides/porting_guide_2.10.rst new file mode 100644 index 00000000..3a8ba48b --- /dev/null +++ b/docs/docsite/rst/porting_guides/porting_guide_2.10.rst @@ -0,0 +1,13 @@ +:orphan: + +.. 
_porting_2.10_guide: + +========================== +Ansible 2.10 Porting Guide +========================== + +Ansible Porting Guides are maintained in the ``devel`` branch only. Please go to `the devel Ansible 2.10 Porting guide <https://docs.ansible.com/ansible/devel/porting_guides/porting_guide_2.10.html>`_ for up to date information. + +.. note:: + + This link takes you to a different version of the Ansible documentation. Use the version selection on the left or your browser back button to return to this version of the documentation. diff --git a/docs/docsite/rst/porting_guides/porting_guide_2.3.rst b/docs/docsite/rst/porting_guides/porting_guide_2.3.rst new file mode 100644 index 00000000..0db4a98f --- /dev/null +++ b/docs/docsite/rst/porting_guides/porting_guide_2.3.rst @@ -0,0 +1,12 @@ +:orphan: + +.. _porting_2.3_guide: + +************************* +Ansible 2.3 Porting Guide +************************* +Ansible Porting Guides are maintained in the ``devel`` branch only. Please go to `the devel Ansible 2.3 Porting guide <https://docs.ansible.com/ansible/devel/porting_guides/porting_guide_2.3.html>`_ for up to date information. + +.. note:: + + This link takes you to a different version of the Ansible documentation. Use the version selection on the left or your browser back button to return to this version of the documentation. diff --git a/docs/docsite/rst/porting_guides/porting_guide_2.4.rst b/docs/docsite/rst/porting_guides/porting_guide_2.4.rst new file mode 100644 index 00000000..eb2d9954 --- /dev/null +++ b/docs/docsite/rst/porting_guides/porting_guide_2.4.rst @@ -0,0 +1,13 @@ +:orphan: + +.. _porting_2.4_guide: + +************************* +Ansible 2.4 Porting Guide +************************* + +Ansible Porting Guides are maintained in the ``devel`` branch only. Please go to `the devel Ansible 2.4 Porting guide <https://docs.ansible.com/ansible/devel/porting_guides/porting_guide_2.4.html>`_ for up to date information. + +.. 
note:: + + This link takes you to a different version of the Ansible documentation. Use the version selection on the left or your browser back button to return to this version of the documentation. diff --git a/docs/docsite/rst/porting_guides/porting_guide_2.5.rst b/docs/docsite/rst/porting_guides/porting_guide_2.5.rst new file mode 100644 index 00000000..439cbae0 --- /dev/null +++ b/docs/docsite/rst/porting_guides/porting_guide_2.5.rst @@ -0,0 +1,13 @@ +:orphan: + +.. _porting_2.5_guide: + +************************* +Ansible 2.5 Porting Guide +************************* + +Ansible Porting Guides are maintained in the ``devel`` branch only. Please go to `the devel Ansible 2.5 Porting guide <https://docs.ansible.com/ansible/devel/porting_guides/porting_guide_2.5.html>`_ for up to date information. + +.. note:: + + This link takes you to a different version of the Ansible documentation. Use the version selection on the left or your browser back button to return to this version of the documentation. diff --git a/docs/docsite/rst/porting_guides/porting_guide_2.6.rst b/docs/docsite/rst/porting_guides/porting_guide_2.6.rst new file mode 100644 index 00000000..6150ccc9 --- /dev/null +++ b/docs/docsite/rst/porting_guides/porting_guide_2.6.rst @@ -0,0 +1,13 @@ +:orphan: + +.. _porting_2.6_guide: + +************************* +Ansible 2.6 Porting Guide +************************* + +Ansible Porting Guides are maintained in the ``devel`` branch only. Please go to `the devel Ansible 2.6 Porting guide <https://docs.ansible.com/ansible/devel/porting_guides/porting_guide_2.6.html>`_ for up to date information. + +.. note:: + + This link takes you to a different version of the Ansible documentation. Use the version selection on the left or your browser back button to return to this version of the documentation. 
diff --git a/docs/docsite/rst/porting_guides/porting_guide_2.7.rst b/docs/docsite/rst/porting_guides/porting_guide_2.7.rst new file mode 100644 index 00000000..2da9785e --- /dev/null +++ b/docs/docsite/rst/porting_guides/porting_guide_2.7.rst @@ -0,0 +1,13 @@ +:orphan: + +.. _porting_2.7_guide: + +************************* +Ansible 2.7 Porting Guide +************************* + +Ansible Porting Guides are maintained in the ``devel`` branch only. Please go to `the devel Ansible 2.7 Porting guide <https://docs.ansible.com/ansible/devel/porting_guides/porting_guide_2.7.html>`_ for up to date information. + +.. note:: + + This link takes you to a different version of the Ansible documentation. Use the version selection on the left or your browser back button to return to this version of the documentation. diff --git a/docs/docsite/rst/porting_guides/porting_guide_2.8.rst b/docs/docsite/rst/porting_guides/porting_guide_2.8.rst new file mode 100644 index 00000000..56fb2db3 --- /dev/null +++ b/docs/docsite/rst/porting_guides/porting_guide_2.8.rst @@ -0,0 +1,13 @@ +:orphan: + +.. _porting_2.8_guide: + +************************* +Ansible 2.8 Porting Guide +************************* + +Ansible Porting Guides are maintained in the ``devel`` branch only. Please go to `the devel Ansible 2.8 Porting guide <https://docs.ansible.com/ansible/devel/porting_guides/porting_guide_2.8.html>`_ for up to date information. + +.. note:: + + This link takes you to a different version of the Ansible documentation. Use the version selection on the left or your browser back button to return to this version of the documentation. diff --git a/docs/docsite/rst/porting_guides/porting_guide_2.9.rst b/docs/docsite/rst/porting_guides/porting_guide_2.9.rst new file mode 100644 index 00000000..99742313 --- /dev/null +++ b/docs/docsite/rst/porting_guides/porting_guide_2.9.rst @@ -0,0 +1,13 @@ +:orphan: + +.. 
_porting_2.9_guide: + +************************* +Ansible 2.9 Porting Guide +************************* + +Ansible Porting Guides are maintained in the ``devel`` branch only. Please go to `the devel Ansible 2.9 Porting guide <https://docs.ansible.com/ansible/devel/porting_guides/porting_guide_2.9.html>`_ for up to date information. + +.. note:: + + This link takes you to a different version of the Ansible documentation. Use the version selection on the left or your browser back button to return to this version of the documentation. diff --git a/docs/docsite/rst/porting_guides/porting_guide_base_2.10.rst b/docs/docsite/rst/porting_guides/porting_guide_base_2.10.rst new file mode 100644 index 00000000..46724313 --- /dev/null +++ b/docs/docsite/rst/porting_guides/porting_guide_base_2.10.rst @@ -0,0 +1,13 @@ +:orphan: + +.. _porting_2.10_guide_base: + +******************************* +Ansible-base 2.10 Porting Guide +******************************* + +Ansible Porting Guides are maintained in the ``devel`` branch only. Please go to `the devel Ansible-base 2.10 Porting guide <https://docs.ansible.com/ansible/devel/porting_guides/porting_guide_base_2.10.html>`_ for up to date information. + +.. note:: + + This link takes you to a different version of the Ansible documentation. Use the version selection on the left or your browser back button to return to this version of the documentation. diff --git a/docs/docsite/rst/porting_guides/porting_guides.rst b/docs/docsite/rst/porting_guides/porting_guides.rst new file mode 100644 index 00000000..3dc29127 --- /dev/null +++ b/docs/docsite/rst/porting_guides/porting_guides.rst @@ -0,0 +1,11 @@ +.. _porting_guides: + +********************** +Ansible Porting Guides +********************** + +Ansible Porting Guides are maintained in the ``devel`` branch only. Please go to `the devel porting guides <https://docs.ansible.com/ansible/devel/porting_guides/porting_guides.html>`_ for up to date information. + +.. 
note:: + + This link takes you to a different version of the Ansible documentation. Use the version selection on the left or your browser back button to return to this version of the documentation. diff --git a/docs/docsite/rst/reference_appendices/.rstcheck.cfg b/docs/docsite/rst/reference_appendices/.rstcheck.cfg new file mode 100644 index 00000000..5b33c076 --- /dev/null +++ b/docs/docsite/rst/reference_appendices/.rstcheck.cfg @@ -0,0 +1,2 @@ +[rstcheck] +ignore_directives=autoclass,automodule diff --git a/docs/docsite/rst/reference_appendices/YAMLSyntax.rst b/docs/docsite/rst/reference_appendices/YAMLSyntax.rst new file mode 100644 index 00000000..7d439664 --- /dev/null +++ b/docs/docsite/rst/reference_appendices/YAMLSyntax.rst @@ -0,0 +1,242 @@ +.. _yaml_syntax: + + +YAML Syntax +=========== + +This page provides a basic overview of correct YAML syntax, which is how Ansible +playbooks (our configuration management language) are expressed. + +We use YAML because it is easier for humans to read and write than other common +data formats like XML or JSON. Further, there are libraries available in most +programming languages for working with YAML. + +You may also wish to read :ref:`working_with_playbooks` at the same time to see how this +is used in practice. + + +YAML Basics +----------- + +For Ansible, nearly every YAML file starts with a list. +Each item in the list is a list of key/value pairs, commonly +called a "hash" or a "dictionary". So, we need to know how +to write lists and dictionaries in YAML. + +There's another small quirk to YAML. All YAML files (regardless of their association with Ansible or not) can optionally +begin with ``---`` and end with ``...``. This is part of the YAML format and indicates the start and end of a document. + +All members of a list are lines beginning at the same indentation level starting with a ``"- "`` (a dash and a space):: + + --- + # A list of tasty fruits + - Apple + - Orange + - Strawberry + - Mango + ... 
+ +A dictionary is represented in a simple ``key: value`` form (the colon must be followed by a space):: + + # An employee record + martin: + name: Martin D'vloper + job: Developer + skill: Elite + +More complicated data structures are possible, such as lists of dictionaries, dictionaries whose values are lists or a mix of both:: + + # Employee records + - martin: + name: Martin D'vloper + job: Developer + skills: + - python + - perl + - pascal + - tabitha: + name: Tabitha Bitumen + job: Developer + skills: + - lisp + - fortran + - erlang + +Dictionaries and lists can also be represented in an abbreviated form if you really want to:: + + --- + martin: {name: Martin D'vloper, job: Developer, skill: Elite} + ['Apple', 'Orange', 'Strawberry', 'Mango'] + +These are called "Flow collections". + +.. _truthiness: + +Ansible doesn't really use these too much, but you can also specify a boolean value (true/false) in several forms:: + + create_key: yes + needs_agent: no + knows_oop: True + likes_emacs: TRUE + uses_cvs: false + +Use lowercase 'true' or 'false' for boolean values in dictionaries if you want to be compatible with default yamllint options. + +Values can span multiple lines using ``|`` or ``>``. Spanning multiple lines using a "Literal Block Scalar" ``|`` will include the newlines and any trailing spaces. +Using a "Folded Block Scalar" ``>`` will fold newlines to spaces; it's used to make what would otherwise be a very long line easier to read and edit. +In either case the indentation will be ignored. +Examples are:: + + include_newlines: | + exactly as you see + will appear these three + lines of poetry + + fold_newlines: > + this is really a + single line of text + despite appearances + +While in the above ``>`` example all newlines are folded into spaces, there are two ways to enforce a newline to be kept:: + + fold_some_newlines: > + a + b + + c + d + e + f + same_as: "a b\nc d\n e\nf\n" + +Let's combine what we learned so far in an arbitrary YAML example. 
+This really has nothing to do with Ansible, but will give you a feel for the format:: + + --- + # An employee record + name: Martin D'vloper + job: Developer + skill: Elite + employed: True + foods: + - Apple + - Orange + - Strawberry + - Mango + languages: + perl: Elite + python: Elite + pascal: Lame + education: | + 4 GCSEs + 3 A-Levels + BSc in the Internet of Things + +That's all you really need to know about YAML to start writing `Ansible` playbooks. + +Gotchas +------- + +While you can put just about anything into an unquoted scalar, there are some exceptions. +A colon followed by a space (or newline) ``": "`` is an indicator for a mapping. +A space followed by the pound sign ``" #"`` starts a comment. + +Because of this, the following is going to result in a YAML syntax error:: + + foo: somebody said I should put a colon here: so I did + + windows_drive: c: + +...but this will work:: + + windows_path: c:\windows + +You will want to quote hash values using colons followed by a space or the end of the line:: + + foo: 'somebody said I should put a colon here: so I did' + + windows_drive: 'c:' + +...and then the colon will be preserved. + +Alternatively, you can use double quotes:: + + foo: "somebody said I should put a colon here: so I did" + + windows_drive: "c:" + +The difference between single quotes and double quotes is that in double quotes +you can use escapes:: + + foo: "a \t TAB and a \n NEWLINE" + +The list of allowed escapes can be found in the YAML Specification under "Escape Sequences" (YAML 1.1) or "Escape Characters" (YAML 1.2). + +The following is invalid YAML: + +.. code-block:: text + + foo: "an escaped \' single quote" + + +Further, Ansible uses "{{ var }}" for variables. If a value after a colon starts +with a "{", YAML will think it is a dictionary, so you must quote it, like so:: + + foo: "{{ variable }}" + +If your value starts with a quote the entire value must be quoted, not just part of it. 
Here are some additional examples of how to properly quote things:: + + foo: "{{ variable }}/additional/string/literal" + foo2: "{{ variable }}\\backslashes\\are\\also\\special\\characters" + foo3: "even if it's just a string literal it must all be quoted" + +Not valid:: + + foo: "E:\\path\\"rest\\of\\path + +In addition to ``'`` and ``"`` there are a number of characters that are special (or reserved) and cannot be used +as the first character of an unquoted scalar: ``[] {} > | * & ! % # ` @ ,``. + +You should also be aware of ``? : -``. In YAML, they are allowed at the beginning of a string if a non-space +character follows, but YAML processor implementations differ, so it's better to use quotes. + +In Flow Collections, the rules are a bit more strict:: + + a scalar in block mapping: this } is [ all , valid + + flow mapping: { key: "you { should [ use , quotes here" } + +Boolean conversion is helpful, but this can be a problem when you want a literal `yes` or other boolean values as a string. +In these cases just use quotes:: + + non_boolean: "yes" + other_string: "False" + + +YAML converts certain strings into floating-point values, such as the string +`1.0`. If you need to specify a version number (in a requirements.yml file, for +example), you will need to quote the value if it looks like a floating-point +value:: + + version: "1.0" + + +.. seealso:: + + :ref:`working_with_playbooks` + Learn what playbooks can do and how to write/run them. + `YAMLLint <http://yamllint.com/>`_ + YAML Lint (online) helps you debug YAML syntax if you are having problems + `GitHub examples directory <https://github.com/ansible/ansible-examples>`_ + Complete playbook files from the github project source + `Wikipedia YAML syntax reference <https://en.wikipedia.org/wiki/YAML>`_ + A good guide to YAML syntax + `Mailing List <https://groups.google.com/group/ansible-project>`_ + Questions? Help? Ideas? 
Stop by the list on Google Groups + `irc.freenode.net <http://irc.freenode.net>`_ + #ansible IRC chat channel and #yaml for YAML specific questions + `YAML 1.1 Specification <https://yaml.org/spec/1.1/>`_ + The Specification for YAML 1.1, which PyYAML and libyaml are currently + implementing + `YAML 1.2 Specification <https://yaml.org/spec/1.2/spec.html>`_ + For completeness, YAML 1.2 is the successor of 1.1 diff --git a/docs/docsite/rst/reference_appendices/automationhub.rst b/docs/docsite/rst/reference_appendices/automationhub.rst new file mode 100644 index 00000000..dd70b98f --- /dev/null +++ b/docs/docsite/rst/reference_appendices/automationhub.rst @@ -0,0 +1,10 @@ +.. _automation_hub: + +Ansible Automation Hub +====================== + +`Ansible Automation Hub <https://www.ansible.com/products/automation-hub>`_ is the official location to discover and download supported :ref:`collections <collections>`, included as part of an Ansible Automation Platform subscription. These content collections contain modules, plugins, roles, and playbooks in a downloadable package. + +Ansible Automation Hub gives you direct access to trusted content collections from Red Hat and Certified Partners. You can find content by topic or Ansible Partner organizations. + +Ansible Automation Hub is the downstream Red Hat supported product version of Ansible Galaxy. Find out more about Ansible Automation Hub features and how to access it at `Ansible Automation Hub <https://www.ansible.com/products/automation-hub>`_. Ansible Automation Hub is part of the Red Hat Ansible Automation Platform subscription, and comes bundled with support from Red Hat, Inc. diff --git a/docs/docsite/rst/reference_appendices/common_return_values.rst b/docs/docsite/rst/reference_appendices/common_return_values.rst new file mode 100644 index 00000000..392dc96c --- /dev/null +++ b/docs/docsite/rst/reference_appendices/common_return_values.rst @@ -0,0 +1,251 @@ +.. 
_common_return_values: + +Return Values +------------- + +.. contents:: Topics + +Ansible modules normally return a data structure that can be registered into a variable, or seen directly when output by +the `ansible` program. Each module can optionally document its own unique return values (visible through ansible-doc and on the :ref:`main docsite<ansible_documentation>`). + +This document covers return values common to all modules. + +.. note:: Some of these keys might be set by Ansible itself once it processes the module's return information. + + +Common +^^^^^^ + +backup_file +``````````` +For those modules that implement `backup=no|yes` when manipulating files, a path to the backup file created. + + .. code-block:: console + + "backup_file": "./foo.txt.32729.2020-07-30@06:24:19~" + + +changed +``````` +A boolean indicating if the task had to make changes. + + .. code-block:: console + + "changed": true + +diff +```` +Information on differences between the previous and current state. Often a dictionary with entries ``before`` and ``after``, which will then be formatted by the callback plugin to a diff view. + + .. code-block:: console + + "diff": [ + { + "after": "", + "after_header": "foo.txt (content)", + "before": "", + "before_header": "foo.txt (content)" + }, + { + "after_header": "foo.txt (file attributes)", + "before_header": "foo.txt (file attributes)" + } + +failed +`````` +A boolean that indicates if the task was failed or not. + + .. code-block:: console + + "failed": false + +invocation +`````````` +Information on how the module was invoked. + + .. 
code-block:: console + + "invocation": { + "module_args": { + "_original_basename": "foo.txt", + "attributes": null, + "backup": true, + "checksum": "da39a3ee5e6b4b0d3255bfef95601890afd80709", + "content": null, + "delimiter": null, + "dest": "./foo.txt", + "directory_mode": null, + "follow": false, + "force": true, + "group": null, + "local_follow": null, + "mode": "666", + "owner": null, + "regexp": null, + "remote_src": null, + "selevel": null, + "serole": null, + "setype": null, + "seuser": null, + "src": "/Users/foo/.ansible/tmp/ansible-tmp-1596115458.110205-105717464505158/source", + "unsafe_writes": null, + "validate": null + } + +msg +``` +A string with a generic message relayed to the user. + + .. code-block:: console + + "msg": "line added" + +rc +`` +Some modules execute command line utilities or are geared for executing commands directly (raw, shell, command, and so on), this field contains 'return code' of these utilities. + + .. code-block:: console + + "rc": 257 + +results +``````` +If this key exists, it indicates that a loop was present for the task and that it contains a list of the normal module 'result' per item. + + .. 
code-block:: console + + "results": [ + { + "ansible_loop_var": "item", + "backup": "foo.txt.83170.2020-07-30@07:03:05~", + "changed": true, + "diff": [ + { + "after": "", + "after_header": "foo.txt (content)", + "before": "", + "before_header": "foo.txt (content)" + }, + { + "after_header": "foo.txt (file attributes)", + "before_header": "foo.txt (file attributes)" + } + ], + "failed": false, + "invocation": { + "module_args": { + "attributes": null, + "backrefs": false, + "backup": true + } + }, + "item": "foo", + "msg": "line added" + }, + { + "ansible_loop_var": "item", + "backup": "foo.txt.83187.2020-07-30@07:03:05~", + "changed": true, + "diff": [ + { + "after": "", + "after_header": "foo.txt (content)", + "before": "", + "before_header": "foo.txt (content)" + }, + { + "after_header": "foo.txt (file attributes)", + "before_header": "foo.txt (file attributes)" + } + ], + "failed": false, + "invocation": { + "module_args": { + "attributes": null, + "backrefs": false, + "backup": true + } + }, + "item": "bar", + "msg": "line added" + } + ] + +skipped +``````` +A boolean that indicates if the task was skipped or not + + .. code-block:: console + + "skipped": true + +stderr +`````` +Some modules execute command line utilities or are geared for executing commands directly (raw, shell, command, and so on), this field contains the error output of these utilities. + + .. code-block:: console + + "stderr": "ls: foo: No such file or directory" + +stderr_lines +```````````` +When `stderr` is returned we also always provide this field which is a list of strings, one item per line from the original. + + .. code-block:: console + + "stderr_lines": [ + "ls: doesntexist: No such file or directory" + ] + +stdout +`````` +Some modules execute command line utilities or are geared for executing commands directly (raw, shell, command, and so on). This field contains the normal output of these utilities. + + .. code-block:: console + + "stdout": "foo!" 
+ +stdout_lines +```````````` +When `stdout` is returned, Ansible always provides a list of strings, each containing one item per line from the original output. + + .. code-block:: console + + "stdout_lines": [ + "foo!" + ] + + +.. _internal_return_values: + +Internal use +^^^^^^^^^^^^ + +These keys can be added by modules but will be removed from registered variables; they are 'consumed' by Ansible itself. + +ansible_facts +````````````` +This key should contain a dictionary which will be appended to the facts assigned to the host. These will be directly accessible and don't require using a registered variable. + +exception +````````` +This key can contain traceback information caused by an exception in a module. It will only be displayed on high verbosity (-vvv). + +warnings +```````` +This key contains a list of strings that will be presented to the user. + +deprecations +```````````` +This key contains a list of dictionaries that will be presented to the user. Keys of the dictionaries are `msg` and `version`, values are string, value for the `version` key can be an empty string. + +.. seealso:: + + :ref:`list_of_collections` + Browse existing collections, modules, and plugins + `GitHub modules directory <https://github.com/ansible/ansible/tree/devel/lib/ansible/modules>`_ + Browse source of core and extras modules + `Mailing List <https://groups.google.com/group/ansible-devel>`_ + Development mailing list + `irc.freenode.net <http://irc.freenode.net>`_ + #ansible IRC chat channel diff --git a/docs/docsite/rst/reference_appendices/faq.rst b/docs/docsite/rst/reference_appendices/faq.rst new file mode 100644 index 00000000..329e2b4c --- /dev/null +++ b/docs/docsite/rst/reference_appendices/faq.rst @@ -0,0 +1,766 @@ +.. _ansible_faq: + +Frequently Asked Questions +========================== + +Here are some commonly asked questions and their answers. + +.. _collections_transition: + +Where did all the modules go? 
++++++++++++++++++++++++++++++ + +In July, 2019, we announced that collections would be the `future of Ansible content delivery <https://www.ansible.com/blog/the-future-of-ansible-content-delivery>`_. A collection is a distribution format for Ansible content that can include playbooks, roles, modules, and plugins. In Ansible 2.9 we added support for collections. In Ansible 2.10 we extracted most modules from the main ansible/ansible repository and placed them in :ref:`collections <list_of_collections>`. Collections may be maintained by the Ansible team, by the Ansible community, or by Ansible partners. The `ansible/ansible repository <https://github.com/ansible/ansible>`_ now contains the code for basic features and functions, such as copying module code to managed nodes. This code is also known as ``ansible-base``. + +* To learn more about using collections, see :ref:`collections`. +* To learn more about developing collections, see :ref:`developing_collections`. +* To learn more about contributing to existing collections, see the individual collection repository for guidelines, or see :ref:`contributing_maintained_collections` to contribute to one of the Ansible-maintained collections. + +.. _find_my_module: + +Where did this specific module go? +++++++++++++++++++++++++++++++++++ + +If you are searching for a specific module, you can check the `runtime.yml <https://github.com/ansible/ansible/blob/devel/lib/ansible/config/ansible_builtin_runtime.yml>`_ file, which lists the first destination for each module that we extracted from the main ansible/ansible repository. Some modules have moved again since then. You can also search on `Ansible Galaxy <https://galaxy.ansible.com/>`_ or ask on one of our :ref:`IRC channels <communication_irc>`. + +.. _set_environment: + +How can I set the PATH or any other environment variable for a task or entire play? 
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +Setting environment variables can be done with the `environment` keyword. It can be used at the task or other levels in the play. + +.. code-block:: yaml + + shell: + cmd: date + environment: + LANG: fr_FR.UTF-8 + +.. code-block:: yaml + + hosts: servers + environment: + PATH: "{{ ansible_env.PATH }}:/thingy/bin" + SOME: value + +.. note:: starting in 2.0.1 the setup task from ``gather_facts`` also inherits the environment directive from the play, you might need to use the ``|default`` filter to avoid errors if setting this at play level. + +.. _faq_setting_users_and_ports: + +How do I handle different machines needing different user accounts or ports to log in with? ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +Setting inventory variables in the inventory file is the easiest way. + +For instance, suppose these hosts have different usernames and ports: + +.. code-block:: ini + + [webservers] + asdf.example.com ansible_port=5000 ansible_user=alice + jkl.example.com ansible_port=5001 ansible_user=bob + +You can also dictate the connection type to be used, if you want: + +.. code-block:: ini + + [testcluster] + localhost ansible_connection=local + /path/to/chroot1 ansible_connection=chroot + foo.example.com ansible_connection=paramiko + +You may also wish to keep these in group variables instead, or file them in a group_vars/<groupname> file. +See the rest of the documentation for more information about how to organize variables. + +.. _use_ssh: + +How do I get ansible to reuse connections, enable Kerberized SSH, or have Ansible pay attention to my local SSH config file? 
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +Switch your default connection type in the configuration file to ``ssh``, or use ``-c ssh`` to use +Native OpenSSH for connections instead of the python paramiko library. In Ansible 1.2.1 and later, ``ssh`` will be used +by default if OpenSSH is new enough to support ControlPersist as an option. + +Paramiko is great for starting out, but the OpenSSH type offers many advanced options. You will want to run Ansible +from a machine new enough to support ControlPersist, if you are using this connection type. You can still manage +older clients. If you are using RHEL 6, CentOS 6, SLES 10 or SLES 11 the version of OpenSSH is still a bit old, so +consider managing from a Fedora or openSUSE client even though you are managing older nodes, or just use paramiko. + +We keep paramiko as the default as if you are first installing Ansible on these enterprise operating systems, it offers a better experience for new users. + +.. _use_ssh_jump_hosts: + +How do I configure a jump host to access servers that I have no direct access to? ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +You can set a ``ProxyCommand`` in the +``ansible_ssh_common_args`` inventory variable. Any arguments specified in +this variable are added to the sftp/scp/ssh command line when connecting +to the relevant host(s). Consider the following inventory group: + +.. code-block:: ini + + [gatewayed] + foo ansible_host=192.0.2.1 + bar ansible_host=192.0.2.2 + +You can create `group_vars/gatewayed.yml` with the following contents:: + + ansible_ssh_common_args: '-o ProxyCommand="ssh -W %h:%p -q user@gateway.example.com"' + +Ansible will append these arguments to the command line when trying to +connect to any hosts in the group ``gatewayed``. 
(These arguments are used +in addition to any ``ssh_args`` from ``ansible.cfg``, so you do not need to +repeat global ``ControlPersist`` settings in ``ansible_ssh_common_args``.) + +Note that ``ssh -W`` is available only with OpenSSH 5.4 or later. With +older versions, it's necessary to execute ``nc %h:%p`` or some equivalent +command on the bastion host. + +With earlier versions of Ansible, it was necessary to configure a +suitable ``ProxyCommand`` for one or more hosts in ``~/.ssh/config``, +or globally by setting ``ssh_args`` in ``ansible.cfg``. + +.. _ssh_serveraliveinterval: + +How do I get Ansible to notice a dead target in a timely manner? +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +You can add ``-o ServerAliveInterval=NumberOfSeconds`` in ``ssh_args`` from ``ansible.cfg``. Without this option, +SSH and therefore Ansible will wait until the TCP connection times out. Another solution is to add ``ServerAliveInterval`` +into your global SSH configuration. A good value for ``ServerAliveInterval`` is up to you to decide; keep in mind that +``ServerAliveCountMax=3`` is the SSH default so any value you set will be tripled before terminating the SSH session. + +.. _cloud_provider_performance: + +How do I speed up run of ansible for servers from cloud providers (EC2, openstack,.. )? ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +Don't try to manage a fleet of machines of a cloud provider from your laptop. +Rather connect to a management node inside this cloud provider first and run Ansible from there. + +.. _python_interpreters: + +How do I handle not having a Python interpreter at /usr/bin/python on a remote machine? +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +While you can write Ansible modules in any language, most Ansible modules are written in Python, +including the ones central to letting Ansible work. 
+ +By default, Ansible assumes it can find a :command:`/usr/bin/python` on your remote system that is +either Python2, version 2.6 or higher or Python3, 3.5 or higher. + +Setting the inventory variable ``ansible_python_interpreter`` on any host will tell Ansible to +auto-replace the Python interpreter with that value instead. Thus, you can point to any Python you +want on the system if :command:`/usr/bin/python` on your system does not point to a compatible +Python interpreter. + +Some platforms may only have Python 3 installed by default. If it is not installed as +:command:`/usr/bin/python`, you will need to configure the path to the interpreter via +``ansible_python_interpreter``. Although most core modules will work with Python 3, there may be some +special purpose ones which do not or you may encounter a bug in an edge case. As a temporary +workaround you can install Python 2 on the managed host and configure Ansible to use that Python via +``ansible_python_interpreter``. If there's no mention in the module's documentation that the module +requires Python 2, you can also report a bug on our `bug tracker +<https://github.com/ansible/ansible/issues>`_ so that the incompatibility can be fixed in a future release. + +Do not replace the shebang lines of your python modules. Ansible will do this for you automatically at deploy time. + +Also, this works for ANY interpreter, for example ruby: ``ansible_ruby_interpreter``, perl: ``ansible_perl_interpreter``, and so on, +so you can use this for custom modules written in any scripting language and control the interpreter location. + +Keep in mind that if you put ``env`` in your module shebang line (``#!/usr/bin/env <other>``), +this facility will be ignored so you will be at the mercy of the remote `$PATH`. + +.. _installation_faqs: + +How do I handle the package dependencies required by Ansible package dependencies during Ansible installation ? 
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +While installing Ansible, sometimes you may encounter errors such as `No package 'libffi' found` or `fatal error: Python.h: No such file or directory` +These errors are generally caused by the missing packages, which are dependencies of the packages required by Ansible. +For example, `libffi` package is dependency of `pynacl` and `paramiko` (Ansible -> paramiko -> pynacl -> libffi). + +In order to solve these kinds of dependency issues, you might need to install required packages using +the OS native package managers, such as `yum`, `dnf`, or `apt`, or as mentioned in the package installation guide. + +Refer to the documentation of the respective package for such dependencies and their installation methods. + +Common Platform Issues +++++++++++++++++++++++ + +What customer platforms does Red Hat support? +--------------------------------------------- + +A number of them! For a definitive list please see this `Knowledge Base article <https://access.redhat.com/articles/3168091>`_. + +Running in a virtualenv +----------------------- + +You can install Ansible into a virtualenv on the controller quite simply: + +.. code-block:: shell + + $ virtualenv ansible + $ source ./ansible/bin/activate + $ pip install ansible + +If you want to run under Python 3 instead of Python 2 you may want to change that slightly: + +.. code-block:: shell + + $ virtualenv -p python3 ansible + $ source ./ansible/bin/activate + $ pip install ansible + +If you need to use any libraries which are not available via pip (for instance, SELinux Python +bindings on systems such as Red Hat Enterprise Linux or Fedora that have SELinux enabled), then you +need to install them into the virtualenv. There are two methods: + +* When you create the virtualenv, specify ``--system-site-packages`` to make use of any libraries + installed in the system's Python: + + .. 
code-block:: shell + + $ virtualenv ansible --system-site-packages + +* Copy those files in manually from the system. For instance, for SELinux bindings you might do: + + .. code-block:: shell + + $ virtualenv ansible --system-site-packages + $ cp -r -v /usr/lib64/python3.*/site-packages/selinux/ ./py3-ansible/lib64/python3.*/site-packages/ + $ cp -v /usr/lib64/python3.*/site-packages/*selinux*.so ./py3-ansible/lib64/python3.*/site-packages/ + + +Running on BSD +-------------- + +.. seealso:: :ref:`working_with_bsd` + + +Running on Solaris +------------------ + +By default, Solaris 10 and earlier run a non-POSIX shell which does not correctly expand the default +tmp directory Ansible uses ( :file:`~/.ansible/tmp`). If you see module failures on Solaris machines, this +is likely the problem. There are several workarounds: + +* You can set ``remote_tmp`` to a path that will expand correctly with the shell you are using + (see the plugin documentation for :ref:`C shell<csh_shell>`, :ref:`fish shell<fish_shell>`, + and :ref:`Powershell<powershell_shell>`). For example, in the ansible config file you can set:: + + remote_tmp=$HOME/.ansible/tmp + + In Ansible 2.5 and later, you can also set it per-host in inventory like this:: + + solaris1 ansible_remote_tmp=$HOME/.ansible/tmp + +* You can set :ref:`ansible_shell_executable<ansible_shell_executable>` to the path to a POSIX compatible shell. For + instance, many Solaris hosts have a POSIX shell located at :file:`/usr/xpg4/bin/sh` so you can set + this in inventory like so:: + + solaris1 ansible_shell_executable=/usr/xpg4/bin/sh + + (bash, ksh, and zsh should also be POSIX compatible if you have any of those installed). + +Running on z/OS +--------------- + +There are a few common errors that one might run into when trying to execute Ansible on z/OS as a target. + +* Version 2.7.6 of python for z/OS will not work with Ansible because it represents strings internally as EBCDIC. 
+ + To get around this limitation, download and install a later version of `python for z/OS <https://www.rocketsoftware.com/zos-open-source>`_ (2.7.13 or 3.6.1) that represents strings internally as ASCII. Version 2.7.13 is verified to work. + +* When ``pipelining = False`` in `/etc/ansible/ansible.cfg` then Ansible modules are transferred in binary mode via sftp; however, execution of python fails with + + .. error:: + SyntaxError: Non-UTF-8 code starting with \'\\x83\' in file /a/user1/.ansible/tmp/ansible-tmp-1548232945.35-274513842609025/AnsiballZ_stat.py on line 1, but no encoding declared; see https://python.org/dev/peps/pep-0263/ for details + + To fix it set ``pipelining = True`` in `/etc/ansible/ansible.cfg`. + +* Python interpreter cannot be found in default location ``/usr/bin/python`` on target host. + + .. error:: + /usr/bin/python: EDC5129I No such file or directory + + To fix this set the path to the python installation in your inventory like so:: + + zos1 ansible_python_interpreter=/usr/lpp/python/python-2017-04-12-py27/python27/bin/python + +* Start of python fails with ``The module libpython2.7.so was not found.`` + + .. error:: + EE3501S The module libpython2.7.so was not found. + + On z/OS, you must execute python from gnu bash. If gnu bash is installed at ``/usr/lpp/bash``, you can fix this in your inventory by specifying an ``ansible_shell_executable``:: + + zos1 ansible_shell_executable=/usr/lpp/bash/bin/bash + + +Running under fakeroot +---------------------- + +Some issues arise as ``fakeroot`` does not create a full nor POSIX compliant system by default. +It is known that it will not correctly expand the default tmp directory Ansible uses (:file:`~/.ansible/tmp`). +If you see module failures, this is likely the problem. +The simple workaround is to set ``remote_tmp`` to a path that will expand correctly (see documentation of the shell plugin you are using for specifics). 
+ +For example, in the ansible config file (or via environment variable) you can set:: + + remote_tmp=$HOME/.ansible/tmp + + + +.. _use_roles: + +What is the best way to make content reusable/redistributable? +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +If you have not done so already, read all about "Roles" in the playbooks documentation. This helps you make playbook content +self-contained, and works well with things like git submodules for sharing content with others. + +If some of these plugin types look strange to you, see the API documentation for more details about ways Ansible can be extended. + +.. _configuration_file: + +Where does the configuration file live and what can I configure in it? +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + + +See :ref:`intro_configuration`. + +.. _who_would_ever_want_to_disable_cowsay_but_ok_here_is_how: + +How do I disable cowsay? +++++++++++++++++++++++++ + +If cowsay is installed, Ansible takes it upon itself to make your day happier when running playbooks. If you decide +that you would like to work in a professional cow-free environment, you can either uninstall cowsay, set ``nocows=1`` +in ``ansible.cfg``, or set the :envvar:`ANSIBLE_NOCOWS` environment variable: + +.. code-block:: shell-session + + export ANSIBLE_NOCOWS=1 + +.. _browse_facts: + +How do I see a list of all of the ansible\_ variables? +++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +Ansible by default gathers "facts" about the machines under management, and these facts can be accessed in playbooks +and in templates. To see a list of all of the facts that are available about a machine, you can run the ``setup`` module +as an ad-hoc action: + +.. code-block:: shell-session + + ansible -m setup hostname + +This will print out a dictionary of all of the facts that are available for that particular host. 
You might want to pipe +the output to a pager. This does NOT include inventory variables or internal 'magic' variables. See the next question +if you need more than just 'facts'. + + +.. _browse_inventory_vars: + +How do I see all the inventory variables defined for my host? ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +By running the following command, you can see inventory variables for a host: + +.. code-block:: shell-session + + ansible-inventory --list --yaml + + +.. _browse_host_vars: + +How do I see all the variables specific to my host? ++++++++++++++++++++++++++++++++++++++++++++++++++++ + +To see all host specific variables, which might include facts and other sources: + +.. code-block:: shell-session + + ansible -m debug -a "var=hostvars['hostname']" localhost + +Unless you are using a fact cache, you normally need to use a play that gathers facts first, for facts included in the task above. + + +.. _host_loops: + +How do I loop over a list of hosts in a group, inside of a template? +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +A pretty common pattern is to iterate over a list of hosts inside of a host group, perhaps to populate a template configuration +file with a list of servers. To do this, you can just access the "$groups" dictionary in your template, like this: + +.. code-block:: jinja + + {% for host in groups['db_servers'] %} + {{ host }} + {% endfor %} + +If you need to access facts about these hosts, for instance, the IP address of each hostname, +you need to make sure that the facts have been populated. For example, make sure you have a play that talks to db_servers:: + + - hosts: db_servers + tasks: + - debug: msg="doesn't matter what you do, just that they were talked to previously." + +Then you can use the facts inside your template, like this: + +.. code-block:: jinja + + {% for host in groups['db_servers'] %} + {{ hostvars[host]['ansible_eth0']['ipv4']['address'] }} + {% endfor %} + +.. 
_programatic_access_to_a_variable: + +How do I access a variable name programmatically? ++++++++++++++++++++++++++++++++++++++++++++++++++ + +An example may come up where we need to get the ipv4 address of an arbitrary interface, where the interface to be used may be supplied +via a role parameter or other input. Variable names can be built by adding strings together, like so: + +.. code-block:: jinja + + {{ hostvars[inventory_hostname]['ansible_' + which_interface]['ipv4']['address'] }} + +The trick about going through hostvars is necessary because it's a dictionary of the entire namespace of variables. ``inventory_hostname`` +is a magic variable that indicates the current host you are looping over in the host loop. + +In the example above, if your interface names have dashes, you must replace them with underscores: + +.. code-block:: jinja + + {{ hostvars[inventory_hostname]['ansible_' + which_interface | replace('-', '_') ]['ipv4']['address'] }} + +Also see dynamic_variables_. + + +.. _access_group_variable: + +How do I access a group variable? ++++++++++++++++++++++++++++++++++ + +Technically, you don't, Ansible does not really use groups directly. Groups are labels for host selection and a way to bulk assign variables, +they are not a first class entity, Ansible only cares about Hosts and Tasks. + +That said, you could just access the variable by selecting a host that is part of that group, see first_host_in_a_group_ below for an example. + + +.. _first_host_in_a_group: + +How do I access a variable of the first host in a group? +++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +What happens if we want the ip address of the first webserver in the webservers group? Well, we can do that too. Note that if we +are using dynamic inventory, which host is the 'first' may not be consistent, so you wouldn't want to do this unless your inventory +is static and predictable. 
(If you are using :ref:`ansible_tower`, it will use database order, so this isn't a problem even if you are using cloud +based inventory scripts). + +Anyway, here's the trick: + +.. code-block:: jinja + + {{ hostvars[groups['webservers'][0]]['ansible_eth0']['ipv4']['address'] }} + +Notice how we're pulling out the hostname of the first machine of the webservers group. If you are doing this in a template, you +could use the Jinja2 '#set' directive to simplify this, or in a playbook, you could also use set_fact:: + + - set_fact: headnode={{ groups['webservers'][0] }} + + - debug: msg={{ hostvars[headnode].ansible_eth0.ipv4.address }} + +Notice how we interchanged the bracket syntax for dots -- that can be done anywhere. + +.. _file_recursion: + +How do I copy files recursively onto a target host? ++++++++++++++++++++++++++++++++++++++++++++++++++++ + +The ``copy`` module has a recursive parameter. However, take a look at the ``synchronize`` module if you want to do something more efficient +for a large number of files. The ``synchronize`` module wraps rsync. See the module index for info on both of these modules. + +.. _shell_env: + +How do I access shell environment variables? +++++++++++++++++++++++++++++++++++++++++++++ + + +**On controller machine :** Access existing variables from controller use the ``env`` lookup plugin. +For example, to access the value of the HOME environment variable on the management machine:: + + --- + # ... + vars: + local_home: "{{ lookup('env','HOME') }}" + + +**On target machines :** Environment variables are available via facts in the ``ansible_env`` variable: + +.. code-block:: jinja + + {{ ansible_env.HOME }} + +If you need to set environment variables for TASK execution, see :ref:`playbooks_environment` +in the :ref:`Advanced Playbooks <playbooks_special_topics>` section. +There are several ways to set environment variables on your target machines. 
You can use the +:ref:`template <template_module>`, :ref:`replace <replace_module>`, or :ref:`lineinfile <lineinfile_module>` +modules to introduce environment variables into files. The exact files to edit vary depending on your OS +and distribution and local configuration. + +.. _user_passwords: + +How do I generate encrypted passwords for the user module? +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +Ansible ad-hoc command is the easiest option: + +.. code-block:: shell-session + + ansible all -i localhost, -m debug -a "msg={{ 'mypassword' | password_hash('sha512', 'mysecretsalt') }}" + +The ``mkpasswd`` utility that is available on most Linux systems is also a great option: + +.. code-block:: shell-session + + mkpasswd --method=sha-512 + + +If this utility is not installed on your system (for example, you are using macOS) then you can still easily +generate these passwords using Python. First, ensure that the `Passlib <https://foss.heptapod.net/python-libs/passlib/-/wikis/home>`_ +password hashing library is installed: + +.. code-block:: shell-session + + pip install passlib + +Once the library is ready, SHA512 password values can then be generated as follows: + +.. code-block:: shell-session + + python -c "from passlib.hash import sha512_crypt; import getpass; print(sha512_crypt.using(rounds=5000).hash(getpass.getpass()))" + +Use the integrated :ref:`hash_filters` to generate a hashed version of a password. +You shouldn't put plaintext passwords in your playbook or host_vars; instead, use :ref:`playbooks_vault` to encrypt sensitive data. + +In OpenBSD, a similar option is available in the base system called ``encrypt (1)`` + +.. _dot_or_array_notation: + +Ansible allows dot notation and array notation for variables. Which notation should I use? +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +The dot notation comes from Jinja and works fine for variables without special +characters. 
If your variable contains dots (.), colons (:), or dashes (-), if +a key begins and ends with two underscores, or if a key uses any of the known +public attributes, it is safer to use the array notation. See :ref:`playbooks_variables` +for a list of the known public attributes. + +.. code-block:: jinja + + item[0]['checksum:md5'] + item['section']['2.1'] + item['region']['Mid-Atlantic'] + It is {{ temperature['Celsius']['-3'] }} outside. + +Also array notation allows for dynamic variable composition, see dynamic_variables_. + +Another problem with 'dot notation' is that some keys can cause problems because they collide with attributes and methods of python dictionaries. + +.. code-block:: jinja + + item.update # this breaks if item is a dictionary, as 'update()' is a python method for dictionaries + item['update'] # this works + + +.. _argsplat_unsafe: + +When is it unsafe to bulk-set task arguments from a variable? ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + + +You can set all of a task's arguments from a dictionary-typed variable. This +technique can be useful in some dynamic execution scenarios. However, it +introduces a security risk. We do not recommend it, so Ansible issues a +warning when you do something like this:: + + #... + vars: + usermod_args: + name: testuser + state: present + update_password: always + tasks: + - user: '{{ usermod_args }}' + +This particular example is safe. However, constructing tasks like this is +risky because the parameters and values passed to ``usermod_args`` could +be overwritten by malicious values in the ``host facts`` on a compromised +target machine. 
To mitigate this risk: + +* set bulk variables at a level of precedence greater than ``host facts`` in the order of precedence + found in :ref:`ansible_variable_precedence` (the example above is safe because play vars take + precedence over facts) +* disable the :ref:`inject_facts_as_vars` configuration setting to prevent fact values from colliding + with variables (this will also disable the original warning) + + +.. _commercial_support: + +Can I get training on Ansible? +++++++++++++++++++++++++++++++ + +Yes! See our `services page <https://www.ansible.com/products/consulting>`_ for information on our services +and training offerings. Email `info@ansible.com <mailto:info@ansible.com>`_ for further details. + +We also offer free web-based training classes on a regular basis. See our +`webinar page <https://www.ansible.com/resources/webinars-training>`_ for more info on upcoming webinars. + + +.. _web_interface: + +Is there a web interface / REST API / GUI? +++++++++++++++++++++++++++++++++++++++++++++ + +Yes! Ansible, Inc makes a great product that makes Ansible even more powerful and easy to use. See :ref:`ansible_tower`. + + +.. _keep_secret_data: + +How do I keep secret data in my playbook? ++++++++++++++++++++++++++++++++++++++++++ + +If you would like to keep secret data in your Ansible content and still share it publicly or keep things in source control, see :ref:`playbooks_vault`. + +If you have a task that you don't want to show the results or command given to it when using -v (verbose) mode, the following task or playbook attribute can be useful:: + + - name: secret task + shell: /usr/bin/do_something --value={{ secret_value }} + no_log: True + +This can be used to keep verbose output but hide sensitive information from others who would otherwise like to be able to see the output. + +The ``no_log`` attribute can also apply to an entire play:: + + - hosts: all + no_log: True + +Though this will make the play somewhat difficult to debug. 
It's recommended that this +be applied to single tasks only, once a playbook is completed. Note that the use of the +``no_log`` attribute does not prevent data from being shown when debugging Ansible itself via +the :envvar:`ANSIBLE_DEBUG` environment variable. + + +.. _when_to_use_brackets: +.. _dynamic_variables: +.. _interpolate_variables: + +When should I use {{ }}? Also, how to interpolate variables or dynamic variable names ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +A steadfast rule is 'always use ``{{ }}`` except when ``when:``'. +Conditionals are always run through Jinja2 as to resolve the expression, +so ``when:``, ``failed_when:`` and ``changed_when:`` are always templated and you should avoid adding ``{{ }}``. + +In most other cases you should always use the brackets, even if previously you could use variables without +specifying (like ``loop`` or ``with_`` clauses), as this made it hard to distinguish between an undefined variable and a string. + +Another rule is 'moustaches don't stack'. We often see this: + +.. code-block:: jinja + + {{ somevar_{{other_var}} }} + +The above DOES NOT WORK as you expect, if you need to use a dynamic variable use the following as appropriate: + +.. code-block:: jinja + + {{ hostvars[inventory_hostname]['somevar_' + other_var] }} + +For 'non host vars' you can use the :ref:`vars lookup<vars_lookup>` plugin: + +.. code-block:: jinja + + {{ lookup('vars', 'somevar_' + other_var) }} + + +.. _why_no_wheel: + +Why don't you ship ansible in wheel format (or other packaging format) ? +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +In most cases it has to do with maintainability. There are many ways to ship software and we do not have +the resources to release Ansible on every platform. +In some cases there are technical issues. For example, our dependencies are not present on Python Wheels. + +.. 
_ansible_host_delegated: + +How do I get the original ansible_host when I delegate a task? +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +As the documentation states, connection variables are taken from the ``delegate_to`` host so ``ansible_host`` is overwritten, +but you can still access the original via ``hostvars``:: + + original_host: "{{ hostvars[inventory_hostname]['ansible_host'] }}" + +This works for all overridden connection variables, like ``ansible_user``, ``ansible_port``, and so on. + + +.. _scp_protocol_error_filename: + +How do I fix 'protocol error: filename does not match request' when fetching a file? +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +Since release ``7.9p1`` of OpenSSH there is a `bug <https://bugzilla.mindrot.org/show_bug.cgi?id=2966>`_ +in the SCP client that can trigger this error on the Ansible controller when using SCP as the file transfer mechanism:: + + failed to transfer file to /tmp/ansible/file.txt\r\nprotocol error: filename does not match request + +In these releases, SCP tries to validate that the path of the file to fetch matches the requested path. +The validation +fails if the remote filename requires quotes to escape spaces or non-ascii characters in its path. To avoid this error: + +* Use SFTP instead of SCP by setting ``scp_if_ssh`` to ``smart`` (which tries SFTP first) or to ``False``. 
You can do this in one of five ways: + * Rely on the default setting, which is ``smart`` - this works if ``scp_if_ssh`` is not explicitly set anywhere + * Set a :ref:`host variable <host_variables>` or :ref:`group variable <group_variables>` in inventory: ``ansible_scp_if_ssh: False`` + * Set an environment variable on your control node: ``export ANSIBLE_SCP_IF_SSH=False`` + * Pass an environment variable when you run Ansible: ``ANSIBLE_SCP_IF_SSH=smart ansible-playbook`` + * Modify your ``ansible.cfg`` file: add ``scp_if_ssh=False`` to the ``[ssh_connection]`` section +* If you must use SCP, set the ``-T`` arg to tell the SCP client to ignore path validation. You can do this in one of three ways: + * Set a :ref:`host variable <host_variables>` or :ref:`group variable <group_variables>`: ``ansible_scp_extra_args=-T``, + * Export or pass an environment variable: ``ANSIBLE_SCP_EXTRA_ARGS=-T`` + * Modify your ``ansible.cfg`` file: add ``scp_extra_args=-T`` to the ``[ssh_connection]`` section + +.. note:: If you see an ``invalid argument`` error when using ``-T``, then your SCP client is not performing filename validation and will not trigger this error. + +.. _docs_contributions: + +How do I submit a change to the documentation? +++++++++++++++++++++++++++++++++++++++++++++++ + +Documentation for Ansible is kept in the main project git repository, and complete instructions +for contributing can be found in the docs README `viewable on GitHub <https://github.com/ansible/ansible/blob/devel/docs/docsite/README.md>`_. Thanks! + +.. _i_dont_see_my_question: + +I don't see my question here +++++++++++++++++++++++++++++ + +Please see the section below for a link to IRC and the Google Group, where you can ask your question. + +.. seealso:: + + :ref:`working_with_playbooks` + An introduction to playbooks + :ref:`playbooks_best_practices` + Tips and tricks for playbooks + `User Mailing List <https://groups.google.com/group/ansible-project>`_ + Have a question? 
Stop by the google group! + `irc.freenode.net <http://irc.freenode.net>`_ + #ansible IRC chat channel diff --git a/docs/docsite/rst/reference_appendices/general_precedence.rst b/docs/docsite/rst/reference_appendices/general_precedence.rst new file mode 100644 index 00000000..90494b69 --- /dev/null +++ b/docs/docsite/rst/reference_appendices/general_precedence.rst @@ -0,0 +1,140 @@ +.. _general_precedence_rules: + +Controlling how Ansible behaves: precedence rules +================================================= + +To give you maximum flexibility in managing your environments, Ansible offers many ways to control how Ansible behaves: how it connects to managed nodes, how it works once it has connected. +If you use Ansible to manage a large number of servers, network devices, and cloud resources, you may define Ansible behavior in several different places and pass that information to Ansible in several different ways. +This flexibility is convenient, but it can backfire if you do not understand the precedence rules. + +These precedence rules apply to any setting that can be defined in multiple ways (by configuration settings, command-line options, playbook keywords, variables). + +.. contents:: + :local: + +Precedence categories +--------------------- + +Ansible offers four sources for controlling its behavior. In order of precedence from lowest (most easily overridden) to highest (overrides all others), the categories are: + + * Configuration settings + * Command-line options + * Playbook keywords + * Variables + +Each category overrides any information from all lower-precedence categories. For example, a playbook keyword will override any configuration setting. + +Within each precedence category, specific rules apply. However, generally speaking, 'last defined' wins and overrides any previous definitions. 
+ +Configuration settings +^^^^^^^^^^^^^^^^^^^^^^ + +:ref:`Configuration settings<ansible_configuration_settings>` include both values from the ``ansible.cfg`` file and environment variables. Within this category, values set in configuration files have lower precedence. Ansible uses the first ``ansible.cfg`` file it finds, ignoring all others. Ansible searches for ``ansible.cfg`` in these locations in order: + + * ``ANSIBLE_CONFIG`` (environment variable if set) + * ``ansible.cfg`` (in the current directory) + * ``~/.ansible.cfg`` (in the home directory) + * ``/etc/ansible/ansible.cfg`` + +Environment variables have a higher precedence than entries in ``ansible.cfg``. If you have environment variables set on your control node, they override the settings in whichever ``ansible.cfg`` file Ansible loads. The value of any given environment variable follows normal shell precedence: the last value defined overwrites previous values. + +Command-line options +^^^^^^^^^^^^^^^^^^^^ + +Any command-line option will override any configuration setting. + +When you type something directly at the command line, you may feel that your hand-crafted values should override all others, but Ansible does not work that way. Command-line options have low precedence - they override configuration only. They do not override playbook keywords, variables from inventory or variables from playbooks. + +You can override all other settings from all other sources in all other precedence categories at the command line by :ref:`general_precedence_extra_vars`, but that is not a command-line option, it is a way of passing a :ref:`variable<general_precedence_variables>`. + +At the command line, if you pass multiple values for a parameter that accepts only a single value, the last defined value wins. For example, this :ref:`ad-hoc task<intro_adhoc>` will connect as ``carol``, not as ``mike``:: + + ansible -u mike -m ping myhost -u carol + +Some parameters allow multiple values. 
In this case, Ansible will append all values from the hosts listed in inventory files inventory1 and inventory2:: + + ansible -i /path/inventory1 -i /path/inventory2 -m ping all + +The help for each :ref:`command-line tool<command_line_tools>` lists available options for that tool. + +Playbook keywords +^^^^^^^^^^^^^^^^^ + +Any :ref:`playbook keyword<playbook_keywords>` will override any command-line option and any configuration setting. + +Within playbook keywords, precedence flows with the playbook itself; the more specific wins against the more general: + +- play (most general) +- blocks/includes/imports/roles (optional and can contain tasks and each other) +- tasks (most specific) + +A simple example:: + + - hosts: all + connection: ssh + tasks: + - name: This task uses ssh. + ping: + + - name: This task uses paramiko. + connection: paramiko + ping: + +In this example, the ``connection`` keyword is set to ``ssh`` at the play level. The first task inherits that value, and connects using ``ssh``. The second task inherits that value, overrides it, and connects using ``paramiko``. +The same logic applies to blocks and roles as well. All tasks, blocks, and roles within a play inherit play-level keywords; any task, block, or role can override any keyword by defining a different value for that keyword within the task, block, or role. + +Remember that these are KEYWORDS, not variables. Both playbooks and variable files are defined in YAML but they have different significance. +Playbooks are the command or 'state description' structure for Ansible, variables are data we use to help make playbooks more dynamic. + +.. _general_precedence_variables: + +Variables +^^^^^^^^^ + +Any variable will override any playbook keyword, any command-line option, and any configuration setting. + +Variables that have equivalent playbook keywords, command-line options, and configuration settings are known as :ref:`connection_variables`. 
Originally designed for connection parameters, this category has expanded to include other core variables like the temporary directory and the python interpreter. + +Connection variables, like all variables, can be set in multiple ways and places. You can define variables for hosts and groups in :ref:`inventory<intro_inventory>`. You can define variables for tasks and plays in ``vars:`` blocks in :ref:`playbooks<about_playbooks>`. However, they are still variables - they are data, not keywords or configuration settings. Variables that override playbook keywords, command-line options, and configuration settings follow the same rules of :ref:`variable precedence <ansible_variable_precedence>` as any other variables. + +When set in a playbook, variables follow the same inheritance rules as playbook keywords. You can set a value for the play, then override it in a task, block, or role:: + + - hosts: cloud + gather_facts: false + become: yes + vars: + ansible_become_user: admin + tasks: + - name: This task uses admin as the become user. + dnf: + name: some-service + state: latest + - block: + - name: This task uses service-admin as the become user. + # a task to configure the new service + - name: This task also uses service-admin as the become user, defined in the block. + # second task to configure the service + vars: + ansible_become_user: service-admin + - name: This task (outside of the block) uses admin as the become user again. + service: + name: some-service + state: restarted + +Variable scope: how long is a value available? +"""""""""""""""""""""""""""""""""""""""""""""" + +Variable values set in a playbook exist only within the playbook object that defines them. These 'playbook object scope' variables are not available to subsequent objects, including other plays. 
+ +Variable values associated directly with a host or group, including variables defined in inventory, by vars plugins, or using modules like :ref:`set_fact<set_fact_module>` and :ref:`include_vars<include_vars_module>`, are available to all plays. These 'host scope' variables are also available via the ``hostvars[]`` dictionary. + +.. _general_precedence_extra_vars: + +Using ``-e`` extra variables at the command line +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +To override all other settings in all other categories, you can use extra variables: ``--extra-vars`` or ``-e`` at the command line. Values passed with ``-e`` are variables, not command-line options, and they will override configuration settings, command-line options, and playbook keywords as well as variables set elsewhere. For example, this task will connect as ``brian`` not as ``carol``:: + + ansible -u carol -e 'ansible_user=brian' -a whoami all + +You must specify both the variable name and the value with ``--extra-vars``. diff --git a/docs/docsite/rst/reference_appendices/glossary.rst b/docs/docsite/rst/reference_appendices/glossary.rst new file mode 100644 index 00000000..c1170c6d --- /dev/null +++ b/docs/docsite/rst/reference_appendices/glossary.rst @@ -0,0 +1,528 @@ +Glossary +======== + +The following is a list (and re-explanation) of term definitions used elsewhere in the Ansible documentation. + +Consult the documentation home page for the full documentation and to see the terms in context, but this should be a good resource +to check your knowledge of Ansible's components and understand how they fit together. It's something you might wish to read for review or +when a term comes up on the mailing list. + +.. glossary:: + + Action + An action is a part of a task that specifies which of the modules to + run and which arguments to pass to that module. Each task can have + only one action, but it may also have other parameters. 
+ + Ad Hoc + Refers to running Ansible to perform some quick command, using + :command:`/usr/bin/ansible`, rather than the :term:`orchestration` + language, which is :command:`/usr/bin/ansible-playbook`. An example + of an ad hoc command might be rebooting 50 machines in your + infrastructure. Anything you can do ad hoc can be accomplished by + writing a :term:`playbook <playbooks>` and playbooks can also glue + lots of other operations together. + + Ansible (the package) + A software package (Python, deb, rpm, and so on) that contains ansible-base and a select group of collections. Playbooks that worked with Ansible 2.9 should still work with the Ansible 2.10 package. See the :file:`ansible-<version>.build` file in the release-specific directory at `ansible-build-data <https://github.com/ansible-community/ansible-build-data>`_ for a list of collections included in Ansible, as well as the included ``ansible-base`` version. + + ansible-base + New for 2.10. The installable package (RPM/Python/Deb package) generated from the `ansible/ansible repository <https://github.com/ansible/ansible>`_. Contains the command-line tools and the code for basic features and functions, such as copying module code to managed nodes. The ``ansible-base`` package includes a few modules and plugins and allows you to add others by installing collections. + + Ansible Galaxy + An `online resource <https://galaxy.ansible.com>`_ for finding and sharing Ansible community content. Also, the command-line utility that lets users install individual Ansible Collections, for example ``ansible-galaxy install community.crypto``. + + Async + Refers to a task that is configured to run in the background rather + than waiting for completion. If you have a long process that would + run longer than the SSH timeout, it would make sense to launch that + task in async mode. 
Async modes can poll for completion every so many + seconds or can be configured to "fire and forget", in which case + Ansible will not even check on the task again; it will just kick it + off and proceed to future steps. Async modes work with both + :command:`/usr/bin/ansible` and :command:`/usr/bin/ansible-playbook`. + + Callback Plugin + Refers to some user-written code that can intercept results from + Ansible and do something with them. Some supplied examples in the + GitHub project perform custom logging, send email, or even play sound + effects. + + Check Mode + Refers to running Ansible with the ``--check`` option, which does not + make any changes on the remote systems, but only outputs the changes + that might occur if the command ran without this flag. This is + analogous to so-called "dry run" modes in other systems, though the + user should be warned that this does not take into account unexpected + command failures or cascade effects (which is true of similar modes in + other systems). Use this to get an idea of what might happen, but do + not substitute it for a good staging environment. + + Collection + A packaging format for bundling and distributing Ansible content, including plugins, roles, modules, and more. Collections release independent of other collections or ``ansible-base`` so features can be available sooner to users. Some collections are packaged with Ansible (version 2.10 or later). You can install other collections (or other versions of collections) with ``ansible-galaxy collection install <namespace.collection>``. + + Collection name + The second part of a Fully Qualified Collection Name. The collection name divides the collection namespace and usually reflects the function of the collection content. For example, the ``cisco`` namespace might contain ``cisco.ios``, ``cisco.aci``, and ``cisco.nxos``, with content for managing the different network devices maintained by Cisco. 
+ + community.general (collection) + A special collection managed by the Ansible Community Team containing all the modules and plugins which shipped in Ansible 2.9 that do not have their own dedicated Collection. See `community.general <https://galaxy.ansible.com/community/general>`_ on Galaxy. + + community.network (collection) + Similar to ``community.general``, focusing on network content. `community.network <https://galaxy.ansible.com/community/network>`_ on Galaxy. + + Connection Plugin + By default, Ansible talks to remote machines through pluggable + libraries. Ansible uses native OpenSSH (:term:`SSH (Native)`) or + a Python implementation called :term:`paramiko`. OpenSSH is preferred + if you are using a recent version, and also enables some features like + Kerberos and jump hosts. This is covered in the :ref:`getting + started section <remote_connection_information>`. There are also + other connection types like ``accelerate`` mode, which must be + bootstrapped over one of the SSH-based connection types but is very + fast, and local mode, which acts on the local system. Users can also + write their own connection plugins. + + Conditionals + A conditional is an expression that evaluates to true or false that + decides whether a given task is executed on a given machine or not. + Ansible's conditionals are powered by the 'when' statement, which is + discussed in the :ref:`working_with_playbooks`. + + Declarative + An approach to achieving a task that uses a description of the + final state rather than a description of the sequence of steps + necessary to achieve that state. For a real world example, a + declarative specification of a task would be: "put me in California". + Depending on your current location, the sequence of steps to get you to + California may vary, and if you are already in California, nothing + at all needs to be done. Ansible's Resources are declarative; it + figures out the steps needed to achieve the final state. 
It also lets + you know whether or not any steps needed to be taken to get to the + final state. + + Diff Mode + A ``--diff`` flag can be passed to Ansible to show what changed on + modules that support it. You can combine it with ``--check`` to get a + good 'dry run'. File diffs are normally in unified diff format. + + Executor + A core software component of Ansible that is the power behind + :command:`/usr/bin/ansible` directly -- and corresponds to the + invocation of each task in a :term:`playbook <playbooks>`. The + Executor is something Ansible developers may talk about, but it's not + really user land vocabulary. + + Facts + Facts are simply things that are discovered about remote nodes. While + they can be used in :term:`playbooks` and templates just like + variables, facts are things that are inferred, rather than set. Facts + are automatically discovered by Ansible when running plays by + executing the internal :ref:`setup module <setup_module>` on the remote nodes. You + never have to call the setup module explicitly, it just runs, but it + can be disabled to save time if it is not needed or you can tell + ansible to collect only a subset of the full facts via the + ``gather_subset:`` option. For the convenience of users who are + switching from other configuration management systems, the fact module + will also pull in facts from the :program:`ohai` and :program:`facter` + tools if they are installed. These are fact libraries from Chef and + Puppet, respectively. (These may also be disabled via + ``gather_subset:``) + + Filter Plugin + A filter plugin is something that most users will never need to + understand. These allow for the creation of new :term:`Jinja2` + filters, which are more or less only of use to people who know what + Jinja2 filters are. If you need them, you can learn how to write them + in the :ref:`API docs section <developing_filter_plugins>`. 
+ + Forks + Ansible talks to remote nodes in parallel and the level of parallelism + can be set either by passing ``--forks`` or editing the default in + a configuration file. The default is a very conservative five (5) + forks, though if you have a lot of RAM, you can easily set this to + a value like 50 for increased parallelism. + + Fully Qualified Collection Name (FQCN) + The full definition of a module, plugin, or role hosted within a collection, in the form <namespace.collection.content_name>. Allows a Playbook to refer to a specific module or plugin from a specific source in an unambiguous manner, for example, ``community.grafana.grafana_dashboard``. The FQCN is required when you want to specify the exact source of a plugin. For example, if multiple collections contain a module plugin called ``user``, the FQCN specifies which one to use for a given task. When you have multiple collections installed, the FQCN is always the explicit and authoritative indicator of which collection to search for the correct plugin for each task. + + Gather Facts (Boolean) + :term:`Facts` are mentioned above. Sometimes when running a multi-play + :term:`playbook <playbooks>`, it is desirable to have some plays that + don't bother with fact computation if they aren't going to need to + utilize any of these values. Setting ``gather_facts: False`` on + a playbook allows this implicit fact gathering to be skipped. + + Globbing + Globbing is a way to select lots of hosts based on wildcards, rather + than the name of the host specifically, or the name of the group they + are in. For instance, it is possible to select ``ww*`` to match all + hosts starting with ``www``. This concept is pulled directly from + :program:`Func`, one of Michael DeHaan's (an Ansible Founder) earlier + projects. In addition to basic globbing, various set operations are + also possible, such as 'hosts in this group and not in another group', + and so on. 
+ + Group + A group consists of several hosts assigned to a pool that can be + conveniently targeted together, as well as given variables that they + share in common. + + Group Vars + The :file:`group_vars/` files are files that live in a directory + alongside an inventory file, with an optional filename named after + each group. This is a convenient place to put variables that are + provided to a given group, especially complex data structures, so that + these variables do not have to be embedded in the :term:`inventory` + file or :term:`playbook <playbooks>`. + + Handlers + Handlers are just like regular tasks in an Ansible + :term:`playbook <playbooks>` (see :term:`Tasks`) but are only run if + the Task contains a ``notify`` directive and also indicates that it + changed something. For example, if a config file is changed, then the + task referencing the config file templating operation may notify + a service restart handler. This means services can be bounced only if + they need to be restarted. Handlers can be used for things other than + service restarts, but service restarts are the most common usage. + + Host + A host is simply a remote machine that Ansible manages. They can have + individual variables assigned to them, and can also be organized in + groups. All hosts have a name they can be reached at (which is either + an IP address or a domain name) and, optionally, a port number, if they + are not to be accessed on the default SSH port. + + Host Specifier + Each :term:`Play <plays>` in Ansible maps a series of :term:`tasks` (which define the role, + purpose, or orders of a system) to a set of systems. + + This ``hosts:`` directive in each play is often called the hosts specifier. + + It may select one system, many systems, one or more groups, or even + some hosts that are in one group and explicitly not in another. 
+ + Host Vars + Just like :term:`Group Vars`, a directory alongside the inventory file named + :file:`host_vars/` can contain a file named after each hostname in the + inventory file, in :term:`YAML` format. This provides a convenient place to + assign variables to the host without having to embed them in the + :term:`inventory` file. The Host Vars file can also be used to define complex + data structures that can't be represented in the inventory file. + + Idempotency + An operation is idempotent if the result of performing it once is + exactly the same as the result of performing it repeatedly without + any intervening actions. + + Includes + The idea that :term:`playbook <playbooks>` files (which are nothing + more than lists of :term:`plays`) can include other lists of plays, + and task lists can externalize lists of :term:`tasks` in other files, + and similarly with :term:`handlers`. Includes can be parameterized, + which means that the loaded file can pass variables. For instance, an + included play for setting up a WordPress blog may take a parameter + called ``user`` and that play could be included more than once to + create a blog for both ``alice`` and ``bob``. + + Inventory + A file (by default, Ansible uses a simple INI format) that describes + :term:`Hosts <Host>` and :term:`Groups <Group>` in Ansible. Inventory + can also be provided via an :term:`Inventory Script` (sometimes called + an "External Inventory Script"). + + Inventory Script + A very simple program (or a complicated one) that looks up + :term:`hosts <Host>`, :term:`group` membership for hosts, and variable + information from an external resource -- whether that be a SQL + database, a CMDB solution, or something like LDAP. This concept was + adapted from Puppet (where it is called an "External Nodes + Classifier") and works more or less exactly the same way. + + Jinja2 + Jinja2 is the preferred templating language of Ansible's template + module. 
It is a very simple Python template language that is
+ generally readable and easy to write.
+
+ JSON
+ Ansible uses JSON for return data from remote modules. This allows
+ modules to be written in any language, not just Python.
+
+ Lazy Evaluation
+ In general, Ansible evaluates any variables in
+ :term:`playbook <playbooks>` content at the last possible second,
+ which means that if you define a data structure that data structure
+ itself can define variable values within it, and everything "just
+ works" as you would expect. This also means variable strings can
+ include other variables inside of those strings.
+
+ Library
+ A collection of modules made available to :command:`/usr/bin/ansible`
+ or an Ansible :term:`playbook <playbooks>`.
+
+ Limit Groups
+ By passing ``--limit somegroup`` to :command:`ansible` or
+ :command:`ansible-playbook`, the commands can be limited to a subset
+ of :term:`hosts <Host>`. For instance, this can be used to run
+ a :term:`playbook <playbooks>` that normally targets an entire set of
+ servers to one particular server.
+
+ Local Action
+ A local_action directive in a :term:`playbook <playbooks>` targeting
+ remote machines means that the given step will actually occur on the
+ local machine, but that the variable ``{{ ansible_hostname }}`` can be
+ passed in to reference the remote hostname being referred to in that
+ step. This can be used to trigger, for example, an rsync operation.
+
+ Local Connection
+ By using ``connection: local`` in a :term:`playbook <playbooks>`, or
+ passing ``-c local`` to :command:`/usr/bin/ansible`, this indicates
+ that we are managing the local host and not a remote machine.
+
+ Lookup Plugin
+ A lookup plugin is a way to get data into Ansible from the outside world.
+ Lookup plugins are an extension of Jinja2 and can be accessed in templates, for example,
+ ``{{ lookup('file','/path/to/file') }}``.
+ These are how such things as ``with_items`` are implemented.
+ There are also lookup plugins like ``file`` which loads data from + a file and ones for querying environment variables, DNS text records, + or key value stores. + + Loops + Generally, Ansible is not a programming language. It prefers to be + more declarative, though various constructs like ``loop`` allow + a particular task to be repeated for multiple items in a list. + Certain modules, like :ref:`yum <yum_module>` and :ref:`apt <apt_module>`, actually take + lists directly, and can install all packages given in those lists + within a single transaction, dramatically speeding up total time to + configuration, so they can be used without loops. + + Modules + Modules are the units of work that Ansible ships out to remote + machines. Modules are kicked off by either + :command:`/usr/bin/ansible` or :command:`/usr/bin/ansible-playbook` + (where multiple tasks use lots of different modules in conjunction). + Modules can be implemented in any language, including Perl, Bash, or + Ruby -- but can leverage some useful communal library code if written + in Python. Modules just have to return :term:`JSON`. Once modules are + executed on remote machines, they are removed, so no long running + daemons are used. Ansible refers to the collection of available + modules as a :term:`library`. + + Multi-Tier + The concept that IT systems are not managed one system at a time, but + by interactions between multiple systems and groups of systems in + well defined orders. For instance, a web server may need to be + updated before a database server and pieces on the web server may + need to be updated after *THAT* database server and various load + balancers and monitoring servers may need to be contacted. Ansible + models entire IT topologies and workflows rather than looking at + configuration from a "one system at a time" perspective. + + Namespace + The first part of a fully qualified collection name, the namespace usually reflects a functional content category. 
Example: in ``cisco.ios.ios_config``, ``cisco`` is the namespace. Namespaces are reserved and distributed by Red Hat at Red Hat's discretion. Many, but not all, namespaces will correspond with vendor names. See `Galaxy namespaces <https://galaxy.ansible.com/docs/contributing/namespaces.html#galaxy-namespaces>`_ on the Galaxy docsite for namespace requirements. + + Notify + The act of a :term:`task <tasks>` registering a change event and + informing a :term:`handler <handlers>` task that another + :term:`action` needs to be run at the end of the :term:`play <plays>`. If + a handler is notified by multiple tasks, it will still be run only + once. Handlers are run in the order they are listed, not in the order + that they are notified. + + Orchestration + Many software automation systems use this word to mean different + things. Ansible uses it as a conductor would conduct an orchestra. + A datacenter or cloud architecture is full of many systems, playing + many parts -- web servers, database servers, maybe load balancers, + monitoring systems, continuous integration systems, and so on. In + performing any process, it is necessary to touch systems in particular + orders, often to simulate rolling updates or to deploy software + correctly. Some system may perform some steps, then others, then + previous systems already processed may need to perform more steps. + Along the way, emails may need to be sent or web services contacted. + Ansible orchestration is all about modeling that kind of process. + + paramiko + By default, Ansible manages machines over SSH. The library that + Ansible uses by default to do this is a Python-powered library called + paramiko. The paramiko library is generally fast and easy to manage, + though users who want to use Kerberos or Jump Hosts may wish to switch + to a native SSH binary such as OpenSSH by specifying the connection + type in their :term:`playbooks`, or using the ``-c ssh`` flag. 
+ + Playbooks + Playbooks are the language by which Ansible orchestrates, configures, + administers, or deploys systems. They are called playbooks partially + because it's a sports analogy, and it's supposed to be fun using them. + They aren't workbooks :) + + Plays + A :term:`playbook <playbooks>` is a list of plays. A play is + minimally a mapping between a set of :term:`hosts <Host>` selected by a host + specifier (usually chosen by :term:`groups <Group>` but sometimes by + hostname :term:`globs <Globbing>`) and the :term:`tasks` which run on those + hosts to define the role that those systems will perform. There can be + one or many plays in a playbook. + + Pull Mode + By default, Ansible runs in :term:`push mode`, which allows it very + fine-grained control over when it talks to each system. Pull mode is + provided for when you would rather have nodes check in every N minutes + on a particular schedule. It uses a program called + :command:`ansible-pull` and can also be set up (or reconfigured) using + a push-mode :term:`playbook <playbooks>`. Most Ansible users use push + mode, but pull mode is included for variety and the sake of having + choices. + + :command:`ansible-pull` works by checking configuration orders out of + git on a crontab and then managing the machine locally, using the + :term:`local connection` plugin. + + Push Mode + Push mode is the default mode of Ansible. In fact, it's not really + a mode at all -- it's just how Ansible works when you aren't thinking + about it. Push mode allows Ansible to be fine-grained and conduct + nodes through complex orchestration processes without waiting for them + to check in. + + Register Variable + The result of running any :term:`task <tasks>` in Ansible can be + stored in a variable for use in a template or a conditional statement. 
+
+ The keyword used to define the variable is called ``register``, taking
+ its name from the idea of registers in assembly programming (though
+ Ansible will never feel like assembly programming). There are an
+ infinite number of variable names you can use for registration.
+
+ Resource Model
+ Ansible modules work in terms of resources. For instance, the
+ :ref:`file module <file_module>` will select a particular file and ensure
+ that the attributes of that resource match a particular model. As an
+ example, we might wish to change the owner of :file:`/etc/motd` to
+ ``root`` if it is not already set to ``root``, or set its mode to
+ ``0644`` if it is not already set to ``0644``. The resource models
+ are :term:`idempotent <idempotency>` meaning change commands are not
+ run unless needed, and Ansible will bring the system back to a desired
+ state regardless of the actual state -- rather than you having to tell
+ it how to get to the state.
+
+ Roles
+ Roles are units of organization in Ansible. Assigning a role to
+ a group of :term:`hosts <Host>` (or a set of :term:`groups <group>`,
+ or :term:`host patterns <Globbing>`, and so on) implies that they should
+ implement a specific behavior. A role may include applying certain
+ variable values, certain :term:`tasks`, and certain :term:`handlers`
+ -- or just one or more of these things. Because of the file structure
+ associated with a role, roles become redistributable units that allow
+ you to share behavior among :term:`playbooks` -- or even with other users.
+
+ Rolling Update
+ The act of addressing a number of nodes in a group N at a time to
+ avoid updating them all at once and bringing the system offline. For
+ instance, in a web topology of 500 nodes handling very large volume,
+ it may be reasonable to update 10 or 20 machines at a time, moving on
+ to the next 10 or 20 when done. The ``serial:`` keyword in an Ansible
+ :term:`playbook <playbooks>` controls the size of the rolling update pool.
The + default is to address the batch size all at once, so this is something + that you must opt-in to. OS configuration (such as making sure config + files are correct) does not typically have to use the rolling update + model, but can do so if desired. + + Serial + .. seealso:: + + :term:`Rolling Update` + + Sudo + Ansible does not require root logins, and since it's daemonless, + definitely does not require root level daemons (which can be + a security concern in sensitive environments). Ansible can log in and + perform many operations wrapped in a sudo command, and can work with + both password-less and password-based sudo. Some operations that + don't normally work with sudo (like scp file transfer) can be achieved + with Ansible's :ref:`copy <copy_module>`, :ref:`template <template_module>`, and + :ref:`fetch <fetch_module>` modules while running in sudo mode. + + SSH (Native) + Native OpenSSH as an Ansible transport is specified with ``-c ssh`` + (or a config file, or a directive in the :term:`playbook <playbooks>`) + and can be useful if wanting to login via Kerberized SSH or using SSH + jump hosts, and so on. In 1.2.1, ``ssh`` will be used by default if the + OpenSSH binary on the control machine is sufficiently new. + Previously, Ansible selected ``paramiko`` as a default. Using + a client that supports ``ControlMaster`` and ``ControlPersist`` is + recommended for maximum performance -- if you don't have that and + don't need Kerberos, jump hosts, or other features, ``paramiko`` is + a good choice. Ansible will warn you if it doesn't detect + ControlMaster/ControlPersist capability. + + Tags + Ansible allows tagging resources in a :term:`playbook <playbooks>` + with arbitrary keywords, and then running only the parts of the + playbook that correspond to those keywords. 
For instance, it is
+ possible to have an entire OS configuration, and have certain steps
+ labeled ``ntp``, and then run just the ``ntp`` steps to reconfigure
+ the time server information on a remote host.
+
+ Task
+ :term:`Playbooks` exist to run tasks. Tasks combine an :term:`action`
+ (a module and its arguments) with a name and optionally some other
+ keywords (like :term:`looping directives <loops>`). :term:`Handlers`
+ are also tasks, but they are a special kind of task that do not run
+ unless they are notified by name when a task reports an underlying
+ change on a remote system.
+
+ Tasks
+ A list of :term:`Task`.
+
+ Templates
+ Ansible can easily transfer files to remote systems but often it is
+ desirable to substitute variables in other files. Variables may come
+ from the :term:`inventory` file, :term:`Host Vars`, :term:`Group
+ Vars`, or :term:`Facts`. Templates use the :term:`Jinja2` template
+ engine and can also include logical constructs like loops and if
+ statements.
+
+ Transport
+ Ansible uses ``Connection Plugins`` to define types of available
+ transports. These are simply how Ansible will reach out to managed
+ systems. Transports included are :term:`paramiko`,
+ :term:`ssh <SSH (Native)>` (using OpenSSH), and
+ :term:`local <Local Connection>`.
+
+ When
+ An optional conditional statement attached to a :term:`task <tasks>` that is used to
+ determine if the task should run or not. If the expression following
+ the ``when:`` keyword evaluates to false, the task will be ignored.
+
+ Vars (Variables)
+ As opposed to :term:`Facts`, variables are names of values (they can
+ be simple scalar values -- integers, booleans, strings) or complex
+ ones (dictionaries/hashes, lists) that can be used in templates and
+ :term:`playbooks`. They are declared things, not things that are
+ inferred from the remote system's current state or nature (which is
+ what Facts are).
+ + YAML + Ansible does not want to force people to write programming language + code to automate infrastructure, so Ansible uses YAML to define + :term:`playbook <playbooks>` configuration languages and also variable + files. YAML is nice because it has a minimum of syntax and is very + clean and easy for people to skim. It is a good data format for + configuration files and humans, but also machine readable. Ansible's + usage of YAML stemmed from Michael DeHaan's first use of it inside of + Cobbler around 2006. YAML is fairly popular in the dynamic language + community and the format has libraries available for serialization in + many languages (Python, Perl, Ruby, and so on). + +.. seealso:: + + :ref:`ansible_faq` + Frequently asked questions + :ref:`working_with_playbooks` + An introduction to playbooks + :ref:`playbooks_best_practices` + Tips and tricks for playbooks + `User Mailing List <https://groups.google.com/group/ansible-devel>`_ + Have a question? Stop by the google group! + `irc.freenode.net <http://irc.freenode.net>`_ + #ansible IRC chat channel diff --git a/docs/docsite/rst/reference_appendices/interpreter_discovery.rst b/docs/docsite/rst/reference_appendices/interpreter_discovery.rst new file mode 100644 index 00000000..9fa7d585 --- /dev/null +++ b/docs/docsite/rst/reference_appendices/interpreter_discovery.rst @@ -0,0 +1,51 @@ +.. _interpreter_discovery: + +Interpreter Discovery +===================== + +Most Ansible modules that execute under a POSIX environment require a Python +interpreter on the target host. Unless configured otherwise, Ansible will +attempt to discover a suitable Python interpreter on each target host +the first time a Python module is executed for that host. 
+ +To control the discovery behavior: + +* for individual hosts and groups, use the ``ansible_python_interpreter`` inventory variable +* globally, use the ``interpreter_python`` key in the ``[defaults]`` section of ``ansible.cfg`` + +Use one of the following values: + +auto_legacy : (default in 2.8) + Detects the target OS platform, distribution, and version, then consults a + table listing the correct Python interpreter and path for each + platform/distribution/version. If an entry is found, and ``/usr/bin/python`` is absent, uses the discovered interpreter (and path). If an entry + is found, and ``/usr/bin/python`` is present, uses ``/usr/bin/python`` + and issues a warning. + This exception provides temporary compatibility with previous versions of + Ansible that always defaulted to ``/usr/bin/python``, so if you have + installed Python and other dependencies at ``/usr/bin/python`` on some hosts, + Ansible will find and use them with this setting. + If no entry is found, or the listed Python is not present on the + target host, searches a list of common Python interpreter + paths and uses the first one found; also issues a warning that future + installation of another Python interpreter could alter the one chosen. + +auto : (future default in 2.12) + Detects the target OS platform, distribution, and version, then consults a + table listing the correct Python interpreter and path for each + platform/distribution/version. If an entry is found, uses the discovered + interpreter. + If no entry is found, or the listed Python is not present on the + target host, searches a list of common Python interpreter + paths and uses the first one found; also issues a warning that future + installation of another Python interpreter could alter the one chosen. + +auto_legacy_silent + Same as ``auto_legacy``, but does not issue warnings. + +auto_silent + Same as ``auto``, but does not issue warnings. 
+ +You can still set ``ansible_python_interpreter`` to a specific path at any +variable level (for example, in host_vars, in vars files, in playbooks, and so on). +Setting a specific path completely disables automatic interpreter discovery; Ansible always uses the path specified. diff --git a/docs/docsite/rst/reference_appendices/logging.rst b/docs/docsite/rst/reference_appendices/logging.rst new file mode 100644 index 00000000..6fbd0440 --- /dev/null +++ b/docs/docsite/rst/reference_appendices/logging.rst @@ -0,0 +1,14 @@ +********************** +Logging Ansible output +********************** + +By default Ansible sends output about plays, tasks, and module arguments to your screen (STDOUT) on the control node. If you want to capture Ansible output in a log, you have three options: + +* To save Ansible output in a single log on the control node, set the ``log_path`` :ref:`configuration file setting <intro_configuration>`. You may also want to set ``display_args_to_stdout``, which helps to differentiate similar tasks by including variable values in the Ansible output. +* To save Ansible output in separate logs, one on each managed node, set the ``no_target_syslog`` and ``syslog_facility`` :ref:`configuration file settings <intro_configuration>`. +* To save Ansible output to a secure database, use :ref:`Ansible Tower <ansible_tower>`. Tower allows you to review history based on hosts, projects, and particular inventories over time, using graphs and/or a REST API. + +Protecting sensitive data with ``no_log`` +========================================= + +If you save Ansible output to a log, you expose any secret data in your Ansible output, such as passwords and user names. To keep sensitive values out of your logs, mark tasks that expose them with the ``no_log: True`` attribute. However, the ``no_log`` attribute does not affect debugging output, so be careful not to debug playbooks in a production environment. See :ref:`keep_secret_data` for an example. 
diff --git a/docs/docsite/rst/reference_appendices/module_utils.rst b/docs/docsite/rst/reference_appendices/module_utils.rst new file mode 100644 index 00000000..7fa4620c --- /dev/null +++ b/docs/docsite/rst/reference_appendices/module_utils.rst @@ -0,0 +1,27 @@ +.. _ansible.module_utils: +.. _module_utils: + +*************************************************************** +Ansible Reference: Module Utilities +*************************************************************** + +This page documents utilities intended to be helpful when writing +Ansible modules in Python. + + +AnsibleModule +-------------- + +To use this functionality, include ``from ansible.module_utils.basic import AnsibleModule`` in your module. + +.. autoclass:: ansible.module_utils.basic.AnsibleModule + :members: + :noindex: + +Basic +------ + +To use this functionality, include ``import ansible.module_utils.basic`` in your module. + +.. automodule:: ansible.module_utils.basic + :members: diff --git a/docs/docsite/rst/reference_appendices/python_3_support.rst b/docs/docsite/rst/reference_appendices/python_3_support.rst new file mode 100644 index 00000000..da06023c --- /dev/null +++ b/docs/docsite/rst/reference_appendices/python_3_support.rst @@ -0,0 +1,95 @@ +================ +Python 3 Support +================ + +Ansible 2.5 and above work with Python 3. Previous to 2.5, using Python 3 was +considered a tech preview. This topic discusses how to set up your controller and managed machines +to use Python 3. + +.. note:: On the controller we support Python 3.5 or greater and Python 2.7 or greater. Module-side, we support Python 3.5 or greater and Python 2.6 or greater. + +On the controller side +---------------------- + +The easiest way to run :command:`/usr/bin/ansible` under Python 3 is to install it with the Python3 +version of pip. This will make the default :command:`/usr/bin/ansible` run with Python3: + +.. 
code-block:: shell + + $ pip3 install ansible + $ ansible --version | grep "python version" + python version = 3.6.2 (default, Sep 22 2017, 08:28:09) [GCC 7.2.1 20170915 (Red Hat 7.2.1-2)] + +If you are running Ansible :ref:`from_source` and want to use Python 3 with your source checkout, run your +command via ``python3``. For example: + +.. code-block:: shell + + $ source ./hacking/env-setup + $ python3 $(which ansible) localhost -m ping + $ python3 $(which ansible-playbook) sample-playbook.yml + +.. note:: Individual Linux distribution packages may be packaged for Python2 or Python3. When running from + distro packages you'll only be able to use Ansible with the Python version for which it was + installed. Sometimes distros will provide a means of installing for several Python versions + (via a separate package or via some commands that are run after install). You'll need to check + with your distro to see if that applies in your case. + + +Using Python 3 on the managed machines with commands and playbooks +------------------------------------------------------------------ + +* Ansible will automatically detect and use Python 3 on many platforms that ship with it. To explicitly configure a + Python 3 interpreter, set the ``ansible_python_interpreter`` inventory variable at a group or host level to the + location of a Python 3 interpreter, such as :command:`/usr/bin/python3`. The default interpreter path may also be + set in ``ansible.cfg``. + +.. seealso:: :ref:`interpreter_discovery` for more information. + +.. code-block:: ini + + # Example inventory that makes an alias for localhost that uses Python3 + localhost-py3 ansible_host=localhost ansible_connection=local ansible_python_interpreter=/usr/bin/python3 + + # Example of setting a group of hosts to use Python3 + [py3-hosts] + ubuntu16 + fedora27 + + [py3-hosts:vars] + ansible_python_interpreter=/usr/bin/python3 + +.. seealso:: :ref:`intro_inventory` for more information. 
+ +* Run your command or playbook: + +.. code-block:: shell + + $ ansible localhost-py3 -m ping + $ ansible-playbook sample-playbook.yml + + +Note that you can also use the `-e` command line option to manually +set the python interpreter when you run a command. This can be useful if you want to test whether +a specific module or playbook has any bugs under Python 3. For example: + +.. code-block:: shell + + $ ansible localhost -m ping -e 'ansible_python_interpreter=/usr/bin/python3' + $ ansible-playbook sample-playbook.yml -e 'ansible_python_interpreter=/usr/bin/python3' + +What to do if an incompatibility is found +----------------------------------------- + +We have spent several releases squashing bugs and adding new tests so that Ansible's core feature +set runs under both Python 2 and Python 3. However, bugs may still exist in edge cases and many of +the modules shipped with Ansible are maintained by the community and not all of those may be ported +yet. + +If you find a bug running under Python 3 you can submit a bug report on `Ansible's GitHub project +<https://github.com/ansible/ansible/issues/>`_. Be sure to mention Python3 in the bug report so +that the right people look at it. + +If you would like to fix the code and submit a pull request on github, you can +refer to :ref:`developing_python_3` for information on how we fix +common Python3 compatibility issues in the Ansible codebase. diff --git a/docs/docsite/rst/reference_appendices/release_and_maintenance.rst b/docs/docsite/rst/reference_appendices/release_and_maintenance.rst new file mode 100644 index 00000000..eef77130 --- /dev/null +++ b/docs/docsite/rst/reference_appendices/release_and_maintenance.rst @@ -0,0 +1,33 @@ +.. _release_and_maintenance: + +Release and maintenance +======================= + +.. _release_cycle: +.. _release_schedule: +.. _support_life: +.. _methods: +.. _development_and_stable_version_maintenance_workflow: +.. _release_changelogs: +.. 
_release_freezing: + +Please go to `the devel release and maintenance page <https://docs.ansible.com/ansible/devel/reference_appendices/release_and_maintenance.html>`_ for up to date information. + +.. note:: + + This link takes you to a different version of the Ansible documentation. Use the version selection on the left or your browser back button to return to this version of the documentation. + +.. seealso:: + + :ref:`community_committer_guidelines` + Guidelines for Ansible core contributors and maintainers + :ref:`testing_strategies` + Testing strategies + :ref:`ansible_community_guide` + Community information and contributing + `Ansible release tarballs <https://releases.ansible.com/ansible/>`_ + Ansible release tarballs + `Development Mailing List <https://groups.google.com/group/ansible-devel>`_ + Mailing list for development topics + `irc.freenode.net <http://irc.freenode.net>`_ + #ansible IRC chat channel diff --git a/docs/docsite/rst/reference_appendices/special_variables.rst b/docs/docsite/rst/reference_appendices/special_variables.rst new file mode 100644 index 00000000..e4ecc177 --- /dev/null +++ b/docs/docsite/rst/reference_appendices/special_variables.rst @@ -0,0 +1,167 @@ +.. _special_variables: + +Special Variables +================= + +Magic variables +--------------- +These variables cannot be set directly by the user; Ansible will always override them to reflect internal state. 
+ +ansible_check_mode + Boolean that indicates if we are in check mode or not + +ansible_config_file + The full path of used Ansible configuration file + +ansible_dependent_role_names + The names of the roles currently imported into the current play as dependencies of other plays + +ansible_diff_mode + Boolean that indicates if we are in diff mode or not + +ansible_forks + Integer reflecting the number of maximum forks available to this run + +ansible_inventory_sources + List of sources used as inventory + +ansible_limit + Contents of the ``--limit`` CLI option for the current execution of Ansible + +ansible_loop + A dictionary/map containing extended loop information when enabled via ``loop_control.extended`` + +ansible_loop_var + The name of the value provided to ``loop_control.loop_var``. Added in ``2.8`` + +ansible_index_var + The name of the value provided to ``loop_control.index_var``. Added in ``2.9`` + +ansible_parent_role_names + When the current role is being executed by means of an :ref:`include_role <include_role_module>` or :ref:`import_role <import_role_module>` action, this variable contains a list of all parent roles, with the most recent role (in other words, the role that included/imported this role) being the first item in the list. + When multiple inclusions occur, this list lists the *last* role (in other words, the role that included this role) as the *first* item in the list. It is also possible that a specific role exists more than once in this list. + + For example: When role **A** includes role **B**, inside role B, ``ansible_parent_role_names`` will equal to ``['A']``. If role **B** then includes role **C**, the list becomes ``['B', 'A']``. 
+ +ansible_parent_role_paths + When the current role is being executed by means of an :ref:`include_role <include_role_module>` or :ref:`import_role <import_role_module>` action, this variable contains a list of all parent roles, with the most recent role (in other words, the role that included/imported this role) being the first item in the list. + Please refer to ``ansible_parent_role_names`` for the order of items in this list. + +ansible_play_batch + List of active hosts in the current play run limited by the serial, aka 'batch'. Failed/Unreachable hosts are not considered 'active'. + +ansible_play_hosts + List of hosts in the current play run, not limited by the serial. Failed/Unreachable hosts are included in this list. + +ansible_play_hosts_all + List of all the hosts that were targeted by the play + +ansible_play_role_names + The names of the roles currently imported into the current play. This list does **not** contain the role names that are + implicitly included via dependencies. + +ansible_playbook_python + The path to the python interpreter being used by Ansible on the controller + +ansible_role_names + The names of the roles currently imported into the current play, or roles referenced as dependencies of the roles + imported into the current play. + +ansible_role_name + The fully qualified collection role name, in the format of ``namespace.collection.role_name`` + +ansible_collection_name + The name of the collection the task that is executing is a part of. In the format of ``namespace.collection`` + +ansible_run_tags + Contents of the ``--tags`` CLI option, which specifies which tags will be included for the current run. + +ansible_search_path + Current search path for action plugins and lookups, in other words, where we search for relative paths when you do ``template: src=myfile`` + +ansible_skip_tags + Contents of the ``--skip-tags`` CLI option, which specifies which tags will be skipped for the current run. 
+ +ansible_verbosity + Current verbosity setting for Ansible + +ansible_version + Dictionary/map that contains information about the current running version of ansible, it has the following keys: full, major, minor, revision and string. + +group_names + List of groups the current host is part of + +groups + A dictionary/map with all the groups in inventory and each group has the list of hosts that belong to it + +hostvars + A dictionary/map with all the hosts in inventory and variables assigned to them + +inventory_hostname + The inventory name for the 'current' host being iterated over in the play + +inventory_hostname_short + The short version of `inventory_hostname` + +inventory_dir + The directory of the inventory source in which the `inventory_hostname` was first defined + +inventory_file + The file name of the inventory source in which the `inventory_hostname` was first defined + +omit + Special variable that allows you to 'omit' an option in a task, for example ``- user: name=bob home={{ bobs_home|default(omit) }}`` + +play_hosts + Deprecated, the same as ansible_play_batch + +ansible_play_name + The name of the currently executed play. Added in ``2.8``. + +playbook_dir + The path to the directory of the playbook that was passed to the ``ansible-playbook`` command line. + +role_name + The name of the role currently being executed. + +role_names + Deprecated, the same as ansible_play_role_names + +role_path + The path to the dir of the currently running role + +Facts +----- +These are variables that contain information pertinent to the current host (`inventory_hostname`). They are only available if gathered first. See :ref:`vars_and_facts` for more information. + +ansible_facts + Contains any facts gathered or cached for the `inventory_hostname` + Facts are normally gathered by the :ref:`setup <setup_module>` module automatically in a play, but any module can return facts. 
+ +ansible_local + Contains any 'local facts' gathered or cached for the `inventory_hostname`. + The keys available depend on the custom facts created. + See the :ref:`setup <setup_module>` module and :ref:`local_facts` for more details. + +.. _connection_variables: + +Connection variables +--------------------- +Connection variables are normally used to set the specifics on how to execute actions on a target. Most of them correspond to connection plugins, but not all are specific to them; other plugins like shell, terminal and become are normally involved. +Only the common ones are described as each connection/become/shell/etc plugin can define its own overrides and specific variables. +See :ref:`general_precedence_rules` for how connection variables interact with :ref:`configuration settings<ansible_configuration_settings>`, :ref:`command-line options<command_line_tools>`, and :ref:`playbook keywords<playbook_keywords>`. + +ansible_become_user + The user Ansible 'becomes' after using privilege escalation. This must be available to the 'login user'. + +ansible_connection + The connection plugin actually used for the task on the target host. + +ansible_host + The ip/name of the target host to use instead of `inventory_hostname`. + +ansible_python_interpreter + The path to the Python executable Ansible should use on the target host. + +ansible_user + The user Ansible 'logs in' as. diff --git a/docs/docsite/rst/reference_appendices/test_strategies.rst b/docs/docsite/rst/reference_appendices/test_strategies.rst new file mode 100644 index 00000000..01da667a --- /dev/null +++ b/docs/docsite/rst/reference_appendices/test_strategies.rst @@ -0,0 +1,275 @@ +.. _testing_strategies: + +Testing Strategies +================== + +.. _testing_intro: + +Integrating Testing With Ansible Playbooks +`````````````````````````````````````````` + +Many times, people ask, "how can I best integrate testing with Ansible playbooks?" There are many options. 
Ansible is actually designed +to be a "fail-fast" and ordered system, therefore it makes it easy to embed testing directly in Ansible playbooks. In this chapter, +we'll go into some patterns for integrating tests of infrastructure and discuss the right level of testing that may be appropriate. + +.. note:: This is a chapter about testing the application you are deploying, not the chapter on how to test Ansible modules during development. For that content, please hop over to the Development section. + +By incorporating a degree of testing into your deployment workflow, there will be fewer surprises when code hits production and, in many cases, +tests can be leveraged in production to prevent failed updates from migrating across an entire installation. Since it's push-based, it's +also very easy to run the steps on the localhost or testing servers. Ansible lets you insert as many checks and balances into your upgrade workflow as you would like to have. + +The Right Level of Testing +`````````````````````````` + +Ansible resources are models of desired-state. As such, it should not be necessary to test that services are started, packages are +installed, or other such things. Ansible is the system that will ensure these things are declaratively true. Instead, assert these +things in your playbooks. + +.. code-block:: yaml + + tasks: + - service: + name: foo + state: started + enabled: yes + +If you think the service may not be started, the best thing to do is request it to be started. If the service fails to start, Ansible +will yell appropriately. (This should not be confused with whether the service is doing something functional, which we'll show more about how to +do later). + +.. _check_mode_drift: + +Check Mode As A Drift Test +`````````````````````````` + +In the above setup, `--check` mode in Ansible can be used as a layer of testing as well. 
If running a deployment playbook against an +existing system, using the `--check` flag to the `ansible` command will report if Ansible thinks it would have had to have made any changes to +bring the system into a desired state. + +This can let you know up front if there is any need to deploy onto the given system. Ordinarily scripts and commands don't run in check mode, so if you +want certain steps to execute in normal mode even when the `--check` flag is used, such as calls to the script module, disable check mode for those tasks:: + + + roles: + - webserver + + tasks: + - script: verify.sh + check_mode: no + +Modules That Are Useful for Testing +``````````````````````````````````` + +Certain playbook modules are particularly good for testing. Below is an example that ensures a port is open:: + + tasks: + + - wait_for: + host: "{{ inventory_hostname }}" + port: 22 + delegate_to: localhost + +Here's an example of using the URI module to make sure a web service returns:: + + tasks: + + - action: uri url=http://www.example.com return_content=yes + register: webpage + + - fail: + msg: 'service is not happy' + when: "'AWESOME' not in webpage.content" + +It's easy to push an arbitrary script (in any language) on a remote host and the script will automatically fail if it has a non-zero return code:: + + tasks: + + - script: test_script1 + - script: test_script2 --parameter value --parameter2 value + +If using roles (you should be, roles are great!), scripts pushed by the script module can live in the 'files/' directory of a role. 
+ +And the assert module makes it very easy to validate various kinds of truth:: + + tasks: + + - shell: /usr/bin/some-command --parameter value + register: cmd_result + + - assert: + that: + - "'not ready' not in cmd_result.stderr" + - "'gizmo enabled' in cmd_result.stdout" + +Should you feel the need to test for existence of files that are not declaratively set by your Ansible configuration, the 'stat' module is a great choice:: + + tasks: + + - stat: + path: /path/to/something + register: p + + - assert: + that: + - p.stat.exists and p.stat.isdir + + +As mentioned above, there's no need to check things like the return codes of commands. Ansible is checking them automatically. +Rather than checking for a user to exist, consider using the user module to make it exist. + +Ansible is a fail-fast system, so when there is an error creating that user, it will stop the playbook run. You do not have +to check up behind it. + +Testing Lifecycle +````````````````` + +If writing some degree of basic validation of your application into your playbooks, they will run every time you deploy. + +As such, deploying into a local development VM and a staging environment will both validate that things are according to plan +ahead of your production deploy. + +Your workflow may be something like this:: + + - Use the same playbook all the time with embedded tests in development + - Use the playbook to deploy to a staging environment (with the same playbooks) that simulates production + - Run an integration test battery written by your QA team against staging + - Deploy to production, with the same integrated tests. + +Something like an integration test battery should be written by your QA team if you are a production webservice. This would include +things like Selenium tests or automated API tests and would usually not be something embedded into your Ansible playbooks. 
+ +However, it does make sense to include some basic health checks into your playbooks, and in some cases it may be possible to run +a subset of the QA battery against remote nodes. This is what the next section covers. + +Integrating Testing With Rolling Updates +```````````````````````````````````````` + +If you have read into :ref:`playbooks_delegation` it may quickly become apparent that the rolling update pattern can be extended, and you +can use the success or failure of the playbook run to decide whether to add a machine into a load balancer or not. + +This is the great culmination of embedded tests:: + + --- + + - hosts: webservers + serial: 5 + + pre_tasks: + + - name: take out of load balancer pool + command: /usr/bin/take_out_of_pool {{ inventory_hostname }} + delegate_to: 127.0.0.1 + + roles: + + - common + - webserver + - apply_testing_checks + + post_tasks: + + - name: add back to load balancer pool + command: /usr/bin/add_back_to_pool {{ inventory_hostname }} + delegate_to: 127.0.0.1 + +Of course in the above, the "take out of the pool" and "add back" steps would be replaced with a call to an Ansible load balancer +module or appropriate shell command. You might also have steps that use a monitoring module to start and end an outage window +for the machine. + +However, what you can see from the above is that tests are used as a gate -- if the "apply_testing_checks" step is not performed, +the machine will not go back into the pool. + +Read the delegation chapter about "max_fail_percentage" and you can also control how many failing tests will stop a rolling update +from proceeding.
+ +This above approach can also be modified to run a step from a testing machine remotely against a machine:: + + --- + + - hosts: webservers + serial: 5 + + pre_tasks: + + - name: take out of load balancer pool + command: /usr/bin/take_out_of_pool {{ inventory_hostname }} + delegate_to: 127.0.0.1 + + roles: + + - common + - webserver + + tasks: + - script: /srv/qa_team/app_testing_script.sh --server {{ inventory_hostname }} + delegate_to: testing_server + + post_tasks: + + - name: add back to load balancer pool + command: /usr/bin/add_back_to_pool {{ inventory_hostname }} + delegate_to: 127.0.0.1 + +In the above example, a script is run from the testing server against a remote node prior to bringing it back into +the pool. + +In the event of a problem, fix the few servers that fail using Ansible's automatically generated +retry file to repeat the deploy on just those servers. + +Achieving Continuous Deployment +``````````````````````````````` + +If desired, the above techniques may be extended to enable continuous deployment practices. + +The workflow may look like this:: + + - Write and use automation to deploy local development VMs + - Have a CI system like Jenkins deploy to a staging environment on every code change + - The deploy job calls testing scripts to pass/fail a build on every deploy + - If the deploy job succeeds, it runs the same deploy playbook against production inventory + +Some Ansible users use the above approach to deploy a half-dozen or dozen times an hour without taking all of their infrastructure +offline. A culture of automated QA is vital if you wish to get to this level. + +If you are still doing a large amount of manual QA, you should still make the decision on whether to deploy manually as well, but +it can still help to work in the rolling update patterns of the previous section and incorporate some basic health checks using +modules like 'script', 'stat', 'uri', and 'assert'. 
+ +Conclusion +`````````` + +Ansible believes you should not need another framework to validate that basic things about your infrastructure are true. This is the case +because Ansible is an order-based system that will fail immediately on unhandled errors for a host, and prevent further configuration +of that host. This forces errors to the top and shows them in a summary at the end of the Ansible run. + +However, as Ansible is designed as a multi-tier orchestration system, it makes it very easy to incorporate tests into the end of +a playbook run, either using loose tasks or roles. When used with rolling updates, testing steps can decide whether to put a machine +back into a load balanced pool or not. + +Finally, because Ansible errors propagate all the way up to the return code of the Ansible program itself, and Ansible by default +runs in an easy push-based mode, Ansible is a great step to put into a build environment if you wish to use it to roll out systems +as part of a Continuous Integration/Continuous Delivery pipeline, as is covered in sections above. + +The focus should not be on infrastructure testing, but on application testing, so we strongly encourage getting together with your +QA team and asking what sort of tests would make sense to run every time you deploy development VMs, and which sort of tests they would like +to run against the staging environment on every deploy. Obviously at the development stage, unit tests are great too. But don't unit +test your playbook. Ansible describes states of resources declaratively, so you don't have to. If there are cases where you want +to be sure of something though, that's great, and things like stat/assert are great go-to modules for that purpose. + +In all, testing is a very organizational and site-specific thing.
Everybody should be doing it, but what makes the most sense for your +environment will vary with what you are deploying and who is using it -- but everyone benefits from a more robust and reliable deployment +system. + +.. seealso:: + + :ref:`list_of_collections` + Browse existing collections, modules, and plugins + :ref:`working_with_playbooks` + An introduction to playbooks + :ref:`playbooks_delegation` + Delegation, useful for working with load balancers, clouds, and locally executed steps. + `User Mailing List <https://groups.google.com/group/ansible-project>`_ + Have a question? Stop by the google group! + `irc.freenode.net <http://irc.freenode.net>`_ + #ansible IRC chat channel + diff --git a/docs/docsite/rst/reference_appendices/tower.rst b/docs/docsite/rst/reference_appendices/tower.rst new file mode 100644 index 00000000..0ef8fe7d --- /dev/null +++ b/docs/docsite/rst/reference_appendices/tower.rst @@ -0,0 +1,13 @@ +.. _ansible_tower: + +Red Hat Ansible Tower +===================== + +`Red Hat Ansible Tower <https://www.ansible.com/products/tower>`_ is a web console and REST API for operationalizing Ansible across your team, organization, and enterprise. It's designed to be the hub for all of your automation tasks. + +Ansible Tower gives you role-based access control, including control over the use of securely stored credentials for SSH and other services. You can sync your Ansible Tower inventory with a wide variety of cloud sources, and powerful multi-playbook workflows allow you to model +complex processes. + +It logs all of your jobs, integrates well with LDAP, SAML, and other authentication sources, and has an amazing browsable REST API. Command line tools are available for easy integration with Jenkins as well. + +Ansible Tower is the downstream Red-Hat supported product version of Ansible AWX. Find out more about Ansible Tower features and how to download it on the `Ansible Tower webpage <https://www.ansible.com/products/tower>`_. 
Ansible Tower is part of the Red Hat Ansible Automation subscription, and comes bundled with amazing support from Red Hat, Inc. diff --git a/docs/docsite/rst/roadmap/COLLECTIONS_2_10.rst b/docs/docsite/rst/roadmap/COLLECTIONS_2_10.rst new file mode 100644 index 00000000..88d6fdcd --- /dev/null +++ b/docs/docsite/rst/roadmap/COLLECTIONS_2_10.rst @@ -0,0 +1,51 @@ +==================== +Ansible project 2.10 +==================== + +This release schedule includes dates for the `ansible <https://pypi.org/project/ansible/>`_ package, with a few dates for the `ansible-base <https://pypi.org/project/ansible-base/>`_ package as well. All dates are subject to change. See :ref:`base_roadmap_2_10` for the most recent updates on ansible-base. + +.. contents:: + :local: + +Release Schedule +---------------- + +.. note:: Dates subject to change. +.. note:: We plan to post weekly alpha releases to the `PyPI ansible project <https://pypi.org/project/ansible/>`_ for testing. + +.. warning:: + We initially were going to have feature freeze on 2020-08-18. We tried this but decided to + change course. Instead, we'll enter feature freeze when ansible-2.10.0 beta1 is released. + +- 2020-06-23: ansible-2.10 alpha freeze. + No net new collections will be added to the ``ansible-2.10`` package after this date. +- 2020-07-10: Ansible collections freeze date for content shuffling. + Content should be in its final collection for the ansible-2.10 series of releases. No more content should move out of the ``community.general`` or ``community.network`` collections. +- 2020-08-13: ansible-base 2.10 Release date, see :ref:`base_roadmap_2_10`. +- 2020-08-14: final ansible-2.10 alpha. +- 2020-09-01: ansible-2.10.0 beta1 and feature freeze. + + - No new modules or major features will be added after this date. In practice this means we will freeze the semver collection versions to compatible release versions. 
For example, if the version of community.crypto on this date was community-crypto-1.1.0; ansible-2.10.0 could ship with community-crypto-1.1.1. It would not ship with community-crypto-1.2.0. + +- 2020-09-08: ansible-2.10.0 beta2. +- 2020-09-15: ansible-2.10.0 rc1 and final freeze. + + - After this date only changes blocking a release are accepted. + - Collections will only be updated to a new version if a blocker is approved. Collection owners should discuss any blockers at the community IRC meeting (on 9-17) to decide whether to bump the version of the collection for a fix. See the `Community IRC meeting agenda <https://github.com/ansible/community/issues/539>`_. + +**Additional release candidates to be published as needed as blockers are fixed** + +- 2020-09-22: ansible-2.10 GA release date. + +Ansible-2.10.x patch releases will occur roughly every three weeks if changes to collections have been made or if it is deemed necessary to force an upgrade to a later ansible-base-2.10.x. Ansible-2.10.x patch releases may contain new features but not backwards incompatibilities. In practice, this means we will include new collection versions where either the patch or the minor version number has changed but not when the major number has changed (example: Ansible-2.10 ships with community-crypto-1.1.0; ansible-2.10.1 may ship with community-crypto-1.2.0 but would not ship with community-crypto-2.0.0). + + +.. note:: + + Minor releases will stop when :ref:`Ansible-3 <ansible_3_roadmap>` is released. See the :ref:`Release and Maintenance Page <release_and_maintenance>` for more information. + + +Breaking changes may be introduced in ansible-3.0 although we encourage collection owners to use deprecation periods that will show up in at least one Ansible release before being changed incompatibly. + + +For more information, reach out on a mailing list or an IRC channel - see :ref:`communication` for more details.
diff --git a/docs/docsite/rst/roadmap/COLLECTIONS_3_0.rst b/docs/docsite/rst/roadmap/COLLECTIONS_3_0.rst new file mode 100644 index 00000000..2af350ab --- /dev/null +++ b/docs/docsite/rst/roadmap/COLLECTIONS_3_0.rst @@ -0,0 +1,58 @@ +.. _ansible_3_roadmap: + +=================== +Ansible project 3.0 +=================== + +This release schedule includes dates for the `ansible <https://pypi.org/project/ansible/>`_ package, with a few dates for the `ansible-base <https://pypi.org/project/ansible-base/>`_ package as well. All dates are subject to change. Ansible 3.x.x includes ``ansible-base`` 2.10. See :ref:`base_roadmap_2_10` for the most recent updates on ``ansible-base``. + +.. contents:: + :local: + +Release schedule +================= + +.. note:: + + Ansible is switching from its traditional versioning scheme to `semantic versioning <https://semver.org/>`_ starting with this release. So this version is 3.0.0 instead of 2.11.0. + + + +:2020-12-16: Finalize rules for net-new collections submitted for the ansible release. +:2021-01-27: Final day for new collections to be **reviewed and approved**. They MUST be + submitted prior to this to give reviewers a chance to look them over and for collection owners + to fix any problems. +:2021-02-02: Ansible-3.0.0-beta1 -- feature freeze [1]_ +:2021-02-09: Ansible-3.0.0-rc1 -- final freeze [2]_ [3]_ +:2021-02-16: Release of Ansible-3.0.0 +:2021-03-09: Release of Ansible-3.1.0 (bugfix + compatible features: every three weeks) + +.. [1] No new modules or major features accepted after this date. In practice this means we will freeze the semver collection versions to compatible release versions. For example, if the version of community.crypto on this date was community-crypto-2.1.0; ansible-3.0.0 could ship with community-crypto-2.1.1. It would not ship with community-crypto-2.2.0. + +.. [2] After this date only changes blocking a release are accepted. 
Accepted changes require creating a new rc and may slip the final release date. +.. [3] Collections will only be updated to a new version if a blocker is approved. Collection owners should discuss any blockers at a community IRC meeting (before this freeze) to decide whether to bump the version of the collection for a fix. See the `Community IRC meeting agenda <https://github.com/ansible/community/issues/539>`_. + + +.. note:: + + Breaking changes may be introduced in Ansible 3.0.0, although we encourage collection owners to use deprecation periods that will show up in at least one Ansible release before the breaking change happens. + + +Ansible minor releases +======================= + +Ansible 3.x.x minor releases will occur approximately every three weeks if changes to collections have been made or if it is deemed necessary to force an upgrade to a later ansible-base-2.10.x. Ansible 3.x.x minor releases may contain new features but not backwards incompatibilities. In practice, this means we will include new collection versions where either the patch or the minor version number has changed but not when the major number has changed. For example, if Ansible-3.0.0 ships with community-crypto-2.1.0; Ansible-3.1.0 may ship with community-crypto-2.2.0 but would not ship with community-crypto-3.0.0). + + +.. note:: + + Minor releases will stop when :ref:`Ansible-4 <ansible_4_roadmap>` is released. See the :ref:`Release and Maintenance Page <release_and_maintenance>` for more information. + + +For more information, reach out on a mailing list or an IRC channel - see :ref:`communication` for more details. + + +ansible-base release +==================== + +Ansible 3.x.x works with ``ansible-base`` 2.10. See :ref:`base_roadmap_2_10` for details. diff --git a/docs/docsite/rst/roadmap/ROADMAP_2_10.rst b/docs/docsite/rst/roadmap/ROADMAP_2_10.rst new file mode 100644 index 00000000..d303ca46 --- /dev/null +++ b/docs/docsite/rst/roadmap/ROADMAP_2_10.rst @@ -0,0 +1,51 @@ +.. 
_base_roadmap_2_10: + +================= +Ansible-base 2.10 +================= + +.. contents:: + :local: + +Release Schedule +---------------- + +Expected +======== + +PRs must be raised well in advance of the dates below to have a chance of being included in this ansible-base release. + +.. note:: There is no Alpha phase in 2.10. +.. note:: Dates subject to change. + +- 2020-06-16 Beta 1 **Feature freeze** + No new functionality (including modules/plugins) to any code + +- 2020-07-21 Release Candidate 1 (bumped from 2020-07-14) +- 2020-07-24 Release Candidate 2 +- 2020-07-25 Release Candidate 3 +- 2020-07-30 Release Candidate 4 +- 2020-08-13 Release + +Release Manager +--------------- + +@sivel + +Planned work +============ + +- Migrate non-base plugins and modules from the ``ansible/ansible`` repository to smaller collection repositories +- Add functionality to ease transition to collections, such as automatic redirects from the 2.9 names to the new FQCN of the plugin +- Create new ``ansible-base`` package representing the ``ansible/ansible`` repository + +Additional Resources +==================== + +The 2.10 release of Ansible will fundamentally change the scope of plugins included in the ``ansible/ansible`` repository, by +moving much of the plugins into smaller collection repositories that will be shipped through https://galaxy.ansible.com/ + +The following links have more information about this process: + +- https://groups.google.com/d/msg/ansible-devel/oKqgCeYTs-M/cHrOgMw8CAAJ +- https://github.com/ansible-collections/overview/blob/main/README.rst diff --git a/docs/docsite/rst/roadmap/ROADMAP_2_5.rst b/docs/docsite/rst/roadmap/ROADMAP_2_5.rst new file mode 100644 index 00000000..34d376ce --- /dev/null +++ b/docs/docsite/rst/roadmap/ROADMAP_2_5.rst @@ -0,0 +1,142 @@ +=========== +Ansible 2.5 +=========== +**Core Engine Freeze and Module Freeze: 22 January 2018** + +**Core and Curated Module Freeze: 22 January 2018** + +**Community Module Freeze: 7 February 
2018** + +**Release Candidate 1 will be 21 February, 2018** + +**Target: March 2018** + +**Service Release schedule: every 2-3 weeks** + +.. contents:: Topics + +Release Manager +--------------- +Matt Davis (IRC/GitHub: @nitzmahone) + + +Engine improvements +------------------- +- Assemble module improvements + - assemble just skips when in check mode, it should be able to test if there is a difference and changed=true/false. + - The same with diff, it should work as template modules do +- Handle Password reset prompts cleaner +- Tasks stats for rescues and ignores +- Normalize temp dir usage across all subsystems +- Add option to set playbook dir for adhoc, inventory and console to allow for 'relative path loading' + + +Ansible-Config +-------------- +- Extend config to more plugin types and update plugins to support the new config + +Inventory +--------- +- ansible-inventory option to output group variable assignment and data (--export) +- Create inventory plugins for: + - aws + +Facts +----- +- Namespacing fact variables (via a config option) implemented in ansible/ansible PR `#18445 <https://github.com/ansible/ansible/pull/18445>`_. + Proposal found in ansible/proposals issue `#17 <https://github.com/ansible/proposals/issues/17>`_. +- Make fact collectors and gather_subset specs finer grained +- Eliminate unneeded deps between fact collectors +- Allow fact collectors to indicate if they need information from another fact collector to be gathered first. + +Static Loop Keyword +------------------- + +- A simpler alternative to ``with_``, ``loop:`` only takes a list +- Remove complexity from loops, lookups are still available to users +- Less confusing having a static directive vs one that is dynamic depending on plugins loaded.
+ +Vault +----- +- Vault secrets client inc new 'keyring' client + +Runtime Check on Modules for Blacklisting +----------------------------------------- +- Filter on things like "supported_by" in module metadata +- Provide users with an option of "warning, error or allow/ignore" +- Configurable via ansible.cfg and environment variable + +Windows +------- +- Implement gather_subset on Windows facts +- Fix Windows async + become to allow them to work together +- Implement Windows become flags for controlling various modes **(done)** + - logontype + - elevation behavior +- Convert win_updates to action plugin for auto reboot and extra features **(done)** +- Spike out changing the connection over to PSRP instead of WSMV **(done- it's possible)** +- Module updates + + - win_updates **(done)** + + - Fix win_updates to detect (or request) become + - Add whitelist/blacklist features to win_updates + - win_dsc further improvements **(done)** + +General Cloud +------------- +- Make multi-cloud provisioning easier +- Diff mode will output provisioning task results of ansible-playbook runs +- Terraform module + +AWS +--- +- Focus on pull requests for various modules +- Triage existing merges for modules +- Module work + + - ec2_instance + - ec2_vpc: Allow the addition of secondary IPv4 CIDRS to existing VPCs. 
+ - AWS Network Load Balancer support (NLB module, ASG support, and so on) + - rds_instance + +Azure +----- +- Azure CLI auth **(done)** +- Fix Azure module results to have "high-level" output instead of raw REST API dictionary **(partial, more to come in 2.6)** +- Deprecate Azure automatic storage accounts in azure_rm_virtualmachine **(breaks on Azure Stack, punted until AS supports managed disks)** + +Network Roadmap +--------------- +- Refactor common network shared code into package **(done)** +- Convert various nxos modules to leverage declarative intent **(done)** +- Refactor various modules to leverage the cliconf plugin **(done)** +- Add various missing declarative modules for supported platforms and functions **(done)** +- Implement a feature that handles platform differences and feature unavailability **(done)** +- netconf-config.py should provide control for deployment strategy +- Create netconf connection plugin **(done)** +- Create netconf fact module +- Turn network_cli into a usable connection type **(done)** +- Implements jsonrpc message passing for ansible-connection **(done)** +- Improve logging for ansible-connection **(done)** +- Improve stdout output for failures whilst using persistent connection **(done)** +- Create IOS-XR NetConf Plugin and refactor iosxr modules to leverage netconf plugin **(done)** +- Refactor junos modules to use netconf plugin **(done)** +- Filters: Add a filter to convert XML response from a network device to JSON object **(done)** + +Documentation +------------- +- Extend documentation to more plugins +- Document vault-password-client scripts. 
+- Network Documentation + + - New landing page (to replace intro_networking) **(done)** + - Platform specific guides **(done)** + - Walk through: Getting Started **(done)** + - Networking and ``become`` **(done)** + - Best practice **(done)** + +Contributor Quality of Life +--------------------------- +- Finish PSScriptAnalyer integration with ansible-test (for enforcing Powershell style) **(done)** +- Resolve issues requiring skipping of some integration tests on Python 3. diff --git a/docs/docsite/rst/roadmap/ROADMAP_2_6.rst b/docs/docsite/rst/roadmap/ROADMAP_2_6.rst new file mode 100644 index 00000000..49a6ebab --- /dev/null +++ b/docs/docsite/rst/roadmap/ROADMAP_2_6.rst @@ -0,0 +1,82 @@ +=========== +Ansible 2.6 +=========== + +.. contents:: Topics + +Release Schedule +---------------- + +Actual +====== + +- 2018-05-17 Core Freeze (Engine and Core Modules/Plugins) +- 2018-05-21 Alpha Release 1 +- 2018-05-25 Community Freeze (Non-Core Modules/Plugins) +- 2018-05-25 Branch stable-2.6 +- 2018-05-30 Alpha Release 2 +- 2018-06-05 Release Candidate 1 +- 2018-06-08 Release Candidate 2 +- 2018-06-18 Release Candidate 3 +- 2018-06-25 Release Candidate 4 +- 2018-06-26 Release Candidate 5 +- 2018-06-28 Final Release + + +Release Manager +--------------- +* 2.6.0-2.6.12 Matt Clay (IRC/GitHub: @mattclay) +* 2.6.13+ Toshio Kuratomi (IRC: abadger1999; GitHub: @abadger) + + +Engine improvements +------------------- + +- Version 2.6 is largely going to be a stabilization release for Core code. 
+- Some of the items covered in this release, but are not limited to are the following: + + - ``ansible-inventory`` + - ``import_*`` + - ``include_*`` + - Test coverage + - Performance Testing + +Core Modules +------------ +- Adopt-a-module Campaign + + - Review current status of all Core Modules + - Reduce backlog of open issues against these modules + +Cloud Modules +------------- + +Network +------- + +Connection work +================ + +* New connection plugin: eAPI `proposal#102 <https://github.com/ansible/proposals/issues/102>`_ +* New connection plugin: NX-API +* Support for configurable options for network_cli & netconf + +Modules +======= + +* New ``net_get`` - platform agnostic module for pulling configuration via SCP/SFTP over network_cli +* New ``net_put`` - platform agnostic module for pushing configuration via SCP/SFTP over network_cli +* New ``netconf_get`` - Netconf module to fetch configuration and state data `proposal#104 <https://github.com/ansible/proposals/issues/104>`_ + +Other Features +================ + +* Stretch & tech preview: Configuration caching for network_cli. Opt-in feature to avoid ``show running`` performance hit + + +Windows +------- + + + + diff --git a/docs/docsite/rst/roadmap/ROADMAP_2_7.rst b/docs/docsite/rst/roadmap/ROADMAP_2_7.rst new file mode 100644 index 00000000..bf65dcf7 --- /dev/null +++ b/docs/docsite/rst/roadmap/ROADMAP_2_7.rst @@ -0,0 +1,109 @@ +=========== +Ansible 2.7 +=========== + +.. 
contents:: Topics + +Release Schedule +---------------- + +Expected +======== + +- 2018-08-23 Core Freeze (Engine and Core Modules/Plugins) +- 2018-08-23 Alpha Release 1 +- 2018-08-30 Community Freeze (Non-Core Modules/Plugins) +- 2018-08-30 Beta Release 1 +- 2018-09-06 Release Candidate 1 (If needed) +- 2018-09-13 Release Candidate 2 (If needed) +- 2018-09-20 Release Candidate 3 (If needed) +- 2018-09-27 Release Candidate 4 (If needed) +- 2018-10-04 General Availability + +Release Manager +--------------- +Toshio Kuratomi (IRC: abadger1999; GitHub: @abadger) + + +Cleaning Duty +------------- + +- Drop Py2.6 for controllers `Docs PR #42971 <https://github.com/ansible/ansible/pull/42971>`_ and + `issue #42972 <https://github.com/ansible/ansible/issues/42972>`_ +- Remove dependency on simplejson `issue #42761 <https://github.com/ansible/ansible/issues/42761>`_ + + +Engine Improvements +------------------- + +- Performance improvement invoking Python modules `pr #41749 <https://github.com/ansible/ansible/pull/41749>`_ +- Jinja native types will allow for users to render a Python native type. `pr #32738 <https://github.com/ansible/ansible/pull/32738>`_ + + +Core Modules +------------ + +- Include feature changes and improvements + + - Create new argument ``apply`` that will allow for included tasks to inherit explicitly provided attributes. `pr #39236 <https://github.com/ansible/ansible/pull/39236>`_ + - Create "private" functionality for allowing vars/default to be exposed outside of roles. 
`pr #41330 <https://github.com/ansible/ansible/pull/41330>`_ +- Provide a parameter for the ``template`` module to output to different encoding formats `pr + #42171 <https://github.com/ansible/ansible/pull/42171>`_ +- ``reboot`` module for Linux hosts (@samdoran) `pr #35205 <https://github.com/ansible/ansible/pull/35205>`_ + +Cloud Modules +------------- + +General +======= +* Cloud auth plugin `proposal #24 <https://github.com/ansible/proposals/issues/24>`_ + +AWS +=== +* Inventory plugin for RDS `pr #41919 <https://github.com/ansible/ansible/pull/41919>`_ +* Count support for `ec2_instance` +* `aws_eks` module `pr #41183 <https://github.com/ansible/ansible/pull/41183>`_ +* Cloudformation stack sets support (`PR#41669 <https://github.com/ansible/ansible/pull/41669>`_) +* RDS instance and snapshot modules `pr #39994 <https://github.com/ansible/ansible/pull/39994>`_ `pr #43789 <https://github.com/ansible/ansible/pull/43789>`_ +* Diff mode improvements for cloud modules `pr #44533 <https://github.com/ansible/ansible/pull/44533>`_ + +Azure +===== + +* Azure inventory plugin `issue #42769 <https://github.com/ansible/ansible/issues/42769>`__ + + +Network +------- + +General +======= + +* Refactor the APIs in cliconf (`issue #39056 <https://github.com/ansible/ansible/issues/39056>`_) and netconf (`issue #39160 <https://github.com/ansible/ansible/issues/39160>`_) plugins so that they have a uniform signature across supported network platforms. 
**done** + (`PR #41846 <https://github.com/ansible/ansible/pull/41846>`_) (`PR #43643 <https://github.com/ansible/ansible/pull/43643>`_) (`PR #43837 <https://github.com/ansible/ansible/pull/43837>`_) + (`PR #43203 <https://github.com/ansible/ansible/pull/43203>`_) (`PR #42300 <https://github.com/ansible/ansible/pull/42300>`_) (`PR #44157 <https://github.com/ansible/ansible/pull/44157>`_) + +Modules +======= + +* New ``cli_config`` module `issue #39228 <https://github.com/ansible/ansible/issues/39228>`_ **done** `PR #42413 <https://github.com/ansible/ansible/pull/42413>`_. +* New ``cli_command`` module `issue #39284 <https://github.com/ansible/ansible/issues/39284>`_ +* Refactor ``netconf_config`` module to add additional functionality. **done** `proposal #104 <https://github.com/ansible/proposals/issues/104>`_ (`PR #44379 <https://github.com/ansible/ansible/pull/44379>`_) + +Windows +------- + +General +======= + +* Added new connection plugin that uses PSRP as the connection protocol `pr #41729 <https://github.com/ansible/ansible/pull/41729>`__ + +Modules +======= + +* Revamp Chocolatey to fix bugs and support offline installation `pr #43013 <https://github.com/ansible/ansible/pull/43013>`_. +* Add Chocolatey modules that can manage the following Chocolatey features + + * `Sources <https://chocolatey.org/docs/commands-sources>`_ `pr #42790 <https://github.com/ansible/ansible/pull/42790>`_ + * `Features <https://chocolatey.org/docs/chocolatey-configuration#features>`_ `pr #42848 <https://github.com/ansible/ansible/pull/42848>`_ + * `Config <https://chocolatey.org/docs/chocolatey-configuration#config-settings>`_ `pr #42915 <https://github.com/ansible/ansible/pull/42915>`_ diff --git a/docs/docsite/rst/roadmap/ROADMAP_2_8.rst b/docs/docsite/rst/roadmap/ROADMAP_2_8.rst new file mode 100644 index 00000000..04977aa7 --- /dev/null +++ b/docs/docsite/rst/roadmap/ROADMAP_2_8.rst @@ -0,0 +1,38 @@ +=========== +Ansible 2.8 +=========== + +.. 
contents:: + :local: + +Release Schedule +---------------- + +Expected +======== + +PRs must be raised well in advance of the dates below to have a chance of being included in this Ansible release. + +- 2019-04-04 Alpha 1 **Core freeze** + No new features to ``support:core`` code. + Includes no new options to existing Core modules + +- 2019-04-11 Beta 1 **Feature freeze** + No new functionality (including modules/plugins) to any code + +- 2019-04-25 Release Candidate 1 +- 2019-05-02 Release Candidate 2 +- 2019-05-10 Release Candidate 3 +- 2019-05-16 Release + + + +Release Manager +--------------- + +Toshio Kuratomi (IRC: abadger1999; GitHub: @abadger) + +Planned work +============ + +See the `Ansible 2.8 Project Board <https://github.com/ansible/ansible/projects/30>`_ diff --git a/docs/docsite/rst/roadmap/ROADMAP_2_9.rst b/docs/docsite/rst/roadmap/ROADMAP_2_9.rst new file mode 100644 index 00000000..370930ac --- /dev/null +++ b/docs/docsite/rst/roadmap/ROADMAP_2_9.rst @@ -0,0 +1,39 @@ +=========== +Ansible 2.9 +=========== + +.. contents:: + :local: + +Release Schedule +---------------- + +Expected +======== + +PRs must be raised well in advance of the dates below to have a chance of being included in this Ansible release. + +.. note:: There is no Alpha phase in 2.9. + +- 2019-08-29 Beta 1 **Feature freeze** + No new functionality (including modules/plugins) to any code + +- 2019-09-19 Release Candidate 1 +- 2019-10-03 Release Candidate 2 +- 2019-10-10 Release Candidate 3 +- 2019-10-17 Release Candidate 4 (if needed) +- 2019-10-24 Release Candidate 5 (if needed) +- 2019-10-31 Release + + + +Release Manager +--------------- +TBD + +Temporarily, Matt Davis (@nitzmahone) or Matt Clay (@mattclay) on IRC or github. 
+ +Planned work +============ + +See the `Ansible 2.9 Project Board <https://github.com/ansible/ansible/projects/34>`_ diff --git a/docs/docsite/rst/roadmap/ansible_base_roadmap_index.rst b/docs/docsite/rst/roadmap/ansible_base_roadmap_index.rst new file mode 100644 index 00000000..8bc68547 --- /dev/null +++ b/docs/docsite/rst/roadmap/ansible_base_roadmap_index.rst @@ -0,0 +1,31 @@ +.. _ansible_core_roadmaps: + +ansible-core Roadmaps +===================== + +The ``ansible-core`` team develops a roadmap for each major and minor ``ansible-core`` release. The latest roadmap shows current work; older roadmaps provide a history of the project. We don't publish roadmaps for subminor versions. So 2.10 and 2.11 have roadmaps, but 2.10.1 does not. + +.. note:: + + Ansible renamed ``ansible-base`` to ``ansible-core``. + +We incorporate team and community feedback in each roadmap, and aim for further transparency and better inclusion of both community desires and submissions. + +Each roadmap offers a *best guess*, based on the ``ansible-core`` team's experience and on requests and feedback from the community, of what will be included in a given release. However, some items on the roadmap may be dropped due to time constraints, lack of community maintainers, and so on. + +Each roadmap is published both as an idea of what is upcoming in ``ansible-core``, and as a medium for seeking further feedback from the community. + +You can submit feedback on the current roadmap in multiple ways: + +- Edit the agenda of an IRC `Core Team Meeting <https://github.com/ansible/community/blob/master/meetings/README.md>`_ (preferred) +- Post on the ``#ansible-devel`` Freenode IRC channel +- Email the ansible-devel list + +See :ref:`Ansible communication channels <communication>` for details on how to join and use the email lists and IRC channels. + +.. 
toctree:: + :maxdepth: 1 + :glob: + :caption: ansible-core Roadmaps + + ROADMAP_2_10 diff --git a/docs/docsite/rst/roadmap/ansible_roadmap_index.rst b/docs/docsite/rst/roadmap/ansible_roadmap_index.rst new file mode 100644 index 00000000..d350023b --- /dev/null +++ b/docs/docsite/rst/roadmap/ansible_roadmap_index.rst @@ -0,0 +1,26 @@ +.. _ansible_roadmaps: + +Ansible Roadmap +=============== + +The Ansible team develops a roadmap for each major and minor Ansible release. The latest roadmap shows current work; older roadmaps provide a history of the project. We don't publish roadmaps for subminor versions. So 2.10 and 2.11 have roadmaps, but 2.10.1 does not. + +We incorporate team and community feedback in each roadmap, and aim for further transparency and better inclusion of both community desires and submissions. + +Each roadmap offers a *best guess*, based on the Ansible team's experience and on requests and feedback from the community, of what will be included in a given release. However, some items on the roadmap may be dropped due to time constraints, lack of community maintainers, and so on. + +Each roadmap is published both as an idea of what is upcoming in Ansible, and as a medium for seeking further feedback from the community. + +You can submit feedback on the current roadmap in multiple ways: + +- Edit the agenda of an IRC `Ansible Community Meeting <https://github.com/ansible/community/issues/539>`_ (preferred) +- Post on the ``#ansible-community`` Freenode IRC channel + +See :ref:`Ansible communication channels <communication>` for details on how to join and use the IRC channels. + +.. toctree:: + :maxdepth: 1 + :glob: + :caption: Ansible Release Roadmaps + + COLLECTIONS_2_10 diff --git a/docs/docsite/rst/roadmap/index.rst b/docs/docsite/rst/roadmap/index.rst new file mode 100644 index 00000000..d1e248b0 --- /dev/null +++ b/docs/docsite/rst/roadmap/index.rst @@ -0,0 +1,29 @@ +.. 
_roadmaps: + +Ansible Roadmap +=============== + +The Ansible team develops a roadmap for each major and minor Ansible release. The latest roadmap shows current work; older roadmaps provide a history of the project. We don't publish roadmaps for subminor versions. So 2.0 and 2.8 have roadmaps, but 2.7.1 does not. + +We incorporate team and community feedback in each roadmap, and aim for further transparency and better inclusion of both community desires and submissions. + +Each roadmap offers a *best guess*, based on the Ansible team's experience and on requests and feedback from the community, of what will be included in a given release. However, some items on the roadmap may be dropped due to time constraints, lack of community maintainers, etc. + +Each roadmap is published both as an idea of what is upcoming in Ansible, and as a medium for seeking further feedback from the community. + +You can submit feedback on the current roadmap in multiple ways: + +- Edit the agenda of an IRC `Core Team Meeting <https://github.com/ansible/community/blob/master/meetings/README.md>`_ (preferred) +- Post on the ``#ansible-devel`` Freenode IRC channel +- Email the ansible-devel list + +See :ref:`Ansible communication channels <communication>` for details on how to join and use the email lists and IRC channels. + +.. toctree:: + :maxdepth: 1 + :glob: + :caption: Ansible Roadmaps + + ansible_base_roadmap_index + ansible_roadmap_index + old_roadmap_index diff --git a/docs/docsite/rst/roadmap/old_roadmap_index.rst b/docs/docsite/rst/roadmap/old_roadmap_index.rst new file mode 100644 index 00000000..78769f17 --- /dev/null +++ b/docs/docsite/rst/roadmap/old_roadmap_index.rst @@ -0,0 +1,19 @@ +.. _old_roadmaps: + +Older Roadmaps +=============== + +Older roadmaps are listed here to provide a history of the Ansible project. + +See :ref:`roadmaps` to find current Ansible and ``ansible-base`` roadmaps. + +.. 
toctree:: + :maxdepth: 1 + :glob: + :caption: Older Roadmaps + + ROADMAP_2_9 + ROADMAP_2_8 + ROADMAP_2_7 + ROADMAP_2_6 + ROADMAP_2_5 diff --git a/docs/docsite/rst/scenario_guides/cloud_guides.rst b/docs/docsite/rst/scenario_guides/cloud_guides.rst new file mode 100644 index 00000000..d430bdda --- /dev/null +++ b/docs/docsite/rst/scenario_guides/cloud_guides.rst @@ -0,0 +1,22 @@ +.. _cloud_guides: + +******************* +Public Cloud Guides +******************* + +The guides in this section cover using Ansible with a range of public cloud platforms. They explore particular use cases in greater depth and provide a more "top-down" explanation of some basic features. + +.. toctree:: + :maxdepth: 1 + + guide_alicloud + guide_aws + guide_cloudstack + guide_gce + guide_azure + guide_online + guide_oracle + guide_packet + guide_rax + guide_scaleway + guide_vultr diff --git a/docs/docsite/rst/scenario_guides/guide_aci.rst b/docs/docsite/rst/scenario_guides/guide_aci.rst new file mode 100644 index 00000000..5fe4c648 --- /dev/null +++ b/docs/docsite/rst/scenario_guides/guide_aci.rst @@ -0,0 +1,661 @@ +.. _aci_guide: + +Cisco ACI Guide +=============== + + +.. _aci_guide_intro: + +What is Cisco ACI ? +------------------- + +Application Centric Infrastructure (ACI) +........................................ +The Cisco Application Centric Infrastructure (ACI) allows application requirements to define the network. This architecture simplifies, optimizes, and accelerates the entire application deployment life cycle. + + +Application Policy Infrastructure Controller (APIC) +................................................... +The APIC manages the scalable ACI multi-tenant fabric. The APIC provides a unified point of automation and management, policy programming, application deployment, and health monitoring for the fabric. 
The APIC, which is implemented as a replicated synchronized clustered controller, optimizes performance, supports any application anywhere, and provides unified operation of the physical and virtual infrastructure. + +The APIC enables network administrators to easily define the optimal network for applications. Data center operators can clearly see how applications consume network resources, easily isolate and troubleshoot application and infrastructure problems, and monitor and profile resource usage patterns. + +The Cisco Application Policy Infrastructure Controller (APIC) API enables applications to directly connect with a secure, shared, high-performance resource pool that includes network, compute, and storage capabilities. + + +ACI Fabric +.......... +The Cisco Application Centric Infrastructure (ACI) Fabric includes Cisco Nexus 9000 Series switches with the APIC to run in the leaf/spine ACI fabric mode. These switches form a "fat-tree" network by connecting each leaf node to each spine node; all other devices connect to the leaf nodes. The APIC manages the ACI fabric. + +The ACI fabric provides consistent low-latency forwarding across high-bandwidth links (40 Gbps, with a 100-Gbps future capability). Traffic with the source and destination on the same leaf switch is handled locally, and all other traffic travels from the ingress leaf to the egress leaf through a spine switch. Although this architecture appears as two hops from a physical perspective, it is actually a single Layer 3 hop because the fabric operates as a single Layer 3 switch. + +The ACI fabric object-oriented operating system (OS) runs on each Cisco Nexus 9000 Series node. It enables programming of objects for each configurable element of the system. The ACI fabric OS renders policies from the APIC into a concrete model that runs in the physical infrastructure. The concrete model is analogous to compiled software; it is the form of the model that the switch operating system can execute. 
+ +All the switch nodes contain a complete copy of the concrete model. When an administrator creates a policy in the APIC that represents a configuration, the APIC updates the logical model. The APIC then performs the intermediate step of creating a fully elaborated policy that it pushes into all the switch nodes where the concrete model is updated. + +The APIC is responsible for fabric activation, switch firmware management, network policy configuration, and instantiation. While the APIC acts as the centralized policy and network management engine for the fabric, it is completely removed from the data path, including the forwarding topology. Therefore, the fabric can still forward traffic even when communication with the APIC is lost. + + +More information +................ +Various resources exist to start learning ACI, here is a list of interesting articles from the community. + +- `Adam Raffe: Learning ACI <https://adamraffe.com/learning-aci/>`_ +- `Luca Relandini: ACI for dummies <https://lucarelandini.blogspot.be/2015/03/aci-for-dummies.html>`_ +- `Cisco DevNet Learning Labs about ACI <https://learninglabs.cisco.com/labs/tags/ACI>`_ + + +.. _aci_guide_modules: + +Using the ACI modules +--------------------- +The Ansible ACI modules provide a user-friendly interface to managing your ACI environment using Ansible playbooks. + +For instance ensuring that a specific tenant exists, is done using the following Ansible task using the aci_tenant module: + +.. code-block:: yaml + + - name: Ensure tenant customer-xyz exists + aci_tenant: + host: my-apic-1 + username: admin + password: my-password + + tenant: customer-xyz + description: Customer XYZ + state: present + +A complete list of existing ACI modules is available on the content tab of the `ACI collection on Ansible Galaxy <https://galaxy.ansible.com/cisco/aci>`_. + +If you want to learn how to write your own ACI modules to contribute, look at the :ref:`Developing Cisco ACI modules <aci_dev_guide>` section. 
+ +Querying ACI configuration +.......................... + +A module can also be used to query a specific object. + +.. code-block:: yaml + + - name: Query tenant customer-xyz + aci_tenant: + host: my-apic-1 + username: admin + password: my-password + + tenant: customer-xyz + state: query + register: my_tenant + +Or query all objects. + +.. code-block:: yaml + + - name: Query all tenants + aci_tenant: + host: my-apic-1 + username: admin + password: my-password + + state: query + register: all_tenants + +After registering the return values of the aci_tenant task as shown above, you can access all tenant information from variable ``all_tenants``. + + +Running on the controller locally +................................. +As originally designed, Ansible modules are shipped to and run on the remote target(s), however the ACI modules (like most network-related modules) do not run on the network devices or controller (in this case the APIC), but they talk directly to the APIC's REST interface. + +For this very reason, the modules need to run on the local Ansible controller (or are delegated to another system that *can* connect to the APIC). + + +Gathering facts +``````````````` +Because we run the modules on the Ansible controller gathering facts will not work. That is why when using these ACI modules it is mandatory to disable facts gathering. You can do this globally in your ``ansible.cfg`` or by adding ``gather_facts: no`` to every play. + +.. code-block:: yaml + :emphasize-lines: 3 + + - name: Another play in my playbook + hosts: my-apic-1 + gather_facts: no + tasks: + - name: Create a tenant + aci_tenant: + ... + +Delegating to localhost +``````````````````````` +So let us assume we have our target configured in the inventory using the FQDN name as the ``ansible_host`` value, as shown below. + +.. 
code-block:: yaml + :emphasize-lines: 3 + + apics: + my-apic-1: + ansible_host: apic01.fqdn.intra + ansible_user: admin + ansible_password: my-password + +One way to set this up is to add to every task the directive: ``delegate_to: localhost``. + +.. code-block:: yaml + :emphasize-lines: 8 + + - name: Query all tenants + aci_tenant: + host: '{{ ansible_host }}' + username: '{{ ansible_user }}' + password: '{{ ansible_password }}' + + state: query + delegate_to: localhost + register: all_tenants + +If one would forget to add this directive, Ansible will attempt to connect to the APIC using SSH and attempt to copy the module and run it remotely. This will fail with a clear error, yet may be confusing to some. + + +Using the local connection method +````````````````````````````````` +Another option frequently used, is to tie the ``local`` connection method to this target so that every subsequent task for this target will use the local connection method (hence run it locally, rather than use SSH). + +In this case the inventory may look like this: + +.. code-block:: yaml + :emphasize-lines: 6 + + apics: + my-apic-1: + ansible_host: apic01.fqdn.intra + ansible_user: admin + ansible_password: my-password + ansible_connection: local + +But used tasks do not need anything special added. + +.. code-block:: yaml + + - name: Query all tenants + aci_tenant: + host: '{{ ansible_host }}' + username: '{{ ansible_user }}' + password: '{{ ansible_password }}' + + state: query + register: all_tenants + +.. hint:: For clarity we have added ``delegate_to: localhost`` to all the examples in the module documentation. This helps to ensure first-time users can easily copy&paste parts and make them work with a minimum of effort. + + +Common parameters +................. +Every Ansible ACI module accepts the following parameters that influence the module's communication with the APIC REST API: + + host + Hostname or IP address of the APIC. + + port + Port to use for communication. 
(Defaults to ``443`` for HTTPS, and ``80`` for HTTP) + + username + User name used to log on to the APIC. (Defaults to ``admin``) + + password + Password for ``username`` to log on to the APIC, using password-based authentication. + + private_key + Private key for ``username`` to log on to APIC, using signature-based authentication. + This could either be the raw private key content (include header/footer) or a file that stores the key content. + *New in version 2.5* + + certificate_name + Name of the certificate in the ACI Web GUI. + This defaults to either the ``username`` value or the ``private_key`` file base name). + *New in version 2.5* + + timeout + Timeout value for socket-level communication. + + use_proxy + Use system proxy settings. (Defaults to ``yes``) + + use_ssl + Use HTTPS or HTTP for APIC REST communication. (Defaults to ``yes``) + + validate_certs + Validate certificate when using HTTPS communication. (Defaults to ``yes``) + + output_level + Influence the level of detail ACI modules return to the user. (One of ``normal``, ``info`` or ``debug``) *New in version 2.5* + + +Proxy support +............. +By default, if an environment variable ``<protocol>_proxy`` is set on the target host, requests will be sent through that proxy. This behaviour can be overridden by setting a variable for this task (see :ref:`playbooks_environment`), or by using the ``use_proxy`` module parameter. + +HTTP redirects can redirect from HTTP to HTTPS so ensure that the proxy environment for both protocols is correctly configured. + +If proxy support is not needed, but the system may have it configured nevertheless, use the parameter ``use_proxy: no`` to avoid accidental system proxy usage. + +.. hint:: Selective proxy support using the ``no_proxy`` environment variable is also supported. + + +Return values +............. + +.. versionadded:: 2.5 + +The following values are always returned: + + current + The resulting state of the managed object, or results of your query. 
+ +The following values are returned when ``output_level: info``: + + previous + The original state of the managed object (before any change was made). + + proposed + The proposed config payload, based on user-supplied values. + + sent + The sent config payload, based on user-supplied values and the existing configuration. + +The following values are returned when ``output_level: debug`` or ``ANSIBLE_DEBUG=1``: + + filter_string + The filter used for specific APIC queries. + + method + The HTTP method used for the sent payload. (Either ``GET`` for queries, ``DELETE`` or ``POST`` for changes) + + response + The HTTP response from the APIC. + + status + The HTTP status code for the request. + + url + The url used for the request. + +.. note:: The module return values are documented in detail as part of each module's documentation. + + +More information +................ +Various resources exist to learn more about ACI programmability; we recommend the following links: + +- :ref:`Developing Cisco ACI modules <aci_dev_guide>` +- `Jacob McGill: Automating Cisco ACI with Ansible <https://blogs.cisco.com/developer/automating-cisco-aci-with-ansible-eliminates-repetitive-day-to-day-tasks>`_ +- `Cisco DevNet Learning Labs about ACI and Ansible <https://learninglabs.cisco.com/labs/tags/ACI,Ansible>`_ + + +.. _aci_guide_auth: + +ACI authentication +------------------ + +Password-based authentication +............................. +If you want to log on using a username and password, you can use the following parameters with your ACI modules: + +.. code-block:: yaml + + username: admin + password: my-password + +Password-based authentication is very simple to work with, but it is not the most efficient form of authentication from ACI's point-of-view as it requires a separate login-request and an open session to work. To avoid having your session time-out and requiring another login, you can use the more efficient Signature-based authentication. + +.. 
note:: Password-based authentication also may trigger anti-DoS measures in ACI v3.1+ that causes session throttling and results in HTTP 503 errors and login failures. + +.. warning:: Never store passwords in plain text. + +The "Vault" feature of Ansible allows you to keep sensitive data such as passwords or keys in encrypted files, rather than as plain text in your playbooks or roles. These vault files can then be distributed or placed in source control. See :ref:`playbooks_vault` for more information. + + +Signature-based authentication using certificates +................................................. + +.. versionadded:: 2.5 + +Using signature-based authentication is more efficient and more reliable than password-based authentication. + +Generate certificate and private key +```````````````````````````````````` +Signature-based authentication requires a (self-signed) X.509 certificate with private key, and a configuration step for your AAA user in ACI. To generate a working X.509 certificate and private key, use the following procedure: + +.. 
code-block:: bash + + $ openssl req -new -newkey rsa:1024 -days 36500 -nodes -x509 -keyout admin.key -out admin.crt -subj '/CN=Admin/O=Your Company/C=US' + +Configure your local user +````````````````````````` +Perform the following steps: + +- Add the X.509 certificate to your ACI AAA local user at :guilabel:`ADMIN` » :guilabel:`AAA` +- Click :guilabel:`AAA Authentication` +- Check that in the :guilabel:`Authentication` field the :guilabel:`Realm` field displays :guilabel:`Local` +- Expand :guilabel:`Security Management` » :guilabel:`Local Users` +- Click the name of the user you want to add a certificate to, in the :guilabel:`User Certificates` area +- Click the :guilabel:`+` sign and in the :guilabel:`Create X509 Certificate` enter a certificate name in the :guilabel:`Name` field + + * If you use the basename of your private key here, you don't need to enter ``certificate_name`` in Ansible + +- Copy and paste your X.509 certificate in the :guilabel:`Data` field. + +You can automate this by using the following Ansible task: + +.. code-block:: yaml + + - name: Ensure we have a certificate installed + aci_aaa_user_certificate: + host: my-apic-1 + username: admin + password: my-password + + aaa_user: admin + certificate_name: admin + certificate: "{{ lookup('file', 'pki/admin.crt') }}" # This will read the certificate data from a local file + +.. note:: Signature-based authentication only works with local users. + + +Use signature-based authentication with Ansible +``````````````````````````````````````````````` +You need the following parameters with your ACI module(s) for it to work: + +.. code-block:: yaml + :emphasize-lines: 2,3 + + username: admin + private_key: pki/admin.key + certificate_name: admin # This could be left out ! + +or you can use the private key content: + +.. 
code-block:: yaml + :emphasize-lines: 2,3 + + username: admin + private_key: | + -----BEGIN PRIVATE KEY----- + <<your private key content>> + -----END PRIVATE KEY----- + certificate_name: admin # This could be left out ! + + +.. hint:: If you use a certificate name in ACI that matches the private key's basename, you can leave out the ``certificate_name`` parameter like the example above. + + +Using Ansible Vault to encrypt the private key +`````````````````````````````````````````````` +.. versionadded:: 2.8 + +To start, encrypt the private key and give it a strong password. + +.. code-block:: bash + + ansible-vault encrypt admin.key + +Use a text editor to open the private-key. You should have an encrypted cert now. + +.. code-block:: bash + + $ANSIBLE_VAULT;1.1;AES256 + 56484318584354658465121889743213151843149454864654151618131547984132165489484654 + 45641818198456456489479874513215489484843614848456466655432455488484654848489498 + .... + +Copy and paste the new encrypted cert into your playbook as a new variable. + +.. code-block:: yaml + + private_key: !vault | + $ANSIBLE_VAULT;1.1;AES256 + 56484318584354658465121889743213151843149454864654151618131547984132165489484654 + 45641818198456456489479874513215489484843614848456466655432455488484654848489498 + .... + +Use the new variable for the private_key: + +.. code-block:: yaml + + username: admin + private_key: "{{ private_key }}" + certificate_name: admin # This could be left out ! + +When running the playbook, use "--ask-vault-pass" to decrypt the private key. + +.. code-block:: bash + + ansible-playbook site.yaml --ask-vault-pass + + +More information +```````````````` +- Detailed information about Signature-based Authentication is available from `Cisco APIC Signature-Based Transactions <https://www.cisco.com/c/en/us/td/docs/switches/datacenter/aci/apic/sw/kb/b_KB_Signature_Based_Transactions.html>`_. +- More information on Ansible Vault can be found on the :ref:`Ansible Vault <vault>` page. + + +.. 
_aci_guide_rest: + +Using ACI REST with Ansible +--------------------------- +While a lot of ACI modules already exist in the Ansible distribution, and the most common actions can be performed with these existing modules, there's always something that may not be possible with off-the-shelf modules. + +The aci_rest module provides you with direct access to the APIC REST API and enables you to perform any task not already covered by the existing modules. This may seem like a complex undertaking, but you can generate the needed REST payload for any action performed in the ACI web interface effortlessly. + + +Built-in idempotency +.................... +Because the APIC REST API is intrinsically idempotent and can report whether a change was made, the aci_rest module automatically inherits both capabilities and is a first-class solution for automating your ACI infrastructure. As a result, users that require more powerful low-level access to their ACI infrastructure don't have to give up on idempotency and don't have to guess whether a change was performed when using the aci_rest module. + + +Using the aci_rest module +......................... +The aci_rest module accepts the native XML and JSON payloads, but additionally accepts inline YAML payload (structured like JSON). The XML payload requires you to use a path ending with ``.xml`` whereas JSON or YAML require the path to end with ``.json``. + +When you're making modifications, you can use the POST or DELETE methods, whereas doing just queries requires the GET method. + +For instance, if you would like to ensure a specific tenant exists on ACI, these below four examples are functionally identical: + +**XML** (Native ACI REST) + +.. code-block:: yaml + + - aci_rest: + host: my-apic-1 + private_key: pki/admin.key + + method: post + path: /api/mo/uni.xml + content: | + <fvTenant name="customer-xyz" descr="Customer XYZ"/> + +**JSON** (Native ACI REST) + +.. 
code-block:: yaml + + - aci_rest: + host: my-apic-1 + private_key: pki/admin.key + + method: post + path: /api/mo/uni.json + content: + { + "fvTenant": { + "attributes": { + "name": "customer-xyz", + "descr": "Customer XYZ" + } + } + } + +**YAML** (Ansible-style REST) + +.. code-block:: yaml + + - aci_rest: + host: my-apic-1 + private_key: pki/admin.key + + method: post + path: /api/mo/uni.json + content: + fvTenant: + attributes: + name: customer-xyz + descr: Customer XYZ + +**Ansible task** (Dedicated module) + +.. code-block:: yaml + + - aci_tenant: + host: my-apic-1 + private_key: pki/admin.key + + tenant: customer-xyz + description: Customer XYZ + state: present + + +.. hint:: The XML format is more practical when there is a need to template the REST payload (inline), but the YAML format is more convenient for maintaining your infrastructure-as-code and feels more naturally integrated with Ansible playbooks. The dedicated modules offer a more simple, abstracted, but also a more limited experience. Use what feels best for your use-case. + + +More information +................ +Plenty of resources exist to learn about ACI's APIC REST interface, we recommend the links below: + +- `The ACI collection on Ansible Galaxy <https://galaxy.ansible.com/cisco/aci>`_ +- `APIC REST API Configuration Guide <https://www.cisco.com/c/en/us/td/docs/switches/datacenter/aci/apic/sw/2-x/rest_cfg/2_1_x/b_Cisco_APIC_REST_API_Configuration_Guide.html>`_ -- Detailed guide on how the APIC REST API is designed and used, incl. many examples +- `APIC Management Information Model reference <https://developer.cisco.com/docs/apic-mim-ref/>`_ -- Complete reference of the APIC object model +- `Cisco DevNet Learning Labs about ACI and REST <https://learninglabs.cisco.com/labs/tags/ACI,REST>`_ + + +.. _aci_guide_ops: + +Operational examples +-------------------- +Here is a small overview of useful operational tasks to reuse in your playbooks. + +Feel free to contribute more useful snippets. 
+ + +Waiting for all controllers to be ready +....................................... +You can use the below task after you started to build your APICs and configured the cluster to wait until all the APICs have come online. It will wait until the number of controllers equals the number listed in the ``apic`` inventory group. + +.. code-block:: yaml + + - name: Waiting for all controllers to be ready + aci_rest: + host: my-apic-1 + private_key: pki/admin.key + method: get + path: /api/node/class/topSystem.json?query-target-filter=eq(topSystem.role,"controller") + register: topsystem + until: topsystem|success and topsystem.totalCount|int >= groups['apic']|count >= 3 + retries: 20 + delay: 30 + + +Waiting for cluster to be fully-fit +................................... +The below example waits until the cluster is fully-fit. In this example you know the number of APICs in the cluster and you verify each APIC reports a 'fully-fit' status. + +.. code-block:: yaml + + - name: Waiting for cluster to be fully-fit + aci_rest: + host: my-apic-1 + private_key: pki/admin.key + method: get + path: /api/node/class/infraWiNode.json?query-target-filter=wcard(infraWiNode.dn,"topology/pod-1/node-1/av") + register: infrawinode + until: > + infrawinode|success and + infrawinode.totalCount|int >= groups['apic']|count >= 3 and + infrawinode.imdata[0].infraWiNode.attributes.health == 'fully-fit' and + infrawinode.imdata[1].infraWiNode.attributes.health == 'fully-fit' and + infrawinode.imdata[2].infraWiNode.attributes.health == 'fully-fit' + retries: 30 + delay: 30 + + +.. _aci_guide_errors: + +APIC error messages +------------------- +The following error messages may occur and this section can help you understand what exactly is going on and how to fix/avoid them. 
+ + APIC Error 122: unknown managed object class 'polUni' + In case you receive this error while you are certain your aci_rest payload and object classes are seemingly correct, the issue might be that your payload is not in fact correct JSON (for example, the sent payload is using single quotes, rather than double quotes), and as a result the APIC is not correctly parsing your object classes from the payload. One way to avoid this is by using a YAML or an XML formatted payload, which are easier to construct correctly and modify later. + + + APIC Error 400: invalid data at line '1'. Attributes are missing, tag 'attributes' must be specified first, before any other tag + Although the JSON specification allows unordered elements, the APIC REST API requires that the JSON ``attributes`` element precede the ``children`` array or other elements. So you need to ensure that your payload conforms to this requirement. Sorting your dictionary keys will do the trick just fine. If you don't have any attributes, it may be necessary to add: ``attributes: {}`` as the APIC does expect the entry to precede any ``children``. + + + APIC Error 801: property descr of uni/tn-TENANT/ap-AP failed validation for value 'A "legacy" network' + Some values in the APIC have strict format-rules to comply to, and the internal APIC validation check for the provided value failed. In the above case, the ``description`` parameter (internally known as ``descr``) only accepts values conforming to `Regex: [a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+ <https://pubhub-prod.s3.amazonaws.com/media/apic-mim-ref/docs/MO-fvAp.html#descr>`_, in general it must not include quotes or square brackets. + + +.. _aci_guide_known_issues: + +Known issues +------------ +The aci_rest module is a wrapper around the APIC REST API. As a result any issues related to the APIC will be reflected in the use of this module. + +All below issues either have been reported to the vendor, and most can simply be avoided. 
+ + Too many consecutive API calls may result in connection throttling + Starting with ACI v3.1 the APIC will actively throttle password-based authenticated connection rates over a specific threshold. This is as part of an anti-DDOS measure but can act up when using Ansible with ACI using password-based authentication. Currently, one solution is to increase this threshold within the nginx configuration, but using signature-based authentication is recommended. + + **NOTE:** It is advisable to use signature-based authentication with ACI as it not only prevents connection-throttling, but also improves general performance when using the ACI modules. + + + Specific requests may not reflect changes correctly (`#35401 <https://github.com/ansible/ansible/issues/35041>`_) + There is a known issue where specific requests to the APIC do not properly reflect changed in the resulting output, even when we request those changes explicitly from the APIC. In one instance using the path ``api/node/mo/uni/infra.xml`` fails, where ``api/node/mo/uni/infra/.xml`` does work correctly. + + **NOTE:** A workaround is to register the task return values (for example, ``register: this``) and influence when the task should report a change by adding: ``changed_when: this.imdata != []``. + + + Specific requests are known to not be idempotent (`#35050 <https://github.com/ansible/ansible/issues/35050>`_) + The behaviour of the APIC is inconsistent to the use of ``status="created"`` and ``status="deleted"``. The result is that when you use ``status="created"`` in your payload the resulting tasks are not idempotent and creation will fail when the object was already created. However this is not the case with ``status="deleted"`` where such call to an non-existing object does not cause any failure whatsoever. + + **NOTE:** A workaround is to avoid using ``status="created"`` and instead use ``status="modified"`` when idempotency is essential to your workflow.. 
+ + + Setting user password is not idempotent (`#35544 <https://github.com/ansible/ansible/issues/35544>`_) + Due to an inconsistency in the APIC REST API, a task that sets the password of a locally-authenticated user is not idempotent. The APIC will complain with message ``Password history check: user dag should not use previous 5 passwords``. + + **NOTE:** There is no workaround for this issue. + + +.. _aci_guide_community: + +ACI Ansible community +--------------------- +If you have specific issues with the ACI modules, or a feature request, or you like to contribute to the ACI project by proposing changes or documentation updates, look at the Ansible Community wiki ACI page at: https://github.com/ansible/community/wiki/Network:-ACI + +You will find our roadmap, an overview of open ACI issues and pull-requests, and more information about who we are. If you have an interest in using ACI with Ansible, feel free to join! We occasionally meet online to track progress and prepare for new Ansible releases. + + +.. seealso:: + + `ACI collection on Ansible Galaxy <https://galaxy.ansible.com/cisco/aci>`_ + View the content tab for a complete list of supported ACI modules. + :ref:`Developing Cisco ACI modules <aci_dev_guide>` + A walkthough on how to develop new Cisco ACI modules to contribute back. + `ACI community <https://github.com/ansible/community/wiki/Network:-ACI>`_ + The Ansible ACI community wiki page, includes roadmap, ideas and development documentation. + :ref:`network_guide` + A detailed guide on how to use Ansible for automating network infrastructure. + `Network Working Group <https://github.com/ansible/community/tree/master/group-network>`_ + The Ansible Network community page, includes contact information and meeting information. + `#ansible-network <https://webchat.freenode.net/?channels=ansible-network>`_ + The #ansible-network IRC chat channel on Freenode.net. + `User Mailing List <https://groups.google.com/group/ansible-project>`_ + Have a question? 
Stop by the google group! diff --git a/docs/docsite/rst/scenario_guides/guide_alicloud.rst b/docs/docsite/rst/scenario_guides/guide_alicloud.rst new file mode 100644 index 00000000..c91eaf7f --- /dev/null +++ b/docs/docsite/rst/scenario_guides/guide_alicloud.rst @@ -0,0 +1,125 @@ +Alibaba Cloud Compute Services Guide +==================================== + +.. _alicloud_intro: + +Introduction +```````````` + +Ansible contains several modules for controlling and managing Alibaba Cloud Compute Services (Alicloud). This guide +explains how to use the Alicloud Ansible modules together. + +All Alicloud modules require ``footmark`` - install it on your control machine with ``pip install footmark``. + +Cloud modules, including Alicloud modules, execute on your local machine (the control machine) with ``connection: local``, rather than on remote machines defined in your hosts. + +Normally, you'll use the following pattern for plays that provision Alicloud resources:: + + - hosts: localhost + connection: local + vars: + - ... + tasks: + - ... + +.. _alicloud_authentication: + +Authentication +`````````````` + +You can specify your Alicloud authentication credentials (access key and secret key) by passing them as +environment variables or by storing them in a vars file. + +To pass authentication credentials as environment variables:: + + export ALICLOUD_ACCESS_KEY='Alicloud123' + export ALICLOUD_SECRET_KEY='AlicloudSecret123' + +To store authentication credentials in a vars_file, encrypt them with :ref:`Ansible Vault<vault>` to keep them secure, then list them:: + + --- + alicloud_access_key: "--REMOVED--" + alicloud_secret_key: "--REMOVED--" + +Note that if you store your credentials in a vars_file, you need to refer to them in each Alicloud module. For example:: + + - ali_instance: + alicloud_access_key: "{{alicloud_access_key}}" + alicloud_secret_key: "{{alicloud_secret_key}}" + image_id: "..." + +.. 
_alicloud_provisioning: + +Provisioning +```````````` + +Alicloud modules create Alicloud ECS instances, disks, virtual private clouds, virtual switches, security groups and other resources. + +You can use the ``count`` parameter to control the number of resources you create or terminate. For example, if you want exactly 5 instances tagged ``NewECS``, +set the ``count`` of instances to 5 and the ``count_tag`` to ``NewECS``, as shown in the last task of the example playbook below. +If there are no instances with the tag ``NewECS``, the task creates 5 new instances. If there are 2 instances with that tag, the task +creates 3 more. If there are 8 instances with that tag, the task terminates 3 of those instances. + +If you do not specify a ``count_tag``, the task creates the number of instances you specify in ``count`` with the ``instance_name`` you provide. + +:: + + # alicloud_setup.yml + + - hosts: localhost + connection: local + + tasks: + + - name: Create VPC + ali_vpc: + cidr_block: '{{ cidr_block }}' + vpc_name: new_vpc + register: created_vpc + + - name: Create VSwitch + ali_vswitch: + alicloud_zone: '{{ alicloud_zone }}' + cidr_block: '{{ vsw_cidr }}' + vswitch_name: new_vswitch + vpc_id: '{{ created_vpc.vpc.id }}' + register: created_vsw + + - name: Create security group + ali_security_group: + name: new_group + vpc_id: '{{ created_vpc.vpc.id }}' + rules: + - proto: tcp + port_range: 22/22 + cidr_ip: 0.0.0.0/0 + priority: 1 + rules_egress: + - proto: tcp + port_range: 80/80 + cidr_ip: 192.168.0.54/32 + priority: 1 + register: created_group + + - name: Create a set of instances + ali_instance: + security_groups: '{{ created_group.group_id }}' + instance_type: ecs.n4.small + image_id: "{{ ami_id }}" + instance_name: "My-new-instance" + instance_tags: + Name: NewECS + Version: 0.0.1 + count: 5 + count_tag: + Name: NewECS + allocate_public_ip: true + max_bandwidth_out: 50 + vswitch_id: '{{ created_vsw.vswitch.id}}' + register: create_instance + +In the example 
playbook above, data about the vpc, vswitch, group, and instances created by this playbook +are saved in the variables defined by the "register" keyword in each task. + +Each Alicloud module offers a variety of parameter options. Not all options are demonstrated in the above example. +See each individual module for further details and examples. diff --git a/docs/docsite/rst/scenario_guides/guide_aws.rst b/docs/docsite/rst/scenario_guides/guide_aws.rst new file mode 100644 index 00000000..ba453195 --- /dev/null +++ b/docs/docsite/rst/scenario_guides/guide_aws.rst @@ -0,0 +1,281 @@ +Amazon Web Services Guide +========================= + +.. _aws_intro: + +Introduction +```````````` + +Ansible contains a number of modules for controlling Amazon Web Services (AWS). The purpose of this +section is to explain how to put Ansible modules together (and use inventory scripts) to use Ansible in AWS context. + +Requirements for the AWS modules are minimal. + +All of the modules require and are tested against recent versions of boto, usually boto3. Check the module documentation for the minimum required version for each module. You must have the boto3 Python module installed on your control machine. You may also need the original boto package. You can install these modules from your OS distribution or using the python package installer: ``pip install boto3``. + +Whereas classically Ansible will execute tasks in its host loop against multiple remote machines, most cloud-control steps occur on your local machine with reference to the regions to control. + +In your playbook steps we'll typically be using the following pattern for provisioning steps:: + + - hosts: localhost + gather_facts: False + tasks: + - ... + +.. _aws_authentication: + +Authentication +`````````````` + +Authentication with the AWS-related modules is handled by either +specifying your access and secret key as ENV variables or module arguments. 
+ +For environment variables:: + + export AWS_ACCESS_KEY_ID='AK123' + export AWS_SECRET_ACCESS_KEY='abc123' + +For storing these in a vars_file, ideally encrypted with ansible-vault:: + + --- + ec2_access_key: "--REMOVED--" + ec2_secret_key: "--REMOVED--" + +Note that if you store your credentials in vars_file, you need to refer to them in each AWS-module. For example:: + + - ec2 + aws_access_key: "{{ec2_access_key}}" + aws_secret_key: "{{ec2_secret_key}}" + image: "..." + +.. _aws_provisioning: + +Provisioning +```````````` + +The ec2 module provisions and de-provisions instances within EC2. + +An example of making sure there are only 5 instances tagged 'Demo' in EC2 follows. + +In the example below, the "exact_count" of instances is set to 5. This means if there are 0 instances already existing, then +5 new instances would be created. If there were 2 instances, only 3 would be created, and if there were 8 instances, 3 instances would +be terminated. + +What is being counted is specified by the "count_tag" parameter. The parameter "instance_tags" is used to apply tags to the newly created +instance.:: + + # demo_setup.yml + + - hosts: localhost + gather_facts: False + + tasks: + + - name: Provision a set of instances + ec2: + key_name: my_key + group: test + instance_type: t2.micro + image: "{{ ami_id }}" + wait: true + exact_count: 5 + count_tag: + Name: Demo + instance_tags: + Name: Demo + register: ec2 + +The data about what instances are created is being saved by the "register" keyword in the variable named "ec2". + +From this, we'll use the add_host module to dynamically create a host group consisting of these new instances. 
This facilitates performing configuration actions on the hosts immediately in a subsequent task.:: + + # demo_setup.yml + + - hosts: localhost + gather_facts: False + + tasks: + + - name: Provision a set of instances + ec2: + key_name: my_key + group: test + instance_type: t2.micro + image: "{{ ami_id }}" + wait: true + exact_count: 5 + count_tag: + Name: Demo + instance_tags: + Name: Demo + register: ec2 + + - name: Add all instance public IPs to host group + add_host: hostname={{ item.public_ip }} groups=ec2hosts + loop: "{{ ec2.instances }}" + +With the host group now created, a second play at the bottom of the same provisioning playbook file might now have some configuration steps:: + + # demo_setup.yml + + - name: Provision a set of instances + hosts: localhost + # ... AS ABOVE ... + + - hosts: ec2hosts + name: configuration play + user: ec2-user + gather_facts: true + + tasks: + + - name: Check NTP service + service: name=ntpd state=started + +.. _aws_security_groups: + +Security Groups +``````````````` + +Security groups on AWS are stateful. The response of a request from your instance is allowed to flow in regardless of inbound security group rules and vice-versa. 
If you only want to allow traffic to the AWS S3 service, you need to fetch the current IP ranges of AWS S3 for one region and apply them as an egress rule.
_aws_pull: + +Autoscaling with Ansible Pull +````````````````````````````` + +Amazon Autoscaling features automatically increase or decrease capacity based on load. There are also Ansible modules shown in the cloud documentation that +can configure autoscaling policy. + +When nodes come online, it may not be sufficient to wait for the next cycle of an ansible command to come along and configure that node. + +To do this, pre-bake machine images which contain the necessary ansible-pull invocation. Ansible-pull is a command line tool that fetches a playbook from a git server and runs it locally. + +One of the challenges of this approach is that there needs to be a centralized way to store data about the results of pull commands in an autoscaling context. +For this reason, the autoscaling solution provided below in the next section can be a better approach. + +Read :ref:`ansible-pull` for more information on pull-mode playbooks. + +.. _aws_autoscale: + +Autoscaling with Ansible Tower +`````````````````````````````` + +:ref:`ansible_tower` also contains a very nice feature for auto-scaling use cases. In this mode, a simple curl script can call +a defined URL and the server will "dial out" to the requester and configure an instance that is spinning up. This can be a great way +to reconfigure ephemeral nodes. See the Tower install and product documentation for more details. + +A benefit of using the callback in Tower over pull mode is that job results are still centrally recorded and less information has to be shared +with remote hosts. + +.. _aws_cloudformation_example: + +Ansible With (And Versus) CloudFormation +```````````````````````````````````````` + +CloudFormation is a Amazon technology for defining a cloud stack as a JSON or YAML document. + +Ansible modules provide an easier to use interface than CloudFormation in many examples, without defining a complex JSON/YAML document. +This is recommended for most users. 
+ +However, for users that have decided to use CloudFormation, there is an Ansible module that can be used to apply a CloudFormation template +to Amazon. + +When using Ansible with CloudFormation, typically Ansible will be used with a tool like Packer to build images, and CloudFormation will launch +those images, or ansible will be invoked through user data once the image comes online, or a combination of the two. + +Please see the examples in the Ansible CloudFormation module for more details. + +.. _aws_image_build: + +AWS Image Building With Ansible +``````````````````````````````` + +Many users may want to have images boot to a more complete configuration rather than configuring them entirely after instantiation. To do this, +one of many programs can be used with Ansible playbooks to define and upload a base image, which will then get its own AMI ID for usage with +the ec2 module or other Ansible AWS modules such as ec2_asg or the cloudformation module. Possible tools include Packer, aminator, and Ansible's +ec2_ami module. + +Generally speaking, we find most users using Packer. + +See the Packer documentation of the `Ansible local Packer provisioner <https://www.packer.io/docs/provisioners/ansible-local.html>`_ and `Ansible remote Packer provisioner <https://www.packer.io/docs/provisioners/ansible.html>`_. + +If you do not want to adopt Packer at this time, configuring a base-image with Ansible after provisioning (as shown above) is acceptable. + +.. _aws_next_steps: + +Next Steps: Explore Modules +``````````````````````````` + +Ansible ships with lots of modules for configuring a wide array of EC2 services. Browse the "Cloud" category of the module +documentation for a full list with examples. + +.. 
seealso:: + + :ref:`list_of_collections` + Browse existing collections, modules, and plugins + :ref:`working_with_playbooks` + An introduction to playbooks + :ref:`playbooks_delegation` + Delegation, useful for working with loud balancers, clouds, and locally executed steps. + `User Mailing List <https://groups.google.com/group/ansible-devel>`_ + Have a question? Stop by the google group! + `irc.freenode.net <http://irc.freenode.net>`_ + #ansible IRC chat channel diff --git a/docs/docsite/rst/scenario_guides/guide_azure.rst b/docs/docsite/rst/scenario_guides/guide_azure.rst new file mode 100644 index 00000000..2317ade4 --- /dev/null +++ b/docs/docsite/rst/scenario_guides/guide_azure.rst @@ -0,0 +1,480 @@ +Microsoft Azure Guide +===================== + +Ansible includes a suite of modules for interacting with Azure Resource Manager, giving you the tools to easily create +and orchestrate infrastructure on the Microsoft Azure Cloud. + +Requirements +------------ + +Using the Azure Resource Manager modules requires having specific Azure SDK modules +installed on the host running Ansible. + +.. code-block:: bash + + $ pip install 'ansible[azure]' + +If you are running Ansible from source, you can install the dependencies from the +root directory of the Ansible repo. + +.. code-block:: bash + + $ pip install .[azure] + +You can also directly run Ansible in `Azure Cloud Shell <https://shell.azure.com>`_, where Ansible is pre-installed. + +Authenticating with Azure +------------------------- + +Using the Azure Resource Manager modules requires authenticating with the Azure API. You can choose from two authentication strategies: + +* Active Directory Username/Password +* Service Principal Credentials + +Follow the directions for the strategy you wish to use, then proceed to `Providing Credentials to Azure Modules`_ for +instructions on how to actually use the modules and authenticate with the Azure API. + + +Using Service Principal +....................... 
+ +There is now a detailed official tutorial describing `how to create a service principal <https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-create-service-principal-portal>`_. + +After stepping through the tutorial you will have: + +* Your Client ID, which is found in the "client id" box in the "Configure" page of your application in the Azure portal +* Your Secret key, generated when you created the application. You cannot show the key after creation. + If you lost the key, you must create a new one in the "Configure" page of your application. +* And finally, a tenant ID. It's a UUID (for example, ABCDEFGH-1234-ABCD-1234-ABCDEFGHIJKL) pointing to the AD containing your + application. You will find it in the URL from within the Azure portal, or in the "view endpoints" of any given URL. + + +Using Active Directory Username/Password +........................................ + +To create an Active Directory username/password: + +* Connect to the Azure Classic Portal with your admin account +* Create a user in your default AAD. You must NOT activate Multi-Factor Authentication +* Go to Settings - Administrators +* Click on Add and enter the email of the new user. +* Check the checkbox of the subscription you want to test with this user. +* Login to Azure Portal with this new user to change the temporary password to a new one. You will not be able to use the + temporary password for OAuth login. + +Providing Credentials to Azure Modules +...................................... + +The modules offer several ways to provide your credentials. For a CI/CD tool such as Ansible Tower or Jenkins, you will +most likely want to use environment variables. For local development you may wish to store your credentials in a file +within your home directory. And of course, you can always pass credentials as parameters to a task within a playbook. 
The +order of precedence is parameters, then environment variables, and finally a file found in your home directory. + +Using Environment Variables +``````````````````````````` + +To pass service principal credentials via the environment, define the following variables: + +* AZURE_CLIENT_ID +* AZURE_SECRET +* AZURE_SUBSCRIPTION_ID +* AZURE_TENANT + +To pass Active Directory username/password via the environment, define the following variables: + +* AZURE_AD_USER +* AZURE_PASSWORD +* AZURE_SUBSCRIPTION_ID + +To pass Active Directory username/password in ADFS via the environment, define the following variables: + +* AZURE_AD_USER +* AZURE_PASSWORD +* AZURE_CLIENT_ID +* AZURE_TENANT +* AZURE_ADFS_AUTHORITY_URL + +"AZURE_ADFS_AUTHORITY_URL" is optional. It's necessary only when you have own ADFS authority like https://yourdomain.com/adfs. + +Storing in a File +````````````````` + +When working in a development environment, it may be desirable to store credentials in a file. The modules will look +for credentials in ``$HOME/.azure/credentials``. This file is an ini style file. It will look as follows: + +.. code-block:: ini + + [default] + subscription_id=xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + client_id=xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + secret=xxxxxxxxxxxxxxxxx + tenant=xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + +.. note:: If your secret values contain non-ASCII characters, you must `URL Encode <https://www.w3schools.com/tags/ref_urlencode.asp>`_ them to avoid login errors. + +It is possible to store multiple sets of credentials within the credentials file by creating multiple sections. Each +section is considered a profile. The modules look for the [default] profile automatically. Define AZURE_PROFILE in the +environment or pass a profile parameter to specify a specific profile. 
+ +Passing as Parameters +````````````````````` + +If you wish to pass credentials as parameters to a task, use the following parameters for service principal: + +* client_id +* secret +* subscription_id +* tenant + +Or, pass the following parameters for Active Directory username/password: + +* ad_user +* password +* subscription_id + +Or, pass the following parameters for ADFS username/pasword: + +* ad_user +* password +* client_id +* tenant +* adfs_authority_url + +"adfs_authority_url" is optional. It's necessary only when you have own ADFS authority like https://yourdomain.com/adfs. + + +Other Cloud Environments +------------------------ + +To use an Azure Cloud other than the default public cloud (eg, Azure China Cloud, Azure US Government Cloud, Azure Stack), +pass the "cloud_environment" argument to modules, configure it in a credential profile, or set the "AZURE_CLOUD_ENVIRONMENT" +environment variable. The value is either a cloud name as defined by the Azure Python SDK (eg, "AzureChinaCloud", +"AzureUSGovernment"; defaults to "AzureCloud") or an Azure metadata discovery URL (for Azure Stack). + +Creating Virtual Machines +------------------------- + +There are two ways to create a virtual machine, both involving the azure_rm_virtualmachine module. We can either create +a storage account, network interface, security group and public IP address and pass the names of these objects to the +module as parameters, or we can let the module do the work for us and accept the defaults it chooses. + +Creating Individual Components +.............................. + +An Azure module is available to help you create a storage account, virtual network, subnet, network interface, +security group and public IP. Here is a full example of creating each of these and passing the names to the +azure_rm_virtualmachine module at the end: + +.. 
code-block:: yaml + + - name: Create storage account + azure_rm_storageaccount: + resource_group: Testing + name: testaccount001 + account_type: Standard_LRS + + - name: Create virtual network + azure_rm_virtualnetwork: + resource_group: Testing + name: testvn001 + address_prefixes: "10.10.0.0/16" + + - name: Add subnet + azure_rm_subnet: + resource_group: Testing + name: subnet001 + address_prefix: "10.10.0.0/24" + virtual_network: testvn001 + + - name: Create public ip + azure_rm_publicipaddress: + resource_group: Testing + allocation_method: Static + name: publicip001 + + - name: Create security group that allows SSH + azure_rm_securitygroup: + resource_group: Testing + name: secgroup001 + rules: + - name: SSH + protocol: Tcp + destination_port_range: 22 + access: Allow + priority: 101 + direction: Inbound + + - name: Create NIC + azure_rm_networkinterface: + resource_group: Testing + name: testnic001 + virtual_network: testvn001 + subnet: subnet001 + public_ip_name: publicip001 + security_group: secgroup001 + + - name: Create virtual machine + azure_rm_virtualmachine: + resource_group: Testing + name: testvm001 + vm_size: Standard_D1 + storage_account: testaccount001 + storage_container: testvm001 + storage_blob: testvm001.vhd + admin_username: admin + admin_password: Password! + network_interfaces: testnic001 + image: + offer: CentOS + publisher: OpenLogic + sku: '7.1' + version: latest + +Each of the Azure modules offers a variety of parameter options. Not all options are demonstrated in the above example. +See each individual module for further details and examples. + + +Creating a Virtual Machine with Default Options +............................................... + +If you simply want to create a virtual machine without specifying all the details, you can do that as well. The only +caveat is that you will need a virtual network with one subnet already in your resource group. 
Assuming you have a +virtual network already with an existing subnet, you can run the following to create a VM: + +.. code-block:: yaml + + azure_rm_virtualmachine: + resource_group: Testing + name: testvm10 + vm_size: Standard_D1 + admin_username: chouseknecht + ssh_password_enabled: false + ssh_public_keys: "{{ ssh_keys }}" + image: + offer: CentOS + publisher: OpenLogic + sku: '7.1' + version: latest + + +Creating a Virtual Machine in Availability Zones +.................................................. + +If you want to create a VM in an availability zone, +consider the following: + +* Both OS disk and data disk must be a 'managed disk', not an 'unmanaged disk'. +* When creating a VM with the ``azure_rm_virtualmachine`` module, + you need to explicitly set the ``managed_disk_type`` parameter + to change the OS disk to a managed disk. + Otherwise, the OS disk becomes an unmanaged disk.. +* When you create a data disk with the ``azure_rm_manageddisk`` module, + you need to explicitly specify the ``storage_account_type`` parameter + to make it a managed disk. + Otherwise, the data disk will be an unmanaged disk. +* A managed disk does not require a storage account or a storage container, + unlike a n unmanaged disk. + In particular, note that once a VM is created on an unmanaged disk, + an unnecessary storage container named "vhds" is automatically created. +* When you create an IP address with the ``azure_rm_publicipaddress`` module, + you must set the ``sku`` parameter to ``standard``. + Otherwise, the IP address cannot be used in an availability zone. + + +Dynamic Inventory Script +------------------------ + +If you are not familiar with Ansible's dynamic inventory scripts, check out :ref:`Intro to Dynamic Inventory <intro_dynamic_inventory>`. + +The Azure Resource Manager inventory script is called `azure_rm.py <https://raw.githubusercontent.com/ansible-collections/community.general/main/scripts/inventory/azure_rm.py>`_. 
It authenticates with the Azure API exactly the same as the +Azure modules, which means you will either define the same environment variables described above in `Using Environment Variables`_, +create a ``$HOME/.azure/credentials`` file (also described above in `Storing in a File`_), or pass command line parameters. To see available command +line options execute the following: + +.. code-block:: bash + + $ wget https://raw.githubusercontent.com/ansible-collections/community.general/main/scripts/inventory/azure_rm.py + $ ./azure_rm.py --help + +As with all dynamic inventory scripts, the script can be executed directly, passed as a parameter to the ansible command, +or passed directly to ansible-playbook using the -i option. No matter how it is executed the script produces JSON representing +all of the hosts found in your Azure subscription. You can narrow this down to just hosts found in a specific set of +Azure resource groups, or even down to a specific host. + +For a given host, the inventory script provides the following host variables: + +.. 
code-block:: JSON + + { + "ansible_host": "XXX.XXX.XXX.XXX", + "computer_name": "computer_name2", + "fqdn": null, + "id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Compute/virtualMachines/object-name", + "image": { + "offer": "CentOS", + "publisher": "OpenLogic", + "sku": "7.1", + "version": "latest" + }, + "location": "westus", + "mac_address": "00-00-5E-00-53-FE", + "name": "object-name", + "network_interface": "interface-name", + "network_interface_id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Network/networkInterfaces/object-name1", + "network_security_group": null, + "network_security_group_id": null, + "os_disk": { + "name": "object-name", + "operating_system_type": "Linux" + }, + "plan": null, + "powerstate": "running", + "private_ip": "172.26.3.6", + "private_ip_alloc_method": "Static", + "provisioning_state": "Succeeded", + "public_ip": "XXX.XXX.XXX.XXX", + "public_ip_alloc_method": "Static", + "public_ip_id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Network/publicIPAddresses/object-name", + "public_ip_name": "object-name", + "resource_group": "galaxy-production", + "security_group": "object-name", + "security_group_id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Network/networkSecurityGroups/object-name", + "tags": { + "db": "mysql" + }, + "type": "Microsoft.Compute/virtualMachines", + "virtual_machine_size": "Standard_DS4" + } + +Host Groups +........... + +By default hosts are grouped by: + +* azure (all hosts) +* location name +* resource group name +* security group name +* tag key +* tag key_value +* os_disk operating_system_type (Windows/Linux) + +You can control host groupings and host selection by either defining environment variables or creating an +azure_rm.ini file in your current working directory. + +NOTE: An .ini file will take precedence over environment variables. 
+ +NOTE: The name of the .ini file is the basename of the inventory script (in other words, 'azure_rm') with a '.ini' +extension. This allows you to copy, rename and customize the inventory script and have matching .ini files all in +the same directory. + +Control grouping using the following variables defined in the environment: + +* AZURE_GROUP_BY_RESOURCE_GROUP=yes +* AZURE_GROUP_BY_LOCATION=yes +* AZURE_GROUP_BY_SECURITY_GROUP=yes +* AZURE_GROUP_BY_TAG=yes +* AZURE_GROUP_BY_OS_FAMILY=yes + +Select hosts within specific resource groups by assigning a comma separated list to: + +* AZURE_RESOURCE_GROUPS=resource_group_a,resource_group_b + +Select hosts for specific tag key by assigning a comma separated list of tag keys to: + +* AZURE_TAGS=key1,key2,key3 + +Select hosts for specific locations by assigning a comma separated list of locations to: + +* AZURE_LOCATIONS=eastus,eastus2,westus + +Or, select hosts for specific tag key:value pairs by assigning a comma separated list key:value pairs to: + +* AZURE_TAGS=key1:value1,key2:value2 + +If you don't need the powerstate, you can improve performance by turning off powerstate fetching: + +* AZURE_INCLUDE_POWERSTATE=no + +A sample azure_rm.ini file is included along with the inventory script in +`here <https://raw.githubusercontent.com/ansible-collections/community.general/main/scripts/inventory/azure_rm.ini>`_. +An .ini file will contain the following: + +.. code-block:: ini + + [azure] + # Control which resource groups are included. By default all resources groups are included. + # Set resource_groups to a comma separated list of resource groups names. + #resource_groups= + + # Control which tags are included. Set tags to a comma separated list of keys or key:value pairs + #tags= + + # Control which locations are included. Set locations to a comma separated list of locations. + #locations= + + # Include powerstate. If you don't need powerstate information, turning it off improves runtime performance. 
+ # Valid values: yes, no, true, false, True, False, 0, 1. + include_powerstate=yes + + # Control grouping with the following boolean flags. Valid values: yes, no, true, false, True, False, 0, 1. + group_by_resource_group=yes + group_by_location=yes + group_by_security_group=yes + group_by_tag=yes + group_by_os_family=yes + +Examples +........ + +Here are some examples using the inventory script: + +.. code-block:: bash + + # Download inventory script + $ wget https://raw.githubusercontent.com/ansible-collections/community.general/main/scripts/inventory/azure_rm.py + + # Execute /bin/uname on all instances in the Testing resource group + $ ansible -i azure_rm.py Testing -m shell -a "/bin/uname -a" + + # Execute win_ping on all Windows instances + $ ansible -i azure_rm.py windows -m win_ping + + # Execute ping on all Linux instances + $ ansible -i azure_rm.py linux -m ping + + # Use the inventory script to print instance specific information + $ ./azure_rm.py --host my_instance_host_name --resource-groups=Testing --pretty + + # Use the inventory script with ansible-playbook + $ ansible-playbook -i ./azure_rm.py test_playbook.yml + +Here is a simple playbook to exercise the Azure inventory script: + +.. code-block:: yaml + + - name: Test the inventory script + hosts: azure + connection: local + gather_facts: no + tasks: + - debug: + msg: "{{ inventory_hostname }} has powerstate {{ powerstate }}" + +You can execute the playbook with something like: + +.. code-block:: bash + + $ ansible-playbook -i ./azure_rm.py test_azure_inventory.yml + + +Disabling certificate validation on Azure endpoints +................................................... + +When an HTTPS proxy is present, or when using Azure Stack, it may be necessary to disable certificate validation for +Azure endpoints in the Azure modules. This is not a recommended security practice, but may be necessary when the system +CA store cannot be altered to include the necessary CA certificate. 
Certificate validation can be controlled by setting +the "cert_validation_mode" value in a credential profile, via the "AZURE_CERT_VALIDATION_MODE" environment variable, or +by passing the "cert_validation_mode" argument to any Azure module. The default value is "validate"; setting the value +to "ignore" will prevent all certificate validation. The module argument takes precedence over a credential profile value, +which takes precedence over the environment value. diff --git a/docs/docsite/rst/scenario_guides/guide_cloudstack.rst b/docs/docsite/rst/scenario_guides/guide_cloudstack.rst new file mode 100644 index 00000000..fcfb8120 --- /dev/null +++ b/docs/docsite/rst/scenario_guides/guide_cloudstack.rst @@ -0,0 +1,377 @@ +CloudStack Cloud Guide +====================== + +.. _cloudstack_introduction: + +Introduction +```````````` +The purpose of this section is to explain how to put Ansible modules together to use Ansible in a CloudStack context. You will find more usage examples in the details section of each module. + +Ansible contains a number of extra modules for interacting with CloudStack based clouds. All modules support check mode, are designed to be idempotent, have been created and tested, and are maintained by the community. + +.. note:: Some of the modules will require domain admin or root admin privileges. + +Prerequisites +````````````` +Prerequisites for using the CloudStack modules are minimal. In addition to Ansible itself, all of the modules require the python library ``cs`` https://pypi.org/project/cs/ + +You'll need this Python module installed on the execution host, usually your workstation. + +.. code-block:: bash + + $ pip install cs + +Or alternatively starting with Debian 9 and Ubuntu 16.04: + +.. code-block:: bash + + $ sudo apt install python-cs + +.. note:: cs also includes a command line interface for ad-hoc interaction with the CloudStack API, for example ``$ cs listVirtualMachines state=Running``. 
+ +Limitations and Known Issues +```````````````````````````` +VPC support has been improved since Ansible 2.3 but is still not yet fully implemented. The community is working on the VPC integration. + +Credentials File +```````````````` +You can pass credentials and the endpoint of your cloud as module arguments, however in most cases it is a far less work to store your credentials in the cloudstack.ini file. + +The python library cs looks for the credentials file in the following order (last one wins): + +* A ``.cloudstack.ini`` (note the dot) file in the home directory. +* A ``CLOUDSTACK_CONFIG`` environment variable pointing to an .ini file. +* A ``cloudstack.ini`` (without the dot) file in the current working directory, same directory as your playbooks are located. + +The structure of the ini file must look like this: + +.. code-block:: bash + + $ cat $HOME/.cloudstack.ini + [cloudstack] + endpoint = https://cloud.example.com/client/api + key = api key + secret = api secret + timeout = 30 + +.. Note:: The section ``[cloudstack]`` is the default section. ``CLOUDSTACK_REGION`` environment variable can be used to define the default section. + +.. versionadded:: 2.4 + +The ENV variables support ``CLOUDSTACK_*`` as written in the documentation of the library ``cs``, like ``CLOUDSTACK_TIMEOUT``, ``CLOUDSTACK_METHOD``, and so on. has been implemented into Ansible. It is even possible to have some incomplete config in your cloudstack.ini: + +.. code-block:: bash + + $ cat $HOME/.cloudstack.ini + [cloudstack] + endpoint = https://cloud.example.com/client/api + timeout = 30 + +and fulfill the missing data by either setting ENV variables or tasks params: + +.. code-block:: yaml + + --- + - name: provision our VMs + hosts: cloud-vm + tasks: + - name: ensure VMs are created and running + delegate_to: localhost + cs_instance: + api_key: your api key + api_secret: your api secret + ... 
+ +Regions +``````` +If you use more than one CloudStack region, you can define as many sections as you want and name them as you like, for example: + +.. code-block:: bash + + $ cat $HOME/.cloudstack.ini + [exoscale] + endpoint = https://api.exoscale.ch/compute + key = api key + secret = api secret + + [example_cloud_one] + endpoint = https://cloud-one.example.com/client/api + key = api key + secret = api secret + + [example_cloud_two] + endpoint = https://cloud-two.example.com/client/api + key = api key + secret = api secret + +.. Hint:: Sections can also be used to for login into the same region using different accounts. + +By passing the argument ``api_region`` with the CloudStack modules, the region wanted will be selected. + +.. code-block:: yaml + + - name: ensure my ssh public key exists on Exoscale + cs_sshkeypair: + name: my-ssh-key + public_key: "{{ lookup('file', '~/.ssh/id_rsa.pub') }}" + api_region: exoscale + delegate_to: localhost + +Or by looping over a regions list if you want to do the task in every region: + +.. code-block:: yaml + + - name: ensure my ssh public key exists in all CloudStack regions + local_action: cs_sshkeypair + name: my-ssh-key + public_key: "{{ lookup('file', '~/.ssh/id_rsa.pub') }}" + api_region: "{{ item }}" + loop: + - exoscale + - example_cloud_one + - example_cloud_two + +Environment Variables +````````````````````` +.. versionadded:: 2.3 + +Since Ansible 2.3 it is possible to use environment variables for domain (``CLOUDSTACK_DOMAIN``), account (``CLOUDSTACK_ACCOUNT``), project (``CLOUDSTACK_PROJECT``), VPC (``CLOUDSTACK_VPC``) and zone (``CLOUDSTACK_ZONE``). This simplifies the tasks by not repeating the arguments for every tasks. + +Below you see an example how it can be used in combination with Ansible's block feature: + +.. 
code-block:: yaml + + - hosts: cloud-vm + tasks: + - block: + - name: ensure my ssh public key + cs_sshkeypair: + name: my-ssh-key + public_key: "{{ lookup('file', '~/.ssh/id_rsa.pub') }}" + + - name: ensure my ssh public key + cs_instance: + display_name: "{{ inventory_hostname_short }}" + template: Linux Debian 7 64-bit 20GB Disk + service_offering: "{{ cs_offering }}" + ssh_key: my-ssh-key + state: running + + delegate_to: localhost + environment: + CLOUDSTACK_DOMAIN: root/customers + CLOUDSTACK_PROJECT: web-app + CLOUDSTACK_ZONE: sf-1 + +.. Note:: You are still able overwrite the environment variables using the module arguments, for example ``zone: sf-2`` + +.. Note:: Unlike ``CLOUDSTACK_REGION`` these additional environment variables are ignored in the CLI ``cs``. + +Use Cases +````````` +The following should give you some ideas how to use the modules to provision VMs to the cloud. As always, there isn't only one way to do it. But as always: keep it simple for the beginning is always a good start. + +Use Case: Provisioning in a Advanced Networking CloudStack setup +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +Our CloudStack cloud has an advanced networking setup, we would like to provision web servers, which get a static NAT and open firewall ports 80 and 443. Further we provision database servers, to which we do not give any access to. For accessing the VMs by SSH we use a SSH jump host. + +This is how our inventory looks like: + +.. code-block:: none + + [cloud-vm:children] + webserver + db-server + jumphost + + [webserver] + web-01.example.com public_ip=198.51.100.20 + web-02.example.com public_ip=198.51.100.21 + + [db-server] + db-01.example.com + db-02.example.com + + [jumphost] + jump.example.com public_ip=198.51.100.22 + +As you can see, the public IPs for our web servers and jumphost has been assigned as variable ``public_ip`` directly in the inventory. 
To configure the jumphost, web servers and database servers, we use ``group_vars``. The ``group_vars`` directory contains 4 files for configuration of the groups: cloud-vm, jumphost, webserver and db-server. The cloud-vm is there for specifying the defaults of our cloud infrastructure.
code-block:: yaml + + # file: infra.yaml + --- + - name: provision our VMs + hosts: cloud-vm + tasks: + - name: run all enclosed tasks from localhost + delegate_to: localhost + block: + - name: ensure VMs are created and running + cs_instance: + name: "{{ inventory_hostname_short }}" + template: Linux Debian 7 64-bit 20GB Disk + service_offering: "{{ cs_offering }}" + state: running + + - name: ensure firewall ports opened + cs_firewall: + ip_address: "{{ public_ip }}" + port: "{{ item.port }}" + cidr: "{{ item.cidr | default('0.0.0.0/0') }}" + loop: "{{ cs_firewall }}" + when: public_ip is defined + + - name: ensure static NATs + cs_staticnat: vm="{{ inventory_hostname_short }}" ip_address="{{ public_ip }}" + when: public_ip is defined + +In the above play we defined 3 tasks and use the group ``cloud-vm`` as target to handle all VMs in the cloud but instead SSH to these VMs, we use ``delegate_to: localhost`` to execute the API calls locally from our workstation. + +In the first task, we ensure we have a running VM created with the Debian template. If the VM is already created but stopped, it would just start it. If you like to change the offering on an existing VM, you must add ``force: yes`` to the task, which would stop the VM, change the offering and start the VM again. + +In the second task we ensure the ports are opened if we give a public IP to the VM. + +In the third task we add static NAT to the VMs having a public IP defined. + + +.. Note:: The public IP addresses must have been acquired in advance, also see ``cs_ip_address`` + +.. Note:: For some modules, for example ``cs_sshkeypair`` you usually want this to be executed only once, not for every VM. Therefore you would make a separate play for it targeting localhost. You find an example in the use cases below. 
+ +Use Case: Provisioning on a Basic Networking CloudStack setup ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +A basic networking CloudStack setup is slightly different: Every VM gets a public IP directly assigned and security groups are used for access restriction policy. + +This is how our inventory looks like: + +.. code-block:: none + + [cloud-vm:children] + webserver + + [webserver] + web-01.example.com + web-02.example.com + +The default for your VMs looks like this: + +.. code-block:: yaml + + # file: group_vars/cloud-vm + --- + cs_offering: Small + cs_securitygroups: [ 'default'] + +Our webserver will also be in security group ``web``: + +.. code-block:: yaml + + # file: group_vars/webserver + --- + cs_securitygroups: [ 'default', 'web' ] + +The playbook looks like the following: + +.. code-block:: yaml + + # file: infra.yaml + --- + - name: cloud base setup + hosts: localhost + tasks: + - name: upload ssh public key + cs_sshkeypair: + name: defaultkey + public_key: "{{ lookup('file', '~/.ssh/id_rsa.pub') }}" + + - name: ensure security groups exist + cs_securitygroup: + name: "{{ item }}" + loop: + - default + - web + + - name: add inbound SSH to security group default + cs_securitygroup_rule: + security_group: default + start_port: "{{ item }}" + end_port: "{{ item }}" + loop: + - 22 + + - name: add inbound TCP rules to security group web + cs_securitygroup_rule: + security_group: web + start_port: "{{ item }}" + end_port: "{{ item }}" + loop: + - 80 + - 443 + + - name: install VMs in the cloud + hosts: cloud-vm + tasks: + - delegate_to: localhost + block: + - name: create and run VMs on CloudStack + cs_instance: + name: "{{ inventory_hostname_short }}" + template: Linux Debian 7 64-bit 20GB Disk + service_offering: "{{ cs_offering }}" + security_groups: "{{ cs_securitygroups }}" + ssh_key: defaultkey + state: Running + register: vm + + - name: show VM IP + debug: msg="VM {{ inventory_hostname }} {{ vm.default_ip }}" + + - name: assign 
IP to the inventory + set_fact: ansible_ssh_host={{ vm.default_ip }} + + - name: waiting for SSH to come up + wait_for: port=22 host={{ vm.default_ip }} delay=5 + +In the first play we setup the security groups, in the second play the VMs will created be assigned to these groups. Further you see, that we assign the public IP returned from the modules to the host inventory. This is needed as we do not know the IPs we will get in advance. In a next step you would configure the DNS servers with these IPs for accessing the VMs with their DNS name. + +In the last task we wait for SSH to be accessible, so any later play would be able to access the VM by SSH without failure. diff --git a/docs/docsite/rst/scenario_guides/guide_docker.rst b/docs/docsite/rst/scenario_guides/guide_docker.rst new file mode 100644 index 00000000..c3f019bd --- /dev/null +++ b/docs/docsite/rst/scenario_guides/guide_docker.rst @@ -0,0 +1,227 @@ +Docker Guide +============ + +The `community.docker collection <https://galaxy.ansible.com/community/docker>`_ offers several modules and plugins for orchestrating Docker containers and Docker Swarm. + +.. contents:: + :local: + :depth: 1 + + +Requirements +------------ + +Most of the modules and plugins in community.docker require the `Docker SDK for Python <https://docker-py.readthedocs.io/en/stable/>`_. The SDK needs to be installed on the machines where the modules and plugins are executed, and for the Python version(s) with which the modules and plugins are executed. You can use the :ref:`community.general.python_requirements_info module <ansible_collections.community.general.python_requirements_info_module>` to make sure that the Docker SDK for Python is installed on the correct machine and for the Python version used by Ansible. + +Note that plugins (inventory plugins and connection plugins) are always executed in the context of Ansible itself. 
If you use a plugin that requires the Docker SDK for Python, you need to install it on the machine running ``ansible`` or ``ansible-playbook`` and for the same Python interpreter used by Ansible. To see which Python is used, run ``ansible --version``. + +You can install the Docker SDK for Python for Python 2.7 or Python 3 as follows: + +.. code-block:: bash + + $ pip install docker + +For Python 2.6, you need a version before 2.0. For these versions, the SDK was called ``docker-py``, so you need to install it as follows: + +.. code-block:: bash + + $ pip install 'docker-py>=1.10.0' + +Please install only one of ``docker`` or ``docker-py``. Installing both will result in a broken installation. If this happens, Ansible will detect it and inform you about it. If that happens, you must uninstall both and reinstall the correct version. + +If in doubt, always install ``docker`` and never ``docker-py``. + + +Connecting to the Docker API +---------------------------- + +You can connect to a local or remote API using parameters passed to each task or by setting environment variables. The order of precedence is command line parameters and then environment variables. If neither a command line option nor an environment variable is found, Ansible uses the default value provided under `Parameters`_. + + +Parameters +.......... + +Most plugins and modules can be configured by the following parameters: + + docker_host + The URL or Unix socket path used to connect to the Docker API. Defaults to ``unix://var/run/docker.sock``. To connect to a remote host, provide the TCP connection string (for example: ``tcp://192.0.2.23:2376``). If TLS is used to encrypt the connection to the API, then the module will automatically replace 'tcp' in the connection URL with 'https'. + + api_version + The version of the Docker API running on the Docker Host. Defaults to the latest version of the API supported by the Docker SDK for Python installed. 
+ + timeout + The maximum amount of time in seconds to wait on a response from the API. Defaults to 60 seconds. + + tls + Secure the connection to the API by using TLS without verifying the authenticity of the Docker host server. Defaults to ``false``. + + validate_certs + Secure the connection to the API by using TLS and verifying the authenticity of the Docker host server. Default is ``false``. + + cacert_path + Use a CA certificate when performing server verification by providing the path to a CA certificate file. + + cert_path + Path to the client's TLS certificate file. + + key_path + Path to the client's TLS key file. + + tls_hostname + When verifying the authenticity of the Docker Host server, provide the expected name of the server. Defaults to ``localhost``. + + ssl_version + Provide a valid SSL version number. The default value is determined by the Docker SDK for Python. + + +Environment variables +..................... + +You can also control how the plugins and modules connect to the Docker API by setting the following environment variables. + +For plugins, they have to be set for the environment Ansible itself runs in. For modules, they have to be set for the environment the modules are executed in. For modules running on remote machines, the environment variables have to be set on that machine for the user used to execute the modules with. + + DOCKER_HOST + The URL or Unix socket path used to connect to the Docker API. + + DOCKER_API_VERSION + The version of the Docker API running on the Docker Host. Defaults to the latest version of the API supported + by docker-py. + + DOCKER_TIMEOUT + The maximum amount of time in seconds to wait on a response from the API. + + DOCKER_CERT_PATH + Path to the directory containing the client certificate, client key and CA certificate. + + DOCKER_SSL_VERSION + Provide a valid SSL version number. + + DOCKER_TLS + Secure the connection to the API by using TLS without verifying the authenticity of the Docker Host. 
+ + DOCKER_TLS_VERIFY + Secure the connection to the API by using TLS and verify the authenticity of the Docker Host. + + +Plain Docker daemon: images, networks, volumes, and containers +-------------------------------------------------------------- + +For working with a plain Docker daemon, that is without Swarm, there are connection plugins, an inventory plugin, and several modules available: + + docker connection plugin + The :ref:`community.docker.docker connection plugin <ansible_collections.community.docker.docker_connection>` uses the Docker CLI utility to connect to Docker containers and execute modules in them. It essentially wraps ``docker exec`` and ``docker cp``. This connection plugin is supported by the :ref:`ansible.posix.synchronize module <ansible_collections.ansible.posix.synchronize_module>`. + + docker_api connection plugin + The :ref:`community.docker.docker_api connection plugin <ansible_collections.community.docker.docker_api_connection>` talks directly to the Docker daemon to connect to Docker containers and execute modules in them. + + docker_containers inventory plugin + The :ref:`community.docker.docker_containers inventory plugin <ansible_collections.community.docker.docker_containers_inventory>` allows you to dynamically add Docker containers from a Docker Daemon to your Ansible inventory. See :ref:`dynamic_inventory` for details on dynamic inventories. + + The `docker inventory script <https://github.com/ansible-collections/community.general/blob/main/scripts/inventory/docker.py>`_ is deprecated. Please use the inventory plugin instead. The inventory plugin has several compatibility options. If you need to collect Docker containers from multiple Docker daemons, you need to add every Docker daemon as an individual inventory source. 
+ + docker_host_info module + The :ref:`community.docker.docker_host_info module <ansible_collections.community.docker.docker_host_info_module>` allows you to retrieve information on a Docker daemon, such as all containers, images, volumes, networks and so on. + + docker_login module + The :ref:`community.docker.docker_login module <ansible_collections.community.docker.docker_login_module>` allows you to log in and out of a remote registry, such as Docker Hub or a private registry. It provides similar functionality to the ``docker login`` and ``docker logout`` CLI commands. + + docker_prune module + The :ref:`community.docker.docker_prune module <ansible_collections.community.docker.docker_prune_module>` allows you to prune no longer needed containers, images, volumes and so on. It provides similar functionality to the ``docker prune`` CLI command. + + docker_image module + The :ref:`community.docker.docker_image module <ansible_collections.community.docker.docker_image_module>` provides full control over images, including: build, pull, push, tag and remove. + + docker_image_info module + The :ref:`community.docker.docker_image_info module <ansible_collections.community.docker.docker_image_info_module>` allows you to list and inspect images. + + docker_network module + The :ref:`community.docker.docker_network module <ansible_collections.community.docker.docker_network_module>` provides full control over Docker networks. + + docker_network_info module + The :ref:`community.docker.docker_network_info module <ansible_collections.community.docker.docker_network_info_module>` allows you to inspect Docker networks. + + docker_volume_info module + The :ref:`community.docker.docker_volume_info module <ansible_collections.community.docker.docker_volume_info_module>` provides full control over Docker volumes. 
+ + docker_volume module + The :ref:`community.docker.docker_volume module <ansible_collections.community.docker.docker_volume_module>` allows you to inspect Docker volumes. + + docker_container module + The :ref:`community.docker.docker_container module <ansible_collections.community.docker.docker_container_module>` manages the container lifecycle by providing the ability to create, update, stop, start and destroy a Docker container. + + docker_container_info module + The :ref:`community.docker.docker_container_info module <ansible_collections.community.docker.docker_container_info_module>` allows you to inspect a Docker container. + + +Docker Compose +-------------- + +The :ref:`community.docker.docker_compose module <ansible_collections.community.docker.docker_compose_module>` +allows you to use your existing Docker compose files to orchestrate containers on a single Docker daemon or on Swarm. +Supports compose versions 1 and 2. + +Next to Docker SDK for Python, you need to install `docker-compose <https://github.com/docker/compose>`_ on the remote machines to use the module. + + +Docker Machine +-------------- + +The :ref:`community.docker.docker_machine inventory plugin <ansible_collections.community.docker.docker_machine_inventory>` allows you to dynamically add Docker Machine hosts to your Ansible inventory. + + +Docker stack +------------ + +The :ref:`community.docker.docker_stack module <ansible_collections.community.docker.docker_stack_module>` module allows you to control Docker stacks. Information on stacks can be retrieved by the :ref:`community.docker.docker_stack_info module <ansible_collections.community.docker.docker_stack_info_module>`, and information on stack tasks can be retrieved by the :ref:`community.docker.docker_stack_task_info module <ansible_collections.community.docker.docker_stack_task_info_module>`. + + +Docker Swarm +------------ + +The community.docker collection provides multiple plugins and modules for managing Docker Swarms. 
+ +Swarm management +................ + +One inventory plugin and several modules are provided to manage Docker Swarms: + + docker_swarm inventory plugin + The :ref:`community.docker.docker_swarm inventory plugin <ansible_collections.community.docker.docker_swarm_inventory>` allows you to dynamically add all Docker Swarm nodes to your Ansible inventory. + + docker_swarm module + The :ref:`community.docker.docker_swarm module <ansible_collections.community.docker.docker_swarm_module>` allows you to globally configure Docker Swarm manager nodes to join and leave swarms, and to change the Docker Swarm configuration. + + docker_swarm_info module + The :ref:`community.docker.docker_swarm_info module <ansible_collections.community.docker.docker_swarm_info_module>` allows you to retrieve information on Docker Swarm. + + docker_node module + The :ref:`community.docker.docker_node module <ansible_collections.community.docker.docker_node_module>` allows you to manage Docker Swarm nodes. + + docker_node_info module + The :ref:`community.docker.docker_node_info module <ansible_collections.community.docker.docker_node_info_module>` allows you to retrieve information on Docker Swarm nodes. + +Configuration management +........................ + +The community.docker collection offers modules to manage Docker Swarm configurations and secrets: + + docker_config module + The :ref:`community.docker.docker_config module <ansible_collections.community.docker.docker_config_module>` allows you to create and modify Docker Swarm configs. + + docker_secret module + The :ref:`community.docker.docker_secret module <ansible_collections.community.docker.docker_secret_module>` allows you to create and modify Docker Swarm secrets. + + +Swarm services +.............. 
+ +Docker Swarm services can be created and updated with the :ref:`community.docker.docker_swarm_service module <ansible_collections.community.docker.docker_swarm_service_module>`, and information on them can be queried by the :ref:`community.docker.docker_swarm_service_info module <ansible_collections.community.docker.docker_swarm_service_info_module>`. + + +Helpful links +------------- + +Still using Dockerfile to build images? Check out `ansible-bender <https://github.com/ansible-community/ansible-bender>`_, and start building images from your Ansible playbooks. + +Use `Ansible Operator <https://learn.openshift.com/ansibleop/ansible-operator-overview/>`_ to launch your docker-compose file on `OpenShift <https://www.okd.io/>`_. Go from an app on your laptop to a fully scalable app in the cloud with Kubernetes in just a few moments. diff --git a/docs/docsite/rst/scenario_guides/guide_gce.rst b/docs/docsite/rst/scenario_guides/guide_gce.rst new file mode 100644 index 00000000..6d9ca65a --- /dev/null +++ b/docs/docsite/rst/scenario_guides/guide_gce.rst @@ -0,0 +1,302 @@ +Google Cloud Platform Guide +=========================== + +.. _gce_intro: + +Introduction +-------------------------- + +Ansible + Google have been working together on a set of auto-generated +Ansible modules designed to consistently and comprehensively cover the entirety +of the Google Cloud Platform (GCP). + +Ansible contains modules for managing Google Cloud Platform resources, +including creating instances, controlling network access, working with +persistent disks, managing load balancers, and a lot more. + +These new modules can be found under a new consistent name scheme "gcp_*" +(Note: gcp_target_proxy and gcp_url_map are legacy modules, despite the "gcp_*" +name. Please use gcp_compute_target_proxy and gcp_compute_url_map instead).
+ +Additionally, the gcp_compute inventory plugin can discover all +Google Compute Engine (GCE) instances +and make them automatically available in your Ansible inventory. + +You may see a collection of other GCP modules that do not conform to this +naming convention. These are the original modules primarily developed by the +Ansible community. You will find some overlapping functionality such as with +the "gce" module and the new "gcp_compute_instance" module. Either can be +used, but you may experience issues trying to use them together. + +While the community GCP modules are not going away, Google is investing effort +into the new "gcp_*" modules. Google is committed to ensuring the Ansible +community has a great experience with GCP and therefore recommends adopting +these new modules if possible. + + +Requisites +--------------- +The GCP modules require both the ``requests`` and the +``google-auth`` libraries to be installed. + +.. code-block:: bash + + $ pip install requests google-auth + +Alternatively for RHEL / CentOS, the ``python-requests`` package is also +available to satisfy ``requests`` libraries. + +.. code-block:: bash + + $ yum install python-requests + +Credentials +----------- +It's easy to create a GCP account with credentials for Ansible. You have multiple options to +get your credentials - here are two of the most common options: + +* Service Accounts (Recommended): Use JSON service accounts with specific permissions. +* Machine Accounts: Use the permissions associated with the GCP Instance you're using Ansible on. + +For the following examples, we'll be using service account credentials. + +To work with the GCP modules, you'll first need to get some credentials in the +JSON format: + +1. `Create a Service Account <https://developers.google.com/identity/protocols/OAuth2ServiceAccount#creatinganaccount>`_ +2. 
`Download JSON credentials <https://support.google.com/cloud/answer/6158849?hl=en&ref_topic=6262490#serviceaccounts>`_ + +Once you have your credentials, there are two different ways to provide them to Ansible: + +* by specifying them directly as module parameters +* by setting environment variables + +Providing Credentials as Module Parameters +`````````````````````````````````````````` + +For the GCE modules you can specify the credentials as arguments: + +* ``auth_kind``: type of authentication being used (choices: machineaccount, serviceaccount, application) +* ``service_account_email``: email associated with the project +* ``service_account_file``: path to the JSON credentials file +* ``project``: id of the project +* ``scopes``: The specific scopes that you want the actions to use. + +For example, to create a new IP address using the ``gcp_compute_address`` module, +you can use the following configuration: + +.. code-block:: yaml + + - name: Create IP address + hosts: localhost + gather_facts: no + + vars: + service_account_file: /home/my_account.json + project: my-project + auth_kind: serviceaccount + scopes: + - https://www.googleapis.com/auth/compute + + tasks: + + - name: Allocate an IP Address + gcp_compute_address: + state: present + name: 'test-address1' + region: 'us-west1' + project: "{{ project }}" + auth_kind: "{{ auth_kind }}" + service_account_file: "{{ service_account_file }}" + scopes: "{{ scopes }}" + +Providing Credentials as Environment Variables +`````````````````````````````````````````````` + +Set the following environment variables before running Ansible in order to configure your credentials: + +.. code-block:: bash + + GCP_AUTH_KIND + GCP_SERVICE_ACCOUNT_EMAIL + GCP_SERVICE_ACCOUNT_FILE + GCP_SCOPES + +GCE Dynamic Inventory +--------------------- + +The best way to interact with your hosts is to use the gcp_compute inventory plugin, which dynamically queries GCE and tells Ansible what nodes can be managed. 
+ +To be able to use this GCE dynamic inventory plugin, you need to enable it first by specifying the following in the ``ansible.cfg`` file: + +.. code-block:: ini + + [inventory] + enable_plugins = gcp_compute + +Then, create a file that ends in ``.gcp.yml`` in your root directory. + +The gcp_compute inventory script takes in the same authentication information as any module. + +Here's an example of a valid inventory file: + +.. code-block:: yaml + + plugin: gcp_compute + projects: + - graphite-playground + auth_kind: serviceaccount + service_account_file: /home/alexstephen/my_account.json + + +Executing ``ansible-inventory --list -i <filename>.gcp.yml`` will create a list of GCP instances that are ready to be configured using Ansible. + +Create an instance +`````````````````` + +The full range of GCP modules provide the ability to create a wide variety of +GCP resources with the full support of the entire GCP API. + +The following playbook creates a GCE Instance. This instance relies on other GCP +resources like Disk. By creating other resources separately, we can give as +much detail as necessary about how we want to configure the other resources, for example +formatting of the Disk. By registering it to a variable, we can simply insert the +variable into the instance task. The gcp_compute_instance module will figure out the +rest. + +.. 
code-block:: yaml + + - name: Create an instance + hosts: localhost + gather_facts: no + vars: + gcp_project: my-project + gcp_cred_kind: serviceaccount + gcp_cred_file: /home/my_account.json + zone: "us-central1-a" + region: "us-central1" + + tasks: + - name: create a disk + gcp_compute_disk: + name: 'disk-instance' + size_gb: 50 + source_image: 'projects/ubuntu-os-cloud/global/images/family/ubuntu-1604-lts' + zone: "{{ zone }}" + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + scopes: + - https://www.googleapis.com/auth/compute + state: present + register: disk + - name: create a address + gcp_compute_address: + name: 'address-instance' + region: "{{ region }}" + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + scopes: + - https://www.googleapis.com/auth/compute + state: present + register: address + - name: create a instance + gcp_compute_instance: + state: present + name: test-vm + machine_type: n1-standard-1 + disks: + - auto_delete: true + boot: true + source: "{{ disk }}" + network_interfaces: + - network: null # use default + access_configs: + - name: 'External NAT' + nat_ip: "{{ address }}" + type: 'ONE_TO_ONE_NAT' + zone: "{{ zone }}" + project: "{{ gcp_project }}" + auth_kind: "{{ gcp_cred_kind }}" + service_account_file: "{{ gcp_cred_file }}" + scopes: + - https://www.googleapis.com/auth/compute + register: instance + + - name: Wait for SSH to come up + wait_for: host={{ address.address }} port=22 delay=10 timeout=60 + + - name: Add host to groupname + add_host: hostname={{ address.address }} groupname=new_instances + + + - name: Manage new instances + hosts: new_instances + connection: ssh + become: True + roles: + - base_configuration + - production_server + +Note that use of the "add_host" module above creates a temporary, in-memory group. 
This means that a play in the same playbook can then manage machines +in the 'new_instances' group, if so desired. Any sort of arbitrary configuration is possible at this point. + +For more information about Google Cloud, please visit the `Google Cloud website <https://cloud.google.com>`_. + +Migration Guides +---------------- + +gce.py -> gcp_compute_instance.py +````````````````````````````````` +As of Ansible 2.8, we're encouraging everyone to move from the ``gce`` module to the +``gcp_compute_instance`` module. The ``gcp_compute_instance`` module has better +support for all of GCP's features, fewer dependencies, more flexibility, and +better supports GCP's authentication systems. + +The ``gcp_compute_instance`` module supports all of the features of the ``gce`` +module (and more!). Below is a mapping of ``gce`` fields over to +``gcp_compute_instance`` fields. + +============================ ========================================== ====================== + gce.py gcp_compute_instance.py Notes +============================ ========================================== ====================== + state state/status State on gce has multiple values: "present", "absent", "stopped", "started", "terminated". State on gcp_compute_instance is used to describe if the instance exists (present) or does not (absent). Status is used to describe if the instance is "started", "stopped" or "terminated". + image disks[].initialize_params.source_image You'll need to create a single disk using the disks[] parameter and set it to be the boot disk (disks[].boot = true) + image_family disks[].initialize_params.source_image See above. + external_projects disks[].initialize_params.source_image The name of the source_image will include the name of the project. + instance_names Use a loop or multiple tasks. Using loops is a more Ansible-centric way of creating multiple instances and gives you the most flexibility. 
+ service_account_email service_accounts[].email This is the service_account email address that you want the instance to be associated with. It is not the service_account email address that is used for the credentials necessary to create the instance. + service_account_permissions service_accounts[].scopes These are the permissions you want to grant to the instance. + pem_file Not supported. We recommend using JSON service account credentials instead of PEM files. + credentials_file service_account_file + project_id project + name name This field does not accept an array of names. Use a loop to create multiple instances. + num_instances Use a loop For maximum flexibility, we're encouraging users to use Ansible features to create multiple instances, rather than letting the module do it for you. + network network_interfaces[].network + subnetwork network_interfaces[].subnetwork + persistent_boot_disk disks[].type = 'PERSISTENT' + disks disks[] + ip_forward can_ip_forward + external_ip network_interfaces[].access_configs.nat_ip This field takes multiple types of values. You can create an IP address with ``gcp_compute_address`` and place the name/output of the address here. You can also place the string value of the IP address's GCP name or the actual IP address. + disks_auto_delete disks[].auto_delete + preemptible scheduling.preemptible + disk_size disks[].initialize_params.disk_size_gb +============================ ========================================== ====================== + +An example playbook is below: + +.. code:: yaml + + gcp_compute_instance: + name: "{{ item }}" + machine_type: n1-standard-1 + ... 
# any other settings + zone: us-central1-a + project: "my-project" + auth_kind: "service_account_file" + service_account_file: "~/my_account.json" + state: present + loop: + - instance-1 + - instance-2 diff --git a/docs/docsite/rst/scenario_guides/guide_infoblox.rst b/docs/docsite/rst/scenario_guides/guide_infoblox.rst new file mode 100644 index 00000000..d4597d90 --- /dev/null +++ b/docs/docsite/rst/scenario_guides/guide_infoblox.rst @@ -0,0 +1,292 @@ +.. _nios_guide: + +************************ + Infoblox Guide +************************ + +.. contents:: Topics + +This guide describes how to use Ansible with the Infoblox Network Identity Operating System (NIOS). With Ansible integration, you can use Ansible playbooks to automate Infoblox Core Network Services for IP address management (IPAM), DNS, and inventory tracking. + +You can review simple example tasks in the documentation for any of the :ref:`NIOS modules <nios_net tools_modules>` or look at the `Use cases with modules`_ section for more elaborate examples. See the `Infoblox <https://www.infoblox.com/>`_ website for more information on the Infoblox product. + +.. note:: You can retrieve most of the example playbooks used in this guide from the `network-automation/infoblox_ansible <https://github.com/network-automation/infoblox_ansible>`_ GitHub repository. + +Prerequisites +============= +Before using Ansible ``nios`` modules with Infoblox, you must install the ``infoblox-client`` on your Ansible control node: + +.. code-block:: bash + + $ sudo pip install infoblox-client + +.. note:: + You need an NIOS account with the WAPI feature enabled to use Ansible with Infoblox. + +.. _nios_credentials: + +Credentials and authenticating +============================== + +To use Infoblox ``nios`` modules in playbooks, you need to configure the credentials to access your Infoblox system. The examples in this guide use credentials stored in ``<playbookdir>/group_vars/nios.yml``. 
Replace these values with your Infoblox credentials: + +.. code-block:: yaml + + --- + nios_provider: + host: 192.0.0.2 + username: admin + password: ansible + +NIOS lookup plugins +=================== + +Ansible includes the following lookup plugins for NIOS: + +- :ref:`nios <nios_lookup>` Uses the Infoblox WAPI API to fetch NIOS specified objects, for example network views, DNS views, and host records. +- :ref:`nios_next_ip <nios_next_ip_lookup>` Provides the next available IP address from a network. You'll see an example of this in `Creating a host record`_. +- :ref:`nios_next_network <nios_next_network_lookup>` - Returns the next available network range for a network-container. + +You must run the NIOS lookup plugins locally by specifying ``connection: local``. See :ref:`lookup plugins <lookup_plugins>` for more detail. + + +Retrieving all network views +---------------------------- + +To retrieve all network views and save them in a variable, use the :ref:`set_fact <set_fact_module>` module with the :ref:`nios <nios_lookup>` lookup plugin: + +.. code-block:: yaml + + --- + - hosts: nios + connection: local + tasks: + - name: fetch all networkview objects + set_fact: + networkviews: "{{ lookup('nios', 'networkview', provider=nios_provider) }}" + + - name: check the networkviews + debug: + var: networkviews + + +Retrieving a host record +------------------------ + +To retrieve a set of host records, use the ``set_fact`` module with the ``nios`` lookup plugin and include a filter for the specific hosts you want to retrieve: + +.. 
code-block:: yaml + + --- + - hosts: nios + connection: local + tasks: + - name: fetch host leaf01 + set_fact: + host: "{{ lookup('nios', 'record:host', filter={'name': 'leaf01.ansible.com'}, provider=nios_provider) }}" + + - name: check the leaf01 return variable + debug: + var: host + + - name: debug specific variable (ipv4 address) + debug: + var: host.ipv4addrs[0].ipv4addr + + - name: fetch host leaf02 + set_fact: + host: "{{ lookup('nios', 'record:host', filter={'name': 'leaf02.ansible.com'}, provider=nios_provider) }}" + + - name: check the leaf02 return variable + debug: + var: host + + +If you run this ``get_host_record.yml`` playbook, you should see results similar to the following: + +.. code-block:: none + + $ ansible-playbook get_host_record.yml + + PLAY [localhost] *************************************************************************************** + + TASK [fetch host leaf01] ****************************************************************************** + ok: [localhost] + + TASK [check the leaf01 return variable] ************************************************************* + ok: [localhost] => { + < ...output shortened...> + "host": { + "ipv4addrs": [ + { + "configure_for_dhcp": false, + "host": "leaf01.ansible.com", + } + ], + "name": "leaf01.ansible.com", + "view": "default" + } + } + + TASK [debug specific variable (ipv4 address)] ****************************************************** + ok: [localhost] => { + "host.ipv4addrs[0].ipv4addr": "192.168.1.11" + } + + TASK [fetch host leaf02] ****************************************************************************** + ok: [localhost] + + TASK [check the leaf02 return variable] ************************************************************* + ok: [localhost] => { + < ...output shortened...> + "host": { + "ipv4addrs": [ + { + "configure_for_dhcp": false, + "host": "leaf02.example.com", + "ipv4addr": "192.168.1.12" + } + ], + } + } + + PLAY RECAP 
****************************************************************************************** + localhost : ok=5 changed=0 unreachable=0 failed=0 + +The output above shows the host record for ``leaf01.ansible.com`` and ``leaf02.ansible.com`` that were retrieved by the ``nios`` lookup plugin. This playbook saves the information in variables which you can use in other playbooks. This allows you to use Infoblox as a single source of truth to gather and use information that changes dynamically. See :ref:`playbooks_variables` for more information on using Ansible variables. See the :ref:`nios <nios_lookup>` examples for more data options that you can retrieve. + +You can access these playbooks at `Infoblox lookup playbooks <https://github.com/network-automation/infoblox_ansible/tree/master/lookup_playbooks>`_. + +Use cases with modules +====================== + +You can use the ``nios`` modules in tasks to simplify common Infoblox workflows. Be sure to set up your :ref:`NIOS credentials<nios_credentials>` before following these examples. + +Configuring an IPv4 network +--------------------------- + +To configure an IPv4 network, use the :ref:`nios_network <nios_network_module>` module: + +.. code-block:: yaml + + --- + - hosts: nios + connection: local + tasks: + - name: Create a network on the default network view + nios_network: + network: 192.168.100.0/24 + comment: sets the IPv4 network + options: + - name: domain-name + value: ansible.com + state: present + provider: "{{nios_provider}}" + +Notice the last parameter, ``provider``, uses the variable ``nios_provider`` defined in the ``group_vars/`` directory. + +Creating a host record +---------------------- + +To create a host record named `leaf03.ansible.com` on the newly-created IPv4 network: + +.. 
code-block:: yaml + + --- + - hosts: nios + connection: local + tasks: + - name: configure an IPv4 host record + nios_host_record: + name: leaf03.ansible.com + ipv4addrs: + - ipv4addr: + "{{ lookup('nios_next_ip', '192.168.100.0/24', provider=nios_provider)[0] }}" + state: present + provider: "{{nios_provider}}" + +Notice the IPv4 address in this example uses the :ref:`nios_next_ip <nios_next_ip_lookup>` lookup plugin to find the next available IPv4 address on the network. + +Creating a forward DNS zone +--------------------------- + +To configure a forward DNS zone, use the ``nios_zone`` module: + +.. code-block:: yaml + + --- + - hosts: nios + connection: local + tasks: + - name: Create a forward DNS zone called ansible-test.com + nios_zone: + name: ansible-test.com + comment: local DNS zone + state: present + provider: "{{ nios_provider }}" + +Creating a reverse DNS zone +--------------------------- + +To configure a reverse DNS zone: + +.. code-block:: yaml + + --- + - hosts: nios + connection: local + tasks: + - name: configure a reverse mapping zone on the system using IPV6 zone format + nios_zone: + name: 100::1/128 + zone_format: IPV6 + state: present + provider: "{{ nios_provider }}" + +Dynamic inventory script +======================== + +You can use the Infoblox dynamic inventory script to import your network node inventory with Infoblox NIOS. To gather the inventory from Infoblox, you need two files: + +- `infoblox.yaml <https://raw.githubusercontent.com/ansible-collections/community.general/main/scripts/inventory/infoblox.yaml>`_ - A file that specifies the NIOS provider arguments and optional filters. + +- `infoblox.py <https://raw.githubusercontent.com/ansible-collections/community.general/main/scripts/inventory/infoblox.py>`_ - The python script that retrieves the NIOS inventory. + +.. note:: + + Please note that the inventory script only works when Ansible 2.9, 2.10 or 3 have been installed.
The inventory script will eventually be removed from `community.general <https://galaxy.ansible.com/community/general>`_, and will not work if `community.general` is only installed with `ansible-galaxy collection install`. Please use the inventory plugin from `infoblox.nios_modules <https://galaxy.ansible.com/infoblox/nios_modules>`_ instead. + +To use the Infoblox dynamic inventory script: + +#. Download the ``infoblox.yaml`` file and save it in the ``/etc/ansible`` directory. + +#. Modify the ``infoblox.yaml`` file with your NIOS credentials. + +#. Download the ``infoblox.py`` file and save it in the ``/etc/ansible/hosts`` directory. + +#. Change the permissions on the ``infoblox.py`` file to make the file an executable: + +.. code-block:: bash + + $ sudo chmod +x /etc/ansible/hosts/infoblox.py + +You can optionally use ``./infoblox.py --list`` to test the script. After a few minutes, you should see your Infoblox inventory in JSON format. You can explicitly use the Infoblox dynamic inventory script as follows: + +.. code-block:: bash + + $ ansible -i infoblox.py all -m ping + +You can also implicitly use the Infoblox dynamic inventory script by including it in your inventory directory (``etc/ansible/hosts`` by default). See :ref:`dynamic_inventory` for more details. + +.. seealso:: + + `Infoblox website <https://www.infoblox.com//>`_ + The Infoblox website + `Infoblox and Ansible Deployment Guide <https://www.infoblox.com/resources/deployment-guides/infoblox-and-ansible-integration>`_ + The deployment guide for Ansible integration provided by Infoblox. + `Infoblox Integration in Ansible 2.5 <https://www.ansible.com/blog/infoblox-integration-in-ansible-2.5>`_ + Ansible blog post about Infoblox. + :ref:`Ansible NIOS modules <nios_net tools_modules>` + The list of supported NIOS modules, with examples. + `Infoblox Ansible Examples <https://github.com/network-automation/infoblox_ansible>`_ + Infoblox example playbooks. 
diff --git a/docs/docsite/rst/scenario_guides/guide_kubernetes.rst b/docs/docsite/rst/scenario_guides/guide_kubernetes.rst new file mode 100644 index 00000000..abd548de --- /dev/null +++ b/docs/docsite/rst/scenario_guides/guide_kubernetes.rst @@ -0,0 +1,63 @@ +Kubernetes and OpenShift Guide +============================== + +Modules for interacting with the Kubernetes (K8s) and OpenShift API are under development, and can be used in preview mode. To use them, review the requirements, and then follow the installation and use instructions. + +Requirements +------------ + +To use the modules, you'll need the following: + +- Run Ansible from source. For assistance, view :ref:`from_source`. +- `OpenShift Rest Client <https://github.com/openshift/openshift-restclient-python>`_ installed on the host that will execute the modules. + + +Installation and use +-------------------- + +The Kubernetes modules are part of the `Ansible Kubernetes collection <https://github.com/ansible-collections/community.kubernetes>`_. + +To install the collection, run the following: + +.. code-block:: bash + + $ ansible-galaxy collection install community.kubernetes + +Next, include it in a playbook, as follows: + +.. code-block:: yaml + + --- + - hosts: localhost + tasks: + - name: Create a pod + community.kubernetes.k8s: + state: present + definition: + apiVersion: v1 + kind: Pod + metadata: + name: "utilitypod-1" + namespace: default + labels: + app: galaxy + spec: + containers: + - name: utilitypod + image: busybox + + +Authenticating with the API +--------------------------- + +By default the OpenShift Rest Client will look for ``~/.kube/config``, and if found, connect using the active context. You can override the location of the file using the ``kubeconfig`` parameter, and the context, using the ``context`` parameter. + +Basic authentication is also supported using the ``username`` and ``password`` options. You can override the URL using the ``host`` parameter.
Certificate authentication works through the ``ssl_ca_cert``, ``cert_file``, and ``key_file`` parameters, and for token authentication, use the ``api_key`` parameter. + +To disable SSL certificate verification, set ``verify_ssl`` to false. + +Filing issues +````````````` + +If you find a bug or have a suggestion regarding modules, please file issues at `Ansible Kubernetes collection <https://github.com/ansible-collections/community.kubernetes>`_. +If you find a bug regarding OpenShift client, please file issues at `OpenShift REST Client issues <https://github.com/openshift/openshift-restclient-python/issues>`_. diff --git a/docs/docsite/rst/scenario_guides/guide_meraki.rst b/docs/docsite/rst/scenario_guides/guide_meraki.rst new file mode 100644 index 00000000..94c5b161 --- /dev/null +++ b/docs/docsite/rst/scenario_guides/guide_meraki.rst @@ -0,0 +1,193 @@ +.. _meraki_guide: + +****************** +Cisco Meraki Guide +****************** + +.. contents:: + :local: + + +.. _meraki_guide_intro: + +What is Cisco Meraki? +===================== + +Cisco Meraki is an easy-to-use, cloud-based, network infrastructure platform for enterprise environments. While most network hardware uses command-line interfaces (CLIs) for configuration, Meraki uses an easy-to-use Dashboard hosted in the Meraki cloud. No on-premises management hardware or software is required - only the network infrastructure to run your business. + +MS Switches +----------- + +Meraki MS switches come in multiple flavors and form factors. Meraki switches support 10/100/1000/10000 ports, as well as Cisco's mGig technology for 2.5/5/10Gbps copper connectivity. 8, 24, and 48 port flavors are available with PoE (802.3af/802.3at/UPoE) available on many models. + +MX Firewalls +------------ + +Meraki's MX firewalls support full layer 3-7 deep packet inspection. MX firewalls are compatible with a variety of VPN technologies including IPSec, SSL VPN, and Meraki's easy-to-use AutoVPN. 
+ +MR Wireless Access Points +------------------------- + +MR access points are enterprise-class, high-performance access points for the enterprise. MR access points have MIMO technology and integrated beamforming built-in for high performance applications. BLE allows for advanced location applications to be developed with no on-premises analytics platforms. + +Using the Meraki modules +======================== + +Meraki modules provide a user-friendly interface to manage your Meraki environment using Ansible. For example, details about SNMP settings for a particular organization can be discovered using the module `meraki_snmp <meraki_snmp_module>`. + +.. code-block:: yaml + + - name: Query SNMP settings + meraki_snmp: + api_key: abc123 + org_name: AcmeCorp + state: query + delegate_to: localhost + +Information about a particular object can be queried. For example, the `meraki_admin <meraki_admin_module>` module supports + +.. code-block:: yaml + + - name: Gather information about Jane Doe + meraki_admin: + api_key: abc123 + org_name: AcmeCorp + state: query + email: janedoe@email.com + delegate_to: localhost + +Common Parameters +================= + +All Ansible Meraki modules support the following parameters which affect communication with the Meraki Dashboard API. Most of these should only be used by Meraki developers and not the general public. + + host + Hostname or IP of Meraki Dashboard. + + use_https + Specifies whether communication should be over HTTPS. (Defaults to ``yes``) + + use_proxy + Whether to use a proxy for any communication. + + validate_certs + Determine whether certificates should be validated or trusted. (Defaults to ``yes``) + +These are the common parameters which are used for most every module. + + org_name + Name of organization to perform actions in. + + org_id + ID of organization to perform actions in. + + net_name + Name of network to perform actions in. + + net_id + ID of network to perform actions in. 
+ + state + General specification of what action to take. ``query`` does lookups. ``present`` creates or edits. ``absent`` deletes. + +.. hint:: Use the ``org_id`` and ``net_id`` parameters when possible. ``org_name`` and ``net_name`` require additional behind-the-scenes API calls to learn the ID values. ``org_id`` and ``net_id`` will perform faster. + +Meraki Authentication +===================== + +All API access with the Meraki Dashboard requires an API key. An API key can be generated from the organization's settings page. Each play in a playbook requires the ``api_key`` parameter to be specified. + +The "Vault" feature of Ansible allows you to keep sensitive data such as passwords or keys in encrypted files, rather than as plain text in your playbooks or roles. These vault files can then be distributed or placed in source control. See :ref:`playbooks_vault` for more information. + +Meraki's API returns a 404 error if the API key is not correct. It does not provide any specific error saying the key is incorrect. If you receive a 404 error, check the API key first. + +Returned Data Structures +======================== + +Meraki and its related Ansible modules return most information in the form of a list. For example, this is returned information by ``meraki_admin`` querying administrators. It returns a list even though there's only one. + +.. code-block:: json + + [ + { + "orgAccess": "full", + "name": "John Doe", + "tags": [], + "networks": [], + "email": "john@doe.com", + "id": "12345677890" + } + ] + +Handling Returned Data +====================== + +Since Meraki's response data uses lists instead of properly keyed dictionaries for responses, certain strategies should be used when querying data for particular information. For many situations, use the ``selectattr()`` Jinja2 function. + +Merging Existing and New Data +============================= + +Ansible's Meraki modules do not allow for manipulating data. 
For example, you may need to insert a rule in the middle of a firewall ruleset. Ansible and the Meraki modules lack a way to directly merge to manipulate data. However, a playlist can use a few tasks to split the list where you need to insert a rule and then merge them together again with the new rule added. The steps involved are as follows: + +1. Create blank "front" and "back" lists. + :: + + vars: + - front_rules: [] + - back_rules: [] +2. Get existing firewall rules from Meraki and create a new variable. + :: + + - name: Get firewall rules + meraki_mx_l3_firewall: + auth_key: abc123 + org_name: YourOrg + net_name: YourNet + state: query + delegate_to: localhost + register: rules + - set_fact: + original_ruleset: '{{rules.data}}' +3. Write the new rule. The new rule needs to be in a list so it can be merged with other lists in an upcoming step. The blank `-` puts the rule in a list so it can be merged. + :: + + - set_fact: + new_rule: + - + - comment: Block traffic to server + src_cidr: 192.0.1.0/24 + src_port: any + dst_cidr: 192.0.1.2/32 + dst_port: any + protocol: any + policy: deny +4. Split the rules into two lists. This assumes the existing ruleset is 2 rules long. + :: + + - set_fact: + front_rules: '{{front_rules + [ original_ruleset[:1] ]}}' + - set_fact: + back_rules: '{{back_rules + [ original_ruleset[1:] ]}}' +5. Merge rules with the new rule in the middle. + :: + + - set_fact: + new_ruleset: '{{front_rules + new_rule + back_rules}}' +6. Upload new ruleset to Meraki. + :: + + - name: Set two firewall rules + meraki_mx_l3_firewall: + auth_key: abc123 + org_name: YourOrg + net_name: YourNet + state: present + rules: '{{ new_ruleset }}' + delegate_to: localhost + +Error Handling +============== + +Ansible's Meraki modules will often fail if improper or incompatible parameters are specified. However, there will likely be scenarios where the module accepts the information but the Meraki API rejects the data. 
If this happens, the error will be returned in the ``body`` field for HTTP status of 400 return code. + +Meraki's API returns a 404 error if the API key is not correct. It does not provide any specific error saying the key is incorrect. If you receive a 404 error, check the API key first. 404 errors can also occur if improper object IDs (ex. ``org_id``) are specified. diff --git a/docs/docsite/rst/scenario_guides/guide_online.rst b/docs/docsite/rst/scenario_guides/guide_online.rst new file mode 100644 index 00000000..2c181a94 --- /dev/null +++ b/docs/docsite/rst/scenario_guides/guide_online.rst @@ -0,0 +1,41 @@ +**************** +Online.net Guide +**************** + +Introduction +============ + +Online is a French hosting company mainly known for providing bare-metal servers named Dedibox. +Check it out: `https://www.online.net/en <https://www.online.net/en>`_ + +Dynamic inventory for Online resources +-------------------------------------- + +Ansible has a dynamic inventory plugin that can list your resources. + +1. Create a YAML configuration such as ``online_inventory.yml`` with this content: + +.. code-block:: yaml + + plugin: online + +2. Set your ``ONLINE_TOKEN`` environment variable with your token. + You need to open an account and log into it before you can get a token. + You can find your token at the following page: `https://console.online.net/en/api/access <https://console.online.net/en/api/access>`_ + +3. You can test that your inventory is working by running: + +.. code-block:: bash + + $ ansible-inventory -v -i online_inventory.yml --list + + +4. Now you can run your playbook or any other module with this inventory: + +.. 
code-block:: console + + $ ansible all -i online_inventory.yml -m ping + sd-96735 | SUCCESS => { + "changed": false, + "ping": "pong" + } diff --git a/docs/docsite/rst/scenario_guides/guide_oracle.rst b/docs/docsite/rst/scenario_guides/guide_oracle.rst new file mode 100644 index 00000000..170ea903 --- /dev/null +++ b/docs/docsite/rst/scenario_guides/guide_oracle.rst @@ -0,0 +1,103 @@ +=================================== +Oracle Cloud Infrastructure Guide +=================================== + +************ +Introduction +************ + +Oracle provides a number of Ansible modules to interact with Oracle Cloud Infrastructure (OCI). In this guide, we will explain how you can use these modules to orchestrate, provision and configure your infrastructure on OCI. + +************ +Requirements +************ +To use the OCI Ansible modules, you must have the following prerequisites on your control node, the computer from which Ansible playbooks are executed. + +1. `An Oracle Cloud Infrastructure account. <https://cloud.oracle.com/en_US/tryit>`_ + +2. A user created in that account, in a security group with a policy that grants the necessary permissions for working with resources in those compartments. For guidance, see `How Policies Work <https://docs.cloud.oracle.com/iaas/Content/Identity/Concepts/policies.htm>`_. + +3. The necessary credentials and OCID information. + +************ +Installation +************ +1. Install the Oracle Cloud Infrastructure Python SDK (`detailed installation instructions <https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/installation.html>`_): + +.. code-block:: bash + + pip install oci + +2. Install the Ansible OCI Modules in one of two ways: + +a. From Galaxy: + +.. code-block:: bash + + ansible-galaxy install oracle.oci_ansible_modules + +b. From GitHub: + +.. code-block:: bash + + $ git clone https://github.com/oracle/oci-ansible-modules.git + +.. 
code-block:: bash + + $ cd oci-ansible-modules + + +Run one of the following commands: + +- If Ansible is installed only for your user: + +.. code-block:: bash + + $ ./install.py + +- If Ansible is installed as root: + +.. code-block:: bash + + $ sudo ./install.py + +************* +Configuration +************* + +When creating and configuring Oracle Cloud Infrastructure resources, Ansible modules use the authentication information outlined `here <https://docs.cloud.oracle.com/iaas/Content/API/Concepts/sdkconfig.htm>`_. +. + +******** +Examples +******** +Launch a compute instance +========================= +This `sample launch playbook <https://github.com/oracle/oci-ansible-modules/tree/master/samples/compute/launch_compute_instance>`_ +launches a public Compute instance and then accesses the instance from an Ansible module over an SSH connection. The sample illustrates how to: + +- Generate a temporary, host-specific SSH key pair. +- Specify the public key from the key pair for connecting to the instance, and then launch the instance. +- Connect to the newly launched instance using SSH. + +Create and manage Autonomous Data Warehouses +============================================ +This `sample warehouse playbook <https://github.com/oracle/oci-ansible-modules/tree/master/samples/database/autonomous_data_warehouse>`_ creates an Autonomous Data Warehouse and manage its lifecycle. The sample shows how to: + +- Set up an Autonomous Data Warehouse. +- List all of the Autonomous Data Warehouse instances available in a compartment, filtered by the display name. +- Get the "facts" for a specified Autonomous Data Warehouse. +- Stop and start an Autonomous Data Warehouse instance. +- Delete an Autonomous Data Warehouse instance. 
+ +Create and manage Autonomous Transaction Processing +=================================================== +This `sample playbook <https://github.com/oracle/oci-ansible-modules/tree/master/samples/database/autonomous_database>`_ +creates an Autonomous Transaction Processing database and manage its lifecycle. The sample shows how to: + +- Set up an Autonomous Transaction Processing database instance. +- List all of the Autonomous Transaction Processing instances in a compartment, filtered by the display name. +- Get the "facts" for a specified Autonomous Transaction Processing instance. +- Delete an Autonomous Transaction Processing database instance. + +You can find more examples here: `Sample Ansible Playbooks <https://docs.cloud.oracle.com/iaas/Content/API/SDKDocs/ansiblesamples.htm>`_. diff --git a/docs/docsite/rst/scenario_guides/guide_packet.rst b/docs/docsite/rst/scenario_guides/guide_packet.rst new file mode 100644 index 00000000..c08eb947 --- /dev/null +++ b/docs/docsite/rst/scenario_guides/guide_packet.rst @@ -0,0 +1,311 @@ +********************************** +Packet.net Guide +********************************** + +Introduction +============ + +`Packet.net <https://packet.net>`_ is a bare metal infrastructure host that's supported by Ansible (>=2.3) via a dynamic inventory script and two cloud modules. The two modules are: + +- packet_sshkey: adds a public SSH key from file or value to the Packet infrastructure. Every subsequently-created device will have this public key installed in .ssh/authorized_keys. +- packet_device: manages servers on Packet. You can use this module to create, restart and delete devices. + +Note, this guide assumes you are familiar with Ansible and how it works. If you're not, have a look at their :ref:`docs <ansible_documentation>` before getting started. + +Requirements +============ + +The Packet modules and inventory script connect to the Packet API using the packet-python package. You can install it with pip: + +.. 
code-block:: bash + + $ pip install packet-python + +In order to check the state of devices created by Ansible on Packet, it's a good idea to install one of the `Packet CLI clients <https://www.packet.net/developers/integrations/>`_. Otherwise you can check them via the `Packet portal <https://app.packet.net/portal>`_. + +To use the modules and inventory script you'll need a Packet API token. You can generate an API token via the Packet portal `here <https://app.packet.net/portal#/api-keys>`__. The simplest way to authenticate yourself is to set the Packet API token in an environment variable: + +.. code-block:: bash + + $ export PACKET_API_TOKEN=Bfse9F24SFtfs423Gsd3ifGsd43sSdfs + +If you're not comfortable exporting your API token, you can pass it as a parameter to the modules. + +On Packet, devices and reserved IP addresses belong to `projects <https://www.packet.com/developers/api/#projects>`_. In order to use the packet_device module, you need to specify the UUID of the project in which you want to create or manage devices. You can find a project's UUID in the Packet portal `here <https://app.packet.net/portal#/projects/list/table/>`_ (it's just under the project table) or via one of the available `CLIs <https://www.packet.net/developers/integrations/>`_. + + +If you want to use a new SSH keypair in this tutorial, you can generate it to ``./id_rsa`` and ``./id_rsa.pub`` as: + +.. code-block:: bash + + $ ssh-keygen -t rsa -f ./id_rsa + +If you want to use an existing keypair, just copy the private and public key over to the playbook directory. + + +Device Creation +=============== + +The following code block is a simple playbook that creates one `Type 0 <https://www.packet.com/cloud/servers/t1-small/>`_ server (the 'plan' parameter). You have to supply 'plan' and 'operating_system'. 'location' defaults to 'ewr1' (Parsippany, NJ). You can find all the possible values for the parameters via a `CLI client <https://www.packet.net/developers/integrations/>`_. + +.. 
code-block:: yaml + + # playbook_create.yml + + - name: create ubuntu device + hosts: localhost + tasks: + + - packet_sshkey: + key_file: ./id_rsa.pub + label: tutorial key + + - packet_device: + project_id: <your_project_id> + hostnames: myserver + operating_system: ubuntu_16_04 + plan: baremetal_0 + facility: sjc1 + +After running ``ansible-playbook playbook_create.yml``, you should have a server provisioned on Packet. You can verify via a CLI or in the `Packet portal <https://app.packet.net/portal#/projects/list/table>`__. + +If you get an error with the message "failed to set machine state present, error: Error 404: Not Found", please verify your project UUID. + + +Updating Devices +================ + +The two parameters used to uniquely identify Packet devices are: "device_ids" and "hostnames". Both parameters accept either a single string (later converted to a one-element list), or a list of strings. + +The 'device_ids' and 'hostnames' parameters are mutually exclusive. The following values are all acceptable: + +- device_ids: a27b7a83-fc93-435b-a128-47a5b04f2dcf + +- hostnames: mydev1 + +- device_ids: [a27b7a83-fc93-435b-a128-47a5b04f2dcf, 4887130f-0ccd-49a0-99b0-323c1ceb527b] + +- hostnames: [mydev1, mydev2] + +In addition, hostnames can contain a special '%d' formatter along with a 'count' parameter that lets you easily expand hostnames that follow a simple name and number pattern; in other words, ``hostnames: "mydev%d", count: 2`` will expand to [mydev1, mydev2]. + +If your playbook acts on existing Packet devices, you can only pass the 'hostname' and 'device_ids' parameters. The following playbook shows how you can reboot a specific Packet device by setting the 'hostname' parameter: + +.. 
code-block:: yaml + + # playbook_reboot.yml + + - name: reboot myserver + hosts: localhost + tasks: + + - packet_device: + project_id: <your_project_id> + hostnames: myserver + state: rebooted + +You can also identify specific Packet devices with the 'device_ids' parameter. The device's UUID can be found in the `Packet Portal <https://app.packet.net/portal>`_ or by using a `CLI <https://www.packet.net/developers/integrations/>`_. The following playbook removes a Packet device using the 'device_ids' field: + +.. code-block:: yaml + + # playbook_remove.yml + + - name: remove a device + hosts: localhost + tasks: + + - packet_device: + project_id: <your_project_id> + device_ids: <myserver_device_id> + state: absent + + +More Complex Playbooks +====================== + +In this example, we'll create a CoreOS cluster with `user data <https://packet.com/developers/docs/servers/key-features/user-data/>`_. + + +The CoreOS cluster will use `etcd <https://etcd.io/>`_ for discovery of other servers in the cluster. Before provisioning your servers, you'll need to generate a discovery token for your cluster: + +.. code-block:: bash + + $ curl -w "\n" 'https://discovery.etcd.io/new?size=3' + +The following playbook will create an SSH key, 3 Packet servers, and then wait until SSH is ready (or until 5 minutes passed). Make sure to substitute the discovery token URL in 'user_data', and the 'project_id' before running ``ansible-playbook``. Also, feel free to change 'plan' and 'facility'. + +.. 
code-block:: yaml + + # playbook_coreos.yml + + - name: Start 3 CoreOS nodes in Packet and wait until SSH is ready + hosts: localhost + tasks: + + - packet_sshkey: + key_file: ./id_rsa.pub + label: new + + - packet_device: + hostnames: [coreos-one, coreos-two, coreos-three] + operating_system: coreos_beta + plan: baremetal_0 + facility: ewr1 + project_id: <your_project_id> + wait_for_public_IPv: 4 + user_data: | + #cloud-config + coreos: + etcd2: + discovery: https://discovery.etcd.io/<token> + advertise-client-urls: http://$private_ipv4:2379,http://$private_ipv4:4001 + initial-advertise-peer-urls: http://$private_ipv4:2380 + listen-client-urls: http://0.0.0.0:2379,http://0.0.0.0:4001 + listen-peer-urls: http://$private_ipv4:2380 + fleet: + public-ip: $private_ipv4 + units: + - name: etcd2.service + command: start + - name: fleet.service + command: start + register: newhosts + + - name: wait for ssh + wait_for: + delay: 1 + host: "{{ item.public_ipv4 }}" + port: 22 + state: started + timeout: 500 + loop: "{{ newhosts.results[0].devices }}" + + +As with most Ansible modules, the default states of the Packet modules are idempotent, meaning the resources in your project will remain the same after re-runs of a playbook. Thus, we can keep the ``packet_sshkey`` module call in our playbook. If the public key is already in your Packet account, the call will have no effect. + +The second module call provisions 3 Packet Type 0 (specified using the 'plan' parameter) servers in the project identified via the 'project_id' parameter. The servers are all provisioned with CoreOS beta (the 'operating_system' parameter) and are customized with cloud-config user data passed to the 'user_data' parameter. + +The ``packet_device`` module has a ``wait_for_public_IPv`` that is used to specify the version of the IP address to wait for (valid values are ``4`` or ``6`` for IPv4 or IPv6). 
If specified, Ansible will wait until the GET API call for a device contains an Internet-routeable IP address of the specified version. When referring to an IP address of a created device in subsequent module calls, it's wise to use the ``wait_for_public_IPv`` parameter, or ``state: active`` in the packet_device module call. + +Run the playbook: + +.. code-block:: bash + + $ ansible-playbook playbook_coreos.yml + +Once the playbook quits, your new devices should be reachable via SSH. Try to connect to one and check if etcd has started properly: + +.. code-block:: bash + + tomk@work $ ssh -i id_rsa core@$one_of_the_servers_ip + core@coreos-one ~ $ etcdctl cluster-health + +Once you create a couple of devices, you might appreciate the dynamic inventory script... + + +Dynamic Inventory Script +======================== + +The dynamic inventory script queries the Packet API for a list of hosts, and exposes it to Ansible so you can easily identify and act on Packet devices. + +You can find it in Ansible Community General Collection's git repo at `scripts/inventory/packet_net.py <https://raw.githubusercontent.com/ansible-collections/community.general/main/scripts/inventory/packet_net.py>`_. + +The inventory script is configurable via a `ini file <https://raw.githubusercontent.com/ansible-collections/community.general/main/scripts/inventory/packet_net.ini>`_. + +If you want to use the inventory script, you must first export your Packet API token to a PACKET_API_TOKEN environment variable. + +You can either copy the inventory and ini config out from the cloned git repo, or you can download it to your working directory like so: + +.. 
code-block:: bash + + $ wget https://raw.githubusercontent.com/ansible-collections/community.general/main/scripts/inventory/packet_net.py + $ chmod +x packet_net.py + $ wget https://raw.githubusercontent.com/ansible-collections/community.general/main/scripts/inventory/packet_net.ini + +In order to understand what the inventory script gives to Ansible you can run: + +.. code-block:: bash + + $ ./packet_net.py --list + +It should print a JSON document looking similar to following trimmed dictionary: + +.. code-block:: json + + { + "_meta": { + "hostvars": { + "147.75.64.169": { + "packet_billing_cycle": "hourly", + "packet_created_at": "2017-02-09T17:11:26Z", + "packet_facility": "ewr1", + "packet_hostname": "coreos-two", + "packet_href": "/devices/d0ab8972-54a8-4bff-832b-28549d1bec96", + "packet_id": "d0ab8972-54a8-4bff-832b-28549d1bec96", + "packet_locked": false, + "packet_operating_system": "coreos_beta", + "packet_plan": "baremetal_0", + "packet_state": "active", + "packet_updated_at": "2017-02-09T17:16:35Z", + "packet_user": "core", + "packet_userdata": "#cloud-config\ncoreos:\n etcd2:\n discovery: https://discovery.etcd.io/e0c8a4a9b8fe61acd51ec599e2a4f68e\n advertise-client-urls: http://$private_ipv4:2379,http://$private_ipv4:4001\n initial-advertise-peer-urls: http://$private_ipv4:2380\n listen-client-urls: http://0.0.0.0:2379,http://0.0.0.0:4001\n listen-peer-urls: http://$private_ipv4:2380\n fleet:\n public-ip: $private_ipv4\n units:\n - name: etcd2.service\n command: start\n - name: fleet.service\n command: start" + } + } + }, + "baremetal_0": [ + "147.75.202.255", + "147.75.202.251", + "147.75.202.249", + "147.75.64.129", + "147.75.192.51", + "147.75.64.169" + ], + "coreos_beta": [ + "147.75.202.255", + "147.75.202.251", + "147.75.202.249", + "147.75.64.129", + "147.75.192.51", + "147.75.64.169" + ], + "ewr1": [ + "147.75.64.129", + "147.75.192.51", + "147.75.64.169" + ], + "sjc1": [ + "147.75.202.255", + "147.75.202.251", + "147.75.202.249" + ], + 
"coreos-two": [ + "147.75.64.169" + ], + "d0ab8972-54a8-4bff-832b-28549d1bec96": [ + "147.75.64.169" + ] + } + +In the ``['_meta']['hostvars']`` key, there is a list of devices (uniquely identified by their public IPv4 address) with their parameters. The other keys under ``['_meta']`` are lists of devices grouped by some parameter. Here, it is type (all devices are of type baremetal_0), operating system, and facility (ewr1 and sjc1). + +In addition to the parameter groups, there are also one-item groups with the UUID or hostname of the device. + +You can now target groups in playbooks! The following playbook will install a role that supplies resources for an Ansible target into all devices in the "coreos_beta" group: + +.. code-block:: yaml + + # playbook_bootstrap.yml + + - hosts: coreos_beta + gather_facts: false + roles: + - defunctzombie.coreos-boostrap + +Don't forget to supply the dynamic inventory in the ``-i`` argument! + +.. code-block:: bash + + $ ansible-playbook -u core -i packet_net.py playbook_bootstrap.yml + + +If you have any questions or comments let us know! help@packet.net diff --git a/docs/docsite/rst/scenario_guides/guide_rax.rst b/docs/docsite/rst/scenario_guides/guide_rax.rst new file mode 100644 index 00000000..b6100b8b --- /dev/null +++ b/docs/docsite/rst/scenario_guides/guide_rax.rst @@ -0,0 +1,810 @@ +Rackspace Cloud Guide +===================== + +.. _rax_introduction: + +Introduction +```````````` + +.. note:: This section of the documentation is under construction. We are in the process of adding more examples about the Rackspace modules and how they work together. Once complete, there will also be examples for Rackspace Cloud in `ansible-examples <https://github.com/ansible/ansible-examples/>`_. + +Ansible contains a number of core modules for interacting with Rackspace Cloud. + +The purpose of this section is to explain how to put Ansible modules together +(and use inventory scripts) to use Ansible in a Rackspace Cloud context. 
+ +Prerequisites for using the rax modules are minimal. In addition to ansible itself, +all of the modules require and are tested against pyrax 1.5 or higher. +You'll need this Python module installed on the execution host. + +``pyrax`` is not currently available in many operating system +package repositories, so you will likely need to install it via pip: + +.. code-block:: bash + + $ pip install pyrax + +Ansible creates an implicit localhost that executes in the same context as the ``ansible-playbook`` and the other CLI tools. +If for any reason you need or want to have it in your inventory you should do something like the following: + +.. code-block:: ini + + [localhost] + localhost ansible_connection=local ansible_python_interpreter=/usr/local/bin/python2 + +For more information see :ref:`Implicit Localhost <implicit_localhost>` + +In playbook steps, we'll typically be using the following pattern: + +.. code-block:: yaml + + - hosts: localhost + gather_facts: False + tasks: + +.. _credentials_file: + +Credentials File +```````````````` + +The `rax.py` inventory script and all `rax` modules support a standard `pyrax` credentials file that looks like: + +.. code-block:: ini + + [rackspace_cloud] + username = myraxusername + api_key = d41d8cd98f00b204e9800998ecf8427e + +Setting the environment parameter ``RAX_CREDS_FILE`` to the path of this file will help Ansible find how to load +this information. + +More information about this credentials file can be found at +https://github.com/pycontribs/pyrax/blob/master/docs/getting_started.md#authenticating + + +.. _virtual_environment: + +Running from a Python Virtual Environment (Optional) +++++++++++++++++++++++++++++++++++++++++++++++++++++ + +Most users will not be using virtualenv, but some users, particularly Python developers sometimes like to. + +There are special considerations when Ansible is installed to a Python virtualenv, rather than the default of installing at a global scope. 
Ansible assumes, unless otherwise instructed, that the python binary will live at /usr/bin/python. This is done via the interpreter line in modules, however when instructed by setting the inventory variable 'ansible_python_interpreter', Ansible will use this specified path instead to find Python. This can be a cause of confusion as one may assume that modules running on 'localhost', or perhaps running via 'local_action', are using the virtualenv Python interpreter. By setting this line in the inventory, the modules will execute in the virtualenv interpreter and have available the virtualenv packages, specifically pyrax. If using virtualenv, you may wish to modify your localhost inventory definition to find this location as follows: + +.. code-block:: ini + + [localhost] + localhost ansible_connection=local ansible_python_interpreter=/path/to/ansible_venv/bin/python + +.. note:: + + pyrax may be installed in the global Python package scope or in a virtual environment. There are no special considerations to keep in mind when installing pyrax. + +.. _provisioning: + +Provisioning +```````````` + +Now for the fun parts. + +The 'rax' module provides the ability to provision instances within Rackspace Cloud. Typically the provisioning task will be performed from your Ansible control server (in our example, localhost) against the Rackspace cloud API. This is done for several reasons: + + - Avoiding installing the pyrax library on remote nodes + - No need to encrypt and distribute credentials to remote nodes + - Speed and simplicity + +.. note:: + + Authentication with the Rackspace-related modules is handled by either + specifying your username and API key as environment variables or passing + them as module arguments, or by specifying the location of a credentials + file. + +Here is a basic example of provisioning an instance in ad-hoc mode: + +.. 
code-block:: bash + + $ ansible localhost -m rax -a "name=awx flavor=4 image=ubuntu-1204-lts-precise-pangolin wait=yes" + +Here's what it would look like in a playbook, assuming the parameters were defined in variables: + +.. code-block:: yaml + + tasks: + - name: Provision a set of instances + rax: + name: "{{ rax_name }}" + flavor: "{{ rax_flavor }}" + image: "{{ rax_image }}" + count: "{{ rax_count }}" + group: "{{ group }}" + wait: yes + register: rax + delegate_to: localhost + +The rax module returns data about the nodes it creates, like IP addresses, hostnames, and login passwords. By registering the return value of the step, it is possible used this data to dynamically add the resulting hosts to inventory (temporarily, in memory). This facilitates performing configuration actions on the hosts in a follow-on task. In the following example, the servers that were successfully created using the above task are dynamically added to a group called "raxhosts", with each nodes hostname, IP address, and root password being added to the inventory. + +.. code-block:: yaml + + - name: Add the instances we created (by public IP) to the group 'raxhosts' + add_host: + hostname: "{{ item.name }}" + ansible_host: "{{ item.rax_accessipv4 }}" + ansible_password: "{{ item.rax_adminpass }}" + groups: raxhosts + loop: "{{ rax.success }}" + when: rax.action == 'create' + +With the host group now created, the next play in this playbook could now configure servers belonging to the raxhosts group. + +.. code-block:: yaml + + - name: Configuration play + hosts: raxhosts + user: root + roles: + - ntp + - webserver + +The method above ties the configuration of a host with the provisioning step. This isn't always what you want, and leads us +to the next section. + +.. _host_inventory: + +Host Inventory +`````````````` + +Once your nodes are spun up, you'll probably want to talk to them again. 
The best way to handle this is to use the "rax" inventory plugin, which dynamically queries Rackspace Cloud and tells Ansible what nodes you have to manage. You might want to use this even if you are spinning up cloud instances via other tools, including the Rackspace Cloud user interface. The inventory plugin can be used to group resources by metadata, region, OS, and so on. Utilizing metadata is highly recommended in "rax" and can provide an easy way to sort between host groups and roles. If you don't want to use the ``rax.py`` dynamic inventory script, you could also still choose to manually manage your INI inventory file, though this is less recommended. + +In Ansible it is quite possible to use multiple dynamic inventory plugins along with INI file data. Just put them in a common directory and be sure the scripts are chmod +x, and the INI-based ones are not. + +.. _raxpy: + +rax.py +++++++ + +To use the Rackspace dynamic inventory script, copy ``rax.py`` into your inventory directory and make it executable. You can specify a credentials file for ``rax.py`` utilizing the ``RAX_CREDS_FILE`` environment variable. + +.. note:: Dynamic inventory scripts (like ``rax.py``) are saved in ``/usr/share/ansible/inventory`` if Ansible has been installed globally. If installed to a virtualenv, the inventory scripts are installed to ``$VIRTUALENV/share/inventory``. + +.. note:: Users of :ref:`ansible_tower` will note that dynamic inventory is natively supported by Tower, and all you have to do is associate a group with your Rackspace Cloud credentials, and it will easily synchronize without going through these steps:: + + $ RAX_CREDS_FILE=~/.raxpub ansible all -i rax.py -m setup + +``rax.py`` also accepts a ``RAX_REGION`` environment variable, which can contain an individual region, or a comma separated list of regions. + +When using ``rax.py``, you will not have a 'localhost' defined in the inventory. 
+ +As mentioned previously, you will often be running most of these modules outside of the host loop, and will need 'localhost' defined. The recommended way to do this, would be to create an ``inventory`` directory, and place both the ``rax.py`` script and a file containing ``localhost`` in it. + +Executing ``ansible`` or ``ansible-playbook`` and specifying the ``inventory`` directory instead +of an individual file, will cause ansible to evaluate each file in that directory for inventory. + +Let's test our inventory script to see if it can talk to Rackspace Cloud. + +.. code-block:: bash + + $ RAX_CREDS_FILE=~/.raxpub ansible all -i inventory/ -m setup + +Assuming things are properly configured, the ``rax.py`` inventory script will output information similar to the +following information, which will be utilized for inventory and variables. + +.. code-block:: json + + { + "ORD": [ + "test" + ], + "_meta": { + "hostvars": { + "test": { + "ansible_host": "198.51.100.1", + "rax_accessipv4": "198.51.100.1", + "rax_accessipv6": "2001:DB8::2342", + "rax_addresses": { + "private": [ + { + "addr": "192.0.2.2", + "version": 4 + } + ], + "public": [ + { + "addr": "198.51.100.1", + "version": 4 + }, + { + "addr": "2001:DB8::2342", + "version": 6 + } + ] + }, + "rax_config_drive": "", + "rax_created": "2013-11-14T20:48:22Z", + "rax_flavor": { + "id": "performance1-1", + "links": [ + { + "href": "https://ord.servers.api.rackspacecloud.com/111111/flavors/performance1-1", + "rel": "bookmark" + } + ] + }, + "rax_hostid": "e7b6961a9bd943ee82b13816426f1563bfda6846aad84d52af45a4904660cde0", + "rax_human_id": "test", + "rax_id": "099a447b-a644-471f-87b9-a7f580eb0c2a", + "rax_image": { + "id": "b211c7bf-b5b4-4ede-a8de-a4368750c653", + "links": [ + { + "href": "https://ord.servers.api.rackspacecloud.com/111111/images/b211c7bf-b5b4-4ede-a8de-a4368750c653", + "rel": "bookmark" + } + ] + }, + "rax_key_name": null, + "rax_links": [ + { + "href": 
"https://ord.servers.api.rackspacecloud.com/v2/111111/servers/099a447b-a644-471f-87b9-a7f580eb0c2a", + "rel": "self" + }, + { + "href": "https://ord.servers.api.rackspacecloud.com/111111/servers/099a447b-a644-471f-87b9-a7f580eb0c2a", + "rel": "bookmark" + } + ], + "rax_metadata": { + "foo": "bar" + }, + "rax_name": "test", + "rax_name_attr": "name", + "rax_networks": { + "private": [ + "192.0.2.2" + ], + "public": [ + "198.51.100.1", + "2001:DB8::2342" + ] + }, + "rax_os-dcf_diskconfig": "AUTO", + "rax_os-ext-sts_power_state": 1, + "rax_os-ext-sts_task_state": null, + "rax_os-ext-sts_vm_state": "active", + "rax_progress": 100, + "rax_status": "ACTIVE", + "rax_tenant_id": "111111", + "rax_updated": "2013-11-14T20:49:27Z", + "rax_user_id": "22222" + } + } + } + } + +.. _standard_inventory: + +Standard Inventory +++++++++++++++++++ + +When utilizing a standard ini formatted inventory file (as opposed to the inventory plugin), it may still be advantageous to retrieve discoverable hostvar information from the Rackspace API. + +This can be achieved with the ``rax_facts`` module and an inventory file similar to the following: + +.. code-block:: ini + + [test_servers] + hostname1 rax_region=ORD + hostname2 rax_region=ORD + +.. code-block:: yaml + + - name: Gather info about servers + hosts: test_servers + gather_facts: False + tasks: + - name: Get facts about servers + rax_facts: + credentials: ~/.raxpub + name: "{{ inventory_hostname }}" + region: "{{ rax_region }}" + delegate_to: localhost + - name: Map some facts + set_fact: + ansible_host: "{{ rax_accessipv4 }}" + +While you don't need to know how it works, it may be interesting to know what kind of variables are returned. + +The ``rax_facts`` module provides facts as followings, which match the ``rax.py`` inventory script: + +.. 
code-block:: json + + { + "ansible_facts": { + "rax_accessipv4": "198.51.100.1", + "rax_accessipv6": "2001:DB8::2342", + "rax_addresses": { + "private": [ + { + "addr": "192.0.2.2", + "version": 4 + } + ], + "public": [ + { + "addr": "198.51.100.1", + "version": 4 + }, + { + "addr": "2001:DB8::2342", + "version": 6 + } + ] + }, + "rax_config_drive": "", + "rax_created": "2013-11-14T20:48:22Z", + "rax_flavor": { + "id": "performance1-1", + "links": [ + { + "href": "https://ord.servers.api.rackspacecloud.com/111111/flavors/performance1-1", + "rel": "bookmark" + } + ] + }, + "rax_hostid": "e7b6961a9bd943ee82b13816426f1563bfda6846aad84d52af45a4904660cde0", + "rax_human_id": "test", + "rax_id": "099a447b-a644-471f-87b9-a7f580eb0c2a", + "rax_image": { + "id": "b211c7bf-b5b4-4ede-a8de-a4368750c653", + "links": [ + { + "href": "https://ord.servers.api.rackspacecloud.com/111111/images/b211c7bf-b5b4-4ede-a8de-a4368750c653", + "rel": "bookmark" + } + ] + }, + "rax_key_name": null, + "rax_links": [ + { + "href": "https://ord.servers.api.rackspacecloud.com/v2/111111/servers/099a447b-a644-471f-87b9-a7f580eb0c2a", + "rel": "self" + }, + { + "href": "https://ord.servers.api.rackspacecloud.com/111111/servers/099a447b-a644-471f-87b9-a7f580eb0c2a", + "rel": "bookmark" + } + ], + "rax_metadata": { + "foo": "bar" + }, + "rax_name": "test", + "rax_name_attr": "name", + "rax_networks": { + "private": [ + "192.0.2.2" + ], + "public": [ + "198.51.100.1", + "2001:DB8::2342" + ] + }, + "rax_os-dcf_diskconfig": "AUTO", + "rax_os-ext-sts_power_state": 1, + "rax_os-ext-sts_task_state": null, + "rax_os-ext-sts_vm_state": "active", + "rax_progress": 100, + "rax_status": "ACTIVE", + "rax_tenant_id": "111111", + "rax_updated": "2013-11-14T20:49:27Z", + "rax_user_id": "22222" + }, + "changed": false + } + + +Use Cases +````````` + +This section covers some additional usage examples built around a specific use case. + +.. 
_network_and_server: + +Network and Server +++++++++++++++++++ + +Create an isolated cloud network and build a server + +.. code-block:: yaml + + - name: Build Servers on an Isolated Network + hosts: localhost + gather_facts: False + tasks: + - name: Network create request + rax_network: + credentials: ~/.raxpub + label: my-net + cidr: 192.168.3.0/24 + region: IAD + state: present + delegate_to: localhost + + - name: Server create request + rax: + credentials: ~/.raxpub + name: web%04d.example.org + flavor: 2 + image: ubuntu-1204-lts-precise-pangolin + disk_config: manual + networks: + - public + - my-net + region: IAD + state: present + count: 5 + exact_count: yes + group: web + wait: yes + wait_timeout: 360 + register: rax + delegate_to: localhost + +.. _complete_environment: + +Complete Environment +++++++++++++++++++++ + +Build a complete webserver environment with servers, custom networks and load balancers, install nginx and create a custom index.html + +.. code-block:: yaml + + --- + - name: Build environment + hosts: localhost + gather_facts: False + tasks: + - name: Load Balancer create request + rax_clb: + credentials: ~/.raxpub + name: my-lb + port: 80 + protocol: HTTP + algorithm: ROUND_ROBIN + type: PUBLIC + timeout: 30 + region: IAD + wait: yes + state: present + meta: + app: my-cool-app + register: clb + + - name: Network create request + rax_network: + credentials: ~/.raxpub + label: my-net + cidr: 192.168.3.0/24 + state: present + region: IAD + register: network + + - name: Server create request + rax: + credentials: ~/.raxpub + name: web%04d.example.org + flavor: performance1-1 + image: ubuntu-1204-lts-precise-pangolin + disk_config: manual + networks: + - public + - private + - my-net + region: IAD + state: present + count: 5 + exact_count: yes + group: web + wait: yes + register: rax + + - name: Add servers to web host group + add_host: + hostname: "{{ item.name }}" + ansible_host: "{{ item.rax_accessipv4 }}" + ansible_password: "{{ 
item.rax_adminpass }}" + ansible_user: root + groups: web + loop: "{{ rax.success }}" + when: rax.action == 'create' + + - name: Add servers to Load balancer + rax_clb_nodes: + credentials: ~/.raxpub + load_balancer_id: "{{ clb.balancer.id }}" + address: "{{ item.rax_networks.private|first }}" + port: 80 + condition: enabled + type: primary + wait: yes + region: IAD + loop: "{{ rax.success }}" + when: rax.action == 'create' + + - name: Configure servers + hosts: web + handlers: + - name: restart nginx + service: name=nginx state=restarted + + tasks: + - name: Install nginx + apt: pkg=nginx state=latest update_cache=yes cache_valid_time=86400 + notify: + - restart nginx + + - name: Ensure nginx starts on boot + service: name=nginx state=started enabled=yes + + - name: Create custom index.html + copy: content="{{ inventory_hostname }}" dest=/usr/share/nginx/www/index.html + owner=root group=root mode=0644 + +.. _rackconnect_and_manged_cloud: + +RackConnect and Managed Cloud ++++++++++++++++++++++++++++++ + +When using RackConnect version 2 or Rackspace Managed Cloud there are Rackspace automation tasks that are executed on the servers you create after they are successfully built. If your automation executes before the RackConnect or Managed Cloud automation, you can cause failures and unusable servers. + +These examples show creating servers, and ensuring that the Rackspace automation has completed before Ansible continues onwards. + +For simplicity, these examples are joined, however both are only needed when using RackConnect. When only using Managed Cloud, the RackConnect portion can be ignored. + +The RackConnect portions only apply to RackConnect version 2. + +.. _using_a_control_machine: + +Using a Control Machine +*********************** + +.. 
code-block:: yaml + + - name: Create an exact count of servers + hosts: localhost + gather_facts: False + tasks: + - name: Server build requests + rax: + credentials: ~/.raxpub + name: web%03d.example.org + flavor: performance1-1 + image: ubuntu-1204-lts-precise-pangolin + disk_config: manual + region: DFW + state: present + count: 1 + exact_count: yes + group: web + wait: yes + register: rax + + - name: Add servers to in memory groups + add_host: + hostname: "{{ item.name }}" + ansible_host: "{{ item.rax_accessipv4 }}" + ansible_password: "{{ item.rax_adminpass }}" + ansible_user: root + rax_id: "{{ item.rax_id }}" + groups: web,new_web + loop: "{{ rax.success }}" + when: rax.action == 'create' + + - name: Wait for rackconnect and managed cloud automation to complete + hosts: new_web + gather_facts: false + tasks: + - name: ensure we run all tasks from localhost + delegate_to: localhost + block: + - name: Wait for rackconnnect automation to complete + rax_facts: + credentials: ~/.raxpub + id: "{{ rax_id }}" + region: DFW + register: rax_facts + until: rax_facts.ansible_facts['rax_metadata']['rackconnect_automation_status']|default('') == 'DEPLOYED' + retries: 30 + delay: 10 + + - name: Wait for managed cloud automation to complete + rax_facts: + credentials: ~/.raxpub + id: "{{ rax_id }}" + region: DFW + register: rax_facts + until: rax_facts.ansible_facts['rax_metadata']['rax_service_level_automation']|default('') == 'Complete' + retries: 30 + delay: 10 + + - name: Update new_web hosts with IP that RackConnect assigns + hosts: new_web + gather_facts: false + tasks: + - name: Get facts about servers + rax_facts: + name: "{{ inventory_hostname }}" + region: DFW + delegate_to: localhost + - name: Map some facts + set_fact: + ansible_host: "{{ rax_accessipv4 }}" + + - name: Base Configure Servers + hosts: web + roles: + - role: users + + - role: openssh + opensshd_PermitRootLogin: "no" + + - role: ntp + +.. 
_using_ansible_pull: + +Using Ansible Pull +****************** + +.. code-block:: yaml + + --- + - name: Ensure Rackconnect and Managed Cloud Automation is complete + hosts: all + tasks: + - name: ensure we run all tasks from localhost + delegate_to: localhost + block: + - name: Check for completed bootstrap + stat: + path: /etc/bootstrap_complete + register: bootstrap + + - name: Get region + command: xenstore-read vm-data/provider_data/region + register: rax_region + when: bootstrap.stat.exists != True + + - name: Wait for rackconnect automation to complete + uri: + url: "https://{{ rax_region.stdout|trim }}.api.rackconnect.rackspace.com/v1/automation_status?format=json" + return_content: yes + register: automation_status + when: bootstrap.stat.exists != True + until: automation_status['automation_status']|default('') == 'DEPLOYED' + retries: 30 + delay: 10 + + - name: Wait for managed cloud automation to complete + wait_for: + path: /tmp/rs_managed_cloud_automation_complete + delay: 10 + when: bootstrap.stat.exists != True + + - name: Set bootstrap completed + file: + path: /etc/bootstrap_complete + state: touch + owner: root + group: root + mode: 0400 + + - name: Base Configure Servers + hosts: all + roles: + - role: users + + - role: openssh + opensshd_PermitRootLogin: "no" + + - role: ntp + +.. _using_ansible_pull_with_xenstore: + +Using Ansible Pull with XenStore +******************************** + +.. 
code-block:: yaml + + --- + - name: Ensure Rackconnect and Managed Cloud Automation is complete + hosts: all + tasks: + - name: Check for completed bootstrap + stat: + path: /etc/bootstrap_complete + register: bootstrap + + - name: Wait for rackconnect_automation_status xenstore key to exist + command: xenstore-exists vm-data/user-metadata/rackconnect_automation_status + register: rcas_exists + when: bootstrap.stat.exists != True + failed_when: rcas_exists.rc|int > 1 + until: rcas_exists.rc|int == 0 + retries: 30 + delay: 10 + + - name: Wait for rackconnect automation to complete + command: xenstore-read vm-data/user-metadata/rackconnect_automation_status + register: rcas + when: bootstrap.stat.exists != True + until: rcas.stdout|replace('"', '') == 'DEPLOYED' + retries: 30 + delay: 10 + + - name: Wait for rax_service_level_automation xenstore key to exist + command: xenstore-exists vm-data/user-metadata/rax_service_level_automation + register: rsla_exists + when: bootstrap.stat.exists != True + failed_when: rsla_exists.rc|int > 1 + until: rsla_exists.rc|int == 0 + retries: 30 + delay: 10 + + - name: Wait for managed cloud automation to complete + command: xenstore-read vm-data/user-metadata/rackconnect_automation_status + register: rsla + when: bootstrap.stat.exists != True + until: rsla.stdout|replace('"', '') == 'DEPLOYED' + retries: 30 + delay: 10 + + - name: Set bootstrap completed + file: + path: /etc/bootstrap_complete + state: touch + owner: root + group: root + mode: 0400 + + - name: Base Configure Servers + hosts: all + roles: + - role: users + + - role: openssh + opensshd_PermitRootLogin: "no" + + - role: ntp + +.. _advanced_usage: + +Advanced Usage +`````````````` + +.. _awx_autoscale: + +Autoscaling with Tower +++++++++++++++++++++++ + +:ref:`ansible_tower` also contains a very nice feature for auto-scaling use cases. 
+In this mode, a simple curl script can call a defined URL and the server will "dial out" to the requester +and configure an instance that is spinning up. This can be a great way to reconfigure ephemeral nodes. +See the Tower documentation for more details. + +A benefit of using the callback in Tower over pull mode is that job results are still centrally recorded +and less information has to be shared with remote hosts. + +.. _pending_information: + +Orchestration in the Rackspace Cloud +++++++++++++++++++++++++++++++++++++ + +Ansible is a powerful orchestration tool, and rax modules allow you the opportunity to orchestrate complex tasks, deployments, and configurations. The key here is to automate provisioning of infrastructure, like any other piece of software in an environment. Complex deployments might have previously required manual manipulation of load balancers, or manual provisioning of servers. Utilizing the rax modules included with Ansible, one can make the deployment of additional nodes contingent on the current number of running nodes, or the configuration of a clustered application dependent on the number of nodes with common metadata. One could automate the following scenarios, for example: + +* Servers that are removed from a Cloud Load Balancer one-by-one, updated, verified, and returned to the load balancer pool +* Expansion of an already-online environment, where nodes are provisioned, bootstrapped, configured, and software installed +* A procedure where app log files are uploaded to a central location, like Cloud Files, before a node is decommissioned +* Servers and load balancers that have DNS records created and destroyed on creation and decommissioning, respectively + + + + diff --git a/docs/docsite/rst/scenario_guides/guide_scaleway.rst b/docs/docsite/rst/scenario_guides/guide_scaleway.rst new file mode 100644 index 00000000..77af9ba7 --- /dev/null +++ b/docs/docsite/rst/scenario_guides/guide_scaleway.rst @@ -0,0 +1,293 @@ +.. 
_guide_scaleway: + +************** +Scaleway Guide +************** + +.. _scaleway_introduction: + +Introduction +============ + +`Scaleway <https://scaleway.com>`_ is a cloud provider supported by Ansible, version 2.6 or higher via a dynamic inventory plugin and modules. +Those modules are: + +- :ref:`scaleway_sshkey_module`: adds a public SSH key from a file or value to the Packet infrastructure. Every subsequently-created device will have this public key installed in .ssh/authorized_keys. +- :ref:`scaleway_compute_module`: manages servers on Scaleway. You can use this module to create, restart and delete servers. +- :ref:`scaleway_volume_module`: manages volumes on Scaleway. + +.. note:: + This guide assumes you are familiar with Ansible and how it works. + If you're not, have a look at :ref:`ansible_documentation` before getting started. + +.. _scaleway_requirements: + +Requirements +============ + +The Scaleway modules and inventory script connect to the Scaleway API using `Scaleway REST API <https://developer.scaleway.com>`_. +To use the modules and inventory script you'll need a Scaleway API token. +You can generate an API token via the Scaleway console `here <https://cloud.scaleway.com/#/credentials>`__. +The simplest way to authenticate yourself is to set the Scaleway API token in an environment variable: + +.. code-block:: bash + + $ export SCW_TOKEN=00000000-1111-2222-3333-444444444444 + +If you're not comfortable exporting your API token, you can pass it as a parameter to the modules using the ``api_token`` argument. + +If you want to use a new SSH keypair in this tutorial, you can generate it to ``./id_rsa`` and ``./id_rsa.pub`` as: + +.. code-block:: bash + + $ ssh-keygen -t rsa -f ./id_rsa + +If you want to use an existing keypair, just copy the private and public key over to the playbook directory. + +.. _scaleway_add_sshkey: + +How to add an SSH key? +====================== + +Connection to Scaleway Compute nodes use Secure Shell. 
+SSH keys are stored at the account level, which means that you can re-use the same SSH key in multiple nodes. +The first step to configure Scaleway compute resources is to have at least one SSH key configured. + +:ref:`scaleway_sshkey_module` is a module that manages SSH keys on your Scaleway account. +You can add an SSH key to your account by including the following task in a playbook: + +.. code-block:: yaml + + - name: "Add SSH key" + scaleway_sshkey: + ssh_pub_key: "ssh-rsa AAAA..." + state: "present" + +The ``ssh_pub_key`` parameter contains your ssh public key as a string. Here is an example inside a playbook: + + +.. code-block:: yaml + + - name: Test SSH key lifecycle on a Scaleway account + hosts: localhost + gather_facts: no + environment: + SCW_API_KEY: "" + + tasks: + + - scaleway_sshkey: + ssh_pub_key: "ssh-rsa AAAAB...424242 developer@example.com" + state: present + register: result + + - assert: + that: + - result is success and result is changed + +.. _scaleway_create_instance: + +How to create a compute instance? +================================= + +Now that we have an SSH key configured, the next step is to spin up a server! +:ref:`scaleway_compute_module` is a module that can create, update and delete Scaleway compute instances: + +.. code-block:: yaml + + - name: Create a server + scaleway_compute: + name: foobar + state: present + image: 00000000-1111-2222-3333-444444444444 + organization: 00000000-1111-2222-3333-444444444444 + region: ams1 + commercial_type: START1-S + +Here are the parameter details for the example shown above: + +- ``name`` is the name of the instance (the one that will show up in your web console). +- ``image`` is the UUID of the system image you would like to use. + A list of all images is available for each availability zone. +- ``organization`` represents the organization that your account is attached to. +- ``region`` represents the Availability Zone which your instance is in (for this example, par1 and ams1). 
+- ``commercial_type`` represents the name of the commercial offers. + You can check out the Scaleway pricing page to find which instance is right for you. + +Take a look at this short playbook to see a working example using ``scaleway_compute``: + +.. code-block:: yaml + + - name: Test compute instance lifecycle on a Scaleway account + hosts: localhost + gather_facts: no + environment: + SCW_API_KEY: "" + + tasks: + + - name: Create a server + register: server_creation_task + scaleway_compute: + name: foobar + state: present + image: 00000000-1111-2222-3333-444444444444 + organization: 00000000-1111-2222-3333-444444444444 + region: ams1 + commercial_type: START1-S + wait: true + + - debug: var=server_creation_task + + - assert: + that: + - server_creation_task is success + - server_creation_task is changed + + - name: Run it + scaleway_compute: + name: foobar + state: running + image: 00000000-1111-2222-3333-444444444444 + organization: 00000000-1111-2222-3333-444444444444 + region: ams1 + commercial_type: START1-S + wait: true + tags: + - web_server + register: server_run_task + + - debug: var=server_run_task + + - assert: + that: + - server_run_task is success + - server_run_task is changed + +.. _scaleway_dynamic_inventory_tutorial: + +Dynamic Inventory Script +======================== + +Ansible ships with :ref:`scaleway_inventory`. +You can now get a complete inventory of your Scaleway resources through this plugin and filter it on +different parameters (``regions`` and ``tags`` are currently supported). + +Let's create an example! +Suppose that we want to get all hosts that got the tag web_server. +Create a file named ``scaleway_inventory.yml`` with the following content: + +.. code-block:: yaml + + plugin: scaleway + regions: + - ams1 + - par1 + tags: + - web_server + +This inventory means that we want all hosts that got the tag ``web_server`` on the zones ``ams1`` and ``par1``. 
+Once you have configured this file, you can get the information using the following command: + +.. code-block:: bash + + $ ansible-inventory --list -i scaleway_inventory.yml + +The output will be: + +.. code-block:: yaml + + { + "_meta": { + "hostvars": { + "dd8e3ae9-0c7c-459e-bc7b-aba8bfa1bb8d": { + "ansible_verbosity": 6, + "arch": "x86_64", + "commercial_type": "START1-S", + "hostname": "foobar", + "ipv4": "192.0.2.1", + "organization": "00000000-1111-2222-3333-444444444444", + "state": "running", + "tags": [ + "web_server" + ] + } + } + }, + "all": { + "children": [ + "ams1", + "par1", + "ungrouped", + "web_server" + ] + }, + "ams1": {}, + "par1": { + "hosts": [ + "dd8e3ae9-0c7c-459e-bc7b-aba8bfa1bb8d" + ] + }, + "ungrouped": {}, + "web_server": { + "hosts": [ + "dd8e3ae9-0c7c-459e-bc7b-aba8bfa1bb8d" + ] + } + } + +As you can see, we get different groups of hosts. +``par1`` and ``ams1`` are groups based on location. +``web_server`` is a group based on a tag. + +In case a filter parameter is not defined, the plugin supposes all values possible are wanted. +This means that for each tag that exists on your Scaleway compute nodes, a group based on each tag will be created. + +Scaleway S3 object storage +========================== + +`Object Storage <https://www.scaleway.com/object-storage>`_ allows you to store any kind of objects (documents, images, videos, and so on). +As the Scaleway API is S3 compatible, Ansible supports it natively through the modules: :ref:`s3_bucket_module`, :ref:`aws_s3_module`. + +You can find many examples in the `scaleway_s3 integration tests <https://github.com/ansible/ansible-legacy-tests/tree/devel/test/legacy/roles/scaleway_s3>`_. + +.. 
code-block:: yaml+jinja + + - hosts: myserver + vars: + scaleway_region: nl-ams + s3_url: https://s3.nl-ams.scw.cloud + environment: + # AWS_ACCESS_KEY matches your scaleway organization id available at https://cloud.scaleway.com/#/account + AWS_ACCESS_KEY: 00000000-1111-2222-3333-444444444444 + # AWS_SECRET_KEY matches a secret token that you can retrieve at https://cloud.scaleway.com/#/credentials + AWS_SECRET_KEY: aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee + module_defaults: + group/aws: + s3_url: '{{ s3_url }}' + region: '{{ scaleway_region }}' + tasks: + # use a fact instead of a variable, otherwise template is evaluate each time variable is used + - set_fact: + bucket_name: "{{ 99999999 | random | to_uuid }}" + + # "requester_pays:" is mandatory because Scaleway doesn't implement related API + # another way is to use aws_s3 and "mode: create" ! + - s3_bucket: + name: '{{ bucket_name }}' + requester_pays: + + - name: Another way to create the bucket + aws_s3: + bucket: '{{ bucket_name }}' + mode: create + encrypt: false + register: bucket_creation_check + + - name: add something in the bucket + aws_s3: + mode: put + bucket: '{{ bucket_name }}' + src: /tmp/test.txt # needs to be created before + object: test.txt + encrypt: false # server side encryption must be disabled diff --git a/docs/docsite/rst/scenario_guides/guide_vagrant.rst b/docs/docsite/rst/scenario_guides/guide_vagrant.rst new file mode 100644 index 00000000..f49477b0 --- /dev/null +++ b/docs/docsite/rst/scenario_guides/guide_vagrant.rst @@ -0,0 +1,136 @@ +Vagrant Guide +============= + +.. _vagrant_intro: + +Introduction +```````````` + +`Vagrant <https://www.vagrantup.com/>`_ is a tool to manage virtual machine +environments, and allows you to configure and use reproducible work +environments on top of various virtualization and cloud platforms. +It also has integration with Ansible as a provisioner for these virtual +machines, and the two tools work together well. 
+ +This guide will describe how to use Vagrant 1.7+ and Ansible together. + +If you're not familiar with Vagrant, you should visit `the documentation +<https://www.vagrantup.com/docs/>`_. + +This guide assumes that you already have Ansible installed and working. +Running from a Git checkout is fine. Follow the :ref:`installation_guide` +guide for more information. + +.. _vagrant_setup: + +Vagrant Setup +````````````` + +The first step once you've installed Vagrant is to create a ``Vagrantfile`` +and customize it to suit your needs. This is covered in detail in the Vagrant +documentation, but here is a quick example that includes a section to use the +Ansible provisioner to manage a single machine: + +.. code-block:: ruby + + # This guide is optimized for Vagrant 1.8 and above. + # Older versions of Vagrant put less info in the inventory they generate. + Vagrant.require_version ">= 1.8.0" + + Vagrant.configure(2) do |config| + + config.vm.box = "ubuntu/bionic64" + + config.vm.provision "ansible" do |ansible| + ansible.verbose = "v" + ansible.playbook = "playbook.yml" + end + end + +Notice the ``config.vm.provision`` section that refers to an Ansible playbook +called ``playbook.yml`` in the same directory as the ``Vagrantfile``. Vagrant +runs the provisioner once the virtual machine has booted and is ready for SSH +access. + +There are a lot of Ansible options you can configure in your ``Vagrantfile``. +Visit the `Ansible Provisioner documentation +<https://www.vagrantup.com/docs/provisioning/ansible.html>`_ for more +information. + +.. code-block:: bash + + $ vagrant up + +This will start the VM, and run the provisioning playbook (on the first VM +startup). + + +To re-run a playbook on an existing VM, just run: + +.. code-block:: bash + + $ vagrant provision + +This will re-run the playbook against the existing VM. 
+ +Note that having the ``ansible.verbose`` option enabled will instruct Vagrant +to show the full ``ansible-playbook`` command used behind the scene, as +illustrated by this example: + +.. code-block:: bash + + $ PYTHONUNBUFFERED=1 ANSIBLE_FORCE_COLOR=true ANSIBLE_HOST_KEY_CHECKING=false ANSIBLE_SSH_ARGS='-o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ControlMaster=auto -o ControlPersist=60s' ansible-playbook --connection=ssh --timeout=30 --limit="default" --inventory-file=/home/someone/coding-in-a-project/.vagrant/provisioners/ansible/inventory -v playbook.yml + +This information can be quite useful to debug integration issues and can also +be used to manually execute Ansible from a shell, as explained in the next +section. + +.. _running_ansible: + +Running Ansible Manually +```````````````````````` + +Sometimes you may want to run Ansible manually against the machines. This is +faster than kicking ``vagrant provision`` and pretty easy to do. + +With our ``Vagrantfile`` example, Vagrant automatically creates an Ansible +inventory file in ``.vagrant/provisioners/ansible/inventory/vagrant_ansible_inventory``. +This inventory is configured according to the SSH tunnel that Vagrant +automatically creates. A typical automatically-created inventory file for a +single machine environment may look something like this: + +.. code-block:: none + + # Generated by Vagrant + + default ansible_host=127.0.0.1 ansible_port=2222 ansible_user='vagrant' ansible_ssh_private_key_file='/home/someone/coding-in-a-project/.vagrant/machines/default/virtualbox/private_key' + +If you want to run Ansible manually, you will want to make sure to pass +``ansible`` or ``ansible-playbook`` commands the correct arguments, at least +for the *inventory*. + +.. 
code-block:: bash + + $ ansible-playbook -i .vagrant/provisioners/ansible/inventory/vagrant_ansible_inventory playbook.yml + +Advanced Usages +``````````````` + +The "Tips and Tricks" chapter of the `Ansible Provisioner documentation +<https://www.vagrantup.com/docs/provisioning/ansible.html>`_ provides detailed information about more advanced Ansible features like: + + - how to execute a playbook in parallel within a multi-machine environment + - how to integrate a local ``ansible.cfg`` configuration file + +.. seealso:: + + `Vagrant Home <https://www.vagrantup.com/>`_ + The Vagrant homepage with downloads + `Vagrant Documentation <https://www.vagrantup.com/docs/>`_ + Vagrant Documentation + `Ansible Provisioner <https://www.vagrantup.com/docs/provisioning/ansible.html>`_ + The Vagrant documentation for the Ansible provisioner + `Vagrant Issue Tracker <https://github.com/hashicorp/vagrant/issues?q=is%3Aopen+is%3Aissue+label%3Aprovisioners%2Fansible>`_ + The open issues for the Ansible provisioner in the Vagrant project + :ref:`working_with_playbooks` + An introduction to playbooks diff --git a/docs/docsite/rst/scenario_guides/guide_vmware.rst b/docs/docsite/rst/scenario_guides/guide_vmware.rst new file mode 100644 index 00000000..b31553d5 --- /dev/null +++ b/docs/docsite/rst/scenario_guides/guide_vmware.rst @@ -0,0 +1,33 @@ +.. _vmware_ansible: + +****************** +VMware Guide +****************** + +Welcome to the Ansible for VMware Guide! + +The purpose of this guide is to teach you everything you need to know about using Ansible with VMware. + +To get started, please select one of the following topics. + +.. 
toctree:: + :maxdepth: 1 + + vmware_scenarios/vmware_intro + vmware_scenarios/vmware_concepts + vmware_scenarios/vmware_requirements + vmware_scenarios/vmware_inventory + vmware_scenarios/vmware_inventory_vm_attributes + vmware_scenarios/vmware_inventory_hostnames + vmware_scenarios/vmware_inventory_filters + vmware_scenarios/vmware_scenarios + vmware_scenarios/vmware_troubleshooting + vmware_scenarios/vmware_external_doc_links + vmware_scenarios/faq +.. comments look like this - start with two dots +.. getting_started content not ready +.. vmware_scenarios/vmware_getting_started +.. module index page not ready +.. vmware_scenarios/vmware_module_reference +.. always exclude the template file +.. vmware_scenarios/vmware_scenario_1 diff --git a/docs/docsite/rst/scenario_guides/guide_vultr.rst b/docs/docsite/rst/scenario_guides/guide_vultr.rst new file mode 100644 index 00000000..c5d5adec --- /dev/null +++ b/docs/docsite/rst/scenario_guides/guide_vultr.rst @@ -0,0 +1,171 @@ +Vultr Guide +=========== + +Ansible offers a set of modules to interact with `Vultr <https://www.vultr.com>`_ cloud platform. + +This set of module forms a framework that allows one to easily manage and orchestrate one's infrastructure on Vultr cloud platform. + + +Requirements +------------ + +There is actually no technical requirement; simply an already created Vultr account. + + +Configuration +------------- + +Vultr modules offer a rather flexible way with regard to configuration. + +Configuration is read in that order: + +- Environment Variables (eg. ``VULTR_API_KEY``, ``VULTR_API_TIMEOUT``) +- File specified by environment variable ``VULTR_API_CONFIG`` +- ``vultr.ini`` file located in current working directory +- ``$HOME/.vultr.ini`` + + +Ini file are structured this way: + +.. 
code-block:: ini + + [default] + key = MY_API_KEY + timeout = 60 + + [personal_account] + key = MY_PERSONAL_ACCOUNT_API_KEY + timeout = 30 + + +If ``VULTR_API_ACCOUNT`` environment variable or ``api_account`` module parameter is not specified, modules will look for the section named "default". + + +Authentication +-------------- + +Before using the Ansible modules to interact with Vultr, ones need an API key. +If one doesn't own one yet, log in to `Vultr <https://www.vultr.com>`_ go to Account, then API, enable API then the API key should show up. + +Ensure you allow the usage of the API key from the proper IP addresses. + +Refer to the Configuration section to find out where to put this information. + +To check that everything is working properly run the following command: + +.. code-block:: console + + #> VULTR_API_KEY=XXX ansible -m vultr_account_info localhost + localhost | SUCCESS => { + "changed": false, + "vultr_account_info": { + "balance": -8.9, + "last_payment_amount": -10.0, + "last_payment_date": "2018-07-21 11:34:46", + "pending_charges": 6.0 + }, + "vultr_api": { + "api_account": "default", + "api_endpoint": "https://api.vultr.com", + "api_retries": 5, + "api_timeout": 60 + } + } + + +If a similar output displays then everything is setup properly, else please ensure the proper ``VULTR_API_KEY`` has been specified and that Access Control on Vultr > Account > API page are accurate. + + +Usage +----- + +Since `Vultr <https://www.vultr.com>`_ offers a public API, the execution of the module to manage the infrastructure on their platform will happen on localhost. This translates to: + +.. code-block:: yaml + + --- + - hosts: localhost + tasks: + - name: Create a 10G volume + vultr_block_storage: + name: my_disk + size: 10 + region: New Jersey + + +From that point on, only your creativity is the limit. Make sure to read the documentation of the `available modules <https://docs.ansible.com/ansible/latest/modules/list_of_cloud_modules.html#vultr>`_. 
+ + +Dynamic Inventory +----------------- + +Ansible provides a dynamic inventory plugin for `Vultr <https://www.vultr.com>`_. +The configuration process is exactly the same as the one for the modules. + +To be able to use it you need to enable it first by specifying the following in the ``ansible.cfg`` file: + +.. code-block:: ini + + [inventory] + enable_plugins=vultr + +And provide a configuration file to be used with the plugin, the minimal configuration file looks like this: + +.. code-block:: yaml + + --- + plugin: vultr + +To list the available hosts one can simply run: + +.. code-block:: console + + #> ansible-inventory -i vultr.yml --list + + +For example, this allows you to take action on nodes grouped by location or OS name: + +.. code-block:: yaml + + --- + - hosts: Amsterdam + tasks: + - name: Rebooting the machine + shell: reboot + become: True + + +Integration tests +----------------- + +Ansible includes integration tests for all Vultr modules. + +These tests are meant to run against the public Vultr API and that is why they require a valid key to access the API. + +Prepare the test setup: + +.. code-block:: shell + + $ cd ansible # location the ansible source is + $ source ./hacking/env-setup + +Set the Vultr API key: + +.. code-block:: shell + + $ cd test/integration + $ cp cloud-config-vultr.ini.template cloud-config-vultr.ini + $ vi cloud-config-vultr.ini + +Run all Vultr tests: + +.. code-block:: shell + + $ ansible-test integration cloud/vultr/ -v --diff --allow-unsupported + + +To run a specific test, for example vultr_account_info: + +.. code-block:: shell + + $ ansible-test integration cloud/vultr/vultr_account_info -v --diff --allow-unsupported diff --git a/docs/docsite/rst/scenario_guides/guides.rst b/docs/docsite/rst/scenario_guides/guides.rst new file mode 100644 index 00000000..2ff65bbc --- /dev/null +++ b/docs/docsite/rst/scenario_guides/guides.rst @@ -0,0 +1,43 @@ +:orphan: + +.. 
unified index page included for backwards compatibility + +****************** +Scenario Guides +****************** + +The guides in this section cover integrating Ansible with a variety of +platforms, products, and technologies. They explore particular use cases in greater depth and provide a more "top-down" explanation of some basic features. + +.. toctree:: + :maxdepth: 1 + :caption: Public Cloud Guides + + guide_alicloud + guide_aws + guide_cloudstack + guide_gce + guide_azure + guide_online + guide_oracle + guide_packet + guide_rax + guide_scaleway + guide_vultr + +.. toctree:: + :maxdepth: 1 + :caption: Network Technology Guides + + guide_aci + guide_meraki + guide_infoblox + +.. toctree:: + :maxdepth: 1 + :caption: Virtualization & Containerization Guides + + guide_docker + guide_kubernetes + guide_vagrant + guide_vmware diff --git a/docs/docsite/rst/scenario_guides/network_guides.rst b/docs/docsite/rst/scenario_guides/network_guides.rst new file mode 100644 index 00000000..2b538ff0 --- /dev/null +++ b/docs/docsite/rst/scenario_guides/network_guides.rst @@ -0,0 +1,16 @@ +.. _network_guides: + +************************* +Network Technology Guides +************************* + +The guides in this section cover using Ansible with specific network technologies. They explore particular use cases in greater depth and provide a more "top-down" explanation of some basic features. + +.. toctree:: + :maxdepth: 1 + + guide_aci + guide_meraki + guide_infoblox + +To learn more about Network Automation with Ansible, see :ref:`network_getting_started` and :ref:`network_advanced`. diff --git a/docs/docsite/rst/scenario_guides/scenario_template.rst b/docs/docsite/rst/scenario_guides/scenario_template.rst new file mode 100644 index 00000000..14695bed --- /dev/null +++ b/docs/docsite/rst/scenario_guides/scenario_template.rst @@ -0,0 +1,53 @@ +:orphan: + +.. 
_scenario_template: + +************************************* +Sample scenario for Ansible platforms +************************************* + +*Use this ``rst`` file as a starting point to create a scenario guide for your platform. The sections below are suggestions on what should be in a scenario guide.* + +Introductory paragraph. + +.. contents:: + :local: + +Prerequisites +============= + +Describe the requirements and assumptions for this scenario. This should include applicable subsections for hardware, software, and any other caveats to using the scenarios in this guide. + +Credentials and authenticating +============================== + +Describe credential requirements and how to authenticate to this platform. + +Using dynamic inventory +========================= + +If applicable, describe how to use a dynamic inventory plugin for this platform. + + +Example description +=================== + +Description and code here. Change the section header to something descriptive about this example, such as "Renaming a virtual machine". The goal is that this is the text someone would search for to find your example. + + +Example output +-------------- + +What the user should expect to see. + + +Troubleshooting +--------------- + +What to look for if it breaks. + + +Conclusion and where to go next +=============================== + +Recap of important points. For more information please see: links. diff --git a/docs/docsite/rst/scenario_guides/virt_guides.rst b/docs/docsite/rst/scenario_guides/virt_guides.rst new file mode 100644 index 00000000..b623799f --- /dev/null +++ b/docs/docsite/rst/scenario_guides/virt_guides.rst @@ -0,0 +1,15 @@ +.. _virtualization_guides: + +****************************************** +Virtualization and Containerization Guides +****************************************** + +The guides in this section cover integrating Ansible with popular tools for creating virtual machines and containers. 
They explore particular use cases in greater depth and provide a more "top-down" explanation of some basic features. + +.. toctree:: + :maxdepth: 1 + + guide_docker + guide_kubernetes + guide_vagrant + guide_vmware diff --git a/docs/docsite/rst/scenario_guides/vmware_scenarios/faq.rst b/docs/docsite/rst/scenario_guides/vmware_scenarios/faq.rst new file mode 100644 index 00000000..6987df0b --- /dev/null +++ b/docs/docsite/rst/scenario_guides/vmware_scenarios/faq.rst @@ -0,0 +1,26 @@ +.. _vmware_faq: + +****************** +Ansible VMware FAQ +****************** + +vmware_guest +============ + +Can I deploy a virtual machine on a standalone ESXi server ? +------------------------------------------------------------ + +Yes. ``vmware_guest`` can deploy a virtual machine with required settings on a standalone ESXi server. +However, you must have a paid license to deploy virtual machines this way. If you are using the free version, the API is read-only. + +Is ``/vm`` required for ``vmware_guest`` module ? +------------------------------------------------- + +Prior to Ansible version 2.5, ``folder`` was an optional parameter with a default value of ``/vm``. + +The folder parameter was used to discover information about virtual machines in the given infrastructure. + +Starting with Ansible version 2.5, ``folder`` is still an optional parameter with no default value. +This parameter will be now used to identify a user's virtual machine, if multiple virtual machines or virtual +machine templates are found with same name. VMware does not restrict the system administrator from creating virtual +machines with same name. diff --git a/docs/docsite/rst/scenario_guides/vmware_scenarios/scenario_clone_template.rst b/docs/docsite/rst/scenario_guides/vmware_scenarios/scenario_clone_template.rst new file mode 100644 index 00000000..2c7647ef --- /dev/null +++ b/docs/docsite/rst/scenario_guides/vmware_scenarios/scenario_clone_template.rst @@ -0,0 +1,222 @@ +.. 
_vmware_guest_from_template: + +**************************************** +Deploy a virtual machine from a template +**************************************** + +.. contents:: Topics + +Introduction +============ + +This guide will show you how to utilize Ansible to clone a virtual machine from already existing VMware template or existing VMware guest. + +Scenario Requirements +===================== + +* Software + + * Ansible 2.5 or later must be installed + + * The Python module ``Pyvmomi`` must be installed on the Ansible (or Target host if not executing against localhost) + + * Installing the latest ``Pyvmomi`` via ``pip`` is recommended [as the OS provided packages are usually out of date and incompatible] + +* Hardware + + * vCenter Server with at least one ESXi server + +* Access / Credentials + + * Ansible (or the target server) must have network access to the either vCenter server or the ESXi server you will be deploying to + + * Username and Password + + * Administrator user with following privileges + + - ``Datastore.AllocateSpace`` on the destination datastore or datastore folder + - ``Network.Assign`` on the network to which the virtual machine will be assigned + - ``Resource.AssignVMToPool`` on the destination host, cluster, or resource pool + - ``VirtualMachine.Config.AddNewDisk`` on the datacenter or virtual machine folder + - ``VirtualMachine.Config.AddRemoveDevice`` on the datacenter or virtual machine folder + - ``VirtualMachine.Interact.PowerOn`` on the datacenter or virtual machine folder + - ``VirtualMachine.Inventory.CreateFromExisting`` on the datacenter or virtual machine folder + - ``VirtualMachine.Provisioning.Clone`` on the virtual machine you are cloning + - ``VirtualMachine.Provisioning.Customize`` on the virtual machine or virtual machine folder if you are customizing the guest operating system + - ``VirtualMachine.Provisioning.DeployTemplate`` on the template you are using + - ``VirtualMachine.Provisioning.ReadCustSpecs`` on the root 
vCenter Server if you are customizing the guest operating system + + Depending on your requirements, you could also need one or more of the following privileges: + + - ``VirtualMachine.Config.CPUCount`` on the datacenter or virtual machine folder + - ``VirtualMachine.Config.Memory`` on the datacenter or virtual machine folder + - ``VirtualMachine.Config.DiskExtend`` on the datacenter or virtual machine folder + - ``VirtualMachine.Config.Annotation`` on the datacenter or virtual machine folder + - ``VirtualMachine.Config.AdvancedConfig`` on the datacenter or virtual machine folder + - ``VirtualMachine.Config.EditDevice`` on the datacenter or virtual machine folder + - ``VirtualMachine.Config.Resource`` on the datacenter or virtual machine folder + - ``VirtualMachine.Config.Settings`` on the datacenter or virtual machine folder + - ``VirtualMachine.Config.UpgradeVirtualHardware`` on the datacenter or virtual machine folder + - ``VirtualMachine.Interact.SetCDMedia`` on the datacenter or virtual machine folder + - ``VirtualMachine.Interact.SetFloppyMedia`` on the datacenter or virtual machine folder + - ``VirtualMachine.Interact.DeviceConnection`` on the datacenter or virtual machine folder + +Assumptions +=========== + +- All variable names and VMware object names are case sensitive +- VMware allows creation of virtual machine and templates with same name across datacenters and within datacenters +- You need to use Python 2.7.9 version in order to use ``validate_certs`` option, as this version is capable of changing the SSL verification behaviours + +Caveats +======= + +- Hosts in the ESXi cluster must have access to the datastore that the template resides on. +- Multiple templates with the same name will cause module failures. +- In order to utilize Guest Customization, VMware Tools must be installed on the template. For Linux, the ``open-vm-tools`` package is recommended, and it requires that ``Perl`` be installed. 
+ + +Example Description +=================== + +In this use case / example, we will be selecting a virtual machine template and cloning it into a specific folder in our Datacenter / Cluster. The following Ansible playbook showcases the basic parameters that are needed for this. + +.. code-block:: yaml + + --- + - name: Create a VM from a template + hosts: localhost + gather_facts: no + tasks: + - name: Clone the template + vmware_guest: + hostname: "{{ vcenter_ip }}" + username: "{{ vcenter_username }}" + password: "{{ vcenter_password }}" + validate_certs: False + name: testvm_2 + template: template_el7 + datacenter: "{{ datacenter_name }}" + folder: /DC1/vm + state: poweredon + cluster: "{{ cluster_name }}" + wait_for_ip_address: yes + + +Since Ansible utilizes the VMware API to perform actions, in this use case we will be connecting directly to the API from our localhost. This means that our playbooks will not be running from the vCenter or ESXi Server. We do not necessarily need to collect facts about our localhost, so the ``gather_facts`` parameter will be disabled. You can run these modules against another server that would then connect to the API if your localhost does not have access to vCenter. If so, the required Python modules will need to be installed on that target server. + +To begin, there are a few bits of information we will need. First and foremost is the hostname of the ESXi server or vCenter server. After this, you will need the username and password for this server. For now, you will be entering these directly, but in a more advanced playbook this can be abstracted out and stored in a more secure fashion using :ref:`ansible-vault` or using `Ansible Tower credentials <https://docs.ansible.com/ansible-tower/latest/html/userguide/credentials.html>`_. 
If your vCenter or ESXi server is not setup with proper CA certificates that can be verified from the Ansible server, then it is necessary to disable validation of these certificates by using the ``validate_certs`` parameter. To do this you need to set ``validate_certs=False`` in your playbook. + +Now you need to supply the information about the virtual machine which will be created. Give your virtual machine a name, one that conforms to all VMware requirements for naming conventions. Next, select the display name of the template from which you want to clone new virtual machine. This must match what's displayed in VMware Web UI exactly. Then you can specify a folder to place this new virtual machine in. This path can either be a relative path or a full path to the folder including the Datacenter. You may need to specify a state for the virtual machine. This simply tells the module which action you want to take, in this case you will be ensure that the virtual machine exists and is powered on. An optional parameter is ``wait_for_ip_address``, this will tell Ansible to wait for the virtual machine to fully boot up and VMware Tools is running before completing this task. + + +What to expect +-------------- + +- You will see a bit of JSON output after this playbook completes. This output shows various parameters that are returned from the module and from vCenter about the newly created VM. + +.. 
code-block:: yaml + + { + "changed": true, + "instance": { + "annotation": "", + "current_snapshot": null, + "customvalues": {}, + "guest_consolidation_needed": false, + "guest_question": null, + "guest_tools_status": "guestToolsNotRunning", + "guest_tools_version": "0", + "hw_cores_per_socket": 1, + "hw_datastores": [ + "ds_215" + ], + "hw_esxi_host": "192.0.2.44", + "hw_eth0": { + "addresstype": "assigned", + "ipaddresses": null, + "label": "Network adapter 1", + "macaddress": "00:50:56:8c:19:f4", + "macaddress_dash": "00-50-56-8c-19-f4", + "portgroup_key": "dvportgroup-17", + "portgroup_portkey": "0", + "summary": "DVSwitch: 50 0c 5b 22 b6 68 ab 89-fc 0b 59 a4 08 6e 80 fa" + }, + "hw_files": [ + "[ds_215] testvm_2/testvm_2.vmx", + "[ds_215] testvm_2/testvm_2.vmsd", + "[ds_215] testvm_2/testvm_2.vmdk" + ], + "hw_folder": "/DC1/vm", + "hw_guest_full_name": null, + "hw_guest_ha_state": null, + "hw_guest_id": null, + "hw_interfaces": [ + "eth0" + ], + "hw_is_template": false, + "hw_memtotal_mb": 512, + "hw_name": "testvm_2", + "hw_power_status": "poweredOff", + "hw_processor_count": 2, + "hw_product_uuid": "420cb25b-81e8-8d3b-dd2d-a439ee54fcc5", + "hw_version": "vmx-13", + "instance_uuid": "500cd53b-ed57-d74e-2da8-0dc0eddf54d5", + "ipv4": null, + "ipv6": null, + "module_hw": true, + "snapshots": [] + }, + "invocation": { + "module_args": { + "annotation": null, + "cdrom": {}, + "cluster": "DC1_C1", + "customization": {}, + "customization_spec": null, + "customvalues": [], + "datacenter": "DC1", + "disk": [], + "esxi_hostname": null, + "folder": "/DC1/vm", + "force": false, + "guest_id": null, + "hardware": {}, + "hostname": "192.0.2.44", + "is_template": false, + "linked_clone": false, + "name": "testvm_2", + "name_match": "first", + "networks": [], + "password": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER", + "port": 443, + "resource_pool": null, + "snapshot_src": null, + "state": "present", + "state_change_timeout": 0, + "template": "template_el7", + "username": 
"administrator@vsphere.local", + "uuid": null, + "validate_certs": false, + "vapp_properties": [], + "wait_for_ip_address": true + } + } + } + +- State is changed to ``True`` which notifies that the virtual machine is built using given template. The module will not complete until the clone task in VMware is finished. This can take some time depending on your environment. + +- If you utilize the ``wait_for_ip_address`` parameter, then it will also increase the clone time as it will wait until virtual machine boots into the OS and an IP Address has been assigned to the given NIC. + + + +Troubleshooting +--------------- + +Things to inspect + +- Check if the values provided for username and password are correct +- Check if the datacenter you provided is available +- Check if the template specified exists and you have permissions to access the datastore +- Ensure the full folder path you specified already exists. It will not create folders automatically for you + diff --git a/docs/docsite/rst/scenario_guides/vmware_scenarios/scenario_find_vm_folder.rst b/docs/docsite/rst/scenario_guides/vmware_scenarios/scenario_find_vm_folder.rst new file mode 100644 index 00000000..62758867 --- /dev/null +++ b/docs/docsite/rst/scenario_guides/vmware_scenarios/scenario_find_vm_folder.rst @@ -0,0 +1,120 @@ +.. _vmware_guest_find_folder: + +****************************************************** +Find folder path of an existing VMware virtual machine +****************************************************** + +.. contents:: Topics + +Introduction +============ + +This guide will show you how to utilize Ansible to find folder path of an existing VMware virtual machine. + +Scenario Requirements +===================== + +* Software + + * Ansible 2.5 or later must be installed. + + * The Python module ``Pyvmomi`` must be installed on the Ansible control node (or Target host if not executing against localhost). 
+ + * We recommend installing the latest version with pip: ``pip install Pyvmomi`` (as the OS packages are usually out of date and incompatible). + +* Hardware + + * At least one standalone ESXi server or + + * vCenter Server with at least one ESXi server + +* Access / Credentials + + * Ansible (or the target server) must have network access to the either vCenter server or the ESXi server + + * Username and Password for vCenter or ESXi server + +Caveats +======= + +- All variable names and VMware object names are case sensitive. +- You need to use Python 2.7.9 version in order to use ``validate_certs`` option, as this version is capable of changing the SSL verification behaviours. + + +Example Description +=================== + +With the following Ansible playbook you can find the folder path of an existing virtual machine using name. + +.. code-block:: yaml + + --- + - name: Find folder path of an existing virtual machine + hosts: localhost + gather_facts: False + vars_files: + - vcenter_vars.yml + vars: + ansible_python_interpreter: "/usr/bin/env python3" + tasks: + - set_fact: + vm_name: "DC0_H0_VM0" + + - name: "Find folder for VM - {{ vm_name }}" + vmware_guest_find: + hostname: "{{ vcenter_server }}" + username: "{{ vcenter_user }}" + password: "{{ vcenter_pass }}" + validate_certs: False + name: "{{ vm_name }}" + delegate_to: localhost + register: vm_facts + + +Since Ansible utilizes the VMware API to perform actions, in this use case it will be connecting directly to the API from localhost. + +This means that playbooks will not be running from the vCenter or ESXi Server. + +Note that this play disables the ``gather_facts`` parameter, since you don't want to collect facts about localhost. + +You can run these modules against another server that would then connect to the API if localhost does not have access to vCenter. If so, the required Python modules will need to be installed on that target server. 
We recommend installing the latest version with pip: ``pip install Pyvmomi`` (as the OS packages are usually out of date and incompatible). + +Before you begin, make sure you have: + +- Hostname of the ESXi server or vCenter server +- Username and password for the ESXi or vCenter server +- Name of the existing Virtual Machine for which you want to collect folder path + +For now, you will be entering these directly, but in a more advanced playbook this can be abstracted out and stored in a more secure fashion using :ref:`ansible-vault` or using `Ansible Tower credentials <https://docs.ansible.com/ansible-tower/latest/html/userguide/credentials.html>`_. + +If your vCenter or ESXi server is not setup with proper CA certificates that can be verified from the Ansible server, then it is necessary to disable validation of these certificates by using the ``validate_certs`` parameter. To do this you need to set ``validate_certs=False`` in your playbook. + +The name of existing virtual machine will be used as input for ``vmware_guest_find`` module via ``name`` parameter. + + +What to expect +-------------- + +Running this playbook can take some time, depending on your environment and network connectivity. When the run is complete you will see + +.. code-block:: yaml + + "vm_facts": { + "changed": false, + "failed": false, + ... + "folders": [ + "/F0/DC0/vm/F0" + ] + } + + +Troubleshooting +--------------- + +If your playbook fails: + +- Check if the values provided for username and password are correct. +- Check if the datacenter you provided is available. +- Check if the virtual machine specified exists and you have respective permissions to access VMware object. +- Ensure the full folder path you specified already exists. 
diff --git a/docs/docsite/rst/scenario_guides/vmware_scenarios/scenario_remove_vm.rst b/docs/docsite/rst/scenario_guides/vmware_scenarios/scenario_remove_vm.rst new file mode 100644 index 00000000..620f8e0a --- /dev/null +++ b/docs/docsite/rst/scenario_guides/vmware_scenarios/scenario_remove_vm.rst @@ -0,0 +1,126 @@ +.. _vmware_guest_remove_virtual_machine: + +***************************************** +Remove an existing VMware virtual machine +***************************************** + +.. contents:: Topics + +Introduction +============ + +This guide will show you how to utilize Ansible to remove an existing VMware virtual machine. + +Scenario Requirements +===================== + +* Software + + * Ansible 2.5 or later must be installed. + + * The Python module ``Pyvmomi`` must be installed on the Ansible control node (or Target host if not executing against localhost). + + * We recommend installing the latest version with pip: ``pip install Pyvmomi`` (as the OS packages are usually out of date and incompatible). + +* Hardware + + * At least one standalone ESXi server or + + * vCenter Server with at least one ESXi server + +* Access / Credentials + + * Ansible (or the target server) must have network access to the either vCenter server or the ESXi server + + * Username and Password for vCenter or ESXi server + + * Hosts in the ESXi cluster must have access to the datastore that the template resides on. + +Caveats +======= + +- All variable names and VMware object names are case sensitive. +- You need to use Python 2.7.9 version in order to use ``validate_certs`` option, as this version is capable of changing the SSL verification behaviours. +- ``vmware_guest`` module tries to mimic VMware Web UI and workflow, so the virtual machine must be in powered off state in order to remove it from the VMware inventory. + +.. 
warning:: + + The removal VMware virtual machine using ``vmware_guest`` module is destructive operation and can not be reverted, so it is strongly recommended to take the backup of virtual machine and related files (vmx and vmdk files) before proceeding. + +Example Description +=================== + +In this use case / example, user will be removing a virtual machine using name. The following Ansible playbook showcases the basic parameters that are needed for this. + +.. code-block:: yaml + + --- + - name: Remove virtual machine + gather_facts: no + vars_files: + - vcenter_vars.yml + vars: + ansible_python_interpreter: "/usr/bin/env python3" + hosts: localhost + tasks: + - set_fact: + vm_name: "VM_0003" + datacenter: "DC1" + + - name: Remove "{{ vm_name }}" + vmware_guest: + hostname: "{{ vcenter_server }}" + username: "{{ vcenter_user }}" + password: "{{ vcenter_pass }}" + validate_certs: no + cluster: "DC1_C1" + name: "{{ vm_name }}" + state: absent + delegate_to: localhost + register: facts + + +Since Ansible utilizes the VMware API to perform actions, in this use case it will be connecting directly to the API from localhost. + +This means that playbooks will not be running from the vCenter or ESXi Server. + +Note that this play disables the ``gather_facts`` parameter, since you don't want to collect facts about localhost. + +You can run these modules against another server that would then connect to the API if localhost does not have access to vCenter. If so, the required Python modules will need to be installed on that target server. We recommend installing the latest version with pip: ``pip install Pyvmomi`` (as the OS packages are usually out of date and incompatible). 
+ +Before you begin, make sure you have: + +- Hostname of the ESXi server or vCenter server +- Username and password for the ESXi or vCenter server +- Name of the existing Virtual Machine you want to remove + +For now, you will be entering these directly, but in a more advanced playbook this can be abstracted out and stored in a more secure fashion using :ref:`ansible-vault` or using `Ansible Tower credentials <https://docs.ansible.com/ansible-tower/latest/html/userguide/credentials.html>`_. + +If your vCenter or ESXi server is not setup with proper CA certificates that can be verified from the Ansible server, then it is necessary to disable validation of these certificates by using the ``validate_certs`` parameter. To do this you need to set ``validate_certs=False`` in your playbook. + +The name of existing virtual machine will be used as input for ``vmware_guest`` module via ``name`` parameter. + + +What to expect +-------------- + +- You will not see any JSON output after this playbook completes as compared to other operations performed using ``vmware_guest`` module. + +.. code-block:: yaml + + { + "changed": true + } + +- State is changed to ``True`` which notifies that the virtual machine is removed from the VMware inventory. This can take some time depending upon your environment and network connectivity. + + +Troubleshooting +--------------- + +If your playbook fails: + +- Check if the values provided for username and password are correct. +- Check if the datacenter you provided is available. +- Check if the virtual machine specified exists and you have permissions to access the datastore. +- Ensure the full folder path you specified already exists. It will not create folders automatically for you. 
diff --git a/docs/docsite/rst/scenario_guides/vmware_scenarios/scenario_rename_vm.rst b/docs/docsite/rst/scenario_guides/vmware_scenarios/scenario_rename_vm.rst new file mode 100644 index 00000000..81272897 --- /dev/null +++ b/docs/docsite/rst/scenario_guides/vmware_scenarios/scenario_rename_vm.rst @@ -0,0 +1,173 @@ +.. _vmware_guest_rename_virtual_machine: + +********************************** +Rename an existing virtual machine +********************************** + +.. contents:: Topics + +Introduction +============ + +This guide will show you how to utilize Ansible to rename an existing virtual machine. + +Scenario Requirements +===================== + +* Software + + * Ansible 2.5 or later must be installed. + + * The Python module ``Pyvmomi`` must be installed on the Ansible control node (or Target host if not executing against localhost). + + * We recommend installing the latest version with pip: ``pip install Pyvmomi`` (as the OS packages are usually out of date and incompatible). + +* Hardware + + * At least one standalone ESXi server or + + * vCenter Server with at least one ESXi server + +* Access / Credentials + + * Ansible (or the target server) must have network access to the either vCenter server or the ESXi server + + * Username and Password for vCenter or ESXi server + + * Hosts in the ESXi cluster must have access to the datastore that the template resides on. + +Caveats +======= + +- All variable names and VMware object names are case sensitive. +- You need to use Python 2.7.9 version in order to use ``validate_certs`` option, as this version is capable of changing the SSL verification behaviours. + + +Example Description +=================== + +With the following Ansible playbook you can rename an existing virtual machine by changing the UUID. + +.. 
code-block:: yaml + + --- + - name: Rename virtual machine from old name to new name using UUID + gather_facts: no + vars_files: + - vcenter_vars.yml + vars: + ansible_python_interpreter: "/usr/bin/env python3" + hosts: localhost + tasks: + - set_fact: + vm_name: "old_vm_name" + new_vm_name: "new_vm_name" + datacenter: "DC1" + cluster_name: "DC1_C1" + + - name: Get VM "{{ vm_name }}" uuid + vmware_guest_facts: + hostname: "{{ vcenter_server }}" + username: "{{ vcenter_user }}" + password: "{{ vcenter_pass }}" + validate_certs: False + datacenter: "{{ datacenter }}" + folder: "/{{datacenter}}/vm" + name: "{{ vm_name }}" + register: vm_facts + + - name: Rename "{{ vm_name }}" to "{{ new_vm_name }}" + vmware_guest: + hostname: "{{ vcenter_server }}" + username: "{{ vcenter_user }}" + password: "{{ vcenter_pass }}" + validate_certs: False + cluster: "{{ cluster_name }}" + uuid: "{{ vm_facts.instance.hw_product_uuid }}" + name: "{{ new_vm_name }}" + +Since Ansible utilizes the VMware API to perform actions, in this use case it will be connecting directly to the API from localhost. + +This means that playbooks will not be running from the vCenter or ESXi Server. + +Note that this play disables the ``gather_facts`` parameter, since you don't want to collect facts about localhost. + +You can run these modules against another server that would then connect to the API if localhost does not have access to vCenter. If so, the required Python modules will need to be installed on that target server. We recommend installing the latest version with pip: ``pip install Pyvmomi`` (as the OS packages are usually out of date and incompatible). 
+ +Before you begin, make sure you have: + +- Hostname of the ESXi server or vCenter server +- Username and password for the ESXi or vCenter server +- The UUID of the existing Virtual Machine you want to rename + +For now, you will be entering these directly, but in a more advanced playbook this can be abstracted out and stored in a more secure fashion using :ref:`ansible-vault` or using `Ansible Tower credentials <https://docs.ansible.com/ansible-tower/latest/html/userguide/credentials.html>`_. + +If your vCenter or ESXi server is not setup with proper CA certificates that can be verified from the Ansible server, then it is necessary to disable validation of these certificates by using the ``validate_certs`` parameter. To do this you need to set ``validate_certs=False`` in your playbook. + +Now you need to supply the information about the existing virtual machine which will be renamed. For renaming virtual machine, ``vmware_guest`` module uses VMware UUID, which is unique across vCenter environment. This value is autogenerated and can not be changed. You will use ``vmware_guest_facts`` module to find virtual machine and get information about VMware UUID of the virtual machine. + +This value will be used input for ``vmware_guest`` module. Specify new name to virtual machine which conforms to all VMware requirements for naming conventions as ``name`` parameter. Also, provide ``uuid`` as the value of VMware UUID. + +What to expect +-------------- + +Running this playbook can take some time, depending on your environment and network connectivity. When the run is complete you will see + +.. 
code-block:: yaml + + { + "changed": true, + "instance": { + "annotation": "", + "current_snapshot": null, + "customvalues": {}, + "guest_consolidation_needed": false, + "guest_question": null, + "guest_tools_status": "guestToolsNotRunning", + "guest_tools_version": "10247", + "hw_cores_per_socket": 1, + "hw_datastores": ["ds_204_2"], + "hw_esxi_host": "10.x.x.x", + "hw_eth0": { + "addresstype": "assigned", + "ipaddresses": [], + "label": "Network adapter 1", + "macaddress": "00:50:56:8c:b8:42", + "macaddress_dash": "00-50-56-8c-b8-42", + "portgroup_key": "dvportgroup-31", + "portgroup_portkey": "15", + "summary": "DVSwitch: 50 0c 3a 69 df 78 2c 7b-6e 08 0a 89 e3 a6 31 17" + }, + "hw_files": ["[ds_204_2] old_vm_name/old_vm_name.vmx", "[ds_204_2] old_vm_name/old_vm_name.nvram", "[ds_204_2] old_vm_name/old_vm_name.vmsd", "[ds_204_2] old_vm_name/vmware.log", "[ds_204_2] old_vm_name/old_vm_name.vmdk"], + "hw_folder": "/DC1/vm", + "hw_guest_full_name": null, + "hw_guest_ha_state": null, + "hw_guest_id": null, + "hw_interfaces": ["eth0"], + "hw_is_template": false, + "hw_memtotal_mb": 1024, + "hw_name": "new_vm_name", + "hw_power_status": "poweredOff", + "hw_processor_count": 1, + "hw_product_uuid": "420cbebb-835b-980b-7050-8aea9b7b0a6d", + "hw_version": "vmx-13", + "instance_uuid": "500c60a6-b7b4-8ae5-970f-054905246a6f", + "ipv4": null, + "ipv6": null, + "module_hw": true, + "snapshots": [] + } + } + +confirming that you've renamed the virtual machine. + + +Troubleshooting +--------------- + +If your playbook fails: + +- Check if the values provided for username and password are correct. +- Check if the datacenter you provided is available. +- Check if the virtual machine specified exists and you have permissions to access the datastore. +- Ensure the full folder path you specified already exists. 
diff --git a/docs/docsite/rst/scenario_guides/vmware_scenarios/scenario_vmware_http.rst b/docs/docsite/rst/scenario_guides/vmware_scenarios/scenario_vmware_http.rst
new file mode 100644
index 00000000..e893c9d0
--- /dev/null
+++ b/docs/docsite/rst/scenario_guides/vmware_scenarios/scenario_vmware_http.rst
@@ -0,0 +1,161 @@
+.. _vmware_http_api_usage:
+
+***********************************
+Using VMware HTTP API using Ansible
+***********************************
+
+.. contents:: Topics
+
+Introduction
+============
+
+This guide will show you how to utilize Ansible to use VMware HTTP APIs to automate various tasks.
+
+Scenario Requirements
+=====================
+
+* Software
+
+    * Ansible 2.5 or later must be installed.
+
+    * We recommend installing the latest version with pip: ``pip install pyvmomi`` on the Ansible control node
+      (as the OS packages are usually out of date and incompatible) if you are planning to use any existing VMware modules.
+
+* Hardware
+
+    * vCenter Server 6.5 and above with at least one ESXi server
+
+* Access / Credentials
+
+    * Ansible (or the target server) must have network access to either the vCenter server or the ESXi server
+
+    * Username and Password for vCenter
+
+Caveats
+=======
+
+- All variable names and VMware object names are case sensitive.
+- You need to use Python 2.7.9 or a later version in order to use the ``validate_certs`` option, as this version is capable of changing the SSL verification behaviours.
+- VMware HTTP APIs are introduced in vSphere 6.5 and above, so the minimum level required is 6.5.
+- There are a very limited number of APIs exposed, so you may need to rely on XMLRPC based VMware modules.
+
+
+Example Description
+===================
+
+With the following Ansible playbook you can find the VMware ESXi host system(s) and can perform various tasks depending on the list of host systems.
+This is a generic example to show how Ansible can be utilized to consume VMware HTTP APIs.
+
+..
code-block:: yaml + + --- + - name: Example showing VMware HTTP API utilization + hosts: localhost + gather_facts: no + vars_files: + - vcenter_vars.yml + vars: + ansible_python_interpreter: "/usr/bin/env python3" + tasks: + - name: Login into vCenter and get cookies + uri: + url: https://{{ vcenter_server }}/rest/com/vmware/cis/session + force_basic_auth: yes + validate_certs: no + method: POST + user: "{{ vcenter_user }}" + password: "{{ vcenter_pass }}" + register: login + + - name: Get all hosts from vCenter using cookies from last task + uri: + url: https://{{ vcenter_server }}/rest/vcenter/host + force_basic_auth: yes + validate_certs: no + headers: + Cookie: "{{ login.set_cookie }}" + register: vchosts + + - name: Change Log level configuration of the given hostsystem + vmware_host_config_manager: + hostname: "{{ vcenter_server }}" + username: "{{ vcenter_user }}" + password: "{{ vcenter_pass }}" + esxi_hostname: "{{ item.name }}" + options: + 'Config.HostAgent.log.level': 'error' + validate_certs: no + loop: "{{ vchosts.json.value }}" + register: host_config_results + + +Since Ansible utilizes the VMware HTTP API using the ``uri`` module to perform actions, in this use case it will be connecting directly to the VMware HTTP API from localhost. + +This means that playbooks will not be running from the vCenter or ESXi Server. + +Note that this play disables the ``gather_facts`` parameter, since you don't want to collect facts about localhost. + +Before you begin, make sure you have: + +- Hostname of the vCenter server +- Username and password for the vCenter server +- Version of vCenter is at least 6.5 + +For now, you will be entering these directly, but in a more advanced playbook this can be abstracted out and stored in a more secure fashion using :ref:`ansible-vault` or using `Ansible Tower credentials <https://docs.ansible.com/ansible-tower/latest/html/userguide/credentials.html>`_. 
+ +If your vCenter server is not setup with proper CA certificates that can be verified from the Ansible server, then it is necessary to disable validation of these certificates by using the ``validate_certs`` parameter. To do this you need to set ``validate_certs=False`` in your playbook. + +As you can see, we are using the ``uri`` module in first task to login into the vCenter server and storing result in the ``login`` variable using register. In the second task, using cookies from the first task we are gathering information about the ESXi host system. + +Using this information, we are changing the ESXi host system's advance configuration. + +What to expect +-------------- + +Running this playbook can take some time, depending on your environment and network connectivity. When the run is complete you will see + +.. code-block:: yaml + + "results": [ + { + ... + "invocation": { + "module_args": { + "cluster_name": null, + "esxi_hostname": "10.76.33.226", + "hostname": "10.65.223.114", + "options": { + "Config.HostAgent.log.level": "error" + }, + "password": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER", + "port": 443, + "username": "administrator@vsphere.local", + "validate_certs": false + } + }, + "item": { + "connection_state": "CONNECTED", + "host": "host-21", + "name": "10.76.33.226", + "power_state": "POWERED_ON" + }, + "msg": "Config.HostAgent.log.level changed." + ... + } + ] + + +Troubleshooting +--------------- + +If your playbook fails: + +- Check if the values provided for username and password are correct. +- Check if you are using vCenter 6.5 and onwards to use this HTTP APIs. + +.. 
seealso:: + + `VMware vSphere and Ansible From Zero to Useful by @arielsanchezmor <https://www.youtube.com/watch?v=0_qwOKlBlo8>`_ + vBrownBag session video related to VMware HTTP APIs + `Sample Playbooks for using VMware HTTP APIs <https://github.com/Akasurde/ansible-vmware-http>`_ + GitHub repo for examples of Ansible playbook to manage VMware using HTTP APIs diff --git a/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_concepts.rst b/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_concepts.rst new file mode 100644 index 00000000..ce1e831a --- /dev/null +++ b/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_concepts.rst @@ -0,0 +1,45 @@ +.. _vmware_concepts: + +*************************** +Ansible for VMware Concepts +*************************** + +Some of these concepts are common to all uses of Ansible, including VMware automation; some are specific to VMware. You need to understand them to use Ansible for VMware automation. This introduction provides the background you need to follow the :ref:`scenarios<vmware_scenarios>` in this guide. + +.. contents:: + :local: + +Control Node +============ + +Any machine with Ansible installed. You can run commands and playbooks, invoking ``/usr/bin/ansible`` or ``/usr/bin/ansible-playbook``, from any control node. You can use any computer that has Python installed on it as a control node - laptops, shared desktops, and servers can all run Ansible. However, you cannot use a Windows machine as a control node. You can have multiple control nodes. + +Delegation +========== + +Delegation allows you to select the system that executes a given task. If you do not have ``pyVmomi`` installed on your control node, use the ``delegate_to`` keyword on VMware-specific tasks to execute them on any host where you have ``pyVmomi`` installed. + +Modules +======= + +The units of code Ansible executes. 
Each module has a particular use, from creating virtual machines on vCenter to managing distributed virtual switches in the vCenter environment. You can invoke a single module with a task, or invoke several different modules in a playbook. For an idea of how many modules Ansible includes, take a look at the :ref:`list of cloud modules<cloud_modules>`, which includes VMware modules.
+
+Playbooks
+=========
+
+Ordered lists of tasks, saved so you can run those tasks in that order repeatedly. Playbooks can include variables as well as tasks. Playbooks are written in YAML and are easy to read, write, share and understand.
+
+pyVmomi
+=======
+
+Ansible VMware modules are written on top of `pyVmomi <https://github.com/vmware/pyvmomi>`_. ``pyVmomi`` is the official Python SDK for the VMware vSphere API that allows users to manage ESX, ESXi, and vCenter infrastructure.
+
+You need to install this Python SDK on the host from which you want to invoke VMware automation. For example, if you are using the control node, then ``pyVmomi`` must be installed on the control node.
+
+If you are using any ``delegate_to`` host which is different from your control node, then you need to install ``pyVmomi`` on that ``delegate_to`` node.
+
+You can install pyVmomi using pip:
+
+.. code-block:: bash
+
+    $ pip install pyvmomi
diff --git a/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_external_doc_links.rst b/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_external_doc_links.rst
new file mode 100644
index 00000000..b50837dd
--- /dev/null
+++ b/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_external_doc_links.rst
@@ -0,0 +1,11 @@
+..
_vmware_external_doc_links: + +***************************** +Other useful VMware resources +***************************** + +* `VMware API and SDK Documentation <https://www.vmware.com/support/pubs/sdk_pubs.html>`_ +* `VCSIM test container image <https://quay.io/repository/ansible/vcenter-test-container>`_ +* `Ansible VMware community wiki page <https://github.com/ansible/community/wiki/VMware>`_ +* `VMware's official Guest Operating system customization matrix <https://partnerweb.vmware.com/programs/guestOS/guest-os-customization-matrix.pdf>`_ +* `VMware Compatibility Guide <https://www.vmware.com/resources/compatibility/search.php>`_ diff --git a/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_getting_started.rst b/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_getting_started.rst new file mode 100644 index 00000000..fc5691b7 --- /dev/null +++ b/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_getting_started.rst @@ -0,0 +1,9 @@ +:orphan: + +.. _vmware_ansible_getting_started: + +*************************************** +Getting Started with Ansible for VMware +*************************************** + +This will have a basic "hello world" scenario/walkthrough that gets the user introduced to the basics. diff --git a/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_intro.rst b/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_intro.rst new file mode 100644 index 00000000..7006e665 --- /dev/null +++ b/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_intro.rst @@ -0,0 +1,53 @@ +.. _vmware_ansible_intro: + +********************************** +Introduction to Ansible for VMware +********************************** + +.. contents:: Topics + +Introduction +============ + +Ansible provides various modules to manage VMware infrastructure, which includes datacenter, cluster, +host system and virtual machine. 
+
+Requirements
+============
+
+Ansible VMware modules are written on top of `pyVmomi <https://github.com/vmware/pyvmomi>`_.
+pyVmomi is the Python SDK for the VMware vSphere API that allows users to manage ESX, ESXi,
+and vCenter infrastructure. You can install pyVmomi using pip:
+
+.. code-block:: bash
+
+    $ pip install pyvmomi
+
+Ansible VMware modules that leverage the latest vSphere (6.0+) features use the `vSphere Automation Python SDK <https://github.com/vmware/vsphere-automation-sdk-python>`_. The vSphere Automation Python SDK also has client libraries, documentation, and sample code for VMware Cloud on AWS Console APIs, NSX VMware Cloud on AWS integration APIs, VMware Cloud on AWS site recovery APIs, NSX-T APIs.
+
+You can install the vSphere Automation Python SDK using pip:
+
+.. code-block:: bash
+
+    $ pip install --upgrade git+https://github.com/vmware/vsphere-automation-sdk-python.git
+
+Note:
+    Installing vSphere Automation Python SDK also installs ``pyvmomi``. A separate installation of ``pyvmomi`` is not required.
+
+vmware_guest module
+===================
+
+The :ref:`vmware_guest<vmware_guest_module>` module manages various operations related to virtual machines in the given ESXi or vCenter server.
+
+
+.. seealso::
+
+    `pyVmomi <https://github.com/vmware/pyvmomi>`_
+        The GitHub Page of pyVmomi
+    `pyVmomi Issue Tracker <https://github.com/vmware/pyvmomi/issues>`_
+        The issue tracker for the pyVmomi project
+    `govc <https://github.com/vmware/govmomi/tree/master/govc>`_
+        govc is a vSphere CLI built on top of govmomi
+    :ref:`working_with_playbooks`
+        An introduction to playbooks
+
diff --git a/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_inventory.rst b/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_inventory.rst
new file mode 100644
index 00000000..f942dd00
--- /dev/null
+++ b/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_inventory.rst
@@ -0,0 +1,90 @@
+..
_vmware_ansible_inventory: + +************************************* +Using VMware dynamic inventory plugin +************************************* + +.. contents:: Topics + +VMware Dynamic Inventory Plugin +=============================== + + +The best way to interact with your hosts is to use the VMware dynamic inventory plugin, which dynamically queries VMware APIs and +tells Ansible what nodes can be managed. + +Requirements +------------ + +To use the VMware dynamic inventory plugins, you must install `pyVmomi <https://github.com/vmware/pyvmomi>`_ +on your control node (the host running Ansible). + +To include tag-related information for the virtual machines in your dynamic inventory, you also need the `vSphere Automation SDK <https://code.vmware.com/web/sdk/65/vsphere-automation-python>`_, which supports REST API features like tagging and content libraries, on your control node. +You can install the ``vSphere Automation SDK`` following `these instructions <https://github.com/vmware/vsphere-automation-sdk-python#installing-required-python-packages>`_. + +.. code-block:: bash + + $ pip install pyvmomi + +To use this VMware dynamic inventory plugin, you need to enable it first by specifying the following in the ``ansible.cfg`` file: + +.. code-block:: ini + + [inventory] + enable_plugins = vmware_vm_inventory + +Then, create a file that ends in ``.vmware.yml`` or ``.vmware.yaml`` in your working directory. + +The ``vmware_vm_inventory`` script takes in the same authentication information as any VMware module. + +Here's an example of a valid inventory file: + +.. code-block:: yaml + + plugin: vmware_vm_inventory + strict: False + hostname: 10.65.223.31 + username: administrator@vsphere.local + password: Esxi@123$% + validate_certs: False + with_tags: True + + +Executing ``ansible-inventory --list -i <filename>.vmware.yml`` will create a list of VMware instances that are ready to be configured using Ansible. 
+ +Using vaulted configuration files +================================= + +Since the inventory configuration file contains vCenter password in plain text, a security risk, you may want to +encrypt your entire inventory configuration file. + +You can encrypt a valid inventory configuration file as follows: + +.. code-block:: bash + + $ ansible-vault encrypt <filename>.vmware.yml + New Vault password: + Confirm New Vault password: + Encryption successful + +And you can use this vaulted inventory configuration file using: + +.. code-block:: bash + + $ ansible-inventory -i filename.vmware.yml --list --vault-password-file=/path/to/vault_password_file + + +.. seealso:: + + `pyVmomi <https://github.com/vmware/pyvmomi>`_ + The GitHub Page of pyVmomi + `pyVmomi Issue Tracker <https://github.com/vmware/pyvmomi/issues>`_ + The issue tracker for the pyVmomi project + `vSphere Automation SDK GitHub Page <https://github.com/vmware/vsphere-automation-sdk-python>`_ + The GitHub Page of vSphere Automation SDK for Python + `vSphere Automation SDK Issue Tracker <https://github.com/vmware/vsphere-automation-sdk-python/issues>`_ + The issue tracker for vSphere Automation SDK for Python + :ref:`working_with_playbooks` + An introduction to playbooks + :ref:`playbooks_vault` + Using Vault in playbooks diff --git a/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_inventory_filters.rst b/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_inventory_filters.rst new file mode 100644 index 00000000..1208dcad --- /dev/null +++ b/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_inventory_filters.rst @@ -0,0 +1,216 @@ +.. _vmware_ansible_inventory_using_filters: + +*********************************************** +Using VMware dynamic inventory plugin - Filters +*********************************************** + +.. 
contents:: + :local: + +VMware dynamic inventory plugin - filtering VMware guests +========================================================= + + +VMware inventory plugin allows you to filter VMware guests using the ``filters`` configuration parameter. + +This section shows how you configure ``filters`` for the given VMware guest in the inventory. + +Requirements +------------ + +To use the VMware dynamic inventory plugins, you must install `pyVmomi <https://github.com/vmware/pyvmomi>`_ +on your control node (the host running Ansible). + +To include tag-related information for the virtual machines in your dynamic inventory, you also need the `vSphere Automation SDK <https://code.vmware.com/web/sdk/65/vsphere-automation-python>`_, which supports REST API features such as tagging and content libraries, on your control node. +You can install the ``vSphere Automation SDK`` following `these instructions <https://github.com/vmware/vsphere-automation-sdk-python#installing-required-python-packages>`_. + +.. code-block:: bash + + $ pip install pyvmomi + +Starting in Ansible 2.10, the VMware dynamic inventory plugin is available in the ``community.vmware`` collection included Ansible. +Alternately, to install the latest ``community.vmware`` collection: + +.. code-block:: bash + + $ ansible-galaxy collection install community.vmware + +To use this VMware dynamic inventory plugin: + +1. Enable it first by specifying the following in the ``ansible.cfg`` file: + +.. code-block:: ini + + [inventory] + enable_plugins = community.vmware.vmware_vm_inventory + +2. Create a file that ends in ``vmware.yml`` or ``vmware.yaml`` in your working directory. + +The ``vmware_vm_inventory`` inventory plugin takes in the same authentication information as any other VMware modules does. + +Let us assume we want to list all RHEL7 VMs with the power state as "poweredOn". A valid inventory file with filters for the given VMware guest looks as follows: + +.. 
code-block:: yaml + + plugin: community.vmware.vmware_vm_inventory + strict: False + hostname: 10.65.223.31 + username: administrator@vsphere.local + password: Esxi@123$% + validate_certs: False + with_tags: False + hostnames: + - config.name + filters: + - config.guestId == "rhel7_64Guest" + - summary.runtime.powerState == "poweredOn" + + +Here, we have configured two filters - + +* ``config.guestId`` is equal to ``rhel7_64Guest`` +* ``summary.runtime.powerState`` is equal to ``poweredOn`` + +This retrieves all the VMs which satisfy these two conditions and populates them in the inventory. +Notice that the conditions are combined using an ``and`` operation. + +Using ``or`` conditions in filters +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Let us assume you want filter RHEL7 and Ubuntu VMs. You can use multiple filters using ``or`` condition in your inventory file. + +A valid filter in the VMware inventory file for this example is: + +.. code-block:: yaml + + plugin: community.vmware.vmware_vm_inventory + strict: False + hostname: 10.65.223.31 + username: administrator@vsphere.local + password: Esxi@123$% + validate_certs: False + with_tags: False + hostnames: + - config.name + filters: + - config.guestId == "rhel7_64Guest" or config.guestId == "ubuntu64Guest" + + +You can check all allowed properties for filters for the given virtual machine at :ref:`vmware_inventory_vm_attributes`. + +If you are using the ``properties`` parameter with custom VM properties, make sure that you include all the properties used by filters as well in your VM property list. + +For example, if we want all RHEL7 and Ubuntu VMs that are poweredOn, you can use inventory file: + +.. 
code-block:: yaml

+    plugin: community.vmware.vmware_vm_inventory
+    strict: False
+    hostname: 10.65.223.31
+    username: administrator@vsphere.local
+    password: Esxi@123$%
+    validate_certs: False
+    with_tags: False
+    hostnames:
+    - 'config.name'
+    properties:
+    - 'config.name'
+    - 'config.guestId'
+    - 'guest.ipAddress'
+    - 'summary.runtime.powerState'
+    filters:
+    - config.guestId == "rhel7_64Guest" or config.guestId == "ubuntu64Guest"
+    - summary.runtime.powerState == "poweredOn"
+
+Here, we are using minimum VM properties, that is ``config.name``, ``config.guestId``, ``summary.runtime.powerState``, and ``guest.ipAddress``.
+
+* ``config.name`` is used by the ``hostnames`` parameter.
+* ``config.guestId`` and ``summary.runtime.powerState`` are used by the ``filters`` parameter.
+* ``guest.ipAddress`` is used for ``ansible_host`` internally by the inventory plugin.
+
+Using regular expression in filters
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Let us assume you want to filter VMs within a specific IP range. You can use a regular expression in ``filters`` in your inventory file.
+
+For example, if we want all VMs with an IP address starting with ``192.168``, you can use the following inventory file:
+
+.. code-block:: yaml
+
+    plugin: community.vmware.vmware_vm_inventory
+    strict: False
+    hostname: 10.65.223.31
+    username: administrator@vsphere.local
+    password: Esxi@123$%
+    validate_certs: False
+    with_tags: False
+    hostnames:
+    - 'config.name'
+    properties:
+    - 'config.name'
+    - 'config.guestId'
+    - 'guest.ipAddress'
+    - 'summary.runtime.powerState'
+    filters:
+    - guest.ipAddress is defined and guest.ipAddress is match('192.168.*')
+
+Here, we are using the ``guest.ipAddress`` VM property. This property is optional and depends upon VMware tools being installed on the VMs.
+We are using ``match`` to validate the regular expression for the given IP range.
+
+Executing ``ansible-inventory --list -i <filename>.vmware.yml`` creates a list of the virtual machines that are ready to be configured using Ansible.
+ +What to expect +-------------- + +You will notice that the inventory hosts are filtered depending on your ``filters`` section. + + +.. code-block:: yaml + + { + "_meta": { + "hostvars": { + "template_001": { + "config.name": "template_001", + "config.guestId": "ubuntu64Guest", + ... + "guest.toolsStatus": "toolsNotInstalled", + "summary.runtime.powerState": "poweredOn", + }, + "vm_8046": { + "config.name": "vm_8046", + "config.guestId": "rhel7_64Guest", + ... + "guest.toolsStatus": "toolsNotInstalled", + "summary.runtime.powerState": "poweredOn", + }, + ... + } + +Troubleshooting filters +----------------------- + +If the custom property specified in ``filters`` fails: + +- Check if the values provided for username and password are correct. +- Make sure it is a valid property, see :ref:`vmware_inventory_vm_attributes`. +- Use ``strict: True`` to get more information about the error. +- Please make sure that you are using latest version of the VMware collection. + + +.. seealso:: + + `pyVmomi <https://github.com/vmware/pyvmomi>`_ + The GitHub Page of pyVmomi + `pyVmomi Issue Tracker <https://github.com/vmware/pyvmomi/issues>`_ + The issue tracker for the pyVmomi project + `vSphere Automation SDK GitHub Page <https://github.com/vmware/vsphere-automation-sdk-python>`_ + The GitHub Page of vSphere Automation SDK for Python + `vSphere Automation SDK Issue Tracker <https://github.com/vmware/vsphere-automation-sdk-python/issues>`_ + The issue tracker for vSphere Automation SDK for Python + :ref:`vmware_inventory_vm_attributes` + Using Virtual machine attributes in VMware dynamic inventory plugin + :ref:`working_with_playbooks` + An introduction to playbooks + :ref:`playbooks_vault` + Using Vault in playbooks diff --git a/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_inventory_hostnames.rst b/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_inventory_hostnames.rst new file mode 100644 index 00000000..9d284562 --- /dev/null +++ 
b/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_inventory_hostnames.rst @@ -0,0 +1,128 @@ +.. _vmware_ansible_inventory_using_hostnames: + +************************************************* +Using VMware dynamic inventory plugin - Hostnames +************************************************* + +.. contents:: + :local: + +VMware dynamic inventory plugin - customizing hostnames +======================================================= + + +VMware inventory plugin allows you to configure hostnames using the ``hostnames`` configuration parameter. + +In this scenario guide we will see how you configure hostnames from the given VMware guest in the inventory. + +Requirements +------------ + +To use the VMware dynamic inventory plugins, you must install `pyVmomi <https://github.com/vmware/pyvmomi>`_ +on your control node (the host running Ansible). + +To include tag-related information for the virtual machines in your dynamic inventory, you also need the `vSphere Automation SDK <https://code.vmware.com/web/sdk/65/vsphere-automation-python>`_, which supports REST API features such as tagging and content libraries, on your control node. +You can install the ``vSphere Automation SDK`` following `these instructions <https://github.com/vmware/vsphere-automation-sdk-python#installing-required-python-packages>`_. + +.. code-block:: bash + + $ pip install pyvmomi + +Starting in Ansible 2.10, the VMware dynamic inventory plugin is available in the ``community.vmware`` collection included Ansible. +To install the latest ``community.vmware`` collection: + +.. code-block:: bash + + $ ansible-galaxy collection install community.vmware + +To use this VMware dynamic inventory plugin: + +1. Enable it first by specifying the following in the ``ansible.cfg`` file: + +.. code-block:: ini + + [inventory] + enable_plugins = community.vmware.vmware_vm_inventory + +2. Create a file that ends in ``vmware.yml`` or ``vmware.yaml`` in your working directory. 
+ +The ``vmware_vm_inventory`` inventory plugin takes in the same authentication information as any other VMware modules does. + +Here's an example of a valid inventory file with custom hostname for the given VMware guest: + +.. code-block:: yaml + + plugin: community.vmware.vmware_vm_inventory + strict: False + hostname: 10.65.223.31 + username: administrator@vsphere.local + password: Esxi@123$% + validate_certs: False + with_tags: False + hostnames: + - config.name + + +Here, we have configured a custom hostname by setting the ``hostnames`` parameter to ``config.name``. This will retrieve +the ``config.name`` property from the virtual machine and populate it in the inventory. + +You can check all allowed properties for the given virtual machine at :ref:`vmware_inventory_vm_attributes`. + +Executing ``ansible-inventory --list -i <filename>.vmware.yml`` creates a list of the virtual machines that are ready to be configured using Ansible. + +What to expect +-------------- + +You will notice that instead of default behavior of representing the hostname as ``config.name + _ + config.uuid``, +the inventory hosts show value as ``config.name``. + + +.. code-block:: yaml + + { + "_meta": { + "hostvars": { + "template_001": { + "config.name": "template_001", + "guest.toolsRunningStatus": "guestToolsNotRunning", + ... + "guest.toolsStatus": "toolsNotInstalled", + "name": "template_001" + }, + "vm_8046": { + "config.name": "vm_8046", + "guest.toolsRunningStatus": "guestToolsNotRunning", + ... + "guest.toolsStatus": "toolsNotInstalled", + "name": "vm_8046" + }, + ... + } + +Troubleshooting +--------------- + +If the custom property specified in ``hostnames`` fails: + +- Check if the values provided for username and password are correct. +- Make sure it is a valid property, see :ref:`vmware_inventory_vm_attributes`. +- Use ``strict: True`` to get more information about the error. +- Please make sure that you are using latest version VMware collection. + + +.. 
seealso:: + + `pyVmomi <https://github.com/vmware/pyvmomi>`_ + The GitHub Page of pyVmomi + `pyVmomi Issue Tracker <https://github.com/vmware/pyvmomi/issues>`_ + The issue tracker for the pyVmomi project + `vSphere Automation SDK GitHub Page <https://github.com/vmware/vsphere-automation-sdk-python>`_ + The GitHub Page of vSphere Automation SDK for Python + `vSphere Automation SDK Issue Tracker <https://github.com/vmware/vsphere-automation-sdk-python/issues>`_ + The issue tracker for vSphere Automation SDK for Python + :ref:`vmware_inventory_vm_attributes` + Using Virtual machine attributes in VMware dynamic inventory plugin + :ref:`working_with_playbooks` + An introduction to playbooks + :ref:`playbooks_vault` + Using Vault in playbooks diff --git a/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_inventory_vm_attributes.rst b/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_inventory_vm_attributes.rst new file mode 100644 index 00000000..089c13d7 --- /dev/null +++ b/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_inventory_vm_attributes.rst @@ -0,0 +1,1183 @@ +.. _vmware_inventory_vm_attributes: + +******************************************************************* +Using Virtual machine attributes in VMware dynamic inventory plugin +******************************************************************* + +.. contents:: Topics + +Virtual machine attributes +========================== + +You can use virtual machine properties which can be used to populate ``hostvars`` for the given +virtual machine in a VMware dynamic inventory plugin. + +capability +---------- + +This section describes settings for the runtime capabilities of the virtual machine. + +snapshotOperationsSupported (bool) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Indicates whether or not a virtual machine supports snapshot operations. + +multipleSnapshotsSupported (bool) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Indicates whether or not a virtual machine supports multiple snapshots. 
+ This value is not set when the virtual machine is unavailable, for instance, when it is being created or deleted. + +snapshotConfigSupported (bool) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Indicates whether or not a virtual machine supports snapshot config. + +poweredOffSnapshotsSupported (bool) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Indicates whether or not a virtual machine supports snapshot operations in ``poweredOff`` state. + +memorySnapshotsSupported (bool) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Indicates whether or not a virtual machine supports memory snapshots. + +revertToSnapshotSupported (bool) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Indicates whether or not a virtual machine supports reverting to a snapshot. + +quiescedSnapshotsSupported (bool) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Indicates whether or not a virtual machine supports quiesced snapshots. + +disableSnapshotsSupported (bool) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Indicates whether or not snapshots can be disabled. + +lockSnapshotsSupported (bool) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Indicates whether or not the snapshot tree can be locked. + +consolePreferencesSupported (bool) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Indicates whether console preferences can be set for the virtual machine. + +cpuFeatureMaskSupported (bool) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Indicates whether CPU feature requirements masks can be set for the virtual machine. + +s1AcpiManagementSupported (bool) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Indicates whether or not a virtual machine supports ACPI S1 settings management. + +settingScreenResolutionSupported (bool) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Indicates whether or not the virtual machine supports setting the screen resolution of the console window. + +toolsAutoUpdateSupported (bool) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Supports tools auto-update. + +vmNpivWwnSupported (bool) +^^^^^^^^^^^^^^^^^^^^^^^^^ + + Supports virtual machine NPIV WWN. 
+ +npivWwnOnNonRdmVmSupported (bool) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Supports assigning NPIV WWN to virtual machines that do not have RDM disks. + +vmNpivWwnDisableSupported (bool) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Indicates whether the NPIV disabling operation is supported on the virtual machine. + +vmNpivWwnUpdateSupported (bool) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Indicates whether the update of NPIV WWNs are supported on the virtual machine. + +swapPlacementSupported (bool) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Flag indicating whether the virtual machine has a configurable (swapfile placement policy). + +toolsSyncTimeSupported (bool) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Indicates whether asking tools to sync time with the host is supported. + +virtualMmuUsageSupported (bool) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Indicates whether or not the use of nested page table hardware support can be explicitly set. + +diskSharesSupported (bool) +^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Indicates whether resource settings for disks can be applied to the virtual machine. + +bootOptionsSupported (bool) +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Indicates whether boot options can be configured for the virtual machine. + +bootRetryOptionsSupported (bool) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Indicates whether automatic boot retry can be configured for the virtual machine. + +settingVideoRamSizeSupported (bool) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Flag indicating whether the video RAM size of the virtual machine can be configured. + +settingDisplayTopologySupported (bool) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Indicates whether or not the virtual machine supports setting the display topology of the console window. + +recordReplaySupported (bool) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Indicates whether record and replay functionality is supported on the virtual machine. 
 + +changeTrackingSupported (bool) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Indicates that change tracking is supported for virtual disks of the virtual machine. + However, even if change tracking is supported, it might not be available for all disks of the virtual machine. + For example, passthru raw disk mappings or disks backed by any Ver1BackingInfo cannot be tracked. + +multipleCoresPerSocketSupported (bool) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Indicates whether multiple virtual cores per socket is supported on the virtual machine. + +hostBasedReplicationSupported (bool) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Indicates that host based replication is supported on the virtual machine. + However, even if host based replication is supported, it might not be available for all disk types. + For example, passthru raw disk mappings can not be replicated. + +guestAutoLockSupported (bool) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Indicates whether or not guest autolock is supported on the virtual machine. + +memoryReservationLockSupported (bool) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Indicates whether :ref:`memory_reservation_locked_to_max` may be set to true for the virtual machine. + +featureRequirementSupported (bool) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Indicates whether the featureRequirement feature is supported. + +poweredOnMonitorTypeChangeSupported (bool) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Indicates whether a monitor type change is supported while the virtual machine is in the ``poweredOn`` state. + +seSparseDiskSupported (bool) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Indicates whether the virtual machine supports the Flex-SE (space-efficient, sparse) format for virtual disks. + +nestedHVSupported (bool) +^^^^^^^^^^^^^^^^^^^^^^^^ + + Indicates whether the virtual machine supports nested hardware-assisted virtualization. 
+ +vPMCSupported (bool) +^^^^^^^^^^^^^^^^^^^^ + + Indicates whether the virtual machine supports virtualized CPU performance counters. + + +config +------ + +This section describes the configuration settings of the virtual machine, including the name and UUID. +This property is set when a virtual machine is created or when the ``reconfigVM`` method is called. +The virtual machine configuration is not guaranteed to be available. +For example, the configuration information would be unavailable if the server is unable to access the virtual machine files on disk, and is often also unavailable during the initial phases of virtual machine creation. + +changeVersion (str) +^^^^^^^^^^^^^^^^^^^ + + The changeVersion is a unique identifier for a given version of the configuration. + Each change to the configuration updates this value. This is typically implemented as an ever increasing count or a time-stamp. + However, a client should always treat this as an opaque string. + +modified (datetime) +^^^^^^^^^^^^^^^^^^^ + + Last time a virtual machine's configuration was modified. + +name (str) +^^^^^^^^^^ + + Display name of the virtual machine. Any / (slash), \ (backslash), character used in this name element is escaped. Similarly, any % (percent) character used in this name element is escaped, unless it is used to start an escape sequence. A slash is escaped as %2F or %2f. A backslash is escaped as %5C or %5c, and a percent is escaped as %25. + +.. _guest_full_name: + +guestFullName (str) +^^^^^^^^^^^^^^^^^^^ + + This is the full name of the guest operating system for the virtual machine. For example: Windows 2000 Professional. See :ref:`alternate_guest_name`. + +version (str) +^^^^^^^^^^^^^ + + The version string for the virtual machine. + +uuid (str) +^^^^^^^^^^ + + 128-bit SMBIOS UUID of a virtual machine represented as a hexadecimal string in "12345678-abcd-1234-cdef-123456789abc" format. 
 + +instanceUuid (str, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + VirtualCenter-specific 128-bit UUID of a virtual machine, represented as a hexadecimal string. This identifier is used by VirtualCenter to uniquely identify all virtual machine instances, including those that may share the same SMBIOS UUID. + +npivNodeWorldWideName (long, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + A 64-bit node WWN (World Wide Name). + +npivPortWorldWideName (long, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + A 64-bit port WWN (World Wide Name). + +npivWorldWideNameType (str, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + The source that provides/generates the assigned WWNs. + +npivDesiredNodeWwns (short, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + The NPIV node WWNs to be extended from the original list of WWN numbers. + +npivDesiredPortWwns (short, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + The NPIV port WWNs to be extended from the original list of WWN numbers. + +npivTemporaryDisabled (bool, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + This property is used to enable or disable the NPIV capability on a desired virtual machine on a temporary basis. + +npivOnNonRdmDisks (bool, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + This property is used to check whether the NPIV can be enabled on the Virtual machine with non-rdm disks in the configuration, so this is potentially not enabling npiv on vmfs disks. + Also this property is used to check whether RDM is required to generate WWNs for a virtual machine. + +locationId (str, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Hash incorporating the virtual machine's config file location and the UUID of the host assigned to run the virtual machine. + +template (bool) +^^^^^^^^^^^^^^^ + + Flag indicating whether or not a virtual machine is a template. + +guestId (str) +^^^^^^^^^^^^^ + + Guest operating system configured on a virtual machine. + +.. 
_alternate_guest_name: + +alternateGuestName (str) +^^^^^^^^^^^^^^^^^^^^^^^^ + + Used as display name for the operating system if guestId is ``other`` or ``other-64``. See :ref:`guest_full_name`. + +annotation (str, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Description for the virtual machine. + +files (vim.vm.FileInfo) +^^^^^^^^^^^^^^^^^^^^^^^ + + Information about the files associated with a virtual machine. + This information does not include files for specific virtual disks or snapshots. + +tools (vim.vm.ToolsConfigInfo, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Configuration of VMware Tools running in the guest operating system. + +flags (vim.vm.FlagInfo) +^^^^^^^^^^^^^^^^^^^^^^^ + + Additional flags for a virtual machine. + +consolePreferences (vim.vm.ConsolePreferences, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Legacy console viewer preferences when doing power operations. + +defaultPowerOps (vim.vm.DefaultPowerOpInfo) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Configuration of default power operations. + +hardware (vim.vm.VirtualHardware) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Processor, memory, and virtual devices for a virtual machine. + +cpuAllocation (vim.ResourceAllocationInfo, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Resource limits for CPU. + +memoryAllocation (vim.ResourceAllocationInfo, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Resource limits for memory. + +latencySensitivity (vim.LatencySensitivity, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + The latency-sensitivity of the virtual machine. + +memoryHotAddEnabled (bool, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Whether memory can be added while the virtual machine is running. + +cpuHotAddEnabled (bool, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Whether virtual processors can be added while the virtual machine is running. 
 + +cpuHotRemoveEnabled (bool, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Whether virtual processors can be removed while the virtual machine is running. + +hotPlugMemoryLimit (long, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + The maximum amount of memory, in MB, that can be added to a running virtual machine. + +hotPlugMemoryIncrementSize (long, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Memory, in MB that can be added to a running virtual machine. + +cpuAffinity (vim.vm.AffinityInfo, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Affinity settings for CPU. + +memoryAffinity (vim.vm.AffinityInfo, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Affinity settings for memory. + +networkShaper (vim.vm.NetworkShaperInfo, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Resource limits for network. + +extraConfig (vim.option.OptionValue, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Additional configuration information for the virtual machine. + +cpuFeatureMask (vim.host.CpuIdInfo, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Specifies CPU feature compatibility masks that override the defaults from the ``GuestOsDescriptor`` of the virtual machine's guest OS. + +datastoreUrl (vim.vm.ConfigInfo.DatastoreUrlPair, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Enumerates the set of datastores that the virtual machine is stored on, as well as the URL identification for each of these. + +swapPlacement (str, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Virtual machine swapfile placement policy. + +bootOptions (vim.vm.BootOptions, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Configuration options for the boot behavior of the virtual machine. + +ftInfo (vim.vm.FaultToleranceConfigInfo, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Fault tolerance settings for the virtual machine. 
+ +vAppConfig (vim.vApp.VmConfigInfo, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + vApp meta-data for the virtual machine. + +vAssertsEnabled (bool, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Indicates whether user-configured virtual asserts will be triggered during virtual machine replay. + +changeTrackingEnabled (bool, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Indicates whether changed block tracking for the virtual machine's disks is active. + +firmware (str, optional) +^^^^^^^^^^^^^^^^^^^^^^^^ + + Information about firmware type for the virtual machine. + +maxMksConnections (int, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Indicates the maximum number of active remote display connections that the virtual machine will support. + +guestAutoLockEnabled (bool, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Indicates whether the guest operating system will logout any active sessions whenever there are no remote display connections open to the virtual machine. + +managedBy (vim.ext.ManagedByInfo, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Specifies that the virtual machine is managed by a VC Extension. + +.. _memory_reservation_locked_to_max: + +memoryReservationLockedToMax (bool, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + If set true, memory resource reservation for the virtual machine will always be equal to the virtual machine's memory size; increases in memory size will be rejected when a corresponding reservation increase is not possible. + +initialOverhead (vim.vm.ConfigInfo.OverheadInfo), optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Set of values to be used only to perform admission control when determining if a host has sufficient resources for the virtual machine to power on. + +nestedHVEnabled (bool, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Indicates whether the virtual machine is configured to use nested hardware-assisted virtualization. 
+ +vPMCEnabled (bool, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Indicates whether the virtual machine have virtual CPU performance counters enabled. + +scheduledHardwareUpgradeInfo (vim.vm.ScheduledHardwareUpgradeInfo, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Configuration of scheduled hardware upgrades and result from last attempt to run scheduled hardware upgrade. + +vFlashCacheReservation (long, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Specifies the total vFlash resource reservation for the vFlash caches associated with the virtual machine's virtual disks, in bytes. + +layout +------ + +Detailed information about the files that comprise the virtual machine. + +configFile (str, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^ + + A list of files that makes up the configuration of the virtual machine (excluding the .vmx file, since that file is represented in the FileInfo). + These are relative paths from the configuration directory. + A slash is always used as a separator. + This list will typically include the NVRAM file, but could also include other meta-data files. + +logFile (str, optional) +^^^^^^^^^^^^^^^^^^^^^^^ + + A list of files stored in the virtual machine's log directory. + These are relative paths from the ``logDirectory``. + A slash is always used as a separator. + +disk (vim.vm.FileLayout.DiskLayout, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Files making up each virtual disk. + +snapshot (vim.vm.FileLayout.SnapshotLayout, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Files of each snapshot. + +swapFile (str, optional) +^^^^^^^^^^^^^^^^^^^^^^^^ + + The swapfile specific to the virtual machine, if any. This is a complete datastore path, not a relative path. + + +layoutEx +-------- + +Detailed information about the files that comprise the virtual machine. 
 + +file (vim.vm.FileLayoutEx.FileInfo, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Information about all the files that constitute the virtual machine including configuration files, disks, swap file, suspend file, log files, core files, memory file and so on. + +disk (vim.vm.FileLayoutEx.DiskLayout, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Layout of each virtual disk attached to the virtual machine. + For a virtual machine with snapshots, this property gives only those disks that are attached to it at the current point of running. + +snapshot (vim.vm.FileLayoutEx.SnapshotLayout, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Layout of each snapshot of the virtual machine. + +timestamp (datetime) +^^^^^^^^^^^^^^^^^^^^ + + Time when values in this structure were last updated. + +storage (vim.vm.StorageInfo) +---------------------------- + +Storage space used by the virtual machine, split by datastore. + +perDatastoreUsage (vim.vm.StorageInfo.UsageOnDatastore, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Storage space used by the virtual machine on all datastores that it is located on. + Total storage space committed to the virtual machine across all datastores is simply an aggregate of the property ``committed``. + +timestamp (datetime) +^^^^^^^^^^^^^^^^^^^^ + + Time when values in this structure were last updated. + +environmentBrowser (vim.EnvironmentBrowser) +------------------------------------------- + +The current virtual machine's environment browser object. +This contains information on all the configurations that can be used on the virtual machine. +This is identical to the environment browser on the ComputeResource to which the virtual machine belongs. + +datastoreBrowser (vim.host.DatastoreBrowser) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + DatastoreBrowser to browse datastores that are available on this entity. 
+ +resourcePool (vim.ResourcePool) +------------------------------- + +The current resource pool that specifies resource allocation for the virtual machine. +This property is set when a virtual machine is created or associated with a different resource pool. +Returns null if the virtual machine is a template or the session has no access to the resource pool. + +summary (vim.ResourcePool.Summary) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Basic information about a resource pool. + +runtime (vim.ResourcePool.RuntimeInfo) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Runtime information about a resource pool. + +owner (vim.ComputeResource) +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + The ComputeResource to which this set of one or more nested resource pools belong. + +resourcePool (vim.ResourcePool) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + The set of child resource pools. + +vm (vim.VirtualMachine) +^^^^^^^^^^^^^^^^^^^^^^^ + + The set of virtual machines associated with this resource pool. + +config (vim.ResourceConfigSpec) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Configuration of this resource pool. + +childConfiguration (vim.ResourceConfigSpec) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + The resource configuration of all direct children (VirtualMachine and ResourcePool) of this resource group. + +parentVApp (vim.ManagedEntity) +------------------------------ + +Reference to the parent vApp. + +parent (vim.ManagedEntity) +^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Parent of this entity. + This value is null for the root object and for (VirtualMachine) objects that are part of a (VirtualApp). + +customValue (vim.CustomFieldsManager.Value) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Custom field values. + +overallStatus (vim.ManagedEntity.Status) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + General health of this managed entity. 
+ +configStatus (vim.ManagedEntity.Status) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + The configStatus indicates whether or not the system has detected a configuration issue involving this entity. + For example, it might have detected a duplicate IP address or MAC address, or a host in a cluster might be out of ``compliance.property``. + +configIssue (vim.event.Event) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Current configuration issues that have been detected for this entity. + +effectiveRole (int) +^^^^^^^^^^^^^^^^^^^ + + Access rights the current session has to this entity. + +permission (vim.AuthorizationManager.Permission) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + List of permissions defined for this entity. + +name (str) +^^^^^^^^^^ + + Name of this entity, unique relative to its parent. + Any / (slash), \ (backslash), character used in this name element will be escaped. + Similarly, any % (percent) character used in this name element will be escaped, unless it is used to start an escape sequence. + A slash is escaped as %2F or %2f. A backslash is escaped as %5C or %5c, and a percent is escaped as %25. + +disabledMethod (str) +^^^^^^^^^^^^^^^^^^^^ + + List of operations that are disabled, given the current runtime state of the entity. + For example, a power-on operation always fails if a virtual machine is already powered on. + +recentTask (vim.Task) +^^^^^^^^^^^^^^^^^^^^^ + + The set of recent tasks operating on this managed entity. + A task in this list could be in one of the four states: pending, running, success or error. + +declaredAlarmState (vim.alarm.AlarmState) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + A set of alarm states for alarms that apply to this managed entity. + +triggeredAlarmState (vim.alarm.AlarmState) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + A set of alarm states for alarms triggered by this entity or by its descendants. 
+ +alarmActionsEnabled (bool) +^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Whether alarm actions are enabled for this entity. True if enabled; false otherwise. + +tag (vim.Tag) +^^^^^^^^^^^^^ + + The set of tags associated with this managed entity. Experimental. Subject to change. + +resourceConfig (vim.ResourceConfigSpec) +--------------------------------------- + + The resource configuration for a virtual machine. + +entity (vim.ManagedEntity, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Reference to the entity with this resource specification: either a VirtualMachine or a ResourcePool. + +changeVersion (str, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + The changeVersion is a unique identifier for a given version of the configuration. Each change to the configuration will update this value. + This is typically implemented as an ever increasing count or a time-stamp. + + +lastModified (datetime, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Timestamp when the resources were last modified. This is ignored when the object is used to update a configuration. + +cpuAllocation (vim.ResourceAllocationInfo) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Resource allocation for CPU. + +memoryAllocation (vim.ResourceAllocationInfo) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Resource allocation for memory. + +runtime (vim.vm.RuntimeInfo) +---------------------------- + +Execution state and history for the virtual machine. + +device (vim.vm.DeviceRuntimeInfo, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Per-device runtime info. This array will be empty if the host software does not provide runtime info for any of the device types currently in use by the virtual machine. + +host (vim.HostSystem, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + The host that is responsible for running a virtual machine. + This property is null if the virtual machine is not running and is not assigned to run on a particular host. 
+ +connectionState (vim.VirtualMachine.ConnectionState) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Indicates whether or not the virtual machine is available for management. + +powerState (vim.VirtualMachine.PowerState) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + The current power state of the virtual machine. + +faultToleranceState (vim.VirtualMachine.FaultToleranceState) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + The fault tolerance state of the virtual machine. + +dasVmProtection (vim.vm.RuntimeInfo.DasProtectionState, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + The vSphere HA protection state for a virtual machine. + Property is unset if vSphere HA is not enabled. + +toolsInstallerMounted (bool) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Flag to indicate whether or not the VMware Tools installer is mounted as a CD-ROM. + +suspendTime (datetime, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + The timestamp when the virtual machine was most recently suspended. + This property is updated every time the virtual machine is suspended. + +bootTime (datetime, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + The timestamp when the virtual machine was most recently powered on. + This property is updated when the virtual machine is powered on from the poweredOff state, and is cleared when the virtual machine is powered off. + This property is not updated when a virtual machine is resumed from a suspended state. + +suspendInterval (long, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + The total time the virtual machine has been suspended since it was initially powered on. + This time excludes the current period, if the virtual machine is currently suspended. + This property is updated when the virtual machine resumes, and is reset to zero when the virtual machine is powered off. 
+ +question (vim.vm.QuestionInfo, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + The current question, if any, that is blocking the virtual machine's execution. + +memoryOverhead (long, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + The amount of memory resource (in bytes) that will be used by the virtual machine above its guest memory requirements. + This value is set if and only if the virtual machine is registered on a host that supports memory resource allocation features. + For powered off VMs, this is the minimum overhead required to power on the VM on the registered host. + For powered on VMs, this is the current overhead reservation, a value which is almost always larger than the minimum overhead, and which grows with time. + +maxCpuUsage (int, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Current upper-bound on CPU usage. + The upper-bound is based on the host the virtual machine is current running on, as well as limits configured on the virtual machine itself or any parent resource pool. + Valid while the virtual machine is running. + +maxMemoryUsage (int, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Current upper-bound on memory usage. + The upper-bound is based on memory configuration of the virtual machine, as well as limits configured on the virtual machine itself or any parent resource pool. + Valid while the virtual machine is running. + +numMksConnections (int) +^^^^^^^^^^^^^^^^^^^^^^^ + + Number of active MKS connections to the virtual machine. + +recordReplayState (vim.VirtualMachine.RecordReplayState) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Record / replay state of the virtual machine. + +cleanPowerOff (bool, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + For a powered off virtual machine, indicates whether the virtual machine's last shutdown was an orderly power off or not. + Unset if the virtual machine is running or suspended. 
+ +needSecondaryReason (str, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + If set, indicates the reason the virtual machine needs a secondary. + +onlineStandby (bool) +^^^^^^^^^^^^^^^^^^^^ + + This property indicates whether the guest has gone into one of the s1, s2 or s3 standby modes. False indicates the guest is awake. + +minRequiredEVCModeKey (str, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + For a powered-on or suspended virtual machine in a cluster with Enhanced VMotion Compatibility (EVC) enabled, this identifies the least-featured EVC mode (among those for the appropriate CPU vendor) that could admit the virtual machine. + This property will be unset if the virtual machine is powered off or is not in an EVC cluster. + This property may be used as a general indicator of the CPU feature baseline currently in use by the virtual machine. + However, the virtual machine may be suppressing some of the features present in the CPU feature baseline of the indicated mode, either explicitly (in the virtual machine's configured ``cpuFeatureMask``) or implicitly (in the default masks for the ``GuestOsDescriptor`` appropriate for the virtual machine's configured guest OS). + +consolidationNeeded (bool) +^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Whether any disk of the virtual machine requires consolidation. + This can happen for example when a snapshot is deleted but its associated disk is not committed back to the base disk. + +offlineFeatureRequirement (vim.vm.FeatureRequirement, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + These requirements must have equivalent host capabilities ``featureCapability`` in order to power on. + +featureRequirement (vim.vm.FeatureRequirement, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + These requirements must have equivalent host capabilities ``featureCapability`` in order to power on, resume, or migrate to the host. 
+ +featureMask (vim.host.FeatureMask, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + The masks applied to an individual virtual machine as a result of its configuration. + +vFlashCacheAllocation (long, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Specifies the total allocated vFlash resource for the vFlash caches associated with VM's VMDKs when VM is powered on, in bytes. + + +guest (vim.vm.GuestInfo) +------------------------ + +Information about VMware Tools and about the virtual machine from the perspective of VMware Tools. +Information about the guest operating system is available in VirtualCenter. +Guest operating system information reflects the last known state of the virtual machine. +For powered on machines, this is current information. +For powered off machines, this is the last recorded state before the virtual machine was powered off. + +toolsStatus (vim.vm.GuestInfo.ToolsStatus, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Current status of VMware Tools in the guest operating system, if known. + +toolsVersionStatus (str, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Current version status of VMware Tools in the guest operating system, if known. + +toolsVersionStatus2 (str, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Current version status of VMware Tools in the guest operating system, if known. + +toolsRunningStatus (str, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Current running status of VMware Tools in the guest operating system, if known. + +toolsVersion (str, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Current version of VMware Tools, if known. + +guestId (str, optional) +^^^^^^^^^^^^^^^^^^^^^^^ + + Guest operating system identifier (short name), if known. + +guestFamily (str, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Guest operating system family, if known. + +guestFullName (str, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + See :ref:`guest_full_name`. 
 + +hostName (str, optional) +^^^^^^^^^^^^^^^^^^^^^^^^ + + Hostname of the guest operating system, if known. + +ipAddress (str, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^ + + Primary IP address assigned to the guest operating system, if known. + +net (vim.vm.GuestInfo.NicInfo, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Guest information about network adapters, if known. + +ipStack (vim.vm.GuestInfo.StackInfo, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Guest information about IP networking stack, if known. + +disk (vim.vm.GuestInfo.DiskInfo, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Guest information about disks. + You can obtain Linux guest disk information for the following file system types only: Ext2, Ext3, Ext4, ReiserFS, ZFS, NTFS, VFAT, UFS, PCFS, HFS, and MS-DOS. + +screen (vim.vm.GuestInfo.ScreenInfo, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Guest screen resolution info, if known. + +guestState (str) +^^^^^^^^^^^^^^^^ + + Operation mode of guest operating system. + +appHeartbeatStatus (str, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Application heartbeat status. + +appState (str, optional) +^^^^^^^^^^^^^^^^^^^^^^^^ + + Application state. + If vSphere HA is enabled and the vm is configured for Application Monitoring and this field's value is ``appStateNeedReset`` then HA will attempt to immediately reset the virtual machine. + There are some system conditions which may delay the immediate reset. + The immediate reset will be performed as soon as allowed by vSphere HA and ESX. + If during these conditions the value is changed to ``appStateOk`` the reset will be cancelled. + +guestOperationsReady (bool, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Guest Operations availability. If true, the virtual machine is ready to process guest operations. 
+ +interactiveGuestOperationsReady (bool, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Interactive Guest Operations availability. If true, the virtual machine is ready to process guest operations as the user interacting with the guest desktop. + +generationInfo (vim.vm.GuestInfo.NamespaceGenerationInfo, privilege: VirtualMachine.Namespace.EventNotify, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + A list of namespaces and their corresponding generation numbers. Only namespaces with non-zero ``maxSizeEventsFromGuest`` are guaranteed to be present here. + + +summary (vim.vm.Summary) +------------------------ + + Basic information about the virtual machine. + +vm (vim.VirtualMachine, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Reference to the virtual machine managed object. + +runtime (vim.vm.RuntimeInfo) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Runtime and state information of a running virtual machine. + Most of this information is also available when a virtual machine is powered off. + In that case, it contains information from the last run, if available. + +guest (vim.vm.Summary.GuestSummary, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Guest operating system and VMware Tools information. + +config (vim.vm.Summary.ConfigSummary) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Basic configuration information about the virtual machine. + This information is not available when the virtual machine is unavailable, for instance, when it is being created or deleted. + +storage (vim.vm.Summary.StorageSummary, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Storage information of the virtual machine. + +quickStats (vim.vm.Summary.QuickStats) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + A set of statistics that are typically updated with near real-time regularity. 
+ +overallStatus (vim.ManagedEntity.Status) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Overall alarm status on this node. + +customValue (vim.CustomFieldsManager.Value, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Custom field values. + + +datastore (vim.Datastore) +------------------------- + + A collection of references to the subset of datastore objects in the datacenter that is used by the virtual machine. + +info (vim.Datastore.Info) +^^^^^^^^^^^^^^^^^^^^^^^^^ + + Specific information about the datastore. + +summary (vim.Datastore.Summary) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Global properties of the datastore. + +host (vim.Datastore.HostMount) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Hosts attached to this datastore. + +vm (vim.VirtualMachine) +^^^^^^^^^^^^^^^^^^^^^^^ + + Virtual machines stored on this datastore. + +browser (vim.host.DatastoreBrowser) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + DatastoreBrowser used to browse this datastore. + +capability (vim.Datastore.Capability) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Capabilities of this datastore. + +iormConfiguration (vim.StorageResourceManager.IORMConfigInfo) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Configuration of storage I/O resource management for the datastore. + Currently VMware only support storage I/O resource management on VMFS volumes of a datastore. + This configuration may not be available if the datastore is not accessible from any host, or if the datastore does not have VMFS volume. + +network (vim.Network) +--------------------- + + A collection of references to the subset of network objects in the datacenter that is used by the virtual machine. + +name (str) +^^^^^^^^^^ + + Name of this network. + +summary (vim.Network.Summary) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Properties of a network. + +host (vim.HostSystem) +^^^^^^^^^^^^^^^^^^^^^ + + Hosts attached to this network. 
+ +vm (vim.VirtualMachine) +^^^^^^^^^^^^^^^^^^^^^^^ + + Virtual machines using this network. + + +snapshot (vim.vm.SnapshotInfo) +------------------------------- + +Current snapshot and tree. +The property is valid if snapshots have been created for the virtual machine. + +currentSnapshot (vim.vm.Snapshot, optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Current snapshot of the virtual machineThis property is set by calling ``Snapshot.revert`` or ``VirtualMachine.createSnapshot``. + This property will be empty when the working snapshot is at the root of the snapshot tree. + +rootSnapshotList (vim.vm.SnapshotTree) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Data for the entire set of snapshots for one virtual machine. + +rootSnapshot (vim.vm.Snapshot) +------------------------------ + +The roots of all snapshot trees for the virtual machine. + +config (vim.vm.ConfigInfo) +^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Information about the configuration of the virtual machine when this snapshot was taken. + The datastore paths for the virtual machine disks point to the head of the disk chain that represents the disk at this given snapshot. + +childSnapshot (vim.vm.Snapshot) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + All snapshots for which this snapshot is the parent. + +guestHeartbeatStatus (vim.ManagedEntity.Status) +----------------------------------------------- + + The guest heartbeat. + +.. 
seealso:: + + `pyVmomi <https://github.com/vmware/pyvmomi>`_ + The GitHub Page of pyVmomi + `pyVmomi Issue Tracker <https://github.com/vmware/pyvmomi/issues>`_ + The issue tracker for the pyVmomi project + rst/scenario_guides/guide_vmware.rst + The GitHub Page of vSphere Automation SDK for Python + `vSphere Automation SDK Issue Tracker <https://github.com/vmware/vsphere-automation-sdk-python/issues>`_ + The issue tracker for vSphere Automation SDK for Python + :ref:`working_with_playbooks` + An introduction to playbooks + :ref:`playbooks_vault` + Using Vault in playbooks diff --git a/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_module_reference.rst b/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_module_reference.rst new file mode 100644 index 00000000..3c7de1dd --- /dev/null +++ b/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_module_reference.rst @@ -0,0 +1,9 @@ +:orphan: + +.. _vmware_ansible_module_index: + +*************************** +Ansible VMware Module Guide +*************************** + +This will be a listing similar to the module index in our core docs. diff --git a/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_requirements.rst b/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_requirements.rst new file mode 100644 index 00000000..45e3ec8f --- /dev/null +++ b/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_requirements.rst @@ -0,0 +1,44 @@ +.. _vmware_requirements: + +******************** +VMware Prerequisites +******************** + +.. contents:: + :local: + +Installing SSL Certificates +=========================== + +All vCenter and ESXi servers require SSL encryption on all connections to enforce secure communication. You must enable SSL encryption for Ansible by installing the server's SSL certificates on your Ansible control node or delegate node. 
+ +If the SSL certificate of your vCenter or ESXi server is not correctly installed on your Ansible control node, you will see the following warning when using Ansible VMware modules: + +``Unable to connect to vCenter or ESXi API at xx.xx.xx.xx on TCP/443: [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed (_ssl.c:777)`` + +To install the SSL certificate for your VMware server, and run your Ansible VMware modules in encrypted mode, please follow the instructions for the server you are running with VMware. + +Installing vCenter SSL certificates for Ansible +----------------------------------------------- + +* From any web browser, go to the base URL of the vCenter Server without port number like ``https://vcenter-domain.example.com`` + +* Click the "Download trusted root CA certificates" link at the bottom of the grey box on the right and download the file. + +* Change the extension of the file to .zip. The file is a ZIP file of all root certificates and all CRLs. + +* Extract the contents of the zip file. The extracted directory contains a ``.certs`` directory that contains two types of files. Files with a number as the extension (.0, .1, and so on) are root certificates. + +* Install the certificate files are trusted certificates by the process that is appropriate for your operating system. + + +Installing ESXi SSL certificates for Ansible +-------------------------------------------- + +* Enable SSH Service on ESXi either by using Ansible VMware module `vmware_host_service_manager <https://github.com/ansible-collections/vmware/blob/main/plugins/modules/vmware_host_config_manager.py>`_ or manually using vSphere Web interface. + +* SSH to ESXi server using administrative credentials, and navigate to directory ``/etc/vmware/ssl`` + +* Secure copy (SCP) ``rui.crt`` located in ``/etc/vmware/ssl`` directory to Ansible control node. + +* Install the certificate file by the process that is appropriate for your operating system. 
diff --git a/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_scenarios.rst b/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_scenarios.rst new file mode 100644 index 00000000..b044740b --- /dev/null +++ b/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_scenarios.rst @@ -0,0 +1,16 @@ +.. _vmware_scenarios: + +**************************** +Ansible for VMware Scenarios +**************************** + +These scenarios teach you how to accomplish common VMware tasks using Ansible. To get started, please select the task you want to accomplish. + +.. toctree:: + :maxdepth: 1 + + scenario_clone_template + scenario_rename_vm + scenario_remove_vm + scenario_find_vm_folder + scenario_vmware_http diff --git a/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_troubleshooting.rst b/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_troubleshooting.rst new file mode 100644 index 00000000..3ca5eac2 --- /dev/null +++ b/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_troubleshooting.rst @@ -0,0 +1,102 @@ +.. _vmware_troubleshooting: + +********************************** +Troubleshooting Ansible for VMware +********************************** + +.. contents:: Topics + +This section lists things that can go wrong and possible ways to fix them. + +Debugging Ansible for VMware +============================ + +When debugging or creating a new issue, you will need information about your VMware infrastructure. You can get this information using +`govc <https://github.com/vmware/govmomi/tree/master/govc>`_, For example: + + +.. 
code-block:: bash + + $ export GOVC_USERNAME=ESXI_OR_VCENTER_USERNAME + $ export GOVC_PASSWORD=ESXI_OR_VCENTER_PASSWORD + $ export GOVC_URL=https://ESXI_OR_VCENTER_HOSTNAME:443 + $ govc find / + +Known issues with Ansible for VMware +==================================== + + +Network settings with vmware_guest in Ubuntu 18.04 +-------------------------------------------------- + +Setting the network with ``vmware_guest`` in Ubuntu 18.04 is known to be broken, due to missing support for ``netplan`` in the ``open-vm-tools``. +This issue is tracked via: + +* https://github.com/vmware/open-vm-tools/issues/240 +* https://github.com/ansible/ansible/issues/41133 + +Potential Workarounds +^^^^^^^^^^^^^^^^^^^^^ + +There are several workarounds for this issue. + +1) Modify the Ubuntu 18.04 images and installing ``ifupdown`` in them via ``sudo apt install ifupdown``. + If so you need to remove ``netplan`` via ``sudo apt remove netplan.io`` and you need stop ``systemd-networkd`` via ``sudo systemctl disable systemctl-networkd``. + +2) Generate the ``systemd-networkd`` files with a task in your VMware Ansible role: + +.. 
code-block:: yaml + + - name: make sure cache directory exists + file: path="{{ inventory_dir }}/cache" state=directory + delegate_to: localhost + + - name: generate network templates + template: src=network.j2 dest="{{ inventory_dir }}/cache/{{ inventory_hostname }}.network" + delegate_to: localhost + + - name: copy generated files to vm + vmware_guest_file_operation: + hostname: "{{ vmware_general.hostname }}" + username: "{{ vmware_username }}" + password: "{{ vmware_password }}" + datacenter: "{{ vmware_general.datacenter }}" + validate_certs: "{{ vmware_general.validate_certs }}" + vm_id: "{{ inventory_hostname }}" + vm_username: root + vm_password: "{{ template_password }}" + copy: + src: "{{ inventory_dir }}/cache/{{ inventory_hostname }}.network" + dest: "/etc/systemd/network/ens160.network" + overwrite: False + delegate_to: localhost + + - name: restart systemd-networkd + vmware_vm_shell: + hostname: "{{ vmware_general.hostname }}" + username: "{{ vmware_username }}" + password: "{{ vmware_password }}" + datacenter: "{{ vmware_general.datacenter }}" + folder: /vm + vm_id: "{{ inventory_hostname}}" + vm_username: root + vm_password: "{{ template_password }}" + vm_shell: /bin/systemctl + vm_shell_args: " restart systemd-networkd" + delegate_to: localhost + + - name: restart systemd-resolved + vmware_vm_shell: + hostname: "{{ vmware_general.hostname }}" + username: "{{ vmware_username }}" + password: "{{ vmware_password }}" + datacenter: "{{ vmware_general.datacenter }}" + folder: /vm + vm_id: "{{ inventory_hostname}}" + vm_username: root + vm_password: "{{ template_password }}" + vm_shell: /bin/systemctl + vm_shell_args: " restart systemd-resolved" + delegate_to: localhost + +3) Wait for ``netplan`` support in ``open-vm-tools`` diff --git a/docs/docsite/rst/shared_snippets/basic_concepts.txt b/docs/docsite/rst/shared_snippets/basic_concepts.txt new file mode 100644 index 00000000..e10e2d4f --- /dev/null +++ 
b/docs/docsite/rst/shared_snippets/basic_concepts.txt @@ -0,0 +1,34 @@ +Control node +============ + +Any machine with Ansible installed. You can run Ansible commands and playbooks by invoking the ``ansible`` or ``ansible-playbook`` command from any control node. You can use any computer that has a Python installation as a control node - laptops, shared desktops, and servers can all run Ansible. However, you cannot use a Windows machine as a control node. You can have multiple control nodes. + +Managed nodes +============= + +The network devices (and/or servers) you manage with Ansible. Managed nodes are also sometimes called "hosts". Ansible is not installed on managed nodes. + +Inventory +========= + +A list of managed nodes. An inventory file is also sometimes called a "hostfile". Your inventory can specify information like IP address for each managed node. An inventory can also organize managed nodes, creating and nesting groups for easier scaling. To learn more about inventory, see :ref:`the Working with Inventory<intro_inventory>` section. + +Collections +=========== + +Collections are a distribution format for Ansible content that can include playbooks, roles, modules, and plugins. You can install and use collections through `Ansible Galaxy <https://galaxy.ansible.com>`_. To learn more about collections, see :ref:`collections`. + +Modules +======= + +The units of code Ansible executes. Each module has a particular use, from administering users on a specific type of database to managing VLAN interfaces on a specific type of network device. You can invoke a single module with a task, or invoke several different modules in a playbook. Starting in Ansible 2.10, modules are grouped in collections. For an idea of how many collections Ansible includes, take a look at the :ref:`list_of_collections`. + +Tasks +===== + +The units of action in Ansible. You can execute a single task once with an ad-hoc command. 
+ +Playbooks +========= + +Ordered lists of tasks, saved so you can run those tasks in that order repeatedly. Playbooks can include variables as well as tasks. Playbooks are written in YAML and are easy to read, write, share and understand. To learn more about playbooks, see :ref:`about_playbooks`. diff --git a/docs/docsite/rst/shared_snippets/download_tarball_collections.txt b/docs/docsite/rst/shared_snippets/download_tarball_collections.txt new file mode 100644 index 00000000..045004be --- /dev/null +++ b/docs/docsite/rst/shared_snippets/download_tarball_collections.txt @@ -0,0 +1,8 @@ + + +To download the collection tarball from Galaxy for offline use: + +#. Navigate to the collection page. +#. Click on :guilabel:`Download tarball`. + +You may also need to manually download any dependent collections. diff --git a/docs/docsite/rst/shared_snippets/galaxy_server_list.txt b/docs/docsite/rst/shared_snippets/galaxy_server_list.txt new file mode 100644 index 00000000..0cbb7bbe --- /dev/null +++ b/docs/docsite/rst/shared_snippets/galaxy_server_list.txt @@ -0,0 +1,80 @@ + + +By default, ``ansible-galaxy`` uses https://galaxy.ansible.com as the Galaxy server (as listed in the :file:`ansible.cfg` file under :ref:`galaxy_server`). + +You can use either option below to configure ``ansible-galaxy collection`` to use other servers (such as Red Hat Automation Hub or a custom Galaxy server): + +* Set the server list in the :ref:`galaxy_server_list` configuration option in :ref:`ansible_configuration_settings_locations`. +* Use the ``--server`` command line argument to limit to an individual server. + +To configure a Galaxy server list in ``ansible.cfg``: + + +#. Add the ``server_list`` option under the ``[galaxy]`` section to one or more server names. +#. Create a new section for each server name. +#. Set the ``url`` option for each server name. +#. Optionally, set the API token for each server name. See :ref:`API token <collections_installing>` for details. + +.. 
note:: + The ``url`` option for each server name must end with a forward slash ``/``. If you do not set the API token in your Galaxy server list, use the ``--api-key`` argument to pass in the token to the ``ansible-galaxy collection publish`` command. + +For Automation Hub, you additionally need to: + +#. Set the ``auth_url`` option for each server name. +#. Set the API token for each server name. Go to https://cloud.redhat.com/ansible/automation-hub/token/ and click ::guilabel:`Get API token` from the version dropdown to copy your API token. + +The following example shows how to configure multiple servers: + +.. code-block:: ini + + [galaxy] + server_list = automation_hub, my_org_hub, release_galaxy, test_galaxy + + [galaxy_server.automation_hub] + url=https://cloud.redhat.com/api/automation-hub/ + auth_url=https://sso.redhat.com/auth/realms/redhat-external/protocol/openid-connect/token + token=my_ah_token + + [galaxy_server.my_org_hub] + url=https://automation.my_org/ + username=my_user + password=my_pass + + [galaxy_server.release_galaxy] + url=https://galaxy.ansible.com/ + token=my_token + + [galaxy_server.test_galaxy] + url=https://galaxy-dev.ansible.com/ + token=my_test_token + +.. note:: + You can use the ``--server`` command line argument to select an explicit Galaxy server in the ``server_list`` and + the value of this argument should match the name of the server. To use a server not in the server list, set the value to the URL to access that server (all servers in the server list will be ignored). Also you cannot use the ``--api-key`` argument for any of the predefined servers. You can only use the ``api_key`` argument if you did not define a server list or if you specify a URL in the + ``--server`` argument. + +**Galaxy server list configuration options** + +The :ref:`galaxy_server_list` option is a list of server identifiers in a prioritized order. 
When searching for a +collection, the install process will search in that order, for example, ``automation_hub`` first, then ``my_org_hub``, ``release_galaxy``, and +finally ``test_galaxy`` until the collection is found. The actual Galaxy instance is then defined under the section +``[galaxy_server.{{ id }}]`` where ``{{ id }}`` is the server identifier defined in the list. This section can then +define the following keys: + +* ``url``: The URL of the Galaxy instance to connect to. Required. +* ``token``: An API token key to use for authentication against the Galaxy instance. Mutually exclusive with ``username``. +* ``username``: The username to use for basic authentication against the Galaxy instance. Mutually exclusive with ``token``. +* ``password``: The password to use, in conjunction with ``username``, for basic authentication. +* ``auth_url``: The URL of a Keycloak server 'token_endpoint' if using SSO authentication (for example, Automation Hub). Mutually exclusive with ``username``. Requires ``token``. + +As well as defining these server options in the ``ansible.cfg`` file, you can also define them as environment variables. +The environment variable is in the form ``ANSIBLE_GALAXY_SERVER_{{ id }}_{{ key }}`` where ``{{ id }}`` is the upper +case form of the server identifier and ``{{ key }}`` is the key to define. For example I can define ``token`` for +``release_galaxy`` by setting ``ANSIBLE_GALAXY_SERVER_RELEASE_GALAXY_TOKEN=secret_token``. + +For operations that use only one Galaxy server (for example, the ``publish``, ``info``, or ``install`` commands). the ``ansible-galaxy collection`` command uses the first entry in the +``server_list``, unless you pass in an explicit server with the ``--server`` argument. + +.. note:: + Once a collection is found, any of its requirements are only searched within the same Galaxy instance as the parent + collection. The install process will not search for a collection requirement in a different Galaxy instance. 
diff --git a/docs/docsite/rst/shared_snippets/installing_collections.txt b/docs/docsite/rst/shared_snippets/installing_collections.txt new file mode 100644 index 00000000..16d14405 --- /dev/null +++ b/docs/docsite/rst/shared_snippets/installing_collections.txt @@ -0,0 +1,42 @@ + + +By default, ``ansible-galaxy collection install`` uses https://galaxy.ansible.com as the Galaxy server (as listed in the +:file:`ansible.cfg` file under :ref:`galaxy_server`). You do not need any +further configuration. + +See :ref:`Configuring the ansible-galaxy client <galaxy_server_config>` if you are using any other Galaxy server, such as Red Hat Automation Hub. + +To install a collection hosted in Galaxy: + +.. code-block:: bash + + ansible-galaxy collection install my_namespace.my_collection + +You can also directly use the tarball from your build: + +.. code-block:: bash + + ansible-galaxy collection install my_namespace-my_collection-1.0.0.tar.gz -p ./collections + +.. note:: + The install command automatically appends the path ``ansible_collections`` to the one specified with the ``-p`` option unless the + parent directory is already in a folder called ``ansible_collections``. + + +When using the ``-p`` option to specify the install path, use one of the values configured in :ref:`COLLECTIONS_PATHS`, as this is +where Ansible itself will expect to find collections. If you don't specify a path, ``ansible-galaxy collection install`` installs +the collection to the first path defined in :ref:`COLLECTIONS_PATHS`, which by default is ``~/.ansible/collections`` + +You can also keep a collection adjacent to the current playbook, under a ``collections/ansible_collections/`` directory structure. + +.. code-block:: text + + ./ + ├── play.yml + ├── collections/ + │ └── ansible_collections/ + │ └── my_namespace/ + │ └── my_collection/<collection structure lives here> + + +See :ref:`collection_structure` for details on the collection directory structure. 
diff --git a/docs/docsite/rst/shared_snippets/installing_collections_git_repo.txt b/docs/docsite/rst/shared_snippets/installing_collections_git_repo.txt new file mode 100644 index 00000000..7eb87829 --- /dev/null +++ b/docs/docsite/rst/shared_snippets/installing_collections_git_repo.txt @@ -0,0 +1,84 @@ +You can install a collection in a git repository by providing the URI to the repository instead of a collection name or path to a ``tar.gz`` file. The collection must contain a ``galaxy.yml`` file, which will be used to generate the would-be collection artifact data from the directory. The URI should be prefixed with ``git+`` (or with ``git@`` to use a private repository with ssh authentication) and optionally supports a comma-separated `git commit-ish <https://git-scm.com/docs/gitglossary#def_commit-ish>`_ version (for example, a commit or tag). + +.. warning:: + + Embedding credentials into a git URI is not secure. Make sure to use safe auth options for security reasons. For example, use `SSH <https://help.github.com/en/github/authenticating-to-github/connecting-to-github-with-ssh>`_, `netrc <https://linux.die.net/man/5/netrc>`_ or `http.extraHeader <https://git-scm.com/docs/git-config#Documentation/git-config.txt-httpextraHeader>`_/`url.<base>.pushInsteadOf <https://git-scm.com/docs/git-config#Documentation/git-config.txt-urlltbasegtpushInsteadOf>`_ in Git config to prevent your creds from being exposed in logs. + +.. 
code-block:: bash + + # Install a collection in a repository using the latest commit on the branch 'devel' + ansible-galaxy collection install git+https://github.com/organization/repo_name.git,devel + + # Install a collection from a private github repository + ansible-galaxy collection install git@github.com:organization/repo_name.git + + # Install a collection from a local git repository + ansible-galaxy collection install git+file:///home/user/path/to/repo/.git + +In a ``requirements.yml`` file, you can also use the ``type`` and ``version`` keys in addition to using the ``git+repo,version`` syntax for the collection name. + +.. code-block:: yaml + + collections: + - name: https://github.com/organization/repo_name.git + type: git + version: devel + +Git repositories can be used for collection dependencies as well. This can be helpful for local development and testing but built/published artifacts should only have dependencies on other artifacts. + +.. code-block:: yaml + + dependencies: {'git@github.com:organization/repo_name.git': 'devel'} + +Default repository search locations +----------------------------------- + +There are two paths searched in a repository for collections by default. + +The first is the ``galaxy.yml`` file in the top level of the repository path. If the ``galaxy.yml`` file exists it's used as the collection metadata and the individual collection will be installed. + +.. code-block:: text + + ├── galaxy.yml + ├── plugins/ + │ ├── lookup/ + │ ├── modules/ + │ └── module_utils/ + └─── README.md + +The second is a ``galaxy.yml`` file in each directory in the repository path (one level deep). In this scenario, each directory with a ``galaxy.yml`` is installed as a collection. + +.. 
code-block:: text + + directory/ + ├── docs/ + ├── galaxy.yml + ├── plugins/ + │ ├── inventory/ + │ └── modules/ + └── roles/ + +Specifying the location to search for collections +------------------------------------------------- + +If you have a different repository structure or only want to install a subset of collections, you can add a fragment to the end of your URI (before the optional comma-separated version) to indicate which path ansible-galaxy should inspect for ``galaxy.yml`` file(s). The path should be a directory to a collection or multiple collections (rather than the path to a ``galaxy.yml`` file). + +.. code-block:: text + + namespace/ + └── name/ + ├── docs/ + ├── galaxy.yml + ├── plugins/ + │ ├── README.md + │ └── modules/ + ├── README.md + └── roles/ + +.. code-block:: bash + + # Install all collections in a particular namespace + ansible-galaxy collection install git+https://github.com/organization/repo_name.git#/namespace/ + + # Install an individual collection using a specific commit + ansible-galaxy collection install git+https://github.com/organization/repo_name.git#/namespace/name/,7b60ddc245bc416b72d8ea6ed7b799885110f5e5 diff --git a/docs/docsite/rst/shared_snippets/installing_multiple_collections.txt b/docs/docsite/rst/shared_snippets/installing_multiple_collections.txt new file mode 100644 index 00000000..e8c40b23 --- /dev/null +++ b/docs/docsite/rst/shared_snippets/installing_multiple_collections.txt @@ -0,0 +1,51 @@ + +You can also setup a ``requirements.yml`` file to install multiple collections in one command. This file is a YAML file in the format: + +.. 
code-block:: yaml+jinja + + --- + collections: + # With just the collection name + - my_namespace.my_collection + + # With the collection name, version, and source options + - name: my_namespace.my_other_collection + version: 'version range identifiers (default: ``*``)' + source: 'The Galaxy URL to pull the collection from (default: ``--api-server`` from cmdline)' + +The supported keys for collection requirement entries are ``name``, ``version``, ``source``, and ``type``. + +The ``version`` key can take in the same range identifier format documented above. If you're installing a collection from a git repository instead of a built collection artifact, the ``version`` key refers to a `git commit-ish <https://git-scm.com/docs/gitglossary#def_commit-ish>`_. + +The ``type`` key can be set to ``galaxy``, ``url``, ``file``, and ``git``. If ``type`` is omitted, the ``name`` key is used to implicitly determine the source of the collection. + +Roles can also be specified and placed under the ``roles`` key. The values follow the same format as a requirements +file used in older Ansible releases. + +.. code-block:: yaml + + --- + roles: + # Install a role from Ansible Galaxy. + - name: geerlingguy.java + version: 1.9.6 + + collections: + # Install a collection from Ansible Galaxy. + - name: geerlingguy.php_roles + version: 0.9.3 + source: https://galaxy.ansible.com + +To install both roles and collections at the same time with one command, run the following: + +.. code-block:: bash + + $ ansible-galaxy install -r requirements.yml + +Running ``ansible-galaxy collection install -r`` or ``ansible-galaxy role install -r`` will only install collections, +or roles respectively. + +.. note:: + Installing both roles and collections from the same requirements file will not work when specifying a custom + collection or role install path. In this scenario the collections will be skipped and the command will process + each like ``ansible-galaxy role install`` would. 
diff --git a/docs/docsite/rst/shared_snippets/installing_older_collection.txt b/docs/docsite/rst/shared_snippets/installing_older_collection.txt new file mode 100644 index 00000000..511dd2a7 --- /dev/null +++ b/docs/docsite/rst/shared_snippets/installing_older_collection.txt @@ -0,0 +1,25 @@ + +You can only have one version of a collection installed at a time. By default ``ansible-galaxy`` installs the latest available version. If you want to install a specific version, you can add a version range identifier. For example, to install the 1.0.0-beta.1 version of the collection: + +.. code-block:: bash + + ansible-galaxy collection install my_namespace.my_collection:==1.0.0-beta.1 + +You can specify multiple range identifiers separated by ``,``. Use single quotes so the shell passes the entire command, including ``>``, ``!``, and other operators, along. For example, to install the most recent version that is greater than or equal to 1.0.0 and less than 2.0.0: + +.. code-block:: bash + + ansible-galaxy collection install 'my_namespace.my_collection:>=1.0.0,<2.0.0' + +Ansible will always install the most recent version that meets the range identifiers you specify. You can use the following range identifiers: + +* ``*``: The most recent version. This is the default. +* ``!=``: Not equal to the version specified. +* ``==``: Exactly the version specified. +* ``>=``: Greater than or equal to the version specified. +* ``>``: Greater than the version specified. +* ``<=``: Less than or equal to the version specified. +* ``<``: Less than the version specified. + +.. note:: + By default ``ansible-galaxy`` ignores pre-release versions. To install a pre-release version, you must use the ``==`` range identifier to require it explicitly. diff --git a/docs/docsite/rst/user_guide/basic_concepts.rst b/docs/docsite/rst/user_guide/basic_concepts.rst new file mode 100644 index 00000000..76adc684 --- /dev/null +++ b/docs/docsite/rst/user_guide/basic_concepts.rst @@ -0,0 +1,12 @@ +.. 
_basic_concepts: + +**************** +Ansible concepts +**************** + +These concepts are common to all uses of Ansible. You need to understand them to use Ansible for any kind of automation. This basic introduction provides the background you need to follow the rest of the User Guide. + +.. contents:: + :local: + +.. include:: /shared_snippets/basic_concepts.txt diff --git a/docs/docsite/rst/user_guide/become.rst b/docs/docsite/rst/user_guide/become.rst new file mode 100644 index 00000000..fed806bb --- /dev/null +++ b/docs/docsite/rst/user_guide/become.rst @@ -0,0 +1,702 @@ +.. _become: + +****************************************** +Understanding privilege escalation: become +****************************************** + +Ansible uses existing privilege escalation systems to execute tasks with root privileges or with another user's permissions. Because this feature allows you to 'become' another user, different from the user that logged into the machine (remote user), we call it ``become``. The ``become`` keyword leverages existing privilege escalation tools like `sudo`, `su`, `pfexec`, `doas`, `pbrun`, `dzdo`, `ksu`, `runas`, `machinectl` and others. + +.. contents:: + :local: + +Using become +============ + +You can control the use of ``become`` with play or task directives, connection variables, or at the command line. If you set privilege escalation properties in multiple ways, review the :ref:`general precedence rules<general_precedence_rules>` to understand which settings will be used. + +A full list of all become plugins that are included in Ansible can be found in the :ref:`become_plugin_list`. + +Become directives +----------------- + +You can set the directives that control ``become`` at the play or task level. You can override these by setting connection variables, which often differ from one host to another. These variables and directives are independent. For example, setting ``become_user`` does not set ``become``. 
+ +become + set to ``yes`` to activate privilege escalation. + +become_user + set to user with desired privileges — the user you `become`, NOT the user you login as. Does NOT imply ``become: yes``, to allow it to be set at host level. Default value is ``root``. + +become_method + (at play or task level) overrides the default method set in ansible.cfg, set to use any of the :ref:`become_plugins`. + +become_flags + (at play or task level) permit the use of specific flags for the tasks or role. One common use is to change the user to nobody when the shell is set to nologin. Added in Ansible 2.2. + +For example, to manage a system service (which requires ``root`` privileges) when connected as a non-``root`` user, you can use the default value of ``become_user`` (``root``): + +.. code-block:: yaml + + - name: Ensure the httpd service is running + service: + name: httpd + state: started + become: yes + +To run a command as the ``apache`` user: + +.. code-block:: yaml + + - name: Run a command as the apache user + command: somecommand + become: yes + become_user: apache + +To do something as the ``nobody`` user when the shell is nologin: + +.. code-block:: yaml + + - name: Run a command as nobody + command: somecommand + become: yes + become_method: su + become_user: nobody + become_flags: '-s /bin/sh' + +To specify a password for sudo, run ``ansible-playbook`` with ``--ask-become-pass`` (``-K`` for short). +If you run a playbook utilizing ``become`` and the playbook seems to hang, most likely it is stuck at the privilege escalation prompt. Stop it with `CTRL-c`, then execute the playbook with ``-K`` and the appropriate password. + +Become connection variables +--------------------------- + +You can define different ``become`` options for each managed node or group. You can define these variables in inventory or use them as normal variables. + +ansible_become + equivalent of the become directive, decides if privilege escalation is used or not. 
+ +ansible_become_method + which privilege escalation method should be used + +ansible_become_user + set the user you become through privilege escalation; does not imply ``ansible_become: yes`` + +ansible_become_password + set the privilege escalation password. See :ref:`playbooks_vault` for details on how to avoid having secrets in plain text + +For example, if you want to run all tasks as ``root`` on a server named ``webserver``, but you can only connect as the ``manager`` user, you could use an inventory entry like this: + +.. code-block:: text + + webserver ansible_user=manager ansible_become=yes + +.. note:: + The variables defined above are generic for all become plugins but plugin specific ones can also be set instead. + Please see the documentation for each plugin for a list of all options the plugin has and how they can be defined. + A full list of become plugins in Ansible can be found at :ref:`become_plugins`. + +Become command-line options +--------------------------- + +--ask-become-pass, -K + ask for privilege escalation password; does not imply become will be used. Note that this password will be used for all hosts. + +--become, -b + run operations with become (no password implied) + +--become-method=BECOME_METHOD + privilege escalation method to use (default=sudo), + valid choices: [ sudo | su | pbrun | pfexec | doas | dzdo | ksu | runas | machinectl ] + +--become-user=BECOME_USER + run operations as this user (default=root), does not imply --become/-b + +Risks and limitations of become +=============================== + +Although privilege escalation is mostly intuitive, there are a few limitations +on how it works. Users should be aware of these to avoid surprises. + +Risks of becoming an unprivileged user +-------------------------------------- + +Ansible modules are executed on the remote machine by first substituting the +parameters into the module file, then copying the file to the remote machine, +and finally executing it there. 
+ +Everything is fine if the module file is executed without using ``become``, +when the ``become_user`` is root, or when the connection to the remote machine +is made as root. In these cases Ansible creates the module file with permissions +that only allow reading by the user and root, or only allow reading by the unprivileged +user being switched to. + +However, when both the connection user and the ``become_user`` are unprivileged, +the module file is written as the user that Ansible connects as, but the file needs to +be readable by the user Ansible is set to ``become``. In this case, Ansible makes +the module file world-readable for the duration of the Ansible module execution. +Once the module is done executing, Ansible deletes the temporary file. + +If any of the parameters passed to the module are sensitive in nature, and you do +not trust the client machines, then this is a potential danger. + +Ways to resolve this include: + +* Use `pipelining`. When pipelining is enabled, Ansible does not save the + module to a temporary file on the client. Instead it pipes the module to + the remote python interpreter's stdin. Pipelining does not work for + python modules involving file transfer (for example: :ref:`copy <copy_module>`, + :ref:`fetch <fetch_module>`, :ref:`template <template_module>`), or for non-python modules. + +* Install POSIX.1e filesystem acl support on the + managed host. If the temporary directory on the remote host is mounted with + POSIX acls enabled and the :command:`setfacl` tool is in the remote ``PATH`` + then Ansible will use POSIX acls to share the module file with the second + unprivileged user instead of having to make the file readable by everyone. + +* Avoid becoming an unprivileged + user. Temporary files are protected by UNIX file permissions when you + ``become`` root or do not use ``become``. 
In Ansible 2.1 and above, UNIX + file permissions are also secure if you make the connection to the managed + machine as root and then use ``become`` to access an unprivileged account. + +.. warning:: Although the Solaris ZFS filesystem has filesystem ACLs, the ACLs + are not POSIX.1e filesystem acls (they are NFSv4 ACLs instead). Ansible + cannot use these ACLs to manage its temp file permissions so you may have + to resort to ``allow_world_readable_tmpfiles`` if the remote machines use ZFS. + +.. versionchanged:: 2.1 + +Ansible makes it hard to unknowingly use ``become`` insecurely. Starting in Ansible 2.1, +Ansible defaults to issuing an error if it cannot execute securely with ``become``. +If you cannot use pipelining or POSIX ACLs, you must connect as an unprivileged user, +you must use ``become`` to execute as a different unprivileged user, +and you decide that your managed nodes are secure enough for the +modules you want to run there to be world readable, you can turn on +``allow_world_readable_tmpfiles`` in the :file:`ansible.cfg` file. Setting +``allow_world_readable_tmpfiles`` will change this from an error into +a warning and allow the task to run as it did prior to 2.1. + +Not supported by all connection plugins +--------------------------------------- + +Privilege escalation methods must also be supported by the connection plugin +used. Most connection plugins will warn if they do not support become. Some +will just ignore it as they always run as root (jail, chroot, and so on). + +Only one method may be enabled per host +--------------------------------------- + +Methods cannot be chained. You cannot use ``sudo /bin/su -`` to become a user, +you need to have privileges to run the command as that user in sudo or be able +to su directly to it (the same for pbrun, pfexec or other supported methods). + +Privilege escalation must be general +------------------------------------ + +You cannot limit privilege escalation permissions to certain commands. 
+Ansible does not always +use a specific command to do something but runs modules (code) from +a temporary file name which changes every time. If you have '/sbin/service' +or '/bin/chmod' as the allowed commands this will fail with ansible as those +paths won't match with the temporary file that Ansible creates to run the +module. If you have security rules that constrain your sudo/pbrun/doas environment +to running specific command paths only, use Ansible from a special account that +does not have this constraint, or use :ref:`ansible_tower` to manage indirect access to SSH credentials. + +May not access environment variables populated by pamd_systemd +-------------------------------------------------------------- + +For most Linux distributions using ``systemd`` as their init, the default +methods used by ``become`` do not open a new "session", in the sense of +systemd. Because the ``pam_systemd`` module will not fully initialize a new +session, you might have surprises compared to a normal session opened through +ssh: some environment variables set by ``pam_systemd``, most notably +``XDG_RUNTIME_DIR``, are not populated for the new user and instead inherited +or just emptied. + +This might cause trouble when trying to invoke systemd commands that depend on +``XDG_RUNTIME_DIR`` to access the bus: + +.. code-block:: console + + $ echo $XDG_RUNTIME_DIR + + $ systemctl --user status + Failed to connect to bus: Permission denied + +To force ``become`` to open a new systemd session that goes through +``pam_systemd``, you can use ``become_method: machinectl``. + +For more information, see `this systemd issue +<https://github.com/systemd/systemd/issues/825#issuecomment-127917622>`_. + +.. _become_network: + +Become and network automation +============================= + +As of version 2.6, Ansible supports ``become`` for privilege escalation (entering ``enable`` mode or privileged EXEC mode) on all Ansible-maintained network platforms that support ``enable`` mode. 
Using ``become`` replaces the ``authorize`` and ``auth_pass`` options in a ``provider`` dictionary. + +You must set the connection type to either ``connection: ansible.netcommon.network_cli`` or ``connection: ansible.netcommon.httpapi`` to use ``become`` for privilege escalation on network devices. Check the :ref:`platform_options` documentation for details. + +You can use escalated privileges on only the specific tasks that need them, on an entire play, or on all plays. Adding ``become: yes`` and ``become_method: enable`` instructs Ansible to enter ``enable`` mode before executing the task, play, or playbook where those parameters are set. + +If you see this error message, the task that generated it requires ``enable`` mode to succeed: + +.. code-block:: console + + Invalid input (privileged mode required) + +To set ``enable`` mode for a specific task, add ``become`` at the task level: + +.. code-block:: yaml + + - name: Gather facts (eos) + arista.eos.eos_facts: + gather_subset: + - "!hardware" + become: yes + become_method: enable + +To set enable mode for all tasks in a single play, add ``become`` at the play level: + +.. code-block:: yaml + + - hosts: eos-switches + become: yes + become_method: enable + tasks: + - name: Gather facts (eos) + arista.eos.eos_facts: + gather_subset: + - "!hardware" + +Setting enable mode for all tasks +--------------------------------- + +Often you wish for all tasks in all plays to run using privilege mode, that is best achieved by using ``group_vars``: + +**group_vars/eos.yml** + +.. 
code-block:: yaml + + ansible_connection: ansible.netcommon.network_cli + ansible_network_os: arista.eos.eos + ansible_user: myuser + ansible_become: yes + ansible_become_method: enable + +Passwords for enable mode +^^^^^^^^^^^^^^^^^^^^^^^^^ + +If you need a password to enter ``enable`` mode, you can specify it in one of two ways: + +* providing the :option:`--ask-become-pass <ansible-playbook --ask-become-pass>` command line option +* setting the ``ansible_become_password`` connection variable + +.. warning:: + + As a reminder, passwords should never be stored in plain text. For information on encrypting your passwords and other secrets with Ansible Vault, see :ref:`vault`. + +authorize and auth_pass +----------------------- + +Ansible still supports ``enable`` mode with ``connection: local`` for legacy network playbooks. To enter ``enable`` mode with ``connection: local``, use the module options ``authorize`` and ``auth_pass``: + +.. code-block:: yaml + + - hosts: eos-switches + ansible_connection: local + tasks: + - name: Gather facts (eos) + eos_facts: + gather_subset: + - "!hardware" + provider: + authorize: yes + auth_pass: "{{ secret_auth_pass }}" + +We recommend updating your playbooks to use ``become`` for network-device ``enable`` mode consistently. The use of ``authorize`` and of ``provider`` dictionaries will be deprecated in the future. Check the :ref:`platform_options` and :ref:`network_modules` documentation for details. + +.. _become_windows: + +Become and Windows +================== + +Since Ansible 2.3, ``become`` can be used on Windows hosts through the +``runas`` method. Become on Windows uses the same inventory setup and +invocation arguments as ``become`` on a non-Windows host, so the setup and +variable names are the same as what is defined in this document. + +While ``become`` can be used to assume the identity of another user, there are other uses for +it with Windows hosts. 
One important use is to bypass some of the +limitations that are imposed when running on WinRM, such as constrained network +delegation or accessing forbidden system calls like the WUA API. You can use +``become`` with the same user as ``ansible_user`` to bypass these limitations +and run commands that are not normally accessible in a WinRM session. + +Administrative rights +--------------------- + +Many tasks in Windows require administrative privileges to complete. When using +the ``runas`` become method, Ansible will attempt to run the module with the +full privileges that are available to the remote user. If it fails to elevate +the user token, it will continue to use the limited token during execution. + +A user must have the ``SeDebugPrivilege`` to run a become process with elevated +privileges. This privilege is assigned to Administrators by default. If the +debug privilege is not available, the become process will run with a limited +set of privileges and groups. + +To determine the type of token that Ansible was able to get, run the following +task: + +.. code-block:: yaml + + - name: Check my user name + ansible.windows.win_whoami: + become: yes + +The output will look something similar to the below: + +.. 
code-block:: ansible-output + + ok: [windows] => { + "account": { + "account_name": "vagrant-domain", + "domain_name": "DOMAIN", + "sid": "S-1-5-21-3088887838-4058132883-1884671576-1105", + "type": "User" + }, + "authentication_package": "Kerberos", + "changed": false, + "dns_domain_name": "DOMAIN.LOCAL", + "groups": [ + { + "account_name": "Administrators", + "attributes": [ + "Mandatory", + "Enabled by default", + "Enabled", + "Owner" + ], + "domain_name": "BUILTIN", + "sid": "S-1-5-32-544", + "type": "Alias" + }, + { + "account_name": "INTERACTIVE", + "attributes": [ + "Mandatory", + "Enabled by default", + "Enabled" + ], + "domain_name": "NT AUTHORITY", + "sid": "S-1-5-4", + "type": "WellKnownGroup" + }, + ], + "impersonation_level": "SecurityAnonymous", + "label": { + "account_name": "High Mandatory Level", + "domain_name": "Mandatory Label", + "sid": "S-1-16-12288", + "type": "Label" + }, + "login_domain": "DOMAIN", + "login_time": "2018-11-18T20:35:01.9696884+00:00", + "logon_id": 114196830, + "logon_server": "DC01", + "logon_type": "Interactive", + "privileges": { + "SeBackupPrivilege": "disabled", + "SeChangeNotifyPrivilege": "enabled-by-default", + "SeCreateGlobalPrivilege": "enabled-by-default", + "SeCreatePagefilePrivilege": "disabled", + "SeCreateSymbolicLinkPrivilege": "disabled", + "SeDebugPrivilege": "enabled", + "SeDelegateSessionUserImpersonatePrivilege": "disabled", + "SeImpersonatePrivilege": "enabled-by-default", + "SeIncreaseBasePriorityPrivilege": "disabled", + "SeIncreaseQuotaPrivilege": "disabled", + "SeIncreaseWorkingSetPrivilege": "disabled", + "SeLoadDriverPrivilege": "disabled", + "SeManageVolumePrivilege": "disabled", + "SeProfileSingleProcessPrivilege": "disabled", + "SeRemoteShutdownPrivilege": "disabled", + "SeRestorePrivilege": "disabled", + "SeSecurityPrivilege": "disabled", + "SeShutdownPrivilege": "disabled", + "SeSystemEnvironmentPrivilege": "disabled", + "SeSystemProfilePrivilege": "disabled", + "SeSystemtimePrivilege": 
"disabled", + "SeTakeOwnershipPrivilege": "disabled", + "SeTimeZonePrivilege": "disabled", + "SeUndockPrivilege": "disabled" + }, + "rights": [ + "SeNetworkLogonRight", + "SeBatchLogonRight", + "SeInteractiveLogonRight", + "SeRemoteInteractiveLogonRight" + ], + "token_type": "TokenPrimary", + "upn": "vagrant-domain@DOMAIN.LOCAL", + "user_flags": [] + } + +Under the ``label`` key, the ``account_name`` entry determines whether the user +has Administrative rights. Here are the labels that can be returned and what +they represent: + +* ``Medium``: Ansible failed to get an elevated token and ran under a limited + token. Only a subset of the privileges assigned to user are available during + the module execution and the user does not have administrative rights. + +* ``High``: An elevated token was used and all the privileges assigned to the + user are available during the module execution. + +* ``System``: The ``NT AUTHORITY\System`` account is used and has the highest + level of privileges available. + +The output will also show the list of privileges that have been granted to the +user. When the privilege value is ``disabled``, the privilege is assigned to +the logon token but has not been enabled. In most scenarios these privileges +are automatically enabled when required. + +If running on a version of Ansible that is older than 2.5 or the normal +``runas`` escalation process fails, an elevated token can be retrieved by: + +* Set the ``become_user`` to ``System`` which has full control over the + operating system. + +* Grant ``SeTcbPrivilege`` to the user Ansible connects with on + WinRM. ``SeTcbPrivilege`` is a high-level privilege that grants + full control over the operating system. No user is given this privilege by + default, and care should be taken if you grant this privilege to a user or group. 
+ For more information on this privilege, please see + `Act as part of the operating system <https://docs.microsoft.com/en-us/previous-versions/windows/it-pro/windows-server-2012-R2-and-2012/dn221957(v=ws.11)>`_. + You can use the below task to set this privilege on a Windows host: + + .. code-block:: yaml + + - name: grant the ansible user the SeTcbPrivilege right + ansible.windows.win_user_right: + name: SeTcbPrivilege + users: '{{ansible_user}}' + action: add + +* Turn UAC off on the host and reboot before trying to become the user. UAC is + a security protocol that is designed to run accounts with the + ``least privilege`` principle. You can turn UAC off by running the following + tasks: + + .. code-block:: yaml + + - name: turn UAC off + win_regedit: + path: HKLM:\SOFTWARE\Microsoft\Windows\CurrentVersion\policies\system + name: EnableLUA + data: 0 + type: dword + state: present + register: uac_result + + - name: reboot after disabling UAC + win_reboot: + when: uac_result is changed + +.. Note:: Granting the ``SeTcbPrivilege`` or turning UAC off can cause Windows + security vulnerabilities and care should be given if these steps are taken. + +Local service accounts +---------------------- + +Prior to Ansible version 2.5, ``become`` only worked on Windows with a local or domain +user account. Local service accounts like ``System`` or ``NetworkService`` +could not be used as ``become_user`` in these older versions. This restriction +has been lifted since the 2.5 release of Ansible. The three service accounts +that can be set under ``become_user`` are: + +* System +* NetworkService +* LocalService + +Because local service accounts do not have passwords, the +``ansible_become_password`` parameter is not required and is ignored if +specified. + +Become without setting a password +--------------------------------- + +As of Ansible 2.8, ``become`` can be used to become a Windows local or domain account +without requiring a password for that account. 
For this method to work, the +following requirements must be met: + +* The connection user has the ``SeDebugPrivilege`` privilege assigned +* The connection user is part of the ``BUILTIN\Administrators`` group +* The ``become_user`` has either the ``SeBatchLogonRight`` or ``SeNetworkLogonRight`` user right + +Using become without a password is achieved in one of two different methods: + +* Duplicating an existing logon session's token if the account is already logged on +* Using S4U to generate a logon token that is valid on the remote host only + +In the first scenario, the become process is spawned from another logon of that +user account. This could be an existing RDP logon, console logon, but this is +not guaranteed to occur all the time. This is similar to the +``Run only when user is logged on`` option for a Scheduled Task. + +In the case where another logon of the become account does not exist, S4U is +used to create a new logon and run the module through that. This is similar to +the ``Run whether user is logged on or not`` with the ``Do not store password`` +option for a Scheduled Task. In this scenario, the become process will not be +able to access any network resources like a normal WinRM process. + +To make a distinction between using become with no password and becoming an +account that has no password, make sure to keep ``ansible_become_password`` as +undefined or set ``ansible_become_password:``. + +.. Note:: Because there are no guarantees an existing token will exist for a + user when Ansible runs, there's a high chance the become process will only + have access to local resources. Use become with a password if the task needs + to access network resources. + +Accounts without a password +--------------------------- + +.. Warning:: As a general security best practice, you should avoid allowing accounts without passwords. + +Ansible can be used to become a Windows account that does not have a password (like the +``Guest`` account). 
To become an account without a password, set up the +variables like normal but set ``ansible_become_password: ''``. + +Before become can work on an account like this, the local policy +`Accounts: Limit local account use of blank passwords to console logon only <https://docs.microsoft.com/en-us/previous-versions/windows/it-pro/windows-server-2012-R2-and-2012/jj852174(v=ws.11)>`_ +must be disabled. This can either be done through a Group Policy Object (GPO) +or with this Ansible task: + +.. code-block:: yaml + + - name: allow blank password on become + ansible.windows.win_regedit: + path: HKLM:\SYSTEM\CurrentControlSet\Control\Lsa + name: LimitBlankPasswordUse + data: 0 + type: dword + state: present + +.. Note:: This is only for accounts that do not have a password. You still need + to set the account's password under ``ansible_become_password`` if the + become_user has a password. + +Become flags for Windows +------------------------ + +Ansible 2.5 added the ``become_flags`` parameter to the ``runas`` become method. +This parameter can be set using the ``become_flags`` task directive or set in +Ansible's configuration using ``ansible_become_flags``. The two valid values +that are initially supported for this parameter are ``logon_type`` and +``logon_flags``. + +.. Note:: These flags should only be set when becoming a normal user account, not a local service account like LocalSystem. + +The key ``logon_type`` sets the type of logon operation to perform. The value +can be set to one of the following: + +* ``interactive``: The default logon type. The process will be run under a + context that is the same as when running a process locally. This bypasses all + WinRM restrictions and is the recommended method to use. + +* ``batch``: Runs the process under a batch context that is similar to a + scheduled task with a password set. This should bypass most WinRM + restrictions and is useful if the ``become_user`` is not allowed to log on + interactively. 
+ +* ``new_credentials``: Runs under the same credentials as the calling user, but + outbound connections are run under the context of the ``become_user`` and + ``become_password``, similar to ``runas.exe /netonly``. The ``logon_flags`` + flag should also be set to ``netcredentials_only``. Use this flag if + the process needs to access a network resource (like an SMB share) using a + different set of credentials. + +* ``network``: Runs the process under a network context without any cached + credentials. This results in the same type of logon session as running a + normal WinRM process without credential delegation, and operates under the same + restrictions. + +* ``network_cleartext``: Like the ``network`` logon type, but instead caches + the credentials so it can access network resources. This is the same type of + logon session as running a normal WinRM process with credential delegation. + +For more information, see +`dwLogonType <https://docs.microsoft.com/en-gb/windows/desktop/api/winbase/nf-winbase-logonusera>`_. + +The ``logon_flags`` key specifies how Windows will log the user on when creating +the new process. The value can be set to none or multiple of the following: + +* ``with_profile``: The default logon flag set. The process will load the + user's profile in the ``HKEY_USERS`` registry key to ``HKEY_CURRENT_USER``. + +* ``netcredentials_only``: The process will use the same token as the caller + but will use the ``become_user`` and ``become_password`` when accessing a remote + resource. This is useful in inter-domain scenarios where there is no trust + relationship, and should be used with the ``new_credentials`` ``logon_type``. + +By default ``logon_flags=with_profile`` is set, if the profile should not be +loaded set ``logon_flags=`` or if the profile should be loaded with +``netcredentials_only``, set ``logon_flags=with_profile,netcredentials_only``. 
+ +For more information, see `dwLogonFlags <https://docs.microsoft.com/en-gb/windows/desktop/api/winbase/nf-winbase-createprocesswithtokenw>`_. + +Here are some examples of how to use ``become_flags`` with Windows tasks: + +.. code-block:: yaml + + - name: copy a file from a fileshare with custom credentials + ansible.windows.win_copy: + src: \\server\share\data\file.txt + dest: C:\temp\file.txt + remote_src: yes + vars: + ansible_become: yes + ansible_become_method: runas + ansible_become_user: DOMAIN\user + ansible_become_password: Password01 + ansible_become_flags: logon_type=new_credentials logon_flags=netcredentials_only + + - name: run a command under a batch logon + ansible.windows.win_whoami: + become: yes + become_flags: logon_type=batch + + - name: run a command and not load the user profile + ansible.windows.win_whoami: + become: yes + become_flags: logon_flags= + + +Limitations of become on Windows +-------------------------------- + +* Running a task with ``async`` and ``become`` on Windows Server 2008, 2008 R2 + and Windows 7 only works when using Ansible 2.7 or newer. + +* By default, the become user logs on with an interactive session, so it must + have the right to do so on the Windows host. If it does not inherit the + ``SeAllowLogOnLocally`` privilege or inherits the ``SeDenyLogOnLocally`` + privilege, the become process will fail. Either add the privilege or set the + ``logon_type`` flag to change the logon type used. + +* Prior to Ansible version 2.3, become only worked when + ``ansible_winrm_transport`` was either ``basic`` or ``credssp``. This + restriction has been lifted since the 2.4 release of Ansible for all hosts + except Windows Server 2008 (non R2 version). + +* The Secondary Logon service ``seclogon`` must be running to use ``ansible_become_method: runas``. + +.. seealso:: + + `Mailing List <https://groups.google.com/forum/#!forum/ansible-project>`_ + Questions? Help? 
Stop by the list on Google Groups + `webchat.freenode.net <https://webchat.freenode.net>`_ + #ansible IRC chat channel diff --git a/docs/docsite/rst/user_guide/collections_using.rst b/docs/docsite/rst/user_guide/collections_using.rst new file mode 100644 index 00000000..a9530a9e --- /dev/null +++ b/docs/docsite/rst/user_guide/collections_using.rst @@ -0,0 +1,324 @@ + +.. _collections: + +***************** +Using collections +***************** + +Collections are a distribution format for Ansible content that can include playbooks, roles, modules, and plugins. As modules move from the core Ansible repository into collections, the module documentation will move to the :ref:`collections pages <list_of_collections>`. + +You can install and use collections through `Ansible Galaxy <https://galaxy.ansible.com>`_. + +* For details on how to *develop* collections see :ref:`developing_collections`. +* For the current development status of Collections and FAQ see `Ansible Collections Community Guide <https://github.com/ansible-collections/overview/blob/main/README.rst>`_. + +.. contents:: + :local: + :depth: 2 + +.. _collections_installing: + +Installing collections +====================== + + +Installing collections with ``ansible-galaxy`` +---------------------------------------------- + +.. include:: ../shared_snippets/installing_collections.txt + +.. _collections_older_version: + +Installing an older version of a collection +------------------------------------------- + +.. include:: ../shared_snippets/installing_older_collection.txt + +Installing a collection from a git repository +--------------------------------------------- + +.. include:: ../shared_snippets/installing_collections_git_repo.txt + +.. _collection_requirements_file: + +Install multiple collections with a requirements file +----------------------------------------------------- + +.. include:: ../shared_snippets/installing_multiple_collections.txt + +.. 
_collection_offline_download: + +Downloading a collection for offline use +----------------------------------------- + +.. include:: ../shared_snippets/download_tarball_collections.txt + + +.. _galaxy_server_config: + +Configuring the ``ansible-galaxy`` client +------------------------------------------ + +.. include:: ../shared_snippets/galaxy_server_list.txt + +.. _collections_downloading: + +Downloading collections +======================= + +To download a collection and its dependencies for an offline install, run ``ansible-galaxy collection download``. This +downloads the collections specified and their dependencies to the specified folder and creates a ``requirements.yml`` +file which can be used to install those collections on a host without access to a Galaxy server. All the collections +are downloaded by default to the ``./collections`` folder. + +Just like the ``install`` command, the collections are sourced based on the +:ref:`configured galaxy server config <galaxy_server_config>`. Even if a collection to download was specified by a URL +or path to a tarball, the collection will be redownloaded from the configured Galaxy server. + +Collections can be specified as one or multiple collections or with a ``requirements.yml`` file just like +``ansible-galaxy collection install``. + +To download a single collection and its dependencies: + +.. code-block:: bash + + ansible-galaxy collection download my_namespace.my_collection + +To download a single collection at a specific version: + +.. code-block:: bash + + ansible-galaxy collection download my_namespace.my_collection:1.0.0 + +To download multiple collections either specify multiple collections as command line arguments as shown above or use a +requirements file in the format documented with :ref:`collection_requirements_file`. + +.. 
code-block:: bash + + ansible-galaxy collection download -r requirements.yml + +All the collections are downloaded by default to the ``./collections`` folder but you can use ``-p`` or +``--download-path`` to specify another path: + +.. code-block:: bash + + ansible-galaxy collection download my_namespace.my_collection -p ~/offline-collections + +Once you have downloaded the collections, the folder contains the collections specified, their dependencies, and a +``requirements.yml`` file. You can use this folder as is with ``ansible-galaxy collection install`` to install the +collections on a host without access to a Galaxy or Automation Hub server. + +.. code-block:: bash + + # This must be run from the folder that contains the offline collections and requirements.yml file downloaded + # by the internet-connected host + cd ~/offline-collections + ansible-galaxy collection install -r requirements.yml + +.. _collections_listing: + +Listing collections +=================== + +To list installed collections, run ``ansible-galaxy collection list``. This shows all of the installed collections found in the configured collections search paths. It will also show collections under development which contain a galaxy.yml file instead of a MANIFEST.json. The path where the collections are located are displayed as well as version information. If no version information is available, a ``*`` is displayed for the version number. + +.. code-block:: shell + + # /home/astark/.ansible/collections/ansible_collections + Collection Version + -------------------------- ------- + cisco.aci 0.0.5 + cisco.mso 0.0.4 + sandwiches.ham * + splunk.es 0.0.5 + + # /usr/share/ansible/collections/ansible_collections + Collection Version + ----------------- ------- + fortinet.fortios 1.0.6 + pureport.pureport 0.0.8 + sensu.sensu_go 1.3.0 + +Run with ``-vvv`` to display more detailed information. 
+ +To list a specific collection, pass a valid fully qualified collection name (FQCN) to the command ``ansible-galaxy collection list``. All instances of the collection will be listed. + +.. code-block:: shell + + > ansible-galaxy collection list fortinet.fortios + + # /home/astark/.ansible/collections/ansible_collections + Collection Version + ---------------- ------- + fortinet.fortios 1.0.1 + + # /usr/share/ansible/collections/ansible_collections + Collection Version + ---------------- ------- + fortinet.fortios 1.0.6 + +To search other paths for collections, use the ``-p`` option. Specify multiple search paths by separating them with a ``:``. The list of paths specified on the command line will be added to the beginning of the configured collections search paths. + +.. code-block:: shell + + > ansible-galaxy collection list -p '/opt/ansible/collections:/etc/ansible/collections' + + # /opt/ansible/collections/ansible_collections + Collection Version + --------------- ------- + sandwiches.club 1.7.2 + + # /etc/ansible/collections/ansible_collections + Collection Version + -------------- ------- + sandwiches.pbj 1.2.0 + + # /home/astark/.ansible/collections/ansible_collections + Collection Version + -------------------------- ------- + cisco.aci 0.0.5 + cisco.mso 0.0.4 + fortinet.fortios 1.0.1 + sandwiches.ham * + splunk.es 0.0.5 + + # /usr/share/ansible/collections/ansible_collections + Collection Version + ----------------- ------- + fortinet.fortios 1.0.6 + pureport.pureport 0.0.8 + sensu.sensu_go 1.3.0 + + +.. _using_collections: + +Verifying collections +===================== + +Verifying collections with ``ansible-galaxy`` +--------------------------------------------- + +Once installed, you can verify that the content of the installed collection matches the content of the collection on the server. 
This feature expects that the collection is installed in one of the configured collection paths and that the collection exists on one of the configured galaxy servers. + +.. code-block:: bash + + ansible-galaxy collection verify my_namespace.my_collection + +The output of the ``ansible-galaxy collection verify`` command is quiet if it is successful. If a collection has been modified, the altered files are listed under the collection name. + +.. code-block:: bash + + ansible-galaxy collection verify my_namespace.my_collection + Collection my_namespace.my_collection contains modified content in the following files: + my_namespace.my_collection + plugins/inventory/my_inventory.py + plugins/modules/my_module.py + +You can use the ``-vvv`` flag to display additional information, such as the version and path of the installed collection, the URL of the remote collection used for validation, and successful verification output. + +.. code-block:: bash + + ansible-galaxy collection verify my_namespace.my_collection -vvv + ... + Verifying 'my_namespace.my_collection:1.0.0'. + Installed collection found at '/path/to/ansible_collections/my_namespace/my_collection/' + Remote collection found at 'https://galaxy.ansible.com/download/my_namespace-my_collection-1.0.0.tar.gz' + Successfully verified that checksums for 'my_namespace.my_collection:1.0.0' match the remote collection + +If you have a pre-release or non-latest version of a collection installed you should include the specific version to verify. If the version is omitted, the installed collection is verified against the latest version available on the server. + +.. code-block:: bash + + ansible-galaxy collection verify my_namespace.my_collection:1.0.0 + +In addition to the ``namespace.collection_name:version`` format, you can provide the collections to verify in a ``requirements.yml`` file. Dependencies listed in ``requirements.yml`` are not included in the verify process and should be verified separately. + +.. 
code-block:: bash + + ansible-galaxy collection verify -r requirements.yml + +Verifying against ``tar.gz`` files is not supported. If your ``requirements.yml`` contains paths to tar files or URLs for installation, you can use the ``--ignore-errors`` flag to ensure that all collections using the ``namespace.name`` format in the file are processed. + +.. _collections_using_playbook: + +Using collections in a Playbook +=============================== + +Once installed, you can reference a collection content by its fully qualified collection name (FQCN): + +.. code-block:: yaml + + - hosts: all + tasks: + - my_namespace.my_collection.mymodule: + option1: value + +This works for roles or any type of plugin distributed within the collection: + +.. code-block:: yaml + + - hosts: all + tasks: + - import_role: + name: my_namespace.my_collection.role1 + + - my_namespace.mycollection.mymodule: + option1: value + + - debug: + msg: '{{ lookup("my_namespace.my_collection.lookup1", 'param1')| my_namespace.my_collection.filter1 }}' + +Simplifying module names with the ``collections`` keyword +========================================================= + +The ``collections`` keyword lets you define a list of collections that your role or playbook should search for unqualified module and action names. So you can use the ``collections`` keyword, then simply refer to modules and action plugins by their short-form names throughout that role or playbook. + +.. warning:: + If your playbook uses both the ``collections`` keyword and one or more roles, the roles do not inherit the collections set by the playbook. See below for details. + +Using ``collections`` in roles +------------------------------ + +Within a role, you can control which collections Ansible searches for the tasks inside the role using the ``collections`` keyword in the role's ``meta/main.yml``. 
Ansible will use the collections list defined inside the role even if the playbook that calls the role defines different collections in a separate ``collections`` keyword entry. Roles defined inside a collection always implicitly search their own collection first, so you don't need to use the ``collections`` keyword to access modules, actions, or other roles contained in the same collection. + +.. code-block:: yaml + + # myrole/meta/main.yml + collections: + - my_namespace.first_collection + - my_namespace.second_collection + - other_namespace.other_collection + +Using ``collections`` in playbooks +---------------------------------- + +In a playbook, you can control the collections Ansible searches for modules and action plugins to execute. However, any roles you call in your playbook define their own collections search order; they do not inherit the calling playbook's settings. This is true even if the role does not define its own ``collections`` keyword. + +.. code-block:: yaml + + - hosts: all + collections: + - my_namespace.my_collection + + tasks: + - import_role: + name: role1 + + - mymodule: + option1: value + + - debug: + msg: '{{ lookup("my_namespace.my_collection.lookup1", 'param1')| my_namespace.my_collection.filter1 }}' + +The ``collections`` keyword merely creates an ordered 'search path' for non-namespaced plugin and role references. It does not install content or otherwise change Ansible's behavior around the loading of plugins or roles. Note that an FQCN is still required for non-action or module plugins (for example, lookups, filters, tests). + +.. seealso:: + + :ref:`developing_collections` + Develop or modify a collection. + :ref:`collections_galaxy_meta` + Understand the collections metadata structure. 
+ `Mailing List <https://groups.google.com/group/ansible-devel>`_ + The development mailing list + `irc.freenode.net <http://irc.freenode.net>`_ + #ansible IRC chat channel diff --git a/docs/docsite/rst/user_guide/command_line_tools.rst b/docs/docsite/rst/user_guide/command_line_tools.rst new file mode 100644 index 00000000..56561b59 --- /dev/null +++ b/docs/docsite/rst/user_guide/command_line_tools.rst @@ -0,0 +1,20 @@ +.. _command_line_tools: + +Working with command line tools +=============================== + +Most users are familiar with `ansible` and `ansible-playbook`, but those are not the only utilities Ansible provides. +Below is a complete list of Ansible utilities. Each page contains a description of the utility and a listing of supported parameters. + +.. toctree:: + :maxdepth: 1 + + ../cli/ansible.rst + ../cli/ansible-config.rst + ../cli/ansible-console.rst + ../cli/ansible-doc.rst + ../cli/ansible-galaxy.rst + ../cli/ansible-inventory.rst + ../cli/ansible-playbook.rst + ../cli/ansible-pull.rst + ../cli/ansible-vault.rst diff --git a/docs/docsite/rst/user_guide/complex_data_manipulation.rst b/docs/docsite/rst/user_guide/complex_data_manipulation.rst new file mode 100644 index 00000000..253362b7 --- /dev/null +++ b/docs/docsite/rst/user_guide/complex_data_manipulation.rst @@ -0,0 +1,246 @@ +.. _complex_data_manipulation: + +Data manipulation +######################### + +In many cases, you need to do some complex operation with your variables, while Ansible is not recommended as a data processing/manipulation tool, you can use the existing Jinja2 templating in conjunction with the many added Ansible filters, lookups and tests to do some very complex transformations. + +Let's start with a quick definition of each type of plugin: + - lookups: Mainly used to query 'external data', in Ansible these were the primary part of loops using the ``with_<lookup>`` construct, but they can be used independently to return data for processing. 
They normally return a list due to their primary function in loops as mentioned previously. Used with the ``lookup`` or ``query`` Jinja2 operators. + - filters: used to change/transform data, used with the ``|`` Jinja2 operator. + - tests: used to validate data, used with the ``is`` Jinja2 operator. + +.. _note: + * Some tests and filters are provided directly by Jinja2, so their availability depends on the Jinja2 version, not Ansible. + +.. _for_loops_or_list_comprehensions: + +Loops and list comprehensions +============================= + +Most programming languages have loops (``for``, ``while``, and so on) and list comprehensions to do transformations on lists including lists of objects. Jinja2 has a few filters that provide this functionality: ``map``, ``select``, ``reject``, ``selectattr``, ``rejectattr``. + +- map: this is a basic for loop that just allows you to change every item in a list, using the 'attribute' keyword you can do the transformation based on attributes of the list elements. +- select/reject: this is a for loop with a condition, that allows you to create a subset of a list that matches (or not) based on the result of the condition. +- selectattr/rejectattr: very similar to the above but it uses a specific attribute of the list elements for the conditional statement. + + +.. _keys_from_dict_matching_list: + +Extract keys from a dictionary matching elements from a list +------------------------------------------------------------ + +The Python equivalent code would be: + +.. code-block:: python + + chains = [1, 2] + for chain in chains: + for config in chains_config[chain]['configs']: + print(config['type']) + +There are several ways to do it in Ansible, this is just one example: + +.. 
code-block:: YAML+Jinja + :emphasize-lines: 3 + :caption: Way to extract matching keys from a list of dictionaries + + tasks: + - name: Show extracted list of keys from a list of dictionaries + ansible.builtin.debug: + msg: "{{ chains | map('extract', chains_config) | map(attribute='configs') | flatten | map(attribute='type') | flatten }}" + vars: + chains: [1, 2] + chains_config: + 1: + foo: bar + configs: + - type: routed + version: 0.1 + - type: bridged + version: 0.2 + 2: + foo: baz + configs: + - type: routed + version: 1.0 + - type: bridged + version: 1.1 + + +.. code-block:: ansible-output + :caption: Results of debug task, a list with the extracted keys + + ok: [localhost] => { + "msg": [ + "routed", + "bridged", + "routed", + "bridged" + ] + } + + +.. _find_mount_point: + +Find mount point +---------------- + +In this case, we want to find the mount point for a given path across our machines, since we already collect mount facts, we can use the following: + +.. code-block:: YAML+Jinja + :caption: Use selectattr to filter mounts into list I can then sort and select the last from + :emphasize-lines: 7 + + - hosts: all + gather_facts: True + vars: + path: /var/lib/cache + tasks: + - name: The mount point for {{path}}, found using the Ansible mount facts, [-1] is the same as the 'last' filter + ansible.builtin.debug: + msg: "{{(ansible_facts.mounts | selectattr('mount', 'in', path) | list | sort(attribute='mount'))[-1]['mount']}}" + + + +Omit elements from a list +------------------------- + +The special ``omit`` variable ONLY works with module options, but we can still use it in other ways as an identifier to tailor a list of elements: + +.. 
code-block:: YAML+Jinja + :caption: Inline list filtering when feeding a module option + :emphasize-lines: 3, 7 + + - name: Enable a list of Windows features, by name + ansible.builtin.set_fact: + win_feature_list: "{{ namestuff | reject('equalto', omit) | list }}" + vars: + namestuff: + - "{{ (fs_installed_smb_v1 | default(False)) | ternary(omit, 'FS-SMB1') }}" + - "foo" + - "bar" + + +Another way is to avoid adding elements to the list in the first place, so you can just use it directly: + +.. code-block:: YAML+Jinja + :caption: Using set_fact in a loop to increment a list conditionally + :emphasize-lines: 3, 4, 6 + + - name: Build unique list with some items conditionally omitted + ansible.builtin.set_fact: + namestuff: ' {{ (namestuff | default([])) | union([item]) }}' + when: item != omit + loop: + - "{{ (fs_installed_smb_v1 | default(False)) | ternary(omit, 'FS-SMB1') }}" + - "foo" + - "bar" + + +.. _complex_type_transfomations: + +Complex Type transformations +============================= + +Jinja provides filters for simple data type transformations (``int``, ``bool``, and so on), but when you want to transform data structures things are not as easy. +You can use loops and list comprehensions as shown above to help, also other filters and lookups can be chained and leveraged to achieve more complex transformations. + + +.. _create_dictionary_from_list: + +Create dictionary from list +--------------------------- + +In most languages it is easy to create a dictionary (a.k.a. map/associative array/hash and so on) from a list of pairs, in Ansible there are a couple of ways to do it and the best one for you might depend on the source of your data. + + +These example produces ``{"a": "b", "c": "d"}`` + +.. code-block:: YAML+Jinja + :caption: Simple list to dict by assuming the list is [key, value , key, value, ...] + + vars: + single_list: [ 'a', 'b', 'c', 'd' ] + mydict: "{{ dict(single_list) | slice(2) | list }}" + + +.. 
code-block:: YAML+Jinja + :caption: It is simpler when we have a list of pairs: + + vars: + list_of_pairs: [ ['a', 'b'], ['c', 'd'] ] + mydict: "{{ dict(list_of_pairs) }}" + +Both end up being the same thing, with the ``slice(2) | list`` transforming ``single_list`` to the same structure as ``list_of_pairs``. + + + +A bit more complex, using ``set_fact`` and a ``loop`` to create/update a dictionary with key value pairs from 2 lists: + +.. code-block:: YAML+Jinja + :caption: Using set_fact to create a dictionary from a set of lists + :emphasize-lines: 3, 4 + + - name: Uses 'combine' to update the dictionary and 'zip' to make pairs of both lists + ansible.builtin.set_fact: + mydict: "{{ mydict | default({}) | combine({item[0]: item[1]}) }}" + loop: "{{ (keys | zip(values)) | list }}" + vars: + keys: + - foo + - var + - bar + values: + - a + - b + - c + +This results in ``{"foo": "a", "var": "b", "bar": "c"}``. + + +You can even combine these simple examples with other filters and lookups to create a dictionary dynamically by matching patterns to variable names: + +.. code-block:: YAML+Jinja + :caption: Using 'vars' to define dictionary from a set of lists without needing a task + + vars: + myvarnames: "{{ q('varnames', '^my') }}" + mydict: "{{ dict(myvarnames | zip(q('vars', *myvarnames))) }}" + +A quick explanation, since there is a lot to unpack from these two lines: + + - The ``varnames`` lookup returns a list of variables that match "begin with ``my``". + - Then feeding the list from the previous step into the ``vars`` lookup to get the list of values. + The ``*`` is used to 'dereference the list' (a pythonism that works in Jinja), otherwise it would take the list as a single argument. + - Both lists get passed to the ``zip`` filter to pair them off into a unified list (key, value, key2, value2, ...). + - The dict function then takes this 'list of pairs' to create the dictionary. 
+ + +An example on how to use facts to find a host's data that meets condition X: + + +.. code-block:: YAML+Jinja + + vars: + uptime_of_host_most_recently_rebooted: "{{ansible_play_hosts_all | map('extract', hostvars, 'ansible_uptime_seconds') | sort | first}}" + + +Using an example from @zoradache on reddit, to show the 'uptime in days/hours/minutes' (assumes facts where gathered). +https://www.reddit.com/r/ansible/comments/gj5a93/trying_to_get_uptime_from_seconds/fqj2qr3/ + +.. code-block:: YAML+Jinja + + - name: Show the uptime in a certain format + ansible.builtin.debug: + msg: Timedelta {{ now() - now().fromtimestamp(now(fmt='%s') | int - ansible_uptime_seconds) }} + + +.. seealso:: + + :doc:`playbooks_filters` + Jinja2 filters included with Ansible + :doc:`playbooks_tests` + Jinja2 tests included with Ansible + `Jinja2 Docs <http://jinja.pocoo.org/docs/>`_ + Jinja2 documentation, includes lists for core filters and tests diff --git a/docs/docsite/rst/user_guide/connection_details.rst b/docs/docsite/rst/user_guide/connection_details.rst new file mode 100644 index 00000000..60f93cad --- /dev/null +++ b/docs/docsite/rst/user_guide/connection_details.rst @@ -0,0 +1,116 @@ +.. _connections: + +****************************** +Connection methods and details +****************************** + +This section shows you how to expand and refine the connection methods Ansible uses for your inventory. + +ControlPersist and paramiko +--------------------------- + +By default, Ansible uses native OpenSSH, because it supports ControlPersist (a performance feature), Kerberos, and options in ``~/.ssh/config`` such as Jump Host setup. If your control machine uses an older version of OpenSSH that does not support ControlPersist, Ansible will fallback to a Python implementation of OpenSSH called 'paramiko'. + +.. 
_connection_set_user: + +Setting a remote user +--------------------- + +By default, Ansible connects to all remote devices with the user name you are using on the control node. If that user name does not exist on a remote device, you can set a different user name for the connection. If you just need to do some tasks as a different user, look at :ref:`become`. You can set the connection user in a playbook: + +.. code-block:: yaml + + --- + - name: update webservers + hosts: webservers + remote_user: admin + + tasks: + - name: thing to do first in this playbook + . . . + +as a host variable in inventory: + +.. code-block:: text + + other1.example.com ansible_connection=ssh ansible_user=myuser + other2.example.com ansible_connection=ssh ansible_user=myotheruser + +or as a group variable in inventory: + +.. code-block:: yaml + + cloud: + hosts: + cloud1: my_backup.cloud.com + cloud2: my_backup2.cloud.com + vars: + ansible_user: admin + +Setting up SSH keys +------------------- + +By default, Ansible assumes you are using SSH keys to connect to remote machines. SSH keys are encouraged, but you can use password authentication if needed with the ``--ask-pass`` option. If you need to provide a password for :ref:`privilege escalation <become>` (sudo, pbrun, and so on), use ``--ask-become-pass``. + +.. include:: shared_snippets/SSH_password_prompt.txt + +To set up SSH agent to avoid retyping passwords, you can do: + +.. code-block:: bash + + $ ssh-agent bash + $ ssh-add ~/.ssh/id_rsa + +Depending on your setup, you may wish to use Ansible's ``--private-key`` command line option to specify a pem file instead. You can also add the private key file: + +.. code-block:: bash + + $ ssh-agent bash + $ ssh-add ~/.ssh/keypair.pem + +Another way to add private key files without using ssh-agent is using ``ansible_ssh_private_key_file`` in an inventory file as explained here: :ref:`intro_inventory`. 
+ +Running against localhost +------------------------- + +You can run commands against the control node by using "localhost" or "127.0.0.1" for the server name: + +.. code-block:: bash + + $ ansible localhost -m ping -e 'ansible_python_interpreter="/usr/bin/env python"' + +You can specify localhost explicitly by adding this to your inventory file: + +.. code-block:: bash + + localhost ansible_connection=local ansible_python_interpreter="/usr/bin/env python" + +.. _host_key_checking_on: + +Managing host key checking +-------------------------- + +Ansible enables host key checking by default. Checking host keys guards against server spoofing and man-in-the-middle attacks, but it does require some maintenance. + +If a host is reinstalled and has a different key in 'known_hosts', this will result in an error message until corrected. If a new host is not in 'known_hosts' your control node may prompt for confirmation of the key, which results in an interactive experience if using Ansible, from say, cron. You might not want this. + +If you understand the implications and wish to disable this behavior, you can do so by editing ``/etc/ansible/ansible.cfg`` or ``~/.ansible.cfg``: + +.. code-block:: text + + [defaults] + host_key_checking = False + +Alternatively this can be set by the :envvar:`ANSIBLE_HOST_KEY_CHECKING` environment variable: + +.. code-block:: bash + + $ export ANSIBLE_HOST_KEY_CHECKING=False + +Also note that host key checking in paramiko mode is reasonably slow, therefore switching to 'ssh' is also recommended when using this feature. + +Other connection methods +------------------------ + +Ansible can use a variety of connection methods beyond SSH. You can select any connection plugin, including managing things locally and managing chroot, lxc, and jail containers. +A mode called 'ansible-pull' can also invert the system and have systems 'phone home' via scheduled git checkouts to pull configuration directives from a central repository. 
diff --git a/docs/docsite/rst/user_guide/guide_rolling_upgrade.rst b/docs/docsite/rst/user_guide/guide_rolling_upgrade.rst new file mode 100644 index 00000000..6f2ca742 --- /dev/null +++ b/docs/docsite/rst/user_guide/guide_rolling_upgrade.rst @@ -0,0 +1,324 @@ +********************************************************** +Playbook Example: Continuous Delivery and Rolling Upgrades +********************************************************** + +.. contents:: + :local: + +.. _lamp_introduction: + +What is continuous delivery? +============================ + +Continuous delivery (CD) means frequently delivering updates to your software application. + +The idea is that by updating more often, you do not have to wait for a specific timed period, and your organization +gets better at the process of responding to change. + +Some Ansible users are deploying updates to their end users on an hourly or even more frequent basis -- sometimes every time +there is an approved code change. To achieve this, you need tools to be able to quickly apply those updates in a zero-downtime way. + +This document describes in detail how to achieve this goal, using one of Ansible's most complete example +playbooks as a template: lamp_haproxy. This example uses a lot of Ansible features: roles, templates, +and group variables, and it also comes with an orchestration playbook that can do zero-downtime +rolling upgrades of the web application stack. + +.. note:: + + `Click here for the latest playbooks for this example + <https://github.com/ansible/ansible-examples/tree/master/lamp_haproxy>`_. + +The playbooks deploy Apache, PHP, MySQL, Nagios, and HAProxy to a CentOS-based set of servers. + +We're not going to cover how to run these playbooks here. Read the included README in the github project along with the +example for that information. Instead, we're going to take a close look at every part of the playbook and describe what it does. + +.. 
_lamp_deployment: + +Site deployment +=============== + +Let's start with ``site.yml``. This is our site-wide deployment playbook. It can be used to initially deploy the site, as well +as push updates to all of the servers: + +.. code-block:: yaml + + --- + # This playbook deploys the whole application stack in this site. + + # Apply common configuration to all hosts + - hosts: all + + roles: + - common + + # Configure and deploy database servers. + - hosts: dbservers + + roles: + - db + + # Configure and deploy the web servers. Note that we include two roles + # here, the 'base-apache' role which simply sets up Apache, and 'web' + # which includes our example web application. + + - hosts: webservers + + roles: + - base-apache + - web + + # Configure and deploy the load balancer(s). + - hosts: lbservers + + roles: + - haproxy + + # Configure and deploy the Nagios monitoring node(s). + - hosts: monitoring + + roles: + - base-apache + - nagios + +.. note:: + + If you're not familiar with terms like playbooks and plays, you should review :ref:`working_with_playbooks`. + +In this playbook we have 5 plays. The first one targets ``all`` hosts and applies the ``common`` role to all of the hosts. +This is for site-wide things like yum repository configuration, firewall configuration, and anything else that needs to apply to all of the servers. + +The next four plays run against specific host groups and apply specific roles to those servers. +Along with the roles for Nagios monitoring, the database, and the web application, we've implemented a +``base-apache`` role that installs and configures a basic Apache setup. This is used by both the +sample web application and the Nagios hosts. + +.. _lamp_roles: + +Reusable content: roles +======================= + +By now you should have a bit of understanding about roles and how they work in Ansible. Roles are a way to organize +content: tasks, handlers, templates, and files, into reusable components. 
+ +This example has six roles: ``common``, ``base-apache``, ``db``, ``haproxy``, ``nagios``, and ``web``. How you organize +your roles is up to you and your application, but most sites will have one or more common roles that are applied to +all systems, and then a series of application-specific roles that install and configure particular parts of the site. + +Roles can have variables and dependencies, and you can pass in parameters to roles to modify their behavior. +You can read more about roles in the :ref:`playbooks_reuse_roles` section. + +.. _lamp_group_variables: + +Configuration: group variables +============================== + +Group variables are variables that are applied to groups of servers. They can be used in templates and in +playbooks to customize behavior and to provide easily-changed settings and parameters. They are stored in +a directory called ``group_vars`` in the same location as your inventory. +Here is lamp_haproxy's ``group_vars/all`` file. As you might expect, these variables are applied to all of the machines in your inventory: + +.. code-block:: yaml + + --- + httpd_port: 80 + ntpserver: 192.0.2.23 + +This is a YAML file, and you can create lists and dictionaries for more complex variable structures. +In this case, we are just setting two variables, one for the port for the web server, and one for the +NTP server that our machines should use for time synchronization. + +Here's another group variables file. This is ``group_vars/dbservers`` which applies to the hosts in the ``dbservers`` group: + +.. code-block:: yaml + + --- + mysqlservice: mysqld + mysql_port: 3306 + dbuser: root + dbname: foodb + upassword: usersecret + +If you look in the example, there are group variables for the ``webservers`` group and the ``lbservers`` group, similarly. + +These variables are used in a variety of places. You can use them in playbooks, like this, in ``roles/db/tasks/main.yml``: + +.. 
code-block:: yaml + + - name: Create Application Database + mysql_db: + name: "{{ dbname }}" + state: present + + - name: Create Application DB User + mysql_user: + name: "{{ dbuser }}" + password: "{{ upassword }}" + priv: "*.*:ALL" + host: '%' + state: present + +You can also use these variables in templates, like this, in ``roles/common/templates/ntp.conf.j2``: + +.. code-block:: text + + driftfile /var/lib/ntp/drift + + restrict 127.0.0.1 + restrict -6 ::1 + + server {{ ntpserver }} + + includefile /etc/ntp/crypto/pw + + keys /etc/ntp/keys + +You can see that the variable substitution syntax of {{ and }} is the same for both templates and variables. The syntax +inside the curly braces is Jinja2, and you can do all sorts of operations and apply different filters to the +data inside. In templates, you can also use for loops and if statements to handle more complex situations, +like this, in ``roles/common/templates/iptables.j2``: + +.. code-block:: jinja + + {% if inventory_hostname in groups['dbservers'] %} + -A INPUT -p tcp --dport 3306 -j ACCEPT + {% endif %} + +This is testing to see if the inventory name of the machine we're currently operating on (``inventory_hostname``) +exists in the inventory group ``dbservers``. If so, that machine will get an iptables ACCEPT line for port 3306. + +Here's another example, from the same template: + +.. code-block:: jinja + + {% for host in groups['monitoring'] %} + -A INPUT -p tcp -s {{ hostvars[host].ansible_default_ipv4.address }} --dport 5666 -j ACCEPT + {% endfor %} + +This loops over all of the hosts in the group called ``monitoring``, and adds an ACCEPT line for +each monitoring host's default IPv4 address to the current machine's iptables configuration, so that Nagios can monitor those hosts. + +You can learn a lot more about Jinja2 and its capabilities `here <http://jinja.pocoo.org/docs/>`_, and you +can read more about Ansible variables in general in the :ref:`playbooks_variables` section. + +.. 
_lamp_rolling_upgrade: + +The rolling upgrade +=================== + +Now you have a fully-deployed site with web servers, a load balancer, and monitoring. How do you update it? This is where Ansible's +orchestration features come into play. While some applications use the term 'orchestration' to mean basic ordering or command-blasting, Ansible +refers to orchestration as 'conducting machines like an orchestra', and has a pretty sophisticated engine for it. + +Ansible has the capability to do operations on multi-tier applications in a coordinated way, making it easy to orchestrate a sophisticated zero-downtime rolling upgrade of our web application. This is implemented in a separate playbook, called ``rolling_update.yml``. + +Looking at the playbook, you can see it is made up of two plays. The first play is very simple and looks like this: + +.. code-block:: yaml + + - hosts: monitoring + tasks: [] + +What's going on here, and why are there no tasks? You might know that Ansible gathers "facts" from the servers before operating upon them. These facts are useful for all sorts of things: networking information, OS/distribution versions, and so on. In our case, we need to know something about all of the monitoring servers in our environment before we perform the update, so this simple play forces a fact-gathering step on our monitoring servers. You will see this pattern sometimes, and it's a useful trick to know. + +The next part is the update play. The first part looks like this: + +.. code-block:: yaml + + - hosts: webservers + user: root + serial: 1 + +This is just a normal play definition, operating on the ``webservers`` group. The ``serial`` keyword tells Ansible how many servers to operate on at once. If it's not specified, Ansible will parallelize these operations up to the default "forks" limit specified in the configuration file. But for a zero-downtime rolling upgrade, you may not want to operate on that many hosts at once. 
If you had just a handful of webservers, you may want to set ``serial`` to 1, for one host at a time. If you have 100, maybe you could set ``serial`` to 10, for ten at a time. + +Here is the next part of the update play: + +.. code-block:: yaml + + pre_tasks: + - name: disable nagios alerts for this host webserver service + nagios: + action: disable_alerts + host: "{{ inventory_hostname }}" + services: webserver + delegate_to: "{{ item }}" + loop: "{{ groups.monitoring }}" + + - name: disable the server in haproxy + shell: echo "disable server myapplb/{{ inventory_hostname }}" | socat stdio /var/lib/haproxy/stats + delegate_to: "{{ item }}" + loop: "{{ groups.lbservers }}" + +.. note:: + - The ``serial`` keyword forces the play to be executed in 'batches'. Each batch counts as a full play with a subselection of hosts. + This has some consequences on play behavior. For example, if all hosts in a batch fail, the play fails, which in turn fails the entire run. You should consider this when combining with ``max_fail_percentage``. + +The ``pre_tasks`` keyword just lets you list tasks to run before the roles are called. This will make more sense in a minute. If you look at the names of these tasks, you can see that we are disabling Nagios alerts and then removing the webserver that we are currently updating from the HAProxy load balancing pool. + +The ``delegate_to`` and ``loop`` arguments, used together, cause Ansible to loop over each monitoring server and load balancer, and perform that operation (delegate that operation) on the monitoring or load balancing server, "on behalf" of the webserver. In programming terms, the outer loop is the list of web servers, and the inner loop is the list of monitoring servers. + +Note that the HAProxy step looks a little complicated. 
We're using HAProxy in this example because it's freely available, though if you have (for instance) an F5 or Netscaler in your infrastructure (or maybe you have an AWS Elastic IP setup?), you can use Ansible modules to communicate with them instead. You might also wish to use other monitoring modules instead of nagios, but this just shows the main goal of the 'pre tasks' section -- take the server out of monitoring, and take it out of rotation. + +The next step simply re-applies the proper roles to the web servers. This will cause any configuration management declarations in ``web`` and ``base-apache`` roles to be applied to the web servers, including an update of the web application code itself. We don't have to do it this way--we could instead just purely update the web application, but this is a good example of how roles can be used to reuse tasks: + +.. code-block:: yaml + + roles: + - common + - base-apache + - web + +Finally, in the ``post_tasks`` section, we reverse the changes to the Nagios configuration and put the web server back in the load balancing pool: + +.. code-block:: yaml + + post_tasks: + - name: Enable the server in haproxy + shell: echo "enable server myapplb/{{ inventory_hostname }}" | socat stdio /var/lib/haproxy/stats + delegate_to: "{{ item }}" + loop: "{{ groups.lbservers }}" + + - name: re-enable nagios alerts + nagios: + action: enable_alerts + host: "{{ inventory_hostname }}" + services: webserver + delegate_to: "{{ item }}" + loop: "{{ groups.monitoring }}" + +Again, if you were using a Netscaler or F5 or Elastic Load Balancer, you would just substitute in the appropriate modules instead. + +.. _lamp_end_notes: + +Managing other load balancers +============================= + +In this example, we use the simple HAProxy load balancer to front-end the web servers. It's easy to configure and easy to manage. 
As we have mentioned, Ansible has support for a variety of other load balancers like Citrix NetScaler, F5 BigIP, Amazon Elastic Load Balancers, and more. See the :ref:`working_with_modules` documentation for more information. + +For other load balancers, you may need to send shell commands to them (like we do for HAProxy above), or call an API, if your load balancer exposes one. For the load balancers for which Ansible has modules, you may want to run them as a ``local_action`` if they contact an API. You can read more about local actions in the :ref:`playbooks_delegation` section. Should you develop anything interesting for some hardware where there is not a module, it might make for a good contribution! + +.. _lamp_end_to_end: + +Continuous delivery end-to-end +============================== + +Now that you have an automated way to deploy updates to your application, how do you tie it all together? A lot of organizations use a continuous integration tool like `Jenkins <https://jenkins.io/>`_ or `Atlassian Bamboo <https://www.atlassian.com/software/bamboo>`_ to tie the development, test, release, and deploy steps together. You may also want to use a tool like `Gerrit <https://www.gerritcodereview.com/>`_ to add a code review step to commits to either the application code itself, or to your Ansible playbooks, or both. + +Depending on your environment, you might be deploying continuously to a test environment, running an integration test battery against that environment, and then deploying automatically into production. Or you could keep it simple and just use the rolling-update for on-demand deployment into test or production specifically. This is all up to you. + +For integration with Continuous Integration systems, you can easily trigger playbook runs using the ``ansible-playbook`` command line tool, or, if you're using :ref:`ansible_tower`, the ``tower-cli`` or the built-in REST API. 
(The tower-cli command 'joblaunch' will spawn a remote job over the REST API and is pretty slick). + +This should give you a good idea of how to structure a multi-tier application with Ansible, and orchestrate operations upon that app, with the eventual goal of continuous delivery to your customers. You could extend the idea of the rolling upgrade to lots of different parts of the app; maybe add front-end web servers along with application servers, for instance, or replace the SQL database with something like MongoDB or Riak. Ansible gives you the capability to easily manage complicated environments and automate common operations. + +.. seealso:: + + `lamp_haproxy example <https://github.com/ansible/ansible-examples/tree/master/lamp_haproxy>`_ + The lamp_haproxy example discussed here. + :ref:`working_with_playbooks` + An introduction to playbooks + :ref:`playbooks_reuse_roles` + An introduction to playbook roles + :ref:`playbooks_variables` + An introduction to Ansible variables + `Ansible.com: Continuous Delivery <https://www.ansible.com/use-cases/continuous-delivery>`_ + An introduction to Continuous Delivery with Ansible diff --git a/docs/docsite/rst/user_guide/index.rst b/docs/docsite/rst/user_guide/index.rst new file mode 100644 index 00000000..e3f2aaf3 --- /dev/null +++ b/docs/docsite/rst/user_guide/index.rst @@ -0,0 +1,133 @@ +.. _user_guide_index: + +########## +User Guide +########## + +Welcome to the Ansible User Guide! This guide covers how to work with Ansible, including using the command line, working with inventory, interacting with data, writing tasks, plays, and playbooks; executing playbooks, and reference materials. This page outlines the most common situations and questions that bring readers to this section. If you prefer a traditional table of contents, you can find one at the bottom of the page. + +Getting started +=============== + +* I'd like an overview of how Ansible works. 
Where can I find: + + * a :ref:`quick video overview <quickstart_guide>` + * a :ref:`text introduction <intro_getting_started>` + +* I'm ready to learn about Ansible. What :ref:`basic_concepts` do I need to learn? +* I want to use Ansible without writing a playbook. How do I use :ref:`ad-hoc commands <intro_adhoc>`? + +Writing tasks, plays, and playbooks +=================================== + +* I'm writing my first playbook. What should I :ref:`know before I begin <playbooks_tips_and_tricks>`? +* I have a specific use case for a task or play: + + * Executing tasks with elevated privileges or as a different user with :ref:`become <become>` + * Repeating a task once for each item in a list with :ref:`loops <playbooks_loops>` + * Executing tasks on a different machine with :ref:`delegation <playbooks_delegation>` + * Running tasks only when certain conditions apply with :ref:`conditionals <playbooks_conditionals>` and evaluating conditions with :ref:`tests <playbooks_tests>` + * Grouping a set of tasks together with :ref:`blocks <playbooks_blocks>` + * Running tasks only when something has changed with :ref:`handlers <handlers>` + * Changing the way Ansible :ref:`handles failures <playbooks_error_handling>` + * Setting remote :ref:`environment values <playbooks_environment>` + +* I want to leverage the power of re-usable Ansible artifacts. How do I create re-usable :ref:`files <playbooks_reuse>` and :ref:`roles <playbooks_reuse_roles>`? +* I need to incorporate one file or playbook inside another. What is the difference between :ref:`including and importing <playbooks_reuse_includes>`? +* I want to run selected parts of my playbook. How do I add and use :ref:`tags <tags>`? + +Working with inventory +====================== + +* I have a list of servers and devices I want to automate. How do I create :ref:`inventory <intro_inventory>` to track them? +* I use cloud services and constantly have servers and devices starting and stopping. 
How do I track them using :ref:`dynamic inventory <intro_dynamic_inventory>`? +* I want to automate specific sub-sets of my inventory. How do I use :ref:`patterns <intro_patterns>`? + +Interacting with data +===================== + +* I want to use a single playbook against multiple systems with different attributes. How do I use :ref:`variables <playbooks_variables>` to handle the differences? +* I want to retrieve data about my systems. How do I access :ref:`Ansible facts <vars_and_facts>`? +* I need to access sensitive data like passwords with Ansible. How can I protect that data with :ref:`Ansible vault <vault>`? +* I want to change the data I have, so I can use it in a task. How do I use :ref:`filters <playbooks_filters>` to transform my data? +* I need to retrieve data from an external datastore. How do I use :ref:`lookups <playbooks_lookups>` to access databases and APIs? +* I want to ask playbook users to supply data. How do I get user input with :ref:`prompts <playbooks_prompts>`? +* I use certain modules frequently. How do I streamline my inventory and playbooks by :ref:`setting default values for module parameters <module_defaults>`? 
+ +Executing playbooks +=================== + +Once your playbook is ready to run, you may need to use these topics: + +* Executing "dry run" playbooks with :ref:`check mode and diff <check_mode_dry>` +* Running playbooks while troubleshooting with :ref:`start and step <playbooks_start_and_step>` +* Correcting tasks during execution with the :ref:`Ansible debugger <playbook_debugger>` +* Controlling how my playbook executes with :ref:`strategies and more <playbooks_strategies>` +* Running tasks, plays, and playbooks :ref:`asynchronously <playbooks_async>` + +Advanced features and reference +=============================== + +* Using :ref:`advanced syntax <playbooks_advanced_syntax>` +* Manipulating :ref:`complex data <complex_data_manipulation>` +* Using :ref:`plugins <plugins_lookup>` +* Using :ref:`playbook keywords <playbook_keywords>` +* Using :ref:`command-line tools <command_line_tools>` +* Rejecting :ref:`specific modules <plugin_filtering_config>` +* Module :ref:`maintenance <modules_support>` + +Traditional Table of Contents +============================= + +If you prefer to read the entire User Guide, here's a list of the pages in order: + +.. 
toctree:: + :maxdepth: 2 + + quickstart + basic_concepts + intro_getting_started + intro_adhoc + playbooks + playbooks_intro + playbooks_best_practices + become + playbooks_loops + playbooks_delegation + playbooks_conditionals + playbooks_tests + playbooks_blocks + playbooks_handlers + playbooks_error_handling + playbooks_environment + playbooks_reuse + playbooks_reuse_roles + playbooks_reuse_includes + playbooks_tags + intro_inventory + intro_dynamic_inventory + intro_patterns + connection_details + command_line_tools + playbooks_variables + playbooks_vars_facts + vault + playbooks_filters + playbooks_lookups + playbooks_prompts + playbooks_module_defaults + playbooks_checkmode + playbooks_startnstep + playbooks_debugger + playbooks_strategies + playbooks_async + playbooks_advanced_syntax + complex_data_manipulation + plugin_filtering_config + sample_setup + modules + ../plugins/plugins + ../reference_appendices/playbooks_keywords + intro_bsd + windows + collections_using diff --git a/docs/docsite/rst/user_guide/intro.rst b/docs/docsite/rst/user_guide/intro.rst new file mode 100644 index 00000000..d6ff243f --- /dev/null +++ b/docs/docsite/rst/user_guide/intro.rst @@ -0,0 +1,15 @@ +:orphan: + +Introduction +============ + +Before we start exploring the main components of Ansible -- playbooks, configuration management, deployment, and orchestration -- we'll learn how to get Ansible installed and cover some basic concepts. We'll also go over how to execute ad-hoc commands in parallel across your nodes using /usr/bin/ansible, and see what modules are available in Ansible's core (you can also write your own, which is covered later). + +.. 
toctree:: + :maxdepth: 1 + + ../installation_guide/index + ../dev_guide/overview_architecture + ../installation_guide/intro_configuration + intro_bsd + intro_windows diff --git a/docs/docsite/rst/user_guide/intro_adhoc.rst b/docs/docsite/rst/user_guide/intro_adhoc.rst new file mode 100644 index 00000000..a7aa8da3 --- /dev/null +++ b/docs/docsite/rst/user_guide/intro_adhoc.rst @@ -0,0 +1,206 @@ +.. _intro_adhoc: + +******************************* +Introduction to ad-hoc commands +******************************* + +An Ansible ad-hoc command uses the `/usr/bin/ansible` command-line tool to automate a single task on one or more managed nodes. Ad-hoc commands are quick and easy, but they are not reusable. So why learn about ad-hoc commands first? Ad-hoc commands demonstrate the simplicity and power of Ansible. The concepts you learn here will port over directly to the playbook language. Before reading and executing these examples, please read :ref:`intro_inventory`. + +.. contents:: + :local: + +Why use ad-hoc commands? +======================== + +Ad-hoc commands are great for tasks you repeat rarely. For example, if you want to power off all the machines in your lab for Christmas vacation, you could execute a quick one-liner in Ansible without writing a playbook. An ad-hoc command looks like this: + +.. code-block:: bash + + $ ansible [pattern] -m [module] -a "[module options]" + +You can learn more about :ref:`patterns<intro_patterns>` and :ref:`modules<working_with_modules>` on other pages. + +Use cases for ad-hoc tasks +========================== + +Ad-hoc tasks can be used to reboot servers, copy files, manage packages and users, and much more. You can use any Ansible module in an ad-hoc task. Ad-hoc tasks, like playbooks, use a declarative model, +calculating and executing the actions required to reach a specified final state. 
They +achieve a form of idempotence by checking the current state before they begin and doing nothing unless the current state is different from the specified final state. + +Rebooting servers +----------------- + +The default module for the ``ansible`` command-line utility is the :ref:`ansible.builtin.command module<command_module>`. You can use an ad-hoc task to call the command module and reboot all web servers in Atlanta, 10 at a time. Before Ansible can do this, you must have all servers in Atlanta listed in a group called [atlanta] in your inventory, and you must have working SSH credentials for each machine in that group. To reboot all the servers in the [atlanta] group: + +.. code-block:: bash + + $ ansible atlanta -a "/sbin/reboot" + +By default Ansible uses only 5 simultaneous processes. If you have more hosts than the value set for the fork count, Ansible will talk to them, but it will take a little longer. To reboot the [atlanta] servers with 10 parallel forks: + +.. code-block:: bash + + $ ansible atlanta -a "/sbin/reboot" -f 10 + +/usr/bin/ansible will default to running from your user account. To connect as a different user: + +.. code-block:: bash + + $ ansible atlanta -a "/sbin/reboot" -f 10 -u username + +Rebooting probably requires privilege escalation. You can connect to the server as ``username`` and run the command as the ``root`` user by using the :ref:`become <become>` keyword: + +.. code-block:: bash + + $ ansible atlanta -a "/sbin/reboot" -f 10 -u username --become [--ask-become-pass] + +If you add ``--ask-become-pass`` or ``-K``, Ansible prompts you for the password to use for privilege escalation (sudo/su/pfexec/doas/etc). + +.. note:: + The :ref:`command module <command_module>` does not support extended shell syntax like piping and + redirects (although shell variables will always work). If your command requires shell-specific + syntax, use the `shell` module instead. 
Read more about the differences on the + :ref:`working_with_modules` page. + +So far all our examples have used the default 'command' module. To use a different module, pass ``-m`` for module name. For example, to use the :ref:`ansible.builtin.shell module <shell_module>`: + +.. code-block:: bash + + $ ansible raleigh -m ansible.builtin.shell -a 'echo $TERM' + +When running any command with the Ansible *ad hoc* CLI (as opposed to +:ref:`Playbooks <working_with_playbooks>`), pay particular attention to shell quoting rules, so +the local shell retains the variable and passes it to Ansible. +For example, using double rather than single quotes in the above example would +evaluate the variable on the box you were on. + +.. _file_transfer: + +Managing files +-------------- + +An ad-hoc task can harness the power of Ansible and SCP to transfer many files to multiple machines in parallel. To transfer a file directly to all servers in the [atlanta] group: + +.. code-block:: bash + + $ ansible atlanta -m ansible.builtin.copy -a "src=/etc/hosts dest=/tmp/hosts" + +If you plan to repeat a task like this, use the :ref:`ansible.builtin.template<template_module>` module in a playbook. + +The :ref:`ansible.builtin.file<file_module>` module allows changing ownership and permissions on files. These +same options can be passed directly to the ``copy`` module as well: + +.. code-block:: bash + + $ ansible webservers -m ansible.builtin.file -a "dest=/srv/foo/a.txt mode=600" + $ ansible webservers -m ansible.builtin.file -a "dest=/srv/foo/b.txt mode=600 owner=mdehaan group=mdehaan" + +The ``file`` module can also create directories, similar to ``mkdir -p``: + +.. code-block:: bash + + $ ansible webservers -m ansible.builtin.file -a "dest=/path/to/c mode=755 owner=mdehaan group=mdehaan state=directory" + +As well as delete directories (recursively) and delete files: + +.. code-block:: bash + + $ ansible webservers -m ansible.builtin.file -a "dest=/path/to/c state=absent" + +.. 
_managing_packages: + +Managing packages +----------------- + +You might also use an ad-hoc task to install, update, or remove packages on managed nodes using a package management module like yum. To ensure a package is installed without updating it: + +.. code-block:: bash + + $ ansible webservers -m ansible.builtin.yum -a "name=acme state=present" + +To ensure a specific version of a package is installed: + +.. code-block:: bash + + $ ansible webservers -m ansible.builtin.yum -a "name=acme-1.5 state=present" + +To ensure a package is at the latest version: + +.. code-block:: bash + + $ ansible webservers -m ansible.builtin.yum -a "name=acme state=latest" + +To ensure a package is not installed: + +.. code-block:: bash + + $ ansible webservers -m ansible.builtin.yum -a "name=acme state=absent" + +Ansible has modules for managing packages under many platforms. If there is no module for your package manager, you can install packages using the command module or create a module for your package manager. + +.. _users_and_groups: + +Managing users and groups +------------------------- + +You can create, manage, and remove user accounts on your managed nodes with ad-hoc tasks: + +.. code-block:: bash + + $ ansible all -m ansible.builtin.user -a "name=foo password=<crypted password here>" + + $ ansible all -m ansible.builtin.user -a "name=foo state=absent" + +See the :ref:`ansible.builtin.user <user_module>` module documentation for details on all of the available options, including +how to manipulate groups and group membership. + +.. _managing_services: + +Managing services +----------------- + +Ensure a service is started on all webservers: + +.. code-block:: bash + + $ ansible webservers -m ansible.builtin.service -a "name=httpd state=started" + +Alternatively, restart a service on all webservers: + +.. code-block:: bash + + $ ansible webservers -m ansible.builtin.service -a "name=httpd state=restarted" + +Ensure a service is stopped: + +.. 
code-block:: bash + + $ ansible webservers -m ansible.builtin.service -a "name=httpd state=stopped" + +.. _gathering_facts: + +Gathering facts +--------------- + +Facts represent discovered variables about a system. You can use facts to implement conditional execution of tasks but also just to get ad-hoc information about your systems. To see all facts: + +.. code-block:: bash + + $ ansible all -m ansible.builtin.setup + +You can also filter this output to display only certain facts, see the :ref:`ansible.builtin.setup <setup_module>` module documentation for details. + +Now that you understand the basic elements of Ansible execution, you are ready to learn to automate repetitive tasks using :ref:`Ansible Playbooks <playbooks_intro>`. + +.. seealso:: + + :ref:`intro_configuration` + All about the Ansible config file + :ref:`list_of_collections` + Browse existing collections, modules, and plugins + :ref:`working_with_playbooks` + Using Ansible for configuration management & deployment + `Mailing List <https://groups.google.com/group/ansible-project>`_ + Questions? Help? Ideas? Stop by the list on Google Groups + `irc.freenode.net <http://irc.freenode.net>`_ + #ansible IRC chat channel diff --git a/docs/docsite/rst/user_guide/intro_bsd.rst b/docs/docsite/rst/user_guide/intro_bsd.rst new file mode 100644 index 00000000..68a62f31 --- /dev/null +++ b/docs/docsite/rst/user_guide/intro_bsd.rst @@ -0,0 +1,106 @@ +.. _working_with_bsd: + +Ansible and BSD +=============== + +Managing BSD machines is different from managing other Unix-like machines. If you have managed nodes running BSD, review these topics. + +.. contents:: + :local: + +Connecting to BSD nodes +----------------------- + +Ansible connects to managed nodes using OpenSSH by default. This works on BSD if you use SSH keys for authentication. However, if you use SSH passwords for authentication, Ansible relies on sshpass. 
Most +versions of sshpass do not deal well with BSD login prompts, so when using SSH passwords against BSD machines, use ``paramiko`` to connect instead of OpenSSH. You can do this in ansible.cfg globally or you can set it as an inventory/group/host variable. For example: + +.. code-block:: text + + [freebsd] + mybsdhost1 ansible_connection=paramiko + +.. _bootstrap_bsd: + +Bootstrapping BSD +----------------- + +Ansible is agentless by default, however, it requires Python on managed nodes. Only the :ref:`raw <raw_module>` module will operate without Python. Although this module can be used to bootstrap Ansible and install Python on BSD variants (see below), it is very limited and the use of Python is required to make full use of Ansible's features. + +The following example installs Python 2.7 which includes the json library required for full functionality of Ansible. +On your control machine you can execute the following for most versions of FreeBSD: + +.. code-block:: bash + + ansible -m raw -a "pkg install -y python27" mybsdhost1 + +Or for OpenBSD: + +.. code-block:: bash + + ansible -m raw -a "pkg_add python%3.7" + +Once this is done you can now use other Ansible modules apart from the ``raw`` module. + +.. note:: + This example demonstrated using pkg on FreeBSD and pkg_add on OpenBSD, however you should be able to substitute the appropriate package tool for your BSD; the package name may also differ. Refer to the package list or documentation of the BSD variant you are using for the exact Python package name you intend to install. + +.. BSD_python_location: + +Setting the Python interpreter +------------------------------ + +To support a variety of Unix-like operating systems and distributions, Ansible cannot always rely on the existing environment or ``env`` variables to locate the correct Python binary. By default, modules point at ``/usr/bin/python`` as this is the most common location. 
On BSD variants, this path may differ, so it is advised to inform Ansible of the binary's location, through the ``ansible_python_interpreter`` inventory variable. For example: + +.. code-block:: text + + [freebsd:vars] + ansible_python_interpreter=/usr/local/bin/python2.7 + [openbsd:vars] + ansible_python_interpreter=/usr/local/bin/python3.7 + +If you use additional plugins beyond those bundled with Ansible, you can set similar variables for ``bash``, ``perl`` or ``ruby``, depending on how the plugin is written. For example: + +.. code-block:: text + + [freebsd:vars] + ansible_python_interpreter=/usr/local/bin/python + ansible_perl_interpreter=/usr/bin/perl5 + + +Which modules are available? +---------------------------- + +The majority of the core Ansible modules are written for a combination of Unix-like machines and other generic services, so most should function well on the BSDs with the obvious exception of those that are aimed at Linux-only technologies (such as LVG). + +Using BSD as the control node +----------------------------- + +Using BSD as the control machine is as simple as installing the Ansible package for your BSD variant or by following the ``pip`` or 'from source' instructions. + +.. _bsd_facts: + +BSD facts +--------- + +Ansible gathers facts from the BSDs in a similar manner to Linux machines, but since the data, names and structures can vary for network, disks and other devices, one should expect the output to be slightly different yet still familiar to a BSD administrator. + +.. _bsd_contributions: + +BSD efforts and contributions +----------------------------- + +BSD support is important to us at Ansible. Even though the majority of our contributors use and target Linux we have an active BSD community and strive to be as BSD-friendly as possible. +Please feel free to report any issues or incompatibilities you discover with BSD; pull requests with an included fix are also welcome! + +.. 
seealso:: + + :ref:`intro_adhoc` + Examples of basic commands + :ref:`working_with_playbooks` + Learning ansible's configuration management language + :ref:`developing_modules` + How to write modules + `Mailing List <https://groups.google.com/group/ansible-project>`_ + Questions? Help? Ideas? Stop by the list on Google Groups + `irc.freenode.net <http://irc.freenode.net>`_ + #ansible IRC chat channel diff --git a/docs/docsite/rst/user_guide/intro_dynamic_inventory.rst b/docs/docsite/rst/user_guide/intro_dynamic_inventory.rst new file mode 100644 index 00000000..69016655 --- /dev/null +++ b/docs/docsite/rst/user_guide/intro_dynamic_inventory.rst @@ -0,0 +1,249 @@ +.. _intro_dynamic_inventory: +.. _dynamic_inventory: + +****************************** +Working with dynamic inventory +****************************** + +.. contents:: + :local: + +If your Ansible inventory fluctuates over time, with hosts spinning up and shutting down in response to business demands, the static inventory solutions described in :ref:`inventory` will not serve your needs. You may need to track hosts from multiple sources: cloud providers, LDAP, `Cobbler <https://cobbler.github.io>`_, and/or enterprise CMDB systems. + +Ansible integrates all of these options through a dynamic external inventory system. Ansible supports two ways to connect with external inventory: :ref:`inventory_plugins` and `inventory scripts`. + +Inventory plugins take advantage of the most recent updates to the Ansible core code. We recommend plugins over scripts for dynamic inventory. You can :ref:`write your own plugin <developing_inventory>` to connect to additional dynamic inventory sources. + +You can still use inventory scripts if you choose. When we implemented inventory plugins, we ensured backwards compatibility through the script inventory plugin. The examples below illustrate how to use inventory scripts. 
While primarily used to kick off OS installations and manage DHCP and DNS,
In the example above, the system 'foo.example.com' is addressable by Ansible directly, but is also addressable when using the group names 'webserver' or 'atlanta'.
+ +So, with the template above (``motd.j2``), this results in the following data being written to ``/etc/motd`` for system 'foo': + +.. code-block:: text + + Welcome, I am templated with a value of a=2, b=3, and c=4 + +And on system 'bar' (bar.example.com): + +.. code-block:: text + + Welcome, I am templated with a value of a=2, b=3, and c=5 + +And technically, though there is no major good reason to do it, this also works: + +.. code-block:: bash + + ansible webserver -m ansible.builtin.shell -a "echo {{ a }}" + +So, in other words, you can use those variables in arguments/actions as well. + +.. _openstack_example: + +Inventory script example: OpenStack +=================================== + +If you use an OpenStack-based cloud, instead of manually maintaining your own inventory file, you can use the ``openstack_inventory.py`` dynamic inventory to pull information about your compute instances directly from OpenStack. + +You can download the latest version of the OpenStack inventory script `here <https://raw.githubusercontent.com/openstack/ansible-collections-openstack/master/scripts/inventory/openstack_inventory.py>`_. + +You can use the inventory script explicitly (by passing the `-i openstack_inventory.py` argument to Ansible) or implicitly (by placing the script at `/etc/ansible/hosts`). + +Explicit use of OpenStack inventory script +------------------------------------------ + +Download the latest version of the OpenStack dynamic inventory script and make it executable:: + + wget https://raw.githubusercontent.com/openstack/ansible-collections-openstack/master/scripts/inventory/openstack_inventory.py + chmod +x openstack_inventory.py + +.. note:: + Do not name it `openstack.py`. This name will conflict with imports from openstacksdk. + +Source an OpenStack RC file: + +.. code-block:: bash + + source openstack.rc + +.. 
note:: + + An OpenStack RC file contains the environment variables required by the client tools to establish a connection with the cloud provider, such as the authentication URL, user name, password and region name. For more information on how to download, create or source an OpenStack RC file, please refer to `Set environment variables using the OpenStack RC file <https://docs.openstack.org/user-guide/common/cli_set_environment_variables_using_openstack_rc.html>`_. + +You can confirm the file has been successfully sourced by running a simple command, such as `nova list` and ensuring it returns no errors. + +.. note:: + + The OpenStack command line clients are required to run the `nova list` command. For more information on how to install them, please refer to `Install the OpenStack command-line clients <https://docs.openstack.org/user-guide/common/cli_install_openstack_command_line_clients.html>`_. + +You can test the OpenStack dynamic inventory script manually to confirm it is working as expected:: + + ./openstack_inventory.py --list + +After a few moments you should see some JSON output with information about your compute instances. + +Once you confirm the dynamic inventory script is working as expected, you can tell Ansible to use the `openstack_inventory.py` script as an inventory file, as illustrated below: + +.. code-block:: bash + + ansible -i openstack_inventory.py all -m ansible.builtin.ping + +Implicit use of OpenStack inventory script +------------------------------------------ + +Download the latest version of the OpenStack dynamic inventory script, make it executable and copy it to `/etc/ansible/hosts`: + +.. 
code-block:: bash + + wget https://raw.githubusercontent.com/openstack/ansible-collections-openstack/master/scripts/inventory/openstack_inventory.py + chmod +x openstack_inventory.py + sudo cp openstack_inventory.py /etc/ansible/hosts + +Download the sample configuration file, modify it to suit your needs and copy it to `/etc/ansible/openstack.yml`: + +.. code-block:: bash + + wget https://raw.githubusercontent.com/openstack/ansible-collections-openstack/master/scripts/inventory/openstack.yml + vi openstack.yml + sudo cp openstack.yml /etc/ansible/ + +You can test the OpenStack dynamic inventory script manually to confirm it is working as expected: + +.. code-block:: bash + + /etc/ansible/hosts --list + +After a few moments you should see some JSON output with information about your compute instances. + +Refreshing the cache +-------------------- + +Note that the OpenStack dynamic inventory script will cache results to avoid repeated API calls. To explicitly clear the cache, you can run the openstack_inventory.py (or hosts) script with the ``--refresh`` parameter: + +.. code-block:: bash + + ./openstack_inventory.py --refresh --list + +.. _other_inventory_scripts: + +Other inventory scripts +======================= + +In Ansible 2.10 and later, inventory scripts moved to their associated collections. Many are now in the `community.general scripts/inventory directory <https://github.com/ansible-collections/community.general/tree/main/scripts/inventory>`_. We recommend you use :ref:`inventory_plugins` instead. + +.. _using_multiple_sources: + +Using inventory directories and multiple inventory sources +========================================================== + +If the location given to ``-i`` in Ansible is a directory (or as so configured in ``ansible.cfg``), Ansible can use multiple inventory sources +at the same time. When doing so, it is possible to mix both dynamic and statically managed inventory sources in the same ansible run. Instant +hybrid cloud! 
When defining groups of groups in the static inventory file, the child groups
must also be defined in the static inventory file, otherwise Ansible returns an
error.
_intro_getting_started: + +*************** +Getting Started +*************** + +Now that you have read the :ref:`installation guide<installation_guide>` and installed Ansible on a control node, you are ready to learn how Ansible works. A basic Ansible command or playbook: + +* selects machines to execute against from inventory +* connects to those machines (or network devices, or other managed nodes), usually over SSH +* copies one or more modules to the remote machines and starts execution there + +Ansible can do much more, but you should understand the most common use case before exploring all the powerful configuration, deployment, and orchestration features of Ansible. This page illustrates the basic process with a simple inventory and an ad-hoc command. Once you understand how Ansible works, you can read more details about :ref:`ad-hoc commands<intro_adhoc>`, organize your infrastructure with :ref:`inventory<intro_inventory>`, and harness the full power of Ansible with :ref:`playbooks<playbooks_intro>`. + +.. contents:: + :local: + +Selecting machines from inventory +================================= + +Ansible reads information about which machines you want to manage from your inventory. Although you can pass an IP address to an ad-hoc command, you need inventory to take advantage of the full flexibility and repeatability of Ansible. + +Action: create a basic inventory +-------------------------------- +For this basic inventory, edit (or create) ``/etc/ansible/hosts`` and add a few remote systems to it. For this example, use either IP addresses or FQDNs: + +.. code-block:: text + + 192.0.2.50 + aserver.example.org + bserver.example.org + +Beyond the basics +----------------- +Your inventory can store much more than IPs and FQDNs. You can create :ref:`aliases<inventory_aliases>`, set variable values for a single host with :ref:`host vars<host_variables>`, or set variable values for multiple hosts with :ref:`group vars<group_variables>`. + +.. 
_remote_connection_information: + +Connecting to remote nodes +========================== + +Ansible communicates with remote machines over the `SSH protocol <https://www.ssh.com/ssh/protocol/>`_. By default, Ansible uses native OpenSSH and connects to remote machines using your current user name, just as SSH does. + +Action: check your SSH connections +---------------------------------- +Confirm that you can connect using SSH to all the nodes in your inventory using the same username. If necessary, add your public SSH key to the ``authorized_keys`` file on those systems. + +Beyond the basics +----------------- +You can override the default remote user name in several ways, including: + +* passing the ``-u`` parameter at the command line +* setting user information in your inventory file +* setting user information in your configuration file +* setting environment variables + +See :ref:`general_precedence_rules` for details on the (sometimes unintuitive) precedence of each method of passing user information. You can read more about connections in :ref:`connections`. + +Copying and executing modules +============================= + +Once it has connected, Ansible transfers the modules required by your command or playbook to the remote machine(s) for execution. + +Action: run your first Ansible commands +--------------------------------------- +Use the ping module to ping all the nodes in your inventory: + +.. code-block:: bash + + $ ansible all -m ping + +Now run a live command on all of your nodes: + +.. code-block:: bash + + $ ansible all -a "/bin/echo hello" + +You should see output for each host in your inventory, similar to this: + +.. code-block:: ansible-output + + aserver.example.org | SUCCESS => { + "ansible_facts": { + "discovered_interpreter_python": "/usr/bin/python" + }, + "changed": false, + "ping": "pong" + } + +Beyond the basics +----------------- +By default Ansible uses SFTP to transfer files. 
If the machine or device you want to manage does not support SFTP, you can switch to SCP mode in :ref:`intro_configuration`. The files are placed in a temporary directory and executed from there. + +If you need privilege escalation (sudo and similar) to run a command, pass the ``become`` flags: + +.. code-block:: bash + + # as bruce + $ ansible all -m ping -u bruce + # as bruce, sudoing to root (sudo is default method) + $ ansible all -m ping -u bruce --become + # as bruce, sudoing to batman + $ ansible all -m ping -u bruce --become --become-user batman + +You can read more about privilege escalation in :ref:`become`. + +Congratulations! You have contacted your nodes using Ansible. You used a basic inventory file and an ad-hoc command to direct Ansible to connect to specific remote nodes, copy a module file there and execute it, and return output. You have a fully working infrastructure. + +Resources +================================= +- `Product Demos <https://github.com/ansible/product-demos>`_ +- `Katakoda <https://katacoda.com/rhel-labs>`_ +- `Workshops <https://github.com/ansible/workshops>`_ +- `Ansible Examples <https://github.com/ansible/ansible-examples>`_ +- `Ansible Baseline <https://github.com/ansible/ansible-baseline>`_ + +Next steps +========== +Next you can read about more real-world cases in :ref:`intro_adhoc`, +explore what you can do with different modules, or read about the Ansible +:ref:`working_with_playbooks` language. Ansible is not just about running commands, it +also has powerful configuration management and deployment features. + +.. 
seealso:: + + :ref:`intro_inventory` + More information about inventory + :ref:`intro_adhoc` + Examples of basic commands + :ref:`working_with_playbooks` + Learning Ansible's configuration management language + `Ansible Demos <https://github.com/ansible/product-demos>`_ + Demonstrations of different Ansible usecases + `RHEL Labs <https://katacoda.com/rhel-labs>`_ + Labs to provide further knowledge on different topics + `Mailing List <https://groups.google.com/group/ansible-project>`_ + Questions? Help? Ideas? Stop by the list on Google Groups + `irc.freenode.net <http://irc.freenode.net>`_ + #ansible IRC chat channel diff --git a/docs/docsite/rst/user_guide/intro_inventory.rst b/docs/docsite/rst/user_guide/intro_inventory.rst new file mode 100644 index 00000000..0b8b002c --- /dev/null +++ b/docs/docsite/rst/user_guide/intro_inventory.rst @@ -0,0 +1,788 @@ +.. _intro_inventory: +.. _inventory: + +*************************** +How to build your inventory +*************************** + +Ansible works against multiple managed nodes or "hosts" in your infrastructure at the same time, using a list or group of lists known as inventory. Once your inventory is defined, you use :ref:`patterns <intro_patterns>` to select the hosts or groups you want Ansible to run against. + +The default location for inventory is a file called ``/etc/ansible/hosts``. You can specify a different inventory file at the command line using the ``-i <path>`` option. You can also use multiple inventory files at the same time, and/or pull inventory from dynamic or cloud sources or different formats (YAML, ini, and so on), as described in :ref:`intro_dynamic_inventory`. +Introduced in version 2.4, Ansible has :ref:`inventory_plugins` to make this flexible and customizable. + +.. contents:: + :local: + +.. 
_inventoryformat: + +Inventory basics: formats, hosts, and groups +============================================ + +The inventory file can be in one of many formats, depending on the inventory plugins you have. +The most common formats are INI and YAML. A basic INI ``/etc/ansible/hosts`` might look like this: + +.. code-block:: text + + mail.example.com + + [webservers] + foo.example.com + bar.example.com + + [dbservers] + one.example.com + two.example.com + three.example.com + +The headings in brackets are group names, which are used in classifying hosts +and deciding what hosts you are controlling at what times and for what purpose. +Group names should follow the same guidelines as :ref:`valid_variable_names`. + +Here's that same basic inventory file in YAML format: + +.. code-block:: yaml + + all: + hosts: + mail.example.com: + children: + webservers: + hosts: + foo.example.com: + bar.example.com: + dbservers: + hosts: + one.example.com: + two.example.com: + three.example.com: + +.. _default_groups: + +Default groups +-------------- + +There are two default groups: ``all`` and ``ungrouped``. The ``all`` group contains every host. +The ``ungrouped`` group contains all hosts that don't have another group aside from ``all``. +Every host will always belong to at least 2 groups (``all`` and ``ungrouped`` or ``all`` and some other group). Though ``all`` and ``ungrouped`` are always present, they can be implicit and not appear in group listings like ``group_names``. + +.. _host_multiple_groups: + +Hosts in multiple groups +------------------------ + +You can (and probably will) put each host in more than one group. For example a production webserver in a datacenter in Atlanta might be included in groups called [prod] and [atlanta] and [webservers]. You can create groups that track: + +* What - An application, stack or microservice (for example, database servers, web servers, and so on). 
+* Where - A datacenter or region, to talk to local DNS, storage, and so on (for example, east, west). +* When - The development stage, to avoid testing on production resources (for example, prod, test). + +Extending the previous YAML inventory to include what, when, and where would look like: + +.. code-block:: yaml + + all: + hosts: + mail.example.com: + children: + webservers: + hosts: + foo.example.com: + bar.example.com: + dbservers: + hosts: + one.example.com: + two.example.com: + three.example.com: + east: + hosts: + foo.example.com: + one.example.com: + two.example.com: + west: + hosts: + bar.example.com: + three.example.com: + prod: + hosts: + foo.example.com: + one.example.com: + two.example.com: + test: + hosts: + bar.example.com: + three.example.com: + +You can see that ``one.example.com`` exists in the ``dbservers``, ``east``, and ``prod`` groups. + +You can also use nested groups to simplify ``prod`` and ``test`` in this inventory, for the same result: + +.. code-block:: yaml + + all: + hosts: + mail.example.com: + children: + webservers: + hosts: + foo.example.com: + bar.example.com: + dbservers: + hosts: + one.example.com: + two.example.com: + three.example.com: + east: + hosts: + foo.example.com: + one.example.com: + two.example.com: + west: + hosts: + bar.example.com: + three.example.com: + prod: + children: + east: + test: + children: + west: + +You can find more examples on how to organize your inventories and group your hosts in :ref:`inventory_setup_examples`. + +Adding ranges of hosts +---------------------- + +If you have a lot of hosts with a similar pattern, you can add them as a range rather than listing each hostname separately: + +In INI: + +.. code-block:: text + + [webservers] + www[01:50].example.com + +In YAML: + +.. code-block:: yaml + + ... + webservers: + hosts: + www[01:50].example.com: + +You can specify a stride (increments between sequence numbers) when defining a numeric range of hosts: + +In INI: + +.. 
code-block:: text + + [webservers] + www[01:50:2].example.com + +In YAML: + +.. code-block:: yaml + + ... + webservers: + hosts: + www[01:50:2].example.com: + +For numeric patterns, leading zeros can be included or removed, as desired. Ranges are inclusive. You can also define alphabetic ranges: + +.. code-block:: text + + [databases] + db-[a:f].example.com + +.. _variables_in_inventory: + +Adding variables to inventory +============================= + +You can store variable values that relate to a specific host or group in inventory. To start with, you may add variables directly to the hosts and groups in your main inventory file. As you add more and more managed nodes to your Ansible inventory, however, you will likely want to store variables in separate host and group variable files. See :ref:`define_variables_in_inventory` for details. + +.. _host_variables: + +Assigning a variable to one machine: host variables +=================================================== + +You can easily assign a variable to a single host, then use it later in playbooks. In INI: + +.. code-block:: text + + [atlanta] + host1 http_port=80 maxRequestsPerChild=808 + host2 http_port=303 maxRequestsPerChild=909 + +In YAML: + +.. code-block:: yaml + + atlanta: + host1: + http_port: 80 + maxRequestsPerChild: 808 + host2: + http_port: 303 + maxRequestsPerChild: 909 + +Unique values like non-standard SSH ports work well as host variables. You can add them to your Ansible inventory by adding the port number after the hostname with a colon: + +.. code-block:: text + + badwolf.example.com:5309 + +Connection variables also work well as host variables: + +.. code-block:: text + + [targets] + + localhost ansible_connection=local + other1.example.com ansible_connection=ssh ansible_user=myuser + other2.example.com ansible_connection=ssh ansible_user=myotheruser + +.. 
note:: If you list non-standard SSH ports in your SSH config file, the ``openssh`` connection will find and use them, but the ``paramiko`` connection will not. + +.. _inventory_aliases: + +Inventory aliases +----------------- + +You can also define aliases in your inventory: + +In INI: + +.. code-block:: text + + jumper ansible_port=5555 ansible_host=192.0.2.50 + +In YAML: + +.. code-block:: yaml + + ... + hosts: + jumper: + ansible_port: 5555 + ansible_host: 192.0.2.50 + +In the above example, running Ansible against the host alias "jumper" will connect to 192.0.2.50 on port 5555. See :ref:`behavioral inventory parameters <behavioral_parameters>` to further customize the connection to hosts. + +.. note:: + Values passed in the INI format using the ``key=value`` syntax are interpreted differently depending on where they are declared: + + * When declared inline with the host, INI values are interpreted as Python literal structures (strings, numbers, tuples, lists, dicts, booleans, None). Host lines accept multiple ``key=value`` parameters per line. Therefore they need a way to indicate that a space is part of a value rather than a separator. + + * When declared in a ``:vars`` section, INI values are interpreted as strings. For example ``var=FALSE`` would create a string equal to 'FALSE'. Unlike host lines, ``:vars`` sections accept only a single entry per line, so everything after the ``=`` must be the value for the entry. + + * If a variable value set in an INI inventory must be a certain type (for example, a string or a boolean value), always specify the type with a filter in your task. Do not rely on types set in INI inventories when consuming variables. + + * Consider using YAML format for inventory sources to avoid confusion on the actual type of a variable. The YAML inventory plugin processes variable values consistently and correctly. + +Generally speaking, this is not the best way to define variables that describe your system policy. 
Setting variables in the main inventory file is only a shorthand. See :ref:`splitting_out_vars` for guidelines on storing variable values in individual files in the 'host_vars' directory. + +.. _group_variables: + +Assigning a variable to many machines: group variables +====================================================== + +If all hosts in a group share a variable value, you can apply that variable to an entire group at once. In INI: + +.. code-block:: text + + [atlanta] + host1 + host2 + + [atlanta:vars] + ntp_server=ntp.atlanta.example.com + proxy=proxy.atlanta.example.com + +In YAML: + +.. code-block:: yaml + + atlanta: + hosts: + host1: + host2: + vars: + ntp_server: ntp.atlanta.example.com + proxy: proxy.atlanta.example.com + +Group variables are a convenient way to apply variables to multiple hosts at once. Before executing, however, Ansible always flattens variables, including inventory variables, to the host level. If a host is a member of multiple groups, Ansible reads variable values from all of those groups. If you assign different values to the same variable in different groups, Ansible chooses which value to use based on internal :ref:`rules for merging <how_we_merge>`. + +.. _subgroups: + +Inheriting variable values: group variables for groups of groups +---------------------------------------------------------------- + +You can make groups of groups using the ``:children`` suffix in INI or the ``children:`` entry in YAML. +You can apply variables to these groups of groups using ``:vars`` or ``vars:``: + +In INI: + +.. code-block:: text + + [atlanta] + host1 + host2 + + [raleigh] + host2 + host3 + + [southeast:children] + atlanta + raleigh + + [southeast:vars] + some_server=foo.southeast.example.com + halon_system_timeout=30 + self_destruct_countdown=60 + escape_pods=2 + + [usa:children] + southeast + northeast + southwest + northwest + +In YAML: + +.. 
- Any host that is a member of a child group is automatically a member of the parent group.
 + - A child group's variables will have higher precedence than (will override) a parent group's variables.
code-block:: bash + + /etc/ansible/group_vars/raleigh # can optionally end in '.yml', '.yaml', or '.json' + /etc/ansible/group_vars/webservers + /etc/ansible/host_vars/foosball + +For example, if you group hosts in your inventory by datacenter, and each datacenter uses its own NTP server and database server, you can create a file called ``/etc/ansible/group_vars/raleigh`` to store the variables for the ``raleigh`` group: + +.. code-block:: yaml + + --- + ntp_server: acme.example.org + database_server: storage.example.org + +You can also create *directories* named after your groups or hosts. Ansible will read all the files in these directories in lexicographical order. An example with the 'raleigh' group: + +.. code-block:: bash + + /etc/ansible/group_vars/raleigh/db_settings + /etc/ansible/group_vars/raleigh/cluster_settings + +All hosts in the 'raleigh' group will have the variables defined in these files +available to them. This can be very useful to keep your variables organized when a single +file gets too big, or when you want to use :ref:`Ansible Vault<playbooks_vault>` on some group variables. + +You can also add ``group_vars/`` and ``host_vars/`` directories to your playbook directory. The ``ansible-playbook`` command looks for these directories in the current working directory by default. Other Ansible commands (for example, ``ansible``, ``ansible-console``, and so on) will only look for ``group_vars/`` and ``host_vars/`` in the inventory directory. If you want other commands to load group and host variables from a playbook directory, you must provide the ``--playbook-dir`` option on the command line. +If you load inventory files from both the playbook directory and the inventory directory, variables in the playbook directory will override variables set in the inventory directory. + +Keeping your inventory file and variables in a git repo (or other version control) +is an excellent way to track changes to your inventory and host variables. + +.. 
For example, a_group will be merged with b_group, and b_group vars that match will overwrite the ones in a_group.
This can be useful when you want to target normally +separate environments, like staging and production, at the same time for a specific action. + +Target two sources from the command line like this: + +.. code-block:: bash + + ansible-playbook get_logs.yml -i staging -i production + +Keep in mind that if there are variable conflicts in the inventories, they are resolved according +to the rules described in :ref:`how_we_merge` and :ref:`ansible_variable_precedence`. +The merging order is controlled by the order of the inventory source parameters. +If ``[all:vars]`` in staging inventory defines ``myvar = 1``, but production inventory defines ``myvar = 2``, +the playbook will be run with ``myvar = 2``. The result would be reversed if the playbook was run with +``-i production -i staging``. + +**Aggregating inventory sources with a directory** + +You can also create an inventory by combining multiple inventory sources and source types under a directory. +This can be useful for combining static and dynamic hosts and managing them as one inventory. +The following inventory combines an inventory plugin source, a dynamic inventory script, +and a file with static hosts: + +.. code-block:: text + + inventory/ + openstack.yml # configure inventory plugin to get hosts from Openstack cloud + dynamic-inventory.py # add additional hosts with dynamic inventory script + static-inventory # add static hosts and groups + group_vars/ + all.yml # assign variables to all hosts + +You can target this inventory directory simply like this: + +.. code-block:: bash + + ansible-playbook example.yml -i inventory + +It can be useful to control the merging order of the inventory sources if there's variable +conflicts or group of groups dependencies to the other inventory sources. The inventories +are merged in ASCII order according to the filenames so the result can +be controlled by adding prefixes to the files: + +.. 
code-block:: text + + inventory/ + 01-openstack.yml # configure inventory plugin to get hosts from Openstack cloud + 02-dynamic-inventory.py # add additional hosts with dynamic inventory script + 03-static-inventory # add static hosts + group_vars/ + all.yml # assign variables to all hosts + +If ``01-openstack.yml`` defines ``myvar = 1`` for the group ``all``, ``02-dynamic-inventory.py`` defines ``myvar = 2``, +and ``03-static-inventory`` defines ``myvar = 3``, the playbook will be run with ``myvar = 3``. + +For more details on inventory plugins and dynamic inventory scripts see :ref:`inventory_plugins` and :ref:`intro_dynamic_inventory`. + +.. _behavioral_parameters: + +Connecting to hosts: behavioral inventory parameters +==================================================== + +As described above, setting the following variables controls how Ansible interacts with remote hosts. + +Host connection: + +.. include:: shared_snippets/SSH_password_prompt.txt + +ansible_connection + Connection type to the host. This can be the name of any of ansible's connection plugins. SSH protocol types are ``smart``, ``ssh`` or ``paramiko``. The default is ``smart``. Non-SSH based types are described in the next section. + +General for all connections: + +ansible_host + The name of the host to connect to, if different from the alias you wish to give to it. +ansible_port + The connection port number, if not the default (22 for ssh) +ansible_user + The user name to use when connecting to the host +ansible_password + The password to use to authenticate to the host (never store this variable in plain text; always use a vault. See :ref:`tip_for_variables_and_vaults`) + + +Specific to the SSH connection: + +ansible_ssh_private_key_file + Private key file used by ssh. Useful if using multiple keys and you don't want to use SSH agent. +ansible_ssh_common_args + This setting is always appended to the default command line for :command:`sftp`, :command:`scp`, + and :command:`ssh`. 
Useful to configure a ``ProxyCommand`` for a certain host (or + group). +ansible_sftp_extra_args + This setting is always appended to the default :command:`sftp` command line. +ansible_scp_extra_args + This setting is always appended to the default :command:`scp` command line. +ansible_ssh_extra_args + This setting is always appended to the default :command:`ssh` command line. +ansible_ssh_pipelining + Determines whether or not to use SSH pipelining. This can override the ``pipelining`` setting in :file:`ansible.cfg`. +ansible_ssh_executable (added in version 2.2) + This setting overrides the default behavior to use the system :command:`ssh`. This can override the ``ssh_executable`` setting in :file:`ansible.cfg`. + + +Privilege escalation (see :ref:`Ansible Privilege Escalation<become>` for further details): + +ansible_become + Equivalent to ``ansible_sudo`` or ``ansible_su``, allows you to force privilege escalation +ansible_become_method + Allows you to set the privilege escalation method +ansible_become_user + Equivalent to ``ansible_sudo_user`` or ``ansible_su_user``, allows you to set the user you become through privilege escalation +ansible_become_password + Equivalent to ``ansible_sudo_password`` or ``ansible_su_password``, allows you to set the privilege escalation password (never store this variable in plain text; always use a vault. See :ref:`tip_for_variables_and_vaults`) +ansible_become_exe + Equivalent to ``ansible_sudo_exe`` or ``ansible_su_exe``, allows you to set the executable for the escalation method selected +ansible_become_flags + Equivalent to ``ansible_sudo_flags`` or ``ansible_su_flags``, allows you to set the flags passed to the selected escalation method. This can also be set globally in :file:`ansible.cfg` in the ``sudo_flags`` option + +Remote host environment parameters: + +.. _ansible_shell_type: + +ansible_shell_type + The shell type of the target system. 
You should not use this setting unless you have set the + :ref:`ansible_shell_executable<ansible_shell_executable>` to a non-Bourne (sh) compatible shell. By default commands are + formatted using ``sh``-style syntax. Setting this to ``csh`` or ``fish`` will cause commands + executed on target systems to follow those shells' syntax instead. + +.. _ansible_python_interpreter: + +ansible_python_interpreter + The target host python path. This is useful for systems with more + than one Python or not located at :command:`/usr/bin/python` such as \*BSD, or where :command:`/usr/bin/python` + is not a 2.X series Python. We do not use the :command:`/usr/bin/env` mechanism as that requires the remote user's + path to be set right and also assumes the :program:`python` executable is named python, where the executable might + be named something like :program:`python2.6`. + +ansible_*_interpreter + Works for anything such as ruby or perl and works just like :ref:`ansible_python_interpreter<ansible_python_interpreter>`. + This replaces the shebang of modules which will run on that host. + +.. versionadded:: 2.1 + +.. _ansible_shell_executable: + +ansible_shell_executable + This sets the shell the ansible controller will use on the target machine, + overrides ``executable`` in :file:`ansible.cfg` which defaults to + :command:`/bin/sh`. You should really only change it if it is not possible + to use :command:`/bin/sh` (in other words, if :command:`/bin/sh` is not installed on the target + machine or cannot be run from sudo). + +Examples from an Ansible-INI host file: + +.. 
code-block:: text + + some_host ansible_port=2222 ansible_user=manager + aws_host ansible_ssh_private_key_file=/home/example/.ssh/aws.pem + freebsd_host ansible_python_interpreter=/usr/local/bin/python + ruby_module_host ansible_ruby_interpreter=/usr/bin/ruby.1.9.3 + +Non-SSH connection types +------------------------ + +As stated in the previous section, Ansible executes playbooks over SSH but it is not limited to this connection type. +With the host specific parameter ``ansible_connection=<connector>``, the connection type can be changed. +The following non-SSH based connectors are available: + +**local** + +This connector can be used to deploy the playbook to the control machine itself. + +**docker** + +This connector deploys the playbook directly into Docker containers using the local Docker client. The following parameters are processed by this connector: + +ansible_host + The name of the Docker container to connect to. +ansible_user + The user name to operate within the container. The user must exist inside the container. +ansible_become + If set to ``true`` the ``become_user`` will be used to operate within the container. +ansible_docker_extra_args + Could be a string with any additional arguments understood by Docker, which are not command specific. This parameter is mainly used to configure a remote Docker daemon to use. + +Here is an example of how to instantly deploy to created containers: + +.. 
code-block:: yaml + + - name: Create a jenkins container + community.general.docker_container: + docker_host: myserver.net:4243 + name: my_jenkins + image: jenkins + + - name: Add the container to inventory + ansible.builtin.add_host: + name: my_jenkins + ansible_connection: docker + ansible_docker_extra_args: "--tlsverify --tlscacert=/path/to/ca.pem --tlscert=/path/to/client-cert.pem --tlskey=/path/to/client-key.pem -H=tcp://myserver.net:4243" + ansible_user: jenkins + changed_when: false + + - name: Create a directory for ssh keys + delegate_to: my_jenkins + ansible.builtin.file: + path: "/var/jenkins_home/.ssh/jupiter" + state: directory + +For a full list with available plugins and examples, see :ref:`connection_plugin_list`. + +.. note:: If you're reading the docs from the beginning, this may be the first example you've seen of an Ansible playbook. This is not an inventory file. + Playbooks will be covered in great detail later in the docs. + +.. _inventory_setup_examples: + +Inventory setup examples +======================== + +See also :ref:`sample_setup`, which shows inventory along with playbooks and other Ansible artifacts. + +.. _inventory_setup-per_environment: + +Example: One inventory per environment +-------------------------------------- + +If you need to manage multiple environments it's sometimes prudent to +have only hosts of a single environment defined per inventory. This +way, it is harder to, for instance, accidentally change the state of +nodes inside the "test" environment when you actually wanted to update +some "staging" servers. + +For the example mentioned above you could have an +:file:`inventory_test` file: + +.. code-block:: ini + + [dbservers] + db01.test.example.com + db02.test.example.com + + [appservers] + app01.test.example.com + app02.test.example.com + app03.test.example.com + +That file only includes hosts that are part of the "test" +environment. 
Define the "staging" machines in another file +called :file:`inventory_staging`: + +.. code-block:: ini + + [dbservers] + db01.staging.example.com + db02.staging.example.com + + [appservers] + app01.staging.example.com + app02.staging.example.com + app03.staging.example.com + +To apply a playbook called :file:`site.yml` +to all the app servers in the test environment, use the +following command:: + + ansible-playbook -i inventory_test site.yml -l appservers + +.. _inventory_setup-per_function: + +Example: Group by function +-------------------------- + +In the previous section you already saw an example for using groups in +order to cluster hosts that have the same function. This allows you, +for instance, to define firewall rules inside a playbook or role +without affecting database servers: + +.. code-block:: yaml + + - hosts: dbservers + tasks: + - name: Allow access from 10.0.0.1 + ansible.builtin.iptables: + chain: INPUT + jump: ACCEPT + source: 10.0.0.1 + +.. _inventory_setup-per_location: + +Example: Group by location +-------------------------- + +Other tasks might be focused on where a certain host is located. Let's +say that ``db01.test.example.com`` and ``app01.test.example.com`` are +located in DC1 while ``db02.test.example.com`` is in DC2: + +.. code-block:: ini + + [dc1] + db01.test.example.com + app01.test.example.com + + [dc2] + db02.test.example.com + +In practice, you might even end up mixing all these setups as you +might need to, on one day, update all nodes in a specific data center +while, on another day, update all the application servers no matter +their location. + +.. seealso:: + + :ref:`inventory_plugins` + Pulling inventory from dynamic or static sources + :ref:`intro_dynamic_inventory` + Pulling inventory from dynamic sources, such as cloud providers + :ref:`intro_adhoc` + Examples of basic commands + :ref:`working_with_playbooks` + Learning Ansible's configuration, deployment, and orchestration language. 
+ `Mailing List <https://groups.google.com/group/ansible-project>`_ + Questions? Help? Ideas? Stop by the list on Google Groups + `irc.freenode.net <http://irc.freenode.net>`_ + #ansible IRC chat channel diff --git a/docs/docsite/rst/user_guide/intro_patterns.rst b/docs/docsite/rst/user_guide/intro_patterns.rst new file mode 100644 index 00000000..edc25ad6 --- /dev/null +++ b/docs/docsite/rst/user_guide/intro_patterns.rst @@ -0,0 +1,171 @@ +.. _intro_patterns: + +Patterns: targeting hosts and groups +==================================== + +When you execute Ansible through an ad-hoc command or by running a playbook, you must choose which managed nodes or groups you want to execute against. Patterns let you run commands and playbooks against specific hosts and/or groups in your inventory. An Ansible pattern can refer to a single host, an IP address, an inventory group, a set of groups, or all hosts in your inventory. Patterns are highly flexible - you can exclude or require subsets of hosts, use wildcards or regular expressions, and more. Ansible executes on all inventory hosts included in the pattern. + +.. contents:: + :local: + +Using patterns +-------------- + +You use a pattern almost any time you execute an ad-hoc command or a playbook. The pattern is the only element of an :ref:`ad-hoc command<intro_adhoc>` that has no flag. It is usually the second element:: + + ansible <pattern> -m <module_name> -a "<module options>" + +For example:: + + ansible webservers -m service -a "name=httpd state=restarted" + +In a playbook the pattern is the content of the ``hosts:`` line for each play: + +.. code-block:: yaml + + - name: <play_name> + hosts: <pattern> + +For example:: + + - name: restart webservers + hosts: webservers + +Since you often want to run a command or playbook against multiple hosts at once, patterns often refer to inventory groups. Both the ad-hoc command and the playbook above will execute against all machines in the ``webservers`` group. + +.. 
_common_patterns: + +Common patterns +--------------- + +This table lists common patterns for targeting inventory hosts and groups. + +.. table:: + :class: documentation-table + + ====================== ================================ =================================================== + Description Pattern(s) Targets + ====================== ================================ =================================================== + All hosts all (or \*) + + One host host1 + + Multiple hosts host1:host2 (or host1,host2) + + One group webservers + + Multiple groups webservers:dbservers all hosts in webservers plus all hosts in dbservers + + Excluding groups webservers:!atlanta all hosts in webservers except those in atlanta + + Intersection of groups webservers:&staging any hosts in webservers that are also in staging + ====================== ================================ =================================================== + +.. note:: You can use either a comma (``,``) or a colon (``:``) to separate a list of hosts. The comma is preferred when dealing with ranges and IPv6 addresses. + +Once you know the basic patterns, you can combine them. This example:: + + webservers:dbservers:&staging:!phoenix + +targets all machines in the groups 'webservers' and 'dbservers' that are also in +the group 'staging', except any machines in the group 'phoenix'. + +You can use wildcard patterns with FQDNs or IP addresses, as long as the hosts are named in your inventory by FQDN or IP address:: + + 192.0.\* + \*.example.com + \*.com + +You can mix wildcard patterns and groups at the same time:: + + one*.com:dbservers + +Limitations of patterns +----------------------- + +Patterns depend on inventory. If a host or group is not listed in your inventory, you cannot use a pattern to target it. If your pattern includes an IP address or hostname that does not appear in your inventory, you will see an error like this: + +.. 
code-block:: text + + [WARNING]: No inventory was parsed, only implicit localhost is available + [WARNING]: Could not match supplied host pattern, ignoring: *.not_in_inventory.com + +Your pattern must match your inventory syntax. If you define a host as an :ref:`alias<inventory_aliases>`: + +.. code-block:: yaml + + atlanta: + host1: + http_port: 80 + maxRequestsPerChild: 808 + host: 127.0.0.2 + +you must use the alias in your pattern. In the example above, you must use ``host1`` in your pattern. If you use the IP address, you will once again get the error:: + + [WARNING]: Could not match supplied host pattern, ignoring: 127.0.0.2 + +Advanced pattern options +------------------------ + +The common patterns described above will meet most of your needs, but Ansible offers several other ways to define the hosts and groups you want to target. + +Using variables in patterns +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +You can use variables to enable passing group specifiers via the ``-e`` argument to ansible-playbook:: + + webservers:!{{ excluded }}:&{{ required }} + +Using group position in patterns +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +You can define a host or subset of hosts by its position in a group. For example, given the following group:: + + [webservers] + cobweb + webbing + weber + +you can use subscripts to select individual hosts or ranges within the webservers group:: + + webservers[0] # == cobweb + webservers[-1] # == weber + webservers[0:2] # == webservers[0],webservers[1] + # == cobweb,webbing + webservers[1:] # == webbing,weber + webservers[:3] # == cobweb,webbing,weber + +Using regexes in patterns +^^^^^^^^^^^^^^^^^^^^^^^^^ + +You can specify a pattern as a regular expression by starting the pattern with ``~``:: + + ~(web|db).*\.example\.com + +Patterns and ansible-playbook flags +----------------------------------- + +You can change the behavior of the patterns defined in playbooks using command-line options. 
For example, you can run a playbook that defines ``hosts: all`` on a single host by specifying ``-i 127.0.0.2,`` (note the trailing comma). This works even if the host you target is not defined in your inventory. You can also limit the hosts you target on a particular run with the ``--limit`` flag:: + + ansible-playbook site.yml --limit datacenter2 + +Finally, you can use ``--limit`` to read the list of hosts from a file by prefixing the file name with ``@``:: + + ansible-playbook site.yml --limit @retry_hosts.txt + +If :ref:`RETRY_FILES_ENABLED` is set to ``True``, a ``.retry`` file will be created after the ``ansible-playbook`` run containing a list of failed hosts from all plays. This file is overwritten each time ``ansible-playbook`` finishes running:: + + ansible-playbook site.yml --limit @site.retry + +To apply your knowledge of patterns with Ansible commands and playbooks, read :ref:`intro_adhoc` and :ref:`playbooks_intro`. + +.. seealso:: + + :ref:`intro_adhoc` + Examples of basic commands + :ref:`working_with_playbooks` + Learning the Ansible configuration management language + `Mailing List <https://groups.google.com/group/ansible-project>`_ + Questions? Help? Ideas? Stop by the list on Google Groups + `irc.freenode.net <http://irc.freenode.net>`_ + #ansible IRC chat channel diff --git a/docs/docsite/rst/user_guide/intro_windows.rst b/docs/docsite/rst/user_guide/intro_windows.rst new file mode 100644 index 00000000..ba81f6d6 --- /dev/null +++ b/docs/docsite/rst/user_guide/intro_windows.rst @@ -0,0 +1,4 @@ +Windows Support +=============== + +This page has been split up and moved to the new section :ref:`windows`. diff --git a/docs/docsite/rst/user_guide/modules.rst b/docs/docsite/rst/user_guide/modules.rst new file mode 100644 index 00000000..70dac884 --- /dev/null +++ b/docs/docsite/rst/user_guide/modules.rst @@ -0,0 +1,36 @@ +.. _working_with_modules: + +Working With Modules +==================== + +.. 
toctree:: + :maxdepth: 1 + + modules_intro + modules_support + ../reference_appendices/common_return_values + + +Ansible ships with a number of modules (called the 'module library') +that can be executed directly on remote hosts or through :ref:`Playbooks <working_with_playbooks>`. + +Users can also write their own modules. These modules can control system resources, +like services, packages, or files (anything really), or handle executing system commands. + + +.. seealso:: + + :ref:`intro_adhoc` + Examples of using modules in /usr/bin/ansible + :ref:`playbooks_intro` + Introduction to using modules with /usr/bin/ansible-playbook + :ref:`developing_modules_general` + How to write your own modules + :ref:`developing_api` + Examples of using modules with the Python API + :ref:`interpreter_discovery` + Configuring the right Python interpreter on target hosts + `Mailing List <https://groups.google.com/group/ansible-project>`_ + Questions? Help? Ideas? Stop by the list on Google Groups + `irc.freenode.net <http://irc.freenode.net>`_ + #ansible IRC chat channel diff --git a/docs/docsite/rst/user_guide/modules_intro.rst b/docs/docsite/rst/user_guide/modules_intro.rst new file mode 100644 index 00000000..bb6d2cd7 --- /dev/null +++ b/docs/docsite/rst/user_guide/modules_intro.rst @@ -0,0 +1,52 @@ +.. _intro_modules: + +Introduction to modules +======================= + +Modules (also referred to as "task plugins" or "library plugins") are discrete units of code that can be used from the command line or in a playbook task. Ansible executes each module, usually on the remote managed node, and collects return values. In Ansible 2.10 and later, most modules are hosted in collections. + +You can execute modules from the command line:: + + ansible webservers -m service -a "name=httpd state=started" + ansible webservers -m ping + ansible webservers -m command -a "/sbin/reboot -t now" + +Each module supports taking arguments. 
Nearly all modules take ``key=value`` arguments, space delimited. Some modules take no arguments, and the command/shell modules simply take the string of the command you want to run. + +From playbooks, Ansible modules are executed in a very similar way:: + + - name: reboot the servers + command: /sbin/reboot -t now + +Another way to pass arguments to a module is using YAML syntax, also called 'complex args' :: + + - name: restart webserver + service: + name: httpd + state: restarted + +All modules return JSON format data. This means modules can be written in any programming language. Modules should be idempotent, and should avoid making any changes if they detect that the current state matches the desired final state. When used in an Ansible playbook, modules can trigger 'change events' in the form of notifying :ref:`handlers <handlers>` to run additional tasks. + +You can access the documentation for each module from the command line with the ansible-doc tool:: + + ansible-doc yum + +For a list of all available modules, see the :ref:`Collection docs <list_of_collections>`, or run the following at a command prompt:: + + ansible-doc -l + + +.. seealso:: + + :ref:`intro_adhoc` + Examples of using modules in /usr/bin/ansible + :ref:`working_with_playbooks` + Examples of using modules with /usr/bin/ansible-playbook + :ref:`developing_modules` + How to write your own modules + :ref:`developing_api` + Examples of using modules with the Python API + `Mailing List <https://groups.google.com/group/ansible-project>`_ + Questions? Help? Ideas? Stop by the list on Google Groups + `irc.freenode.net <http://irc.freenode.net>`_ + #ansible IRC chat channel diff --git a/docs/docsite/rst/user_guide/modules_support.rst b/docs/docsite/rst/user_guide/modules_support.rst new file mode 100644 index 00000000..6faa7333 --- /dev/null +++ b/docs/docsite/rst/user_guide/modules_support.rst @@ -0,0 +1,70 @@ +.. 
_modules_support: + +**************************** +Module Maintenance & Support +**************************** + +If you are using a module and you discover a bug, you may want to know where to report that bug, who is responsible for fixing it, and how you can track changes to the module. If you are a Red Hat subscriber, you may want to know whether you can get support for the issue you are facing. + +Starting in Ansible 2.10, most modules live in collections. The distribution method for each collection reflects the maintenance and support for the modules in that collection. + +.. contents:: + :local: + +Maintenance +=========== + +.. table:: + :class: documentation-table + + ============================= ========================================== ========================== + Collection Code location Maintained by + ============================= ========================================== ========================== + ansible.builtin `ansible/ansible repo`_ on GitHub core team + + distributed on Galaxy various; follow ``repo`` link community or partners + + distributed on Automation Hub various; follow ``repo`` link content team or partners + ============================= ========================================== ========================== + +.. _ansible/ansible repo: https://github.com/ansible/ansible/tree/devel/lib/ansible/modules + +Issue Reporting +=============== + +If you find a bug that affects a plugin in the main Ansible repo, also known as ``ansible-base``: + + #. Confirm that you are running the latest stable version of Ansible or the devel branch. + #. Look at the `issue tracker in the Ansible repo <https://github.com/ansible/ansible/issues>`_ to see if an issue has already been filed. + #. Create an issue if one does not already exist. Include as much detail as you can about the behavior you discovered. + +If you find a bug that affects a plugin in a Galaxy collection: + + #. Find the collection on Galaxy. + #. Find the issue tracker for the collection. 
+ #. Look there to see if an issue has already been filed. + #. Create an issue if one does not already exist. Include as much detail as you can about the behavior you discovered. + +Some partner collections may be hosted in private repositories. + +If you are not sure whether the behavior you see is a bug, if you have questions, if you want to discuss development-oriented topics, or if you just want to get in touch, use one of our Google groups or IRC channels to :ref:`communicate with Ansiblers <communication>`. + +If you find a bug that affects a module in an Automation Hub collection: + + #. If the collection offers an Issue Tracker link on Automation Hub, click there and open an issue on the collection repository. If it does not, follow the standard process for reporting issues on the `Red Hat Customer Portal <https://access.redhat.com/>`_. You must have a subscription to the Red Hat Ansible Automation Platform to create an issue on the portal. + +Support +======= + +All plugins that remain in ``ansible-base`` and all collections hosted in Automation Hub are supported by Red Hat. No other plugins or collections are supported by Red Hat. If you have a subscription to the Red Hat Ansible Automation Platform, you can find more information and resources on the `Red Hat Customer Portal. <https://access.redhat.com/>`_ + +.. seealso:: + + :ref:`intro_adhoc` + Examples of using modules in /usr/bin/ansible + :ref:`working_with_playbooks` + Examples of using modules with /usr/bin/ansible-playbook + `Mailing List <https://groups.google.com/group/ansible-project>`_ + Questions? Help? Ideas? 
Stop by the list on Google Groups + `irc.freenode.net <http://irc.freenode.net>`_ + #ansible IRC chat channel diff --git a/docs/docsite/rst/user_guide/playbook_pathing.rst b/docs/docsite/rst/user_guide/playbook_pathing.rst new file mode 100644 index 00000000..7fc6059b --- /dev/null +++ b/docs/docsite/rst/user_guide/playbook_pathing.rst @@ -0,0 +1,42 @@ +:orphan: + +*********************** +Search paths in Ansible +*********************** + +You can control the paths Ansible searches to find resources on your control node (including configuration, modules, roles, ssh keys, and more) as well as resources on the remote nodes you are managing. Use absolute paths to tell Ansible where to find resources whenever you can. However, absolute paths are not always practical. This page covers how Ansible interprets relative search paths, along with ways to troubleshoot when Ansible cannot find the resource you need. + +.. contents:: + :local: + +Config paths +============ + +By default these should be relative to the config file, some are specifically relative to the current working directory or the playbook and should have this noted in their description. Things like ssh keys are left to use the current working directory because it mirrors how the underlying tools would use it. + + +Task paths +========== + +Task paths include two different scopes: task evaluation and task execution. For task evaluation, all paths are local, like in lookups. For task execution, which usually happens on the remote nodes, local paths do not usually apply. However, if a task uses an action plugin, it uses a local path. The template and copy modules are examples of modules that use action plugins, and therefore use local paths. 
+ +The magic of 'local' paths +-------------------------- + +Lookups and action plugins both use a special 'search magic' to find things, taking the current play into account. They search from the most specific to the most general playbook directory in which a task is contained (this includes roles and includes). + +Using this magic, relative paths get attempted first with a 'files|templates|vars' appended (if not already present), depending on the action being taken; 'files' is the default (in other words, include_vars will use vars/). The paths will be searched from most specific to most general (in other words, role before play). +Dependent roles WILL be traversed (in other words, if a task is in role2, and role2 is a dependency of role1, role2 will be looked at first, then role1, then the play). +For example:: + + role search path is rolename/{files|vars|templates}/, rolename/tasks/. + play search path is playdir/{files|vars|templates}/, playdir/. + + +By default, Ansible does not search the current working directory unless it happens to coincide with one of the paths above. If you ``include`` a task file from a role, it will NOT trigger role behavior; this only happens when running as a role, while ``include_role`` will work. A new variable, ``ansible_search_path``, will contain the search path used, in order (but without the appended subdirs). Using 5 "v"s (``-vvvvv``) should show the detail of the search as it happens. + +As for includes, they try the path of the included file first and fall back to the play/role that includes them. + + + +.. note:: The current working directory might vary depending on the connection plugin and if the action is local or remote. For the remote it is normally the directory on which the login shell puts the user. For local it is either the directory you executed ansible from or in some cases the playbook directory. 
diff --git a/docs/docsite/rst/user_guide/playbooks.rst b/docs/docsite/rst/user_guide/playbooks.rst new file mode 100644 index 00000000..8c851c12 --- /dev/null +++ b/docs/docsite/rst/user_guide/playbooks.rst @@ -0,0 +1,21 @@ +.. _working_with_playbooks: + +Working with playbooks +====================== + +Playbooks record and execute Ansible's configuration, deployment, and orchestration functions. They can describe a policy you want your remote systems to enforce, or a set of steps in a general IT process. + +If Ansible modules are the tools in your workshop, playbooks are your instruction manuals, and your inventory of hosts are your raw material. + +At a basic level, playbooks can be used to manage configurations of and deployments to remote machines. At a more advanced level, they can sequence multi-tier rollouts involving rolling updates, and can delegate actions to other hosts, interacting with monitoring servers and load balancers along the way. + +Playbooks are designed to be human-readable and are developed in a basic text language. There are multiple ways to organize playbooks and the files they include, and we'll offer up some suggestions on that and making the most out of Ansible. + +You should look at `Example Playbooks <https://github.com/ansible/ansible-examples>`_ while reading along with the playbook documentation. These illustrate best practices as well as how to put many of the various concepts together. + +.. toctree:: + :maxdepth: 2 + + playbooks_templating + playbooks_special_topics + guide_rolling_upgrade diff --git a/docs/docsite/rst/user_guide/playbooks_advanced_syntax.rst b/docs/docsite/rst/user_guide/playbooks_advanced_syntax.rst new file mode 100644 index 00000000..03d4243f --- /dev/null +++ b/docs/docsite/rst/user_guide/playbooks_advanced_syntax.rst @@ -0,0 +1,112 @@ +.. 
_playbooks_advanced_syntax: + +*************** +Advanced Syntax +*************** + +The advanced YAML syntax examples on this page give you more control over the data placed in YAML files used by Ansible. You can find additional information about Python-specific YAML in the official `PyYAML Documentation <https://pyyaml.org/wiki/PyYAMLDocumentation#YAMLtagsandPythontypes>`_. + +.. contents:: + :local: + +.. _unsafe_strings: + +Unsafe or raw strings +===================== + +When handling values returned by lookup plugins, Ansible uses a data type called ``unsafe`` to block templating. Marking data as unsafe prevents malicious users from abusing Jinja2 templates to execute arbitrary code on target machines. The Ansible implementation ensures that unsafe values are never templated. It is more comprehensive than escaping Jinja2 with ``{% raw %} ... {% endraw %}`` tags. + +You can use the same ``unsafe`` data type in variables you define, to prevent templating errors and information disclosure. You can mark values supplied by :ref:`vars_prompts<unsafe_prompts>` as unsafe. You can also use ``unsafe`` in playbooks. The most common use cases include passwords that allow special characters like ``{`` or ``%``, and JSON arguments that look like templates but should not be templated. For example: + +.. code-block:: yaml + + --- + mypassword: !unsafe 234%234{435lkj{{lkjsdf + +In a playbook:: + + --- + hosts: all + vars: + my_unsafe_variable: !unsafe 'unsafe % value' + tasks: + ... + +For complex variables such as hashes or arrays, use ``!unsafe`` on the individual elements:: + + --- + my_unsafe_array: + - !unsafe 'unsafe element' + - 'safe element' + + my_unsafe_hash: + unsafe_key: !unsafe 'unsafe value' + +.. 
_anchors_and_aliases: + +YAML anchors and aliases: sharing variable values +================================================= + +`YAML anchors and aliases <https://yaml.org/spec/1.2/spec.html#id2765878>`_ help you define, maintain, and use shared variable values in a flexible way. +You define an anchor with ``&``, then refer to it using an alias, denoted with ``*``. Here's an example that sets three values with an anchor, uses two of those values with an alias, and overrides the third value:: + + --- + ... + vars: + app1: + jvm: &jvm_opts + opts: '-Xms1G -Xmx2G' + port: 1000 + path: /usr/lib/app1 + app2: + jvm: + <<: *jvm_opts + path: /usr/lib/app2 + ... + +Here, ``app1`` and ``app2`` share the values for ``opts`` and ``port`` using the anchor ``&jvm_opts`` and the alias ``*jvm_opts``. +The value for ``path`` is merged by ``<<`` or `merge operator <https://yaml.org/type/merge.html>`_. + +Anchors and aliases also let you share complex sets of variable values, including nested variables. If you have one variable value that includes another variable value, you can define them separately:: + + vars: + webapp_version: 1.0 + webapp_custom_name: ToDo_App-1.0 + +This is inefficient and, at scale, means more maintenance. To incorporate the version value in the name, you can use an anchor in ``app_version`` and an alias in ``custom_name``:: + + vars: + webapp: + version: &my_version 1.0 + custom_name: + - "ToDo_App" + - *my_version + +Now, you can re-use the value of ``app_version`` within the value of ``custom_name`` and use the output in a template:: + + --- + - name: Using values nested inside dictionary + hosts: localhost + vars: + webapp: + version: &my_version 1.0 + custom_name: + - "ToDo_App" + - *my_version + tasks: + - name: Using Anchor value + ansible.builtin.debug: + msg: My app is called "{{ webapp.custom_name | join('-') }}". + +You've anchored the value of ``version`` with the ``&my_version`` anchor, and re-used it with the ``*my_version`` alias. 
Anchors and aliases let you access nested values inside dictionaries. + +.. seealso:: + + :ref:`playbooks_variables` + All about variables + :doc:`complex_data_manipulation` + Doing complex data manipulation in Ansible + `User Mailing List <https://groups.google.com/group/ansible-project>`_ + Have a question? Stop by the google group! + `irc.freenode.net <http://irc.freenode.net>`_ + #ansible IRC chat channel diff --git a/docs/docsite/rst/user_guide/playbooks_async.rst b/docs/docsite/rst/user_guide/playbooks_async.rst new file mode 100644 index 00000000..09fe5d5d --- /dev/null +++ b/docs/docsite/rst/user_guide/playbooks_async.rst @@ -0,0 +1,161 @@ +.. _playbooks_async: + +Asynchronous actions and polling +================================ + +By default Ansible runs tasks synchronously, holding the connection to the remote node open until the action is completed. This means within a playbook, each task blocks the next task by default, meaning subsequent tasks will not run until the current task completes. This behavior can create challenges. For example, a task may take longer to complete than the SSH session allows for, causing a timeout. Or you may want a long-running process to execute in the background while you perform other tasks concurrently. Asynchronous mode lets you control how long-running tasks execute. + +.. contents:: + :local: + +Asynchronous ad-hoc tasks +------------------------- + +You can execute long-running operations in the background with :ref:`ad-hoc tasks <intro_adhoc>`. 
For example, to execute ``long_running_operation`` asynchronously in the background, with a timeout (``-B``) of 3600 seconds, and without polling (``-P``):: + + $ ansible all -B 3600 -P 0 -a "/usr/bin/long_running_operation --do-stuff" + +To check on the job status later, use the ``async_status`` module, passing it the job ID that was returned when you ran the original job in the background:: + + $ ansible web1.example.com -m async_status -a "jid=488359678239.2844" + +Ansible can also check on the status of your long-running job automatically with polling. In most cases, Ansible will keep the connection to your remote node open between polls. To run for 30 minutes and poll for status every 60 seconds:: + + $ ansible all -B 1800 -P 60 -a "/usr/bin/long_running_operation --do-stuff" + +Poll mode is smart so all jobs will be started before polling begins on any machine. Be sure to use a high enough ``--forks`` value if you want to get all of your jobs started very quickly. After the time limit (in seconds) runs out (``-B``), the process on the remote nodes will be terminated. + +Asynchronous mode is best suited to long-running shell commands or software upgrades. Running the copy module asynchronously, for example, does not do a background file transfer. + +Asynchronous playbook tasks +--------------------------- + +:ref:`Playbooks <working_with_playbooks>` also support asynchronous mode and polling, with a simplified syntax. You can use asynchronous mode in playbooks to avoid connection timeouts or to avoid blocking subsequent tasks. The behavior of asynchronous mode in a playbook depends on the value of `poll`. + +Avoid connection timeouts: poll > 0 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +If you want to set a longer timeout limit for a certain task in your playbook, use ``async`` with ``poll`` set to a positive value. Ansible will still block the next task in your playbook, waiting until the async task either completes, fails or times out. 
However, the task will only time out if it exceeds the timeout limit you set with the ``async`` parameter. + +To avoid timeouts on a task, specify its maximum runtime and how frequently you would like to poll for status:: + + --- + + - hosts: all + remote_user: root + + tasks: + + - name: Simulate long running op (15 sec), wait for up to 45 sec, poll every 5 sec + ansible.builtin.command: /bin/sleep 15 + async: 45 + poll: 5 + +.. note:: + The default poll value is set by the :ref:`DEFAULT_POLL_INTERVAL` setting. + There is no default for the async time limit. If you leave off the + 'async' keyword, the task runs synchronously, which is Ansible's + default. + +.. note:: + As of Ansible 2.3, async does not support check mode and will fail the + task when run in check mode. See :ref:`check_mode_dry` on how to + skip a task in check mode. + +Run tasks concurrently: poll = 0 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +If you want to run multiple tasks in a playbook concurrently, use ``async`` with ``poll`` set to 0. When you set ``poll: 0``, Ansible starts the task and immediately moves on to the next task without waiting for a result. Each async task runs until it either completes, fails or times out (runs longer than its ``async`` value). The playbook run ends without checking back on async tasks. + +To run a playbook task asynchronously:: + + --- + + - hosts: all + remote_user: root + + tasks: + + - name: Simulate long running op, allow to run for 45 sec, fire and forget + ansible.builtin.command: /bin/sleep 15 + async: 45 + poll: 0 + +.. note:: + Do not specify a poll value of 0 with operations that require exclusive locks (such as yum transactions) if you expect to run other commands later in the playbook against those same resources. + +.. note:: + Using a higher value for ``--forks`` will result in kicking off asynchronous tasks even faster. This also increases the efficiency of polling. 
+ +If you need a synchronization point with an async task, you can register it to obtain its job ID and use the :ref:`async_status <async_status_module>` module to observe it in a later task. For example:: + + - name: Run an async task + ansible.builtin.yum: + name: docker-io + state: present + async: 1000 + poll: 0 + register: yum_sleeper + + - name: Check on an async task + async_status: + jid: "{{ yum_sleeper.ansible_job_id }}" + register: job_result + until: job_result.finished + retries: 100 + delay: 10 + +.. note:: + If the value of ``async:`` is not high enough, this will cause the + "check on it later" task to fail because the temporary status file that + the ``async_status:`` is looking for will not have been written or no longer exist + +To run multiple asynchronous tasks while limiting the number of tasks running concurrently:: + + ##################### + # main.yml + ##################### + - name: Run items asynchronously in batch of two items + vars: + sleep_durations: + - 1 + - 2 + - 3 + - 4 + - 5 + durations: "{{ item }}" + include_tasks: execute_batch.yml + loop: "{{ sleep_durations | batch(2) | list }}" + + ##################### + # execute_batch.yml + ##################### + - name: Async sleeping for batched_items + ansible.builtin.command: sleep {{ async_item }} + async: 45 + poll: 0 + loop: "{{ durations }}" + loop_control: + loop_var: "async_item" + register: async_results + + - name: Check sync status + async_status: + jid: "{{ async_result_item.ansible_job_id }}" + loop: "{{ async_results.results }}" + loop_control: + loop_var: "async_result_item" + register: async_poll_results + until: async_poll_results.finished + retries: 30 + +.. seealso:: + + :ref:`playbooks_strategies` + Options for controlling playbook execution + :ref:`playbooks_intro` + An introduction to playbooks + `User Mailing List <https://groups.google.com/group/ansible-devel>`_ + Have a question? Stop by the google group! 
+ `irc.freenode.net <http://irc.freenode.net>`_ + #ansible IRC chat channel diff --git a/docs/docsite/rst/user_guide/playbooks_best_practices.rst b/docs/docsite/rst/user_guide/playbooks_best_practices.rst new file mode 100644 index 00000000..86915f51 --- /dev/null +++ b/docs/docsite/rst/user_guide/playbooks_best_practices.rst @@ -0,0 +1,167 @@ +.. _playbooks_tips_and_tricks: +.. _playbooks_best_practices: + +*************** +Tips and tricks +*************** + +These tips and tricks have helped us optimize our Ansible usage, and we offer them here as suggestions. We hope they will help you organize content, write playbooks, maintain inventory, and execute Ansible. Ultimately, though, you should use Ansible in the way that makes most sense for your organization and your goals. + +.. contents:: + :local: + +General tips +============ + +These concepts apply to all Ansible activities and artifacts. + +Keep it simple +-------------- + +Whenever you can, do things simply. Use advanced features only when necessary, and select the feature that best matches your use case. For example, you will probably not need ``vars``, ``vars_files``, ``vars_prompt`` and ``--extra-vars`` all at once, while also using an external inventory file. If something feels complicated, it probably is. Take the time to look for a simpler solution. + +Use version control +------------------- + +Keep your playbooks, roles, inventory, and variables files in git or another version control system and make commits to the repository when you make changes. Version control gives you an audit trail describing when and why you changed the rules that automate your infrastructure. + +Playbook tips +============= + +These tips help make playbooks and roles easier to read, maintain, and debug. + +Use whitespace +-------------- + +Generous use of whitespace, for example, a blank line before each block or task, makes a playbook easy to scan. 
+ +Always name tasks +----------------- + +Task names are optional, but extremely useful. In its output, Ansible shows you the name of each task it runs. Choose names that describe what each task does and why. + +Always mention the state +------------------------ + +For many modules, the 'state' parameter is optional. Different modules have different default settings for 'state', and some modules support several 'state' settings. Explicitly setting 'state=present' or 'state=absent' makes playbooks and roles clearer. + +Use comments +------------ + +Even with task names and explicit state, sometimes a part of a playbook or role (or inventory/variable file) needs more explanation. Adding a comment (any line starting with '#') helps others (and possibly yourself in future) understand what a play or task (or variable setting) does, how it does it, and why. + +Inventory tips +============== + +These tips help keep your inventory well organized. + +Use dynamic inventory with clouds +--------------------------------- + +With cloud providers and other systems that maintain canonical lists of your infrastructure, use :ref:`dynamic inventory <intro_dynamic_inventory>` to retrieve those lists instead of manually updating static inventory files. With cloud resources, you can use tags to differentiate production and staging environments. + +Group inventory by function +--------------------------- + +A system can be in multiple groups. See :ref:`intro_inventory` and :ref:`intro_patterns`. If you create groups named for the function of the nodes in the group, for example *webservers* or *dbservers*, your playbooks can target machines based on function. You can assign function-specific variables using the group variable system, and design Ansible roles to handle function-specific use cases. See :ref:`playbooks_reuse_roles`. 
+ +Separate production and staging inventory +----------------------------------------- + +You can keep your production environment separate from development, test, and staging environments by using separate inventory files or directories for each environment. This way you pick with -i what you are targeting. Keeping all your environments in one file can lead to surprises! + +.. _tip_for_variables_and_vaults: + +Keep vaulted variables safely visible +------------------------------------- + +You should encrypt sensitive or secret variables with Ansible Vault. However, encrypting the variable names as well as the variable values makes it hard to find the source of the values. You can keep the names of your variables accessible (by ``grep``, for example) without exposing any secrets by adding a layer of indirection: + +#. Create a ``group_vars/`` subdirectory named after the group. +#. Inside this subdirectory, create two files named ``vars`` and ``vault``. +#. In the ``vars`` file, define all of the variables needed, including any sensitive ones. +#. Copy all of the sensitive variables over to the ``vault`` file and prefix these variables with ``vault_``. +#. Adjust the variables in the ``vars`` file to point to the matching ``vault_`` variables using jinja2 syntax: ``db_password: {{ vault_db_password }}``. +#. Encrypt the ``vault`` file to protect its contents. +#. Use the variable name from the ``vars`` file in your playbooks. + +When running a playbook, Ansible finds the variables in the unencrypted file, which pulls the sensitive variable values from the encrypted file. There is no limit to the number of variable and vault files or their names. + +Execution tricks +================ + +These tips apply to using Ansible, rather than to Ansible artifacts. + +Try it in staging first +----------------------- + +Testing changes in a staging environment before rolling them out in production is always a great idea. 
Your environments need not be the same size and you can use group variables to control the differences between those environments. + +Update in batches +----------------- + +Use the 'serial' keyword to control how many machines you update at once in the batch. See :ref:`playbooks_delegation`. + +.. _os_variance: + +Handling OS and distro differences +---------------------------------- + +Group variables files and the ``group_by`` module work together to help Ansible execute across a range of operating systems and distributions that require different settings, packages, and tools. The ``group_by`` module creates a dynamic group of hosts matching certain criteria. This group does not need to be defined in the inventory file. This approach lets you execute different tasks on different operating systems or distributions. For example:: + + --- + + - name: talk to all hosts just so we can learn about them + hosts: all + tasks: + - name: Classify hosts depending on their OS distribution + group_by: + key: os_{{ ansible_facts['distribution'] }} + + # now just on the CentOS hosts... + + - hosts: os_CentOS + gather_facts: False + tasks: + - # tasks that only happen on CentOS go in this play + +The first play categorizes all systems into dynamic groups based on the operating system name. Later plays can use these groups as patterns on the ``hosts`` line. You can also add group-specific settings in group vars files. All three names must match: the name created by the ``group_by`` task, the name of the pattern in subsequent plays, and the name of the group vars file. For example:: + + --- + # file: group_vars/all + asdf: 10 + + --- + # file: group_vars/os_CentOS.yml + asdf: 42 + +In this example, CentOS machines get the value of '42' for asdf, but other machines get '10'. +This can be used not only to set variables, but also to apply certain roles to only certain systems. 
+ +You can use the same setup with ``include_vars`` when you only need OS-specific variables, not tasks:: + + - hosts: all + tasks: + - name: Set OS distribution dependent variables + include_vars: "os_{{ ansible_facts['distribution'] }}.yml" + - debug: + var: asdf + +This pulls in variables from the group_vars/os_CentOS.yml file. + +.. seealso:: + + :ref:`yaml_syntax` + Learn about YAML syntax + :ref:`working_with_playbooks` + Review the basic playbook features + :ref:`list_of_collections` + Browse existing collections, modules, and plugins + :ref:`developing_modules` + Learn how to extend Ansible by writing your own modules + :ref:`intro_patterns` + Learn about how to select hosts + `GitHub examples directory <https://github.com/ansible/ansible-examples>`_ + Complete playbook files from the github project source + `Mailing List <https://groups.google.com/group/ansible-project>`_ + Questions? Help? Ideas? Stop by the list on Google Groups diff --git a/docs/docsite/rst/user_guide/playbooks_blocks.rst b/docs/docsite/rst/user_guide/playbooks_blocks.rst new file mode 100644 index 00000000..dc516312 --- /dev/null +++ b/docs/docsite/rst/user_guide/playbooks_blocks.rst @@ -0,0 +1,189 @@ +.. _playbooks_blocks: + +****** +Blocks +****** + +Blocks create logical groups of tasks. Blocks also offer ways to handle task errors, similar to exception handling in many programming languages. + +.. contents:: + :local: + +Grouping tasks with blocks +========================== + +All tasks in a block inherit directives applied at the block level. Most of what you can apply to a single task (with the exception of loops) can be applied at the block level, so blocks make it much easier to set data or directives common to the tasks. The directive does not affect the block itself, it is only inherited by the tasks enclosed by a block. For example, a `when` statement is applied to the tasks within a block, not to the block itself. + +.. 
code-block:: YAML + :emphasize-lines: 3 + :caption: Block example with named tasks inside the block + + tasks: + - name: Install, configure, and start Apache + block: + - name: Install httpd and memcached + ansible.builtin.yum: + name: + - httpd + - memcached + state: present + + - name: Apply the foo config template + ansible.builtin.template: + src: templates/src.j2 + dest: /etc/foo.conf + + - name: Start service bar and enable it + ansible.builtin.service: + name: bar + state: started + enabled: True + when: ansible_facts['distribution'] == 'CentOS' + become: true + become_user: root + ignore_errors: yes + +In the example above, the 'when' condition will be evaluated before Ansible runs each of the three tasks in the block. All three tasks also inherit the privilege escalation directives, running as the root user. Finally, ``ignore_errors: yes`` ensures that Ansible continues to execute the playbook even if some of the tasks fail. + +Names for blocks have been available since Ansible 2.3. We recommend using names in all tasks, within blocks or elsewhere, for better visibility into the tasks being executed when you run the playbook. + +.. _block_error_handling: + +Handling errors with blocks +=========================== + +You can control how Ansible responds to task errors using blocks with ``rescue`` and ``always`` sections. + +Rescue blocks specify tasks to run when an earlier task in a block fails. This approach is similar to exception handling in many programming languages. Ansible only runs rescue blocks after a task returns a 'failed' state. Bad task definitions and unreachable hosts will not trigger the rescue block. + +.. _block_rescue: +.. 
code-block:: YAML + :emphasize-lines: 3,10 + :caption: Block error handling example + + tasks: + - name: Handle the error + block: + - name: Print a message + ansible.builtin.debug: + msg: 'I execute normally' + + - name: Force a failure + ansible.builtin.command: /bin/false + + - name: Never print this + ansible.builtin.debug: + msg: 'I never execute, due to the above task failing, :-(' + rescue: + - name: Print when errors + ansible.builtin.debug: + msg: 'I caught an error, can do stuff here to fix it, :-)' + +You can also add an ``always`` section to a block. Tasks in the ``always`` section run no matter what the task status of the previous block is. + +.. _block_always: +.. code-block:: YAML + :emphasize-lines: 2,9 + :caption: Block with always section + + - name: Always do X + block: + - name: Print a message + ansible.builtin.debug: + msg: 'I execute normally' + + - name: Force a failure + ansible.builtin.command: /bin/false + + - name: Never print this + ansible.builtin.debug: + msg: 'I never execute :-(' + always: + - name: Always do this + ansible.builtin.debug: + msg: "This always executes, :-)" + +Together, these elements offer complex error handling. + +.. code-block:: YAML + :emphasize-lines: 2,9,16 + :caption: Block with all sections + + - name: Attempt and graceful roll back demo + block: + - name: Print a message + ansible.builtin.debug: + msg: 'I execute normally' + + - name: Force a failure + ansible.builtin.command: /bin/false + + - name: Never print this + ansible.builtin.debug: + msg: 'I never execute, due to the above task failing, :-(' + rescue: + - name: Print when errors + ansible.builtin.debug: + msg: 'I caught an error' + + - name: Force a failure in middle of recovery! >:-) + ansible.builtin.command: /bin/false + + - name: Never print this + ansible.builtin.debug: + msg: 'I also never execute :-(' + always: + - name: Always do this + ansible.builtin.debug: + msg: "This always executes" + +The tasks in the ``block`` execute normally. 
If any tasks in the block return ``failed``, the ``rescue`` section executes tasks to recover from the error. The ``always`` section runs regardless of the results of the ``block`` and ``rescue`` sections. + +If an error occurs in the block and the rescue task succeeds, Ansible reverts the failed status of the original task for the run and continues to run the play as if the original task had succeeded. The rescued task is considered successful, and does not trigger ``max_fail_percentage`` or ``any_errors_fatal`` configurations. However, Ansible still reports a failure in the playbook statistics. + +You can use blocks with ``flush_handlers`` in a rescue task to ensure that all handlers run even if an error occurs: + +.. code-block:: YAML + :emphasize-lines: 6,10 + :caption: Block run handlers in error handling + + tasks: + - name: Attempt and graceful roll back demo + block: + - name: Print a message + ansible.builtin.debug: + msg: 'I execute normally' + changed_when: yes + notify: run me even after an error + + - name: Force a failure + ansible.builtin.command: /bin/false + rescue: + - name: Make sure all handlers run + meta: flush_handlers + handlers: + - name: Run me even after an error + ansible.builtin.debug: + msg: 'This handler runs even on error' + + +.. versionadded:: 2.1 + +Ansible provides a couple of variables for tasks in the ``rescue`` portion of a block: + +ansible_failed_task + The task that returned 'failed' and triggered the rescue. For example, to get the name use ``ansible_failed_task.name``. + +ansible_failed_result + The captured return result of the failed task that triggered the rescue. This would equate to having used this var in the ``register`` keyword. + +.. seealso:: + + :ref:`playbooks_intro` + An introduction to playbooks + :ref:`playbooks_reuse_roles` + Playbook organization by roles + `User Mailing List <https://groups.google.com/group/ansible-devel>`_ + Have a question? Stop by the google group! 
+ `irc.freenode.net <http://irc.freenode.net>`_ + #ansible IRC chat channel diff --git a/docs/docsite/rst/user_guide/playbooks_checkmode.rst b/docs/docsite/rst/user_guide/playbooks_checkmode.rst new file mode 100644 index 00000000..36b16aa8 --- /dev/null +++ b/docs/docsite/rst/user_guide/playbooks_checkmode.rst @@ -0,0 +1,97 @@ +.. _check_mode_dry: + +****************************************** +Validating tasks: check mode and diff mode +****************************************** + +Ansible provides two modes of execution that validate tasks: check mode and diff mode. These modes can be used separately or together. They are useful when you are creating or editing a playbook or role and you want to know what it will do. In check mode, Ansible runs without making any changes on remote systems. Modules that support check mode report the changes they would have made. Modules that do not support check mode report nothing and do nothing. In diff mode, Ansible provides before-and-after comparisons. Modules that support diff mode display detailed information. You can combine check mode and diff mode for detailed validation of your playbook or role. + +.. contents:: + :local: + +Using check mode +================ + +Check mode is just a simulation. It will not generate output for tasks that use :ref:`conditionals based on registered variables <conditionals_registered_vars>` (results of prior tasks). However, it is great for validating configuration management playbooks that run on one node at a time. To run a playbook in check mode:: + + ansible-playbook foo.yml --check + +.. _forcing_to_run_in_check_mode: + +Enforcing or preventing check mode on tasks +------------------------------------------- + +.. 
versionadded:: 2.2 + +If you want certain tasks to run in check mode always, or never, regardless of whether you run the playbook with or without ``--check``, you can add the ``check_mode`` option to those tasks: + + - To force a task to run in check mode, even when the playbook is called without ``--check``, set ``check_mode: yes``. + - To force a task to run in normal mode and make changes to the system, even when the playbook is called with ``--check``, set ``check_mode: no``. + +For example:: + + tasks: + - name: This task will always make changes to the system + ansible.builtin.command: /something/to/run --even-in-check-mode + check_mode: no + + - name: This task will never make changes to the system + ansible.builtin.lineinfile: + line: "important config" + dest: /path/to/myconfig.conf + state: present + check_mode: yes + register: changes_to_important_config + +Running single tasks with ``check_mode: yes`` can be useful for testing Ansible modules, either to test the module itself or to test the conditions under which a module would make changes. You can register variables (see :ref:`playbooks_conditionals`) on these tasks for even more detail on the potential changes. + +.. note:: Prior to version 2.2 only the equivalent of ``check_mode: no`` existed. The notation for that was ``always_run: yes``. + +Skipping tasks or ignoring errors in check mode +----------------------------------------------- + +.. versionadded:: 2.1 + +If you want to skip a task or ignore errors on a task when you run Ansible in check mode, you can use a boolean magic variable ``ansible_check_mode``, which is set to ``True`` when Ansible runs in check mode. 
For example:: + + tasks: + + - name: This task will be skipped in check mode + ansible.builtin.git: + repo: ssh://git@github.com/mylogin/hello.git + dest: /home/mylogin/hello + when: not ansible_check_mode + + - name: This task will ignore errors in check mode + ansible.builtin.git: + repo: ssh://git@github.com/mylogin/hello.git + dest: /home/mylogin/hello + ignore_errors: "{{ ansible_check_mode }}" + +.. _diff_mode: + +Using diff mode +=============== + +The ``--diff`` option for ansible-playbook can be used alone or with ``--check``. When you run in diff mode, any module that supports diff mode reports the changes made or, if used with ``--check``, the changes that would have been made. Diff mode is most common in modules that manipulate files (for example, the template module) but other modules might also show 'before and after' information (for example, the user module). + +Diff mode produces a large amount of output, so it is best used when checking a single host at a time. For example:: + + ansible-playbook foo.yml --check --diff --limit foo.example.com + +.. versionadded:: 2.4 + +Enforcing or preventing diff mode on tasks +------------------------------------------ + +Because the ``--diff`` option can reveal sensitive information, you can disable it for a task by specifying ``diff: no``. For example:: + + tasks: + - name: This task will not report a diff when the file changes + ansible.builtin.template: + src: secret.conf.j2 + dest: /etc/secret.conf + owner: root + group: root + mode: '0600' + diff: no diff --git a/docs/docsite/rst/user_guide/playbooks_conditionals.rst b/docs/docsite/rst/user_guide/playbooks_conditionals.rst new file mode 100644 index 00000000..76599cb3 --- /dev/null +++ b/docs/docsite/rst/user_guide/playbooks_conditionals.rst @@ -0,0 +1,508 @@ +.. 
_playbooks_conditionals: + +************ +Conditionals +************ + +In a playbook, you may want to execute different tasks, or have different goals, depending on the value of a fact (data about the remote system), a variable, or the result of a previous task. You may want the value of some variables to depend on the value of other variables. Or you may want to create additional groups of hosts based on whether the hosts match other criteria. You can do all of these things with conditionals. + +Ansible uses Jinja2 :ref:`tests <playbooks_tests>` and :ref:`filters <playbooks_filters>` in conditionals. Ansible supports all the standard tests and filters, and adds some unique ones as well. + +.. note:: + + There are many options to control execution flow in Ansible. You can find more examples of supported conditionals at `<https://jinja.palletsprojects.com/en/master/templates/#comparisons>`_. + +.. contents:: + :local: + +.. _the_when_statement: + +Basic conditionals with ``when`` +================================ + +The simplest conditional statement applies to a single task. Create the task, then add a ``when`` statement that applies a test. The ``when`` clause is a raw Jinja2 expression without double curly braces (see :ref:`group_by_module`). When you run the task or playbook, Ansible evaluates the test for all hosts. On any host where the test passes (returns a value of True), Ansible runs that task. For example, if you are installing mysql on multiple machines, some of which have SELinux enabled, you might have a task to configure SELinux to allow mysql to run. You would only want that task to run on machines that have SELinux enabled: + +.. 
code-block:: yaml + + tasks: + - name: Configure SELinux to start mysql on any port + ansible.posix.seboolean: + name: mysql_connect_any + state: true + persistent: yes + when: ansible_selinux.status == "enabled" + # all variables can be used directly in conditionals without double curly braces + +Conditionals based on ansible_facts +----------------------------------- + +Often you want to execute or skip a task based on facts. Facts are attributes of individual hosts, including IP address, operating system, the status of a filesystem, and many more. With conditionals based on facts: + + - You can install a certain package only when the operating system is a particular version. + - You can skip configuring a firewall on hosts with internal IP addresses. + - You can perform cleanup tasks only when a filesystem is getting full. + +See :ref:`commonly_used_facts` for a list of facts that frequently appear in conditional statements. Not all facts exist for all hosts. For example, the 'lsb_major_release' fact used in an example below only exists when the lsb_release package is installed on the target host. To see what facts are available on your systems, add a debug task to your playbook:: + + - name: Show facts available on the system + ansible.builtin.debug: + var: ansible_facts + +Here is a sample conditional based on a fact: + +.. code-block:: yaml + + tasks: + - name: Shut down Debian flavored systems + ansible.builtin.command: /sbin/shutdown -t now + when: ansible_facts['os_family'] == "Debian" + +If you have multiple conditions, you can group them with parentheses: + +.. 
code-block:: yaml + + tasks: + - name: Shut down CentOS 6 and Debian 7 systems + ansible.builtin.command: /sbin/shutdown -t now + when: (ansible_facts['distribution'] == "CentOS" and ansible_facts['distribution_major_version'] == "6") or + (ansible_facts['distribution'] == "Debian" and ansible_facts['distribution_major_version'] == "7") + +You can use `logical operators <https://jinja.palletsprojects.com/en/master/templates/#logic>`_ to combine conditions. When you have multiple conditions that all need to be true (that is, a logical ``and``), you can specify them as a list:: + + tasks: + - name: Shut down CentOS 6 systems + ansible.builtin.command: /sbin/shutdown -t now + when: + - ansible_facts['distribution'] == "CentOS" + - ansible_facts['distribution_major_version'] == "6" + +If a fact or variable is a string, and you need to run a mathematical comparison on it, use a filter to ensure that Ansible reads the value as an integer:: + + tasks: + - ansible.builtin.shell: echo "only on Red Hat 6, derivatives, and later" + when: ansible_facts['os_family'] == "RedHat" and ansible_facts['lsb']['major_release'] | int >= 6 + +.. _conditionals_registered_vars: + +Conditions based on registered variables +---------------------------------------- + +Often in a playbook you want to execute or skip a task based on the outcome of an earlier task. For example, you might want to configure a service after it is upgraded by an earlier task. To create a conditional based on a registered variable: + + #. Register the outcome of the earlier task as a variable. + #. Create a conditional test based on the registered variable. + +You create the name of the registered variable using the ``register`` keyword. A registered variable always contains the status of the task that created it as well as any output that task generated. You can use registered variables in templates and action lines as well as in conditional ``when`` statements. 
You can access the string contents of the registered variable using ``variable.stdout``. For example:: + + - name: Test play + hosts: all + + tasks: + + - name: Register a variable + ansible.builtin.shell: cat /etc/motd + register: motd_contents + + - name: Use the variable in conditional statement + ansible.builtin.shell: echo "motd contains the word hi" + when: motd_contents.stdout.find('hi') != -1 + +You can use registered results in the loop of a task if the variable is a list. If the variable is not a list, you can convert it into a list, with either ``stdout_lines`` or with ``variable.stdout.split()``. You can also split the lines by other fields:: + + - name: Registered variable usage as a loop list + hosts: all + tasks: + + - name: Retrieve the list of home directories + ansible.builtin.command: ls /home + register: home_dirs + + - name: Add home dirs to the backup spooler + ansible.builtin.file: + path: /mnt/bkspool/{{ item }} + src: /home/{{ item }} + state: link + loop: "{{ home_dirs.stdout_lines }}" + # same as loop: "{{ home_dirs.stdout.split() }}" + +The string content of a registered variable can be empty. If you want to run another task only on hosts where the stdout of your registered variable is empty, check the registered variable's string contents for emptiness: + +.. code-block:: yaml + + - name: check registered variable for emptiness + hosts: all + + tasks: + + - name: List contents of directory + ansible.builtin.command: ls mydir + register: contents + + - name: Check contents for emptiness + ansible.builtin.debug: + msg: "Directory is empty" + when: contents.stdout == "" + +Ansible always registers something in a registered variable for every host, even on hosts where a task fails or Ansible skips a task because a condition is not met. To run a follow-up task on these hosts, query the registered variable for ``is skipped`` (not for "undefined" or "default"). See :ref:`registered_variables` for more information. 
Here are sample conditionals based on the success or failure of a task. Remember to ignore errors if you want Ansible to continue executing on a host when a failure occurs: + +.. code-block:: yaml + + tasks: + - name: Register a variable, ignore errors and continue + ansible.builtin.command: /bin/false + register: result + ignore_errors: true + + - name: Run only if the task that registered the "result" variable fails + ansible.builtin.command: /bin/something + when: result is failed + + - name: Run only if the task that registered the "result" variable succeeds + ansible.builtin.command: /bin/something_else + when: result is succeeded + + - name: Run only if the task that registered the "result" variable is skipped + ansible.builtin.command: /bin/still/something_else + when: result is skipped + +.. note:: Older versions of Ansible used ``success`` and ``fail``, but ``succeeded`` and ``failed`` use the correct tense. All of these options are now valid. + + +Conditionals based on variables +------------------------------- + +You can also create conditionals based on variables defined in the playbooks or inventory. Because conditionals require boolean input (a test must evaluate as True to trigger the condition), you must apply the ``| bool`` filter to non boolean variables, such as string variables with content like 'yes', 'on', '1', or 'true'. You can define variables like this: + +.. code-block:: yaml + + vars: + epic: true + monumental: "yes" + +With the variables above, Ansible would run one of these tasks and skip the other: + +.. code-block:: yaml + + tasks: + - name: Run the command if "epic" or "monumental" is true + ansible.builtin.shell: echo "This certainly is epic!" + when: epic or monumental | bool + + - name: Run the command if "epic" is false + ansible.builtin.shell: echo "This certainly isn't epic!" + when: not epic + +If a required variable has not been set, you can skip or fail using Jinja2's `defined` test. For example: + +.. 
code-block:: yaml + + tasks: + - name: Run the command if "foo" is defined + ansible.builtin.shell: echo "I've got '{{ foo }}' and am not afraid to use it!" + when: foo is defined + + - name: Fail if "bar" is undefined + ansible.builtin.fail: msg="Bailing out. This play requires 'bar'" + when: bar is undefined + +This is especially useful in combination with the conditional import of vars files (see below). +As the examples show, you do not need to use `{{ }}` to use variables inside conditionals, as these are already implied. + +.. _loops_and_conditionals: + +Using conditionals in loops +--------------------------- + +If you combine a ``when`` statement with a :ref:`loop <playbooks_loops>`, Ansible processes the condition separately for each item. This is by design, so you can execute the task on some items in the loop and skip it on other items. For example: + +.. code-block:: yaml + + tasks: + - name: Run with items greater than 5 + ansible.builtin.command: echo {{ item }} + loop: [ 0, 2, 4, 6, 8, 10 ] + when: item > 5 + +If you need to skip the whole task when the loop variable is undefined, use the `|default` filter to provide an empty iterator. For example, when looping over a list: + +.. code-block:: yaml + + - name: Skip the whole task when a loop variable is undefined + ansible.builtin.command: echo {{ item }} + loop: "{{ mylist|default([]) }}" + when: item > 5 + +You can do the same thing when looping over a dict: + +.. code-block:: yaml + + - name: The same as above using a dict + ansible.builtin.command: echo {{ item.key }} + loop: "{{ query('dict', mydict|default({})) }}" + when: item.value > 5 + +.. _loading_in_custom_facts: + +Loading custom facts +-------------------- + +You can provide your own facts, as described in :ref:`developing_modules`. To run them, just make a call to your own custom fact gathering module at the top of your list of tasks, and variables returned there will be accessible to future tasks: + +.. 
code-block:: yaml + + tasks: + - name: Gather site specific fact data + action: site_facts + + - name: Use a custom fact + ansible.builtin.command: /usr/bin/thingy + when: my_custom_fact_just_retrieved_from_the_remote_system == '1234' + +.. _when_with_reuse: + +Conditionals with re-use +------------------------ + +You can use conditionals with re-usable tasks files, playbooks, or roles. Ansible executes these conditional statements differently for dynamic re-use (includes) and for static re-use (imports). See :ref:`playbooks_reuse` for more information on re-use in Ansible. + +.. _conditional_imports: + +Conditionals with imports +^^^^^^^^^^^^^^^^^^^^^^^^^ + +When you add a conditional to an import statement, Ansible applies the condition to all tasks within the imported file. This behavior is the equivalent of :ref:`tag_inheritance`. Ansible applies the condition to every task, and evaluates each task separately. For example, you might have a playbook called ``main.yml`` and a tasks file called ``other_tasks.yml``:: + + # all tasks within an imported file inherit the condition from the import statement + # main.yml + - import_tasks: other_tasks.yml # note "import" + when: x is not defined + + # other_tasks.yml + - name: Set a variable + ansible.builtin.set_fact: + x: foo + + - name: Print a variable + ansible.builtin.debug: + var: x + +Ansible expands this at execution time to the equivalent of:: + + - name: Set a variable if not defined + ansible.builtin.set_fact: + x: foo + when: x is not defined + # this task sets a value for x + + - name: Do the task if "x" is not defined + ansible.builtin.debug: + var: x + when: x is not defined + # Ansible skips this task, because x is now defined + +Thus if ``x`` is initially undefined, the ``debug`` task will be skipped. If this is not the behavior you want, use an ``include_*`` statement to apply a condition only to that statement itself. 
+ +You can apply conditions to ``import_playbook`` as well as to the other ``import_*`` statements. When you use this approach, Ansible returns a 'skipped' message for every task on every host that does not match the criteria, creating repetitive output. In many cases the :ref:`group_by module <group_by_module>` can be a more streamlined way to accomplish the same objective; see :ref:`os_variance`. + +.. _conditional_includes: + +Conditionals with includes +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +When you use a conditional on an ``include_*`` statement, the condition is applied only to the include task itself and not to any other tasks within the included file(s). To contrast with the example used for conditionals on imports above, look at the same playbook and tasks file, but using an include instead of an import:: + + # Includes let you re-use a file to define a variable when it is not already defined + + # main.yml + - include_tasks: other_tasks.yml + when: x is not defined + + # other_tasks.yml + - name: Set a variable + ansible.builtin.set_fact: + x: foo + + - name: Print a variable + ansible.builtin.debug: + var: x + +Ansible expands this at execution time to the equivalent of:: + + # main.yml + - include_tasks: other_tasks.yml + when: x is not defined + # if condition is met, Ansible includes other_tasks.yml + + # other_tasks.yml + - name: Set a variable + ansible.builtin.set_fact: + x: foo + # no condition applied to this task, Ansible sets the value of x to foo + + - name: Print a variable + ansible.builtin.debug: + var: x + # no condition applied to this task, Ansible prints the debug statement + +By using ``include_tasks`` instead of ``import_tasks``, both tasks from ``other_tasks.yml`` will be executed as expected. For more information on the differences between ``include`` v ``import`` see :ref:`playbooks_reuse`. 
+ +Conditionals with roles +^^^^^^^^^^^^^^^^^^^^^^^ + +There are three ways to apply conditions to roles: + + - Add the same condition or conditions to all tasks in the role by placing your ``when`` statement under the ``roles`` keyword. See the example in this section. + - Add the same condition or conditions to all tasks in the role by placing your ``when`` statement on a static ``import_role`` in your playbook. + - Add a condition or conditions to individual tasks or blocks within the role itself. This is the only approach that allows you to select or skip some tasks within the role based on your ``when`` statement. To select or skip tasks within the role, you must have conditions set on individual tasks or blocks, use the dynamic ``include_role`` in your playbook, and add the condition or conditions to the include. When you use this approach, Ansible applies the condition to the include itself plus any tasks in the role that also have that ``when`` statement. + +When you incorporate a role in your playbook statically with the ``roles`` keyword, Ansible adds the conditions you define to all the tasks in the role. For example: + +.. code-block:: yaml + + - hosts: webservers + roles: + - role: debian_stock_config + when: ansible_facts['os_family'] == 'Debian' + +.. _conditional_variable_and_files: + +Selecting variables, files, or templates based on facts +------------------------------------------------------- + +Sometimes the facts about a host determine the values you want to use for certain variables or even the file or template you want to select for that host. For example, the names of packages are different on CentOS and on Debian. The configuration files for common services are also different on different OS flavors and versions. 
To load different variables files, templates, or other files based on a fact about the hosts: + + 1) name your vars files, templates, or files to match the Ansible fact that differentiates them + + 2) select the correct vars file, template, or file for each host with a variable based on that Ansible fact + +Ansible separates variables from tasks, keeping your playbooks from turning into arbitrary code with nested conditionals. This approach results in more streamlined and auditable configuration rules because there are fewer decision points to track. + +Selecting variables files based on facts +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +You can create a playbook that works on multiple platforms and OS versions with a minimum of syntax by placing your variable values in vars files and conditionally importing them. If you want to install Apache on some CentOS and some Debian servers, create variables files with YAML keys and values. For example:: + + --- + # for vars/RedHat.yml + apache: httpd + somethingelse: 42 + +Then import those variables files based on the facts you gather on the hosts in your playbook:: + + --- + - hosts: webservers + remote_user: root + vars_files: + - "vars/common.yml" + - [ "vars/{{ ansible_facts['os_family'] }}.yml", "vars/os_defaults.yml" ] + tasks: + - name: Make sure apache is started + ansible.builtin.service: + name: '{{ apache }}' + state: started + +Ansible gathers facts on the hosts in the webservers group, then interpolates the variable "ansible_facts['os_family']" into a list of filenames. If you have hosts with Red Hat operating systems (CentOS, for example), Ansible looks for 'vars/RedHat.yml'. If that file does not exist, Ansible attempts to load 'vars/os_defaults.yml'. For Debian hosts, Ansible first looks for 'vars/Debian.yml', before falling back on 'vars/os_defaults.yml'. If no files in the list are found, Ansible raises an error. 
+ +Selecting files and templates based on facts +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +You can use the same approach when different OS flavors or versions require different configuration files or templates. Select the appropriate file or template based on the variables assigned to each host. This approach is often much cleaner than putting a lot of conditionals into a single template to cover multiple OS or package versions. + +For example, you can template out a configuration file that is very different between, say, CentOS and Debian:: + + - name: Template a file + ansible.builtin.template: + src: "{{ item }}" + dest: /etc/myapp/foo.conf + loop: "{{ query('first_found', { 'files': myfiles, 'paths': mypaths}) }}" + vars: + myfiles: + - "{{ ansible_facts['distribution'] }}.conf" + - default.conf + mypaths: ['search_location_one/somedir/', '/opt/other_location/somedir/'] + +.. _commonly_used_facts: + +Commonly-used facts +=================== + +The following Ansible facts are frequently used in conditionals. + +.. _ansible_distribution: + +ansible_facts['distribution'] +----------------------------- + +Possible values (sample, not complete list):: + + Alpine + Altlinux + Amazon + Archlinux + ClearLinux + Coreos + CentOS + Debian + Fedora + Gentoo + Mandriva + NA + OpenWrt + OracleLinux + RedHat + Slackware + SLES + SMGL + SUSE + Ubuntu + VMwareESX + +.. See `OSDIST_LIST` + +.. _ansible_distribution_major_version: + +ansible_facts['distribution_major_version'] +------------------------------------------- + +The major version of the operating system. For example, the value is `16` for Ubuntu 16.04. + +.. _ansible_os_family: + +ansible_facts['os_family'] +-------------------------- + +Possible values (sample, not complete list):: + + AIX + Alpine + Altlinux + Archlinux + Darwin + Debian + FreeBSD + Gentoo + HP-UX + Mandrake + RedHat + SMGL + Slackware + Solaris + Suse + Windows + +.. 
Ansible checks `OS_FAMILY_MAP`; if there's no match, it returns the value of `platform.system()`. + +.. seealso:: + + :ref:`working_with_playbooks` + An introduction to playbooks + :ref:`playbooks_reuse_roles` + Playbook organization by roles + :ref:`playbooks_best_practices` + Tips and tricks for playbooks + :ref:`playbooks_variables` + All about variables + `User Mailing List <https://groups.google.com/group/ansible-devel>`_ + Have a question? Stop by the google group! + `irc.freenode.net <http://irc.freenode.net>`_ + #ansible IRC chat channel diff --git a/docs/docsite/rst/user_guide/playbooks_debugger.rst b/docs/docsite/rst/user_guide/playbooks_debugger.rst new file mode 100644 index 00000000..cc330cc5 --- /dev/null +++ b/docs/docsite/rst/user_guide/playbooks_debugger.rst @@ -0,0 +1,329 @@ +.. _playbook_debugger: + +*************** +Debugging tasks +*************** + +Ansible offers a task debugger so you can fix errors during execution instead of editing your playbook and running it again to see if your change worked. You have access to all of the features of the debugger in the context of the task. You can check or set the value of variables, update module arguments, and re-run the task with the new variables and arguments. The debugger lets you resolve the cause of the failure and continue with playbook execution. + +.. contents:: + :local: + +Enabling the debugger +===================== + +The debugger is not enabled by default. If you want to invoke the debugger during playbook execution, you must enable it first. + +Use one of these three methods to enable the debugger: + + * with the debugger keyword + * in configuration or an environment variable, or + * as a strategy + +Enabling the debugger with the ``debugger`` keyword +--------------------------------------------------- + +.. versionadded:: 2.5 + +You can use the ``debugger`` keyword to enable (or disable) the debugger for a specific play, role, block, or task. 
This option is especially useful when developing or extending playbooks, plays, and roles. You can enable the debugger on new or updated tasks. If they fail, you can fix the errors efficiently. The ``debugger`` keyword accepts five values: + +.. table:: + :class: documentation-table + + ========================= ====================================================== + Value Result + ========================= ====================================================== + always Always invoke the debugger, regardless of the outcome + + never Never invoke the debugger, regardless of the outcome + + on_failed Only invoke the debugger if a task fails + + on_unreachable Only invoke the debugger if a host is unreachable + + on_skipped Only invoke the debugger if the task is skipped + + ========================= ====================================================== + +When you use the ``debugger`` keyword, the value you specify overrides any global configuration to enable or disable the debugger. If you define ``debugger`` at multiple levels, such as in a role and in a task, Ansible honors the most granular definition. The definition at the play or role level applies to all blocks and tasks within that play or role, unless they specify a different value. The definition at the block level overrides the definition at the play or role level, and applies to all tasks within that block, unless they specify a different value. The definition at the task level always applies to the task; it overrides the definitions at the block, play, or role level. + +Examples of using the ``debugger`` keyword +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Example of setting the ``debugger`` keyword on a task: + +.. code-block:: yaml + + - name: Execute a command + ansible.builtin.command: "false" + debugger: on_failed + +Example of setting the ``debugger`` keyword on a play: + +.. 
code-block:: yaml + + - name: My play + hosts: all + debugger: on_skipped + tasks: + - name: Execute a command + ansible.builtin.command: "true" + when: False + +Example of setting the ``debugger`` keyword at multiple levels: + +.. code-block:: yaml + + + - name: Play + hosts: all + debugger: never + tasks: + - name: Execute a command + ansible.builtin.command: "false" + debugger: on_failed + +In this example, the debugger is set to ``never`` at the play level and to ``on_failed`` at the task level. If the task fails, Ansible invokes the debugger, because the definition on the task overrides the definition on its parent play. + +Enabling the debugger in configuration or an environment variable +----------------------------------------------------------------- + +.. versionadded:: 2.5 + +You can enable the task debugger globally with a setting in ansible.cfg or with an environment variable. The only options are ``True`` or ``False``. If you set the configuration option or environment variable to ``True``, Ansible runs the debugger on failed tasks by default. + +To enable the task debugger from ansible.cfg, add this setting to the defaults section:: + + [defaults] + enable_task_debugger = True + +To enable the task debugger with an environment variable, pass the variable when you run your playbook:: + + ANSIBLE_ENABLE_TASK_DEBUGGER=True ansible-playbook -i hosts site.yml + +When you enable the debugger globally, every failed task invokes the debugger, unless the role, play, block, or task explicitly disables the debugger. If you need more granular control over what conditions trigger the debugger, use the ``debugger`` keyword. + +Enabling the debugger as a strategy +----------------------------------- + +If you are running legacy playbooks or roles, you may see the debugger enabled as a :ref:`strategy <strategy_plugins>`. You can do this at the play level, in ansible.cfg, or with the environment variable ``ANSIBLE_STRATEGY=debug``. For example: + +.. 
code-block:: yaml + + - hosts: test + strategy: debug + tasks: + ... + +Or in ansible.cfg:: + + [defaults] + strategy = debug + +.. note:: + + This backwards-compatible method, which matches Ansible versions before 2.5, may be removed in a future release. + +Resolving errors in the debugger +================================ + +After Ansible invokes the debugger, you can use the seven :ref:`debugger commands <available_commands>` to resolve the error that Ansible encountered. Consider this example playbook, which defines the ``var1`` variable but uses the undefined ``wrong_var`` variable in a task by mistake. + +.. code-block:: yaml + + - hosts: test + debugger: on_failed + gather_facts: no + vars: + var1: value1 + tasks: + - name: Use a wrong variable + ansible.builtin.ping: data={{ wrong_var }} + +If you run this playbook, Ansible invokes the debugger when the task fails. From the debug prompt, you can change the module arguments or the variables and run the task again. + +.. code-block:: none + + PLAY *************************************************************************** + + TASK [wrong variable] ********************************************************** + fatal: [192.0.2.10]: FAILED! => {"failed": true, "msg": "ERROR! 'wrong_var' is undefined"} + Debugger invoked + [192.0.2.10] TASK: wrong variable (debug)> p result._result + {'failed': True, + 'msg': 'The task includes an option with an undefined variable. 
The error ' + "was: 'wrong_var' is undefined\n" + '\n' + 'The error appears to have been in ' + "'playbooks/debugger.yml': line 7, " + 'column 7, but may\n' + 'be elsewhere in the file depending on the exact syntax problem.\n' + '\n' + 'The offending line appears to be:\n' + '\n' + ' tasks:\n' + ' - name: wrong variable\n' + ' ^ here\n'} + [192.0.2.10] TASK: wrong variable (debug)> p task.args + {u'data': u'{{ wrong_var }}'} + [192.0.2.10] TASK: wrong variable (debug)> task.args['data'] = '{{ var1 }}' + [192.0.2.10] TASK: wrong variable (debug)> p task.args + {u'data': '{{ var1 }}'} + [192.0.2.10] TASK: wrong variable (debug)> redo + ok: [192.0.2.10] + + PLAY RECAP ********************************************************************* + 192.0.2.10 : ok=1 changed=0 unreachable=0 failed=0 + +Changing the task arguments in the debugger to use ``var1`` instead of ``wrong_var`` makes the task run successfully. + +.. _available_commands: + +Available debug commands +======================== + +You can use these seven commands at the debug prompt: + +.. table:: + :class: documentation-table + + ========================== ============ ========================================================= + Command Shortcut Action + ========================== ============ ========================================================= + print p Print information about the task + + task.args[*key*] = *value* no shortcut Update module arguments + + task_vars[*key*] = *value* no shortcut Update task variables (you must ``update_task`` next) + + update_task u Recreate a task with updated task variables + + redo r Run the task again + + continue c Continue executing, starting with the next task + + quit q Quit the debugger + + ========================== ============ ========================================================= + +For more details, see the individual descriptions and examples below. + +.. 
_pprint_command: + +Print command +------------- + +``print *task/task.args/task_vars/host/result*`` prints information about the task:: + + [192.0.2.10] TASK: install package (debug)> p task + TASK: install package + [192.0.2.10] TASK: install package (debug)> p task.args + {u'name': u'{{ pkg_name }}'} + [192.0.2.10] TASK: install package (debug)> p task_vars + {u'ansible_all_ipv4_addresses': [u'192.0.2.10'], + u'ansible_architecture': u'x86_64', + ... + } + [192.0.2.10] TASK: install package (debug)> p task_vars['pkg_name'] + u'bash' + [192.0.2.10] TASK: install package (debug)> p host + 192.0.2.10 + [192.0.2.10] TASK: install package (debug)> p result._result + {'_ansible_no_log': False, + 'changed': False, + u'failed': True, + ... + u'msg': u"No package matching 'not_exist' is available"} + +.. _update_args_command: + +Update args command +------------------- + +``task.args[*key*] = *value*`` updates a module argument. This sample playbook has an invalid package name:: + + - hosts: test + strategy: debug + gather_facts: yes + vars: + pkg_name: not_exist + tasks: + - name: Install a package + ansible.builtin.apt: name={{ pkg_name }} + +When you run the playbook, the invalid package name triggers an error, and Ansible invokes the debugger. You can fix the package name by viewing, then updating the module argument:: + + [192.0.2.10] TASK: install package (debug)> p task.args + {u'name': u'{{ pkg_name }}'} + [192.0.2.10] TASK: install package (debug)> task.args['name'] = 'bash' + [192.0.2.10] TASK: install package (debug)> p task.args + {u'name': 'bash'} + [192.0.2.10] TASK: install package (debug)> redo + +After you update the module argument, use ``redo`` to run the task again with the new args. + +.. _update_vars_command: + +Update vars command +------------------- + +``task_vars[*key*] = *value*`` updates the ``task_vars``. 
You could fix the playbook above by viewing, then updating the task variables instead of the module args:: + + [192.0.2.10] TASK: install package (debug)> p task_vars['pkg_name'] + u'not_exist' + [192.0.2.10] TASK: install package (debug)> task_vars['pkg_name'] = 'bash' + [192.0.2.10] TASK: install package (debug)> p task_vars['pkg_name'] + 'bash' + [192.0.2.10] TASK: install package (debug)> update_task + [192.0.2.10] TASK: install package (debug)> redo + +After you update the task variables, you must use ``update_task`` to load the new variables before using ``redo`` to run the task again. + +.. note:: + In 2.5 this was updated from ``vars`` to ``task_vars`` to avoid conflicts with the ``vars()`` python function. + +.. _update_task_command: + +Update task command +------------------- + +.. versionadded:: 2.8 + +``u`` or ``update_task`` recreates the task from the original task data structure and templates with updated task variables. See the entry :ref:`update_vars_command` for an example of use. + +.. _redo_command: + +Redo command +------------ + +``r`` or ``redo`` runs the task again. + +.. _continue_command: + +Continue command +---------------- + +``c`` or ``continue`` continues executing, starting with the next task. + +.. _quit_command: + +Quit command +------------ + +``q`` or ``quit`` quits the debugger. The playbook execution is aborted. + +How the debugger interacts with the free strategy +================================================= + +With the default ``linear`` strategy enabled, Ansible halts execution while the debugger is active, and runs the debugged task immediately after you enter the ``redo`` command. With the ``free`` strategy enabled, however, Ansible does not wait for all hosts, and may queue later tasks on one host before a task fails on another host. With the ``free`` strategy, Ansible does not queue or execute any tasks while the debugger is active. 
However, all queued tasks remain in the queue and run as soon as you exit the debugger. If you use ``redo`` to reschedule a task from the debugger, other queued tasks may execute before your rescheduled task. For more information about strategies, see :ref:`playbooks_strategies`. + +.. seealso:: + + :ref:`playbooks_start_and_step` + Running playbooks while debugging or testing + :ref:`playbooks_intro` + An introduction to playbooks + `User Mailing List <https://groups.google.com/group/ansible-devel>`_ + Have a question? Stop by the google group! + `irc.freenode.net <http://irc.freenode.net>`_ + #ansible IRC chat channel diff --git a/docs/docsite/rst/user_guide/playbooks_delegation.rst b/docs/docsite/rst/user_guide/playbooks_delegation.rst new file mode 100644 index 00000000..1042bafb --- /dev/null +++ b/docs/docsite/rst/user_guide/playbooks_delegation.rst @@ -0,0 +1,136 @@ +.. _playbooks_delegation: + +Controlling where tasks run: delegation and local actions +========================================================= + +By default Ansible gathers facts and executes all tasks on the machines that match the ``hosts`` line of your playbook. This page shows you how to delegate tasks to a different machine or group, delegate facts to specific machines or groups, or run an entire playbook locally. Using these approaches, you can manage inter-related environments precisely and efficiently. For example, when updating your webservers, you might need to remove them from a load-balanced pool temporarily. You cannot perform this task on the webservers themselves. By delegating the task to localhost, you keep all the tasks within the same play. + +.. contents:: + :local: + +Tasks that cannot be delegated +------------------------------ + +Some tasks always execute on the controller. These tasks, including ``include``, ``add_host``, and ``debug``, cannot be delegated. + +.. 
_delegation: + +Delegating tasks +---------------- + +If you want to perform a task on one host with reference to other hosts, use the ``delegate_to`` keyword on a task. This is ideal for managing nodes in a load balanced pool or for controlling outage windows. You can use delegation with the :ref:`serial <rolling_update_batch_size>` keyword to control the number of hosts executing at one time:: + + --- + - hosts: webservers + serial: 5 + + tasks: + - name: Take out of load balancer pool + ansible.builtin.command: /usr/bin/take_out_of_pool {{ inventory_hostname }} + delegate_to: 127.0.0.1 + + - name: Actual steps would go here + ansible.builtin.yum: + name: acme-web-stack + state: latest + + - name: Add back to load balancer pool + ansible.builtin.command: /usr/bin/add_back_to_pool {{ inventory_hostname }} + delegate_to: 127.0.0.1 + +The first and third tasks in this play run on 127.0.0.1, which is the machine running Ansible. There is also a shorthand syntax that you can use on a per-task basis: ``local_action``. Here is the same playbook as above, but using the shorthand syntax for delegating to 127.0.0.1:: + + --- + # ... + + tasks: + - name: Take out of load balancer pool + local_action: ansible.builtin.command /usr/bin/take_out_of_pool {{ inventory_hostname }} + + # ... + + - name: Add back to load balancer pool + local_action: ansible.builtin.command /usr/bin/add_back_to_pool {{ inventory_hostname }} + +You can use a local action to call 'rsync' to recursively copy files to the managed servers:: + + --- + # ... + + tasks: + - name: Recursively copy files from management server to target + local_action: ansible.builtin.command rsync -a /path/to/files {{ inventory_hostname }}:/path/to/target/ + +Note that you must have passphrase-less SSH keys or an ssh-agent configured for this to work, otherwise rsync asks for a passphrase. + +To specify more arguments, use the following syntax:: + + --- + # ... 
+ + tasks: + - name: Send summary mail + local_action: + module: community.general.mail + subject: "Summary Mail" + to: "{{ mail_recipient }}" + body: "{{ mail_body }}" + run_once: True + +The `ansible_host` variable reflects the host a task is delegated to. + +.. _delegate_facts: + +Delegating facts +---------------- + +Delegating Ansible tasks is like delegating tasks in the real world - your groceries belong to you, even if someone else delivers them to your home. Similarly, any facts gathered by a delegated task are assigned by default to the `inventory_hostname` (the current host), not to the host which produced the facts (the delegated to host). To assign gathered facts to the delegated host instead of the current host, set ``delegate_facts`` to ``true``:: + + --- + - hosts: app_servers + + tasks: + - name: Gather facts from db servers + ansible.builtin.setup: + delegate_to: "{{ item }}" + delegate_facts: true + loop: "{{ groups['dbservers'] }}" + +This task gathers facts for the machines in the dbservers group and assigns the facts to those machines, even though the play targets the app_servers group. This way you can lookup `hostvars['dbhost1']['ansible_default_ipv4']['address']` even though dbservers were not part of the play, or left out by using `--limit`. + +.. _local_playbooks: + +Local playbooks +--------------- + +It may be useful to use a playbook locally on a remote host, rather than by connecting over SSH. This can be useful for assuring the configuration of a system by putting a playbook in a crontab. This may also be used +to run a playbook inside an OS installer, such as an Anaconda kickstart. 
+ +To run an entire playbook locally, just set the ``hosts:`` line to ``hosts: 127.0.0.1`` and then run the playbook like so:: + + ansible-playbook playbook.yml --connection=local + +Alternatively, a local connection can be used in a single playbook play, even if other plays in the playbook +use the default remote connection type:: + + --- + - hosts: 127.0.0.1 + connection: local + +.. note:: + If you set the connection to local and there is no ansible_python_interpreter set, modules will run under /usr/bin/python and not + under {{ ansible_playbook_python }}. Be sure to set ansible_python_interpreter: "{{ ansible_playbook_python }}" in + host_vars/localhost.yml, for example. You can avoid this issue by using ``local_action`` or ``delegate_to: localhost`` instead. + +.. seealso:: + + :ref:`playbooks_intro` + An introduction to playbooks + :ref:`playbooks_strategies` + More ways to control how and where Ansible executes + `Ansible Examples on GitHub <https://github.com/ansible/ansible-examples>`_ + Many examples of full-stack deployments + `User Mailing List <https://groups.google.com/group/ansible-devel>`_ + Have a question? Stop by the google group! + `irc.freenode.net <http://irc.freenode.net>`_ + #ansible IRC chat channel diff --git a/docs/docsite/rst/user_guide/playbooks_environment.rst b/docs/docsite/rst/user_guide/playbooks_environment.rst new file mode 100644 index 00000000..7d97b954 --- /dev/null +++ b/docs/docsite/rst/user_guide/playbooks_environment.rst @@ -0,0 +1,141 @@ +.. _playbooks_environment: + +Setting the remote environment +============================== + +.. versionadded:: 1.1 + +You can use the ``environment`` keyword at the play, block, or task level to set an environment variable for an action on a remote host. With this keyword, you can enable using a proxy for a task that does http requests, set the required environment variables for language-specific version managers, and more. 
+ +When you set a value with ``environment:`` at the play or block level, it is available only to tasks within the play or block that are executed by the same user. The ``environment:`` keyword does not affect Ansible itself, Ansible configuration settings, the environment for other users, or the execution of other plugins like lookups and filters. Variables set with ``environment:`` do not automatically become Ansible facts, even when you set them at the play level. You must include an explicit ``gather_facts`` task in your playbook and set the ``environment`` keyword on that task to turn these values into Ansible facts. + +.. contents:: + :local: + +Setting the remote environment in a task +---------------------------------------- + +You can set the environment directly at the task level:: + + - hosts: all + remote_user: root + + tasks: + + - name: Install cobbler + ansible.builtin.package: + name: cobbler + state: present + environment: + http_proxy: http://proxy.example.com:8080 + +You can re-use environment settings by defining them as variables in your play and accessing them in a task as you would access any stored Ansible variable:: + + - hosts: all + remote_user: root + + # create a variable named "proxy_env" that is a dictionary + vars: + proxy_env: + http_proxy: http://proxy.example.com:8080 + + tasks: + + - name: Install cobbler + ansible.builtin.package: + name: cobbler + state: present + environment: "{{ proxy_env }}" + +You can store environment settings for re-use in multiple playbooks by defining them in a group_vars file:: + + --- + # file: group_vars/boston + + ntp_server: ntp.bos.example.com + backup: bak.bos.example.com + proxy_env: + http_proxy: http://proxy.bos.example.com:8080 + https_proxy: http://proxy.bos.example.com:8080 + +You can set the remote environment at the play level:: + + - hosts: testing + + roles: + - php + - nginx + + environment: + http_proxy: http://proxy.example.com:8080 + +These examples show proxy settings, but you can 
provide any number of settings this way. + +Working with language-specific version managers +=============================================== + +Some language-specific version managers (such as rbenv and nvm) require you to set environment variables while these tools are in use. When using these tools manually, you usually source some environment variables from a script or from lines added to your shell configuration file. In Ansible, you can do this with the environment keyword at the play level:: + + --- + ### A playbook demonstrating a common npm workflow: + # - Check for package.json in the application directory + # - If package.json exists: + # * Run npm prune + # * Run npm install + + - hosts: application + become: false + + vars: + node_app_dir: /var/local/my_node_app + + environment: + NVM_DIR: /var/local/nvm + PATH: /var/local/nvm/versions/node/v4.2.1/bin:{{ ansible_env.PATH }} + + tasks: + - name: Check for package.json + ansible.builtin.stat: + path: '{{ node_app_dir }}/package.json' + register: packagejson + + - name: Run npm prune + ansible.builtin.command: npm prune + args: + chdir: '{{ node_app_dir }}' + when: packagejson.stat.exists + + - name: Run npm install + community.general.npm: + path: '{{ node_app_dir }}' + when: packagejson.stat.exists + +.. note:: + The example above uses ``ansible_env`` as part of the PATH. Basing variables on ``ansible_env`` is risky. Ansible populates ``ansible_env`` values by gathering facts, so the value of the variables depends on the remote_user or become_user Ansible used when gathering those facts. If you change remote_user/become_user the values in ``ansible_env`` may not be the ones you expect. + +.. warning:: + Environment variables are normally passed in clear text (shell plugin dependent) so they are not a recommended way of passing secrets to the module being executed. 
+ +You can also specify the environment at the task level:: + + --- + - name: Install ruby 2.3.1 + ansible.builtin.command: rbenv install {{ rbenv_ruby_version }} + args: + creates: '{{ rbenv_root }}/versions/{{ rbenv_ruby_version }}/bin/ruby' + vars: + rbenv_root: /usr/local/rbenv + rbenv_ruby_version: 2.3.1 + environment: + CONFIGURE_OPTS: '--disable-install-doc' + RBENV_ROOT: '{{ rbenv_root }}' + PATH: '{{ rbenv_root }}/bin:{{ rbenv_root }}/shims:{{ rbenv_plugins }}/ruby-build/bin:{{ ansible_env.PATH }}' + +.. seealso:: + + :ref:`playbooks_intro` + An introduction to playbooks + `User Mailing List <https://groups.google.com/group/ansible-devel>`_ + Have a question? Stop by the google group! + `irc.freenode.net <http://irc.freenode.net>`_ + #ansible IRC chat channel diff --git a/docs/docsite/rst/user_guide/playbooks_error_handling.rst b/docs/docsite/rst/user_guide/playbooks_error_handling.rst new file mode 100644 index 00000000..c73067cc --- /dev/null +++ b/docs/docsite/rst/user_guide/playbooks_error_handling.rst @@ -0,0 +1,245 @@ +.. _playbooks_error_handling: + +*************************** +Error handling in playbooks +*************************** + +When Ansible receives a non-zero return code from a command or a failure from a module, by default it stops executing on that host and continues on other hosts. However, in some circumstances you may want different behavior. Sometimes a non-zero return code indicates success. Sometimes you want a failure on one host to stop execution on all hosts. Ansible provides tools and settings to handle these situations and help you get the behavior, output, and reporting you want. + +.. contents:: + :local: + +.. _ignoring_failed_commands: + +Ignoring failed commands +======================== + +By default Ansible stops executing tasks on a host when a task fails on that host. 
You can use ``ignore_errors`` to continue on in spite of the failure:: + + - name: Do not count this as a failure + ansible.builtin.command: /bin/false + ignore_errors: yes + +The ``ignore_errors`` directive only works when the task is able to run and returns a value of 'failed'. It does not make Ansible ignore undefined variable errors, connection failures, execution issues (for example, missing packages), or syntax errors. + +Ignoring unreachable host errors +================================ + +.. versionadded:: 2.7 + +You can ignore a task failure due to the host instance being 'UNREACHABLE' with the ``ignore_unreachable`` keyword. Ansible ignores the task errors, but continues to execute future tasks against the unreachable host. For example, at the task level:: + + - name: This executes, fails, and the failure is ignored + ansible.builtin.command: /bin/true + ignore_unreachable: yes + + - name: This executes, fails, and ends the play for this host + ansible.builtin.command: /bin/true + +And at the playbook level:: + + - hosts: all + ignore_unreachable: yes + tasks: + - name: This executes, fails, and the failure is ignored + ansible.builtin.command: /bin/true + + - name: This executes, fails, and ends the play for this host + ansible.builtin.command: /bin/true + ignore_unreachable: no + +.. _resetting_unreachable: + +Resetting unreachable hosts +=========================== + +If Ansible cannot connect to a host, it marks that host as 'UNREACHABLE' and removes it from the list of active hosts for the run. You can use `meta: clear_host_errors` to reactivate all hosts, so subsequent tasks can try to reach them again. + + +.. _handlers_and_failure: + +Handlers and failure +==================== + +Ansible runs :ref:`handlers <handlers>` at the end of each play. If a task notifies a handler but +another task fails later in the play, by default the handler does *not* run on that host, +which may leave the host in an unexpected state. 
For example, a task could update +a configuration file and notify a handler to restart some service. If a +task later in the same play fails, the configuration file might be changed but +the service will not be restarted. + +You can change this behavior with the ``--force-handlers`` command-line option, +by including ``force_handlers: True`` in a play, or by adding ``force_handlers = True`` +to ansible.cfg. When handlers are forced, Ansible will run all notified handlers on +all hosts, even hosts with failed tasks. (Note that certain errors could still prevent +the handler from running, such as a host becoming unreachable.) + +.. _controlling_what_defines_failure: + +Defining failure +================ + +Ansible lets you define what "failure" means in each task using the ``failed_when`` conditional. As with all conditionals in Ansible, lists of multiple ``failed_when`` conditions are joined with an implicit ``and``, meaning the task only fails when *all* conditions are met. If you want to trigger a failure when any of the conditions is met, you must define the conditions in a string with an explicit ``or`` operator. + +You may check for failure by searching for a word or phrase in the output of a command:: + + - name: Fail task when the command error output prints FAILED + ansible.builtin.command: /usr/bin/example-command -x -y -z + register: command_result + failed_when: "'FAILED' in command_result.stderr" + +or based on the return code:: + + - name: Fail task when both files are identical + ansible.builtin.raw: diff foo/file1 bar/file2 + register: diff_cmd + failed_when: diff_cmd.rc == 0 or diff_cmd.rc >= 2 + +You can also combine multiple conditions for failure. 
This task will fail if both conditions are true:: + + - name: Check if a file exists in temp and fail task if it does + ansible.builtin.command: ls /tmp/this_should_not_be_here + register: result + failed_when: + - result.rc == 0 + - '"No such" not in result.stdout' + +If you want the task to fail when only one condition is satisfied, change the ``failed_when`` definition to:: + + failed_when: result.rc == 0 or "No such" not in result.stdout + +If you have too many conditions to fit neatly into one line, you can split it into a multi-line yaml value with ``>``:: + + - name: example of many failed_when conditions with OR + ansible.builtin.shell: "./myBinary" + register: ret + failed_when: > + ("No such file or directory" in ret.stdout) or + (ret.stderr != '') or + (ret.rc == 10) + +.. _override_the_changed_result: + +Defining "changed" +================== + +Ansible lets you define when a particular task has "changed" a remote node using the ``changed_when`` conditional. This lets you determine, based on return codes or output, whether a change should be reported in Ansible statistics and whether a handler should be triggered or not. As with all conditionals in Ansible, lists of multiple ``changed_when`` conditions are joined with an implicit ``and``, meaning the task only reports a change when *all* conditions are met. If you want to report a change when any of the conditions is met, you must define the conditions in a string with an explicit ``or`` operator. 
For example:: + + tasks: + + - name: Report 'changed' when the return code is not equal to 2 + ansible.builtin.shell: /usr/bin/billybass --mode="take me to the river" + register: bass_result + changed_when: "bass_result.rc != 2" + + - name: This will never report 'changed' status + ansible.builtin.shell: wall 'beep' + changed_when: False + +You can also combine multiple conditions to override "changed" result:: + + - name: Combine multiple conditions to override 'changed' result + ansible.builtin.command: /bin/fake_command + register: result + ignore_errors: True + changed_when: + - '"ERROR" in result.stderr' + - result.rc == 2 + +See :ref:`controlling_what_defines_failure` for more conditional syntax examples. + +Ensuring success for command and shell +====================================== + +The :ref:`command <command_module>` and :ref:`shell <shell_module>` modules care about return codes, so if you have a command whose successful exit code is not zero, you can do this:: + + tasks: + - name: Run this command and ignore the result + ansible.builtin.shell: /usr/bin/somecommand || /bin/true + + +Aborting a play on all hosts +============================ + +Sometimes you want a failure on a single host, or failures on a certain percentage of hosts, to abort the entire play on all hosts. You can stop play execution after the first failure happens with ``any_errors_fatal``. For finer-grained control, you can use ``max_fail_percentage`` to abort the run after a given percentage of hosts has failed. + +Aborting on the first error: any_errors_fatal +--------------------------------------------- + +If you set ``any_errors_fatal`` and a task returns an error, Ansible finishes the fatal task on all hosts in the current batch, then stops executing the play on all hosts. Subsequent tasks and plays are not executed. You can recover from fatal errors by adding a :ref:`rescue section <block_error_handling>` to the block. 
You can set ``any_errors_fatal`` at the play or block level:: + + - hosts: somehosts + any_errors_fatal: true + roles: + - myrole + + - hosts: somehosts + tasks: + - block: + - include_tasks: mytasks.yml + any_errors_fatal: true + +You can use this feature when all tasks must be 100% successful to continue playbook execution. For example, if you run a service on machines in multiple data centers with load balancers to pass traffic from users to the service, you want all load balancers to be disabled before you stop the service for maintenance. To ensure that any failure in the task that disables the load balancers will stop all other tasks:: + + --- + - hosts: load_balancers_dc_a + any_errors_fatal: true + + tasks: + - name: Shut down datacenter 'A' + ansible.builtin.command: /usr/bin/disable-dc + + - hosts: frontends_dc_a + + tasks: + - name: Stop service + ansible.builtin.command: /usr/bin/stop-software + + - name: Update software + ansible.builtin.command: /usr/bin/upgrade-software + + - hosts: load_balancers_dc_a + + tasks: + - name: Start datacenter 'A' + ansible.builtin.command: /usr/bin/enable-dc + +In this example Ansible starts the software upgrade on the front ends only if all of the load balancers are successfully disabled. + +.. _maximum_failure_percentage: + +Setting a maximum failure percentage +------------------------------------ + +By default, Ansible continues to execute tasks as long as there are hosts that have not yet failed. In some situations, such as when executing a rolling update, you may want to abort the play when a certain threshold of failures has been reached. To achieve this, you can set a maximum failure percentage on a play:: + + --- + - hosts: webservers + max_fail_percentage: 30 + serial: 10 + +The ``max_fail_percentage`` setting applies to each batch when you use it with :ref:`serial <rolling_update_batch_size>`. 
In the example above, if more than 3 of the 10 servers in the first (or any) batch of servers failed, the rest of the play would be aborted. + +.. note:: + + The percentage set must be exceeded, not equaled. For example, if serial were set to 4 and you wanted the task to abort the play when 2 of the systems failed, set the max_fail_percentage at 49 rather than 50. + +Controlling errors in blocks +============================ + +You can also use blocks to define responses to task errors. This approach is similar to exception handling in many programming languages. See :ref:`block_error_handling` for details and examples. + +.. seealso:: + + :ref:`playbooks_intro` + An introduction to playbooks + :ref:`playbooks_best_practices` + Tips and tricks for playbooks + :ref:`playbooks_conditionals` + Conditional statements in playbooks + :ref:`playbooks_variables` + All about variables + `User Mailing List <https://groups.google.com/group/ansible-devel>`_ + Have a question? Stop by the google group! + `irc.freenode.net <http://irc.freenode.net>`_ + #ansible IRC chat channel diff --git a/docs/docsite/rst/user_guide/playbooks_filters.rst b/docs/docsite/rst/user_guide/playbooks_filters.rst new file mode 100644 index 00000000..f009900a --- /dev/null +++ b/docs/docsite/rst/user_guide/playbooks_filters.rst @@ -0,0 +1,1696 @@ +.. _playbooks_filters: + +******************************** +Using filters to manipulate data +******************************** + +Filters let you transform JSON data into YAML data, split a URL to extract the hostname, get the SHA1 hash of a string, add or multiply integers, and much more. You can use the Ansible-specific filters documented here to manipulate your data, or use any of the standard filters shipped with Jinja2 - see the list of :ref:`built-in filters <jinja2:builtin-filters>` in the official Jinja2 template documentation. You can also use :ref:`Python methods <jinja2:python-methods>` to transform data. 
You can :ref:`create custom Ansible filters as plugins <developing_filter_plugins>`, though we generally welcome new filters into the ansible-base repo so everyone can use them. + +Because templating happens on the Ansible controller, **not** on the target host, filters execute on the controller and transform data locally. + +.. contents:: + :local: + +Handling undefined variables +============================ + +Filters can help you manage missing or undefined variables by providing defaults or making some variables optional. If you configure Ansible to ignore most undefined variables, you can mark some variables as requiring values with the ``mandatory`` filter. + +.. _defaulting_undefined_variables: + +Providing default values +------------------------ + +You can provide default values for variables directly in your templates using the Jinja2 'default' filter. This is often a better approach than failing if a variable is not defined:: + + {{ some_variable | default(5) }} + +In the above example, if the variable 'some_variable' is not defined, Ansible uses the default value 5, rather than raising an "undefined variable" error and failing. If you are working within a role, you can also add a ``defaults/main.yml`` to define the default values for variables in your role. + +Beginning in version 2.8, attempting to access an attribute of an Undefined value in Jinja will return another Undefined value, rather than throwing an error immediately. This means that you can now simply use +a default with a value in a nested data structure (in other words, :code:`{{ foo.bar.baz | default('DEFAULT') }}`) when you do not know if the intermediate values are defined. + +If you want to use the default value when variables evaluate to false or an empty string you have to set the second parameter to ``true``:: + + {{ lookup('env', 'MY_USER') | default('admin', true) }} + +.. 
_omitting_undefined_variables: + +Making variables optional +------------------------- + +By default Ansible requires values for all variables in a templated expression. However, you can make specific variables optional. For example, you might want to use a system default for some items and control the value for others. To make a variable optional, set the default value to the special variable ``omit``:: + + - name: Touch files with an optional mode + ansible.builtin.file: + dest: "{{ item.path }}" + state: touch + mode: "{{ item.mode | default(omit) }}" + loop: + - path: /tmp/foo + - path: /tmp/bar + - path: /tmp/baz + mode: "0444" + +In this example, the default mode for the files ``/tmp/foo`` and ``/tmp/bar`` is determined by the umask of the system. Ansible does not send a value for ``mode``. Only the third file, ``/tmp/baz``, receives the `mode=0444` option. + +.. note:: If you are "chaining" additional filters after the ``default(omit)`` filter, you should instead do something like this: + ``"{{ foo | default(None) | some_filter or omit }}"``. In this example, the default ``None`` (Python null) value will cause the later filters to fail, which will trigger the ``or omit`` portion of the logic. Using ``omit`` in this manner is very specific to the later filters you are chaining though, so be prepared for some trial and error if you do this. + +.. _forcing_variables_to_be_defined: + +Defining mandatory values +------------------------- + +If you configure Ansible to ignore undefined variables, you may want to define some values as mandatory. By default, Ansible fails if a variable in your playbook or command is undefined. You can configure Ansible to allow undefined variables by setting :ref:`DEFAULT_UNDEFINED_VAR_BEHAVIOR` to ``false``. In that case, you may want to require some variables to be defined. 
You can do this with:: + + {{ variable | mandatory }} + +The variable value will be used as is, but the template evaluation will raise an error if it is undefined. + +Defining different values for true/false/null (ternary) +======================================================= + +You can create a test, then define one value to use when the test returns true and another when the test returns false (new in version 1.9):: + + {{ (status == 'needs_restart') | ternary('restart', 'continue') }} + +In addition, you can define one value to use on true, one value on false and a third value on null (new in version 2.8):: + + {{ enabled | ternary('no shutdown', 'shutdown', omit) }} + +Managing data types +=================== + +You might need to know, change, or set the data type on a variable. For example, a registered variable might contain a dictionary when your next task needs a list, or a user :ref:`prompt <playbooks_prompts>` might return a string when your playbook needs a boolean value. Use the ``type_debug``, ``dict2items``, and ``items2dict`` filters to manage data types. You can also use the data type itself to cast a value as a specific data type. + +Discovering the data type +------------------------- + +.. versionadded:: 2.3 + +If you are unsure of the underlying Python type of a variable, you can use the ``type_debug`` filter to display it. This is useful in debugging when you need a particular type of variable:: + + {{ myvar | type_debug }} + + +.. _dict_filter: + +Transforming dictionaries into lists +------------------------------------ + +.. 
versionadded:: 2.6 + + +Use the ``dict2items`` filter to transform a dictionary into a list of items suitable for :ref:`looping <playbooks_loops>`:: + + {{ dict | dict2items }} + +Dictionary data (before applying the ``dict2items`` filter):: + + tags: + Application: payment + Environment: dev + +List data (after applying the ``dict2items`` filter):: + + - key: Application + value: payment + - key: Environment + value: dev + +.. versionadded:: 2.8 + +The ``dict2items`` filter is the reverse of the ``items2dict`` filter. + +If you want to configure the names of the keys, the ``dict2items`` filter accepts 2 keyword arguments. Pass the ``key_name`` and ``value_name`` arguments to configure the names of the keys in the list output:: + + {{ files | dict2items(key_name='file', value_name='path') }} + +Dictionary data (before applying the ``dict2items`` filter):: + + files: + users: /etc/passwd + groups: /etc/group + +List data (after applying the ``dict2items`` filter):: + + - file: users + path: /etc/passwd + - file: groups + path: /etc/group + + +Transforming lists into dictionaries +------------------------------------ + +.. versionadded:: 2.7 + +Use the ``items2dict`` filter to transform a list into a dictionary, mapping the content into ``key: value`` pairs:: + + {{ tags | items2dict }} + +List data (before applying the ``items2dict`` filter):: + + tags: + - key: Application + value: payment + - key: Environment + value: dev + +Dictionary data (after applying the ``items2dict`` filter):: + + Application: payment + Environment: dev + +The ``items2dict`` filter is the reverse of the ``dict2items`` filter. + +Not all lists use ``key`` to designate keys and ``value`` to designate values. For example:: + + fruits: + - fruit: apple + color: red + - fruit: pear + color: yellow + - fruit: grapefruit + color: yellow + +In this example, you must pass the ``key_name`` and ``value_name`` arguments to configure the transformation. 
For example:: + + {{ tags | items2dict(key_name='fruit', value_name='color') }} + +If you do not pass these arguments, or do not pass the correct values for your list, you will see ``KeyError: key`` or ``KeyError: my_typo``. + +Forcing the data type +--------------------- + +You can cast values as certain types. For example, if you expect the input "True" from a :ref:`vars_prompt <playbooks_prompts>` and you want Ansible to recognize it as a boolean value instead of a string:: + + - debug: + msg: test + when: some_string_value | bool + +If you want to perform a mathematical comparison on a fact and you want Ansible to recognize it as an integer instead of a string:: + + - shell: echo "only on Red Hat 6, derivatives, and later" + when: ansible_facts['os_family'] == "RedHat" and ansible_facts['lsb']['major_release'] | int >= 6 + + +.. versionadded:: 1.6 + +.. _filters_for_formatting_data: + +Formatting data: YAML and JSON +============================== + +You can switch a data structure in a template from or to JSON or YAML format, with options for formatting, indenting, and loading data. The basic filters are occasionally useful for debugging:: + + {{ some_variable | to_json }} + {{ some_variable | to_yaml }} + +For human readable output, you can use:: + + {{ some_variable | to_nice_json }} + {{ some_variable | to_nice_yaml }} + +You can change the indentation of either format:: + + {{ some_variable | to_nice_json(indent=2) }} + {{ some_variable | to_nice_yaml(indent=8) }} + +The ``to_yaml`` and ``to_nice_yaml`` filters use the `PyYAML library`_ which has a default 80 symbol string length limit. That causes unexpected line break after 80th symbol (if there is a space after 80th symbol) +To avoid such behavior and generate long lines, use the ``width`` option. You must use a hardcoded number to define the width, instead of a construction like ``float("inf")``, because the filter does not support proxying Python functions. 
For example:: + + {{ some_variable | to_yaml(indent=8, width=1337) }} + {{ some_variable | to_nice_yaml(indent=8, width=1337) }} + +The filter does support passing through other YAML parameters. For a full list, see the `PyYAML documentation`_. + +If you are reading in some already formatted data:: + + {{ some_variable | from_json }} + {{ some_variable | from_yaml }} + +for example:: + + tasks: + - name: Register JSON output as a variable + ansible.builtin.shell: cat /some/path/to/file.json + register: result + + - name: Set a variable + ansible.builtin.set_fact: + myvar: "{{ result.stdout | from_json }}" + + +Filter `to_json` and Unicode support +------------------------------------ + +By default `to_json` and `to_nice_json` will convert data received to ASCII, so:: + + {{ 'München'| to_json }} + +will return:: + + 'M\u00fcnchen' + +To keep Unicode characters, pass the parameter `ensure_ascii=False` to the filter:: + + {{ 'München'| to_json(ensure_ascii=False) }} + + 'München' + +.. versionadded:: 2.7 + +To parse multi-document YAML strings, the ``from_yaml_all`` filter is provided. +The ``from_yaml_all`` filter will return a generator of parsed YAML documents. + +for example:: + + tasks: + - name: Register a file content as a variable + ansible.builtin.shell: cat /some/path/to/multidoc-file.yaml + register: result + + - name: Print the transformed variable + ansible.builtin.debug: + msg: '{{ item }}' + loop: '{{ result.stdout | from_yaml_all | list }}' + +Combining and selecting data +============================ + +You can combine data from multiple sources and types, and select values from large data structures, giving you precise control over complex data. + +.. _zip_filter: + +Combining items from multiple lists: zip and zip_longest +-------------------------------------------------------- + +.. 
versionadded:: 2.3 + +To get a list combining the elements of other lists use ``zip``:: + + - name: Give me list combo of two lists + ansible.builtin.debug: + msg: "{{ [1,2,3,4,5] | zip(['a','b','c','d','e','f']) | list }}" + + - name: Give me shortest combo of two lists + ansible.builtin.debug: + msg: "{{ [1,2,3] | zip(['a','b','c','d','e','f']) | list }}" + +To always exhaust all lists use ``zip_longest``:: + + - name: Give me longest combo of three lists , fill with X + ansible.builtin.debug: + msg: "{{ [1,2,3] | zip_longest(['a','b','c','d','e','f'], [21, 22, 23], fillvalue='X') | list }}" + +Similarly to the output of the ``items2dict`` filter mentioned above, these filters can be used to construct a ``dict``:: + + {{ dict(keys_list | zip(values_list)) }} + +List data (before applying the ``zip`` filter):: + + keys_list: + - one + - two + values_list: + - apple + - orange + +Dictionary data (after applying the ``zip`` filter):: + + one: apple + two: orange + +Combining objects and subelements +--------------------------------- + +.. versionadded:: 2.7 + +The ``subelements`` filter produces a product of an object and the subelement values of that object, similar to the ``subelements`` lookup. This lets you specify individual subelements to use in a template.
For example, this expression:: + + {{ users | subelements('groups', skip_missing=True) }} + +Data before applying the ``subelements`` filter:: + + users: + - name: alice + authorized: + - /tmp/alice/onekey.pub + - /tmp/alice/twokey.pub + groups: + - wheel + - docker + - name: bob + authorized: + - /tmp/bob/id_rsa.pub + groups: + - docker + +Data after applying the ``subelements`` filter:: + + - + - name: alice + groups: + - wheel + - docker + authorized: + - /tmp/alice/onekey.pub + - /tmp/alice/twokey.pub + - wheel + - + - name: alice + groups: + - wheel + - docker + authorized: + - /tmp/alice/onekey.pub + - /tmp/alice/twokey.pub + - docker + - + - name: bob + authorized: + - /tmp/bob/id_rsa.pub + groups: + - docker + - docker + +You can use the transformed data with ``loop`` to iterate over the same subelement for multiple objects:: + + - name: Set authorized ssh key, extracting just that data from 'users' + ansible.posix.authorized_key: + user: "{{ item.0.name }}" + key: "{{ lookup('file', item.1) }}" + loop: "{{ users | subelements('authorized') }}" + +.. _combine_filter: + +Combining hashes/dictionaries +----------------------------- + +.. versionadded:: 2.0 + +The ``combine`` filter allows hashes to be merged. For example, the following would override keys in one hash:: + + {{ {'a':1, 'b':2} | combine({'b':3}) }} + +The resulting hash would be:: + + {'a':1, 'b':3} + +The filter can also take multiple arguments to merge:: + + {{ a | combine(b, c, d) }} + {{ [a, b, c, d] | combine }} + +In this case, keys in ``d`` would override those in ``c``, which would override those in ``b``, and so on. + +The filter also accepts two optional parameters: ``recursive`` and ``list_merge``. + +recursive + Is a boolean, default to ``False``. + Should the ``combine`` recursively merge nested hashes. + Note: It does **not** depend on the value of the ``hash_behaviour`` setting in ``ansible.cfg``. 
+ +list_merge + Is a string, its possible values are ``replace`` (default), ``keep``, ``append``, ``prepend``, ``append_rp`` or ``prepend_rp``. + It modifies the behaviour of ``combine`` when the hashes to merge contain arrays/lists. + +.. code-block:: yaml + + default: + a: + x: default + y: default + b: default + c: default + patch: + a: + y: patch + z: patch + b: patch + +If ``recursive=False`` (the default), nested hash aren't merged:: + + {{ default | combine(patch) }} + +This would result in:: + + a: + y: patch + z: patch + b: patch + c: default + +If ``recursive=True``, recurse into nested hash and merge their keys:: + + {{ default | combine(patch, recursive=True) }} + +This would result in:: + + a: + x: default + y: patch + z: patch + b: patch + c: default + +If ``list_merge='replace'`` (the default), arrays from the right hash will "replace" the ones in the left hash:: + + default: + a: + - default + patch: + a: + - patch + +.. code-block:: jinja + + {{ default | combine(patch) }} + +This would result in:: + + a: + - patch + +If ``list_merge='keep'``, arrays from the left hash will be kept:: + + {{ default | combine(patch, list_merge='keep') }} + +This would result in:: + + a: + - default + +If ``list_merge='append'``, arrays from the right hash will be appended to the ones in the left hash:: + + {{ default | combine(patch, list_merge='append') }} + +This would result in:: + + a: + - default + - patch + +If ``list_merge='prepend'``, arrays from the right hash will be prepended to the ones in the left hash:: + + {{ default | combine(patch, list_merge='prepend') }} + +This would result in:: + + a: + - patch + - default + +If ``list_merge='append_rp'``, arrays from the right hash will be appended to the ones in the left hash. Elements of arrays in the left hash that are also in the corresponding array of the right hash will be removed ("rp" stands for "remove present"). 
Duplicate elements that aren't in both hashes are kept:: + + default: + a: + - 1 + - 1 + - 2 + - 3 + patch: + a: + - 3 + - 4 + - 5 + - 5 + +.. code-block:: jinja + + {{ default | combine(patch, list_merge='append_rp') }} + +This would result in:: + + a: + - 1 + - 1 + - 2 + - 3 + - 4 + - 5 + - 5 + +If ``list_merge='prepend_rp'``, the behavior is similar to the one for ``append_rp``, but elements of arrays in the right hash are prepended:: + + {{ default | combine(patch, list_merge='prepend_rp') }} + +This would result in:: + + a: + - 3 + - 4 + - 5 + - 5 + - 1 + - 1 + - 2 + +``recursive`` and ``list_merge`` can be used together:: + + default: + a: + a': + x: default_value + y: default_value + list: + - default_value + b: + - 1 + - 1 + - 2 + - 3 + patch: + a: + a': + y: patch_value + z: patch_value + list: + - patch_value + b: + - 3 + - 4 + - 4 + - key: value + +.. code-block:: jinja + + {{ default | combine(patch, recursive=True, list_merge='append_rp') }} + +This would result in:: + + a: + a': + x: default_value + y: patch_value + z: patch_value + list: + - default_value + - patch_value + b: + - 1 + - 1 + - 2 + - 3 + - 4 + - 4 + - key: value + + +.. _extract_filter: + +Selecting values from arrays or hashtables +------------------------------------------- + +.. versionadded:: 2.1 + +The `extract` filter is used to map from a list of indices to a list of values from a container (hash or array):: + + {{ [0,2] | map('extract', ['x','y','z']) | list }} + {{ ['x','y'] | map('extract', {'x': 42, 'y': 31}) | list }} + +The results of the above expressions would be:: + + ['x', 'z'] + [42, 31] + +The filter can take another argument:: + + {{ groups['x'] | map('extract', hostvars, 'ec2_ip_address') | list }} + +This takes the list of hosts in group 'x', looks them up in `hostvars`, and then looks up the `ec2_ip_address` of the result. The final result is a list of IP addresses for the hosts in group 'x'. 
+ +The third argument to the filter can also be a list, for a recursive lookup inside the container:: + + {{ ['a'] | map('extract', b, ['x','y']) | list }} + +This would return a list containing the value of `b['a']['x']['y']`. + +Combining lists +--------------- + +This set of filters returns a list of combined lists. + + +permutations +^^^^^^^^^^^^ +To get permutations of a list:: + + - name: Give me largest permutations (order matters) + ansible.builtin.debug: + msg: "{{ [1,2,3,4,5] | permutations | list }}" + + - name: Give me permutations of sets of three + ansible.builtin.debug: + msg: "{{ [1,2,3,4,5] | permutations(3) | list }}" + +combinations +^^^^^^^^^^^^ +Combinations always require a set size:: + + - name: Give me combinations for sets of two + ansible.builtin.debug: + msg: "{{ [1,2,3,4,5] | combinations(2) | list }}" + +Also see the :ref:`zip_filter` + +products +^^^^^^^^ +The product filter returns the `cartesian product <https://docs.python.org/3/library/itertools.html#itertools.product>`_ of the input iterables. This is roughly equivalent to nested for-loops in a generator expression. + +For example:: + + - name: Generate multiple hostnames + ansible.builtin.debug: + msg: "{{ ['foo', 'bar'] | product(['com']) | map('join', '.') | join(',') }}" + +This would result in:: + + { "msg": "foo.com,bar.com" } + +.. _json_query_filter: + +Selecting JSON data: JSON queries +--------------------------------- + +To select a single element or a data subset from a complex data structure in JSON format (for example, Ansible facts), use the ``json_query`` filter. The ``json_query`` filter lets you query a complex JSON structure and iterate over it using a loop structure. + +.. note:: + + This filter has migrated to the `community.general <https://galaxy.ansible.com/community/general>`_ collection. Follow the installation instructions to install that collection. + + +.. note:: This filter is built upon **jmespath**, and you can use the same syntax.
For examples, see `jmespath examples <http://jmespath.org/examples.html>`_. + +Consider this data structure:: + + { + "domain_definition": { + "domain": { + "cluster": [ + { + "name": "cluster1" + }, + { + "name": "cluster2" + } + ], + "server": [ + { + "name": "server11", + "cluster": "cluster1", + "port": "8080" + }, + { + "name": "server12", + "cluster": "cluster1", + "port": "8090" + }, + { + "name": "server21", + "cluster": "cluster2", + "port": "9080" + }, + { + "name": "server22", + "cluster": "cluster2", + "port": "9090" + } + ], + "library": [ + { + "name": "lib1", + "target": "cluster1" + }, + { + "name": "lib2", + "target": "cluster2" + } + ] + } + } + } + +To extract all clusters from this structure, you can use the following query:: + + - name: Display all cluster names + ansible.builtin.debug: + var: item + loop: "{{ domain_definition | community.general.json_query('domain.cluster[*].name') }}" + +To extract all server names:: + + - name: Display all server names + ansible.builtin.debug: + var: item + loop: "{{ domain_definition | community.general.json_query('domain.server[*].name') }}" + +To extract ports from cluster1:: + + - name: Display all ports from cluster1 + ansible.builtin.debug: + var: item + loop: "{{ domain_definition | community.general.json_query(server_name_cluster1_query) }}" + vars: + server_name_cluster1_query: "domain.server[?cluster=='cluster1'].port" + +.. note:: You can use a variable to make the query more readable. + +To print out the ports from cluster1 in a comma separated string:: + + - name: Display all ports from cluster1 as a string + ansible.builtin.debug: + msg: "{{ domain_definition | community.general.json_query('domain.server[?cluster==`cluster1`].port') | join(', ') }}" + +.. note:: In the example above, quoting literals using backticks avoids escaping quotes and maintains readability.
+ +You can use YAML `single quote escaping <https://yaml.org/spec/current.html#id2534365>`_:: + + - name: Display all ports from cluster1 + ansible.builtin.debug: + var: item + loop: "{{ domain_definition | community.general.json_query('domain.server[?cluster==''cluster1''].port') }}" + +.. note:: Escaping single quotes within single quotes in YAML is done by doubling the single quote. + +To get a hash map with all ports and names of a cluster:: + + - name: Display all server ports and names from cluster1 + ansible.builtin.debug: + var: item + loop: "{{ domain_definition | community.general.json_query(server_name_cluster1_query) }}" + vars: + server_name_cluster1_query: "domain.server[?cluster=='cluster1'].{name: name, port: port}" + + +Randomizing data +================ + +When you need a randomly generated value, use one of these filters. + + +.. _random_mac_filter: + +Random MAC addresses +-------------------- + +.. versionadded:: 2.6 + +This filter can be used to generate a random MAC address from a string prefix. + +.. note:: + + This filter has migrated to the `community.general <https://galaxy.ansible.com/community/general>`_ collection. Follow the installation instructions to install that collection. + +To get a random MAC address from a string prefix starting with '52:54:00':: + + "{{ '52:54:00' | community.general.random_mac }}" + # => '52:54:00:ef:1c:03' + +Note that if anything is wrong with the prefix string, the filter will issue an error. + +.. versionadded:: 2.9 + +As of Ansible version 2.9, you can also initialize the random number generator from a seed to create random-but-idempotent MAC addresses:: + + "{{ '52:54:00' | community.general.random_mac(seed=inventory_hostname) }}" + + +..
_random_filter: + +Random items or numbers +----------------------- + +The ``random`` filter in Ansible is an extension of the default Jinja2 random filter, and can be used to return a random item from a sequence of items or to generate a random number based on a range. + +To get a random item from a list:: + + "{{ ['a','b','c'] | random }}" + # => 'c' + +To get a random number between 0 and a specified number:: + + "{{ 60 | random }} * * * * root /script/from/cron" + # => '21 * * * * root /script/from/cron' + +To get a random number from 0 to 100 but in steps of 10:: + + {{ 101 | random(step=10) }} + # => 70 + +To get a random number from 1 to 100 but in steps of 10:: + + {{ 101 | random(1, 10) }} + # => 31 + {{ 101 | random(start=1, step=10) }} + # => 51 + +You can initialize the random number generator from a seed to create random-but-idempotent numbers:: + + "{{ 60 | random(seed=inventory_hostname) }} * * * * root /script/from/cron" + +Shuffling a list +---------------- + +The ``shuffle`` filter randomizes an existing list, giving a different order every invocation. + +To get a random list from an existing list:: + + {{ ['a','b','c'] | shuffle }} + # => ['c','a','b'] + {{ ['a','b','c'] | shuffle }} + # => ['b','c','a'] + +You can initialize the shuffle generator from a seed to generate a random-but-idempotent order:: + + {{ ['a','b','c'] | shuffle(seed=inventory_hostname) }} + # => ['b','a','c'] + +The shuffle filter returns a list whenever possible. If you use it with a non 'listable' item, the filter does nothing. + + +.. _list_filters: + +Managing list variables +======================= + +You can search for the minimum or maximum value in a list, or flatten a multi-level list. + +To get the minimum value from list of numbers:: + + {{ list1 | min }} + +To get the maximum value from a list of numbers:: + + {{ [3, 4, 2] | max }} + +.. 
versionadded:: 2.5 + +Flatten a list (same thing the `flatten` lookup does):: + + {{ [3, [4, 2] ] | flatten }} + +Flatten only the first level of a list (akin to the `items` lookup):: + + {{ [3, [4, [2]] ] | flatten(levels=1) }} + + +.. _set_theory_filters: + +Selecting from sets or lists (set theory) +========================================= + +You can select or combine items from sets or lists. + +.. versionadded:: 1.4 + +To get a unique set from a list:: + + # list1: [1, 2, 5, 1, 3, 4, 10] + {{ list1 | unique }} + # => [1, 2, 5, 3, 4, 10] + +To get a union of two lists:: + + # list1: [1, 2, 5, 1, 3, 4, 10] + # list2: [1, 2, 3, 4, 5, 11, 99] + {{ list1 | union(list2) }} + # => [1, 2, 5, 1, 3, 4, 10, 11, 99] + +To get the intersection of 2 lists (unique list of all items in both):: + + # list1: [1, 2, 5, 3, 4, 10] + # list2: [1, 2, 3, 4, 5, 11, 99] + {{ list1 | intersect(list2) }} + # => [1, 2, 5, 3, 4] + +To get the difference of 2 lists (items in 1 that don't exist in 2):: + + # list1: [1, 2, 5, 1, 3, 4, 10] + # list2: [1, 2, 3, 4, 5, 11, 99] + {{ list1 | difference(list2) }} + # => [10] + +To get the symmetric difference of 2 lists (items exclusive to each list):: + + # list1: [1, 2, 5, 1, 3, 4, 10] + # list2: [1, 2, 3, 4, 5, 11, 99] + {{ list1 | symmetric_difference(list2) }} + # => [10, 11, 99] + +.. _math_stuff: + +Calculating numbers (math) +========================== + +.. versionadded:: 1.9 + +You can calculate logs, powers, and roots of numbers with Ansible filters. Jinja2 provides other mathematical functions like abs() and round(). + +Get the logarithm (default is e):: + + {{ myvar | log }} + +Get the base 10 logarithm:: + + {{ myvar | log(10) }} + +Give me the power of 2! (or 5):: + + {{ myvar | pow(2) }} + {{ myvar | pow(5) }} + +Square root, or the 5th:: + + {{ myvar | root }} + {{ myvar | root(5) }} + + +Managing network interactions +============================= + +These filters help you with common network tasks. + +.. 
note:: + + These filters have migrated to the `ansible.netcommon <https://galaxy.ansible.com/ansible/netcommon>`_ collection. Follow the installation instructions to install that collection. + +.. _ipaddr_filter: + +IP address filters +------------------ + +.. versionadded:: 1.9 + +To test if a string is a valid IP address:: + + {{ myvar | ansible.netcommon.ipaddr }} + +You can also require a specific IP protocol version:: + + {{ myvar | ansible.netcommon.ipv4 }} + {{ myvar | ansible.netcommon.ipv6 }} + +IP address filter can also be used to extract specific information from an IP +address. For example, to get the IP address itself from a CIDR, you can use:: + + {{ '192.0.2.1/24' | ansible.netcommon.ipaddr('address') }} + +More information about ``ipaddr`` filter and complete usage guide can be found +in :ref:`playbooks_filters_ipaddr`. + +.. _network_filters: + +Network CLI filters +------------------- + +.. versionadded:: 2.4 + +To convert the output of a network device CLI command into structured JSON +output, use the ``parse_cli`` filter:: + + {{ output | ansible.netcommon.parse_cli('path/to/spec') }} + +The ``parse_cli`` filter will load the spec file and pass the command output +through it, returning JSON output. The YAML spec file defines how to parse the CLI output. + +The spec file should be valid formatted YAML. It defines how to parse the CLI +output and return JSON data. Below is an example of a valid spec file that +will parse the output from the ``show vlan`` command. + +.. code-block:: yaml + + --- + vars: + vlan: + vlan_id: "{{ item.vlan_id }}" + name: "{{ item.name }}" + enabled: "{{ item.state != 'act/lshut' }}" + state: "{{ item.state }}" + + keys: + vlans: + value: "{{ vlan }}" + items: "^(?P<vlan_id>\\d+)\\s+(?P<name>\\w+)\\s+(?P<state>active|act/lshut|suspended)" + state_static: + value: present + + +The spec file above will return a JSON data structure that is a list of hashes +with the parsed VLAN information. 
+ +The same command could be parsed into a hash by using the key and values +directives. Here is an example of how to parse the output into a hash +value using the same ``show vlan`` command. + +.. code-block:: yaml + + --- + vars: + vlan: + key: "{{ item.vlan_id }}" + values: + vlan_id: "{{ item.vlan_id }}" + name: "{{ item.name }}" + enabled: "{{ item.state != 'act/lshut' }}" + state: "{{ item.state }}" + + keys: + vlans: + value: "{{ vlan }}" + items: "^(?P<vlan_id>\\d+)\\s+(?P<name>\\w+)\\s+(?P<state>active|act/lshut|suspended)" + state_static: + value: present + +Another common use case for parsing CLI commands is to break a large command +into blocks that can be parsed. This can be done using the ``start_block`` and +``end_block`` directives to break the command into blocks that can be parsed. + +.. code-block:: yaml + + --- + vars: + interface: + name: "{{ item[0].match[0] }}" + state: "{{ item[1].state }}" + mode: "{{ item[2].match[0] }}" + + keys: + interfaces: + value: "{{ interface }}" + start_block: "^Ethernet.*$" + end_block: "^$" + items: + - "^(?P<name>Ethernet\\d\\/\\d*)" + - "admin state is (?P<state>.+)," + - "Port mode is (.+)" + + +The example above will parse the output of ``show interface`` into a list of +hashes. + +The network filters also support parsing the output of a CLI command using the +TextFSM library. To parse the CLI output with TextFSM use the following +filter:: + + {{ output.stdout[0] | ansible.netcommon.parse_cli_textfsm('path/to/fsm') }} + +Use of the TextFSM filter requires the TextFSM library to be installed. + +Network XML filters +------------------- + +.. versionadded:: 2.5 + +To convert the XML output of a network device command into structured JSON +output, use the ``parse_xml`` filter:: + + {{ output | ansible.netcommon.parse_xml('path/to/spec') }} + +The ``parse_xml`` filter will load the spec file and pass the command output +through formatted as JSON. + +The spec file should be valid formatted YAML. 
It defines how to parse the XML +output and return JSON data. + +Below is an example of a valid spec file that +will parse the output from the ``show vlan | display xml`` command. + +.. code-block:: yaml + + --- + vars: + vlan: + vlan_id: "{{ item.vlan_id }}" + name: "{{ item.name }}" + desc: "{{ item.desc }}" + enabled: "{{ item.state.get('inactive') != 'inactive' }}" + state: "{% if item.state.get('inactive') == 'inactive'%} inactive {% else %} active {% endif %}" + + keys: + vlans: + value: "{{ vlan }}" + top: configuration/vlans/vlan + items: + vlan_id: vlan-id + name: name + desc: description + state: ".[@inactive='inactive']" + + +The spec file above will return a JSON data structure that is a list of hashes +with the parsed VLAN information. + +The same command could be parsed into a hash by using the key and values +directives. Here is an example of how to parse the output into a hash +value using the same ``show vlan | display xml`` command. + +.. code-block:: yaml + + --- + vars: + vlan: + key: "{{ item.vlan_id }}" + values: + vlan_id: "{{ item.vlan_id }}" + name: "{{ item.name }}" + desc: "{{ item.desc }}" + enabled: "{{ item.state.get('inactive') != 'inactive' }}" + state: "{% if item.state.get('inactive') == 'inactive'%} inactive {% else %} active {% endif %}" + + keys: + vlans: + value: "{{ vlan }}" + top: configuration/vlans/vlan + items: + vlan_id: vlan-id + name: name + desc: description + state: ".[@inactive='inactive']" + + +The value of ``top`` is the XPath relative to the XML root node. +In the example XML output given below, the value of ``top`` is ``configuration/vlans/vlan``, +which is an XPath expression relative to the root node (<rpc-reply>). +``configuration`` in the value of ``top`` is the outer most container node, and ``vlan`` +is the inner-most container node. + +``items`` is a dictionary of key-value pairs that map user-defined names to XPath expressions +that select elements. 
The XPath expression is relative to the XPath value contained in ``top``. +For example, the ``vlan_id`` in the spec file is a user-defined name and its value ``vlan-id`` is +relative to the value of the XPath in ``top``. + +Attributes of XML tags can be extracted using XPath expressions. The value of ``state`` in the spec +is an XPath expression used to get the attributes of the ``vlan`` tag in output XML:: + + <rpc-reply> + <configuration> + <vlans> + <vlan inactive="inactive"> + <name>vlan-1</name> + <vlan-id>200</vlan-id> + <description>This is vlan-1</description> + </vlan> + </vlans> + </configuration> + </rpc-reply> + +.. note:: + For more information on supported XPath expressions, see `XPath Support <https://docs.python.org/2/library/xml.etree.elementtree.html#xpath-support>`_. + +Network VLAN filters +-------------------- + +.. versionadded:: 2.8 + +Use the ``vlan_parser`` filter to transform an unsorted list of VLAN integers into a +sorted string list of integers according to IOS-like VLAN list rules. This list has the following properties: + +* Vlans are listed in ascending order. +* Three or more consecutive VLANs are listed with a dash. +* The first line of the list can be first_line_len characters long. +* Subsequent list lines can be other_line_len characters. + +To sort a VLAN list:: + + {{ [3003, 3004, 3005, 100, 1688, 3002, 3999] | ansible.netcommon.vlan_parser }} + +This example renders the following sorted list:: + + ['100,1688,3002-3005,3999'] + + +Another example Jinja template:: + + {% set parsed_vlans = vlans | ansible.netcommon.vlan_parser %} + switchport trunk allowed vlan {{ parsed_vlans[0] }} + {% for i in range (1, parsed_vlans | count) %} + switchport trunk allowed vlan add {{ parsed_vlans[i] }} + {% endfor %} + +This allows for dynamic generation of VLAN lists on a Cisco IOS tagged interface.
You can store an exhaustive raw list of the exact VLANs required for an interface and then compare that to the parsed IOS output that would actually be generated for the configuration. + + +.. _hash_filters: + +Encrypting and checksumming strings and passwords +================================================= + +.. versionadded:: 1.9 + +To get the sha1 hash of a string:: + + {{ 'test1' | hash('sha1') }} + +To get the md5 hash of a string:: + + {{ 'test1' | hash('md5') }} + +Get a string checksum:: + + {{ 'test2' | checksum }} + +Other hashes (platform dependent):: + + {{ 'test2' | hash('blowfish') }} + +To get a sha512 password hash (random salt):: + + {{ 'passwordsaresecret' | password_hash('sha512') }} + +To get a sha256 password hash with a specific salt:: + + {{ 'secretpassword' | password_hash('sha256', 'mysecretsalt') }} + +An idempotent method to generate unique hashes per system is to use a salt that is consistent between runs:: + + {{ 'secretpassword' | password_hash('sha512', 65534 | random(seed=inventory_hostname) | string) }} + +Hash types available depend on the master system running Ansible, 'hash' depends on hashlib, password_hash depends on passlib (https://passlib.readthedocs.io/en/stable/lib/passlib.hash.html). + +.. versionadded:: 2.7 + +Some hash types allow providing a rounds parameter:: + + {{ 'secretpassword' | password_hash('sha256', 'mysecretsalt', rounds=10000) }} + +.. _other_useful_filters: + +Manipulating text +================= + +Several filters work with text, including URLs, file names, and path names. + +.. _comment_filter: + +Adding comments to files +------------------------ + +The ``comment`` filter lets you create comments in a file from text in a template, with a variety of comment styles. By default Ansible uses ``#`` to start a comment line and adds a blank comment line above and below your comment text. For example the following:: + + {{ "Plain style (default)" | comment }} + +produces this output: + +.. 
code-block:: text + + # + # Plain style (default) + # + +Ansible offers styles for comments in C (``//...``), C block +(``/*...*/``), Erlang (``%...``) and XML (``<!--...-->``):: + + {{ "C style" | comment('c') }} + {{ "C block style" | comment('cblock') }} + {{ "Erlang style" | comment('erlang') }} + {{ "XML style" | comment('xml') }} + +You can define a custom comment character. This filter:: + + {{ "My Special Case" | comment(decoration="! ") }} + +produces: + +.. code-block:: text + + ! + ! My Special Case + ! + +You can fully customize the comment style:: + + {{ "Custom style" | comment('plain', prefix='#######\n#', postfix='#\n#######\n ###\n #') }} + +That creates the following output: + +.. code-block:: text + + ####### + # + # Custom style + # + ####### + ### + # + +The filter can also be applied to any Ansible variable. For example to +make the output of the ``ansible_managed`` variable more readable, we can +change the definition in the ``ansible.cfg`` file to this: + +.. code-block:: jinja + + [defaults] + + ansible_managed = This file is managed by Ansible.%n + template: {file} + date: %Y-%m-%d %H:%M:%S + user: {uid} + host: {host} + +and then use the variable with the `comment` filter:: + + {{ ansible_managed | comment }} + +which produces this output: + +.. code-block:: sh + + # + # This file is managed by Ansible. + # + # template: /home/ansible/env/dev/ansible_managed/roles/role1/templates/test.j2 + # date: 2015-09-10 11:02:58 + # user: ansible + # host: myhost + # + +Splitting URLs +-------------- + +.. versionadded:: 2.4 + +The ``urlsplit`` filter extracts the fragment, hostname, netloc, password, path, port, query, scheme, and username from an URL. 
With no arguments, returns a dictionary of all the fields:: + + {{ "http://user:password@www.acme.com:9000/dir/index.html?query=term#fragment" | urlsplit('hostname') }} + # => 'www.acme.com' + + {{ "http://user:password@www.acme.com:9000/dir/index.html?query=term#fragment" | urlsplit('netloc') }} + # => 'user:password@www.acme.com:9000' + + {{ "http://user:password@www.acme.com:9000/dir/index.html?query=term#fragment" | urlsplit('username') }} + # => 'user' + + {{ "http://user:password@www.acme.com:9000/dir/index.html?query=term#fragment" | urlsplit('password') }} + # => 'password' + + {{ "http://user:password@www.acme.com:9000/dir/index.html?query=term#fragment" | urlsplit('path') }} + # => '/dir/index.html' + + {{ "http://user:password@www.acme.com:9000/dir/index.html?query=term#fragment" | urlsplit('port') }} + # => '9000' + + {{ "http://user:password@www.acme.com:9000/dir/index.html?query=term#fragment" | urlsplit('scheme') }} + # => 'http' + + {{ "http://user:password@www.acme.com:9000/dir/index.html?query=term#fragment" | urlsplit('query') }} + # => 'query=term' + + {{ "http://user:password@www.acme.com:9000/dir/index.html?query=term#fragment" | urlsplit('fragment') }} + # => 'fragment' + + {{ "http://user:password@www.acme.com:9000/dir/index.html?query=term#fragment" | urlsplit }} + # => + # { + # "fragment": "fragment", + # "hostname": "www.acme.com", + # "netloc": "user:password@www.acme.com:9000", + # "password": "password", + # "path": "/dir/index.html", + # "port": 9000, + # "query": "query=term", + # "scheme": "http", + # "username": "user" + # } + +Searching strings with regular expressions +------------------------------------------ + +To search a string with a regex, use the "regex_search" filter:: + + # search for "foo" in "foobar" + {{ 'foobar' | regex_search('(foo)') }} + + # will return empty if it cannot find a match + {{ 'ansible' | regex_search('(foobar)') }} + + # case insensitive search in multiline mode + {{ 'foo\nBAR' | 
regex_search("^bar", multiline=True, ignorecase=True) }} + + +To search for all occurrences of regex matches, use the "regex_findall" filter:: + + # Return a list of all IPv4 addresses in the string + {{ 'Some DNS servers are 8.8.8.8 and 8.8.4.4' | regex_findall('\\b(?:[0-9]{1,3}\\.){3}[0-9]{1,3}\\b') }} + + +To replace text in a string with regex, use the "regex_replace" filter:: + + # convert "ansible" to "able" + {{ 'ansible' | regex_replace('^a.*i(.*)$', 'a\\1') }} + + # convert "foobar" to "bar" + {{ 'foobar' | regex_replace('^f.*o(.*)$', '\\1') }} + + # convert "localhost:80" to "localhost, 80" using named groups + {{ 'localhost:80' | regex_replace('^(?P<host>.+):(?P<port>\\d+)$', '\\g<host>, \\g<port>') }} + + # convert "localhost:80" to "localhost" + {{ 'localhost:80' | regex_replace(':80') }} + + # change a multiline string + {{ var | regex_replace('^', '#CommentThis#', multiline=True) }} + +.. note:: + If you want to match the whole string and you are using ``*`` make sure to always wraparound your regular expression with the start/end anchors. For example ``^(.*)$`` will always match only one result, while ``(.*)`` on some Python versions will match the whole string and an empty string at the end, which means it will make two replacements:: + + # add "https://" prefix to each item in a list + GOOD: + {{ hosts | map('regex_replace', '^(.*)$', 'https://\\1') | list }} + {{ hosts | map('regex_replace', '(.+)', 'https://\\1') | list }} + {{ hosts | map('regex_replace', '^', 'https://') | list }} + + BAD: + {{ hosts | map('regex_replace', '(.*)', 'https://\\1') | list }} + + # append ':80' to each item in a list + GOOD: + {{ hosts | map('regex_replace', '^(.*)$', '\\1:80') | list }} + {{ hosts | map('regex_replace', '(.+)', '\\1:80') | list }} + {{ hosts | map('regex_replace', '$', ':80') | list }} + + BAD: + {{ hosts | map('regex_replace', '(.*)', '\\1:80') | list }} + +.. 
note:: + Prior to ansible 2.0, if "regex_replace" filter was used with variables inside YAML arguments (as opposed to simpler 'key=value' arguments), then you needed to escape backreferences (for example, ``\\1``) with 4 backslashes (``\\\\``) instead of 2 (``\\``). + +.. versionadded:: 2.0 + +To escape special characters within a standard Python regex, use the "regex_escape" filter (using the default re_type='python' option):: + + # convert '^f.*o(.*)$' to '\^f\.\*o\(\.\*\)\$' + {{ '^f.*o(.*)$' | regex_escape() }} + +.. versionadded:: 2.8 + +To escape special characters within a POSIX basic regex, use the "regex_escape" filter with the re_type='posix_basic' option:: + + # convert '^f.*o(.*)$' to '\^f\.\*o(\.\*)\$' + {{ '^f.*o(.*)$' | regex_escape('posix_basic') }} + + +Managing file names and path names +---------------------------------- + +To get the last name of a file path, like 'foo.txt' out of '/etc/asdf/foo.txt':: + + {{ path | basename }} + +To get the last name of a windows style file path (new in version 2.0):: + + {{ path | win_basename }} + +To separate the windows drive letter from the rest of a file path (new in version 2.0):: + + {{ path | win_splitdrive }} + +To get only the windows drive letter:: + + {{ path | win_splitdrive | first }} + +To get the rest of the path without the drive letter:: + + {{ path | win_splitdrive | last }} + +To get the directory from a path:: + + {{ path | dirname }} + +To get the directory from a windows path (new version 2.0):: + + {{ path | win_dirname }} + +To expand a path containing a tilde (`~`) character (new in version 1.5):: + + {{ path | expanduser }} + +To expand a path containing environment variables:: + + {{ path | expandvars }} + +.. note:: `expandvars` expands local variables; using it on remote paths can lead to errors. + +.. 
versionadded:: 2.6 + +To get the real path of a link (new in version 1.8):: + + {{ path | realpath }} + +To get the relative path of a link, from a start point (new in version 1.7):: + + {{ path | relpath('/etc') }} + +To get the root and extension of a path or file name (new in version 2.0):: + + # with path == 'nginx.conf' the return would be ('nginx', '.conf') + {{ path | splitext }} + +The ``splitext`` filter returns a string. The individual components can be accessed by using the ``first`` and ``last`` filters:: + + # with path == 'nginx.conf' the return would be 'nginx' + {{ path | splitext | first }} + + # with path == 'nginx.conf' the return would be 'conf' + {{ path | splitext | last }} + +To join one or more path components:: + + {{ ('/etc', path, 'subdir', file) | path_join }} + +.. versionadded:: 2.10 + +Manipulating strings +==================== + +To add quotes for shell usage:: + + - name: Run a shell command + ansible.builtin.shell: echo {{ string_value | quote }} + +To concatenate a list into a string:: + + {{ list | join(" ") }} + +To work with Base64 encoded strings:: + + {{ encoded | b64decode }} + {{ decoded | string | b64encode }} + +As of version 2.6, you can define the type of encoding to use, the default is ``utf-8``:: + + {{ encoded | b64decode(encoding='utf-16-le') }} + {{ decoded | string | b64encode(encoding='utf-16-le') }} + +.. note:: The ``string`` filter is only required for Python 2 and ensures that text to encode is a unicode string. Without that filter before b64encode the wrong value will be encoded. + +.. versionadded:: 2.6 + +Managing UUIDs +============== + +To create a namespaced UUIDv5:: + + {{ string | to_uuid(namespace='11111111-2222-3333-4444-555555555555') }} + +.. versionadded:: 2.10 + +To create a namespaced UUIDv5 using the default Ansible namespace '361E6D51-FAEC-444A-9079-341386DA8E2E':: + + {{ string | to_uuid }} + +.. 
versionadded:: 1.9 + +To make use of one attribute from each item in a list of complex variables, use the :func:`Jinja2 map filter <jinja2:map>`:: + + # get a comma-separated list of the mount points (for example, "/,/mnt/stuff") on a host + {{ ansible_mounts | map(attribute='mount') | join(',') }} + +Handling dates and times +======================== + +To get a date object from a string use the `to_datetime` filter:: + + # Get total amount of seconds between two dates. Default date format is %Y-%m-%d %H:%M:%S but you can pass your own format + {{ (("2016-08-14 20:00:12" | to_datetime) - ("2015-12-25" | to_datetime('%Y-%m-%d'))).total_seconds() }} + + # Get remaining seconds after delta has been calculated. NOTE: This does NOT convert years, days, hours, and so on to seconds. For that, use total_seconds() + {{ (("2016-08-14 20:00:12" | to_datetime) - ("2016-08-14 18:00:00" | to_datetime)).seconds }} + # This expression evaluates to "12" and not "132". Delta is 2 hours, 12 seconds + + # get amount of days between two dates. This returns only number of days and discards remaining hours, minutes, and seconds + {{ (("2016-08-14 20:00:12" | to_datetime) - ("2015-12-25" | to_datetime('%Y-%m-%d'))).days }} + +.. versionadded:: 2.4 + +To format a date using a string (like with the shell date command), use the "strftime" filter:: + + # Display year-month-day + {{ '%Y-%m-%d' | strftime }} + + # Display hour:min:sec + {{ '%H:%M:%S' | strftime }} + + # Use ansible_date_time.epoch fact + {{ '%Y-%m-%d %H:%M:%S' | strftime(ansible_date_time.epoch) }} + + # Use arbitrary epoch value + {{ '%Y-%m-%d' | strftime(0) }} # => 1970-01-01 + {{ '%Y-%m-%d' | strftime(1441357287) }} # => 2015-09-04 + +.. note:: To get all string possibilities, check https://docs.python.org/3/library/time.html#time.strftime + +Getting Kubernetes resource names +================================= + +.. 
note:: + + These filters have migrated to the `community.kubernetes <https://galaxy.ansible.com/community/kubernetes>`_ collection. Follow the installation instructions to install that collection. + +Use the "k8s_config_resource_name" filter to obtain the name of a Kubernetes ConfigMap or Secret, +including its hash:: + + {{ configmap_resource_definition | community.kubernetes.k8s_config_resource_name }} + +This can then be used to reference hashes in Pod specifications:: + + my_secret: + kind: Secret + name: my_secret_name + + deployment_resource: + kind: Deployment + spec: + template: + spec: + containers: + - envFrom: + - secretRef: + name: {{ my_secret | community.kubernetes.k8s_config_resource_name }} + +.. versionadded:: 2.8 + +.. _PyYAML library: https://pyyaml.org/ + +.. _PyYAML documentation: https://pyyaml.org/wiki/PyYAMLDocumentation + + +.. seealso:: + + :ref:`about_playbooks` + An introduction to playbooks + :ref:`playbooks_conditionals` + Conditional statements in playbooks + :ref:`playbooks_variables` + All about variables + :ref:`playbooks_loops` + Looping in playbooks + :ref:`playbooks_reuse_roles` + Playbook organization by roles + :ref:`playbooks_best_practices` + Tips and tricks for playbooks + `User Mailing List <https://groups.google.com/group/ansible-devel>`_ + Have a question? Stop by the google group! + `irc.freenode.net <http://irc.freenode.net>`_ + #ansible IRC chat channel diff --git a/docs/docsite/rst/user_guide/playbooks_filters_ipaddr.rst b/docs/docsite/rst/user_guide/playbooks_filters_ipaddr.rst new file mode 100644 index 00000000..0a6d4825 --- /dev/null +++ b/docs/docsite/rst/user_guide/playbooks_filters_ipaddr.rst @@ -0,0 +1,744 @@ +:orphan: + +.. _playbooks_filters_ipaddr: + +ipaddr filter +````````````` + +.. versionadded:: 1.9 + +``ipaddr()`` is a Jinja2 filter designed to provide an interface to the `netaddr`_ +Python package from within Ansible. 
It can operate on strings or lists of +items, test various data to check if they are valid IP addresses, and manipulate +the input data to extract requested information. ``ipaddr()`` works with both +IPv4 and IPv6 addresses in various forms. There are also additional functions +available to manipulate IP subnets and MAC addresses. + +.. note:: + + The ``ipaddr()`` filter migrated to the `ansible.netcommon <https://galaxy.ansible.com/ansible/netcommon>`_ collection. Follow the installation instructions to install that collection. + +To use this filter in Ansible, you need to install the `netaddr`_ Python library on +a computer on which you use Ansible (it is not required on remote hosts). +It can usually be installed with either your system package manager or using +``pip``:: + + pip install netaddr + +.. _netaddr: https://pypi.org/project/netaddr/ + +.. contents:: Topics + :local: + :depth: 2 + :backlinks: top + + +Basic tests +^^^^^^^^^^^ + +``ipaddr()`` is designed to return the input value if a query is True, and +``False`` if a query is False. This way it can be easily used in chained +filters. To use the filter, pass a string to it: + +.. code-block:: none + + {{ '192.0.2.0' | ansible.netcommon.ipaddr }} + +You can also pass the values as variables:: + + {{ myvar | ansible.netcommon.ipaddr }} + +Here are some example test results of various input strings:: + + # These values are valid IP addresses or network ranges + '192.168.0.1' -> 192.168.0.1 + '192.168.32.0/24' -> 192.168.32.0/24 + 'fe80::100/10' -> fe80::100/10 + 45443646733 -> ::a:94a7:50d + '523454/24' -> 0.7.252.190/24 + + # Values that are not valid IP addresses or network ranges + 'localhost' -> False + True -> False + 'space bar' -> False + False -> False + '' -> False + ':' -> False + 'fe80:/10' -> False + +Sometimes you need either IPv4 or IPv6 addresses. To filter only for a particular +type, ``ipaddr()`` filter has two "aliases", ``ipv4()`` and ``ipv6()``. 
+ +Example use of an IPv4 filter:: + + {{ myvar | ansible.netcommon.ipv4 }} + +A similar example of an IPv6 filter:: + + {{ myvar | ansible.netcommon.ipv6 }} + +Here's some example test results to look for IPv4 addresses:: + + '192.168.0.1' -> 192.168.0.1 + '192.168.32.0/24' -> 192.168.32.0/24 + 'fe80::100/10' -> False + 45443646733 -> False + '523454/24' -> 0.7.252.190/24 + +And the same data filtered for IPv6 addresses:: + + '192.168.0.1' -> False + '192.168.32.0/24' -> False + 'fe80::100/10' -> fe80::100/10 + 45443646733 -> ::a:94a7:50d + '523454/24' -> False + + +Filtering lists +^^^^^^^^^^^^^^^ + +You can filter entire lists - ``ipaddr()`` will return a list with values +valid for a particular query:: + + # Example list of values + test_list = ['192.24.2.1', 'host.fqdn', '::1', '192.168.32.0/24', 'fe80::100/10', True, '', '42540766412265424405338506004571095040/64'] + + # {{ test_list | ansible.netcommon.ipaddr }} + ['192.24.2.1', '::1', '192.168.32.0/24', 'fe80::100/10', '2001:db8:32c:faad::/64'] + + # {{ test_list | ansible.netcommon.ipv4 }} + ['192.24.2.1', '192.168.32.0/24'] + + # {{ test_list | ansible.netcommon.ipv6 }} + ['::1', 'fe80::100/10', '2001:db8:32c:faad::/64'] + + +Wrapping IPv6 addresses in [ ] brackets +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Some configuration files require IPv6 addresses to be "wrapped" in square +brackets (``[ ]``). To accomplish that, you can use the ``ipwrap()`` filter. It +will wrap all IPv6 addresses and leave any other strings intact:: + + # {{ test_list | ansible.netcommon.ipwrap }} + ['192.24.2.1', 'host.fqdn', '[::1]', '192.168.32.0/24', '[fe80::100]/10', True, '', '[2001:db8:32c:faad::]/64'] + +As you can see, ``ipwrap()`` did not filter out non-IP address values, which is +usually what you want when for example you are mixing IP addresses with +hostnames. 
If you still want to filter out all non-IP address values, you can +chain both filters together:: + + # {{ test_list | ansible.netcommon.ipaddr | ansible.netcommon.ipwrap }} + ['192.24.2.1', '[::1]', '192.168.32.0/24', '[fe80::100]/10', '[2001:db8:32c:faad::]/64'] + + +Basic queries +^^^^^^^^^^^^^ + +You can provide a single argument to each ``ipaddr()`` filter. The filter will then +treat it as a query and return values modified by that query. Lists will +contain only values that you are querying for. + +Types of queries include: + +- query by name: ``ansible.netcommon.ipaddr('address')``, ``ansible.netcommon.ipv4('network')``; +- query by CIDR range: ``ansible.netcommon.ipaddr('192.168.0.0/24')``, ``ansible.netcommon.ipv6('2001:db8::/32')``; +- query by index number: ``ansible.netcommon.ipaddr('1')``, ``ansible.netcommon.ipaddr('-1')``; + +If a query type is not recognized, Ansible will raise an error. + + +Getting information about hosts and networks +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Here's our test list again:: + + # Example list of values + test_list = ['192.24.2.1', 'host.fqdn', '::1', '192.168.32.0/24', 'fe80::100/10', True, '', '42540766412265424405338506004571095040/64'] + +Let's take the list above and get only those elements that are host IP addresses +and not network ranges:: + + # {{ test_list | ansible.netcommon.ipaddr('address') }} + ['192.24.2.1', '::1', 'fe80::100'] + +As you can see, even though some values had a host address with a CIDR prefix, +they were dropped by the filter. 
If you want host IP addresses with their correct +CIDR prefixes (as is common with IPv6 addressing), you can use the +``ipaddr('host')`` filter:: + + # {{ test_list | ansible.netcommon.ipaddr('host') }} + ['192.24.2.1/32', '::1/128', 'fe80::100/10'] + +Filtering by IP address type also works:: + + # {{ test_list | ansible.netcommon.ipv4('address') }} + ['192.24.2.1'] + + # {{ test_list | ansible.netcommon.ipv6('address') }} + ['::1', 'fe80::100'] + +You can check if IP addresses or network ranges are accessible on a public +Internet, or if they are in private networks:: + + # {{ test_list | ansible.netcommon.ipaddr('public') }} + ['192.24.2.1', '2001:db8:32c:faad::/64'] + + # {{ test_list | ansible.netcommon.ipaddr('private') }} + ['192.168.32.0/24', 'fe80::100/10'] + +You can check which values are specifically network ranges:: + + # {{ test_list | ansible.netcommon.ipaddr('net') }} + ['192.168.32.0/24', '2001:db8:32c:faad::/64'] + +You can also check how many IP addresses can be in a certain range:: + + # {{ test_list | ansible.netcommon.ipaddr('net') | ansible.netcommon.ipaddr('size') }} + [256, 18446744073709551616L] + +By specifying a network range as a query, you can check if a given value is in +that range:: + + # {{ test_list | ansible.netcommon.ipaddr('192.0.0.0/8') }} + ['192.24.2.1', '192.168.32.0/24'] + +If you specify a positive or negative integer as a query, ``ipaddr()`` will +treat this as an index and will return the specific IP address from a network +range, in the 'host/prefix' format:: + + # First IP address (network address) + # {{ test_list | ansible.netcommon.ipaddr('net') | ansible.netcommon.ipaddr('0') }} + ['192.168.32.0/24', '2001:db8:32c:faad::/64'] + + # Second IP address (usually the gateway host) + # {{ test_list | ansible.netcommon.ipaddr('net') | ansible.netcommon.ipaddr('1') }} + ['192.168.32.1/24', '2001:db8:32c:faad::1/64'] + + # Last IP address (the broadcast address in IPv4 networks) + # {{ test_list | 
ansible.netcommon.ipaddr('net') | ansible.netcommon.ipaddr('-1') }} + ['192.168.32.255/24', '2001:db8:32c:faad:ffff:ffff:ffff:ffff/64'] + +You can also select IP addresses from a range by their index, from the start or +end of the range:: + + # Returns from the start of the range + # {{ test_list | ansible.netcommon.ipaddr('net') | ansible.netcommon.ipaddr('200') }} + ['192.168.32.200/24', '2001:db8:32c:faad::c8/64'] + + # Returns from the end of the range + # {{ test_list | ansible.netcommon.ipaddr('net') | ansible.netcommon.ipaddr('-200') }} + ['192.168.32.56/24', '2001:db8:32c:faad:ffff:ffff:ffff:ff38/64'] + + # {{ test_list | ansible.netcommon.ipaddr('net') | ansible.netcommon.ipaddr('400') }} + ['2001:db8:32c:faad::190/64'] + + +Getting information from host/prefix values +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +You frequently use a combination of IP addresses and subnet prefixes +("CIDR"), this is even more common with IPv6. The ``ansible.netcommon.ipaddr()`` filter can extract +useful data from these prefixes. + +Here's an example set of two host prefixes (with some "control" values):: + + host_prefix = ['2001:db8:deaf:be11::ef3/64', '192.0.2.48/24', '127.0.0.1', '192.168.0.0/16'] + +First, let's make sure that we only work with correct host/prefix values, not +just subnets or single IP addresses:: + + # {{ host_prefix | ansible.netcommon.ipaddr('host/prefix') }} + ['2001:db8:deaf:be11::ef3/64', '192.0.2.48/24'] + +In Debian-based systems, the network configuration stored in the ``/etc/network/interfaces`` file uses a combination of IP address, network address, netmask and broadcast address to configure an IPv4 network interface. We can get these values from a single 'host/prefix' combination: + +.. 
code-block:: jinja + + # Jinja2 template + {% set ipv4_host = host_prefix | unique | ansible.netcommon.ipv4('host/prefix') | first %} + iface eth0 inet static + address {{ ipv4_host | ansible.netcommon.ipaddr('address') }} + network {{ ipv4_host | ansible.netcommon.ipaddr('network') }} + netmask {{ ipv4_host | ansible.netcommon.ipaddr('netmask') }} + broadcast {{ ipv4_host | ansible.netcommon.ipaddr('broadcast') }} + + # Generated configuration file + iface eth0 inet static + address 192.0.2.48 + network 192.0.2.0 + netmask 255.255.255.0 + broadcast 192.0.2.255 + +In the above example, we needed to handle the fact that values were stored in +a list, which is unusual in IPv4 networks, where only a single IP address can be +set on an interface. However, IPv6 networks can have multiple IP addresses set +on an interface: + +.. code-block:: jinja + + # Jinja2 template + iface eth0 inet6 static + {% set ipv6_list = host_prefix | unique | ansible.netcommon.ipv6('host/prefix') %} + address {{ ipv6_list[0] }} + {% if ipv6_list | length > 1 %} + {% for subnet in ipv6_list[1:] %} + up /sbin/ip address add {{ subnet }} dev eth0 + down /sbin/ip address del {{ subnet }} dev eth0 + {% endfor %} + {% endif %} + + # Generated configuration file + iface eth0 inet6 static + address 2001:db8:deaf:be11::ef3/64 + +If needed, you can extract subnet and prefix information from the 'host/prefix' value:: + +.. code-block:: jinja + + # {{ host_prefix | ansible.netcommon.ipaddr('host/prefix') | ansible.netcommon.ipaddr('subnet') }} + ['2001:db8:deaf:be11::/64', '192.0.2.0/24'] + + # {{ host_prefix | ansible.netcommon.ipaddr('host/prefix') | ansible.netcommon.ipaddr('prefix') }} + [64, 24] + + +Converting subnet masks to CIDR notation +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Given a subnet in the form of network address and subnet mask, the ``ipaddr()`` filter can convert it into CIDR notation. 
This can be useful for converting Ansible facts gathered about network configuration from subnet masks into CIDR format:: + + ansible_default_ipv4: { + address: "192.168.0.11", + alias: "eth0", + broadcast: "192.168.0.255", + gateway: "192.168.0.1", + interface: "eth0", + macaddress: "fa:16:3e:c4:bd:89", + mtu: 1500, + netmask: "255.255.255.0", + network: "192.168.0.0", + type: "ether" + } + +First concatenate the network and netmask:: + + net_mask = "{{ ansible_default_ipv4.network }}/{{ ansible_default_ipv4.netmask }}" + '192.168.0.0/255.255.255.0' + +This result can be converted to canonical form with ``ipaddr()`` to produce a subnet in CIDR format:: + + # {{ net_mask | ansible.netcommon.ipaddr('prefix') }} + '24' + + # {{ net_mask | ansible.netcommon.ipaddr('net') }} + '192.168.0.0/24' + + +Getting information about the network in CIDR notation +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Given an IP address, the ``ipaddr()`` filter can produce the network address in CIDR notation. +This can be useful when you want to obtain the network address from the IP address in CIDR format. 
+ +Here's an example of IP address:: + + ip_address = "{{ ansible_default_ipv4.address }}/{{ ansible_default_ipv4.netmask }}" + '192.168.0.11/255.255.255.0' + +This can be used to obtain the network address in CIDR notation format:: + + # {{ ip_address | ansible.netcommon.ipaddr('network/prefix') }} + '192.168.0.0/24' + + +IP address conversion +^^^^^^^^^^^^^^^^^^^^^ + +Here's our test list again:: + + # Example list of values + test_list = ['192.24.2.1', 'host.fqdn', '::1', '192.168.32.0/24', 'fe80::100/10', True, '', '42540766412265424405338506004571095040/64'] + +You can convert IPv4 addresses into IPv6 addresses:: + + # {{ test_list | ansible.netcommon.ipv4('ipv6') }} + ['::ffff:192.24.2.1/128', '::ffff:192.168.32.0/120'] + +Converting from IPv6 to IPv4 works very rarely:: + + # {{ test_list | ansible.netcommon.ipv6('ipv4') }} + ['0.0.0.1/32'] + +But we can make a double conversion if needed:: + + # {{ test_list | ansible.netcommon.ipaddr('ipv6') | ansible.netcommon.ipaddr('ipv4') }} + ['192.24.2.1/32', '0.0.0.1/32', '192.168.32.0/24'] + +You can convert IP addresses to integers, the same way that you can convert +integers into IP addresses:: + + # {{ test_list | ansible.netcommon.ipaddr('address') | ansible.netcommon.ipaddr('int') }} + [3222798849, 1, '3232243712/24', '338288524927261089654018896841347694848/10', '42540766412265424405338506004571095040/64'] + +You can convert IPv4 address to `Hexadecimal notation <https://en.wikipedia.org/wiki/Hexadecimal>`_ with optional delimiter:: + + # {{ '192.168.1.5' | ansible.netcommon.ip4_hex }} + c0a80105 + # {{ '192.168.1.5' | ansible.netcommon.ip4_hex(':') }} + c0:a8:01:05 + +You can convert IP addresses to PTR records:: + + # {% for address in test_list | ansible.netcommon.ipaddr %} + # {{ address | ansible.netcommon.ipaddr('revdns') }} + # {% endfor %} + 1.2.24.192.in-addr.arpa. + 1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ip6.arpa. + 0.32.168.192.in-addr.arpa. 
+ 0.0.1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.e.f.ip6.arpa. + 0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.d.a.a.f.c.2.3.0.8.b.d.0.1.0.0.2.ip6.arpa. + + +Converting IPv4 address to a 6to4 address +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +A `6to4`_ tunnel is a way to access the IPv6 Internet from an IPv4-only network. If you +have a public IPv4 address, you can automatically configure its IPv6 +equivalent in the ``2002::/16`` network range. After conversion you will gain +access to a ``2002:xxxx:xxxx::/48`` subnet which could be split into 65535 +``/64`` subnets if needed. + +To convert your IPv4 address, just send it through the ``'6to4'`` filter. It will +be automatically converted to a router address (with a ``::1/48`` host address):: + + # {{ '193.0.2.0' | ansible.netcommon.ipaddr('6to4') }} + 2002:c100:0200::1/48 + +.. _6to4: https://en.wikipedia.org/wiki/6to4 + + +Finding IP addresses within a range +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +To find usable IP addresses within an IP range, try these ``ipaddr`` filters: + +To find the next usable IP address in a range, use ``next_usable`` :: + + # {{ '192.168.122.1/24' | ansible.netcommon.ipaddr('next_usable') }} + 192.168.122.2 + +To find the last usable IP address from a range, use ``last_usable``:: + + # {{ '192.168.122.1/24' | ansible.netcommon.ipaddr('last_usable') }} + 192.168.122.254 + +To find the available range of IP addresses from the given network address, use ``range_usable``:: + + # {{ '192.168.122.1/24' | ansible.netcommon.ipaddr('range_usable') }} + 192.168.122.1-192.168.122.254 + +To find the peer IP address for a point to point link, use ``peer``:: + + # {{ '192.168.122.1/31' | ansible.netcommon.ipaddr('peer') }} + 192.168.122.0 + # {{ '192.168.122.1/30' | ansible.netcommon.ipaddr('peer') }} + 192.168.122.2 + +To return the nth ip from a network, use the filter ``nthhost``:: + + # {{ '10.0.0.0/8' | ansible.netcommon.nthhost(305) }} + 10.0.1.49 + +``nthhost`` also supports a negative value:: 
+ + # {{ '10.0.0.0/8' | ansible.netcommon.nthhost(-1) }} + 10.255.255.255 + +To find the next nth usable IP address in relation to another within a range, use ``next_nth_usable`` +In the example, ``next_nth_usable`` returns the second usable IP address for the given IP range:: + + # {{ '192.168.122.1/24' | ansible.netcommon.next_nth_usable(2) }} + 192.168.122.3 + +If there is no usable address, it returns an empty string:: + + # {{ '192.168.122.254/24' | ansible.netcommon.next_nth_usable(2) }} + "" + +Just like ``next_nth_ansible``, you have ``previous_nth_usable`` to find the previous usable address:: + + # {{ '192.168.122.10/24' | ansible.netcommon.previous_nth_usable(2) }} + 192.168.122.8 + + +Testing if a address belong to a network range +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The ``network_in_usable`` filter returns whether an address passed as an argument is usable in a network. +Usable addresses are addresses that can be assigned to a host. The network ID and the broadcast address +are not usable addresses.:: + + # {{ '192.168.0.0/24' | ansible.netcommon.network_in_usable( '192.168.0.1' ) }} + True + + # {{ '192.168.0.0/24' | ansible.netcommon.network_in_usable( '192.168.0.255' ) }} + False + + # {{ '192.168.0.0/16' | ansible.netcommon.network_in_usable( '192.168.0.255' ) }} + True + +The ``network_in_network`` filter returns whether an address or a network passed as argument is in a network.:: + + # {{ '192.168.0.0/24' | ansible.netcommon.network_in_network( '192.168.0.1' ) }} + True + + # {{ '192.168.0.0/24' | ansible.netcommon.network_in_network( '192.168.0.0/24' ) }} + True + + # {{ '192.168.0.0/24' | ansible.netcommon.network_in_network( '192.168.0.255' ) }} + True + + # Check in a network is part of another network + # {{ '192.168.0.0/16' | ansible.netcommon.network_in_network( '192.168.0.0/24' ) }} + True + +To check whether multiple addresses belong to a network, use the ``reduce_on_network`` filter:: + + # {{ ['192.168.0.34', '10.3.0.3', 
'192.168.2.34'] | ansible.netcommon.reduce_on_network( '192.168.0.0/24' ) }} + ['192.168.0.34'] + + +IP Math +^^^^^^^ + +.. versionadded:: 2.7 + +The ``ipmath()`` filter can be used to do simple IP math/arithmetic. + +Here are a few simple examples:: + + # Get the next five addresses based on an IP address + # {{ '192.168.1.5' | ansible.netcommon.ipmath(5) }} + 192.168.1.10 + + # Get the ten previous addresses based on an IP address + # {{ '192.168.0.5' | ansible.netcommon.ipmath(-10) }} + 192.167.255.251 + + # Get the next five addresses using CIDR notation + # {{ '192.168.1.1/24' | ansible.netcommon.ipmath(5) }} + 192.168.1.6 + + # Get the previous five addresses using CIDR notation + # {{ '192.168.1.6/24' | ansible.netcommon.ipmath(-5) }} + 192.168.1.1 + + # Get the previous ten address using cidr notation + # It returns a address of the previous network range + # {{ '192.168.2.6/24' | ansible.netcommon.ipmath(-10) }} + 192.168.1.252 + + # Get the next ten addresses in IPv6 + # {{ '2001::1' | ansible.netcommon.ipmath(10) }} + 2001::b + + # Get the previous ten address in IPv6 + # {{ '2001::5' | ansible.netcommon.ipmath(-10) }} + 2000:ffff:ffff:ffff:ffff:ffff:ffff:fffb + + + +Subnet manipulation +^^^^^^^^^^^^^^^^^^^ + +The ``ipsubnet()`` filter can be used to manipulate network subnets in several ways. + +Here is an example IP address and subnet:: + + address = '192.168.144.5' + subnet = '192.168.0.0/16' + +To check if a given string is a subnet, pass it through the filter without any +arguments. 
If the given string is an IP address, it will be converted into +a subnet:: + + # {{ address | ansible.netcommon.ipsubnet }} + 192.168.144.5/32 + + # {{ subnet | ansible.netcommon.ipsubnet }} + 192.168.0.0/16 + +If you specify a subnet size as the first parameter of the ``ipsubnet()`` filter, and +the subnet size is **smaller than the current one**, you will get the number of subnets +a given subnet can be split into:: + + # {{ subnet | ansible.netcommon.ipsubnet(20) }} + 16 + +The second argument of the ``ipsubnet()`` filter is an index number; by specifying it +you can get a new subnet with the specified size:: + + # First subnet + # {{ subnet | ansible.netcommon.ipsubnet(20, 0) }} + 192.168.0.0/20 + + # Last subnet + # {{ subnet | ansible.netcommon.ipsubnet(20, -1) }} + 192.168.240.0/20 + + # Fifth subnet + # {{ subnet | ansible.netcommon.ipsubnet(20, 5) }} + 192.168.80.0/20 + + # Fifth to last subnet + # {{ subnet | ansible.netcommon.ipsubnet(20, -5) }} + 192.168.176.0/20 + +If you specify an IP address instead of a subnet, and give a subnet size as +the first argument, the ``ipsubnet()`` filter will instead return the biggest subnet that +contains that given IP address:: + + # {{ address | ansible.netcommon.ipsubnet(20) }} + 192.168.144.0/20 + +By specifying an index number as a second argument, you can select smaller and +smaller subnets:: + + # First subnet + # {{ address | ansible.netcommon.ipsubnet(18, 0) }} + 192.168.128.0/18 + + # Last subnet + # {{ address | ansible.netcommon.ipsubnet(18, -1) }} + 192.168.144.4/31 + + # Fifth subnet + # {{ address | ansible.netcommon.ipsubnet(18, 5) }} + 192.168.144.0/23 + + # Fifth to last subnet + # {{ address | ansible.netcommon.ipsubnet(18, -5) }} + 192.168.144.0/27 + +By specifying another subnet as a second argument, if the second subnet includes +the first, you can determine the rank of the first subnet in the second :: + + # The rank of the IP in the subnet (the IP is the 36870nth /32 of the subnet) + # {{ 
address | ansible.netcommon.ipsubnet(subnet) }} + 36870 + + # The rank in the /24 that contain the address + # {{ address | ansible.netcommon.ipsubnet('192.168.144.0/24') }} + 6 + + # An IP with the subnet in the first /30 in a /24 + # {{ '192.168.144.1/30' | ansible.netcommon.ipsubnet('192.168.144.0/24') }} + 1 + + # The fifth subnet /30 in a /24 + # {{ '192.168.144.16/30' | ansible.netcommon.ipsubnet('192.168.144.0/24') }} + 5 + +If the second subnet doesn't include the first subnet, the ``ipsubnet()`` filter raises an error. + + +You can use the ``ipsubnet()`` filter with the ``ipaddr()`` filter to, for example, split +a given ``/48`` prefix into smaller ``/64`` subnets:: + + # {{ '193.0.2.0' | ansible.netcommon.ipaddr('6to4') | ipsubnet(64, 58820) | ansible.netcommon.ipaddr('1') }} + 2002:c100:200:e5c4::1/64 + +Because of the size of IPv6 subnets, iteration over all of them to find the +correct one may take some time on slower computers, depending on the size +difference between the subnets. + + +Subnet Merging +^^^^^^^^^^^^^^ + +.. 
versionadded:: 2.6 + +The ``cidr_merge()`` filter can be used to merge subnets or individual addresses +into their minimal representation, collapsing overlapping subnets and merging +adjacent ones wherever possible:: + + {{ ['192.168.0.0/17', '192.168.128.0/17', '192.168.128.1' ] | cidr_merge }} + # => ['192.168.0.0/16'] + + {{ ['192.168.0.0/24', '192.168.1.0/24', '192.168.3.0/24'] | cidr_merge }} + # => ['192.168.0.0/23', '192.168.3.0/24'] + +Changing the action from 'merge' to 'span' will instead return the smallest +subnet which contains all of the inputs:: + + {{ ['192.168.0.0/24', '192.168.3.0/24'] | ansible.netcommon.cidr_merge('span') }} + # => '192.168.0.0/22' + + {{ ['192.168.1.42', '192.168.42.1'] | ansible.netcommon.cidr_merge('span') }} + # => '192.168.0.0/18' + + +MAC address filter +^^^^^^^^^^^^^^^^^^ + +You can use the ``hwaddr()`` filter to check if a given string is a MAC address or +convert it between various formats. Examples:: + + # Example MAC address + macaddress = '1a:2b:3c:4d:5e:6f' + + # Check if given string is a MAC address + # {{ macaddress | ansible.netcommon.hwaddr }} + 1a:2b:3c:4d:5e:6f + + # Convert MAC address to PostgreSQL format + # {{ macaddress | ansible.netcommon.hwaddr('pgsql') }} + 1a2b3c:4d5e6f + + # Convert MAC address to Cisco format + # {{ macaddress | ansible.netcommon.hwaddr('cisco') }} + 1a2b.3c4d.5e6f + +The supported formats result in the following conversions for the ``1a:2b:3c:4d:5e:6f`` MAC address:: + + bare: 1A2B3C4D5E6F + bool: True + int: 28772997619311 + cisco: 1a2b.3c4d.5e6f + eui48 or win: 1A-2B-3C-4D-5E-6F + linux or unix: 1a:2b:3c:4d:5e:6f: + pgsql, postgresql, or psql: 1a2b3c:4d5e6f + + +Generate an IPv6 address in Stateless Configuration (SLAAC) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +the filter ``slaac()`` generates an IPv6 address for a given network and a MAC Address in Stateless Configuration:: + + # {{ fdcf:1894:23b5:d38c:0000:0000:0000:0000 | slaac('c2:31:b3:83:bf:2b') }} + 
fdcf:1894:23b5:d38c:c031:b3ff:fe83:bf2b + +.. seealso:: + + + `ansible.netcommon <https://galaxy.ansible.com/ansible/netcommon>`_ + Ansible network collection for common code + :ref:`about_playbooks` + An introduction to playbooks + :ref:`playbooks_filters` + Introduction to Jinja2 filters and their uses + :ref:`playbooks_conditionals` + Conditional statements in playbooks + :ref:`playbooks_variables` + All about variables + :ref:`playbooks_loops` + Looping in playbooks + :ref:`playbooks_reuse_roles` + Playbook organization by roles + :ref:`playbooks_best_practices` + Tips and tricks for playbooks + `User Mailing List <https://groups.google.com/group/ansible-devel>`_ + Have a question? Stop by the google group! + `irc.freenode.net <http://irc.freenode.net>`_ + #ansible IRC chat channel diff --git a/docs/docsite/rst/user_guide/playbooks_handlers.rst b/docs/docsite/rst/user_guide/playbooks_handlers.rst new file mode 100644 index 00000000..4650d5e7 --- /dev/null +++ b/docs/docsite/rst/user_guide/playbooks_handlers.rst @@ -0,0 +1,148 @@ +.. _handlers: + +Handlers: running operations on change +====================================== + +Sometimes you want a task to run only when a change is made on a machine. For example, you may want to restart a service if a task updates the configuration of that service, but not if the configuration is unchanged. Ansible uses handlers to address this use case. Handlers are tasks that only run when notified. Each handler should have a globally unique name. + +.. 
contents:: + :local: + +Handler example +--------------- + +This playbook, ``verify-apache.yml``, contains a single play with a handler:: + + --- + - name: Verify apache installation + hosts: webservers + vars: + http_port: 80 + max_clients: 200 + remote_user: root + tasks: + - name: Ensure apache is at the latest version + ansible.builtin.yum: + name: httpd + state: latest + + - name: Write the apache config file + ansible.builtin.template: + src: /srv/httpd.j2 + dest: /etc/httpd.conf + notify: + - Restart apache + + - name: Ensure apache is running + ansible.builtin.service: + name: httpd + state: started + + handlers: + - name: Restart apache + ansible.builtin.service: + name: httpd + state: restarted + +In this example playbook, the second task notifies the handler. A single task can notify more than one handler:: + + - name: Template configuration file + ansible.builtin.template: + src: template.j2 + dest: /etc/foo.conf + notify: + - Restart memcached + - Restart apache + + handlers: + - name: Restart memcached + ansible.builtin.service: + name: memcached + state: restarted + + - name: Restart apache + ansible.builtin.service: + name: apache + state: restarted + +Controlling when handlers run +----------------------------- + +By default, handlers run after all the tasks in a particular play have been completed. This approach is efficient, because the handler only runs once, regardless of how many tasks notify it. For example, if multiple tasks update a configuration file and notify a handler to restart Apache, Ansible only bounces Apache once to avoid unnecessary restarts. + +If you need handlers to run before the end of the play, add a task to flush them using the :ref:`meta module <meta_module>`, which executes Ansible actions:: + + tasks: + - name: Some tasks go here + ansible.builtin.shell: ... + + - name: Flush handlers + meta: flush_handlers + + - name: Some other tasks + ansible.builtin.shell: ... 
+ +The ``meta: flush_handlers`` task triggers any handlers that have been notified at that point in the play. + +Using variables with handlers +----------------------------- + +You may want your Ansible handlers to use variables. For example, if the name of a service varies slightly by distribution, you want your output to show the exact name of the restarted service for each target machine. Avoid placing variables in the name of the handler. Since handler names are templated early on, Ansible may not have a value available for a handler name like this:: + + handlers: + # This handler name may cause your play to fail! + - name: Restart "{{ web_service_name }}" + +If the variable used in the handler name is not available, the entire play fails. Changing that variable mid-play **will not** result in a newly created handler. + +Instead, place variables in the task parameters of your handler. You can load the values using ``include_vars`` like this: + + .. code-block:: yaml+jinja + + tasks: + - name: Set host variables based on distribution + include_vars: "{{ ansible_facts.distribution }}.yml" + + handlers: + - name: Restart web service + ansible.builtin.service: + name: "{{ web_service_name | default('httpd') }}" + state: restarted + +Handlers can also "listen" to generic topics, and tasks can notify those topics as follows:: + + handlers: + - name: Restart memcached + ansible.builtin.service: + name: memcached + state: restarted + listen: "restart web services" + + - name: Restart apache + ansible.builtin.service: + name: apache + state: restarted + listen: "restart web services" + + tasks: + - name: Restart everything + ansible.builtin.command: echo "this task will restart the web services" + notify: "restart web services" + +This use makes it much easier to trigger multiple handlers. It also decouples handlers from their names, +making it easier to share handlers among playbooks and roles (especially when using 3rd party roles from +a shared source like Galaxy). 
+ +.. note:: + * Handlers always run in the order they are defined, not in the order listed in the notify-statement. This is also the case for handlers using `listen`. + * Handler names and `listen` topics live in a global namespace. + * Handler names are templatable and `listen` topics are not. + * Use unique handler names. If you trigger more than one handler with the same name, the first one(s) get overwritten. Only the last one defined will run. + * You can notify a handler defined inside a static include. + * You cannot notify a handler defined inside a dynamic include. + +When using handlers within roles, note that: + +* handlers notified within ``pre_tasks``, ``tasks``, and ``post_tasks`` sections are automatically flushed at the end of the section where they were notified. +* handlers notified within ``roles`` section are automatically flushed at the end of ``tasks`` section, but before any ``tasks`` handlers. +* handlers are play scoped and as such can be used outside of the role they are defined in. diff --git a/docs/docsite/rst/user_guide/playbooks_intro.rst b/docs/docsite/rst/user_guide/playbooks_intro.rst new file mode 100644 index 00000000..24037b3e --- /dev/null +++ b/docs/docsite/rst/user_guide/playbooks_intro.rst @@ -0,0 +1,151 @@ +.. _about_playbooks: +.. _playbooks_intro: + +****************** +Intro to playbooks +****************** + +Ansible Playbooks offer a repeatable, re-usable, simple configuration management and multi-machine deployment system, one that is well suited to deploying complex applications. If you need to execute a task with Ansible more than once, write a playbook and put it under source control. Then you can use the playbook to push out new configuration or confirm the configuration of remote systems. The playbooks in the `ansible-examples repository <https://github.com/ansible/ansible-examples>`_ illustrate many useful techniques. You may want to look at these in another tab as you read the documentation. 
+ +Playbooks can: + +* declare configurations +* orchestrate steps of any manual ordered process, on multiple sets of machines, in a defined order +* launch tasks synchronously or :ref:`asynchronously <playbooks_async>` + +.. contents:: + :local: + +.. _playbook_language_example: + +Playbook syntax +=============== + +Playbooks are expressed in YAML format with a minimum of syntax. If you are not familiar with YAML, look at our overview of :ref:`yaml_syntax` and consider installing an add-on for your text editor (see :ref:`other_tools_and_programs`) to help you write clean YAML syntax in your playbooks. + +A playbook is composed of one or more 'plays' in an ordered list. The terms 'playbook' and 'play' are sports analogies. Each play executes part of the overall goal of the playbook, running one or more tasks. Each task calls an Ansible module. + +Playbook execution +================== + +A playbook runs in order from top to bottom. Within each play, tasks also run in order from top to bottom. Playbooks with multiple 'plays' can orchestrate multi-machine deployments, running one play on your webservers, then another play on your database servers, then a third play on your network infrastructure, and so on. 
At a minimum, each play defines two things: + +* the managed nodes to target, using a :ref:`pattern <intro_patterns>` +* at least one task to execute + +In this example, the first play targets the web servers; the second play targets the database servers:: + + --- + - name: update web servers + hosts: webservers + remote_user: root + + tasks: + - name: ensure apache is at the latest version + yum: + name: httpd + state: latest + - name: write the apache config file + template: + src: /srv/httpd.j2 + dest: /etc/httpd.conf + + - name: update db servers + hosts: databases + remote_user: root + + tasks: + - name: ensure postgresql is at the latest version + yum: + name: postgresql + state: latest + - name: ensure that postgresql is started + service: + name: postgresql + state: started + +Your playbook can include more than just a hosts line and tasks. For example, the playbook above sets a ``remote_user`` for each play. This is the user account for the SSH connection. You can add other :ref:`playbook_keywords` at the playbook, play, or task level to influence how Ansible behaves. Playbook keywords can control the :ref:`connection plugin <connection_plugins>`, whether to use :ref:`privilege escalation <become>`, how to handle errors, and more. To support a variety of environments, Ansible lets you set many of these parameters as command-line flags, in your Ansible configuration, or in your inventory. Learning the :ref:`precedence rules <general_precedence_rules>` for these sources of data will help you as you expand your Ansible ecosystem. + +.. _tasks_list: + +Task execution +-------------- + +By default, Ansible executes each task in order, one at a time, against all machines matched by the host pattern. Each task executes a module with specific arguments. When a task has executed on all target machines, Ansible moves on to the next task. You can use :ref:`strategies <playbooks_strategies>` to change this default behavior. 
Within each play, Ansible applies the same task directives to all hosts. If a task fails on a host, Ansible takes that host out of the rotation for the rest of the playbook. + +When you run a playbook, Ansible returns information about connections, the ``name`` lines of all your plays and tasks, whether each task has succeeded or failed on each machine, and whether each task has made a change on each machine. At the bottom of the playbook execution, Ansible provides a summary of the nodes that were targeted and how they performed. General failures and fatal "unreachable" communication attempts are kept separate in the counts. + +.. _idempotency: + +Desired state and 'idempotency' +------------------------------- + +Most Ansible modules check whether the desired final state has already been achieved, and exit without performing any actions if that state has been achieved, so that repeating the task does not change the final state. Modules that behave this way are often called 'idempotent.' Whether you run a playbook once, or multiple times, the outcome should be the same. However, not all playbooks and not all modules behave this way. If you are unsure, test your playbooks in a sandbox environment before running them multiple times in production. + +.. _executing_a_playbook: + +Running playbooks +----------------- + +To run your playbook, use the :ref:`ansible-playbook` command:: + + ansible-playbook playbook.yml -f 10 + +Use the ``--verbose`` flag when running your playbook to see detailed output from successful modules as well as unsuccessful ones. + +.. _playbook_ansible-pull: + +Ansible-Pull +============ + +Should you want to invert the architecture of Ansible, so that nodes check in to a central location, instead +of pushing configuration out to them, you can. + +The ``ansible-pull`` is a small script that will checkout a repo of configuration instructions from git, and then +run ``ansible-playbook`` against that content. 
+ +Assuming you load balance your checkout location, ``ansible-pull`` scales essentially infinitely. + +Run ``ansible-pull --help`` for details. + +There's also a `clever playbook <https://github.com/ansible/ansible-examples/blob/master/language_features/ansible_pull.yml>`_ available to configure ``ansible-pull`` via a crontab from push mode. + +Verifying playbooks +=================== + +You may want to verify your playbooks to catch syntax errors and other problems before you run them. The :ref:`ansible-playbook` command offers several options for verification, including ``--check``, ``--diff``, ``--list-hosts``, ``--list-tasks``, and ``--syntax-check``. The :ref:`validate-playbook-tools` describes other tools for validating and testing playbooks. + +.. _linting_playbooks: + +ansible-lint +------------ + +You can use `ansible-lint <https://docs.ansible.com/ansible-lint/index.html>`_ for detailed, Ansible-specific feedback on your playbooks before you execute them. For example, if you run ``ansible-lint`` on the playbook called ``verify-apache.yml`` near the top of this page, you should get the following results: + +.. code-block:: bash + + $ ansible-lint verify-apache.yml + [403] Package installs should not use latest + verify-apache.yml:8 + Task/Handler: ensure apache is at the latest version + +The `ansible-lint default rules <https://docs.ansible.com/ansible-lint/rules/default_rules.html>`_ page describes each error. For ``[403]``, the recommended fix is to change ``state: latest`` to ``state: present`` in the playbook. + +.. 
seealso:: + + `ansible-lint <https://docs.ansible.com/ansible-lint/index.html>`_ + Learn how to test Ansible Playbooks syntax + :ref:`yaml_syntax` + Learn about YAML syntax + :ref:`playbooks_best_practices` + Tips for managing playbooks in the real world + :ref:`list_of_collections` + Browse existing collections, modules, and plugins + :ref:`developing_modules` + Learn to extend Ansible by writing your own modules + :ref:`intro_patterns` + Learn about how to select hosts + `GitHub examples directory <https://github.com/ansible/ansible-examples>`_ + Complete end-to-end playbook examples + `Mailing List <https://groups.google.com/group/ansible-project>`_ + Questions? Help? Ideas? Stop by the list on Google Groups diff --git a/docs/docsite/rst/user_guide/playbooks_lookups.rst b/docs/docsite/rst/user_guide/playbooks_lookups.rst new file mode 100644 index 00000000..004db708 --- /dev/null +++ b/docs/docsite/rst/user_guide/playbooks_lookups.rst @@ -0,0 +1,37 @@ +.. _playbooks_lookups: + +******* +Lookups +******* + +Lookup plugins retrieve data from outside sources such as files, databases, key/value stores, APIs, and other services. Like all templating, lookups execute and are evaluated on the Ansible control machine. Ansible makes the data returned by a lookup plugin available using the standard templating system. Before Ansible 2.5, lookups were mostly used indirectly in ``with_<lookup>`` constructs for looping. Starting with Ansible 2.5, lookups are used more explicitly as part of Jinja2 expressions fed into the ``loop`` keyword. + +.. _lookups_and_variables: + +Using lookups in variables +========================== + +You can populate variables using lookups. Ansible evaluates the value each time it is executed in a task (or template):: + + vars: + motd_value: "{{ lookup('file', '/etc/motd') }}" + tasks: + - debug: + msg: "motd value is {{ motd_value }}" + +For more details and a list of lookup plugins in ansible-base, see :ref:`plugins_lookup`. 
You may also find lookup plugins in collections. You can review a list of lookup plugins installed on your control machine with the command ``ansible-doc -l -t lookup``. + +.. seealso:: + + :ref:`working_with_playbooks` + An introduction to playbooks + :ref:`playbooks_conditionals` + Conditional statements in playbooks + :ref:`playbooks_variables` + All about variables + :ref:`playbooks_loops` + Looping in playbooks + `User Mailing List <https://groups.google.com/group/ansible-devel>`_ + Have a question? Stop by the google group! + `irc.freenode.net <http://irc.freenode.net>`_ + #ansible IRC chat channel diff --git a/docs/docsite/rst/user_guide/playbooks_loops.rst b/docs/docsite/rst/user_guide/playbooks_loops.rst new file mode 100644 index 00000000..0934eeed --- /dev/null +++ b/docs/docsite/rst/user_guide/playbooks_loops.rst @@ -0,0 +1,445 @@ +.. _playbooks_loops: + +***** +Loops +***** + +Sometimes you want to repeat a task multiple times. In computer programming, this is called a loop. Common Ansible loops include changing ownership on several files and/or directories with the :ref:`file module <file_module>`, creating multiple users with the :ref:`user module <user_module>`, and +repeating a polling step until a certain result is reached. Ansible offers two keywords for creating loops: ``loop`` and ``with_<lookup>``. + +.. note:: + * We added ``loop`` in Ansible 2.5. It is not yet a full replacement for ``with_<lookup>``, but we recommend it for most use cases. + * We have not deprecated the use of ``with_<lookup>`` - that syntax will still be valid for the foreseeable future. + * We are looking to improve ``loop`` syntax - watch this page and the `changelog <https://github.com/ansible/ansible/tree/devel/changelogs>`_ for updates. + +.. contents:: + :local: + +Comparing ``loop`` and ``with_*`` +================================= + +* The ``with_<lookup>`` keywords rely on :ref:`lookup_plugins` - even ``items`` is a lookup. 
+* The ``loop`` keyword is equivalent to ``with_list``, and is the best choice for simple loops. +* The ``loop`` keyword will not accept a string as input, see :ref:`query_vs_lookup`. +* Generally speaking, any use of ``with_*`` covered in :ref:`migrating_to_loop` can be updated to use ``loop``. +* Be careful when changing ``with_items`` to ``loop``, as ``with_items`` performed implicit single-level flattening. You may need to use ``flatten(1)`` with ``loop`` to match the exact outcome. For example, to get the same output as: + +.. code-block:: yaml + + with_items: + - 1 + - [2,3] + - 4 + +you would need:: + + loop: "{{ [1, [2,3] ,4] | flatten(1) }}" + +* Any ``with_*`` statement that requires using ``lookup`` within a loop should not be converted to use the ``loop`` keyword. For example, instead of doing: + +.. code-block:: yaml + + loop: "{{ lookup('fileglob', '*.txt', wantlist=True) }}" + +it's cleaner to keep:: + + with_fileglob: '*.txt' + +.. _standard_loops: + +Standard loops +============== + +Iterating over a simple list +---------------------------- + +Repeated tasks can be written as standard loops over a simple list of strings. You can define the list directly in the task:: + + - name: Add several users + ansible.builtin.user: + name: "{{ item }}" + state: present + groups: "wheel" + loop: + - testuser1 + - testuser2 + +You can define the list in a variables file, or in the 'vars' section of your play, then refer to the name of the list in the task:: + + loop: "{{ somelist }}" + +Either of these examples would be the equivalent of:: + + - name: Add user testuser1 + ansible.builtin.user: + name: "testuser1" + state: present + groups: "wheel" + + - name: Add user testuser2 + ansible.builtin.user: + name: "testuser2" + state: present + groups: "wheel" + +You can pass a list directly to a parameter for some plugins. Most of the packaging modules, like :ref:`yum <yum_module>` and :ref:`apt <apt_module>`, have this capability. 
When available, passing the list to a parameter is better than looping over the task. For example:: + + - name: Optimal yum + ansible.builtin.yum: + name: "{{ list_of_packages }}" + state: present + + - name: Non-optimal yum, slower and may cause issues with interdependencies + ansible.builtin.yum: + name: "{{ item }}" + state: present + loop: "{{ list_of_packages }}" + +Check the :ref:`module documentation <modules_by_category>` to see if you can pass a list to any particular module's parameter(s). + +Iterating over a list of hashes +------------------------------- + +If you have a list of hashes, you can reference subkeys in a loop. For example:: + + - name: Add several users + ansible.builtin.user: + name: "{{ item.name }}" + state: present + groups: "{{ item.groups }}" + loop: + - { name: 'testuser1', groups: 'wheel' } + - { name: 'testuser2', groups: 'root' } + +When combining :ref:`conditionals <playbooks_conditionals>` with a loop, the ``when:`` statement is processed separately for each item. +See :ref:`the_when_statement` for examples. + +Iterating over a dictionary +--------------------------- + +To loop over a dict, use the :ref:`dict2items <dict_filter>`: + +.. code-block:: yaml + + - name: Using dict2items + ansible.builtin.debug: + msg: "{{ item.key }} - {{ item.value }}" + loop: "{{ tag_data | dict2items }}" + vars: + tag_data: + Environment: dev + Application: payment + +Here, we are iterating over `tag_data` and printing the key and the value from it. + +Registering variables with a loop +================================= + +You can register the output of a loop as a variable. For example:: + + - name: Register loop output as a variable + ansible.builtin.shell: "echo {{ item }}" + loop: + - "one" + - "two" + register: echo + +When you use ``register`` with a loop, the data structure placed in the variable will contain a ``results`` attribute that is a list of all responses from the module. 
This differs from the data structure returned when using ``register`` without a loop:: + + { + "changed": true, + "msg": "All items completed", + "results": [ + { + "changed": true, + "cmd": "echo \"one\" ", + "delta": "0:00:00.003110", + "end": "2013-12-19 12:00:05.187153", + "invocation": { + "module_args": "echo \"one\"", + "module_name": "shell" + }, + "item": "one", + "rc": 0, + "start": "2013-12-19 12:00:05.184043", + "stderr": "", + "stdout": "one" + }, + { + "changed": true, + "cmd": "echo \"two\" ", + "delta": "0:00:00.002920", + "end": "2013-12-19 12:00:05.245502", + "invocation": { + "module_args": "echo \"two\"", + "module_name": "shell" + }, + "item": "two", + "rc": 0, + "start": "2013-12-19 12:00:05.242582", + "stderr": "", + "stdout": "two" + } + ] + } + +Subsequent loops over the registered variable to inspect the results may look like:: + + - name: Fail if return code is not 0 + ansible.builtin.fail: + msg: "The command ({{ item.cmd }}) did not have a 0 return code" + when: item.rc != 0 + loop: "{{ echo.results }}" + +During iteration, the result of the current item will be placed in the variable:: + + - name: Place the result of the current item in the variable + ansible.builtin.shell: echo "{{ item }}" + loop: + - one + - two + register: echo + changed_when: echo.stdout != "one" + +.. _complex_loops: + +Complex loops +============= + +Iterating over nested lists +--------------------------- + +You can use Jinja2 expressions to iterate over complex lists. For example, a loop can combine nested lists:: + + - name: Give users access to multiple databases + community.mysql.mysql_user: + name: "{{ item[0] }}" + priv: "{{ item[1] }}.*:ALL" + append_privs: yes + password: "foo" + loop: "{{ ['alice', 'bob'] |product(['clientdb', 'employeedb', 'providerdb'])|list }}" + + +.. _do_until_loops: + +Retrying a task until a condition is met +---------------------------------------- + +.. 
versionadded:: 1.4 + +You can use the ``until`` keyword to retry a task until a certain condition is met. Here's an example:: + + - name: Retry a task until a certain condition is met + ansible.builtin.shell: /usr/bin/foo + register: result + until: result.stdout.find("all systems go") != -1 + retries: 5 + delay: 10 + +This task runs up to 5 times with a delay of 10 seconds between each attempt. If the result of any attempt has "all systems go" in its stdout, the task succeeds. The default value for "retries" is 3 and "delay" is 5. + +To see the results of individual retries, run the play with ``-vv``. + +When you run a task with ``until`` and register the result as a variable, the registered variable will include a key called "attempts", which records the number of the retries for the task. + +.. note:: You must set the ``until`` parameter if you want a task to retry. If ``until`` is not defined, the value for the ``retries`` parameter is forced to 1. + +Looping over inventory +---------------------- + +To loop over your inventory, or just a subset of it, you can use a regular ``loop`` with the ``ansible_play_batch`` or ``groups`` variables:: + + - name: Show all the hosts in the inventory + ansible.builtin.debug: + msg: "{{ item }}" + loop: "{{ groups['all'] }}" + + - name: Show all the hosts in the current play + ansible.builtin.debug: + msg: "{{ item }}" + loop: "{{ ansible_play_batch }}" + +There is also a specific lookup plugin ``inventory_hostnames`` that can be used like this:: + + - name: Show all the hosts in the inventory + ansible.builtin.debug: + msg: "{{ item }}" + loop: "{{ query('inventory_hostnames', 'all') }}" + + - name: Show all the hosts matching the pattern, ie all but the group www + ansible.builtin.debug: + msg: "{{ item }}" + loop: "{{ query('inventory_hostnames', 'all:!www') }}" + +More information on the patterns can be found in :ref:`intro_patterns`. + +.. 
_query_vs_lookup: + +Ensuring list input for ``loop``: using ``query`` rather than ``lookup`` +======================================================================== + +The ``loop`` keyword requires a list as input, but the ``lookup`` keyword returns a string of comma-separated values by default. Ansible 2.5 introduced a new Jinja2 function named :ref:`query <query>` that always returns a list, offering a simpler interface and more predictable output from lookup plugins when using the ``loop`` keyword. + +You can force ``lookup`` to return a list to ``loop`` by using ``wantlist=True``, or you can use ``query`` instead. + +These examples do the same thing:: + + loop: "{{ query('inventory_hostnames', 'all') }}" + + loop: "{{ lookup('inventory_hostnames', 'all', wantlist=True) }}" + + +.. _loop_control: + +Adding controls to loops +======================== +.. versionadded:: 2.1 + +The ``loop_control`` keyword lets you manage your loops in useful ways. + +Limiting loop output with ``label`` +----------------------------------- +.. versionadded:: 2.2 + +When looping over complex data structures, the console output of your task can be enormous. To limit the displayed output, use the ``label`` directive with ``loop_control``:: + + - name: Create servers + digital_ocean: + name: "{{ item.name }}" + state: present + loop: + - name: server1 + disks: 3gb + ram: 15Gb + network: + nic01: 100Gb + nic02: 10Gb + ... + loop_control: + label: "{{ item.name }}" + +The output of this task will display just the ``name`` field for each ``item`` instead of the entire contents of the multi-line ``{{ item }}`` variable. + +.. note:: This is for making console output more readable, not protecting sensitive data. If there is sensitive data in ``loop``, set ``no_log: yes`` on the task to prevent disclosure. + +Pausing within a loop +--------------------- +.. 
versionadded:: 2.2 + +To control the time (in seconds) between the execution of each item in a task loop, use the ``pause`` directive with ``loop_control``:: + + # main.yml + - name: Create servers, pause 3s before creating next + community.digitalocean.digital_ocean: + name: "{{ item }}" + state: present + loop: + - server1 + - server2 + loop_control: + pause: 3 + +Tracking progress through a loop with ``index_var`` +--------------------------------------------------- +.. versionadded:: 2.5 + +To keep track of where you are in a loop, use the ``index_var`` directive with ``loop_control``. This directive specifies a variable name to contain the current loop index:: + + - name: Count our fruit + ansible.builtin.debug: + msg: "{{ item }} with index {{ my_idx }}" + loop: + - apple + - banana + - pear + loop_control: + index_var: my_idx + +.. note:: `index_var` is 0 indexed. + +Defining inner and outer variable names with ``loop_var`` +--------------------------------------------------------- +.. versionadded:: 2.1 + +You can nest two looping tasks using ``include_tasks``. However, by default Ansible sets the loop variable ``item`` for each loop. This means the inner, nested loop will overwrite the value of ``item`` from the outer loop. +You can specify the name of the variable for each loop using ``loop_var`` with ``loop_control``:: + + # main.yml + - include_tasks: inner.yml + loop: + - 1 + - 2 + - 3 + loop_control: + loop_var: outer_item + + # inner.yml + - name: Print outer and inner items + ansible.builtin.debug: + msg: "outer item={{ outer_item }} inner item={{ item }}" + loop: + - a + - b + - c + +.. note:: If Ansible detects that the current loop is using a variable which has already been defined, it will raise an error to fail the task. + +Extended loop variables +----------------------- +.. versionadded:: 2.8 + +As of Ansible 2.8 you can get extended loop information using the ``extended`` option to loop control. 
This option will expose the following information. + +========================== =========== +Variable Description +-------------------------- ----------- +``ansible_loop.allitems`` The list of all items in the loop +``ansible_loop.index`` The current iteration of the loop. (1 indexed) +``ansible_loop.index0`` The current iteration of the loop. (0 indexed) +``ansible_loop.revindex`` The number of iterations from the end of the loop (1 indexed) +``ansible_loop.revindex0`` The number of iterations from the end of the loop (0 indexed) +``ansible_loop.first`` ``True`` if first iteration +``ansible_loop.last`` ``True`` if last iteration +``ansible_loop.length`` The number of items in the loop +``ansible_loop.previtem`` The item from the previous iteration of the loop. Undefined during the first iteration. +``ansible_loop.nextitem`` The item from the following iteration of the loop. Undefined during the last iteration. +========================== =========== + +:: + + loop_control: + extended: yes + +Accessing the name of your loop_var +----------------------------------- +.. versionadded:: 2.8 + +As of Ansible 2.8 you can get the name of the value provided to ``loop_control.loop_var`` using the ``ansible_loop_var`` variable + +For role authors, writing roles that allow loops, instead of dictating the required ``loop_var`` value, you can gather the value via:: + + "{{ lookup('vars', ansible_loop_var) }}" + +.. _migrating_to_loop: + +Migrating from with_X to loop +============================= + +.. include:: shared_snippets/with2loop.txt + +.. seealso:: + + :ref:`about_playbooks` + An introduction to playbooks + :ref:`playbooks_reuse_roles` + Playbook organization by roles + :ref:`playbooks_best_practices` + Tips and tricks for playbooks + :ref:`playbooks_conditionals` + Conditional statements in playbooks + :ref:`playbooks_variables` + All about variables + `User Mailing List <https://groups.google.com/group/ansible-devel>`_ + Have a question? Stop by the google group! 
+ `irc.freenode.net <http://irc.freenode.net>`_ + #ansible IRC chat channel diff --git a/docs/docsite/rst/user_guide/playbooks_module_defaults.rst b/docs/docsite/rst/user_guide/playbooks_module_defaults.rst new file mode 100644 index 00000000..f1260e22 --- /dev/null +++ b/docs/docsite/rst/user_guide/playbooks_module_defaults.rst @@ -0,0 +1,143 @@ +.. _module_defaults: + +Module defaults +=============== + +If you frequently call the same module with the same arguments, it can be useful to define default arguments for that particular module using the ``module_defaults`` attribute. + +Here is a basic example:: + + - hosts: localhost + module_defaults: + ansible.builtin.file: + owner: root + group: root + mode: 0755 + tasks: + - name: Create file1 + ansible.builtin.file: + state: touch + path: /tmp/file1 + + - name: Create file2 + ansible.builtin.file: + state: touch + path: /tmp/file2 + + - name: Create file3 + ansible.builtin.file: + state: touch + path: /tmp/file3 + +The ``module_defaults`` attribute can be used at the play, block, and task level. Any module arguments explicitly specified in a task will override any established default for that module argument:: + + - block: + - name: Print a message + ansible.builtin.debug: + msg: "Different message" + module_defaults: + ansible.builtin.debug: + msg: "Default message" + +You can remove any previously established defaults for a module by specifying an empty dict:: + + - name: Create file1 + ansible.builtin.file: + state: touch + path: /tmp/file1 + module_defaults: + file: {} + +.. note:: + Any module defaults set at the play level (and block/task level when using ``include_role`` or ``import_role``) will apply to any roles used, which may cause unexpected behavior in the role. + +Here are some more realistic use cases for this feature. 
+ +Interacting with an API that requires auth:: + + - hosts: localhost + module_defaults: + ansible.builtin.uri: + force_basic_auth: true + user: some_user + password: some_password + tasks: + - name: Interact with a web service + ansible.builtin.uri: + url: http://some.api.host/v1/whatever1 + + - name: Interact with a web service + ansible.builtin.uri: + url: http://some.api.host/v1/whatever2 + + - name: Interact with a web service + ansible.builtin.uri: + url: http://some.api.host/v1/whatever3 + +Setting a default AWS region for specific EC2-related modules:: + + - hosts: localhost + vars: + my_region: us-west-2 + module_defaults: + amazon.aws.ec2: + region: '{{ my_region }}' + community.aws.ec2_instance_info: + region: '{{ my_region }}' + amazon.aws.ec2_vpc_net_info: + region: '{{ my_region }}' + +.. _module_defaults_groups: + +Module defaults groups +---------------------- + +.. versionadded:: 2.7 + +Ansible 2.7 adds a preview-status feature to group together modules that share common sets of parameters. This makes it easier to author playbooks making heavy use of API-based modules such as cloud modules. 
+ ++---------+---------------------------+-----------------+ +| Group | Purpose | Ansible Version | ++=========+===========================+=================+ +| aws | Amazon Web Services | 2.7 | ++---------+---------------------------+-----------------+ +| azure | Azure | 2.7 | ++---------+---------------------------+-----------------+ +| gcp | Google Cloud Platform | 2.7 | ++---------+---------------------------+-----------------+ +| k8s | Kubernetes | 2.8 | ++---------+---------------------------+-----------------+ +| os | OpenStack | 2.8 | ++---------+---------------------------+-----------------+ +| acme | ACME | 2.10 | ++---------+---------------------------+-----------------+ +| docker* | Docker | 2.10 | ++---------+---------------------------+-----------------+ +| ovirt | oVirt | 2.10 | ++---------+---------------------------+-----------------+ +| vmware | VMware | 2.10 | ++---------+---------------------------+-----------------+ + +* The `docker_stack <docker_stack_module>`_ module is not included in the ``docker`` defaults group. + +Use the groups with ``module_defaults`` by prefixing the group name with ``group/`` - for example ``group/aws``. + +In a playbook, you can set module defaults for whole groups of modules, such as setting a common AWS region. + +.. code-block:: YAML + + # example_play.yml + - hosts: localhost + module_defaults: + group/aws: + region: us-west-2 + tasks: + - name: Get info + aws_s3_bucket_info: + + # now the region is shared between both info modules + + - name: Get info + ec2_ami_info: + filters: + name: 'RHEL*7.5*' diff --git a/docs/docsite/rst/user_guide/playbooks_prompts.rst b/docs/docsite/rst/user_guide/playbooks_prompts.rst new file mode 100644 index 00000000..856f7037 --- /dev/null +++ b/docs/docsite/rst/user_guide/playbooks_prompts.rst @@ -0,0 +1,116 @@ +.. 
_playbooks_prompts: + +************************** +Interactive input: prompts +************************** + +If you want your playbook to prompt the user for certain input, add a 'vars_prompt' section. Prompting the user for variables lets you avoid recording sensitive data like passwords. In addition to security, prompts support flexibility. For example, if you use one playbook across multiple software releases, you could prompt for the particular release version. + +.. contents:: + :local: + +Here is a most basic example:: + + --- + - hosts: all + vars_prompt: + + - name: username + prompt: What is your username? + private: no + + - name: password + prompt: What is your password? + + tasks: + + - name: Print a message + ansible.builtin.debug: + msg: 'Logging in as {{ username }}' + +The user input is hidden by default but it can be made visible by setting ``private: no``. + +.. note:: + Prompts for individual ``vars_prompt`` variables will be skipped for any variable that is already defined through the command line ``--extra-vars`` option, or when running from a non-interactive session (such as cron or Ansible Tower). See :ref:`passing_variables_on_the_command_line`. 
+
+If you have a variable that changes infrequently, you can provide a default value that can be overridden::
+
+   vars_prompt:
+
+     - name: release_version
+       prompt: Product release version
+       default: "1.0"
+
+Encrypting values supplied by ``vars_prompt``
+---------------------------------------------
+
+You can encrypt the entered value so you can use it, for instance, with the user module to define a password::
+
+   vars_prompt:
+
+     - name: my_password2
+       prompt: Enter password2
+       private: yes
+       encrypt: sha512_crypt
+       confirm: yes
+       salt_size: 7
+
+If you have `Passlib <https://passlib.readthedocs.io/en/stable/>`_ installed, you can use any crypt scheme the library supports:
+
+- *des_crypt* - DES Crypt
+- *bsdi_crypt* - BSDi Crypt
+- *bigcrypt* - BigCrypt
+- *crypt16* - Crypt16
+- *md5_crypt* - MD5 Crypt
+- *bcrypt* - BCrypt
+- *sha1_crypt* - SHA-1 Crypt
+- *sun_md5_crypt* - Sun MD5 Crypt
+- *sha256_crypt* - SHA-256 Crypt
+- *sha512_crypt* - SHA-512 Crypt
+- *apr_md5_crypt* - Apache's MD5-Crypt variant
+- *phpass* - PHPass' Portable Hash
+- *pbkdf2_digest* - Generic PBKDF2 Hashes
+- *cta_pbkdf2_sha1* - Cryptacular's PBKDF2 hash
+- *dlitz_pbkdf2_sha1* - Dwayne Litzenberger's PBKDF2 hash
+- *scram* - SCRAM Hash
+- *bsd_nthash* - FreeBSD's MCF-compatible nthash encoding
+
+The only parameters accepted are 'salt' or 'salt_size'. You can use your own salt by defining
+'salt', or have one generated automatically using 'salt_size'. By default Ansible generates a salt
+of size 8.
+
+.. versionadded:: 2.7
+
+If you do not have Passlib installed, Ansible uses the `crypt <https://docs.python.org/2/library/crypt.html>`_ library as a fallback. Depending on your platform, Ansible then supports at most the following four crypt schemes:
+
+- *bcrypt* - BCrypt
+- *md5_crypt* - MD5 Crypt
+- *sha256_crypt* - SHA-256 Crypt
+- *sha512_crypt* - SHA-512 Crypt
+
+.. versionadded:: 2.8
+.. 
_unsafe_prompts: + +Allowing special characters in ``vars_prompt`` values +----------------------------------------------------- + +Some special characters, such as ``{`` and ``%`` can create templating errors. If you need to accept special characters, use the ``unsafe`` option:: + + vars_prompt: + - name: my_password_with_weird_chars + prompt: Enter password + unsafe: yes + private: yes + +.. seealso:: + + :ref:`playbooks_intro` + An introduction to playbooks + :ref:`playbooks_conditionals` + Conditional statements in playbooks + :ref:`playbooks_variables` + All about variables + `User Mailing List <https://groups.google.com/group/ansible-devel>`_ + Have a question? Stop by the google group! + `irc.freenode.net <http://irc.freenode.net>`_ + #ansible IRC chat channel diff --git a/docs/docsite/rst/user_guide/playbooks_python_version.rst b/docs/docsite/rst/user_guide/playbooks_python_version.rst new file mode 100644 index 00000000..60821b37 --- /dev/null +++ b/docs/docsite/rst/user_guide/playbooks_python_version.rst @@ -0,0 +1,64 @@ +.. _pb-py-compat: + +******************** +Python3 in templates +******************** + +Ansible uses Jinja2 to leverage Python data types and standard functions in templates and variables. +You can use these data types and standard functions to perform a rich set of operations on your data. However, +if you use templates, you must be aware of differences between Python versions. + +These topics help you design templates that work on both Python2 and Python3. They might also help if you are upgrading from Python2 to Python3. Upgrading within Python2 or Python3 does not usually introduce changes that affect Jinja2 templates. + +.. _pb-py-compat-dict-views: + +Dictionary views +================ + +In Python2, the :meth:`dict.keys`, :meth:`dict.values`, and :meth:`dict.items` +methods return a list. Jinja2 returns that to Ansible via a string +representation that Ansible can turn back into a list. 
+ +In Python3, those methods return a :ref:`dictionary view <python3:dict-views>` object. The +string representation that Jinja2 returns for dictionary views cannot be parsed back +into a list by Ansible. It is, however, easy to make this portable by +using the :func:`list <jinja2:list>` filter whenever using :meth:`dict.keys`, +:meth:`dict.values`, or :meth:`dict.items`:: + + vars: + hosts: + testhost1: 127.0.0.2 + testhost2: 127.0.0.3 + tasks: + - debug: + msg: '{{ item }}' + # Only works with Python 2 + #loop: "{{ hosts.keys() }}" + # Works with both Python 2 and Python 3 + loop: "{{ hosts.keys() | list }}" + +.. _pb-py-compat-iteritems: + +dict.iteritems() +================ + +Python2 dictionaries have :meth:`~dict.iterkeys`, :meth:`~dict.itervalues`, and :meth:`~dict.iteritems` methods. + +Python3 dictionaries do not have these methods. Use :meth:`dict.keys`, :meth:`dict.values`, and :meth:`dict.items` to make your playbooks and templates compatible with both Python2 and Python3:: + + vars: + hosts: + testhost1: 127.0.0.2 + testhost2: 127.0.0.3 + tasks: + - debug: + msg: '{{ item }}' + # Only works with Python 2 + #loop: "{{ hosts.iteritems() }}" + # Works with both Python 2 and Python 3 + loop: "{{ hosts.items() | list }}" + +.. seealso:: + * The :ref:`pb-py-compat-dict-views` entry for information on + why the :func:`list filter <jinja2:list>` is necessary + here. diff --git a/docs/docsite/rst/user_guide/playbooks_reuse.rst b/docs/docsite/rst/user_guide/playbooks_reuse.rst new file mode 100644 index 00000000..3e80f5c2 --- /dev/null +++ b/docs/docsite/rst/user_guide/playbooks_reuse.rst @@ -0,0 +1,201 @@ +.. _playbooks_reuse: + +************************** +Re-using Ansible artifacts +************************** + +You can write a simple playbook in one very large file, and most users learn the one-file approach first. However, breaking tasks up into different files is an excellent way to organize complex sets of tasks and reuse them. 
Smaller, more distributed artifacts let you re-use the same variables, tasks, and plays in multiple playbooks to address different use cases. You can use distributed artifacts across multiple parent playbooks or even multiple times within one playbook. For example, you might want to update your customer database as part of several different playbooks. If you put all the tasks related to updating your database in a tasks file, you can re-use them in many playbooks while only maintaining them in one place. + +.. contents:: + :local: + +Creating re-usable files and roles +================================== + +Ansible offers four distributed, re-usable artifacts: variables files, task files, playbooks, and roles. + + - A variables file contains only variables. + - A task file contains only tasks. + - A playbook contains at least one play, and may contain variables, tasks, and other content. You can re-use tightly focused playbooks, but you can only re-use them statically, not dynamically. + - A role contains a set of related tasks, variables, defaults, handlers, and even modules or other plugins in a defined file-tree. Unlike variables files, task files, or playbooks, roles can be easily uploaded and shared via Ansible Galaxy. See :ref:`playbooks_reuse_roles` for details about creating and using roles. + +.. versionadded:: 2.4 + +Re-using playbooks +================== + +You can incorporate multiple playbooks into a master playbook. However, you can only use imports to re-use playbooks. For example: + +.. code-block:: yaml + + - import_playbook: webservers.yml + - import_playbook: databases.yml + +Importing incorporates playbooks in other playbooks statically. Ansible runs the plays and tasks in each imported playbook in the order they are listed, just as if they had been defined directly in the master playbook. + +Re-using files and roles +======================== + +Ansible offers two ways to re-use files and roles in a playbook: dynamic and static. 
+ + - For dynamic re-use, add an ``include_*`` task in the tasks section of a play: + + - :ref:`include_role <include_role_module>` + - :ref:`include_tasks <include_tasks_module>` + - :ref:`include_vars <include_vars_module>` + + - For static re-use, add an ``import_*`` task in the tasks section of a play: + + - :ref:`import_role <import_role_module>` + - :ref:`import_tasks <import_tasks_module>` + +Task include and import statements can be used at arbitrary depth. + +You can still use the bare :ref:`roles <roles_keyword>` keyword at the play level to incorporate a role in a playbook statically. However, the bare :ref:`include <include_module>` keyword, once used for both task files and playbook-level includes, is now deprecated. + +Includes: dynamic re-use +------------------------ + +Including roles, tasks, or variables adds them to a playbook dynamically. Ansible processes included files and roles as they come up in a playbook, so included tasks can be affected by the results of earlier tasks within the top-level playbook. Included roles and tasks are similar to handlers - they may or may not run, depending on the results of other tasks in the top-level playbook. + +The primary advantage of using ``include_*`` statements is looping. When a loop is used with an include, the included tasks or role will be executed once for each item in the loop. + +You can pass variables into includes. See :ref:`ansible_variable_precedence` for more details on variable inheritance and precedence. + +Imports: static re-use +---------------------- + +Importing roles, tasks, or playbooks adds them to a playbook statically. Ansible pre-processes imported files and roles before it runs any tasks in a playbook, so imported content is never affected by other tasks within the top-level playbook. + +You can pass variables to imports. You must pass variables if you want to run an imported file more than once in a playbook. For example: + +.. 
code-block:: yaml + + tasks: + - import_tasks: wordpress.yml + vars: + wp_user: timmy + + - import_tasks: wordpress.yml + vars: + wp_user: alice + + - import_tasks: wordpress.yml + vars: + wp_user: bob + +See :ref:`ansible_variable_precedence` for more details on variable inheritance and precedence. + +.. _dynamic_vs_static: + +Comparing includes and imports: dynamic and static re-use +------------------------------------------------------------ + +Each approach to re-using distributed Ansible artifacts has advantages and limitations. You may choose dynamic re-use for some playbooks and static re-use for others. Although you can use both dynamic and static re-use in a single playbook, it is best to select one approach per playbook. Mixing static and dynamic re-use can introduce difficult-to-diagnose bugs into your playbooks. This table summarizes the main differences so you can choose the best approach for each playbook you create. + +.. table:: + :class: documentation-table + + ========================= ======================================== ======================================== + .. 
Include_* Import_* + ========================= ======================================== ======================================== + Type of re-use Dynamic Static + + When processed At runtime, when encountered Pre-processed during playbook parsing + + Task or play All includes are tasks ``import_playbook`` cannot be a task + + Task options Apply only to include task itself Apply to all child tasks in import + + Calling from loops Executed once for each loop item Cannot be used in a loop + + Using ``--list-tags`` Tags within includes not listed All tags appear with ``--list-tags`` + + Using ``--list-tasks`` Tasks within includes not listed All tasks appear with ``--list-tasks`` + + Notifying handlers Cannot trigger handlers within includes Can trigger individual imported handlers + + Using ``--start-at-task`` Cannot start at tasks within includes Can start at imported tasks + + Using inventory variables Can ``include_*: {{ inventory_var }}`` Cannot ``import_*: {{ inventory_var }}`` + + With playbooks No ``include_playbook`` Can import full playbooks + + With variables files Can include variables files Use ``vars_files:`` to import variables + + ========================= ======================================== ======================================== + +Re-using tasks as handlers +========================== + +You can also use includes and imports in the :ref:`handlers` section of a playbook. For instance, if you want to define how to restart Apache, you only have to do that once for all of your playbooks. You might make a ``restarts.yml`` file that looks like: + +.. code-block:: yaml + + # restarts.yml + - name: Restart apache + ansible.builtin.service: + name: apache + state: restarted + + - name: Restart mysql + ansible.builtin.service: + name: mysql + state: restarted + +You can trigger handlers from either an import or an include, but the procedure is different for each method of re-use. 
If you include the file, you must notify the include itself, which triggers all the tasks in ``restarts.yml``. If you import the file, you must notify the individual task(s) within ``restarts.yml``. You can mix direct tasks and handlers with included or imported tasks and handlers.
+
+Triggering included (dynamic) handlers
+--------------------------------------
+
+Includes are executed at run-time, so the name of the include exists during play execution, but the included tasks do not exist until the include itself is triggered. To use the ``Restart apache`` task with dynamic re-use, refer to the name of the include itself. This approach triggers all tasks in the included file as handlers. For example, with the task file shown above:
+
+.. code-block:: yaml
+
+   - name: Trigger an included (dynamic) handler
+     hosts: localhost
+     handlers:
+       - name: Restart services
+         include_tasks: restarts.yml
+     tasks:
+       - command: "true"
+         notify: Restart services
+
+Triggering imported (static) handlers
+-------------------------------------
+
+Imports are processed before the play begins, so the name of the import no longer exists during play execution, but the names of the individual imported tasks do exist. To use the ``Restart apache`` task with static re-use, refer to the name of each task or tasks within the imported file. For example, with the task file shown above:
+
+.. code-block:: yaml
+
+   - name: Trigger an imported (static) handler
+     hosts: localhost
+     handlers:
+       - name: Restart services
+         import_tasks: restarts.yml
+     tasks:
+       - command: "true"
+         notify: Restart apache
+       - command: "true"
+         notify: Restart mysql
+
+.. seealso::
+
+   :ref:`utilities_modules`
+       Documentation of the ``include*`` and ``import*`` modules discussed here. 
+ :ref:`working_with_playbooks` + Review the basic Playbook language features + :ref:`playbooks_variables` + All about variables in playbooks + :ref:`playbooks_conditionals` + Conditionals in playbooks + :ref:`playbooks_loops` + Loops in playbooks + :ref:`playbooks_best_practices` + Tips and tricks for playbooks + :ref:`ansible_galaxy` + How to share roles on galaxy, role management + `GitHub Ansible examples <https://github.com/ansible/ansible-examples>`_ + Complete playbook files from the GitHub project source + `Mailing List <https://groups.google.com/group/ansible-project>`_ + Questions? Help? Ideas? Stop by the list on Google Groups diff --git a/docs/docsite/rst/user_guide/playbooks_reuse_includes.rst b/docs/docsite/rst/user_guide/playbooks_reuse_includes.rst new file mode 100644 index 00000000..ecce954a --- /dev/null +++ b/docs/docsite/rst/user_guide/playbooks_reuse_includes.rst @@ -0,0 +1,32 @@ +:orphan: + +.. _playbooks_reuse_includes: + +Including and importing +======================= + +The content on this page has been moved to :ref:`playbooks_reuse`. + + +.. seealso:: + + :ref:`yaml_syntax` + Learn about YAML syntax + :ref:`working_with_playbooks` + Review the basic Playbook language features + :ref:`playbooks_best_practices` + Tips and tricks for playbooks + :ref:`playbooks_variables` + All about variables in playbooks + :ref:`playbooks_conditionals` + Conditionals in playbooks + :ref:`playbooks_loops` + Loops in playbooks + :ref:`list_of_collections` + Browse existing collections, modules, and plugins + :ref:`developing_modules` + Learn how to extend Ansible by writing your own modules + `GitHub Ansible examples <https://github.com/ansible/ansible-examples>`_ + Complete playbook files from the GitHub project source + `Mailing List <https://groups.google.com/group/ansible-project>`_ + Questions? Help? Ideas? 
Stop by the list on Google Groups
diff --git a/docs/docsite/rst/user_guide/playbooks_reuse_roles.rst b/docs/docsite/rst/user_guide/playbooks_reuse_roles.rst
new file mode 100644
index 00000000..56093d3d
--- /dev/null
+++ b/docs/docsite/rst/user_guide/playbooks_reuse_roles.rst
@@ -0,0 +1,490 @@
+.. _playbooks_reuse_roles:
+
+*****
+Roles
+*****
+
+Roles let you automatically load related vars_files, tasks, handlers, and other Ansible artifacts based on a known file structure. Once you group your content in roles, you can easily reuse them and share them with other users.
+
+.. contents::
+   :local:
+
+Role directory structure
+========================
+
+An Ansible role has a defined directory structure with eight main standard directories. You must include at least one of these directories in each role. You can omit any directories the role does not use. For example:
+
+.. code-block:: text
+
+    # playbooks
+    site.yml
+    webservers.yml
+    fooservers.yml
+    roles/
+        common/
+            tasks/
+            handlers/
+            library/
+            files/
+            templates/
+            vars/
+            defaults/
+            meta/
+        webservers/
+            tasks/
+            defaults/
+            meta/
+
+By default Ansible will look in each directory within a role for a ``main.yml`` file for relevant content (also ``main.yaml`` and ``main``):
+
+- ``tasks/main.yml`` - the main list of tasks that the role executes.
+- ``handlers/main.yml`` - handlers, which may be used within or outside this role.
+- ``library/my_module.py`` - modules, which may be used within this role (see :ref:`embedding_modules_and_plugins_in_roles` for more information).
+- ``defaults/main.yml`` - default variables for the role (see :ref:`playbooks_variables` for more information). These variables have the lowest priority of any variables available, and can be easily overridden by any other variable, including inventory variables.
+- ``vars/main.yml`` - other variables for the role (see :ref:`playbooks_variables` for more information).
+- ``files/main.yml`` - files that the role deploys. 
+- ``templates/main.yml`` - templates that the role deploys. +- ``meta/main.yml`` - metadata for the role, including role dependencies. + +You can add other YAML files in some directories. For example, you can place platform-specific tasks in separate files and refer to them in the ``tasks/main.yml`` file: + +.. code-block:: yaml + + # roles/example/tasks/main.yml + - name: Install the correct web server for RHEL + import_tasks: redhat.yml + when: ansible_facts['os_family']|lower == 'redhat' + + - name: Install the correct web server for Debian + import_tasks: debian.yml + when: ansible_facts['os_family']|lower == 'debian' + + # roles/example/tasks/redhat.yml + - name: Install web server + ansible.builtin.yum: + name: "httpd" + state: present + + # roles/example/tasks/debian.yml + - name: Install web server + ansible.builtin.apt: + name: "apache2" + state: present + +Roles may also include modules and other plugin types in a directory called ``library``. For more information, please refer to :ref:`embedding_modules_and_plugins_in_roles` below. + +.. _role_search_path: + +Storing and finding roles +========================= + +By default, Ansible looks for roles in two locations: + +- in a directory called ``roles/``, relative to the playbook file +- in ``/etc/ansible/roles`` + +If you store your roles in a different location, set the :ref:`roles_path <DEFAULT_ROLES_PATH>` configuration option so Ansible can find your roles. Checking shared roles into a single location makes them easier to use in multiple playbooks. See :ref:`intro_configuration` for details about managing settings in ansible.cfg. + +Alternatively, you can call a role with a fully qualified path: + +.. code-block:: yaml + + --- + - hosts: webservers + roles: + - role: '/path/to/my/roles/common' + +Using roles +=========== + +You can use roles in three ways: + +- at the play level with the ``roles`` option: This is the classic way of using roles in a play. 
+- at the tasks level with ``include_role``: You can reuse roles dynamically anywhere in the ``tasks`` section of a play using ``include_role``. +- at the tasks level with ``import_role``: You can reuse roles statically anywhere in the ``tasks`` section of a play using ``import_role``. + +.. _roles_keyword: + +Using roles at the play level +----------------------------- + +The classic (original) way to use roles is with the ``roles`` option for a given play: + +.. code-block:: yaml + + --- + - hosts: webservers + roles: + - common + - webservers + +When you use the ``roles`` option at the play level, for each role 'x': + +- If roles/x/tasks/main.yml exists, Ansible adds the tasks in that file to the play. +- If roles/x/handlers/main.yml exists, Ansible adds the handlers in that file to the play. +- If roles/x/vars/main.yml exists, Ansible adds the variables in that file to the play. +- If roles/x/defaults/main.yml exists, Ansible adds the variables in that file to the play. +- If roles/x/meta/main.yml exists, Ansible adds any role dependencies in that file to the list of roles. +- Any copy, script, template or include tasks (in the role) can reference files in roles/x/{files,templates,tasks}/ (dir depends on task) without having to path them relatively or absolutely. + +When you use the ``roles`` option at the play level, Ansible treats the roles as static imports and processes them during playbook parsing. Ansible executes your playbook in this order: + +- Any ``pre_tasks`` defined in the play. +- Any handlers triggered by pre_tasks. +- Each role listed in ``roles:``, in the order listed. Any role dependencies defined in the role's ``meta/main.yml`` run first, subject to tag filtering and conditionals. See :ref:`role_dependencies` for more details. +- Any ``tasks`` defined in the play. +- Any handlers triggered by the roles or tasks. +- Any ``post_tasks`` defined in the play. +- Any handlers triggered by post_tasks. + +.. 
note:: + If using tags with tasks in a role, be sure to also tag your pre_tasks, post_tasks, and role dependencies and pass those along as well, especially if the pre/post tasks and role dependencies are used for monitoring outage window control or load balancing. See :ref:`tags` for details on adding and using tags. + +You can pass other keywords to the ``roles`` option: + +.. code-block:: yaml + + --- + - hosts: webservers + roles: + - common + - role: foo_app_instance + vars: + dir: '/opt/a' + app_port: 5000 + tags: typeA + - role: foo_app_instance + vars: + dir: '/opt/b' + app_port: 5001 + tags: typeB + +When you add a tag to the ``role`` option, Ansible applies the tag to ALL tasks within the role. + +When using ``vars:`` within the ``roles:`` section of a playbook, the variables are added to the play variables, making them available to all tasks within the play before and after the role. This behavior can be changed by :ref:`DEFAULT_PRIVATE_ROLE_VARS`. + +Including roles: dynamic reuse +------------------------------ + +You can reuse roles dynamically anywhere in the ``tasks`` section of a play using ``include_role``. While roles added in a ``roles`` section run before any other tasks in a playbook, included roles run in the order they are defined. If there are other tasks before an ``include_role`` task, the other tasks will run first. + +To include a role: + +.. code-block:: yaml + + --- + - hosts: webservers + tasks: + - name: Print a message + ansible.builtin.debug: + msg: "this task runs before the example role" + + - name: Include the example role + include_role: + name: example + + - name: Print a message + ansible.builtin.debug: + msg: "this task runs after the example role" + +You can pass other keywords, including variables and tags, when including roles: + +.. 
code-block:: yaml + + --- + - hosts: webservers + tasks: + - name: Include the foo_app_instance role + include_role: + name: foo_app_instance + vars: + dir: '/opt/a' + app_port: 5000 + tags: typeA + ... + +When you add a :ref:`tag <tags>` to an ``include_role`` task, Ansible applies the tag `only` to the include itself. This means you can pass ``--tags`` to run only selected tasks from the role, if those tasks themselves have the same tag as the include statement. See :ref:`selective_reuse` for details. + +You can conditionally include a role: + +.. code-block:: yaml + + --- + - hosts: webservers + tasks: + - name: Include the some_role role + include_role: + name: some_role + when: "ansible_facts['os_family'] == 'RedHat'" + +Importing roles: static reuse +----------------------------- + +You can reuse roles statically anywhere in the ``tasks`` section of a play using ``import_role``. The behavior is the same as using the ``roles`` keyword. For example: + +.. code-block:: yaml + + --- + - hosts: webservers + tasks: + - name: Print a message + ansible.builtin.debug: + msg: "before we run our role" + + - name: Import the example role + import_role: + name: example + + - name: Print a message + ansible.builtin.debug: + msg: "after we ran our role" + +You can pass other keywords, including variables and tags, when importing roles: + +.. code-block:: yaml + + --- + - hosts: webservers + tasks: + - name: Import the foo_app_instance role + import_role: + name: foo_app_instance + vars: + dir: '/opt/a' + app_port: 5000 + ... + +When you add a tag to an ``import_role`` statement, Ansible applies the tag to `all` tasks within the role. See :ref:`tag_inheritance` for details. + +.. _run_role_twice: + +Running a role multiple times in one playbook +============================================= + +Ansible only executes each role once, even if you define it multiple times, unless the parameters defined on the role are different for each definition. 
For example, Ansible only runs the role ``foo`` once in a play like this: + +.. code-block:: yaml + + --- + - hosts: webservers + roles: + - foo + - bar + - foo + +You have two options to force Ansible to run a role more than once. + +Passing different parameters +---------------------------- + +You can pass different parameters in each role definition as: + +.. code-block:: yaml + + --- + - hosts: webservers + roles: + - { role: foo, vars: { message: "first" } } + - { role: foo, vars: { message: "second" } } + +or + +.. code-block:: yaml + + --- + - hosts: webservers + roles: + - role: foo + vars: + message: "first" + - role: foo + vars: + message: "second" + +In this example, because each role definition has different parameters, Ansible runs ``foo`` twice. + +Using ``allow_duplicates: true`` +-------------------------------- + +Add ``allow_duplicates: true`` to the ``meta/main.yml`` file for the role: + +.. code-block:: yaml + + # playbook.yml + --- + - hosts: webservers + roles: + - foo + - foo + + # roles/foo/meta/main.yml + --- + allow_duplicates: true + +In this example, Ansible runs ``foo`` twice because we have explicitly enabled it to do so. + +.. _role_dependencies: + +Using role dependencies +======================= + +Role dependencies let you automatically pull in other roles when using a role. Ansible does not execute role dependencies when you include or import a role. You must use the ``roles`` keyword if you want Ansible to execute role dependencies. + +Role dependencies are stored in the ``meta/main.yml`` file within the role directory. This file should contain a list of roles and parameters to insert before the specified role. For example: + +.. code-block:: yaml + + # roles/myapp/meta/main.yml + --- + dependencies: + - role: common + vars: + some_parameter: 3 + - role: apache + vars: + apache_port: 80 + - role: postgres + vars: + dbname: blarg + other_parameter: 12 + +Ansible always executes role dependencies before the role that includes them. 
Ansible executes recursive role dependencies as well. If one role depends on a second role, and the second role depends on a third role, Ansible executes the third role, then the second role, then the first role. + +Running role dependencies multiple times in one playbook +-------------------------------------------------------- + +Ansible treats duplicate role dependencies like duplicate roles listed under ``roles:``: Ansible only executes role dependencies once, even if defined multiple times, unless the parameters, tags, or when clause defined on the role are different for each definition. If two roles in a playbook both list a third role as a dependency, Ansible only runs that role dependency once, unless you pass different parameters, tags, when clause, or use ``allow_duplicates: true`` in the dependent (third) role. See :ref:`Galaxy role dependencies <galaxy_dependencies>` for more details. + +For example, a role named ``car`` depends on a role named ``wheel`` as follows: + +.. code-block:: yaml + + --- + dependencies: + - role: wheel + vars: + n: 1 + - role: wheel + vars: + n: 2 + - role: wheel + vars: + n: 3 + - role: wheel + vars: + n: 4 + +And the ``wheel`` role depends on two roles: ``tire`` and ``brake``. The ``meta/main.yml`` for wheel would then contain the following: + +.. code-block:: yaml + + --- + dependencies: + - role: tire + - role: brake + +And the ``meta/main.yml`` for ``tire`` and ``brake`` would contain the following: + +.. code-block:: yaml + + --- + allow_duplicates: true + +The resulting order of execution would be as follows: + +.. code-block:: text + + tire(n=1) + brake(n=1) + wheel(n=1) + tire(n=2) + brake(n=2) + wheel(n=2) + ... + car + +To use ``allow_duplicates: true`` with role dependencies, you must specify it for the dependent role, not for the parent role. In the example above, ``allow_duplicates: true`` appears in the ``meta/main.yml`` of the ``tire`` and ``brake`` roles. 
The ``wheel`` role does not require ``allow_duplicates: true``, because each instance defined by ``car`` uses different parameter values. + +.. note:: + See :ref:`playbooks_variables` for details on how Ansible chooses among variable values defined in different places (variable inheritance and scope). + +.. _embedding_modules_and_plugins_in_roles: + +Embedding modules and plugins in roles +====================================== + +If you write a custom module (see :ref:`developing_modules`) or a plugin (see :ref:`developing_plugins`), you might wish to distribute it as part of a role. For example, if you write a module that helps configure your company's internal software, and you want other people in your organization to use this module, but you do not want to tell everyone how to configure their Ansible library path, you can include the module in your internal_config role. + +To add a module or a plugin to a role: +Alongside the 'tasks' and 'handlers' structure of a role, add a directory named 'library' and then include the module directly inside the 'library' directory. + +Assuming you had this: + +.. code-block:: text + + roles/ + my_custom_modules/ + library/ + module1 + module2 + +The module will be usable in the role itself, as well as any roles that are called *after* this role, as follows: + +.. code-block:: yaml + + --- + - hosts: webservers + roles: + - my_custom_modules + - some_other_role_using_my_custom_modules + - yet_another_role_using_my_custom_modules + +If necessary, you can also embed a module in a role to modify a module in Ansible's core distribution. For example, you can use the development version of a particular module before it is released in production releases by copying the module and embedding the copy in a role. Use this approach with caution, as API signatures may change in core components, and this workaround is not guaranteed to work. 
+ +The same mechanism can be used to embed and distribute plugins in a role, using the same schema. For example, for a filter plugin: + +.. code-block:: text + + roles/ + my_custom_filter/ + filter_plugins + filter1 + filter2 + +These filters can then be used in a Jinja template in any role called after 'my_custom_filter'. + +Sharing roles: Ansible Galaxy +============================= + +`Ansible Galaxy <https://galaxy.ansible.com>`_ is a free site for finding, downloading, rating, and reviewing all kinds of community-developed Ansible roles and can be a great way to get a jumpstart on your automation projects. + +The client ``ansible-galaxy`` is included in Ansible. The Galaxy client allows you to download roles from Ansible Galaxy, and also provides an excellent default framework for creating your own roles. + +Read the `Ansible Galaxy documentation <https://galaxy.ansible.com/docs/>`_ page for more information + +.. seealso:: + + :ref:`ansible_galaxy` + How to create new roles, share roles on Galaxy, role management + :ref:`yaml_syntax` + Learn about YAML syntax + :ref:`working_with_playbooks` + Review the basic Playbook language features + :ref:`playbooks_best_practices` + Tips and tricks for playbooks + :ref:`playbooks_variables` + Variables in playbooks + :ref:`playbooks_conditionals` + Conditionals in playbooks + :ref:`playbooks_loops` + Loops in playbooks + :ref:`tags` + Using tags to select or skip roles/tasks in long playbooks + :ref:`list_of_collections` + Browse existing collections, modules, and plugins + :ref:`developing_modules` + Extending Ansible by writing your own modules + `GitHub Ansible examples <https://github.com/ansible/ansible-examples>`_ + Complete playbook files from the GitHub project source + `Mailing List <https://groups.google.com/group/ansible-project>`_ + Questions? Help? Ideas? 
Stop by the list on Google Groups diff --git a/docs/docsite/rst/user_guide/playbooks_roles.rst b/docs/docsite/rst/user_guide/playbooks_roles.rst new file mode 100644 index 00000000..f79e2308 --- /dev/null +++ b/docs/docsite/rst/user_guide/playbooks_roles.rst @@ -0,0 +1,19 @@ +:orphan: + +Playbook Roles and Include Statements +===================================== + +.. contents:: Topics + + +The documentation regarding roles and includes for playbooks have moved. Their new location is here: :ref:`playbooks_reuse`. Please update any links you may have made directly to this page. + +.. seealso:: + + :ref:`ansible_galaxy` + How to share roles on galaxy, role management + :ref:`working_with_playbooks` + Review the basic Playbook language features + :ref:`playbooks_reuse` + Creating reusable Playbooks. + diff --git a/docs/docsite/rst/user_guide/playbooks_special_topics.rst b/docs/docsite/rst/user_guide/playbooks_special_topics.rst new file mode 100644 index 00000000..5df72c11 --- /dev/null +++ b/docs/docsite/rst/user_guide/playbooks_special_topics.rst @@ -0,0 +1,8 @@ +:orphan: + +.. _playbooks_special_topics: + +Advanced playbooks features +=========================== + +This page is obsolete. Refer to the :ref:`main User Guide index page <user_guide_index>` for links to all playbook-related topics. Please update any links you may have made directly to this page. diff --git a/docs/docsite/rst/user_guide/playbooks_startnstep.rst b/docs/docsite/rst/user_guide/playbooks_startnstep.rst new file mode 100644 index 00000000..e3b62961 --- /dev/null +++ b/docs/docsite/rst/user_guide/playbooks_startnstep.rst @@ -0,0 +1,40 @@ +.. _playbooks_start_and_step: + +*************************************** +Executing playbooks for troubleshooting +*************************************** + +When you are testing new plays or debugging playbooks, you may need to run the same play multiple times. 
To make this more efficient, Ansible offers two alternative ways to execute a playbook: start-at-task and step mode. + +.. _start_at_task: + +start-at-task +------------- + +To start executing your playbook at a particular task (usually the task that failed on the previous run), use the ``--start-at-task`` option:: + + ansible-playbook playbook.yml --start-at-task="install packages" + +In this example, Ansible starts executing your playbook at a task named "install packages". This feature does not work with tasks inside dynamically re-used roles or tasks (``include_*``), see :ref:`dynamic_vs_static`. + +.. _step: + +Step mode +--------- + +To execute a playbook interactively, use ``--step``:: + + ansible-playbook playbook.yml --step + +With this option, Ansible stops on each task, and asks if it should execute that task. For example, if you have a task called "configure ssh", the playbook run will stop and ask:: + + Perform task: configure ssh (y/n/c): + +Answer "y" to execute the task, answer "n" to skip the task, and answer "c" to exit step mode, executing all remaining tasks without asking. + +.. seealso:: + + :ref:`playbooks_intro` + An introduction to playbooks + :ref:`playbook_debugger` + Using the Ansible debugger diff --git a/docs/docsite/rst/user_guide/playbooks_strategies.rst b/docs/docsite/rst/user_guide/playbooks_strategies.rst new file mode 100644 index 00000000..a97f0447 --- /dev/null +++ b/docs/docsite/rst/user_guide/playbooks_strategies.rst @@ -0,0 +1,216 @@ +.. _playbooks_strategies: + +Controlling playbook execution: strategies and more +=================================================== + +By default, Ansible runs each task on all hosts affected by a play before starting the next task on any host, using 5 forks. If you want to change this default behavior, you can use a different strategy plugin, change the number of forks, or apply one of several keywords like ``serial``. + +.. 
contents:: + :local: + +Selecting a strategy +-------------------- +The default behavior described above is the :ref:`linear strategy<linear_strategy>`. Ansible offers other strategies, including the :ref:`debug strategy<debug_strategy>` (see also :ref:`playbook_debugger`) and the :ref:`free strategy<free_strategy>`, which allows each host to run until the end of the play as fast as it can:: + + - hosts: all + strategy: free + tasks: + ... + +You can select a different strategy for each play as shown above, or set your preferred strategy globally in ``ansible.cfg``, under the ``defaults`` stanza:: + + [defaults] + strategy = free + +All strategies are implemented as :ref:`strategy plugins<strategy_plugins>`. Please review the documentation for each strategy plugin for details on how it works. + +Setting the number of forks +--------------------------- +If you have the processing power available and want to use more forks, you can set the number in ``ansible.cfg``:: + + [defaults] + forks = 30 + +or pass it on the command line: `ansible-playbook -f 30 my_playbook.yml`. + +Using keywords to control execution +----------------------------------- + +In addition to strategies, several :ref:`keywords<playbook_keywords>` also affect play execution. You can set a number, a percentage, or a list of numbers of hosts you want to manage at a time with ``serial``. Ansible completes the play on the specified number or percentage of hosts before starting the next batch of hosts. You can restrict the number of workers allotted to a block or task with ``throttle``. You can control how Ansible selects the next host in a group to execute against with ``order``. You can run a task on a single host with ``run_once``. These keywords are not strategies. They are directives or options applied to a play, block, or task. + +.. 
_rolling_update_batch_size: + +Setting the batch size with ``serial`` +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +By default, Ansible runs in parallel against all the hosts in the :ref:`pattern <intro_patterns>` you set in the ``hosts:`` field of each play. If you want to manage only a few machines at a time, for example during a rolling update, you can define how many hosts Ansible should manage at a single time using the ``serial`` keyword:: + + --- + - name: test play + hosts: webservers + serial: 2 + gather_facts: False + + tasks: + - name: first task + command: hostname + - name: second task + command: hostname + +In the above example, if we had 4 hosts in the group 'webservers', Ansible would execute the play completely (both tasks) on 2 of the hosts before moving on to the next 2 hosts:: + + + PLAY [webservers] **************************************** + + TASK [first task] **************************************** + changed: [web2] + changed: [web1] + + TASK [second task] *************************************** + changed: [web1] + changed: [web2] + + PLAY [webservers] **************************************** + + TASK [first task] **************************************** + changed: [web3] + changed: [web4] + + TASK [second task] *************************************** + changed: [web3] + changed: [web4] + + PLAY RECAP *********************************************** + web1 : ok=2 changed=2 unreachable=0 failed=0 + web2 : ok=2 changed=2 unreachable=0 failed=0 + web3 : ok=2 changed=2 unreachable=0 failed=0 + web4 : ok=2 changed=2 unreachable=0 failed=0 + + +You can also specify a percentage with the ``serial`` keyword. Ansible applies the percentage to the total number of hosts in a play to determine the number of hosts per pass:: + + --- + - name: test play + hosts: webservers + serial: "30%" + +If the number of hosts does not divide equally into the number of passes, the final pass contains the remainder. 
In this example, if you had 20 hosts in the webservers group, the first batch would contain 6 hosts, the second batch would contain 6 hosts, the third batch would contain 6 hosts, and the last batch would contain 2 hosts. + +You can also specify batch sizes as a list. For example:: + + --- + - name: test play + hosts: webservers + serial: + - 1 + - 5 + - 10 + +In the above example, the first batch would contain a single host, the next would contain 5 hosts, and (if there are any hosts left), every following batch would contain either 10 hosts or all the remaining hosts, if fewer than 10 hosts remained. + +You can list multiple batch sizes as percentages:: + + --- + - name: test play + hosts: webservers + serial: + - "10%" + - "20%" + - "100%" + +You can also mix and match the values:: + + --- + - name: test play + hosts: webservers + serial: + - 1 + - 5 + - "20%" + +.. note:: + No matter how small the percentage, the number of hosts per pass will always be 1 or greater. + +Restricting execution with ``throttle`` +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The ``throttle`` keyword limits the number of workers for a particular task. It can be set at the block and task level. Use ``throttle`` to restrict tasks that may be CPU-intensive or interact with a rate-limiting API:: + + tasks: + - command: /path/to/cpu_intensive_command + throttle: 1 + +If you have already restricted the number of forks or the number of machines to execute against in parallel, you can reduce the number of workers with ``throttle``, but you cannot increase it. In other words, to have an effect, your ``throttle`` setting must be lower than your ``forks`` or ``serial`` setting if you are using them together. + +Ordering execution based on inventory +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The ``order`` keyword controls the order in which hosts are run. 
Possible values for order are: + +inventory: + (default) The order provided in the inventory +reverse_inventory: + The reverse of the order provided by the inventory +sorted: + Sorted alphabetically by name +reverse_sorted: + Sorted by name in reverse alphabetical order +shuffle: + Randomly ordered on each run + +Other keywords that affect play execution include ``ignore_errors``, ``ignore_unreachable``, and ``any_errors_fatal``. These options are documented in :ref:`playbooks_error_handling`. + +.. _run_once: + +Running on a single machine with ``run_once`` +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +If you want a task to run only on the first host in your batch of hosts, set ``run_once`` to true on that task:: + + --- + # ... + + tasks: + + # ... + + - command: /opt/application/upgrade_db.py + run_once: true + + # ... + +Ansible executes this task on the first host in the current batch and applies all results and facts to all the hosts in the same batch. This approach is similar to applying a conditional to a task such as:: + + - command: /opt/application/upgrade_db.py + when: inventory_hostname == webservers[0] + +However, with ``run_once``, the results are applied to all the hosts. To run the task on a specific host, instead of the first host in the batch, delegate the task:: + + - command: /opt/application/upgrade_db.py + run_once: true + delegate_to: web01.example.org + +As always with :ref:`delegation <playbooks_delegation>`, the action will be executed on the delegated host, but the information is still that of the original host in the task. + +.. note:: + When used together with ``serial``, tasks marked as ``run_once`` will be run on one host in *each* serial batch. If the task must run only once regardless of ``serial`` mode, use the + :code:`when: inventory_hostname == ansible_play_hosts_all[0]` construct. + +.. 
note:: + Any conditional (in other words, `when:`) will use the variables of the 'first host' to decide if the task runs or not, no other hosts will be tested. + +.. note:: + If you want to avoid the default behavior of setting the fact for all hosts, set ``delegate_facts: True`` for the specific task or block. + +.. seealso:: + + :ref:`about_playbooks` + An introduction to playbooks + :ref:`playbooks_delegation` + Running tasks on or assigning facts to specific machines + :ref:`playbooks_reuse_roles` + Playbook organization by roles + `User Mailing List <https://groups.google.com/group/ansible-devel>`_ + Have a question? Stop by the google group! + `irc.freenode.net <http://irc.freenode.net>`_ + #ansible IRC chat channel diff --git a/docs/docsite/rst/user_guide/playbooks_tags.rst b/docs/docsite/rst/user_guide/playbooks_tags.rst new file mode 100644 index 00000000..93c26636 --- /dev/null +++ b/docs/docsite/rst/user_guide/playbooks_tags.rst @@ -0,0 +1,428 @@ +.. _tags: + +**** +Tags +**** + +If you have a large playbook, it may be useful to run only specific parts of it instead of running the entire playbook. You can do this with Ansible tags. Using tags to execute or skip selected tasks is a two-step process: + + #. Add tags to your tasks, either individually or with tag inheritance from a block, play, role, or import. + #. Select or skip tags when you run your playbook. + +.. contents:: + :local: + +Adding tags with the tags keyword +================================= + +You can add tags to a single task or include. You can also add tags to multiple tasks by defining them at the level of a block, play, role, or import. The keyword ``tags`` addresses all these use cases. The ``tags`` keyword always defines tags and adds them to tasks; it does not select or skip tasks for execution. You can only select or skip tasks based on tags at the command line when you run a playbook. See :ref:`using_tags` for more details. 
+ +Adding tags to individual tasks +------------------------------- + +At the simplest level, you can apply one or more tags to an individual task. You can add tags to tasks in playbooks, in task files, or within a role. Here is an example that tags two tasks with different tags: + +.. code-block:: yaml + + tasks: + - name: Install the servers + ansible.builtin.yum: + name: + - httpd + - memcached + state: present + tags: + - packages + - webservers + + - name: Configure the service + ansible.builtin.template: + src: templates/src.j2 + dest: /etc/foo.conf + tags: + - configuration + +You can apply the same tag to more than one individual task. This example tags several tasks with the same tag, "ntp": + +.. code-block:: yaml + + --- + # file: roles/common/tasks/main.yml + + - name: Install ntp + ansible.builtin.yum: + name: ntp + state: present + tags: ntp + + - name: Configure ntp + ansible.builtin.template: + src: ntp.conf.j2 + dest: /etc/ntp.conf + notify: + - restart ntpd + tags: ntp + + - name: Enable and run ntpd + ansible.builtin.service: + name: ntpd + state: started + enabled: yes + tags: ntp + + - name: Install NFS utils + ansible.builtin.yum: + name: + - nfs-utils + - nfs-util-lib + state: present + tags: filesharing + +If you ran these four tasks in a playbook with ``--tags ntp``, Ansible would run the three tasks tagged ``ntp`` and skip the one task that does not have that tag. + +.. _tags_on_includes: + +Adding tags to includes +----------------------- + +You can apply tags to dynamic includes in a playbook. As with tags on an individual task, tags on an ``include_*`` task apply only to the include itself, not to any tasks within the included file or role. If you add ``mytag`` to a dynamic include, then run that playbook with ``--tags mytag``, Ansible runs the include itself, runs any tasks within the included file or role tagged with ``mytag``, and skips any tasks within the included file or role without that tag. 
See :ref:`selective_reuse` for more details. + +You add tags to includes the same way you add tags to any other task: + +.. code-block:: yaml + + --- + # file: roles/common/tasks/main.yml + + - name: Dynamic re-use of database tasks + include_tasks: db.yml + tags: db + +You can add a tag only to the dynamic include of a role. In this example, the ``foo`` tag will `not` apply to tasks inside the ``bar`` role: + +.. code-block:: yaml + + --- + - hosts: webservers + tasks: + - name: Include the bar role + include_role: + name: bar + tags: + - foo + +With plays, blocks, the ``role`` keyword, and static imports, Ansible applies tag inheritance, adding the tags you define to every task inside the play, block, role, or imported file. However, tag inheritance does *not* apply to dynamic re-use with ``include_role`` and ``include_tasks``. With dynamic re-use (includes), the tags you define apply only to the include itself. If you need tag inheritance, use a static import. If you cannot use an import because the rest of your playbook uses includes, see :ref:`apply_keyword` for ways to work around this behavior. + +.. _tag_inheritance: + +Tag inheritance: adding tags to multiple tasks +---------------------------------------------- + +If you want to apply the same tag or tags to multiple tasks without adding a ``tags`` line to every task, you can define the tags at the level of your play or block, or when you add a role or import a file. Ansible applies the tags down the dependency chain to all child tasks. With roles and imports, Ansible appends the tags set by the ``roles`` section or import to any tags set on individual tasks or blocks within the role or imported file. This is called tag inheritance. Tag inheritance is convenient, because you do not have to tag every task. However, the tags still apply to the tasks individually. 
+ +Adding tags to blocks +^^^^^^^^^^^^^^^^^^^^^ + +If you want to apply a tag to many, but not all, of the tasks in your play, use a :ref:`block <playbooks_blocks>` and define the tags at that level. For example, we could edit the NTP example shown above to use a block: + +.. code-block:: yaml + + # myrole/tasks/main.yml + tasks: + - block: + tags: ntp + - name: Install ntp + ansible.builtin.yum: + name: ntp + state: present + + - name: Configure ntp + ansible.builtin.template: + src: ntp.conf.j2 + dest: /etc/ntp.conf + notify: + - restart ntpd + + - name: Enable and run ntpd + ansible.builtin.service: + name: ntpd + state: started + enabled: yes + + - name: Install NFS utils + ansible.builtin.yum: + name: + - nfs-utils + - nfs-util-lib + state: present + tags: filesharing + +Adding tags to plays +^^^^^^^^^^^^^^^^^^^^ + +If all the tasks in a play should get the same tag, you can add the tag at the level of the play. For example, if you had a play with only the NTP tasks, you could tag the entire play: + +.. code-block:: yaml + + - hosts: all + tags: ntp + tasks: + - name: Install ntp + ansible.builtin.yum: + name: ntp + state: present + + - name: Configure ntp + ansible.builtin.template: + src: ntp.conf.j2 + dest: /etc/ntp.conf + notify: + - restart ntpd + + - name: Enable and run ntpd + ansible.builtin.service: + name: ntpd + state: started + enabled: yes + + - hosts: fileservers + tags: filesharing + tasks: + ... + +Adding tags to roles +^^^^^^^^^^^^^^^^^^^^ + +There are three ways to add tags to roles: + + #. Add the same tag or tags to all tasks in the role by setting tags under ``roles``. See examples in this section. + #. Add the same tag or tags to all tasks in the role by setting tags on a static ``import_role`` in your playbook. See examples in :ref:`tags_on_imports`. + #. Add a tag or tags to individual tasks or blocks within the role itself. This is the only approach that allows you to select or skip some tasks within the role. 
To select or skip tasks within the role, you must have tags set on individual tasks or blocks, use the dynamic ``include_role`` in your playbook, and add the same tag or tags to the include. When you use this approach, and then run your playbook with ``--tags foo``, Ansible runs the include itself plus any tasks in the role that also have the tag ``foo``. See :ref:`tags_on_includes` for details. + +When you incorporate a role in your playbook statically with the ``roles`` keyword, Ansible adds any tags you define to all the tasks in the role. For example: + +.. code-block:: yaml + + roles: + - role: webserver + vars: + port: 5000 + tags: [ web, foo ] + +or: + +.. code-block:: yaml + + --- + - hosts: webservers + roles: + - role: foo + tags: + - bar + - baz + # using YAML shorthand, this is equivalent to: + # - { role: foo, tags: ["bar", "baz"] } + +.. _tags_on_imports: + +Adding tags to imports +^^^^^^^^^^^^^^^^^^^^^^ + +You can also apply a tag or tags to all the tasks imported by the static ``import_role`` and ``import_tasks`` statements: + +.. code-block:: yaml + + --- + - hosts: webservers + tasks: + - name: Import the foo role + import_role: + name: foo + tags: + - bar + - baz + + - name: Import tasks from foo.yml + import_tasks: foo.yml + tags: [ web, foo ] + +.. _apply_keyword: + +Tag inheritance for includes: blocks and the ``apply`` keyword +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +By default, Ansible does not apply :ref:`tag inheritance <tag_inheritance>` to dynamic re-use with ``include_role`` and ``include_tasks``. If you add tags to an include, they apply only to the include itself, not to any tasks in the included file or role. This allows you to execute selected tasks within a role or task file - see :ref:`selective_reuse` when you run your playbook. + +If you want tag inheritance, you probably want to use imports. However, using both includes and imports in a single playbook can lead to difficult-to-diagnose bugs. 
For this reason, if your playbook uses ``include_*`` to re-use roles or tasks, and you need tag inheritance on one include, Ansible offers two workarounds. You can use the ``apply`` keyword: + +.. code-block:: yaml + + - name: Apply the db tag to the include and to all tasks in db.yaml + include_tasks: + file: db.yml + # adds 'db' tag to tasks within db.yml + apply: + tags: db + # adds 'db' tag to this 'include_tasks' itself + tags: db + +Or you can use a block: + +.. code-block:: yaml + + - block: + - name: Include tasks from db.yml + include_tasks: db.yml + tags: db + +.. _special_tags: + +Special tags: always and never +============================== + +Ansible reserves two tag names for special behavior: always and never. If you assign the ``always`` tag to a task or play, Ansible will always run that task or play, unless you specifically skip it (``--skip-tags always``). + +For example: + +.. code-block:: yaml + + tasks: + - name: Print a message + ansible.builtin.debug: + msg: "Always runs" + tags: + - always + + - name: Print a message + ansible.builtin.debug: + msg: "runs when you use tag1" + tags: + - tag1 + +.. warning:: + * Fact gathering is tagged with 'always' by default. It is only skipped if + you apply a tag and then use a different tag in ``--tags`` or the same + tag in ``--skip-tags``. + +.. versionadded:: 2.5 + +If you assign the ``never`` tag to a task or play, Ansible will skip that task or play unless you specifically request it (``--tags never``). + +For example: + +.. code-block:: yaml + + tasks: + - name: Run the rarely-used debug task + ansible.builtin.debug: + msg: '{{ showmevar }}' + tags: [ never, debug ] + +The rarely-used debug task in the example above only runs when you specifically request the ``debug`` or ``never`` tags. + +.. 
_using_tags: + +Selecting or skipping tags when you run a playbook +================================================== + +Once you have added tags to your tasks, includes, blocks, plays, roles, and imports, you can selectively execute or skip tasks based on their tags when you run :ref:`ansible-playbook`. Ansible runs or skips all tasks with tags that match the tags you pass at the command line. If you have added a tag at the block or play level, with ``roles``, or with an import, that tag applies to every task within the block, play, role, or imported role or file. If you have a role with lots of tags and you want to call subsets of the role at different times, either :ref:`use it with dynamic includes <selective_reuse>`, or split the role into multiple roles. + +:ref:`ansible-playbook` offers five tag-related command-line options: + +* ``--tags all`` - run all tasks, ignore tags (default behavior) +* ``--tags [tag1, tag2]`` - run only tasks with the tags ``tag1`` and ``tag2`` +* ``--skip-tags [tag3, tag4]`` - run all tasks except those with the tags ``tag3`` and ``tag4`` +* ``--tags tagged`` - run only tasks with at least one tag +* ``--tags untagged`` - run only tasks with no tags + +For example, to run only tasks and blocks tagged ``configuration`` and ``packages`` in a very long playbook: + +.. code-block:: bash + + ansible-playbook example.yml --tags "configuration,packages" + +To run all tasks except those tagged ``packages``: + +.. code-block:: bash + + ansible-playbook example.yml --skip-tags "packages" + +Previewing the results of using tags +------------------------------------ + +When you run a role or playbook, you might not know or remember which tasks have which tags, or which tags exist at all. 
Ansible offers two command-line flags for :ref:`ansible-playbook` that help you manage tagged playbooks: + +* ``--list-tags`` - generate a list of available tags +* ``--list-tasks`` - when used with ``--tags tagname`` or ``--skip-tags tagname``, generate a preview of tagged tasks + +For example, if you do not know whether the tag for configuration tasks is ``config`` or ``conf`` in a playbook, role, or tasks file, you can display all available tags without running any tasks: + +.. code-block:: bash + + ansible-playbook example.yml --list-tags + +If you do not know which tasks have the tags ``configuration`` and ``packages``, you can pass those tags and add ``--list-tasks``. Ansible lists the tasks but does not execute any of them. + +.. code-block:: bash + + ansible-playbook example.yml --tags "configuration,packages" --list-tasks + +These command-line flags have one limitation: they cannot show tags or tasks within dynamically included files or roles. See :ref:`dynamic_vs_static` for more information on differences between static imports and dynamic includes. + +.. _selective_reuse: + +Selectively running tagged tasks in re-usable files +--------------------------------------------------- + +If you have a role or a tasks file with tags defined at the task or block level, you can selectively run or skip those tagged tasks in a playbook if you use a dynamic include instead of a static import. You must use the same tag on the included tasks and on the include statement itself. For example you might create a file with some tagged and some untagged tasks: + +.. code-block:: yaml + + # mixed.yml + tasks: + - name: Run the task with no tags + ansible.builtin.debug: + msg: this task has no tags + + - name: Run the tagged task + ansible.builtin.debug: + msg: this task is tagged with mytag + tags: mytag + + - block: + - name: Run the first block task with mytag + ... + - name: Run the second block task with mytag + ... 
+ tags: + - mytag + +And you might include the tasks file above in a playbook: + +.. code-block:: yaml + + # myplaybook.yml + - hosts: all + tasks: + - name: Run tasks from mixed.yml + include_tasks: + name: mixed.yml + tags: mytag + +When you run the playbook with ``ansible-playbook -i hosts myplaybook.yml --tags "mytag"``, Ansible skips the task with no tags, runs the tagged individual task, and runs the two tasks in the block. + +Configuring tags globally +------------------------- + +If you run or skip certain tags by default, you can use the :ref:`TAGS_RUN` and :ref:`TAGS_SKIP` options in Ansible configuration to set those defaults. + +.. seealso:: + + :ref:`playbooks_intro` + An introduction to playbooks + :ref:`playbooks_reuse_roles` + Playbook organization by roles + `User Mailing List <https://groups.google.com/group/ansible-devel>`_ + Have a question? Stop by the google group! + `irc.freenode.net <http://irc.freenode.net>`_ + #ansible IRC chat channel diff --git a/docs/docsite/rst/user_guide/playbooks_templating.rst b/docs/docsite/rst/user_guide/playbooks_templating.rst new file mode 100644 index 00000000..162ab813 --- /dev/null +++ b/docs/docsite/rst/user_guide/playbooks_templating.rst @@ -0,0 +1,55 @@ +.. _playbooks_templating: + +******************* +Templating (Jinja2) +******************* + +Ansible uses Jinja2 templating to enable dynamic expressions and access to variables. Ansible includes a lot of specialized filters and tests for templating. You can use all the standard filters and tests included in Jinja2 as well. Ansible also offers a new plugin type: :ref:`lookup_plugins`. + +All templating happens on the Ansible controller **before** the task is sent and executed on the target machine. This approach minimizes the package requirements on the target (jinja2 is only required on the controller). It also limits the amount of data Ansible passes to the target machine. 
Ansible parses templates on the controller and passes only the information needed for each task to the target machine, instead of passing all the data on the controller and parsing it on the target. + +.. contents:: + :local: + +.. toctree:: + :maxdepth: 2 + + playbooks_filters + playbooks_tests + playbooks_lookups + playbooks_python_version + +.. _templating_now: + +Get the current time +==================== + +.. versionadded:: 2.8 + +The ``now()`` Jinja2 function retrieves a Python datetime object or a string representation for the current time. + +The ``now()`` function supports 2 arguments: + +utc + Specify ``True`` to get the current time in UTC. Defaults to ``False``. + +fmt + Accepts a `strftime <https://docs.python.org/3/library/datetime.html#strftime-strptime-behavior>`_ string that returns a formatted date time string. + + +.. seealso:: + + :ref:`playbooks_intro` + An introduction to playbooks + :ref:`playbooks_conditionals` + Conditional statements in playbooks + :ref:`playbooks_loops` + Looping in playbooks + :ref:`playbooks_reuse_roles` + Playbook organization by roles + :ref:`playbooks_best_practices` + Tips and tricks for playbooks + `User Mailing List <https://groups.google.com/group/ansible-devel>`_ + Have a question? Stop by the google group! + `irc.freenode.net <http://irc.freenode.net>`_ + #ansible IRC chat channel diff --git a/docs/docsite/rst/user_guide/playbooks_tests.rst b/docs/docsite/rst/user_guide/playbooks_tests.rst new file mode 100644 index 00000000..0a1aa8d9 --- /dev/null +++ b/docs/docsite/rst/user_guide/playbooks_tests.rst @@ -0,0 +1,395 @@ +.. _playbooks_tests: + +***** +Tests +***** + +`Tests <http://jinja.pocoo.org/docs/dev/templates/#tests>`_ in Jinja are a way of evaluating template expressions and returning True or False. Jinja ships with many of these. See `builtin tests`_ in the official Jinja template documentation. 
+
+The main difference between tests and filters is that Jinja tests are used for comparisons, whereas filters are used for data manipulation, and have different applications in jinja. Tests can also be used in list processing filters, like ``map()`` and ``select()`` to choose items in the list.
+
+Like all templating, tests always execute on the Ansible controller, **not** on the target of a task, as they test local data.
+
+In addition to those Jinja2 tests, Ansible supplies a few more and users can easily create their own.
+
+.. contents::
+   :local:
+
+.. _test_syntax:

+
+Test syntax
+===========
+
+`Test syntax <http://jinja.pocoo.org/docs/dev/templates/#tests>`_ varies from `filter syntax <http://jinja.pocoo.org/docs/dev/templates/#filters>`_ (``variable | filter``). Historically Ansible has registered tests as both jinja tests and jinja filters, allowing for them to be referenced using filter syntax.
+
+As of Ansible 2.5, using a jinja test as a filter will generate a warning.
+
+The syntax for using a jinja test is as follows::
+
+    variable is test_name
+
+Such as::
+
+    result is failed
+
+.. _testing_strings:
+
+Testing strings
+===============
+
+To match strings against a substring or a regular expression, use the ``match``, ``search`` or ``regex`` tests::
+
+    vars:
+      url: "http://example.com/users/foo/resources/bar"
+
+    tasks:
+        - debug:
+            msg: "matched pattern 1"
+          when: url is match("http://example.com/users/.*/resources/")
+
+        - debug:
+            msg: "matched pattern 2"
+          when: url is search("/users/.*/resources/.*")
+
+        - debug:
+            msg: "matched pattern 3"
+          when: url is search("/users/")
+
+        - debug:
+            msg: "matched pattern 4"
+          when: url is regex("example.com/\w+/foo")
+
+``match`` succeeds if it finds the pattern at the beginning of the string, while ``search`` succeeds if it finds the pattern anywhere within the string.
By default, ``regex`` works like ``search``, but ``regex`` can be configured to perform other tests as well, by passing the ``match_type`` keyword argument. In particular, ``match_type`` determines the ``re`` method that gets used to perform the search. The full list can be found in the relevant Python documentation `here <https://docs.python.org/3/library/re.html#regular-expression-objects>`_. + +All of the string tests also take optional ``ignorecase`` and ``multiline`` arguments. These correspond to ``re.I`` and ``re.M`` from Python's ``re`` library, respectively. + +.. _testing_vault: + +Vault +===== + +.. versionadded:: 2.10 + +You can test whether a variable is an inline single vault encrypted value using the ``vault_encrypted`` test. + +.. code-block:: yaml + + vars: + variable: !vault | + $ANSIBLE_VAULT;1.2;AES256;dev + 61323931353866666336306139373937316366366138656131323863373866376666353364373761 + 3539633234313836346435323766306164626134376564330a373530313635343535343133316133 + 36643666306434616266376434363239346433643238336464643566386135356334303736353136 + 6565633133366366360a326566323363363936613664616364623437336130623133343530333739 + 3039 + + tasks: + - debug: + msg: '{{ (variable is vault_encrypted) | ternary("Vault encrypted", "Not vault encrypted") }}' + +.. _testing_truthiness: + +Testing truthiness +================== + +.. versionadded:: 2.10 + +As of Ansible 2.10, you can now perform Python like truthy and falsy checks. + +.. code-block:: yaml + + - debug: + msg: "Truthy" + when: value is truthy + vars: + value: "some string" + + - debug: + msg: "Falsy" + when: value is falsy + vars: + value: "" + +Additionally, the ``truthy`` and ``falsy`` tests accept an optional parameter called ``convert_bool`` that will attempt +to convert boolean indicators to actual booleans. + +.. 
code-block:: yaml + + - debug: + msg: "Truthy" + when: value is truthy(convert_bool=True) + vars: + value: "yes" + + - debug: + msg: "Falsy" + when: value is falsy(convert_bool=True) + vars: + value: "off" + +.. _testing_versions: + +Comparing versions +================== + +.. versionadded:: 1.6 + +.. note:: In 2.5 ``version_compare`` was renamed to ``version`` + +To compare a version number, such as checking if the ``ansible_facts['distribution_version']`` +version is greater than or equal to '12.04', you can use the ``version`` test. + +The ``version`` test can also be used to evaluate the ``ansible_facts['distribution_version']``:: + + {{ ansible_facts['distribution_version'] is version('12.04', '>=') }} + +If ``ansible_facts['distribution_version']`` is greater than or equal to 12.04, this test returns True, otherwise False. + +The ``version`` test accepts the following operators:: + + <, lt, <=, le, >, gt, >=, ge, ==, =, eq, !=, <>, ne + +This test also accepts a 3rd parameter, ``strict`` which defines if strict version parsing as defined by ``distutils.version.StrictVersion`` should be used. The default is ``False`` (using ``distutils.version.LooseVersion``), ``True`` enables strict version parsing:: + + {{ sample_version_var is version('1.0', operator='lt', strict=True) }} + +When using ``version`` in a playbook or role, don't use ``{{ }}`` as described in the `FAQ <https://docs.ansible.com/ansible/latest/reference_appendices/faq.html#when-should-i-use-also-how-to-interpolate-variables-or-dynamic-variable-names>`_:: + + vars: + my_version: 1.2.3 + + tasks: + - debug: + msg: "my_version is higher than 1.0.0" + when: my_version is version('1.0.0', '>') + +.. _math_tests: + +Set theory tests +================ + +.. versionadded:: 2.1 + +.. 
note:: In 2.5 ``issubset`` and ``issuperset`` were renamed to ``subset`` and ``superset`` + +To see if a list includes or is included by another list, you can use 'subset' and 'superset':: + + vars: + a: [1,2,3,4,5] + b: [2,3] + tasks: + - debug: + msg: "A includes B" + when: a is superset(b) + + - debug: + msg: "B is included in A" + when: b is subset(a) + +.. _contains_test: + +Testing if a list contains a value +================================== + +.. versionadded:: 2.8 + +Ansible includes a ``contains`` test which operates similarly, but in reverse of the Jinja2 provided ``in`` test. +The ``contains`` test is designed to work with the ``select``, ``reject``, ``selectattr``, and ``rejectattr`` filters:: + + vars: + lacp_groups: + - master: lacp0 + network: 10.65.100.0/24 + gateway: 10.65.100.1 + dns4: + - 10.65.100.10 + - 10.65.100.11 + interfaces: + - em1 + - em2 + + - master: lacp1 + network: 10.65.120.0/24 + gateway: 10.65.120.1 + dns4: + - 10.65.100.10 + - 10.65.100.11 + interfaces: + - em3 + - em4 + + tasks: + - debug: + msg: "{{ (lacp_groups|selectattr('interfaces', 'contains', 'em1')|first).master }}" + +.. versionadded:: 2.4 + +Testing if a list value is True +=============================== + +You can use `any` and `all` to check if any or all elements in a list are true or not:: + + vars: + mylist: + - 1 + - "{{ 3 == 3 }}" + - True + myotherlist: + - False + - True + tasks: + + - debug: + msg: "all are true!" + when: mylist is all + + - debug: + msg: "at least one is true" + when: myotherlist is any + +.. _path_tests: + +Testing paths +============= + +.. 
note:: In 2.5 the following tests were renamed to remove the ``is_`` prefix + +The following tests can provide information about a path on the controller:: + + - debug: + msg: "path is a directory" + when: mypath is directory + + - debug: + msg: "path is a file" + when: mypath is file + + - debug: + msg: "path is a symlink" + when: mypath is link + + - debug: + msg: "path already exists" + when: mypath is exists + + - debug: + msg: "path is {{ (mypath is abs)|ternary('absolute','relative')}}" + + - debug: + msg: "path is the same file as path2" + when: mypath is same_file(path2) + + - debug: + msg: "path is a mount" + when: mypath is mount + + +Testing size formats +==================== + +The ``human_readable`` and ``human_to_bytes`` functions let you test your +playbooks to make sure you are using the right size format in your tasks, and that +you provide Byte format to computers and human-readable format to people. + +Human readable +-------------- + +Asserts whether the given string is human readable or not. + +For example:: + + - name: "Human Readable" + assert: + that: + - '"1.00 Bytes" == 1|human_readable' + - '"1.00 bits" == 1|human_readable(isbits=True)' + - '"10.00 KB" == 10240|human_readable' + - '"97.66 MB" == 102400000|human_readable' + - '"0.10 GB" == 102400000|human_readable(unit="G")' + - '"0.10 Gb" == 102400000|human_readable(isbits=True, unit="G")' + +This would result in:: + + { "changed": false, "msg": "All assertions passed" } + +Human to bytes +-------------- + +Returns the given string in the Bytes format. 
+ +For example:: + + - name: "Human to Bytes" + assert: + that: + - "{{'0'|human_to_bytes}} == 0" + - "{{'0.1'|human_to_bytes}} == 0" + - "{{'0.9'|human_to_bytes}} == 1" + - "{{'1'|human_to_bytes}} == 1" + - "{{'10.00 KB'|human_to_bytes}} == 10240" + - "{{ '11 MB'|human_to_bytes}} == 11534336" + - "{{ '1.1 GB'|human_to_bytes}} == 1181116006" + - "{{'10.00 Kb'|human_to_bytes(isbits=True)}} == 10240" + +This would result in:: + + { "changed": false, "msg": "All assertions passed" } + + +.. _test_task_results: + +Testing task results +==================== + +The following tasks are illustrative of the tests meant to check the status of tasks:: + + tasks: + + - shell: /usr/bin/foo + register: result + ignore_errors: True + + - debug: + msg: "it failed" + when: result is failed + + # in most cases you'll want a handler, but if you want to do something right now, this is nice + - debug: + msg: "it changed" + when: result is changed + + - debug: + msg: "it succeeded in Ansible >= 2.1" + when: result is succeeded + + - debug: + msg: "it succeeded" + when: result is success + + - debug: + msg: "it was skipped" + when: result is skipped + +.. note:: From 2.1, you can also use success, failure, change, and skip so that the grammar matches, for those who need to be strict about it. + + +.. _builtin tests: http://jinja.palletsprojects.com/templates/#builtin-tests + +.. seealso:: + + :ref:`playbooks_intro` + An introduction to playbooks + :ref:`playbooks_conditionals` + Conditional statements in playbooks + :ref:`playbooks_variables` + All about variables + :ref:`playbooks_loops` + Looping in playbooks + :ref:`playbooks_reuse_roles` + Playbook organization by roles + :ref:`playbooks_best_practices` + Tips and tricks for playbooks + `User Mailing List <https://groups.google.com/group/ansible-devel>`_ + Have a question? Stop by the google group! 
+ `irc.freenode.net <http://irc.freenode.net>`_ + #ansible IRC chat channel diff --git a/docs/docsite/rst/user_guide/playbooks_variables.rst b/docs/docsite/rst/user_guide/playbooks_variables.rst new file mode 100644 index 00000000..eb2b58f7 --- /dev/null +++ b/docs/docsite/rst/user_guide/playbooks_variables.rst @@ -0,0 +1,466 @@ +.. _playbooks_variables: + +*************** +Using Variables +*************** + +Ansible uses variables to manage differences between systems. With Ansible, you can execute tasks and playbooks on multiple different systems with a single command. To represent the variations among those different systems, you can create variables with standard YAML syntax, including lists and dictionaries. You can define these variables in your playbooks, in your :ref:`inventory <intro_inventory>`, in re-usable :ref:`files <playbooks_reuse>` or :ref:`roles <playbooks_reuse_roles>`, or at the command line. You can also create variables during a playbook run by registering the return value or values of a task as a new variable. + +After you create variables, either by defining them in a file, passing them at the command line, or registering the return value or values of a task as a new variable, you can use those variables in module arguments, in :ref:`conditional "when" statements <playbooks_conditionals>`, in :ref:`templates <playbooks_templating>`, and in :ref:`loops <playbooks_loops>`. The `ansible-examples github repository <https://github.com/ansible/ansible-examples>`_ contains many examples of using variables in Ansible. + +Once you understand the concepts and examples on this page, read about :ref:`Ansible facts <vars_and_facts>`, which are variables you retrieve from remote systems. + +.. contents:: + :local: + +.. _valid_variable_names: + +Creating valid variable names +============================= + +Not all strings are valid Ansible variable names. A variable name can only include letters, numbers, and underscores. 
`Python keywords`_ or :ref:`playbook keywords<playbook_keywords>` are not valid variable names. A variable name cannot begin with a number. + +Variable names can begin with an underscore. In many programming languages, variables that begin with an underscore are private. This is not true in Ansible. Variables that begin with an underscore are treated exactly the same as any other variable. Do not rely on this convention for privacy or security. + +This table gives examples of valid and invalid variable names: + +.. table:: + :class: documentation-table + + ====================== ==================================================================== + Valid variable names Not valid + ====================== ==================================================================== + ``foo`` ``*foo``, `Python keywords`_ such as ``async`` and ``lambda`` + + ``foo_env`` :ref:`playbook keywords<playbook_keywords>` such as ``environment`` + + ``foo_port`` ``foo-port``, ``foo port``, ``foo.port`` + + ``foo5``, ``_foo`` ``5foo``, ``12`` + ====================== ==================================================================== + +.. _Python keywords: https://docs.python.org/3/reference/lexical_analysis.html#keywords + +Simple variables +================ + +Simple variables combine a variable name with a single value. You can use this syntax (and the syntax for lists and dictionaries shown below) in a variety of places. For details about setting variables in inventory, in playbooks, in reusable files, in roles, or at the command line, see :ref:`setting_variables`. + +Defining simple variables +------------------------- + +You can define a simple variable using standard YAML syntax. For example:: + + remote_install_path: /opt/my_app_config + +Referencing simple variables +---------------------------- + +After you define a variable, use Jinja2 syntax to reference it. Jinja2 variables use double curly braces. 
For example, the expression ``My amp goes to {{ max_amp_value }}`` demonstrates the most basic form of variable substitution. You can use Jinja2 syntax in playbooks. For example:: + + ansible.builtin.template: + src: foo.cfg.j2 + dest: '{{ remote_install_path }}/foo.cfg' + +In this example, the variable defines the location of a file, which can vary from one system to another. + +.. note:: + + Ansible allows Jinja2 loops and conditionals in :ref:`templates <playbooks_templating>` but not in playbooks. You cannot create a loop of tasks. Ansible playbooks are pure machine-parseable YAML. + +.. _yaml_gotchas: + +When to quote variables (a YAML gotcha) +======================================= + +If you start a value with ``{{ foo }}``, you must quote the whole expression to create valid YAML syntax. If you do not quote the whole expression, the YAML parser cannot interpret the syntax - it might be a variable or it might be the start of a YAML dictionary. For guidance on writing YAML, see the :ref:`yaml_syntax` documentation. + +If you use a variable without quotes like this:: + + - hosts: app_servers + vars: + app_path: {{ base_path }}/22 + +You will see: ``ERROR! Syntax Error while loading YAML.`` If you add quotes, Ansible works correctly:: + + - hosts: app_servers + vars: + app_path: "{{ base_path }}/22" + +.. _list_variables: + +List variables +============== + +A list variable combines a variable name with multiple values. The multiple values can be stored as an itemized list or in square brackets ``[]``, separated with commas. + +Defining variables as lists +--------------------------- + +You can define variables with multiple values using YAML lists. For example:: + + region: + - northeast + - southeast + - midwest + +Referencing list variables +-------------------------- + +When you use variables defined as a list (also called an array), you can use individual, specific fields from that list. The first item in a list is item 0, the second item is item 1. 
For example:: + + region: "{{ region[0] }}" + +The value of this expression would be "northeast". + +.. _dictionary_variables: + +Dictionary variables +==================== + +A dictionary stores the data in key-value pairs. Usually, dictionaries are used to store related data, such as the information contained in an ID or a user profile. + +Defining variables as key:value dictionaries +-------------------------------------------- + +You can define more complex variables using YAML dictionaries. A YAML dictionary maps keys to values. For example:: + + foo: + field1: one + field2: two + +Referencing key:value dictionary variables +------------------------------------------ + +When you use variables defined as a key:value dictionary (also called a hash), you can use individual, specific fields from that dictionary using either bracket notation or dot notation:: + + foo['field1'] + foo.field1 + +Both of these examples reference the same value ("one"). Bracket notation always works. Dot notation can cause problems because some keys collide with attributes and methods of python dictionaries. 
Use bracket notation if you use keys which start and end with two underscores (which are reserved for special meanings in python) or are any of the known public attributes: + +``add``, ``append``, ``as_integer_ratio``, ``bit_length``, ``capitalize``, ``center``, ``clear``, ``conjugate``, ``copy``, ``count``, ``decode``, ``denominator``, ``difference``, ``difference_update``, ``discard``, ``encode``, ``endswith``, ``expandtabs``, ``extend``, ``find``, ``format``, ``fromhex``, ``fromkeys``, ``get``, ``has_key``, ``hex``, ``imag``, ``index``, ``insert``, ``intersection``, ``intersection_update``, ``isalnum``, ``isalpha``, ``isdecimal``, ``isdigit``, ``isdisjoint``, ``is_integer``, ``islower``, ``isnumeric``, ``isspace``, ``issubset``, ``issuperset``, ``istitle``, ``isupper``, ``items``, ``iteritems``, ``iterkeys``, ``itervalues``, ``join``, ``keys``, ``ljust``, ``lower``, ``lstrip``, ``numerator``, ``partition``, ``pop``, ``popitem``, ``real``, ``remove``, ``replace``, ``reverse``, ``rfind``, ``rindex``, ``rjust``, ``rpartition``, ``rsplit``, ``rstrip``, ``setdefault``, ``sort``, ``split``, ``splitlines``, ``startswith``, ``strip``, ``swapcase``, ``symmetric_difference``, ``symmetric_difference_update``, ``title``, ``translate``, ``union``, ``update``, ``upper``, ``values``, ``viewitems``, ``viewkeys``, ``viewvalues``, ``zfill``. + +.. _registered_variables: + +Registering variables +===================== + +You can create variables from the output of an Ansible task with the task keyword ``register``. You can use registered variables in any later tasks in your play. 
For example:: + + - hosts: web_servers + + tasks: + + - name: Run a shell command and register its output as a variable + ansible.builtin.shell: /usr/bin/foo + register: foo_result + ignore_errors: true + + - name: Run a shell command using output of the previous task + ansible.builtin.shell: /usr/bin/bar + when: foo_result.rc == 5 + +For more examples of using registered variables in conditions on later tasks, see :ref:`playbooks_conditionals`. Registered variables may be simple variables, list variables, dictionary variables, or complex nested data structures. The documentation for each module includes a ``RETURN`` section describing the return values for that module. To see the values for a particular task, run your playbook with ``-v``. + +Registered variables are stored in memory. You cannot cache registered variables for use in future plays. Registered variables are only valid on the host for the rest of the current playbook run. + +Registered variables are host-level variables. When you register a variable in a task with a loop, the registered variable contains a value for each item in the loop. The data structure placed in the variable during the loop will contain a ``results`` attribute, that is a list of all responses from the module. For a more in-depth example of how this works, see the :ref:`playbooks_loops` section on using register with a loop. + +.. note:: If a task fails or is skipped, Ansible still registers a variable with a failure or skipped status, unless the task is skipped based on tags. See :ref:`tags` for information on adding and using tags. + +.. _accessing_complex_variable_data: + +Referencing nested variables +============================ + +Many registered variables (and :ref:`facts <vars_and_facts>`) are nested YAML or JSON data structures. You cannot access values from these nested data structures with the simple ``{{ foo }}`` syntax. You must use either bracket notation or dot notation. 
For example, to reference an IP address from your facts using the bracket notation:: + + {{ ansible_facts["eth0"]["ipv4"]["address"] }} + +To reference an IP address from your facts using the dot notation:: + + {{ ansible_facts.eth0.ipv4.address }} + +.. _about_jinja2: +.. _jinja2_filters: + +Transforming variables with Jinja2 filters +========================================== + +Jinja2 filters let you transform the value of a variable within a template expression. For example, the ``capitalize`` filter capitalizes any value passed to it; the ``to_yaml`` and ``to_json`` filters change the format of your variable values. Jinja2 includes many `built-in filters <http://jinja.pocoo.org/docs/templates/#builtin-filters>`_ and Ansible supplies many more filters. To find more examples of filters, see :ref:`playbooks_filters`. + +.. _setting_variables: + +Where to set variables +====================== + +You can define variables in a variety of places, such as in inventory, in playbooks, in reusable files, in roles, and at the command line. Ansible loads every possible variable it finds, then chooses the variable to apply based on :ref:`variable precedence rules <ansible_variable_precedence>`. + +.. _define_variables_in_inventory: + +Defining variables in inventory +------------------------------- + +You can define different variables for each individual host, or set shared variables for a group of hosts in your inventory. For example, if all machines in the ``[Boston]`` group use 'boston.ntp.example.com' as an NTP server, you can set a group variable. The :ref:`intro_inventory` page has details on setting :ref:`host variables <host_variables>` and :ref:`group variables <group_variables>` in inventory. + +.. 
_playbook_variables: + +Defining variables in a playbook +-------------------------------- + +You can define variables directly in a playbook:: + + - hosts: webservers + vars: + http_port: 80 + +When you define variables in a playbook, they are visible to anyone who runs that playbook. This is especially useful if you share playbooks widely. + +.. _included_variables: +.. _variable_file_separation_details: + +Defining variables in included files and roles +---------------------------------------------- + +You can define variables in reusable variables files and/or in reusable roles. When you define variables in reusable variable files, the sensitive variables are separated from playbooks. This separation enables you to store your playbooks in a source control software and even share the playbooks, without the risk of exposing passwords or other sensitive and personal data. For information about creating reusable files and roles, see :ref:`playbooks_reuse`. + +This example shows how you can include variables defined in an external file:: + + --- + + - hosts: all + remote_user: root + vars: + favcolor: blue + vars_files: + - /vars/external_vars.yml + + tasks: + + - name: This is just a placeholder + ansible.builtin.command: /bin/echo foo + +The contents of each variables file is a simple YAML dictionary. For example:: + + --- + # in the above example, this would be vars/external_vars.yml + somevar: somevalue + password: magic + +.. note:: + You can keep per-host and per-group variables in similar files. To learn about organizing your variables, see :ref:`splitting_out_vars`. + +.. _passing_variables_on_the_command_line: + +Defining variables at runtime +----------------------------- + +You can define variables when you run your playbook by passing variables at the command line using the ``--extra-vars`` (or ``-e``) argument. You can also request user input with a ``vars_prompt`` (see :ref:`playbooks_prompts`). 
When you pass variables at the command line, use a single quoted string, that contains one or more variables, in one of the formats below. + +key=value format +^^^^^^^^^^^^^^^^ + +Values passed in using the ``key=value`` syntax are interpreted as strings. Use the JSON format if you need to pass non-string values such as Booleans, integers, floats, lists, and so on. + +.. code-block:: text + + ansible-playbook release.yml --extra-vars "version=1.23.45 other_variable=foo" + +JSON string format +^^^^^^^^^^^^^^^^^^ + +.. code-block:: text + + ansible-playbook release.yml --extra-vars '{"version":"1.23.45","other_variable":"foo"}' + ansible-playbook arcade.yml --extra-vars '{"pacman":"mrs","ghosts":["inky","pinky","clyde","sue"]}' + +When passing variables with ``--extra-vars``, you must escape quotes and other special characters appropriately for both your markup (for example, JSON), and for your shell:: + + ansible-playbook arcade.yml --extra-vars "{\"name\":\"Conan O\'Brien\"}" + ansible-playbook arcade.yml --extra-vars '{"name":"Conan O'\\\''Brien"}' + ansible-playbook script.yml --extra-vars "{\"dialog\":\"He said \\\"I just can\'t get enough of those single and double-quotes"\!"\\\"\"}" + +If you have a lot of special characters, use a JSON or YAML file containing the variable definitions. + +vars from a JSON or YAML file +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: text + + ansible-playbook release.yml --extra-vars "@some_file.json" + + +.. _ansible_variable_precedence: + +Variable precedence: Where should I put a variable? +=================================================== + +You can set multiple variables with the same name in many different places. When you do this, Ansible loads every possible variable it finds, then chooses the variable to apply based on variable precedence. In other words, the different variables will override each other in a certain order. 
+ +Teams and projects that agree on guidelines for defining variables (where to define certain types of variables) usually avoid variable precedence concerns. We suggest that you define each variable in one place: figure out where to define a variable, and keep it simple. For examples, see :ref:`variable_examples`. + +Some behavioral parameters that you can set in variables you can also set in Ansible configuration, as command-line options, and using playbook keywords. For example, you can define the user Ansible uses to connect to remote devices as a variable with ``ansible_user``, in a configuration file with ``DEFAULT_REMOTE_USER``, as a command-line option with ``-u``, and with the playbook keyword ``remote_user``. If you define the same parameter in a variable and by another method, the variable overrides the other setting. This approach allows host-specific settings to override more general settings. For examples and more details on the precedence of these various settings, see :ref:`general_precedence_rules`. + +Understanding variable precedence +--------------------------------- + +Ansible does apply variable precedence, and you might have a use for it. Here is the order of precedence from least to greatest (the last listed variables override all other variables): + + #. command line values (for example, ``-u my_user``, these are not variables) + #. role defaults (defined in role/defaults/main.yml) [1]_ + #. inventory file or script group vars [2]_ + #. inventory group_vars/all [3]_ + #. playbook group_vars/all [3]_ + #. inventory group_vars/* [3]_ + #. playbook group_vars/* [3]_ + #. inventory file or script host vars [2]_ + #. inventory host_vars/* [3]_ + #. playbook host_vars/* [3]_ + #. host facts / cached set_facts [4]_ + #. play vars + #. play vars_prompt + #. play vars_files + #. role vars (defined in role/vars/main.yml) + #. block vars (only for tasks in block) + #. task vars (only for the task) + #. include_vars + #. 
set_facts / registered vars
+   #. role (and include_role) params
+   #. include params
+   #. extra vars (for example, ``-e "user=my_user"``)(always win precedence)
+
+In general, Ansible gives precedence to variables that were defined more recently, more actively, and with more explicit scope. Variables in the defaults folder inside a role are easily overridden. Anything in the vars directory of the role overrides previous versions of that variable in the namespace. Host and/or inventory variables override role defaults, but explicit includes such as the vars directory or an ``include_vars`` task override inventory variables.
+
+Ansible merges different variables set in inventory so that more specific settings override more generic settings. For example, ``ansible_ssh_user`` specified as a group_var is overridden by ``ansible_user`` specified as a host_var. For details about the precedence of variables set in inventory, see :ref:`how_we_merge`.
+
+.. rubric:: Footnotes
+
+.. [1] Tasks in each role see their own role's defaults. Tasks defined outside of a role see the last role's defaults.
+.. [2] Variables defined in inventory file or provided by dynamic inventory.
+.. [3] Includes vars added by 'vars plugins' as well as host_vars and group_vars which are added by the default vars plugin shipped with Ansible.
+.. [4] When created with set_facts's cacheable option, variables have the high precedence in the play,
+       but are the same as a host facts precedence when they come from the cache.
+
+.. note:: Within any section, redefining a var overrides the previous instance.
+          If multiple groups have the same variable, the last one loaded wins.
+          If you define a variable twice in a play's ``vars:`` section, the second one wins.
+.. note:: The previous describes the default config ``hash_behaviour=replace``, switch to ``merge`` to only partially overwrite.
+
+.. 
_variable_scopes: + +Scoping variables +----------------- + +You can decide where to set a variable based on the scope you want that value to have. Ansible has three main scopes: + + * Global: this is set by config, environment variables and the command line + * Play: each play and contained structures, vars entries (vars; vars_files; vars_prompt), role defaults and vars. + * Host: variables directly associated to a host, like inventory, include_vars, facts or registered task outputs + +Inside a template, you automatically have access to all variables that are in scope for a host, plus any registered variables, facts, and magic variables. + +.. _variable_examples: + +Tips on where to set variables +------------------------------ + +You should choose where to define a variable based on the kind of control you might want over values. + +Set variables in inventory that deal with geography or behavior. Since groups are frequently the entity that maps roles onto hosts, you can often set variables on the group instead of defining them on a role. Remember: child groups override parent groups, and host variables override group variables. See :ref:`define_variables_in_inventory` for details on setting host and group variables. + +Set common defaults in a ``group_vars/all`` file. See :ref:`splitting_out_vars` for details on how to organize host and group variables in your inventory. Group variables are generally placed alongside your inventory file, but they can also be returned by dynamic inventory (see :ref:`intro_dynamic_inventory`) or defined in :ref:`ansible_tower` from the UI or API:: + + --- + # file: /etc/ansible/group_vars/all + # this is the site wide default + ntp_server: default-time.example.com + +Set location-specific variables in ``group_vars/my_location`` files. 
All groups are children of the ``all`` group, so variables set here override those set in ``group_vars/all``:: + + --- + # file: /etc/ansible/group_vars/boston + ntp_server: boston-time.example.com + +If one host used a different NTP server, you could set that in a host_vars file, which would override the group variable:: + + --- + # file: /etc/ansible/host_vars/xyz.boston.example.com + ntp_server: override.example.com + +Set defaults in roles to avoid undefined-variable errors. If you share your roles, other users can rely on the reasonable defaults you added in the ``roles/x/defaults/main.yml`` file, or they can easily override those values in inventory or at the command line. See :ref:`playbooks_reuse_roles` for more info. For example:: + + --- + # file: roles/x/defaults/main.yml + # if no other value is supplied in inventory or as a parameter, this value will be used + http_port: 80 + +Set variables in roles to ensure a value is used in that role, and is not overridden by inventory variables. If you are not sharing your role with others, you can define app-specific behaviors like ports this way, in ``roles/x/vars/main.yml``. If you are sharing roles with others, putting variables here makes them harder to override, although they still can by passing a parameter to the role or setting a variable with ``-e``:: + + --- + # file: roles/x/vars/main.yml + # this will absolutely be used in this role + http_port: 80 + +Pass variables as parameters when you call roles for maximum clarity, flexibility, and visibility. This approach overrides any defaults that exist for a role. For example:: + + roles: + - role: apache + vars: + http_port: 8080 + +When you read this playbook it is clear that you have chosen to set a variable or override a default. You can also pass multiple values, which allows you to run the same role multiple times. See :ref:`run_role_twice` for more details. 
For example:: + + roles: + - role: app_user + vars: + myname: Ian + - role: app_user + vars: + myname: Terry + - role: app_user + vars: + myname: Graham + - role: app_user + vars: + myname: John + +Variables set in one role are available to later roles. You can set variables in a ``roles/common_settings/vars/main.yml`` file and use them in other roles and elsewhere in your playbook:: + + roles: + - role: common_settings + - role: something + vars: + foo: 12 + - role: something_else + +.. note:: There are some protections in place to avoid the need to namespace variables. + In this example, variables defined in 'common_settings' are available to 'something' and 'something_else' tasks, but tasks in 'something' have foo set at 12, even if 'common_settings' sets foo to 20. + +Instead of worrying about variable precedence, we encourage you to think about how easily or how often you want to override a variable when deciding where to set it. If you are not sure what other variables are defined, and you need a particular value, use ``--extra-vars`` (``-e``) to override all other variables. + +Using advanced variable syntax +============================== + +For information about advanced YAML syntax used to declare variables and have more control over the data placed in YAML files used by Ansible, see :ref:`playbooks_advanced_syntax`. + +.. seealso:: + + :ref:`about_playbooks` + An introduction to playbooks + :ref:`playbooks_conditionals` + Conditional statements in playbooks + :ref:`playbooks_filters` + Jinja2 filters and their uses + :ref:`playbooks_loops` + Looping in playbooks + :ref:`playbooks_reuse_roles` + Playbook organization by roles + :ref:`playbooks_best_practices` + Tips and tricks for playbooks + :ref:`special_variables` + List of special variables + `User Mailing List <https://groups.google.com/group/ansible-devel>`_ + Have a question? Stop by the google group! 
+ `irc.freenode.net <http://irc.freenode.net>`_ + #ansible IRC chat channel diff --git a/docs/docsite/rst/user_guide/playbooks_vars_facts.rst b/docs/docsite/rst/user_guide/playbooks_vars_facts.rst new file mode 100644 index 00000000..3828b8e3 --- /dev/null +++ b/docs/docsite/rst/user_guide/playbooks_vars_facts.rst @@ -0,0 +1,680 @@ +.. _vars_and_facts: + +************************************************ +Discovering variables: facts and magic variables +************************************************ + +With Ansible you can retrieve or discover certain variables containing information about your remote systems or about Ansible itself. Variables related to remote systems are called facts. With facts, you can use the behavior or state of one system as configuration on other systems. For example, you can use the IP address of one system as a configuration value on another system. Variables related to Ansible are called magic variables. + +.. contents:: + :local: + +Ansible facts +============= + +Ansible facts are data related to your remote systems, including operating systems, IP addresses, attached filesystems, and more. You can access this data in the ``ansible_facts`` variable. By default, you can also access some Ansible facts as top-level variables with the ``ansible_`` prefix. You can disable this behavior using the :ref:`INJECT_FACTS_AS_VARS` setting. To see all available facts, add this task to a play:: + + - name: Print all available facts + ansible.builtin.debug: + var: ansible_facts + +To see the 'raw' information as gathered, run this command at the command line:: + + ansible <hostname> -m ansible.builtin.setup + +Facts include a large amount of variable data, which may look like this: + +.. 
code-block:: json + + { + "ansible_all_ipv4_addresses": [ + "REDACTED IP ADDRESS" + ], + "ansible_all_ipv6_addresses": [ + "REDACTED IPV6 ADDRESS" + ], + "ansible_apparmor": { + "status": "disabled" + }, + "ansible_architecture": "x86_64", + "ansible_bios_date": "11/28/2013", + "ansible_bios_version": "4.1.5", + "ansible_cmdline": { + "BOOT_IMAGE": "/boot/vmlinuz-3.10.0-862.14.4.el7.x86_64", + "console": "ttyS0,115200", + "no_timer_check": true, + "nofb": true, + "nomodeset": true, + "ro": true, + "root": "LABEL=cloudimg-rootfs", + "vga": "normal" + }, + "ansible_date_time": { + "date": "2018-10-25", + "day": "25", + "epoch": "1540469324", + "hour": "12", + "iso8601": "2018-10-25T12:08:44Z", + "iso8601_basic": "20181025T120844109754", + "iso8601_basic_short": "20181025T120844", + "iso8601_micro": "2018-10-25T12:08:44.109968Z", + "minute": "08", + "month": "10", + "second": "44", + "time": "12:08:44", + "tz": "UTC", + "tz_offset": "+0000", + "weekday": "Thursday", + "weekday_number": "4", + "weeknumber": "43", + "year": "2018" + }, + "ansible_default_ipv4": { + "address": "REDACTED", + "alias": "eth0", + "broadcast": "REDACTED", + "gateway": "REDACTED", + "interface": "eth0", + "macaddress": "REDACTED", + "mtu": 1500, + "netmask": "255.255.255.0", + "network": "REDACTED", + "type": "ether" + }, + "ansible_default_ipv6": {}, + "ansible_device_links": { + "ids": {}, + "labels": { + "xvda1": [ + "cloudimg-rootfs" + ], + "xvdd": [ + "config-2" + ] + }, + "masters": {}, + "uuids": { + "xvda1": [ + "cac81d61-d0f8-4b47-84aa-b48798239164" + ], + "xvdd": [ + "2018-10-25-12-05-57-00" + ] + } + }, + "ansible_devices": { + "xvda": { + "holders": [], + "host": "", + "links": { + "ids": [], + "labels": [], + "masters": [], + "uuids": [] + }, + "model": null, + "partitions": { + "xvda1": { + "holders": [], + "links": { + "ids": [], + "labels": [ + "cloudimg-rootfs" + ], + "masters": [], + "uuids": [ + "cac81d61-d0f8-4b47-84aa-b48798239164" + ] + }, + "sectors": "83883999", + 
"sectorsize": 512, + "size": "40.00 GB", + "start": "2048", + "uuid": "cac81d61-d0f8-4b47-84aa-b48798239164" + } + }, + "removable": "0", + "rotational": "0", + "sas_address": null, + "sas_device_handle": null, + "scheduler_mode": "deadline", + "sectors": "83886080", + "sectorsize": "512", + "size": "40.00 GB", + "support_discard": "0", + "vendor": null, + "virtual": 1 + }, + "xvdd": { + "holders": [], + "host": "", + "links": { + "ids": [], + "labels": [ + "config-2" + ], + "masters": [], + "uuids": [ + "2018-10-25-12-05-57-00" + ] + }, + "model": null, + "partitions": {}, + "removable": "0", + "rotational": "0", + "sas_address": null, + "sas_device_handle": null, + "scheduler_mode": "deadline", + "sectors": "131072", + "sectorsize": "512", + "size": "64.00 MB", + "support_discard": "0", + "vendor": null, + "virtual": 1 + }, + "xvde": { + "holders": [], + "host": "", + "links": { + "ids": [], + "labels": [], + "masters": [], + "uuids": [] + }, + "model": null, + "partitions": { + "xvde1": { + "holders": [], + "links": { + "ids": [], + "labels": [], + "masters": [], + "uuids": [] + }, + "sectors": "167770112", + "sectorsize": 512, + "size": "80.00 GB", + "start": "2048", + "uuid": null + } + }, + "removable": "0", + "rotational": "0", + "sas_address": null, + "sas_device_handle": null, + "scheduler_mode": "deadline", + "sectors": "167772160", + "sectorsize": "512", + "size": "80.00 GB", + "support_discard": "0", + "vendor": null, + "virtual": 1 + } + }, + "ansible_distribution": "CentOS", + "ansible_distribution_file_parsed": true, + "ansible_distribution_file_path": "/etc/redhat-release", + "ansible_distribution_file_variety": "RedHat", + "ansible_distribution_major_version": "7", + "ansible_distribution_release": "Core", + "ansible_distribution_version": "7.5.1804", + "ansible_dns": { + "nameservers": [ + "127.0.0.1" + ] + }, + "ansible_domain": "", + "ansible_effective_group_id": 1000, + "ansible_effective_user_id": 1000, + "ansible_env": { + "HOME": 
"/home/zuul", + "LANG": "en_US.UTF-8", + "LESSOPEN": "||/usr/bin/lesspipe.sh %s", + "LOGNAME": "zuul", + "MAIL": "/var/mail/zuul", + "PATH": "/usr/local/bin:/usr/bin", + "PWD": "/home/zuul", + "SELINUX_LEVEL_REQUESTED": "", + "SELINUX_ROLE_REQUESTED": "", + "SELINUX_USE_CURRENT_RANGE": "", + "SHELL": "/bin/bash", + "SHLVL": "2", + "SSH_CLIENT": "REDACTED 55672 22", + "SSH_CONNECTION": "REDACTED 55672 REDACTED 22", + "USER": "zuul", + "XDG_RUNTIME_DIR": "/run/user/1000", + "XDG_SESSION_ID": "1", + "_": "/usr/bin/python2" + }, + "ansible_eth0": { + "active": true, + "device": "eth0", + "ipv4": { + "address": "REDACTED", + "broadcast": "REDACTED", + "netmask": "255.255.255.0", + "network": "REDACTED" + }, + "ipv6": [ + { + "address": "REDACTED", + "prefix": "64", + "scope": "link" + } + ], + "macaddress": "REDACTED", + "module": "xen_netfront", + "mtu": 1500, + "pciid": "vif-0", + "promisc": false, + "type": "ether" + }, + "ansible_eth1": { + "active": true, + "device": "eth1", + "ipv4": { + "address": "REDACTED", + "broadcast": "REDACTED", + "netmask": "255.255.224.0", + "network": "REDACTED" + }, + "ipv6": [ + { + "address": "REDACTED", + "prefix": "64", + "scope": "link" + } + ], + "macaddress": "REDACTED", + "module": "xen_netfront", + "mtu": 1500, + "pciid": "vif-1", + "promisc": false, + "type": "ether" + }, + "ansible_fips": false, + "ansible_form_factor": "Other", + "ansible_fqdn": "centos-7-rax-dfw-0003427354", + "ansible_hostname": "centos-7-rax-dfw-0003427354", + "ansible_interfaces": [ + "lo", + "eth1", + "eth0" + ], + "ansible_is_chroot": false, + "ansible_kernel": "3.10.0-862.14.4.el7.x86_64", + "ansible_lo": { + "active": true, + "device": "lo", + "ipv4": { + "address": "127.0.0.1", + "broadcast": "host", + "netmask": "255.0.0.0", + "network": "127.0.0.0" + }, + "ipv6": [ + { + "address": "::1", + "prefix": "128", + "scope": "host" + } + ], + "mtu": 65536, + "promisc": false, + "type": "loopback" + }, + "ansible_local": {}, + "ansible_lsb": { + 
"codename": "Core", + "description": "CentOS Linux release 7.5.1804 (Core)", + "id": "CentOS", + "major_release": "7", + "release": "7.5.1804" + }, + "ansible_machine": "x86_64", + "ansible_machine_id": "2db133253c984c82aef2fafcce6f2bed", + "ansible_memfree_mb": 7709, + "ansible_memory_mb": { + "nocache": { + "free": 7804, + "used": 173 + }, + "real": { + "free": 7709, + "total": 7977, + "used": 268 + }, + "swap": { + "cached": 0, + "free": 0, + "total": 0, + "used": 0 + } + }, + "ansible_memtotal_mb": 7977, + "ansible_mounts": [ + { + "block_available": 7220998, + "block_size": 4096, + "block_total": 9817227, + "block_used": 2596229, + "device": "/dev/xvda1", + "fstype": "ext4", + "inode_available": 10052341, + "inode_total": 10419200, + "inode_used": 366859, + "mount": "/", + "options": "rw,seclabel,relatime,data=ordered", + "size_available": 29577207808, + "size_total": 40211361792, + "uuid": "cac81d61-d0f8-4b47-84aa-b48798239164" + }, + { + "block_available": 0, + "block_size": 2048, + "block_total": 252, + "block_used": 252, + "device": "/dev/xvdd", + "fstype": "iso9660", + "inode_available": 0, + "inode_total": 0, + "inode_used": 0, + "mount": "/mnt/config", + "options": "ro,relatime,mode=0700", + "size_available": 0, + "size_total": 516096, + "uuid": "2018-10-25-12-05-57-00" + } + ], + "ansible_nodename": "centos-7-rax-dfw-0003427354", + "ansible_os_family": "RedHat", + "ansible_pkg_mgr": "yum", + "ansible_processor": [ + "0", + "GenuineIntel", + "Intel(R) Xeon(R) CPU E5-2670 0 @ 2.60GHz", + "1", + "GenuineIntel", + "Intel(R) Xeon(R) CPU E5-2670 0 @ 2.60GHz", + "2", + "GenuineIntel", + "Intel(R) Xeon(R) CPU E5-2670 0 @ 2.60GHz", + "3", + "GenuineIntel", + "Intel(R) Xeon(R) CPU E5-2670 0 @ 2.60GHz", + "4", + "GenuineIntel", + "Intel(R) Xeon(R) CPU E5-2670 0 @ 2.60GHz", + "5", + "GenuineIntel", + "Intel(R) Xeon(R) CPU E5-2670 0 @ 2.60GHz", + "6", + "GenuineIntel", + "Intel(R) Xeon(R) CPU E5-2670 0 @ 2.60GHz", + "7", + "GenuineIntel", + "Intel(R) Xeon(R) CPU 
E5-2670 0 @ 2.60GHz" + ], + "ansible_processor_cores": 8, + "ansible_processor_count": 8, + "ansible_processor_nproc": 8, + "ansible_processor_threads_per_core": 1, + "ansible_processor_vcpus": 8, + "ansible_product_name": "HVM domU", + "ansible_product_serial": "REDACTED", + "ansible_product_uuid": "REDACTED", + "ansible_product_version": "4.1.5", + "ansible_python": { + "executable": "/usr/bin/python2", + "has_sslcontext": true, + "type": "CPython", + "version": { + "major": 2, + "micro": 5, + "minor": 7, + "releaselevel": "final", + "serial": 0 + }, + "version_info": [ + 2, + 7, + 5, + "final", + 0 + ] + }, + "ansible_python_version": "2.7.5", + "ansible_real_group_id": 1000, + "ansible_real_user_id": 1000, + "ansible_selinux": { + "config_mode": "enforcing", + "mode": "enforcing", + "policyvers": 31, + "status": "enabled", + "type": "targeted" + }, + "ansible_selinux_python_present": true, + "ansible_service_mgr": "systemd", + "ansible_ssh_host_key_ecdsa_public": "REDACTED KEY VALUE", + "ansible_ssh_host_key_ed25519_public": "REDACTED KEY VALUE", + "ansible_ssh_host_key_rsa_public": "REDACTED KEY VALUE", + "ansible_swapfree_mb": 0, + "ansible_swaptotal_mb": 0, + "ansible_system": "Linux", + "ansible_system_capabilities": [ + "" + ], + "ansible_system_capabilities_enforced": "True", + "ansible_system_vendor": "Xen", + "ansible_uptime_seconds": 151, + "ansible_user_dir": "/home/zuul", + "ansible_user_gecos": "", + "ansible_user_gid": 1000, + "ansible_user_id": "zuul", + "ansible_user_shell": "/bin/bash", + "ansible_user_uid": 1000, + "ansible_userspace_architecture": "x86_64", + "ansible_userspace_bits": "64", + "ansible_virtualization_role": "guest", + "ansible_virtualization_type": "xen", + "gather_subset": [ + "all" + ], + "module_setup": true + } + +You can reference the model of the first disk in the facts shown above in a template or playbook as:: + + {{ ansible_facts['devices']['xvda']['model'] }} + +To reference the system hostname:: + + {{ 
ansible_facts['nodename'] }} + +You can use facts in conditionals (see :ref:`playbooks_conditionals`) and also in templates. You can also use facts to create dynamic groups of hosts that match particular criteria, see the :ref:`group_by module <group_by_module>` documentation for details. + +.. _fact_requirements: + +Package requirements for fact gathering +--------------------------------------- + +On some distros, you may see missing fact values or facts set to default values because the packages that support gathering those facts are not installed by default. You can install the necessary packages on your remote hosts using the OS package manager. Known dependencies include: + +* Linux Network fact gathering - Depends on the ``ip`` binary, commonly included in the ``iproute2`` package. + +.. _fact_caching: + +Caching facts +------------- + +Like registered variables, facts are stored in memory by default. However, unlike registered variables, facts can be gathered independently and cached for repeated use. With cached facts, you can refer to facts from one system when configuring a second system, even if Ansible executes the current play on the second system first. For example:: + + {{ hostvars['asdf.example.com']['ansible_facts']['os_family'] }} + +Caching is controlled by the cache plugins. By default, Ansible uses the memory cache plugin, which stores facts in memory for the duration of the current playbook run. To retain Ansible facts for repeated use, select a different cache plugin. See :ref:`cache_plugins` for details. + +Fact caching can improve performance. If you manage thousands of hosts, you can configure fact caching to run nightly, then manage configuration on a smaller set of servers periodically throughout the day. With cached facts, you have access to variables and information about all hosts even when you are only managing a small number of servers. + +.. 
_disabling_facts: + +Disabling facts +--------------- + +By default, Ansible gathers facts at the beginning of each play. If you do not need to gather facts (for example, if you know everything about your systems centrally), you can turn off fact gathering at the play level to improve scalability. Disabling facts may particularly improve performance in push mode with very large numbers of systems, or if you are using Ansible on experimental platforms. To disable fact gathering:: + + - hosts: whatever + gather_facts: no + +Adding custom facts +------------------- + +The setup module in Ansible automatically discovers a standard set of facts about each host. If you want to add custom values to your facts, you can write a custom facts module, set temporary facts with a ``ansible.builtin.set_fact`` task, or provide permanent custom facts using the facts.d directory. + +.. _local_facts: + +facts.d or local facts +^^^^^^^^^^^^^^^^^^^^^^ + +.. versionadded:: 1.3 + +You can add static custom facts by adding static files to facts.d, or add dynamic facts by adding executable scripts to facts.d. For example, you can add a list of all users on a host to your facts by creating and running a script in facts.d. + +To use facts.d, create an ``/etc/ansible/facts.d`` directory on the remote host or hosts. If you prefer a different directory, create it and specify it using the ``fact_path`` play keyword. Add files to the directory to supply your custom facts. All file names must end with ``.fact``. The files can be JSON, INI, or executable files returning JSON. + +To add static facts, simply add a file with the ``.fact`` extension. For example, create ``/etc/ansible/facts.d/preferences.fact`` with this content:: + + [general] + asdf=1 + bar=2 + +The next time fact gathering runs, your facts will include a hash variable fact named ``general`` with ``asdf`` and ``bar`` as members. 
To validate this, run the following:: + + ansible <hostname> -m ansible.builtin.setup -a "filter=ansible_local" + +And you will see your custom fact added:: + + "ansible_local": { + "preferences": { + "general": { + "asdf" : "1", + "bar" : "2" + } + } + } + +The ansible_local namespace separates custom facts created by facts.d from system facts or variables defined elsewhere in the playbook, so variables will not override each other. You can access this custom fact in a template or playbook as:: + + {{ ansible_local['preferences']['general']['asdf'] }} + +.. note:: The key part in the key=value pairs will be converted into lowercase inside the ansible_local variable. Using the example above, if the ini file contained ``XYZ=3`` in the ``[general]`` section, then you should expect to access it as: ``{{ ansible_local['preferences']['general']['xyz'] }}`` and not ``{{ ansible_local['preferences']['general']['XYZ'] }}``. This is because Ansible uses Python's `ConfigParser`_ which passes all option names through the `optionxform`_ method and this method's default implementation converts option names to lower case. + +.. _ConfigParser: https://docs.python.org/2/library/configparser.html +.. _optionxform: https://docs.python.org/2/library/configparser.html#ConfigParser.RawConfigParser.optionxform + +You can also use facts.d to execute a script on the remote host, generating dynamic custom facts to the ansible_local namespace. For example, you can generate a list of all users that exist on a remote host as a fact about that host. To generate dynamic custom facts using facts.d: + + #. Write and test a script to generate the JSON data you want. + #. Save the script in your facts.d directory. + #. Make sure your script has the ``.fact`` file extension. + #. Make sure your script is executable by the Ansible connection user. + #. Gather facts to execute the script and add the JSON output to ansible_local. + +By default, fact gathering runs once at the beginning of each play. 
If you create a custom fact using facts.d in a playbook, it will be available in the next play that gathers facts. If you want to use it in the same play where you created it, you must explicitly re-run the setup module. For example:: + + - hosts: webservers + tasks: + + - name: Create directory for ansible custom facts + ansible.builtin.file: + state: directory + recurse: yes + path: /etc/ansible/facts.d + + - name: Install custom ipmi fact + ansible.builtin.copy: + src: ipmi.fact + dest: /etc/ansible/facts.d + + - name: Re-read facts after adding custom fact + ansible.builtin.setup: + filter: ansible_local + +If you use this pattern frequently, a custom facts module would be more efficient than facts.d. + +.. _magic_variables_and_hostvars: + +Information about Ansible: magic variables +========================================== + +You can access information about Ansible operations, including the python version being used, the hosts and groups in inventory, and the directories for playbooks and roles, using "magic" variables. Like connection variables, magic variables are :ref:`special_variables`. Magic variable names are reserved - do not set variables with these names. The variable ``environment`` is also reserved. + +The most commonly used magic variables are ``hostvars``, ``groups``, ``group_names``, and ``inventory_hostname``. With ``hostvars``, you can access variables defined for any host in the play, at any point in a playbook. You can access Ansible facts using the ``hostvars`` variable too, but only after you have gathered (or cached) facts. + +If you want to configure your database server using the value of a 'fact' from another node, or the value of an inventory variable assigned to another node, you can use ``hostvars`` in a template or on an action line:: + + {{ hostvars['test.example.com']['ansible_facts']['distribution'] }} + +With ``groups``, a list of all the groups (and hosts) in the inventory, you can enumerate all hosts within a group. 
For example: + +.. code-block:: jinja + + {% for host in groups['app_servers'] %} + # something that applies to all app servers. + {% endfor %} + +You can use ``groups`` and ``hostvars`` together to find all the IP addresses in a group. + +.. code-block:: jinja + + {% for host in groups['app_servers'] %} + {{ hostvars[host]['ansible_facts']['eth0']['ipv4']['address'] }} + {% endfor %} + +You can use this approach to point a frontend proxy server to all the hosts in your app servers group, to set up the correct firewall rules between servers, and so on. You must either cache facts or gather facts for those hosts before the task that fills out the template. + +With ``group_names``, a list (array) of all the groups the current host is in, you can create templated files that vary based on the group membership (or role) of the host: + +.. code-block:: jinja + + {% if 'webserver' in group_names %} + # some part of a configuration file that only applies to webservers + {% endif %} + +You can use the magic variable ``inventory_hostname``, the name of the host as configured in your inventory, as an alternative to ``ansible_hostname`` when fact-gathering is disabled. If you have a long FQDN, you can use ``inventory_hostname_short``, which contains the part up to the first period, without the rest of the domain. + +Other useful magic variables refer to the current play or playbook. These vars may be useful for filling out templates with multiple hostnames or for injecting the list into the rules for a load balancer. + +``ansible_play_hosts`` is the list of all hosts still active in the current play. + +``ansible_play_batch`` is a list of hostnames that are in scope for the current 'batch' of the play. + +The batch size is defined by ``serial``, when not set it is equivalent to the whole play (making it the same as ``ansible_play_hosts``). + +``ansible_playbook_python`` is the path to the python executable used to invoke the Ansible command line tool. 
+ +``inventory_dir`` is the pathname of the directory holding Ansible's inventory host file. + +``inventory_file`` is the pathname and the filename pointing to the Ansible's inventory host file. + +``playbook_dir`` contains the playbook base directory. + +``role_path`` contains the current role's pathname and only works inside a role. + +``ansible_check_mode`` is a boolean, set to ``True`` if you run Ansible with ``--check``. + +.. _ansible_version: + +Ansible version +--------------- + +.. versionadded:: 1.8 + +To adapt playbook behavior to different versions of Ansible, you can use the variable ``ansible_version``, which has the following structure:: + + "ansible_version": { + "full": "2.10.1", + "major": 2, + "minor": 10, + "revision": 1, + "string": "2.10.1" + } diff --git a/docs/docsite/rst/user_guide/playbooks_vault.rst b/docs/docsite/rst/user_guide/playbooks_vault.rst new file mode 100644 index 00000000..03bd2c04 --- /dev/null +++ b/docs/docsite/rst/user_guide/playbooks_vault.rst @@ -0,0 +1,6 @@ +:orphan: + +Using vault in playbooks +======================== + +The documentation regarding Ansible Vault has moved. The new location is here: :ref:`vault`. Please update any links you may have made directly to this page. diff --git a/docs/docsite/rst/user_guide/plugin_filtering_config.rst b/docs/docsite/rst/user_guide/plugin_filtering_config.rst new file mode 100644 index 00000000..2e9900c9 --- /dev/null +++ b/docs/docsite/rst/user_guide/plugin_filtering_config.rst @@ -0,0 +1,26 @@ +.. _plugin_filtering_config: + +Blacklisting modules +==================== + +If you want to avoid using certain modules, you can blacklist them to prevent Ansible from loading them. To blacklist plugins, create a yaml configuration file. The default location for this file is :file:`/etc/ansible/plugin_filters.yml`, or you can select a different path for the blacklist file using the :ref:`PLUGIN_FILTERS_CFG` setting in the ``defaults`` section of your ansible.cfg. 
Here is an example blacklist file: + +.. code-block:: YAML + + --- + filter_version: '1.0' + module_blacklist: + # Deprecated + - docker + # We only allow pip, not easy_install + - easy_install + +The file contains two fields: + + * A file version so that you can update the format while keeping backwards compatibility in the future. The present version should be the string, ``"1.0"`` + + * A list of modules to blacklist. Any module in this list will not be loaded by Ansible when it searches for a module to invoke for a task. + +.. note:: + + You cannot blacklist the ``stat`` module, as it is required for Ansible to run. diff --git a/docs/docsite/rst/user_guide/quickstart.rst b/docs/docsite/rst/user_guide/quickstart.rst new file mode 100644 index 00000000..7e97d9ab --- /dev/null +++ b/docs/docsite/rst/user_guide/quickstart.rst @@ -0,0 +1,20 @@ +.. _quickstart_guide: + +Ansible Quickstart Guide +======================== + +We've recorded a short video that introduces Ansible. + +The `quickstart video <https://www.ansible.com/resources/videos/quick-start-video>`_ is about 13 minutes long and gives you a high level +introduction to Ansible -- what it does and how to use it. We'll also tell you about other products in the Ansible ecosystem. + +Enjoy, and be sure to visit the rest of the documentation to learn more. + +.. 
seealso:: + + `A system administrators guide to getting started with Ansible <https://www.redhat.com/en/blog/system-administrators-guide-getting-started-ansible-fast>`_ + A step by step introduction to Ansible + `Ansible Automation for SysAdmins <https://opensource.com/downloads/ansible-quickstart>`_ + A downloadable guide for getting started with Ansible + :ref:`network_getting_started` + A guide for network engineers using Ansible for the first time diff --git a/docs/docsite/rst/user_guide/sample_setup.rst b/docs/docsite/rst/user_guide/sample_setup.rst new file mode 100644 index 00000000..9be60004 --- /dev/null +++ b/docs/docsite/rst/user_guide/sample_setup.rst @@ -0,0 +1,285 @@ +.. _sample_setup: + +******************** +Sample Ansible setup +******************** + +You have learned about playbooks, inventory, roles, and variables. This section pulls all those elements together, outlining a sample setup for automating a web service. You can find more example playbooks illustrating these patterns in our `ansible-examples repository <https://github.com/ansible/ansible-examples>`_. (NOTE: These may not use all of the features in the latest release, but are still an excellent reference!). + +The sample setup organizes playbooks, roles, inventory, and variables files by function, with tags at the play and task level for greater granularity and control. This is a powerful and flexible approach, but there are other ways to organize Ansible content. Your usage of Ansible should fit your needs, not ours, so feel free to modify this approach and organize your content as you see fit. + +.. 
contents:: + :local: + +Sample directory layout +----------------------- + +This layout organizes most tasks in roles, with a single inventory file for each environment and a few playbooks in the top-level directory:: + + production # inventory file for production servers + staging # inventory file for staging environment + + group_vars/ + group1.yml # here we assign variables to particular groups + group2.yml + host_vars/ + hostname1.yml # here we assign variables to particular systems + hostname2.yml + + library/ # if any custom modules, put them here (optional) + module_utils/ # if any custom module_utils to support modules, put them here (optional) + filter_plugins/ # if any custom filter plugins, put them here (optional) + + site.yml # master playbook + webservers.yml # playbook for webserver tier + dbservers.yml # playbook for dbserver tier + tasks/ # task files included from playbooks + webservers-extra.yml # <-- avoids confusing playbook with task files + + roles/ + common/ # this hierarchy represents a "role" + tasks/ # + main.yml # <-- tasks file can include smaller files if warranted + handlers/ # + main.yml # <-- handlers file + templates/ # <-- files for use with the template resource + ntp.conf.j2 # <------- templates end in .j2 + files/ # + bar.txt # <-- files for use with the copy resource + foo.sh # <-- script files for use with the script resource + vars/ # + main.yml # <-- variables associated with this role + defaults/ # + main.yml # <-- default lower priority variables for this role + meta/ # + main.yml # <-- role dependencies + library/ # roles can also include custom modules + module_utils/ # roles can also include custom module_utils + lookup_plugins/ # or other types of plugins, like lookup in this case + + webtier/ # same kind of structure as "common" was above, done for the webtier role + monitoring/ # "" + fooapp/ # "" + +.. 
note:: By default, Ansible assumes your playbooks are stored in one directory with roles stored in a sub-directory called ``roles/``. As you use Ansible to automate more tasks, you may want to move your playbooks into a sub-directory called ``playbooks/``. If you do this, you must configure the path to your ``roles/`` directory using the ``roles_path`` setting in ansible.cfg. + +Alternative directory layout +---------------------------- + +Alternatively you can put each inventory file with its ``group_vars``/``host_vars`` in a separate directory. This is particularly useful if your ``group_vars``/``host_vars`` don't have that much in common in different environments. The layout could look something like this:: + + inventories/ + production/ + hosts # inventory file for production servers + group_vars/ + group1.yml # here we assign variables to particular groups + group2.yml + host_vars/ + hostname1.yml # here we assign variables to particular systems + hostname2.yml + + staging/ + hosts # inventory file for staging environment + group_vars/ + group1.yml # here we assign variables to particular groups + group2.yml + host_vars/ + stagehost1.yml # here we assign variables to particular systems + stagehost2.yml + + library/ + module_utils/ + filter_plugins/ + + site.yml + webservers.yml + dbservers.yml + + roles/ + common/ + webtier/ + monitoring/ + fooapp/ + +This layout gives you more flexibility for larger environments, as well as a total separation of inventory variables between different environments. However, this approach is harder to maintain, because there are more files. For more information on organizing group and host variables, see :ref:`splitting_out_vars`. + +.. _groups_and_hosts: + +Sample group and host variables +------------------------------- + +These sample group and host variables files record the variable values that apply to each machine or group of machines. 
For instance, the data center in Atlanta has its own NTP servers, so when setting up ntp.conf, we should use them:: + + --- + # file: group_vars/atlanta + ntp: ntp-atlanta.example.com + backup: backup-atlanta.example.com + +Similarly, the webservers have some configuration that does not apply to the database servers:: + + --- + # file: group_vars/webservers + apacheMaxRequestsPerChild: 3000 + apacheMaxClients: 900 + +Default values, or values that are universally true, belong in a file called group_vars/all:: + + --- + # file: group_vars/all + ntp: ntp-boston.example.com + backup: backup-boston.example.com + +If necessary, you can define specific hardware variance in systems in a host_vars file:: + + --- + # file: host_vars/db-bos-1.example.com + foo_agent_port: 86 + bar_agent_port: 99 + +Again, if you are using :ref:`dynamic inventory <dynamic_inventory>`, Ansible creates many dynamic groups automatically. So a tag like "class:webserver" would load in variables from the file "group_vars/ec2_tag_class_webserver" automatically. + +.. _split_by_role: + +Sample playbooks organized by function +-------------------------------------- + +With this setup, a single playbook can define all the infrastructure. The site.yml playbook imports two other playbooks, one for the webservers and one for the database servers:: + + --- + # file: site.yml + - import_playbook: webservers.yml + - import_playbook: dbservers.yml + +The webservers.yml file, also at the top level, maps the configuration of the webservers group to the roles related to the webservers group:: + + --- + # file: webservers.yml + - hosts: webservers + roles: + - common + - webtier + +With this setup, you can configure your whole infrastructure by "running" site.yml, or run a subset by running webservers.yml. This is analogous to the Ansible "--limit" parameter but a little more explicit:: + + ansible-playbook site.yml --limit webservers + ansible-playbook webservers.yml + +.. 
_role_organization: + +Sample task and handler files in a function-based role +------------------------------------------------------ + +Ansible loads any file called ``main.yml`` in a role sub-directory. This sample ``tasks/main.yml`` file is simple - it sets up NTP, but it could do more if we wanted:: + + --- + # file: roles/common/tasks/main.yml + + - name: be sure ntp is installed + yum: + name: ntp + state: present + tags: ntp + + - name: be sure ntp is configured + template: + src: ntp.conf.j2 + dest: /etc/ntp.conf + notify: + - restart ntpd + tags: ntp + + - name: be sure ntpd is running and enabled + service: + name: ntpd + state: started + enabled: yes + tags: ntp + +Here is an example handlers file. As a review, handlers are only fired when certain tasks report changes, and are run at the end +of each play:: + + --- + # file: roles/common/handlers/main.yml + - name: restart ntpd + service: + name: ntpd + state: restarted + +See :ref:`playbooks_reuse_roles` for more information. + + +.. _organization_examples: + +What the sample setup enables +----------------------------- + +The basic organizational structure described above enables a lot of different automation options. 
To reconfigure your entire infrastructure:: + + ansible-playbook -i production site.yml + +To reconfigure NTP on everything:: + + ansible-playbook -i production site.yml --tags ntp + +To reconfigure only the webservers:: + + ansible-playbook -i production webservers.yml + +To reconfigure only the webservers in Boston:: + + ansible-playbook -i production webservers.yml --limit boston + +To reconfigure only the first 10 webservers in Boston, and then the next 10:: + + ansible-playbook -i production webservers.yml --limit boston[0:9] + ansible-playbook -i production webservers.yml --limit boston[10:19] + +The sample setup also supports basic ad-hoc commands:: + + ansible boston -i production -m ping + ansible boston -i production -m command -a '/sbin/reboot' + +To discover what tasks would run or what hostnames would be affected by a particular Ansible command:: + + # confirm what task names would be run if I ran this command and said "just ntp tasks" + ansible-playbook -i production webservers.yml --tags ntp --list-tasks + + # confirm what hostnames might be communicated with if I said "limit to boston" + ansible-playbook -i production webservers.yml --limit boston --list-hosts + +.. _dep_vs_config: + +Organizing for deployment or configuration +------------------------------------------ + +The sample setup models a typical configuration topology. When doing multi-tier deployments, there are going +to be some additional playbooks that hop between tiers to roll out an application. In this case, 'site.yml' +may be augmented by playbooks like 'deploy_exampledotcom.yml' but the general concepts still apply. Ansible allows you to deploy and configure using the same tool, so you would likely reuse groups and keep the OS configuration in separate playbooks or roles from the app deployment. 
+ +Consider "playbooks" as a sports metaphor -- you can have one set of plays to use against all your infrastructure and situational plays that you use at different times and for different purposes. + +.. _ship_modules_with_playbooks: + +Using local Ansible modules +--------------------------- + +If a playbook has a :file:`./library` directory relative to its YAML file, this directory can be used to add Ansible modules that will +automatically be in the Ansible module path. This is a great way to keep modules that go with a playbook together. This is shown +in the directory structure example at the start of this section. + +.. seealso:: + + :ref:`yaml_syntax` + Learn about YAML syntax + :ref:`working_with_playbooks` + Review the basic playbook features + :ref:`list_of_collections` + Browse existing collections, modules, and plugins + :ref:`developing_modules` + Learn how to extend Ansible by writing your own modules + :ref:`intro_patterns` + Learn about how to select hosts + `GitHub examples directory <https://github.com/ansible/ansible-examples>`_ + Complete playbook files from the github project source + `Mailing List <https://groups.google.com/group/ansible-project>`_ + Questions? Help? Ideas? Stop by the list on Google Groups diff --git a/docs/docsite/rst/user_guide/shared_snippets/SSH_password_prompt.txt b/docs/docsite/rst/user_guide/shared_snippets/SSH_password_prompt.txt new file mode 100644 index 00000000..dc61ab38 --- /dev/null +++ b/docs/docsite/rst/user_guide/shared_snippets/SSH_password_prompt.txt @@ -0,0 +1,2 @@ +.. note:: + Ansible does not expose a channel to allow communication between the user and the ssh process to accept a password manually to decrypt an ssh key when using the ssh connection plugin (which is the default). The use of ``ssh-agent`` is highly recommended. 
diff --git a/docs/docsite/rst/user_guide/shared_snippets/with2loop.txt b/docs/docsite/rst/user_guide/shared_snippets/with2loop.txt new file mode 100644 index 00000000..5217f942 --- /dev/null +++ b/docs/docsite/rst/user_guide/shared_snippets/with2loop.txt @@ -0,0 +1,205 @@ +In most cases, loops work best with the ``loop`` keyword instead of ``with_X`` style loops. The ``loop`` syntax is usually best expressed using filters instead of more complex use of ``query`` or ``lookup``. + +These examples show how to convert many common ``with_`` style loops to ``loop`` and filters. + +with_list +--------- + +``with_list`` is directly replaced by ``loop``. + +.. code-block:: yaml+jinja + + - name: with_list + ansible.builtin.debug: + msg: "{{ item }}" + with_list: + - one + - two + + - name: with_list -> loop + ansible.builtin.debug: + msg: "{{ item }}" + loop: + - one + - two + +with_items +---------- + +``with_items`` is replaced by ``loop`` and the ``flatten`` filter. + +.. code-block:: yaml+jinja + + - name: with_items + ansible.builtin.debug: + msg: "{{ item }}" + with_items: "{{ items }}" + + - name: with_items -> loop + ansible.builtin.debug: + msg: "{{ item }}" + loop: "{{ items|flatten(levels=1) }}" + +with_indexed_items +------------------ + +``with_indexed_items`` is replaced by ``loop``, the ``flatten`` filter and ``loop_control.index_var``. + +.. code-block:: yaml+jinja + + - name: with_indexed_items + ansible.builtin.debug: + msg: "{{ item.0 }} - {{ item.1 }}" + with_indexed_items: "{{ items }}" + + - name: with_indexed_items -> loop + ansible.builtin.debug: + msg: "{{ index }} - {{ item }}" + loop: "{{ items|flatten(levels=1) }}" + loop_control: + index_var: index + +with_flattened +-------------- + +``with_flattened`` is replaced by ``loop`` and the ``flatten`` filter. + +.. 
code-block:: yaml+jinja + + - name: with_flattened + ansible.builtin.debug: + msg: "{{ item }}" + with_flattened: "{{ items }}" + + - name: with_flattened -> loop + ansible.builtin.debug: + msg: "{{ item }}" + loop: "{{ items|flatten }}" + +with_together +------------- + +``with_together`` is replaced by ``loop`` and the ``zip`` filter. + +.. code-block:: yaml+jinja + + - name: with_together + ansible.builtin.debug: + msg: "{{ item.0 }} - {{ item.1 }}" + with_together: + - "{{ list_one }}" + - "{{ list_two }}" + + - name: with_together -> loop + ansible.builtin.debug: + msg: "{{ item.0 }} - {{ item.1 }}" + loop: "{{ list_one|zip(list_two)|list }}" + +Another example with complex data + +.. code-block:: yaml+jinja + + - name: with_together -> loop + ansible.builtin.debug: + msg: "{{ item.0 }} - {{ item.1 }} - {{ item.2 }}" + loop: "{{ data[0]|zip(*data[1:])|list }}" + vars: + data: + - ['a', 'b', 'c'] + - ['d', 'e', 'f'] + - ['g', 'h', 'i'] + +with_dict +--------- + +``with_dict`` can be substituted by ``loop`` and either the ``dictsort`` or ``dict2items`` filters. + +.. code-block:: yaml+jinja + + - name: with_dict + ansible.builtin.debug: + msg: "{{ item.key }} - {{ item.value }}" + with_dict: "{{ dictionary }}" + + - name: with_dict -> loop (option 1) + ansible.builtin.debug: + msg: "{{ item.key }} - {{ item.value }}" + loop: "{{ dictionary|dict2items }}" + + - name: with_dict -> loop (option 2) + ansible.builtin.debug: + msg: "{{ item.0 }} - {{ item.1 }}" + loop: "{{ dictionary|dictsort }}" + +with_sequence +------------- + +``with_sequence`` is replaced by ``loop`` and the ``range`` function, and potentially the ``format`` filter. + +.. 
code-block:: yaml+jinja + + - name: with_sequence + ansible.builtin.debug: + msg: "{{ item }}" + with_sequence: start=0 end=4 stride=2 format=testuser%02x + + - name: with_sequence -> loop + ansible.builtin.debug: + msg: "{{ 'testuser%02x' | format(item) }}" + # range is exclusive of the end point + loop: "{{ range(0, 4 + 1, 2)|list }}" + +with_subelements +---------------- + +``with_subelements`` is replaced by ``loop`` and the ``subelements`` filter. + +.. code-block:: yaml+jinja + + - name: with_subelements + ansible.builtin.debug: + msg: "{{ item.0.name }} - {{ item.1 }}" + with_subelements: + - "{{ users }}" + - mysql.hosts + + - name: with_subelements -> loop + ansible.builtin.debug: + msg: "{{ item.0.name }} - {{ item.1 }}" + loop: "{{ users|subelements('mysql.hosts') }}" + +with_nested/with_cartesian +-------------------------- + +``with_nested`` and ``with_cartesian`` are replaced by loop and the ``product`` filter. + +.. code-block:: yaml+jinja + + - name: with_nested + ansible.builtin.debug: + msg: "{{ item.0 }} - {{ item.1 }}" + with_nested: + - "{{ list_one }}" + - "{{ list_two }}" + + - name: with_nested -> loop + ansible.builtin.debug: + msg: "{{ item.0 }} - {{ item.1 }}" + loop: "{{ list_one|product(list_two)|list }}" + +with_random_choice +------------------ + +``with_random_choice`` is replaced by just use of the ``random`` filter, without need of ``loop``. + +.. code-block:: yaml+jinja + + - name: with_random_choice + ansible.builtin.debug: + msg: "{{ item }}" + with_random_choice: "{{ my_list }}" + + - name: with_random_choice -> loop (No loop is needed here) + ansible.builtin.debug: + msg: "{{ my_list|random }}" + tags: random diff --git a/docs/docsite/rst/user_guide/vault.rst b/docs/docsite/rst/user_guide/vault.rst new file mode 100644 index 00000000..abb2fadb --- /dev/null +++ b/docs/docsite/rst/user_guide/vault.rst @@ -0,0 +1,653 @@ +.. 
_vault: + +************************************* +Encrypting content with Ansible Vault +************************************* + +Ansible Vault encrypts variables and files so you can protect sensitive content such as passwords or keys rather than leaving it visible as plaintext in playbooks or roles. To use Ansible Vault you need one or more passwords to encrypt and decrypt content. If you store your vault passwords in a third-party tool such as a secret manager, you need a script to access them. Use the passwords with the :ref:`ansible-vault` command-line tool to create and view encrypted variables, create encrypted files, encrypt existing files, or edit, re-key, or decrypt files. You can then place encrypted content under source control and share it more safely. + +.. warning:: + * Encryption with Ansible Vault ONLY protects 'data at rest'. Once the content is decrypted ('data in use'), play and plugin authors are responsible for avoiding any secret disclosure, see :ref:`no_log <keep_secret_data>` for details on hiding output and :ref:`vault_securing_editor` for security considerations on editors you use with Ansible Vault. + +You can use encrypted variables and files in ad-hoc commands and playbooks by supplying the passwords you used to encrypt them. You can modify your ``ansible.cfg`` file to specify the location of a password file or to always prompt for the password. + +.. contents:: + :local: + +Managing vault passwords +======================== + +Managing your encrypted content is easier if you develop a strategy for managing your vault passwords. A vault password can be any string you choose. There is no special command to create a vault password. However, you need to keep track of your vault passwords. Each time you encrypt a variable or file with Ansible Vault, you must provide a password. When you use an encrypted variable or file in a command or playbook, you must provide the same password that was used to encrypt it. 
To develop a strategy for managing vault passwords, start with two questions: + + * Do you want to encrypt all your content with the same password, or use different passwords for different needs? + * Where do you want to store your password or passwords? + +Choosing between a single password and multiple passwords +--------------------------------------------------------- + +If you have a small team or few sensitive values, you can use a single password for everything you encrypt with Ansible Vault. Store your vault password securely in a file or a secret manager as described below. + +If you have a larger team or many sensitive values, you can use multiple passwords. For example, you can use different passwords for different users or different levels of access. Depending on your needs, you might want a different password for each encrypted file, for each directory, or for each environment. For example, you might have a playbook that includes two vars files, one for the dev environment and one for the production environment, encrypted with two different passwords. When you run the playbook, select the correct vault password for the environment you are targeting, using a vault ID. + +.. _vault_ids: + +Managing multiple passwords with vault IDs +------------------------------------------ + +If you use multiple vault passwords, you can differentiate one password from another with vault IDs. 
You use the vault ID in three ways: + + * Pass it with :option:`--vault-id <ansible-playbook --vault-id>` to the :ref:`ansible-vault` command when you create encrypted content + * Include it wherever you store the password for that vault ID (see :ref:`storing_vault_passwords`) + * Pass it with :option:`--vault-id <ansible-playbook --vault-id>` to the :ref:`ansible-playbook` command when you run a playbook that uses content you encrypted with that vault ID + +When you pass a vault ID as an option to the :ref:`ansible-vault` command, you add a label (a hint or nickname) to the encrypted content. This label documents which password you used to encrypt it. The encrypted variable or file includes the vault ID label in plain text in the header. The vault ID is the last element before the encrypted content. For example:: + + my_encrypted_var: !vault | + $ANSIBLE_VAULT;1.2;AES256;dev + 30613233633461343837653833666333643061636561303338373661313838333565653635353162 + 3263363434623733343538653462613064333634333464660a663633623939393439316636633863 + 61636237636537333938306331383339353265363239643939666639386530626330633337633833 + 6664656334373166630a363736393262666465663432613932613036303963343263623137386239 + 6330 + +In addition to the label, you must provide a source for the related password. The source can be a prompt, a file, or a script, depending on how you are storing your vault passwords. The pattern looks like this: + +.. code-block:: bash + + --vault-id label@source + +If your playbook uses multiple encrypted variables or files that you encrypted with different passwords, you must pass the vault IDs when you run that playbook. You can use :option:`--vault-id <ansible-playbook --vault-id>` by itself, with :option:`--vault-password-file <ansible-playbook --vault-password-file>`, or with :option:`--ask-vault-pass <ansible-playbook --ask-vault-pass>`. 
The pattern is the same as when you create encrypted content: include the label and the source for the matching password. + +See below for examples of encrypting content with vault IDs and using content encrypted with vault IDs. The :option:`--vault-id <ansible-playbook --vault-id>` option works with any Ansible command that interacts with vaults, including :ref:`ansible-vault`, :ref:`ansible-playbook`, and so on. + +Limitations of vault IDs +^^^^^^^^^^^^^^^^^^^^^^^^ + +Ansible does not enforce using the same password every time you use a particular vault ID label. You can encrypt different variables or files with the same vault ID label but different passwords. This usually happens when you type the password at a prompt and make a mistake. It is possible to use different passwords with the same vault ID label on purpose. For example, you could use each label as a reference to a class of passwords, rather than a single password. In this scenario, you must always know which specific password or file to use in context. However, you are more likely to encrypt two files with the same vault ID label and different passwords by mistake. If you encrypt two files with the same label but different passwords by accident, you can :ref:`rekey <rekeying_files>` one file to fix the issue. + +Enforcing vault ID matching +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +By default the vault ID label is only a hint to remind you which password you used to encrypt a variable or file. Ansible does not check that the vault ID in the header of the encrypted content matches the vault ID you provide when you use the content. Ansible decrypts all files and variables called by your command or playbook that are encrypted with the password you provide. To check the encrypted content and decrypt it only when the vault ID it contains matches the one you provide with ``--vault-id``, set the config option :ref:`DEFAULT_VAULT_ID_MATCH`. 
When you set :ref:`DEFAULT_VAULT_ID_MATCH`, each password is only used to decrypt data that was encrypted with the same label. This is efficient, predictable, and can reduce errors when different values are encrypted with different passwords. + +.. note:: + Even with the :ref:`DEFAULT_VAULT_ID_MATCH` setting enabled, Ansible does not enforce using the same password every time you use a particular vault ID label. + +.. _storing_vault_passwords: + +Storing and accessing vault passwords +------------------------------------- + +You can memorize your vault password, or manually copy vault passwords from any source and paste them at a command-line prompt, but most users store them securely and access them as needed from within Ansible. You have two options for storing vault passwords that work from within Ansible: in files, or in a third-party tool such as the system keyring or a secret manager. If you store your passwords in a third-party tool, you need a vault password client script to retrieve them from within Ansible. + +Storing passwords in files +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +To store a vault password in a file, enter the password as a string on a single line in the file. Make sure the permissions on the file are appropriate. Do not add password files to source control. + +.. _vault_password_client_scripts: + +Storing passwords in third-party tools with vault password client scripts +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +You can store your vault passwords on the system keyring, in a database, or in a secret manager and retrieve them from within Ansible using a vault password client script. Enter the password as a string on a single line. If your password has a vault ID, store it in a way that works with your password storage tool. 
+ +To create a vault password client script: + + * Create a file with a name ending in ``-client.py`` + * Make the file executable + * Within the script itself: + * Print the passwords to standard output + * Accept a ``--vault-id`` option + * If the script prompts for data (for example, a database password), send the prompts to standard error + +When you run a playbook that uses vault passwords stored in a third-party tool, specify the script as the source within the ``--vault-id`` flag. For example: + +.. code-block:: bash + + ansible-playbook --vault-id dev@contrib/vault/vault-keyring-client.py + +Ansible executes the client script with a ``--vault-id`` option so the script knows which vault ID label you specified. For example a script loading passwords from a secret manager can use the vault ID label to pick either the 'dev' or 'prod' password. The example command above results in the following execution of the client script: + +.. code-block:: bash + + contrib/vault/vault-keyring-client.py --vault-id dev + +For an example of a client script that loads passwords from the system keyring, see :file:`contrib/vault/vault-keyring-client.py`. + + +Encrypting content with Ansible Vault +===================================== + +Once you have a strategy for managing and storing vault passwords, you can start encrypting content. You can encrypt two types of content with Ansible Vault: variables and files. Encrypted content always includes the ``!vault`` tag, which tells Ansible and YAML that the content needs to be decrypted, and a ``|`` character, which allows multi-line strings. Encrypted content created with ``--vault-id`` also contains the vault ID label. For more details about the encryption process and the format of content encrypted with Ansible Vault, see :ref:`vault_format`. This table shows the main differences between encrypted variables and encrypted files: + +.. 
table:: + :class: documentation-table + + ====================== ================================= ==================================== + .. Encrypted variables Encrypted files + ====================== ================================= ==================================== + How much is encrypted? Variables within a plaintext file The entire file + + When is it decrypted? On demand, only when needed Whenever loaded or referenced [#f1]_ + + What can be encrypted? Only variables Any structured data file + + ====================== ================================= ==================================== + +.. [#f1] Ansible cannot know if it needs content from an encrypted file unless it decrypts the file, so it decrypts all encrypted files referenced in your playbooks and roles. + +.. _encrypting_variables: +.. _single_encrypted_variable: + +Encrypting individual variables with Ansible Vault +-------------------------------------------------- + +You can encrypt single values inside a YAML file using the :ref:`ansible-vault encrypt_string <ansible_vault_encrypt_string>` command. For one way to keep your vaulted variables safely visible, see :ref:`tip_for_variables_and_vaults`. + +Advantages and disadvantages of encrypting variables +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +With variable-level encryption, your files are still easily legible. You can mix plaintext and encrypted variables, even inline in a play or role. However, password rotation is not as simple as with file-level encryption. You cannot :ref:`rekey <rekeying_files>` encrypted variables. Also, variable-level encryption only works on variables. If you want to encrypt tasks or other content, you must encrypt the entire file. + +.. 
_encrypt_string_for_use_in_yaml: + +Creating encrypted variables +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The :ref:`ansible-vault encrypt_string <ansible_vault_encrypt_string>` command encrypts and formats any string you type (or copy or generate) into a format that can be included in a playbook, role, or variables file. To create a basic encrypted variable, pass three options to the :ref:`ansible-vault encrypt_string <ansible_vault_encrypt_string>` command: + + * a source for the vault password (prompt, file, or script, with or without a vault ID) + * the string to encrypt + * the string name (the name of the variable) + +The pattern looks like this: + +.. code-block:: bash + + ansible-vault encrypt_string <password_source> '<string_to_encrypt>' --name '<string_name_of_variable>' + +For example, to encrypt the string 'foobar' using the only password stored in 'a_password_file' and name the variable 'the_secret': + +.. code-block:: bash + + ansible-vault encrypt_string --vault-password-file a_password_file 'foobar' --name 'the_secret' + +The command above creates this content:: + + the_secret: !vault | + $ANSIBLE_VAULT;1.1;AES256 + 62313365396662343061393464336163383764373764613633653634306231386433626436623361 + 6134333665353966363534333632666535333761666131620a663537646436643839616531643561 + 63396265333966386166373632626539326166353965363262633030333630313338646335303630 + 3438626666666137650a353638643435666633633964366338633066623234616432373231333331 + 6564 + +To encrypt the string 'foooodev', add the vault ID label 'dev' with the 'dev' vault password stored in 'a_password_file', and call the encrypted variable 'the_dev_secret': + +.. 
code-block:: bash + + ansible-vault encrypt_string --vault-id dev@a_password_file 'foooodev' --name 'the_dev_secret' + +The command above creates this content:: + + the_dev_secret: !vault | + $ANSIBLE_VAULT;1.2;AES256;dev + 30613233633461343837653833666333643061636561303338373661313838333565653635353162 + 3263363434623733343538653462613064333634333464660a663633623939393439316636633863 + 61636237636537333938306331383339353265363239643939666639386530626330633337633833 + 6664656334373166630a363736393262666465663432613932613036303963343263623137386239 + 6330 + +To encrypt the string 'letmein' read from stdin, add the vault ID 'test' using the 'test' vault password stored in `a_password_file`, and name the variable 'test_db_password': + +.. code-block:: bash + + echo -n 'letmein' | ansible-vault encrypt_string --vault-id test@a_password_file --stdin-name 'test_db_password' + +.. warning:: + + Typing secret content directly at the command line (without a prompt) leaves the secret string in your shell history. Do not do this outside of testing. + +The command above creates this output:: + + Reading plaintext input from stdin. (ctrl-d to end input, twice if your content does not already have a new line) + test_db_password: !vault | + $ANSIBLE_VAULT;1.2;AES256;test + 61323931353866666336306139373937316366366138656131323863373866376666353364373761 + 3539633234313836346435323766306164626134376564330a373530313635343535343133316133 + 36643666306434616266376434363239346433643238336464643566386135356334303736353136 + 6565633133366366360a326566323363363936613664616364623437336130623133343530333739 + 3039 + +To be prompted for a string to encrypt, encrypt it with the 'dev' vault password from 'a_password_file', name the variable 'new_user_password' and give it the vault ID label 'dev': + +.. code-block:: bash + + ansible-vault encrypt_string --vault-id dev@a_password_file --stdin-name 'new_user_password' + +The command above triggers this prompt: + +.. 
code-block:: text + + Reading plaintext input from stdin. (ctrl-d to end input, twice if your content does not already have a new line) + +Type the string to encrypt (for example, 'hunter2'), hit ctrl-d, and wait. + +.. warning:: + + Do not press ``Enter`` after supplying the string to encrypt. That will add a newline to the encrypted value. + +The sequence above creates this output:: + + new_user_password: !vault | + $ANSIBLE_VAULT;1.2;AES256;dev + 37636561366636643464376336303466613062633537323632306566653533383833366462366662 + 6565353063303065303831323539656138653863353230620a653638643639333133306331336365 + 62373737623337616130386137373461306535383538373162316263386165376131623631323434 + 3866363862363335620a376466656164383032633338306162326639643635663936623939666238 + 3161 + +You can add the output from any of the examples above to any playbook, variables file, or role for future use. Encrypted variables are larger than plain-text variables, but they protect your sensitive content while leaving the rest of the playbook, variables file, or role in plain text so you can easily read it. + +Viewing encrypted variables +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +You can view the original value of an encrypted variable using the debug module. You must pass the password that was used to encrypt the variable. For example, if you stored the variable created by the last example above in a file called 'vars.yml', you could view the unencrypted value of that variable like this: + +.. 
code-block:: console + + ansible localhost -m ansible.builtin.debug -a var="new_user_password" -e "@vars.yml" --vault-id dev@a_password_file + + localhost | SUCCESS => { + "new_user_password": "hunter2" + } + + +Encrypting files with Ansible Vault +----------------------------------- + +Ansible Vault can encrypt any structured data file used by Ansible, including: + + * group variables files from inventory + * host variables files from inventory + * variables files passed to ansible-playbook with ``-e @file.yml`` or ``-e @file.json`` + * variables files loaded by ``include_vars`` or ``vars_files`` + * variables files in roles + * defaults files in roles + * tasks files + * handlers files + * binary files or other arbitrary files + +The full file is encrypted in the vault. + +.. note:: + + Ansible Vault uses an editor to create or modify encrypted files. See :ref:`vault_securing_editor` for some guidance on securing the editor. + + +Advantages and disadvantages of encrypting files +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +File-level encryption is easy to use. Password rotation for encrypted files is straightforward with the :ref:`rekey <rekeying_files>` command. Encrypting files can hide not only sensitive values, but the names of the variables you use. However, with file-level encryption the contents of files are no longer easy to access and read. This may be a problem with encrypted tasks files. When encrypting a variables file, see :ref:`tip_for_variables_and_vaults` for one way to keep references to these variables in a non-encrypted file. Ansible always decrypts the entire encrypted file when it is loaded or referenced, because Ansible cannot know if it needs the content unless it decrypts it. + +.. _creating_files: + +Creating encrypted files +^^^^^^^^^^^^^^^^^^^^^^^^ + +To create a new encrypted data file called 'foo.yml' with the 'test' vault password from 'multi_password_file': + +.. 
code-block:: bash + + ansible-vault create --vault-id test@multi_password_file foo.yml + +The tool launches an editor (whatever editor you have defined with $EDITOR, default editor is vi). Add the content. When you close the the editor session, the file is saved as encrypted data. The file header reflects the vault ID used to create it: + +.. code-block:: text + + ``$ANSIBLE_VAULT;1.2;AES256;test`` + +To create a new encrypted data file with the vault ID 'my_new_password' assigned to it and be prompted for the password: + +.. code-block:: bash + + ansible-vault create --vault-id my_new_password@prompt foo.yml + +Again, add content to the file in the editor and save. Be sure to store the new password you created at the prompt, so you can find it when you want to decrypt that file. + +.. _encrypting_files: + +Encrypting existing files +^^^^^^^^^^^^^^^^^^^^^^^^^ + +To encrypt an existing file, use the :ref:`ansible-vault encrypt <ansible_vault_encrypt>` command. This command can operate on multiple files at once. For example: + +.. code-block:: bash + + ansible-vault encrypt foo.yml bar.yml baz.yml + +To encrypt existing files with the 'project' ID and be prompted for the password: + +.. code-block:: bash + + ansible-vault encrypt --vault-id project@prompt foo.yml bar.yml baz.yml + + +.. _viewing_files: + +Viewing encrypted files +^^^^^^^^^^^^^^^^^^^^^^^ + +To view the contents of an encrypted file without editing it, you can use the :ref:`ansible-vault view <ansible_vault_view>` command: + +.. code-block:: bash + + ansible-vault view foo.yml bar.yml baz.yml + + +.. _editing_encrypted_files: + +Editing encrypted files +^^^^^^^^^^^^^^^^^^^^^^^ + +To edit an encrypted file in place, use the :ref:`ansible-vault edit <ansible_vault_edit>` command. This command decrypts the file to a temporary file, allows you to edit the content, then saves and re-encrypts the content and removes the temporary file when you close the editor. For example: + +.. 
code-block:: bash + + ansible-vault edit foo.yml + +To edit a file encrypted with the ``vault2`` password file and assigned the vault ID ``pass2``: + +.. code-block:: bash + + ansible-vault edit --vault-id pass2@vault2 foo.yml + + +.. _rekeying_files: + +Changing the password and/or vault ID on encrypted files +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +To change the password on an encrypted file or files, use the :ref:`rekey <ansible_vault_rekey>` command: + +.. code-block:: bash + + ansible-vault rekey foo.yml bar.yml baz.yml + +This command can rekey multiple data files at once and will ask for the original password and also the new password. To set a different ID for the rekeyed files, pass the new ID to ``--new-vault-id``. For example, to rekey a list of files encrypted with the 'preprod1' vault ID from the 'ppold' file to the 'preprod2' vault ID and be prompted for the new password: + +.. code-block:: bash + + ansible-vault rekey --vault-id preprod1@ppold --new-vault-id preprod2@prompt foo.yml bar.yml baz.yml + + +.. _decrypting_files: + +Decrypting encrypted files +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +If you have an encrypted file that you no longer want to keep encrypted, you can permanently decrypt it by running the :ref:`ansible-vault decrypt <ansible_vault_decrypt>` command. This command will save the file unencrypted to the disk, so be sure you do not want to :ref:`edit <ansible_vault_edit>` it instead. + +.. code-block:: bash + + ansible-vault decrypt foo.yml bar.yml baz.yml + + +.. _vault_securing_editor: + +Steps to secure your editor +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Ansible Vault relies on your configured editor, which can be a source of disclosures. Most editors have ways to prevent loss of data, but these normally rely on extra plain text files that can have a clear text copy of your secrets. Consult your editor documentation to configure the editor to avoid disclosing secure data. 
The following sections provide some guidance on common editors but should not be taken as a complete guide to securing your editor. + + +vim +... + +You can set the following ``vim`` options in command mode to avoid cases of disclosure. There may be more settings you need to modify to ensure security, especially when using plugins, so consult the ``vim`` documentation. + + +1. Disable swapfiles that act like an autosave in case of crash or interruption. + +.. code-block:: text + + set noswapfile + +2. Disable creation of backup files. + +.. code-block:: text + + set nobackup + set nowritebackup + +3. Disable the viminfo file from copying data from your current session. + +.. code-block:: text + + set viminfo= + +4. Disable copying to the system clipboard. + +.. code-block:: text + + set clipboard= + + +You can optionally add these settings in ``.vimrc`` for all files, or just specific paths or extensions. See the ``vim`` manual for details. + + +Emacs +...... + +You can set the following Emacs options to avoid cases of disclosure. There may be more settings you need to modify to ensure security, especially when using plugins, so consult the Emacs documentation. + +1. Do not copy data to the system clipboard. + +.. code-block:: text + + (setq x-select-enable-clipboard nil) + +2. Disable creation of backup files. + +.. code-block:: text + + (setq make-backup-files nil) + +3. Disable autosave files. + +.. code-block:: text + + (setq auto-save-default nil) + + +.. _playbooks_vault: +.. _providing_vault_passwords: + +Using encrypted variables and files +=================================== + +When you run a task or playbook that uses encrypted variables or files, you must provide the passwords to decrypt the variables or files. You can do this at the command line or in the playbook itself. 
+ +Passing a single password +------------------------- + +If all the encrypted variables and files your task or playbook needs use a single password, you can use the :option:`--ask-vault-pass <ansible-playbook --ask-vault-pass>` or :option:`--vault-password-file <ansible-playbook --vault-password-file>` cli options. + +To prompt for the password: + +.. code-block:: bash + + ansible-playbook --ask-vault-pass site.yml + +To retrieve the password from the :file:`/path/to/my/vault-password-file` file: + +.. code-block:: bash + + ansible-playbook --vault-password-file /path/to/my/vault-password-file site.yml + +To get the password from the vault password client script :file:`my-vault-password-client.py`: + +.. code-block:: bash + + ansible-playbook --vault-password-file my-vault-password-client.py + + +.. _specifying_vault_ids: + +Passing vault IDs +----------------- + +You can also use the :option:`--vault-id <ansible-playbook --vault-id>` option to pass a single password with its vault label. This approach is clearer when multiple vaults are used within a single inventory. + +To prompt for the password for the 'dev' vault ID: + +.. code-block:: bash + + ansible-playbook --vault-id dev@prompt site.yml + +To retrieve the password for the 'dev' vault ID from the :file:`dev-password` file: + +.. code-block:: bash + + ansible-playbook --vault-id dev@dev-password site.yml + +To get the password for the 'dev' vault ID from the vault password client script :file:`my-vault-password-client.py`: + +.. 
code-block:: bash + + ansible-playbook --vault-id dev@my-vault-password-client.py + +Passing multiple vault passwords +-------------------------------- + +If your task or playbook requires multiple encrypted variables or files that you encrypted with different vault IDs, you must use the :option:`--vault-id <ansible-playbook --vault-id>` option, passing multiple ``--vault-id`` options to specify the vault IDs ('dev', 'prod', 'cloud', 'db') and sources for the passwords (prompt, file, script). . For example, to use a 'dev' password read from a file and to be prompted for the 'prod' password: + +.. code-block:: bash + + ansible-playbook --vault-id dev@dev-password --vault-id prod@prompt site.yml + +By default the vault ID labels (dev, prod and so on) are only hints. Ansible attempts to decrypt vault content with each password. The password with the same label as the encrypted data will be tried first, after that each vault secret will be tried in the order they were provided on the command line. + +Where the encrypted data has no label, or the label does not match any of the provided labels, the passwords will be tried in the order they are specified. In the example above, the 'dev' password will be tried first, then the 'prod' password for cases where Ansible doesn't know which vault ID is used to encrypt something. + +Using ``--vault-id`` without a vault ID +--------------------------------------- + +The :option:`--vault-id <ansible-playbook --vault-id>` option can also be used without specifying a vault-id. This behavior is equivalent to :option:`--ask-vault-pass <ansible-playbook --ask-vault-pass>` or :option:`--vault-password-file <ansible-playbook --vault-password-file>` so is rarely used. + +For example, to use a password file :file:`dev-password`: + +.. code-block:: bash + + ansible-playbook --vault-id dev-password site.yml + +To prompt for the password: + +.. 
code-block:: bash + + ansible-playbook --vault-id @prompt site.yml + +To get the password from an executable script :file:`my-vault-password-client.py`: + +.. code-block:: bash + + ansible-playbook --vault-id my-vault-password-client.py + + +Configuring defaults for using encrypted content +================================================ + +Setting a default vault ID +-------------------------- + +If you use one vault ID more frequently than any other, you can set the config option :ref:`DEFAULT_VAULT_IDENTITY_LIST` to specify a default vault ID and password source. Ansible will use the default vault ID and source any time you do not specify :option:`--vault-id <ansible-playbook --vault-id>`. You can set multiple values for this option. Setting multiple values is equivalent to passing multiple :option:`--vault-id <ansible-playbook --vault-id>` cli options. + +Setting a default password source +--------------------------------- + +If you use one vault password file more frequently than any other, you can set the :ref:`DEFAULT_VAULT_PASSWORD_FILE` config option or the :envvar:`ANSIBLE_VAULT_PASSWORD_FILE` environment variable to specify that file. For example, if you set ``ANSIBLE_VAULT_PASSWORD_FILE=~/.vault_pass.txt``, Ansible will automatically search for the password in that file. This is useful if, for example, you use Ansible from a continuous integration system such as Jenkins. + +When are encrypted files made visible? +====================================== + +In general, content you encrypt with Ansible Vault remains encrypted after execution. However, there is one exception. If you pass an encrypted file as the ``src`` argument to the :ref:`copy <copy_module>`, :ref:`template <template_module>`, :ref:`unarchive <unarchive_module>`, :ref:`script <script_module>` or :ref:`assemble <assemble_module>` module, the file will not be encrypted on the target host (assuming you supply the correct vault password when you run the play). 
This behavior is intended and useful. You can encrypt a configuration file or template to avoid sharing the details of your configuration, but when you copy that configuration to servers in your environment, you want it to be decrypted so local users and processes can access it. + +.. _speeding_up_vault: + +Speeding up Ansible Vault +========================= + +If you have many encrypted files, decrypting them at startup may cause a perceptible delay. To speed this up, install the cryptography package: + +.. code-block:: bash + + pip install cryptography + + +.. _vault_format: + +Format of files encrypted with Ansible Vault +============================================ + +Ansible Vault creates UTF-8 encoded txt files. The file format includes a newline terminated header. For example:: + + $ANSIBLE_VAULT;1.1;AES256 + +or:: + + $ANSIBLE_VAULT;1.2;AES256;vault-id-label + +The header contains up to four elements, separated by semi-colons (``;``). + + 1. The format ID (``$ANSIBLE_VAULT``). Currently ``$ANSIBLE_VAULT`` is the only valid format ID. The format ID identifies content that is encrypted with Ansible Vault (via vault.is_encrypted_file()). + + 2. The vault format version (``1.X``). All supported versions of Ansible will currently default to '1.1' or '1.2' if a labeled vault ID is supplied. The '1.0' format is supported for reading only (and will be converted automatically to the '1.1' format on write). The format version is currently used as an exact string compare only (version numbers are not currently 'compared'). + + 3. The cipher algorithm used to encrypt the data (``AES256``). Currently ``AES256`` is the only supported cipher algorithm. Vault format 1.0 used 'AES', but current code always uses 'AES256'. + + 4. The vault ID label used to encrypt the data (optional, ``vault-id-label``) For example, if you encrypt a file with ``--vault-id dev@prompt``, the vault-id-label is ``dev``. + +Note: In the future, the header could change. 
Fields after the format ID and format version depend on the format version, and future vault format versions may add more cipher algorithm options and/or additional fields. + +The rest of the content of the file is the 'vaulttext'. The vaulttext is a text armored version of the +encrypted ciphertext. Each line is 80 characters wide, except for the last line which may be shorter. + +Ansible Vault payload format 1.1 - 1.2 +-------------------------------------- + +The vaulttext is a concatenation of the ciphertext and a SHA256 digest with the result 'hexlifyied'. + +'hexlify' refers to the ``hexlify()`` method of the Python Standard Library's `binascii <https://docs.python.org/3/library/binascii.html>`_ module. + +hexlify()'ed result of: + +- hexlify()'ed string of the salt, followed by a newline (``0x0a``) +- hexlify()'ed string of the crypted HMAC, followed by a newline. The HMAC is: + + - a `RFC2104 <https://www.ietf.org/rfc/rfc2104.txt>`_ style HMAC + + - inputs are: + + - The AES256 encrypted ciphertext + - A PBKDF2 key. This key, the cipher key, and the cipher IV are generated from: + + - the salt, in bytes + - 10000 iterations + - SHA256() algorithm + - the first 32 bytes are the cipher key + - the second 32 bytes are the HMAC key + - remaining 16 bytes are the cipher IV + +- hexlify()'ed string of the ciphertext. The ciphertext is: + + - AES256 encrypted data. The data is encrypted using: + + - AES-CTR stream cipher + - cipher key + - IV + - a 128 bit counter block seeded from an integer IV + - the plaintext + + - the original plaintext + - padding up to the AES256 blocksize. (The data used for padding is based on `RFC5652 <https://tools.ietf.org/html/rfc5652#section-6.3>`_) diff --git a/docs/docsite/rst/user_guide/windows.rst b/docs/docsite/rst/user_guide/windows.rst new file mode 100644 index 00000000..24277189 --- /dev/null +++ b/docs/docsite/rst/user_guide/windows.rst @@ -0,0 +1,21 @@ +.. 
_windows: + +Windows Guides +`````````````` + +The following sections provide information on managing +Windows hosts with Ansible. + +Because Windows is a non-POSIX-compliant operating system, there are differences between +how Ansible interacts with them and the way Windows works. These guides will highlight +some of the differences between Linux/Unix hosts and hosts running Windows. + +.. toctree:: + :maxdepth: 2 + + windows_setup + windows_winrm + windows_usage + windows_dsc + windows_performance + windows_faq diff --git a/docs/docsite/rst/user_guide/windows_dsc.rst b/docs/docsite/rst/user_guide/windows_dsc.rst new file mode 100644 index 00000000..40416305 --- /dev/null +++ b/docs/docsite/rst/user_guide/windows_dsc.rst @@ -0,0 +1,505 @@ +Desired State Configuration +=========================== + +.. contents:: Topics + :local: + +What is Desired State Configuration? +```````````````````````````````````` +Desired State Configuration, or DSC, is a tool built into PowerShell that can +be used to define a Windows host setup through code. The overall purpose of DSC +is the same as Ansible, it is just executed in a different manner. Since +Ansible 2.4, the ``win_dsc`` module has been added and can be used to leverage +existing DSC resources when interacting with a Windows host. + +More details on DSC can be viewed at `DSC Overview <https://docs.microsoft.com/en-us/powershell/scripting/dsc/overview/overview>`_. + +Host Requirements +````````````````` +To use the ``win_dsc`` module, a Windows host must have PowerShell v5.0 or +newer installed. All supported hosts, except for Windows Server 2008 (non R2) can be +upgraded to PowerShell v5. + +Once the PowerShell requirements have been met, using DSC is as simple as +creating a task with the ``win_dsc`` module. + +Why Use DSC? +```````````` +DSC and Ansible modules have a common goal which is to define and ensure the state of a +resource. 
Because of +this, resources like the DSC `File resource <https://docs.microsoft.com/en-us/powershell/scripting/dsc/reference/resources/windows/fileresource>`_ +and Ansible ``win_file`` can be used to achieve the same result. Deciding which to use depends +on the scenario. + +Reasons for using an Ansible module over a DSC resource: + +* The host does not support PowerShell v5.0, or it cannot easily be upgraded +* The DSC resource does not offer a feature present in an Ansible module. For example + win_regedit can manage the ``REG_NONE`` property type, while the DSC + ``Registry`` resource cannot +* DSC resources have limited check mode support, while some Ansible modules have + better checks +* DSC resources do not support diff mode, while some Ansible modules do +* Custom resources require further installation steps to be run on the host + beforehand, while Ansible modules are built-in to Ansible +* There are bugs in a DSC resource where an Ansible module works + +Reasons for using a DSC resource over an Ansible module: + +* The Ansible module does not support a feature present in a DSC resource +* There is no Ansible module available +* There are bugs in an existing Ansible module + +In the end, it doesn't matter whether the task is performed with DSC or an +Ansible module; what matters is that the task is performed correctly and the +playbooks are still readable. If you have more experience with DSC over Ansible +and it does the job, just use DSC for that task. + +How to Use DSC? +``````````````` +The ``win_dsc`` module takes in a free-form of options so that it changes +according to the resource it is managing. A list of built in resources can be +found at `resources <https://docs.microsoft.com/en-us/powershell/scripting/dsc/resources/resources>`_. + +Using the `Registry <https://docs.microsoft.com/en-us/powershell/scripting/dsc/reference/resources/windows/registryresource>`_ +resource as an example, this is the DSC definition as documented by Microsoft: + +.. 
code-block:: powershell + + Registry [string] #ResourceName + { + Key = [string] + ValueName = [string] + [ Ensure = [string] { Enable | Disable } ] + [ Force = [bool] ] + [ Hex = [bool] ] + [ DependsOn = [string[]] ] + [ ValueData = [string[]] ] + [ ValueType = [string] { Binary | Dword | ExpandString | MultiString | Qword | String } ] + } + +When defining the task, ``resource_name`` must be set to the DSC resource being +used - in this case the ``resource_name`` should be set to ``Registry``. The +``module_version`` can refer to a specific version of the DSC resource +installed; if left blank it will default to the latest version. The other +options are parameters that are used to define the resource, such as ``Key`` and +``ValueName``. While the options in the task are not case sensitive, +keeping the case as-is is recommended because it makes it easier to distinguish DSC +resource options from Ansible's ``win_dsc`` options. + +This is what the Ansible task version of the above DSC Registry resource would look like: + +.. code-block:: yaml+jinja + + - name: Use win_dsc module with the Registry DSC resource + win_dsc: + resource_name: Registry + Ensure: Present + Key: HKEY_LOCAL_MACHINE\SOFTWARE\ExampleKey + ValueName: TestValue + ValueData: TestData + +Starting in Ansible 2.8, the ``win_dsc`` module automatically validates the +input options from Ansible with the DSC definition. This means Ansible will +fail if the option name is incorrect, a mandatory option is not set, or the +value is not a valid choice. When running Ansible with a verbosity level of 3 +or more (``-vvv``), the return value will contain the possible invocation +options based on the ``resource_name`` specified. Here is an example of the +invocation output for the above ``Registry`` task: + +.. 
code-block:: ansible-output + + changed: [2016] => { + "changed": true, + "invocation": { + "module_args": { + "DependsOn": null, + "Ensure": "Present", + "Force": null, + "Hex": null, + "Key": "HKEY_LOCAL_MACHINE\\SOFTWARE\\ExampleKey", + "PsDscRunAsCredential_password": null, + "PsDscRunAsCredential_username": null, + "ValueData": [ + "TestData" + ], + "ValueName": "TestValue", + "ValueType": null, + "module_version": "latest", + "resource_name": "Registry" + } + }, + "module_version": "1.1", + "reboot_required": false, + "verbose_set": [ + "Perform operation 'Invoke CimMethod' with following parameters, ''methodName' = ResourceSet,'className' = MSFT_DSCLocalConfigurationManager,'namespaceName' = root/Microsoft/Windows/DesiredStateConfiguration'.", + "An LCM method call arrived from computer SERVER2016 with user sid S-1-5-21-3088887838-4058132883-1884671576-1105.", + "[SERVER2016]: LCM: [ Start Set ] [[Registry]DirectResourceAccess]", + "[SERVER2016]: [[Registry]DirectResourceAccess] (SET) Create registry key 'HKLM:\\SOFTWARE\\ExampleKey'", + "[SERVER2016]: [[Registry]DirectResourceAccess] (SET) Set registry key value 'HKLM:\\SOFTWARE\\ExampleKey\\TestValue' to 'TestData' of type 'String'", + "[SERVER2016]: LCM: [ End Set ] [[Registry]DirectResourceAccess] in 0.1930 seconds.", + "[SERVER2016]: LCM: [ End Set ] in 0.2720 seconds.", + "Operation 'Invoke CimMethod' complete.", + "Time taken for configuration job to complete is 0.402 seconds" + ], + "verbose_test": [ + "Perform operation 'Invoke CimMethod' with following parameters, ''methodName' = ResourceTest,'className' = MSFT_DSCLocalConfigurationManager,'namespaceName' = root/Microsoft/Windows/DesiredStateConfiguration'.", + "An LCM method call arrived from computer SERVER2016 with user sid S-1-5-21-3088887838-4058132883-1884671576-1105.", + "[SERVER2016]: LCM: [ Start Test ] [[Registry]DirectResourceAccess]", + "[SERVER2016]: [[Registry]DirectResourceAccess] Registry key 'HKLM:\\SOFTWARE\\ExampleKey' does not 
exist", + "[SERVER2016]: LCM: [ End Test ] [[Registry]DirectResourceAccess] False in 0.2510 seconds.", + "[SERVER2016]: LCM: [ End Set ] in 0.3310 seconds.", + "Operation 'Invoke CimMethod' complete.", + "Time taken for configuration job to complete is 0.475 seconds" + ] + } + +The ``invocation.module_args`` key shows the actual values that were set as +well as other possible values that were not set. Unfortunately this will not +show the default value for a DSC property, only what was set from the Ansible +task. Any ``*_password`` option will be masked in the output for security +reasons, if there are any other sensitive module options, set ``no_log: True`` +on the task to stop all task output from being logged. + + +Property Types +-------------- +Each DSC resource property has a type that is associated with it. Ansible +will try to convert the defined options to the correct type during execution. +For simple types like ``[string]`` and ``[bool]`` this is a simple operation, +but complex types like ``[PSCredential]`` or arrays (like ``[string[]]``) this +require certain rules. + +PSCredential +++++++++++++ +A ``[PSCredential]`` object is used to store credentials in a secure way, but +Ansible has no way to serialize this over JSON. To set a DSC PSCredential property, +the definition of that parameter should have two entries that are suffixed with +``_username`` and ``_password`` for the username and password respectively. +For example: + +.. code-block:: yaml+jinja + + PsDscRunAsCredential_username: '{{ ansible_user }}' + PsDscRunAsCredential_password: '{{ ansible_password }}' + + SourceCredential_username: AdminUser + SourceCredential_password: PasswordForAdminUser + +.. Note:: On versions of Ansible older than 2.8, you should set ``no_log: yes`` + on the task definition in Ansible to ensure any credentials used are not + stored in any log file or console output. 
+ +A ``[PSCredential]`` is defined with ``EmbeddedInstance("MSFT_Credential")`` in +a DSC resource MOF definition. + +CimInstance Type +++++++++++++++++ +A ``[CimInstance]`` object is used by DSC to store a dictionary object based on +a custom class defined by that resource. Defining a value that takes in a +``[CimInstance]`` in YAML is the same as defining a dictionary in YAML. +For example, to define a ``[CimInstance]`` value in Ansible: + +.. code-block:: yaml+jinja + + # [CimInstance]AuthenticationInfo == MSFT_xWebAuthenticationInformation + AuthenticationInfo: + Anonymous: no + Basic: yes + Digest: no + Windows: yes + +In the above example, the CIM instance is a representation of the class +`MSFT_xWebAuthenticationInformation <https://github.com/dsccommunity/xWebAdministration/blob/master/source/DSCResources/MSFT_xWebSite/MSFT_xWebSite.schema.mof>`_. +This class accepts four boolean variables, ``Anonymous``, ``Basic``, +``Digest``, and ``Windows``. The keys to use in a ``[CimInstance]`` depend on +the class it represents. Please read through the documentation of the resource +to determine the keys that can be used and the types of each key value. The +class definition is typically located in the ``<resource name>.schema.mof``. + +HashTable Type +++++++++++++++ +A ``[HashTable]`` object is also a dictionary but does not have a strict set of +keys that can/need to be defined. Like a ``[CimInstance]``, define it like a +normal dictionary value in YAML. A ``[HashTable]]`` is defined with +``EmbeddedInstance("MSFT_KeyValuePair")`` in a DSC resource MOF definition. + +Arrays +++++++ +Simple type arrays like ``[string[]]`` or ``[UInt32[]]`` are defined as a list +or as a comma separated string which are then cast to their type. Using a list +is recommended because the values are not manually parsed by the ``win_dsc`` +module before being passed to the DSC engine. For example, to define a simple +type array in Ansible: + +.. 
code-block:: yaml+jinja + + # [string[]] + ValueData: entry1, entry2, entry3 + ValueData: + - entry1 + - entry2 + - entry3 + + # [UInt32[]] + ReturnCode: 0,3010 + ReturnCode: + - 0 + - 3010 + +Complex type arrays like ``[CimInstance[]]`` (array of dicts), can be defined +like this example: + +.. code-block:: yaml+jinja + + # [CimInstance[]]BindingInfo == MSFT_xWebBindingInformation + BindingInfo: + - Protocol: https + Port: 443 + CertificateStoreName: My + CertificateThumbprint: C676A89018C4D5902353545343634F35E6B3A659 + HostName: DSCTest + IPAddress: '*' + SSLFlags: 1 + - Protocol: http + Port: 80 + IPAddress: '*' + +The above example, is an array with two values of the class `MSFT_xWebBindingInformation <https://github.com/dsccommunity/xWebAdministration/blob/master/source/DSCResources/MSFT_xWebSite/MSFT_xWebSite.schema.mof>`_. +When defining a ``[CimInstance[]]``, be sure to read the resource documentation +to find out what keys to use in the definition. + +DateTime +++++++++ +A ``[DateTime]`` object is a DateTime string representing the date and time in +the `ISO 8601 <https://www.w3.org/TR/NOTE-datetime>`_ date time format. The +value for a ``[DateTime]`` field should be quoted in YAML to ensure the string +is properly serialized to the Windows host. Here is an example of how to define +a ``[DateTime]`` value in Ansible: + +.. code-block:: yaml+jinja + + # As UTC-0 (No timezone) + DateTime: '2019-02-22T13:57:31.2311892+00:00' + + # As UTC+4 + DateTime: '2019-02-22T17:57:31.2311892+04:00' + + # As UTC-4 + DateTime: '2019-02-22T09:57:31.2311892-04:00' + +All the values above are equal to a UTC date time of February 22nd 2019 at +1:57pm with 31 seconds and 2311892 milliseconds. + +Run As Another User +------------------- +By default, DSC runs each resource as the SYSTEM account and not the account +that Ansible use to run the module. 
This means that resources that are dynamically +loaded based on a user profile, like the ``HKEY_CURRENT_USER`` registry hive, +will be loaded under the ``SYSTEM`` profile. The parameter +``PsDscRunAsCredential`` is a parameter that can be set for every DSC resource +force the DSC engine to run under a different account. As +``PsDscRunAsCredential`` has a type of ``PSCredential``, it is defined with the +``_username`` and ``_password`` suffix. + +Using the Registry resource type as an example, this is how to define a task +to access the ``HKEY_CURRENT_USER`` hive of the Ansible user: + +.. code-block:: yaml+jinja + + - name: Use win_dsc with PsDscRunAsCredential to run as a different user + win_dsc: + resource_name: Registry + Ensure: Present + Key: HKEY_CURRENT_USER\ExampleKey + ValueName: TestValue + ValueData: TestData + PsDscRunAsCredential_username: '{{ ansible_user }}' + PsDscRunAsCredential_password: '{{ ansible_password }}' + no_log: yes + +Custom DSC Resources +```````````````````` +DSC resources are not limited to the built-in options from Microsoft. Custom +modules can be installed to manage other resources that are not usually available. + +Finding Custom DSC Resources +---------------------------- +You can use the +`PSGallery <https://www.powershellgallery.com/>`_ to find custom resources, along with documentation on how to install them on a Windows host. + +The ``Find-DscResource`` cmdlet can also be used to find custom resources. For example: + +.. code-block:: powershell + + # Find all DSC resources in the configured repositories + Find-DscResource + + # Find all DSC resources that relate to SQL + Find-DscResource -ModuleName "*sql*" + +.. Note:: DSC resources developed by Microsoft that start with ``x``, means the + resource is experimental and comes with no support. 
+ +Installing a Custom Resource +---------------------------- +There are three ways that a DSC resource can be installed on a host: + +* Manually with the ``Install-Module`` cmdlet +* Using the ``win_psmodule`` Ansible module +* Saving the module manually and copying it another host + +This is an example of installing the ``xWebAdministration`` resources using +``win_psmodule``: + +.. code-block:: yaml+jinja + + - name: Install xWebAdministration DSC resource + win_psmodule: + name: xWebAdministration + state: present + +Once installed, the win_dsc module will be able to use the resource by referencing it +with the ``resource_name`` option. + +The first two methods above only work when the host has access to the internet. +When a host does not have internet access, the module must first be installed +using the methods above on another host with internet access and then copied +across. To save a module to a local filepath, the following PowerShell cmdlet +can be run:: + + Save-Module -Name xWebAdministration -Path C:\temp + +This will create a folder called ``xWebAdministration`` in ``C:\temp`` which +can be copied to any host. For PowerShell to see this offline resource, it must +be copied to a directory set in the ``PSModulePath`` environment variable. +In most cases the path ``C:\Program Files\WindowsPowerShell\Module`` is set +through this variable, but the ``win_path`` module can be used to add different +paths. + +Examples +```````` +Extract a zip file +------------------ + +.. code-block:: yaml+jinja + + - name: Extract a zip file + win_dsc: + resource_name: Archive + Destination: C:\temp\output + Path: C:\temp\zip.zip + Ensure: Present + +Create a directory +------------------ + +.. 
code-block:: yaml+jinja + + - name: Create file with some text + win_dsc: + resource_name: File + DestinationPath: C:\temp\file + Contents: | + Hello + World + Ensure: Present + Type: File + + - name: Create directory that is hidden is set with the System attribute + win_dsc: + resource_name: File + DestinationPath: C:\temp\hidden-directory + Attributes: Hidden,System + Ensure: Present + Type: Directory + +Interact with Azure +------------------- + +.. code-block:: yaml+jinja + + - name: Install xAzure DSC resources + win_psmodule: + name: xAzure + state: present + + - name: Create virtual machine in Azure + win_dsc: + resource_name: xAzureVM + ImageName: a699494373c04fc0bc8f2bb1389d6106__Windows-Server-2012-R2-201409.01-en.us-127GB.vhd + Name: DSCHOST01 + ServiceName: ServiceName + StorageAccountName: StorageAccountName + InstanceSize: Medium + Windows: yes + Ensure: Present + Credential_username: '{{ ansible_user }}' + Credential_password: '{{ ansible_password }}' + +Setup IIS Website +----------------- + +.. 
code-block:: yaml+jinja + + - name: Install xWebAdministration module + win_psmodule: + name: xWebAdministration + state: present + + - name: Install IIS features that are required + win_dsc: + resource_name: WindowsFeature + Name: '{{ item }}' + Ensure: Present + loop: + - Web-Server + - Web-Asp-Net45 + + - name: Setup web content + win_dsc: + resource_name: File + DestinationPath: C:\inetpub\IISSite\index.html + Type: File + Contents: | + <html> + <head><title>IIS Site</title></head> + <body>This is the body</body> + </html> + Ensure: present + + - name: Create new website + win_dsc: + resource_name: xWebsite + Name: NewIISSite + State: Started + PhysicalPath: C:\inetpub\IISSite\index.html + BindingInfo: + - Protocol: https + Port: 8443 + CertificateStoreName: My + CertificateThumbprint: C676A89018C4D5902353545343634F35E6B3A659 + HostName: DSCTest + IPAddress: '*' + SSLFlags: 1 + - Protocol: http + Port: 8080 + IPAddress: '*' + AuthenticationInfo: + Anonymous: no + Basic: yes + Digest: no + Windows: yes + +.. seealso:: + + :ref:`playbooks_intro` + An introduction to playbooks + :ref:`playbooks_best_practices` + Tips and tricks for playbooks + :ref:`List of Windows Modules <windows_modules>` + Windows specific module list, all implemented in PowerShell + `User Mailing List <https://groups.google.com/group/ansible-project>`_ + Have a question? Stop by the google group! + `irc.freenode.net <http://irc.freenode.net>`_ + #ansible IRC chat channel diff --git a/docs/docsite/rst/user_guide/windows_faq.rst b/docs/docsite/rst/user_guide/windows_faq.rst new file mode 100644 index 00000000..75e99d2e --- /dev/null +++ b/docs/docsite/rst/user_guide/windows_faq.rst @@ -0,0 +1,236 @@ +.. _windows_faq: + +Windows Frequently Asked Questions +================================== + +Here are some commonly asked questions in regards to Ansible and Windows and +their answers. + +.. note:: This document covers questions about managing Microsoft Windows servers with Ansible. 
+ For questions about Ansible Core, please see the + :ref:`general FAQ page <ansible_faq>`. + +Does Ansible work with Windows XP or Server 2003? +`````````````````````````````````````````````````` +Ansible does not work with Windows XP or Server 2003 hosts. Ansible does work with these Windows operating system versions: + +* Windows Server 2008 :sup:`1` +* Windows Server 2008 R2 :sup:`1` +* Windows Server 2012 +* Windows Server 2012 R2 +* Windows Server 2016 +* Windows Server 2019 +* Windows 7 :sup:`1` +* Windows 8.1 +* Windows 10 + +1 - See the :ref:`Server 2008 FAQ <windows_faq_server2008>` entry for more details. + +Ansible also has minimum PowerShell version requirements - please see +:ref:`windows_setup` for the latest information. + +.. _windows_faq_server2008: + +Are Server 2008, 2008 R2 and Windows 7 supported? +````````````````````````````````````````````````` +Microsoft ended Extended Support for these versions of Windows on January 14th, 2020, and Ansible deprecated official support in the 2.10 release. No new feature development will occur targeting these operating systems, and automated testing has ceased. However, existing modules and features will likely continue to work, and simple pull requests to resolve issues with these Windows versions may be accepted. + +Can I manage Windows Nano Server with Ansible? +`````````````````````````````````````````````` +Ansible does not currently work with Windows Nano Server, since it does +not have access to the full .NET Framework that is used by the majority of the +modules and internal components. + +Can Ansible run on Windows? +``````````````````````````` +No, Ansible can only manage Windows hosts. Ansible cannot run on a Windows host +natively, though it can run under the Windows Subsystem for Linux (WSL). + +.. note:: The Windows Subsystem for Linux is not supported by Ansible and + should not be used for production systems. 
+ +To install Ansible on WSL, the following commands +can be run in the bash terminal: + +.. code-block:: shell + + sudo apt-get update + sudo apt-get install python-pip git libffi-dev libssl-dev -y + pip install --user ansible pywinrm + +To run Ansible from source instead of a release on the WSL, simply uninstall the pip +installed version and then clone the git repo. + +.. code-block:: shell + + pip uninstall ansible -y + git clone https://github.com/ansible/ansible.git + source ansible/hacking/env-setup + + # To enable Ansible on login, run the following + echo ". ~/ansible/hacking/env-setup -q' >> ~/.bashrc + +Can I use SSH keys to authenticate to Windows hosts? +```````````````````````````````````````````````````` +You cannot use SSH keys with the WinRM or PSRP connection plugins. +These connection plugins use X509 certificates for authentication instead +of the SSH key pairs that SSH uses. + +The way X509 certificates are generated and mapped to a user is different +from the SSH implementation; consult the :ref:`windows_winrm` documentation for +more information. + +Ansible 2.8 has added an experimental option to use the SSH connection plugin, +which uses SSH keys for authentication, for Windows servers. See :ref:`this question <windows_faq_ssh>` +for more information. + +.. _windows_faq_winrm: + +Why can I run a command locally that does not work under Ansible? +````````````````````````````````````````````````````````````````` +Ansible executes commands through WinRM. These processes are different from +running a command locally in these ways: + +* Unless using an authentication option like CredSSP or Kerberos with + credential delegation, the WinRM process does not have the ability to + delegate the user's credentials to a network resource, causing ``Access is + Denied`` errors. + +* All processes run under WinRM are in a non-interactive session. Applications + that require an interactive session will not work. 
+ +* When running through WinRM, Windows restricts access to internal Windows + APIs like the Windows Update API and DPAPI, which some installers and + programs rely on. + +Some ways to bypass these restrictions are to: + +* Use ``become``, which runs a command as it would when run locally. This will + bypass most WinRM restrictions, as Windows is unaware the process is running + under WinRM when ``become`` is used. See the :ref:`become` documentation for more + information. + +* Use a scheduled task, which can be created with ``win_scheduled_task``. Like + ``become``, it will bypass all WinRM restrictions, but it can only be used to run + commands, not modules. + +* Use ``win_psexec`` to run a command on the host. PSExec does not use WinRM + and so will bypass any of the restrictions. + +* To access network resources without any of these workarounds, you can use + CredSSP or Kerberos with credential delegation enabled. + +See :ref:`become` more info on how to use become. The limitations section at +:ref:`windows_winrm` has more details around WinRM limitations. + +This program won't install on Windows with Ansible +`````````````````````````````````````````````````` +See :ref:`this question <windows_faq_winrm>` for more information about WinRM limitations. + +What Windows modules are available? +``````````````````````````````````` +Most of the Ansible modules in Ansible Core are written for a combination of +Linux/Unix machines and arbitrary web services. These modules are written in +Python and most of them do not work on Windows. + +Because of this, there are dedicated Windows modules that are written in +PowerShell and are meant to be run on Windows hosts. A list of these modules +can be found :ref:`here <windows_modules>`. 
+ +In addition, the following Ansible Core modules/action-plugins work with Windows: + +* add_host +* assert +* async_status +* debug +* fail +* fetch +* group_by +* include +* include_role +* include_vars +* meta +* pause +* raw +* script +* set_fact +* set_stats +* setup +* slurp +* template (also: win_template) +* wait_for_connection + +Can I run Python modules on Windows hosts? +`````````````````````````````````````````` +No, the WinRM connection protocol is set to use PowerShell modules, so Python +modules will not work. A way to bypass this issue to use +``delegate_to: localhost`` to run a Python module on the Ansible controller. +This is useful if during a playbook, an external service needs to be contacted +and there is no equivalent Windows module available. + +.. _windows_faq_ssh: + +Can I connect to Windows hosts over SSH? +```````````````````````````````````````` +Ansible 2.8 has added an experimental option to use the SSH connection plugin +to manage Windows hosts. To connect to Windows hosts over SSH, you must install and configure the `Win32-OpenSSH <https://github.com/PowerShell/Win32-OpenSSH>`_ +fork that is in development with Microsoft on +the Windows host(s). While most of the basics should work with SSH, +``Win32-OpenSSH`` is rapidly changing, with new features added and bugs +fixed in every release. It is highly recommend you `install <https://github.com/PowerShell/Win32-OpenSSH/wiki/Install-Win32-OpenSSH>`_ the latest release +of ``Win32-OpenSSH`` from the GitHub Releases page when using it with Ansible +on Windows hosts. + +To use SSH as the connection to a Windows host, set the following variables in +the inventory:: + + ansible_connection=ssh + + # Set either cmd or powershell not both + ansible_shell_type=cmd + # ansible_shell_type=powershell + +The value for ``ansible_shell_type`` should either be ``cmd`` or ``powershell``. 
+Use ``cmd`` if the ``DefaultShell`` has not been configured on the SSH service +and ``powershell`` if that has been set as the ``DefaultShell``. + +Why is connecting to a Windows host via SSH failing? +```````````````````````````````````````````````````` +Unless you are using ``Win32-OpenSSH`` as described above, you must connect to +Windows hosts using :ref:`windows_winrm`. If your Ansible output indicates that +SSH was used, either you did not set the connection vars properly or the host is not inheriting them correctly. + +Make sure ``ansible_connection: winrm`` is set in the inventory for the Windows +host(s). + +Why are my credentials being rejected? +`````````````````````````````````````` +This can be due to a myriad of reasons unrelated to incorrect credentials. + +See HTTP 401/Credentials Rejected at :ref:`windows_setup` for a more detailed +guide of this could mean. + +Why am I getting an error SSL CERTIFICATE_VERIFY_FAILED? +```````````````````````````````````````````````````````` +When the Ansible controller is running on Python 2.7.9+ or an older version of Python that +has backported SSLContext (like Python 2.7.5 on RHEL 7), the controller will attempt to +validate the certificate WinRM is using for an HTTPS connection. If the +certificate cannot be validated (such as in the case of a self signed cert), it will +fail the verification process. + +To ignore certificate validation, add +``ansible_winrm_server_cert_validation: ignore`` to inventory for the Windows +host. + +.. seealso:: + + :ref:`windows` + The Windows documentation index + :ref:`about_playbooks` + An introduction to playbooks + :ref:`playbooks_best_practices` + Tips and tricks for playbooks + `User Mailing List <https://groups.google.com/group/ansible-project>`_ + Have a question? Stop by the google group! 
+ `irc.freenode.net <http://irc.freenode.net>`_ + #ansible IRC chat channel diff --git a/docs/docsite/rst/user_guide/windows_performance.rst b/docs/docsite/rst/user_guide/windows_performance.rst new file mode 100644 index 00000000..5eb5dbbd --- /dev/null +++ b/docs/docsite/rst/user_guide/windows_performance.rst @@ -0,0 +1,61 @@ +.. _windows_performance: + +Windows performance +=================== +This document offers some performance optimizations you might like to apply to +your Windows hosts to speed them up specifically in the context of using Ansible +with them, and generally. + +Optimise PowerShell performance to reduce Ansible task overhead +--------------------------------------------------------------- +To speed up the startup of PowerShell by around 10x, run the following +PowerShell snippet in an Administrator session. Expect it to take tens of +seconds. + +.. note:: + + If native images have already been created by the ngen task or service, you + will observe no difference in performance (but this snippet will at that + point execute faster than otherwise). + +.. code-block:: powershell + + function Optimize-PowershellAssemblies { + # NGEN powershell assembly, improves startup time of powershell by 10x + $old_path = $env:path + try { + $env:path = [Runtime.InteropServices.RuntimeEnvironment]::GetRuntimeDirectory() + [AppDomain]::CurrentDomain.GetAssemblies() | % { + if (! $_.location) {continue} + $Name = Split-Path $_.location -leaf + if ($Name.startswith("Microsoft.PowerShell.")) { + Write-Progress -Activity "Native Image Installation" -Status "$name" + ngen install $_.location | % {"`t$_"} + } + } + } finally { + $env:path = $old_path + } + } + Optimize-PowershellAssemblies + +PowerShell is used by every Windows Ansible module. This optimisation reduces +the time PowerShell takes to start up, removing that overhead from every invocation. 
+ +This snippet uses `the native image generator, ngen <https://docs.microsoft.com/en-us/dotnet/framework/tools/ngen-exe-native-image-generator#WhenToUse>`_ +to pre-emptively create native images for the assemblies that PowerShell relies on. + +Fix high-CPU-on-boot for VMs/cloud instances +-------------------------------------------- +If you are creating golden images to spawn instances from, you can avoid a disruptive +high CPU task near startup via `processing the ngen queue <https://docs.microsoft.com/en-us/dotnet/framework/tools/ngen-exe-native-image-generator#native-image-service>`_ +within your golden image creation, if you know the CPU types won't change between +golden image build process and runtime. + +Place the following near the end of your playbook, bearing in mind the factors that can cause native images to be invalidated (`see MSDN <https://docs.microsoft.com/en-us/dotnet/framework/tools/ngen-exe-native-image-generator#native-images-and-jit-compilation>`_). + +.. code-block:: yaml + + - name: generate native .NET images for CPU + win_dotnet_ngen: + diff --git a/docs/docsite/rst/user_guide/windows_setup.rst b/docs/docsite/rst/user_guide/windows_setup.rst new file mode 100644 index 00000000..910fa06f --- /dev/null +++ b/docs/docsite/rst/user_guide/windows_setup.rst @@ -0,0 +1,573 @@ +.. _windows_setup: + +Setting up a Windows Host +========================= +This document discusses the setup that is required before Ansible can communicate with a Microsoft Windows host. + +.. contents:: + :local: + +Host Requirements +````````````````` +For Ansible to communicate to a Windows host and use Windows modules, the +Windows host must meet these requirements: + +* Ansible can generally manage Windows versions under current + and extended support from Microsoft. Ansible can manage desktop OSs including + Windows 7, 8.1, and 10, and server OSs including Windows Server 2008, + 2008 R2, 2012, 2012 R2, 2016, and 2019. 
+ +* Ansible requires PowerShell 3.0 or newer and at least .NET 4.0 to be + installed on the Windows host. + +* A WinRM listener should be created and activated. More details for this can be + found below. + +.. Note:: While these are the base requirements for Ansible connectivity, some Ansible + modules have additional requirements, such as a newer OS or PowerShell + version. Please consult the module's documentation page + to determine whether a host meets those requirements. + +Upgrading PowerShell and .NET Framework +--------------------------------------- +Ansible requires PowerShell version 3.0 and .NET Framework 4.0 or newer to function on older operating systems like Server 2008 and Windows 7. The base image does not meet this +requirement. You can use the `Upgrade-PowerShell.ps1 <https://github.com/jborean93/ansible-windows/blob/master/scripts/Upgrade-PowerShell.ps1>`_ script to update these. + +This is an example of how to run this script from PowerShell: + +.. code-block:: powershell + + $url = "https://raw.githubusercontent.com/jborean93/ansible-windows/master/scripts/Upgrade-PowerShell.ps1" + $file = "$env:temp\Upgrade-PowerShell.ps1" + $username = "Administrator" + $password = "Password" + + (New-Object -TypeName System.Net.WebClient).DownloadFile($url, $file) + Set-ExecutionPolicy -ExecutionPolicy Unrestricted -Force + + # Version can be 3.0, 4.0 or 5.1 + &$file -Version 5.1 -Username $username -Password $password -Verbose + +Once completed, you will need to remove auto logon +and set the execution policy back to the default of ``Restricted``. You can +do this with the following PowerShell commands: + +.. 
code-block:: powershell + + # This isn't needed but is a good security practice to complete + Set-ExecutionPolicy -ExecutionPolicy Restricted -Force + + $reg_winlogon_path = "HKLM:\Software\Microsoft\Windows NT\CurrentVersion\Winlogon" + Set-ItemProperty -Path $reg_winlogon_path -Name AutoAdminLogon -Value 0 + Remove-ItemProperty -Path $reg_winlogon_path -Name DefaultUserName -ErrorAction SilentlyContinue + Remove-ItemProperty -Path $reg_winlogon_path -Name DefaultPassword -ErrorAction SilentlyContinue + +The script works by checking to see what programs need to be installed +(such as .NET Framework 4.5.2) and what PowerShell version is required. If a reboot +is required and the ``username`` and ``password`` parameters are set, the +script will automatically reboot and logon when it comes back up from the +reboot. The script will continue until no more actions are required and the +PowerShell version matches the target version. If the ``username`` and +``password`` parameters are not set, the script will prompt the user to +manually reboot and logon when required. When the user is next logged in, the +script will continue where it left off and the process continues until no more +actions are required. + +.. Note:: If running on Server 2008, then SP2 must be installed. If running on + Server 2008 R2 or Windows 7, then SP1 must be installed. + +.. Note:: Windows Server 2008 can only install PowerShell 3.0; specifying a + newer version will result in the script failing. + +.. Note:: The ``username`` and ``password`` parameters are stored in plain text + in the registry. Make sure the cleanup commands are run after the script finishes + to ensure no credentials are still stored on the host. + +WinRM Memory Hotfix +------------------- +When running on PowerShell v3.0, there is a bug with the WinRM service that +limits the amount of memory available to WinRM. Without this hotfix installed, +Ansible will fail to execute certain commands on the Windows host. 
These +hotfixes should be installed as part of the system bootstrapping or +imaging process. The script `Install-WMF3Hotfix.ps1 <https://github.com/jborean93/ansible-windows/blob/master/scripts/Install-WMF3Hotfix.ps1>`_ can be used to install the hotfix on affected hosts. + +The following PowerShell command will install the hotfix: + +.. code-block:: powershell + + $url = "https://raw.githubusercontent.com/jborean93/ansible-windows/master/scripts/Install-WMF3Hotfix.ps1" + $file = "$env:temp\Install-WMF3Hotfix.ps1" + + (New-Object -TypeName System.Net.WebClient).DownloadFile($url, $file) + powershell.exe -ExecutionPolicy ByPass -File $file -Verbose + +For more details, please refer to the `Hotfix document <https://support.microsoft.com/en-us/help/2842230/out-of-memory-error-on-a-computer-that-has-a-customized-maxmemorypersh>`_ from Microsoft. + +WinRM Setup +``````````` +Once Powershell has been upgraded to at least version 3.0, the final step is for the +WinRM service to be configured so that Ansible can connect to it. There are two +main components of the WinRM service that governs how Ansible can interface with +the Windows host: the ``listener`` and the ``service`` configuration settings. + +Details about each component can be read below, but the script +`ConfigureRemotingForAnsible.ps1 <https://github.com/ansible/ansible/blob/devel/examples/scripts/ConfigureRemotingForAnsible.ps1>`_ +can be used to set up the basics. This script sets up both HTTP and HTTPS +listeners with a self-signed certificate and enables the ``Basic`` +authentication option on the service. + +To use this script, run the following in PowerShell: + +.. 
code-block:: powershell + + $url = "https://raw.githubusercontent.com/ansible/ansible/devel/examples/scripts/ConfigureRemotingForAnsible.ps1" + $file = "$env:temp\ConfigureRemotingForAnsible.ps1" + + (New-Object -TypeName System.Net.WebClient).DownloadFile($url, $file) + + powershell.exe -ExecutionPolicy ByPass -File $file + +There are different switches and parameters (like ``-EnableCredSSP`` and +``-ForceNewSSLCert``) that can be set alongside this script. The documentation +for these options are located at the top of the script itself. + +.. Note:: The ConfigureRemotingForAnsible.ps1 script is intended for training and + development purposes only and should not be used in a + production environment, since it enables settings (like ``Basic`` authentication) + that can be inherently insecure. + +WinRM Listener +-------------- +The WinRM services listens for requests on one or more ports. Each of these ports must have a +listener created and configured. + +To view the current listeners that are running on the WinRM service, run the +following command: + +.. 
code-block:: powershell + + winrm enumerate winrm/config/Listener + +This will output something like:: + + Listener + Address = * + Transport = HTTP + Port = 5985 + Hostname + Enabled = true + URLPrefix = wsman + CertificateThumbprint + ListeningOn = 10.0.2.15, 127.0.0.1, 192.168.56.155, ::1, fe80::5efe:10.0.2.15%6, fe80::5efe:192.168.56.155%8, fe80:: + ffff:ffff:fffe%2, fe80::203d:7d97:c2ed:ec78%3, fe80::e8ea:d765:2c69:7756%7 + + Listener + Address = * + Transport = HTTPS + Port = 5986 + Hostname = SERVER2016 + Enabled = true + URLPrefix = wsman + CertificateThumbprint = E6CDAA82EEAF2ECE8546E05DB7F3E01AA47D76CE + ListeningOn = 10.0.2.15, 127.0.0.1, 192.168.56.155, ::1, fe80::5efe:10.0.2.15%6, fe80::5efe:192.168.56.155%8, fe80:: + ffff:ffff:fffe%2, fe80::203d:7d97:c2ed:ec78%3, fe80::e8ea:d765:2c69:7756%7 + +In the example above there are two listeners activated; one is listening on +port 5985 over HTTP and the other is listening on port 5986 over HTTPS. Some of +the key options that are useful to understand are: + +* ``Transport``: Whether the listener is run over HTTP or HTTPS, it is + recommended to use a listener over HTTPS as the data is encrypted without + any further changes required. + +* ``Port``: The port the listener runs on, by default it is ``5985`` for HTTP + and ``5986`` for HTTPS. This port can be changed to whatever is required and + corresponds to the host var ``ansible_port``. + +* ``URLPrefix``: The URL prefix to listen on, by default it is ``wsman``. If + this is changed, the host var ``ansible_winrm_path`` must be set to the same + value. + +* ``CertificateThumbprint``: If running over an HTTPS listener, this is the + thumbprint of the certificate in the Windows Certificate Store that is used + in the connection. 
To get the details of the certificate itself, run this + command with the relevant certificate thumbprint in PowerShell:: + + $thumbprint = "E6CDAA82EEAF2ECE8546E05DB7F3E01AA47D76CE" + Get-ChildItem -Path cert:\LocalMachine\My -Recurse | Where-Object { $_.Thumbprint -eq $thumbprint } | Select-Object * + +Setup WinRM Listener +++++++++++++++++++++ +There are three ways to set up a WinRM listener: + +* Using ``winrm quickconfig`` for HTTP or + ``winrm quickconfig -transport:https`` for HTTPS. This is the easiest option + to use when running outside of a domain environment and a simple listener is + required. Unlike the other options, this process also has the added benefit of + opening up the Firewall for the ports required and starts the WinRM service. + +* Using Group Policy Objects. This is the best way to create a listener when the + host is a member of a domain because the configuration is done automatically + without any user input. For more information on group policy objects, see the + `Group Policy Objects documentation <https://msdn.microsoft.com/en-us/library/aa374162(v=vs.85).aspx>`_. + +* Using PowerShell to create the listener with a specific configuration. This + can be done by running the following PowerShell commands: + + .. code-block:: powershell + + $selector_set = @{ + Address = "*" + Transport = "HTTPS" + } + $value_set = @{ + CertificateThumbprint = "E6CDAA82EEAF2ECE8546E05DB7F3E01AA47D76CE" + } + + New-WSManInstance -ResourceURI "winrm/config/Listener" -SelectorSet $selector_set -ValueSet $value_set + + To see the other options with this PowerShell cmdlet, see + `New-WSManInstance <https://docs.microsoft.com/en-us/powershell/module/microsoft.wsman.management/new-wsmaninstance?view=powershell-5.1>`_. + +.. Note:: When creating an HTTPS listener, an existing certificate needs to be + created and stored in the ``LocalMachine\My`` certificate store. Without a + certificate being present in this store, most commands will fail. 
+ +Delete WinRM Listener ++++++++++++++++++++++ +To remove a WinRM listener:: + + # Remove all listeners + Remove-Item -Path WSMan:\localhost\Listener\* -Recurse -Force + + # Only remove listeners that are run over HTTPS + Get-ChildItem -Path WSMan:\localhost\Listener | Where-Object { $_.Keys -contains "Transport=HTTPS" } | Remove-Item -Recurse -Force + +.. Note:: The ``Keys`` object is an array of strings, so it can contain different + values. By default it contains a key for ``Transport=`` and ``Address=`` + which correspond to the values from winrm enumerate winrm/config/Listeners. + +WinRM Service Options +--------------------- +There are a number of options that can be set to control the behavior of the WinRM service component, +including authentication options and memory settings. + +To get an output of the current service configuration options, run the +following command: + +.. code-block:: powershell + + winrm get winrm/config/Service + winrm get winrm/config/Winrs + +This will output something like:: + + Service + RootSDDL = O:NSG:BAD:P(A;;GA;;;BA)(A;;GR;;;IU)S:P(AU;FA;GA;;;WD)(AU;SA;GXGW;;;WD) + MaxConcurrentOperations = 4294967295 + MaxConcurrentOperationsPerUser = 1500 + EnumerationTimeoutms = 240000 + MaxConnections = 300 + MaxPacketRetrievalTimeSeconds = 120 + AllowUnencrypted = false + Auth + Basic = true + Kerberos = true + Negotiate = true + Certificate = true + CredSSP = true + CbtHardeningLevel = Relaxed + DefaultPorts + HTTP = 5985 + HTTPS = 5986 + IPv4Filter = * + IPv6Filter = * + EnableCompatibilityHttpListener = false + EnableCompatibilityHttpsListener = false + CertificateThumbprint + AllowRemoteAccess = true + + Winrs + AllowRemoteShellAccess = true + IdleTimeout = 7200000 + MaxConcurrentUsers = 2147483647 + MaxShellRunTime = 2147483647 + MaxProcessesPerShell = 2147483647 + MaxMemoryPerShellMB = 2147483647 + MaxShellsPerUser = 2147483647 + +While many of these options should rarely be changed, a few can easily impact +the operations over 
WinRM and are useful to understand. Some of the important +options are: + +* ``Service\AllowUnencrypted``: This option defines whether WinRM will allow + traffic that is run over HTTP without message encryption. Message level + encryption is only possible when ``ansible_winrm_transport`` is ``ntlm``, + ``kerberos`` or ``credssp``. By default this is ``false`` and should only be + set to ``true`` when debugging WinRM messages. + +* ``Service\Auth\*``: These flags define what authentication + options are allowed with the WinRM service. By default, ``Negotiate (NTLM)`` + and ``Kerberos`` are enabled. + +* ``Service\Auth\CbtHardeningLevel``: Specifies whether channel binding tokens are + not verified (None), verified but not required (Relaxed), or verified and + required (Strict). CBT is only used when connecting with NTLM or Kerberos + over HTTPS. + +* ``Service\CertificateThumbprint``: This is the thumbprint of the certificate + used to encrypt the TLS channel used with CredSSP authentication. By default + this is empty; a self-signed certificate is generated when the WinRM service + starts and is used in the TLS process. + +* ``Winrs\MaxShellRunTime``: This is the maximum time, in milliseconds, that a + remote command is allowed to execute. + +* ``Winrs\MaxMemoryPerShellMB``: This is the maximum amount of memory allocated + per shell, including the shell's child processes. 
+ +To modify a setting under the ``Service`` key in PowerShell:: + + # substitute {path} with the path to the option after winrm/config/Service + Set-Item -Path WSMan:\localhost\Service\{path} -Value "value here" + + # for example, to change Service\Auth\CbtHardeningLevel run + Set-Item -Path WSMan:\localhost\Service\Auth\CbtHardeningLevel -Value Strict + +To modify a setting under the ``Winrs`` key in PowerShell:: + + # Substitute {path} with the path to the option after winrm/config/Winrs + Set-Item -Path WSMan:\localhost\Shell\{path} -Value "value here" + + # For example, to change Winrs\MaxShellRunTime run + Set-Item -Path WSMan:\localhost\Shell\MaxShellRunTime -Value 2147483647 + +.. Note:: If running in a domain environment, some of these options are set by + GPO and cannot be changed on the host itself. When a key has been + configured with GPO, it contains the text ``[Source="GPO"]`` next to the value. + +Common WinRM Issues +------------------- +Because WinRM has a wide range of configuration options, it can be difficult +to setup and configure. Because of this complexity, issues that are shown by Ansible +could in fact be issues with the host setup instead. 
+ +One easy way to determine whether a problem is a host issue is to +run the following command from another Windows host to connect to the +target Windows host:: + + # Test out HTTP + winrs -r:http://server:5985/wsman -u:Username -p:Password ipconfig + + # Test out HTTPS (will fail if the cert is not verifiable) + winrs -r:https://server:5986/wsman -u:Username -p:Password -ssl ipconfig + + # Test out HTTPS, ignoring certificate verification + $username = "Username" + $password = ConvertTo-SecureString -String "Password" -AsPlainText -Force + $cred = New-Object -TypeName System.Management.Automation.PSCredential -ArgumentList $username, $password + + $session_option = New-PSSessionOption -SkipCACheck -SkipCNCheck -SkipRevocationCheck + Invoke-Command -ComputerName server -UseSSL -ScriptBlock { ipconfig } -Credential $cred -SessionOption $session_option + +If this fails, the issue is probably related to the WinRM setup. If it works, the issue may not be related to the WinRM setup; please continue reading for more troubleshooting suggestions. + +HTTP 401/Credentials Rejected ++++++++++++++++++++++++++++++ +A HTTP 401 error indicates the authentication process failed during the initial +connection. Some things to check for this are: + +* Verify that the credentials are correct and set properly in your inventory with + ``ansible_user`` and ``ansible_password`` + +* Ensure that the user is a member of the local Administrators group or has been explicitly + granted access (a connection test with the ``winrs`` command can be used to + rule this out). + +* Make sure that the authentication option set by ``ansible_winrm_transport`` is enabled under + ``Service\Auth\*`` + +* If running over HTTP and not HTTPS, use ``ntlm``, ``kerberos`` or ``credssp`` + with ``ansible_winrm_message_encryption: auto`` to enable message encryption. 
+ If using another authentication option or if the installed pywinrm version cannot be + upgraded, the ``Service\AllowUnencrypted`` can be set to ``true`` but this is + only recommended for troubleshooting + +* Ensure the downstream packages ``pywinrm``, ``requests-ntlm``, + ``requests-kerberos``, and/or ``requests-credssp`` are up to date using ``pip``. + +* If using Kerberos authentication, ensure that ``Service\Auth\CbtHardeningLevel`` is + not set to ``Strict``. + +* When using Basic or Certificate authentication, make sure that the user is a local account and + not a domain account. Domain accounts do not work with Basic and Certificate + authentication. + +HTTP 500 Error +++++++++++++++ +These indicate an error has occurred with the WinRM service. Some things +to check for include: + +* Verify that the number of current open shells has not exceeded either + ``WinRsMaxShellsPerUser`` or any of the other Winrs quotas haven't been + exceeded. + +Timeout Errors ++++++++++++++++ +These usually indicate an error with the network connection where +Ansible is unable to reach the host. Some things to check for include: + +* Make sure the firewall is not set to block the configured WinRM listener ports +* Ensure that a WinRM listener is enabled on the port and path set by the host vars +* Ensure that the ``winrm`` service is running on the Windows host and configured for + automatic start + +Connection Refused Errors ++++++++++++++++++++++++++ +These usually indicate an error when trying to communicate with the +WinRM service on the host. Some things to check for: + +* Ensure that the WinRM service is up and running on the host. Use + ``(Get-Service -Name winrm).Status`` to get the status of the service. +* Check that the host firewall is allowing traffic over the WinRM port. By default + this is ``5985`` for HTTP and ``5986`` for HTTPS. + +Sometimes an installer may restart the WinRM or HTTP service and cause this error. 
The
+best way to deal with this is to use ``win_psexec`` from another
+Windows host.
+
+Failure to Load Builtin Modules
++++++++++++++++++++++++++++++++
+If PowerShell fails with an error message similar to ``The 'Out-String' command was found in the module 'Microsoft.PowerShell.Utility', but the module could not be loaded.``
+then there could be a problem trying to access all the paths specified by the ``PSModulePath`` environment variable.
+A common cause of this issue is that the ``PSModulePath`` environment variable contains a UNC path to a file share and
+because of the double hop/credential delegation issue the Ansible process cannot access these folders. The way around
+this problem is to either:
+
+* Remove the UNC path from the ``PSModulePath`` environment variable, or
+* Use an authentication option that supports credential delegation like ``credssp`` or ``kerberos`` with credential delegation enabled
+
+See `KB4076842 <https://support.microsoft.com/en-us/help/4076842>`_ for more information on this problem.
+
+
+Windows SSH Setup
+`````````````````
+Ansible 2.8 has added an experimental SSH connection for Windows managed nodes.
+
+.. warning::
+    Use this feature at your own risk!
+    Using SSH with Windows is experimental; the implementation may make
+    backwards incompatible changes in feature releases. The server side
+    components can be unreliable depending on the version that is installed.
+
+Installing Win32-OpenSSH
+------------------------
+The first step to using SSH with Windows is to install the `Win32-OpenSSH <https://github.com/PowerShell/Win32-OpenSSH>`_
+service on the Windows host. Microsoft offers a way to install ``Win32-OpenSSH`` through a Windows
+capability but currently the version that is installed through this process is
+too old to work with Ansible. 
To install ``Win32-OpenSSH`` for use with
+Ansible, select one of these four installation options:
+
+* Manually install the service, following the `install instructions <https://github.com/PowerShell/Win32-OpenSSH/wiki/Install-Win32-OpenSSH>`_
+  from Microsoft.
+
+* Install the `openssh <https://chocolatey.org/packages/openssh>`_ package using Chocolatey::
+
+      choco install --package-parameters=/SSHServerFeature openssh
+
+* Use ``win_chocolatey`` to install the service::
+
+      - name: install the Win32-OpenSSH service
+        win_chocolatey:
+          name: openssh
+          package_params: /SSHServerFeature
+          state: present
+
+* Use an existing Ansible Galaxy role like `jborean93.win_openssh <https://galaxy.ansible.com/jborean93/win_openssh>`_::
+
+      # Make sure the role has been downloaded first
+      ansible-galaxy install jborean93.win_openssh
+
+      # main.yml
+      - name: install Win32-OpenSSH service
+        hosts: windows
+        gather_facts: no
+        roles:
+        - role: jborean93.win_openssh
+          opt_openssh_setup_service: True
+
+.. note:: ``Win32-OpenSSH`` is still a beta product and is constantly
+    being updated to include new features and bugfixes. If you are using SSH as
+    a connection option for Windows, it is highly recommended that you install the
+    latest release from one of the 4 methods above.
+
+Configuring the Win32-OpenSSH shell
+-----------------------------------
+
+By default ``Win32-OpenSSH`` will use ``cmd.exe`` as a shell. 
To configure a +different shell, use an Ansible task to define the registry setting:: + + - name: set the default shell to PowerShell + win_regedit: + path: HKLM:\SOFTWARE\OpenSSH + name: DefaultShell + data: C:\Windows\System32\WindowsPowerShell\v1.0\powershell.exe + type: string + state: present + + # Or revert the settings back to the default, cmd + - name: set the default shell to cmd + win_regedit: + path: HKLM:\SOFTWARE\OpenSSH + name: DefaultShell + state: absent + +Win32-OpenSSH Authentication +---------------------------- +Win32-OpenSSH authentication with Windows is similar to SSH +authentication on Unix/Linux hosts. You can use a plaintext password or +SSH public key authentication, add public keys to an ``authorized_key`` file +in the ``.ssh`` folder of the user's profile directory, and configure the +service using the ``sshd_config`` file used by the SSH service as you would on +a Unix/Linux host. + +When using SSH key authentication with Ansible, the remote session won't have access to the +user's credentials and will fail when attempting to access a network resource. +This is also known as the double-hop or credential delegation issue. There are +two ways to work around this issue: + +* Use plaintext password auth by setting ``ansible_password`` +* Use ``become`` on the task with the credentials of the user that needs access to the remote resource + +Configuring Ansible for SSH on Windows +-------------------------------------- +To configure Ansible to use SSH for Windows hosts, you must set two connection variables: + +* set ``ansible_connection`` to ``ssh`` +* set ``ansible_shell_type`` to ``cmd`` or ``powershell`` + +The ``ansible_shell_type`` variable should reflect the ``DefaultShell`` +configured on the Windows host. Set to ``cmd`` for the default shell or set to +``powershell`` if the ``DefaultShell`` has been changed to PowerShell. 
+ +Known issues with SSH on Windows +-------------------------------- +Using SSH with Windows is experimental, and we expect to uncover more issues. +Here are the known ones: + +* Win32-OpenSSH versions older than ``v7.9.0.0p1-Beta`` do not work when ``powershell`` is the shell type +* While SCP should work, SFTP is the recommended SSH file transfer mechanism to use when copying or fetching a file + + +.. seealso:: + + :ref:`about_playbooks` + An introduction to playbooks + :ref:`playbooks_best_practices` + Tips and tricks for playbooks + :ref:`List of Windows Modules <windows_modules>` + Windows specific module list, all implemented in PowerShell + `User Mailing List <https://groups.google.com/group/ansible-project>`_ + Have a question? Stop by the google group! + `irc.freenode.net <http://irc.freenode.net>`_ + #ansible IRC chat channel diff --git a/docs/docsite/rst/user_guide/windows_usage.rst b/docs/docsite/rst/user_guide/windows_usage.rst new file mode 100644 index 00000000..b39413cd --- /dev/null +++ b/docs/docsite/rst/user_guide/windows_usage.rst @@ -0,0 +1,513 @@ +Using Ansible and Windows +========================= +When using Ansible to manage Windows, many of the syntax and rules that apply +for Unix/Linux hosts also apply to Windows, but there are still some differences +when it comes to components like path separators and OS-specific tasks. +This document covers details specific to using Ansible for Windows. + +.. contents:: Topics + :local: + +Use Cases +````````` +Ansible can be used to orchestrate a multitude of tasks on Windows servers. +Below are some examples and info about common tasks. + +Installing Software +------------------- +There are three main ways that Ansible can be used to install software: + +* Using the ``win_chocolatey`` module. This sources the program data from the default + public `Chocolatey <https://chocolatey.org/>`_ repository. Internal repositories can + be used instead by setting the ``source`` option. 
+ +* Using the ``win_package`` module. This installs software using an MSI or .exe installer + from a local/network path or URL. + +* Using the ``win_command`` or ``win_shell`` module to run an installer manually. + +The ``win_chocolatey`` module is recommended since it has the most complete logic for checking to see if a package has already been installed and is up-to-date. + +Below are some examples of using all three options to install 7-Zip: + +.. code-block:: yaml+jinja + + # Install/uninstall with chocolatey + - name: Ensure 7-Zip is installed via Chocolatey + win_chocolatey: + name: 7zip + state: present + + - name: Ensure 7-Zip is not installed via Chocolatey + win_chocolatey: + name: 7zip + state: absent + + # Install/uninstall with win_package + - name: Download the 7-Zip package + win_get_url: + url: https://www.7-zip.org/a/7z1701-x64.msi + dest: C:\temp\7z.msi + + - name: Ensure 7-Zip is installed via win_package + win_package: + path: C:\temp\7z.msi + state: present + + - name: Ensure 7-Zip is not installed via win_package + win_package: + path: C:\temp\7z.msi + state: absent + + # Install/uninstall with win_command + - name: Download the 7-Zip package + win_get_url: + url: https://www.7-zip.org/a/7z1701-x64.msi + dest: C:\temp\7z.msi + + - name: Check if 7-Zip is already installed + win_reg_stat: + name: HKLM:\SOFTWARE\Microsoft\Windows\CurrentVersion\Uninstall\{23170F69-40C1-2702-1701-000001000000} + register: 7zip_installed + + - name: Ensure 7-Zip is installed via win_command + win_command: C:\Windows\System32\msiexec.exe /i C:\temp\7z.msi /qn /norestart + when: 7zip_installed.exists == false + + - name: Ensure 7-Zip is uninstalled via win_command + win_command: C:\Windows\System32\msiexec.exe /x {23170F69-40C1-2702-1701-000001000000} /qn /norestart + when: 7zip_installed.exists == true + +Some installers like Microsoft Office or SQL Server require credential delegation or +access to components restricted by WinRM. 
The best method to bypass these
+issues is to use ``become`` with the task. With ``become``, Ansible will run
+the installer as if it were run interactively on the host.
+
+.. Note:: Many installers do not properly pass back error information over WinRM. In these cases, if the install has been verified to work locally the recommended method is to use become.
+
+.. Note:: Some installers restart the WinRM or HTTP services, or cause them to become temporarily unavailable, making Ansible assume the system is unreachable.
+
+Installing Updates
+------------------
+The ``win_updates`` and ``win_hotfix`` modules can be used to install updates
+or hotfixes on a host. The module ``win_updates`` is used to install multiple
+updates by category, while ``win_hotfix`` can be used to install a single
+update or hotfix file that has been downloaded locally.
+
+.. Note:: The ``win_hotfix`` module has a requirement that the DISM PowerShell cmdlets are
+    present. These cmdlets were only added by default on Windows Server 2012
+    and newer and must be installed on older Windows hosts.
+
+The following example shows how ``win_updates`` can be used:
+
+.. code-block:: yaml+jinja
+
+    - name: Install all critical and security updates
+      win_updates:
+        category_names:
+        - CriticalUpdates
+        - SecurityUpdates
+        state: installed
+      register: update_result
+
+    - name: Reboot host if required
+      win_reboot:
+      when: update_result.reboot_required
+
+The following example shows how ``win_hotfix`` can be used to install a single
+update or hotfix:
+
+.. 
code-block:: yaml+jinja
+
+    - name: Download KB3172729 for Server 2012 R2
+      win_get_url:
+        url: http://download.windowsupdate.com/d/msdownload/update/software/secu/2016/07/windows8.1-kb3172729-x64_e8003822a7ef4705cbb65623b72fd3cec73fe222.msu
+        dest: C:\temp\KB3172729.msu
+
+    - name: Install hotfix
+      win_hotfix:
+        hotfix_kb: KB3172729
+        source: C:\temp\KB3172729.msu
+        state: present
+      register: hotfix_result
+
+    - name: Reboot host if required
+      win_reboot:
+      when: hotfix_result.reboot_required
+
+Set Up Users and Groups
+-----------------------
+Ansible can be used to create Windows users and groups both locally and on a domain.
+
+Local
++++++
+The modules ``win_user``, ``win_group`` and ``win_group_membership`` manage
+Windows users, groups and group memberships locally.
+
+The following is an example of creating local accounts and groups that can
+access a folder on the same host:
+
+.. code-block:: yaml+jinja
+
+    - name: Create local group to contain new users
+      win_group:
+        name: LocalGroup
+        description: Allow access to C:\Development folder
+
+    - name: Create local user
+      win_user:
+        name: '{{ item.name }}'
+        password: '{{ item.password }}'
+        groups: LocalGroup
+        update_password: no
+        password_never_expires: yes
+      loop:
+      - name: User1
+        password: Password1
+      - name: User2
+        password: Password2
+
+    - name: Create Development folder
+      win_file:
+        path: C:\Development
+        state: directory
+
+    - name: Set ACL of Development folder
+      win_acl:
+        path: C:\Development
+        rights: FullControl
+        state: present
+        type: allow
+        user: LocalGroup
+
+    - name: Remove parent inheritance of Development folder
+      win_acl_inheritance:
+        path: C:\Development
+        reorganize: yes
+        state: absent
+
+Domain
+++++++
+The modules ``win_domain_user`` and ``win_domain_group`` manage users and
+groups in a domain. Below is an example of ensuring a batch of domain users
+are created:
+
+.. 
code-block:: yaml+jinja + + - name: Ensure each account is created + win_domain_user: + name: '{{ item.name }}' + upn: '{{ item.name }}@MY.DOMAIN.COM' + password: '{{ item.password }}' + password_never_expires: no + groups: + - Test User + - Application + company: Ansible + update_password: on_create + loop: + - name: Test User + password: Password + - name: Admin User + password: SuperSecretPass01 + - name: Dev User + password: '@fvr3IbFBujSRh!3hBg%wgFucD8^x8W5' + +Running Commands +---------------- +In cases where there is no appropriate module available for a task, +a command or script can be run using the ``win_shell``, ``win_command``, ``raw``, and ``script`` modules. + +The ``raw`` module simply executes a Powershell command remotely. Since ``raw`` +has none of the wrappers that Ansible typically uses, ``become``, ``async`` +and environment variables do not work. + +The ``script`` module executes a script from the Ansible controller on +one or more Windows hosts. Like ``raw``, ``script`` currently does not support +``become``, ``async``, or environment variables. + +The ``win_command`` module is used to execute a command which is either an +executable or batch file, while the ``win_shell`` module is used to execute commands within a shell. + +Choosing Command or Shell ++++++++++++++++++++++++++ +The ``win_shell`` and ``win_command`` modules can both be used to execute a command or commands. +The ``win_shell`` module is run within a shell-like process like ``PowerShell`` or ``cmd``, so it has access to shell +operators like ``<``, ``>``, ``|``, ``;``, ``&&``, and ``||``. Multi-lined commands can also be run in ``win_shell``. + +The ``win_command`` module simply runs a process outside of a shell. It can still +run a shell command like ``mkdir`` or ``New-Item`` by passing the shell commands +to a shell executable like ``cmd.exe`` or ``PowerShell.exe``. + +Here are some examples of using ``win_command`` and ``win_shell``: + +.. 
code-block:: yaml+jinja
+
+    - name: Run a command under PowerShell
+      win_shell: Get-Service -Name service | Stop-Service
+
+    - name: Run a command under cmd
+      win_shell: mkdir C:\temp
+      args:
+        executable: cmd.exe
+
+    - name: Run multiple shell commands
+      win_shell: |
+        New-Item -Path C:\temp -ItemType Directory
+        Remove-Item -Path C:\temp -Force -Recurse
+        $path_info = Get-Item -Path C:\temp
+        $path_info.FullName
+
+    - name: Run an executable using win_command
+      win_command: whoami.exe
+
+    - name: Run a cmd command
+      win_command: cmd.exe /c mkdir C:\temp
+
+    - name: Run a vbs script
+      win_command: cscript.exe script.vbs
+
+.. Note:: Some commands like ``mkdir``, ``del``, and ``copy`` only exist in
+    the CMD shell. To run them with ``win_command`` they must be
+    prefixed with ``cmd.exe /c``.
+
+Argument Rules
+++++++++++++++
+When running a command through ``win_command``, the standard Windows argument
+rules apply:
+
+* Each argument is delimited by a white space, which can either be a space or a
+  tab.
+
+* An argument can be surrounded by double quotes ``"``. Anything inside these
+  quotes is interpreted as a single argument even if it contains whitespace.
+
+* A double quote preceded by a backslash ``\`` is interpreted as just a double
+  quote ``"`` and not as an argument delimiter.
+
+* Backslashes are interpreted literally unless they immediately precede double
+  quotes; for example ``\`` == ``\`` and ``\"`` == ``"``
+
+* If an even number of backslashes is followed by a double quote, one
+  backslash is used in the argument for every pair, and the double quote is
+  used as a string delimiter for the argument.
+
+* If an odd number of backslashes is followed by a double quote, one backslash
+  is used in the argument for every pair, and the double quote is escaped and
+  made a literal double quote in the argument.
+
+With those rules in mind, here are some examples of quoting:
+
+.. 
code-block:: yaml+jinja + + - win_command: C:\temp\executable.exe argument1 "argument 2" "C:\path\with space" "double \"quoted\"" + + argv[0] = C:\temp\executable.exe + argv[1] = argument1 + argv[2] = argument 2 + argv[3] = C:\path\with space + argv[4] = double "quoted" + + - win_command: '"C:\Program Files\Program\program.exe" "escaped \\\" backslash" unquoted-end-backslash\' + + argv[0] = C:\Program Files\Program\program.exe + argv[1] = escaped \" backslash + argv[2] = unquoted-end-backslash\ + + # Due to YAML and Ansible parsing '\"' must be written as '{% raw %}\\{% endraw %}"' + - win_command: C:\temp\executable.exe C:\no\space\path "arg with end \ before end quote{% raw %}\\{% endraw %}" + + argv[0] = C:\temp\executable.exe + argv[1] = C:\no\space\path + argv[2] = arg with end \ before end quote\" + +For more information, see `escaping arguments <https://msdn.microsoft.com/en-us/library/17w5ykft(v=vs.85).aspx>`_. + +Creating and Running a Scheduled Task +------------------------------------- +WinRM has some restrictions in place that cause errors when running certain +commands. One way to bypass these restrictions is to run a command through a +scheduled task. A scheduled task is a Windows component that provides the +ability to run an executable on a schedule and under a different account. + +Ansible version 2.5 added modules that make it easier to work with scheduled tasks in Windows. +The following is an example of running a script as a scheduled task that deletes itself after +running: + +.. 
code-block:: yaml+jinja
+
+    - name: Create scheduled task to run a process
+      win_scheduled_task:
+        name: adhoc-task
+        username: SYSTEM
+        actions:
+        - path: PowerShell.exe
+          arguments: |
+            Start-Sleep -Seconds 30 # This isn't required, just here as a demonstration
+            New-Item -Path C:\temp\test -ItemType Directory
+        # Remove this action if the task shouldn't be deleted on completion
+        - path: cmd.exe
+          arguments: /c schtasks.exe /Delete /TN "adhoc-task" /F
+        triggers:
+        - type: registration
+
+    - name: Wait for the scheduled task to complete
+      win_scheduled_task_stat:
+        name: adhoc-task
+      register: task_stat
+      until: (task_stat.state is defined and task_stat.state.status != "TASK_STATE_RUNNING") or (task_stat.task_exists == False)
+      retries: 12
+      delay: 10
+
+.. Note:: The modules used in the above example were updated/added in Ansible
+    version 2.5.
+
+Path Formatting for Windows
+```````````````````````````
+Windows differs from a traditional POSIX operating system in many ways. One of
+the major changes is the shift from ``/`` as the path separator to ``\``. This
+can cause major issues with how playbooks are written, since ``\`` is often used
+as an escape character on POSIX systems.
+
+Ansible allows two different styles of syntax; each deals with path separators for Windows differently:
+
+YAML Style
+----------
+When using the YAML syntax for tasks, the rules are well-defined by the YAML
+standard:
+
+* When using a normal string (without quotes), YAML will not consider the
+  backslash an escape character.
+
+* When using single quotes ``'``, YAML will not consider the backslash an
+  escape character.
+
+* When using double quotes ``"``, the backslash is considered an escape
+  character and needs to be escaped with another backslash.
+
+.. Note:: You should only quote strings when it is absolutely
+    necessary or required by YAML, and then use single quotes. 
+ +The YAML specification considers the following `escape sequences <https://yaml.org/spec/current.html#id2517668>`_: + +* ``\0``, ``\\``, ``\"``, ``\_``, ``\a``, ``\b``, ``\e``, ``\f``, ``\n``, ``\r``, ``\t``, + ``\v``, ``\L``, ``\N`` and ``\P`` -- Single character escape + +* ``<TAB>``, ``<SPACE>``, ``<NBSP>``, ``<LNSP>``, ``<PSP>`` -- Special + characters + +* ``\x..`` -- 2-digit hex escape + +* ``\u....`` -- 4-digit hex escape + +* ``\U........`` -- 8-digit hex escape + +Here are some examples on how to write Windows paths:: + + # GOOD + tempdir: C:\Windows\Temp + + # WORKS + tempdir: 'C:\Windows\Temp' + tempdir: "C:\\Windows\\Temp" + + # BAD, BUT SOMETIMES WORKS + tempdir: C:\\Windows\\Temp + tempdir: 'C:\\Windows\\Temp' + tempdir: C:/Windows/Temp + +This is an example which will fail: + +.. code-block:: text + + # FAILS + tempdir: "C:\Windows\Temp" + +This example shows the use of single quotes when they are required:: + + --- + - name: Copy tomcat config + win_copy: + src: log4j.xml + dest: '{{tc_home}}\lib\log4j.xml' + +Legacy key=value Style +---------------------- +The legacy ``key=value`` syntax is used on the command line for ad-hoc commands, +or inside playbooks. The use of this style is discouraged within playbooks +because backslash characters need to be escaped, making playbooks harder to read. +The legacy syntax depends on the specific implementation in Ansible, and quoting +(both single and double) does not have any effect on how it is parsed by +Ansible. 
+
+The Ansible key=value parser parse_kv() considers the following escape
+sequences:
+
+* ``\``, ``'``, ``"``, ``\a``, ``\b``, ``\f``, ``\n``, ``\r``, ``\t`` and
+  ``\v`` -- Single character escape
+
+* ``\x..`` -- 2-digit hex escape
+
+* ``\u....`` -- 4-digit hex escape
+
+* ``\U........`` -- 8-digit hex escape
+
+* ``\N{...}`` -- Unicode character by name
+
+This means that the backslash is an escape character for some sequences, and it
+is usually safer to escape a backslash when in this form.
+
+Here are some examples of using Windows paths with the key=value style:
+
+.. code-block:: ini
+
+    # GOOD
+    tempdir=C:\\Windows\\Temp
+
+    # WORKS
+    tempdir='C:\\Windows\\Temp'
+    tempdir="C:\\Windows\\Temp"
+
+    # BAD, BUT SOMETIMES WORKS
+    tempdir=C:\Windows\Temp
+    tempdir='C:\Windows\Temp'
+    tempdir="C:\Windows\Temp"
+    tempdir=C:/Windows/Temp
+
+    # FAILS
+    tempdir=C:\Windows\temp
+    tempdir='C:\Windows\temp'
+    tempdir="C:\Windows\temp"
+
+The failing examples don't fail outright but will substitute ``\t`` with the
+``<TAB>`` character resulting in ``tempdir`` being ``C:\Windows<TAB>emp``.
+
+Limitations
+```````````
+Some things you cannot do with Ansible and Windows are:
+
+* Upgrade PowerShell
+
+* Interact with the WinRM listeners
+
+Because WinRM is reliant on the services being online and running during normal operations, you cannot upgrade PowerShell or interact with WinRM listeners with Ansible. Both of these actions will cause the connection to fail. This can technically be avoided by using ``async`` or a scheduled task, but those methods are fragile if the process they run breaks the underlying connection Ansible uses, and are best left to the bootstrapping process or before an image is
+created.
+
+Developing Windows Modules
+``````````````````````````
+Because Ansible modules for Windows are written in PowerShell, the development
+guides for Windows modules differ substantially from those for standard modules. 
Please see +:ref:`developing_modules_general_windows` for more information. + +.. seealso:: + + :ref:`playbooks_intro` + An introduction to playbooks + :ref:`playbooks_best_practices` + Tips and tricks for playbooks + :ref:`List of Windows Modules <windows_modules>` + Windows specific module list, all implemented in PowerShell + `User Mailing List <https://groups.google.com/group/ansible-project>`_ + Have a question? Stop by the google group! + `irc.freenode.net <http://irc.freenode.net>`_ + #ansible IRC chat channel diff --git a/docs/docsite/rst/user_guide/windows_winrm.rst b/docs/docsite/rst/user_guide/windows_winrm.rst new file mode 100644 index 00000000..03421cfb --- /dev/null +++ b/docs/docsite/rst/user_guide/windows_winrm.rst @@ -0,0 +1,913 @@ +.. _windows_winrm: + +Windows Remote Management +========================= +Unlike Linux/Unix hosts, which use SSH by default, Windows hosts are +configured with WinRM. This topic covers how to configure and use WinRM with Ansible. + +.. contents:: Topics + :local: + +What is WinRM? +`````````````` +WinRM is a management protocol used by Windows to remotely communicate with +another server. It is a SOAP-based protocol that communicates over HTTP/HTTPS, and is +included in all recent Windows operating systems. Since Windows +Server 2012, WinRM has been enabled by default, but in most cases extra +configuration is required to use WinRM with Ansible. + +Ansible uses the `pywinrm <https://github.com/diyan/pywinrm>`_ package to +communicate with Windows servers over WinRM. It is not installed by default +with the Ansible package, but can be installed by running the following: + +.. code-block:: shell + + pip install "pywinrm>=0.3.0" + +.. Note:: on distributions with multiple python versions, use pip2 or pip2.x, + where x matches the python minor version Ansible is running under. + +.. Warning:: + Using the ``winrm`` or ``psrp`` connection plugins in Ansible on MacOS in + the latest releases typically fail. 
This is a known problem that occurs + deep within the Python stack and cannot be changed by Ansible. The only + workaround today is to set the environment variable ``no_proxy=*`` and + avoid using Kerberos auth. + + +Authentication Options +`````````````````````` +When connecting to a Windows host, there are several different options that can be used +when authenticating with an account. The authentication type may be set on inventory +hosts or groups with the ``ansible_winrm_transport`` variable. + +The following matrix is a high level overview of the options: + ++-------------+----------------+---------------------------+-----------------------+-----------------+ +| Option | Local Accounts | Active Directory Accounts | Credential Delegation | HTTP Encryption | ++=============+================+===========================+=======================+=================+ +| Basic | Yes | No | No | No | ++-------------+----------------+---------------------------+-----------------------+-----------------+ +| Certificate | Yes | No | No | No | ++-------------+----------------+---------------------------+-----------------------+-----------------+ +| Kerberos | No | Yes | Yes | Yes | ++-------------+----------------+---------------------------+-----------------------+-----------------+ +| NTLM | Yes | Yes | No | Yes | ++-------------+----------------+---------------------------+-----------------------+-----------------+ +| CredSSP | Yes | Yes | Yes | Yes | ++-------------+----------------+---------------------------+-----------------------+-----------------+ + +Basic +----- +Basic authentication is one of the simplest authentication options to use, but is +also the most insecure. This is because the username and password are simply +base64 encoded, and if a secure channel is not in use (eg, HTTPS) then it can be +decoded by anyone. Basic authentication can only be used for local accounts (not domain accounts). 
+
+The following example shows host vars configured for basic authentication:
+
+.. code-block:: yaml+jinja
+
+    ansible_user: LocalUsername
+    ansible_password: Password
+    ansible_connection: winrm
+    ansible_winrm_transport: basic
+
+Basic authentication is not enabled by default on a Windows host but can be
+enabled by running the following in PowerShell::
+
+    Set-Item -Path WSMan:\localhost\Service\Auth\Basic -Value $true
+
+Certificate
+-----------
+Certificate authentication uses certificates as keys similar to SSH key
+pairs, but the file format and key generation process is different.
+
+The following example shows host vars configured for certificate authentication:
+
+.. code-block:: yaml+jinja
+
+    ansible_connection: winrm
+    ansible_winrm_cert_pem: /path/to/certificate/public/key.pem
+    ansible_winrm_cert_key_pem: /path/to/certificate/private/key.pem
+    ansible_winrm_transport: certificate
+
+Certificate authentication is not enabled by default on a Windows host but can
+be enabled by running the following in PowerShell::
+
+    Set-Item -Path WSMan:\localhost\Service\Auth\Certificate -Value $true
+
+.. Note:: Encrypted private keys cannot be used as the urllib3 library that
+    is used by Ansible for WinRM does not support this functionality.
+
+Generate a Certificate
+++++++++++++++++++++++
+A certificate must be generated before it can be mapped to a local user.
+This can be done using one of the following methods:
+
+* OpenSSL
+* PowerShell, using the ``New-SelfSignedCertificate`` cmdlet
+* Active Directory Certificate Services
+
+Active Directory Certificate Services is beyond the scope of this documentation but may be
+the best option to use when running in a domain environment. For more information,
+see the `Active Directory Certificate Services documentation <https://docs.microsoft.com/en-us/previous-versions/windows/it-pro/windows-server-2008-R2-and-2008/cc732625(v=ws.11)>`_.
+
+.. 
Note:: Using the PowerShell cmdlet ``New-SelfSignedCertificate`` to generate + a certificate for authentication only works when being generated from a + Windows 10 or Windows Server 2012 R2 host or later. OpenSSL is still required to + extract the private key from the PFX certificate to a PEM file for Ansible + to use. + +To generate a certificate with ``OpenSSL``: + +.. code-block:: shell + + # Set the name of the local user that will have the key mapped to + USERNAME="username" + + cat > openssl.conf << EOL + distinguished_name = req_distinguished_name + [req_distinguished_name] + [v3_req_client] + extendedKeyUsage = clientAuth + subjectAltName = otherName:1.3.6.1.4.1.311.20.2.3;UTF8:$USERNAME@localhost + EOL + + export OPENSSL_CONF=openssl.conf + openssl req -x509 -nodes -days 3650 -newkey rsa:2048 -out cert.pem -outform PEM -keyout cert_key.pem -subj "/CN=$USERNAME" -extensions v3_req_client + rm openssl.conf + + +To generate a certificate with ``New-SelfSignedCertificate``: + +.. code-block:: powershell + + # Set the name of the local user that will have the key mapped + $username = "username" + $output_path = "C:\temp" + + # Instead of generating a file, the cert will be added to the personal + # LocalComputer folder in the certificate store + $cert = New-SelfSignedCertificate -Type Custom ` + -Subject "CN=$username" ` + -TextExtension @("2.5.29.37={text}1.3.6.1.5.5.7.3.2","2.5.29.17={text}upn=$username@localhost") ` + -KeyUsage DigitalSignature,KeyEncipherment ` + -KeyAlgorithm RSA ` + -KeyLength 2048 + + # Export the public key + $pem_output = @() + $pem_output += "-----BEGIN CERTIFICATE-----" + $pem_output += [System.Convert]::ToBase64String($cert.RawData) -replace ".{64}", "$&`n" + $pem_output += "-----END CERTIFICATE-----" + [System.IO.File]::WriteAllLines("$output_path\cert.pem", $pem_output) + + # Export the private key in a PFX file + [System.IO.File]::WriteAllBytes("$output_path\cert.pfx", $cert.Export("Pfx")) + + +.. 
Note:: To convert the PFX file to a private key that pywinrm can use, run + the following command with OpenSSL + ``openssl pkcs12 -in cert.pfx -nocerts -nodes -out cert_key.pem -passin pass: -passout pass:`` + +Import a Certificate to the Certificate Store ++++++++++++++++++++++++++++++++++++++++++++++ +Once a certificate has been generated, the issuing certificate needs to be +imported into the ``Trusted Root Certificate Authorities`` of the +``LocalMachine`` store, and the client certificate public key must be present +in the ``Trusted People`` folder of the ``LocalMachine`` store. For this example, +both the issuing certificate and public key are the same. + +Following example shows how to import the issuing certificate: + +.. code-block:: powershell + + $cert = New-Object -TypeName System.Security.Cryptography.X509Certificates.X509Certificate2 + $cert.Import("cert.pem") + + $store_name = [System.Security.Cryptography.X509Certificates.StoreName]::Root + $store_location = [System.Security.Cryptography.X509Certificates.StoreLocation]::LocalMachine + $store = New-Object -TypeName System.Security.Cryptography.X509Certificates.X509Store -ArgumentList $store_name, $store_location + $store.Open("MaxAllowed") + $store.Add($cert) + $store.Close() + + +.. Note:: If using ADCS to generate the certificate, then the issuing + certificate will already be imported and this step can be skipped. + +The code to import the client certificate public key is: + +.. 
code-block:: powershell + + $cert = New-Object -TypeName System.Security.Cryptography.X509Certificates.X509Certificate2 + $cert.Import("cert.pem") + + $store_name = [System.Security.Cryptography.X509Certificates.StoreName]::TrustedPeople + $store_location = [System.Security.Cryptography.X509Certificates.StoreLocation]::LocalMachine + $store = New-Object -TypeName System.Security.Cryptography.X509Certificates.X509Store -ArgumentList $store_name, $store_location + $store.Open("MaxAllowed") + $store.Add($cert) + $store.Close() + + +Mapping a Certificate to an Account ++++++++++++++++++++++++++++++++++++ +Once the certificate has been imported, map it to the local user account:: + + $username = "username" + $password = ConvertTo-SecureString -String "password" -AsPlainText -Force + $credential = New-Object -TypeName System.Management.Automation.PSCredential -ArgumentList $username, $password + + # This is the issuer thumbprint which in the case of a self generated cert + # is the public key thumbprint, additional logic may be required for other + # scenarios + $thumbprint = (Get-ChildItem -Path cert:\LocalMachine\root | Where-Object { $_.Subject -eq "CN=$username" }).Thumbprint + + New-Item -Path WSMan:\localhost\ClientCertificate ` + -Subject "$username@localhost" ` + -URI * ` + -Issuer $thumbprint ` + -Credential $credential ` + -Force + + +Once this is complete, the hostvar ``ansible_winrm_cert_pem`` should be set to +the path of the public key and the ``ansible_winrm_cert_key_pem`` variable should be set to +the path of the private key. + +NTLM +---- +NTLM is an older authentication mechanism used by Microsoft that can support +both local and domain accounts. NTLM is enabled by default on the WinRM +service, so no setup is required before using it. + +NTLM is the easiest authentication protocol to use and is more secure than +``Basic`` authentication. If running in a domain environment, ``Kerberos`` should be used +instead of NTLM. 
+ +Kerberos has several advantages over using NTLM: + +* NTLM is an older protocol and does not support newer encryption + protocols. +* NTLM is slower to authenticate because it requires more round trips to the host in + the authentication stage. +* Unlike Kerberos, NTLM does not allow credential delegation. + +This example shows host variables configured to use NTLM authentication: + +.. code-block:: yaml+jinja + + ansible_user: LocalUsername + ansible_password: Password + ansible_connection: winrm + ansible_winrm_transport: ntlm + +Kerberos +-------- +Kerberos is the recommended authentication option to use when running in a +domain environment. Kerberos supports features like credential delegation and +message encryption over HTTP and is one of the more secure options that +is available through WinRM. + +Kerberos requires some additional setup work on the Ansible host before it can be +used properly. + +The following example shows host vars configured for Kerberos authentication: + +.. code-block:: yaml+jinja + + ansible_user: username@MY.DOMAIN.COM + ansible_password: Password + ansible_connection: winrm + ansible_winrm_transport: kerberos + +As of Ansible version 2.3, the Kerberos ticket will be created based on +``ansible_user`` and ``ansible_password``. If running on an older version of +Ansible or when ``ansible_winrm_kinit_mode`` is ``manual``, a Kerberos +ticket must already be obtained. See below for more details. 
+
+There are some extra host variables that can be set::
+
+    ansible_winrm_kinit_mode: managed/manual (manual means Ansible will not obtain a ticket)
+    ansible_winrm_kinit_cmd: the kinit binary to use to obtain a Kerberos ticket (default to kinit)
+    ansible_winrm_service: overrides the SPN prefix that is used, the default is ``HTTP`` and should rarely ever need changing
+    ansible_winrm_kerberos_delegation: allows the credentials to traverse multiple hops
+    ansible_winrm_kerberos_hostname_override: the hostname to be used for the kerberos exchange
+
+Installing the Kerberos Library
++++++++++++++++++++++++++++++++
+There are some system dependencies that must be installed prior to using Kerberos. The script below lists the dependencies based on the distro:
+
+.. code-block:: shell
+
+    # Via Yum (RHEL/Centos/Fedora)
+    yum -y install gcc python-devel krb5-devel krb5-libs krb5-workstation
+
+    # Via Apt (Ubuntu)
+    sudo apt-get install python-dev libkrb5-dev krb5-user
+
+    # Via Portage (Gentoo)
+    emerge -av app-crypt/mit-krb5
+    emerge -av dev-python/setuptools
+
+    # Via Pkg (FreeBSD)
+    sudo pkg install security/krb5
+
+    # Via OpenCSW (Solaris)
+    pkgadd -d http://get.opencsw.org/now
+    /opt/csw/bin/pkgutil -U
+    /opt/csw/bin/pkgutil -y -i libkrb5_3
+
+    # Via Pacman (Arch Linux)
+    pacman -S krb5
+
+
+Once the dependencies have been installed, the ``python-kerberos`` wrapper can
+be installed using ``pip``:
+
+.. code-block:: shell
+
+    pip install pywinrm[kerberos]
+
+
+.. note::
+    While Ansible has supported Kerberos auth through ``pywinrm`` for some
+    time, optional features or more secure options may only be available in
+    newer versions of the ``pywinrm`` and/or ``pykerberos`` libraries. It is
+    recommended you upgrade each version to the latest available to resolve
+    any warnings or errors. This can be done through tools like ``pip`` or a
+    system package manager like ``dnf``, ``yum``, ``apt`` but the package
+    names and versions available may differ between tools.
+ + +Configuring Host Kerberos ++++++++++++++++++++++++++ +Once the dependencies have been installed, Kerberos needs to be configured so +that it can communicate with a domain. This configuration is done through the +``/etc/krb5.conf`` file, which is installed with the packages in the script above. + +To configure Kerberos, in the section that starts with: + +.. code-block:: ini + + [realms] + +Add the full domain name and the fully qualified domain names of the primary +and secondary Active Directory domain controllers. It should look something +like this: + +.. code-block:: ini + + [realms] + MY.DOMAIN.COM = { + kdc = domain-controller1.my.domain.com + kdc = domain-controller2.my.domain.com + } + +In the section that starts with: + +.. code-block:: ini + + [domain_realm] + +Add a line like the following for each domain that Ansible needs access for: + +.. code-block:: ini + + [domain_realm] + .my.domain.com = MY.DOMAIN.COM + +You can configure other settings in this file such as the default domain. See +`krb5.conf <https://web.mit.edu/kerberos/krb5-1.12/doc/admin/conf_files/krb5_conf.html>`_ +for more details. + +Automatic Kerberos Ticket Management +++++++++++++++++++++++++++++++++++++ +Ansible version 2.3 and later defaults to automatically managing Kerberos tickets +when both ``ansible_user`` and ``ansible_password`` are specified for a host. In +this process, a new ticket is created in a temporary credential cache for each +host. This is done before each task executes to minimize the chance of ticket +expiration. The temporary credential caches are deleted after each task +completes and will not interfere with the default credential cache. + +To disable automatic ticket management, set ``ansible_winrm_kinit_mode=manual`` +via the inventory. + +Automatic ticket management requires a standard ``kinit`` binary on the control +host system path. 
To specify a different location or binary name, set the +``ansible_winrm_kinit_cmd`` hostvar to the fully qualified path to a MIT krbv5 +``kinit``-compatible binary. + +Manual Kerberos Ticket Management ++++++++++++++++++++++++++++++++++ +To manually manage Kerberos tickets, the ``kinit`` binary is used. To +obtain a new ticket the following command is used: + +.. code-block:: shell + + kinit username@MY.DOMAIN.COM + +.. Note:: The domain must match the configured Kerberos realm exactly, and must be in upper case. + +To see what tickets (if any) have been acquired, use the following command: + +.. code-block:: shell + + klist + +To destroy all the tickets that have been acquired, use the following command: + +.. code-block:: shell + + kdestroy + +Troubleshooting Kerberos +++++++++++++++++++++++++ +Kerberos is reliant on a properly-configured environment to +work. To troubleshoot Kerberos issues, ensure that: + +* The hostname set for the Windows host is the FQDN and not an IP address. + +* The forward and reverse DNS lookups are working properly in the domain. To + test this, ping the windows host by name and then use the ip address returned + with ``nslookup``. The same name should be returned when using ``nslookup`` + on the IP address. + +* The Ansible host's clock is synchronized with the domain controller. Kerberos + is time sensitive, and a little clock drift can cause the ticket generation + process to fail. + +* Ensure that the fully qualified domain name for the domain is configured in + the ``krb5.conf`` file. To check this, run:: + + kinit -C username@MY.DOMAIN.COM + klist + + If the domain name returned by ``klist`` is different from the one requested, + an alias is being used. The ``krb5.conf`` file needs to be updated so that + the fully qualified domain name is used and not an alias. 
+ +* If the default kerberos tooling has been replaced or modified (some IdM solutions may do this), this may cause issues when installing or upgrading the Python Kerberos library. As of the time of this writing, this library is called ``pykerberos`` and is known to work with both MIT and Heimdal Kerberos libraries. To resolve ``pykerberos`` installation issues, ensure the system dependencies for Kerberos have been met (see: `Installing the Kerberos Library`_), remove any custom Kerberos tooling paths from the PATH environment variable, and retry the installation of Python Kerberos library package. + +CredSSP +------- +CredSSP authentication is a newer authentication protocol that allows +credential delegation. This is achieved by encrypting the username and password +after authentication has succeeded and sending that to the server using the +CredSSP protocol. + +Because the username and password are sent to the server to be used for double +hop authentication, ensure that the hosts that the Windows host communicates with are +not compromised and are trusted. + +CredSSP can be used for both local and domain accounts and also supports +message encryption over HTTP. + +To use CredSSP authentication, the host vars are configured like so: + +.. code-block:: yaml+jinja + + ansible_user: Username + ansible_password: Password + ansible_connection: winrm + ansible_winrm_transport: credssp + +There are some extra host variables that can be set as shown below:: + + ansible_winrm_credssp_disable_tlsv1_2: when true, will not use TLS 1.2 in the CredSSP auth process + +CredSSP authentication is not enabled by default on a Windows host, but can +be enabled by running the following in PowerShell: + +.. code-block:: powershell + + Enable-WSManCredSSP -Role Server -Force + +Installing CredSSP Library +++++++++++++++++++++++++++ + +The ``requests-credssp`` wrapper can be installed using ``pip``: + +.. 
code-block:: bash + + pip install pywinrm[credssp] + +CredSSP and TLS 1.2 ++++++++++++++++++++ +By default the ``requests-credssp`` library is configured to authenticate over +the TLS 1.2 protocol. TLS 1.2 is installed and enabled by default for Windows Server 2012 +and Windows 8 and more recent releases. + +There are two ways that older hosts can be used with CredSSP: + +* Install and enable a hotfix to enable TLS 1.2 support (recommended + for Server 2008 R2 and Windows 7). + +* Set ``ansible_winrm_credssp_disable_tlsv1_2=True`` in the inventory to run + over TLS 1.0. This is the only option when connecting to Windows Server 2008, which + has no way of supporting TLS 1.2 + +See :ref:`winrm_tls12` for more information on how to enable TLS 1.2 on the +Windows host. + +Set CredSSP Certificate ++++++++++++++++++++++++ +CredSSP works by encrypting the credentials through the TLS protocol and uses a self-signed certificate by default. The ``CertificateThumbprint`` option under the WinRM service configuration can be used to specify the thumbprint of +another certificate. + +.. Note:: This certificate configuration is independent of the WinRM listener + certificate. With CredSSP, message transport still occurs over the WinRM listener, + but the TLS-encrypted messages inside the channel use the service-level certificate. + +To explicitly set the certificate to use for CredSSP:: + + # Note the value $certificate_thumbprint will be different in each + # situation, this needs to be set based on the cert that is used. + $certificate_thumbprint = "7C8DCBD5427AFEE6560F4AF524E325915F51172C" + + # Set the thumbprint value + Set-Item -Path WSMan:\localhost\Service\CertificateThumbprint -Value $certificate_thumbprint + +Non-Administrator Accounts +`````````````````````````` +WinRM is configured by default to only allow connections from accounts in the local +``Administrators`` group. This can be changed by running: + +.. 
code-block:: powershell + + winrm configSDDL default + +This will display an ACL editor, where new users or groups may be added. To run commands +over WinRM, users and groups must have at least the ``Read`` and ``Execute`` permissions +enabled. + +While non-administrative accounts can be used with WinRM, most typical server administration +tasks require some level of administrative access, so the utility is usually limited. + +WinRM Encryption +```````````````` +By default WinRM will fail to work when running over an unencrypted channel. +The WinRM protocol considers the channel to be encrypted if using TLS over HTTP +(HTTPS) or using message level encryption. Using WinRM with TLS is the +recommended option as it works with all authentication options, but requires +a certificate to be created and used on the WinRM listener. + +The ``ConfigureRemotingForAnsible.ps1`` creates a self-signed certificate and +creates the listener with that certificate. If in a domain environment, ADCS +can also create a certificate for the host that is issued by the domain itself. + +If using HTTPS is not an option, then HTTP can be used when the authentication +option is ``NTLM``, ``Kerberos`` or ``CredSSP``. These protocols will encrypt +the WinRM payload with their own encryption method before sending it to the +server. The message-level encryption is not used when running over HTTPS because the +encryption uses the more secure TLS protocol instead. If both transport and +message encryption is required, set ``ansible_winrm_message_encryption=always`` +in the host vars. + +.. Note:: Message encryption over HTTP requires pywinrm>=0.3.0. + +A last resort is to disable the encryption requirement on the Windows host. This +should only be used for development and debugging purposes, as anything sent +from Ansible can be viewed, manipulated and also the remote session can completely +be taken over by anyone on the same network. 
To disable the encryption +requirement:: + + Set-Item -Path WSMan:\localhost\Service\AllowUnencrypted -Value $true + +.. Note:: Do not disable the encryption check unless it is + absolutely required. Doing so could allow sensitive information like + credentials and files to be intercepted by others on the network. + +Inventory Options +````````````````` +Ansible's Windows support relies on a few standard variables to indicate the +username, password, and connection type of the remote hosts. These variables +are most easily set up in the inventory, but can be set on the ``host_vars``/ +``group_vars`` level. + +When setting up the inventory, the following variables are required: + +.. code-block:: yaml+jinja + + # It is suggested that these be encrypted with ansible-vault: + # ansible-vault edit group_vars/windows.yml + ansible_connection: winrm + + # May also be passed on the command-line via --user + ansible_user: Administrator + + # May also be supplied at runtime with --ask-pass + ansible_password: SecretPasswordGoesHere + + +Using the variables above, Ansible will connect to the Windows host with Basic +authentication through HTTPS. If ``ansible_user`` has a UPN value like +``username@MY.DOMAIN.COM`` then the authentication option will automatically attempt +to use Kerberos unless ``ansible_winrm_transport`` has been set to something other than +``kerberos``. + +The following custom inventory variables are also supported +for additional configuration of WinRM connections: + +* ``ansible_port``: The port WinRM will run over, HTTPS is ``5986`` which is + the default while HTTP is ``5985`` + +* ``ansible_winrm_scheme``: Specify the connection scheme (``http`` or + ``https``) to use for the WinRM connection. 
Ansible uses ``https`` by default + unless ``ansible_port`` is ``5985`` + +* ``ansible_winrm_path``: Specify an alternate path to the WinRM endpoint, + Ansible uses ``/wsman`` by default + +* ``ansible_winrm_realm``: Specify the realm to use for Kerberos + authentication. If ``ansible_user`` contains ``@``, Ansible will use the part + of the username after ``@`` by default + +* ``ansible_winrm_transport``: Specify one or more authentication transport + options as a comma-separated list. By default, Ansible will use ``kerberos, + basic`` if the ``kerberos`` module is installed and a realm is defined, + otherwise it will be ``plaintext`` + +* ``ansible_winrm_server_cert_validation``: Specify the server certificate + validation mode (``ignore`` or ``validate``). Ansible defaults to + ``validate`` on Python 2.7.9 and higher, which will result in certificate + validation errors against the Windows self-signed certificates. Unless + verifiable certificates have been configured on the WinRM listeners, this + should be set to ``ignore`` + +* ``ansible_winrm_operation_timeout_sec``: Increase the default timeout for + WinRM operations, Ansible uses ``20`` by default + +* ``ansible_winrm_read_timeout_sec``: Increase the WinRM read timeout, Ansible + uses ``30`` by default. Useful if there are intermittent network issues and + read timeout errors keep occurring + +* ``ansible_winrm_message_encryption``: Specify the message encryption + operation (``auto``, ``always``, ``never``) to use, Ansible uses ``auto`` by + default. ``auto`` means message encryption is only used when + ``ansible_winrm_scheme`` is ``http`` and ``ansible_winrm_transport`` supports + message encryption. ``always`` means message encryption will always be used + and ``never`` means message encryption will never be used + +* ``ansible_winrm_ca_trust_path``: Used to specify a different cacert container + than the one used in the ``certifi`` module. 
See the HTTPS Certificate + Validation section for more details. + +* ``ansible_winrm_send_cbt``: When using ``ntlm`` or ``kerberos`` over HTTPS, + the authentication library will try to send channel binding tokens to + mitigate against man in the middle attacks. This flag controls whether these + bindings will be sent or not (default: ``yes``). + +* ``ansible_winrm_*``: Any additional keyword arguments supported by + ``winrm.Protocol`` may be provided in place of ``*`` + +In addition, there are also specific variables that need to be set +for each authentication option. See the section on authentication above for more information. + +.. Note:: Ansible 2.0 has deprecated the "ssh" from ``ansible_ssh_user``, + ``ansible_ssh_pass``, ``ansible_ssh_host``, and ``ansible_ssh_port`` to + become ``ansible_user``, ``ansible_password``, ``ansible_host``, and + ``ansible_port``. If using a version of Ansible prior to 2.0, the older + style (``ansible_ssh_*``) should be used instead. The shorter variables + are ignored, without warning, in older versions of Ansible. + +.. Note:: ``ansible_winrm_message_encryption`` is different from transport + encryption done over TLS. The WinRM payload is still encrypted with TLS + when run over HTTPS, even if ``ansible_winrm_message_encryption=never``. + +IPv6 Addresses +`````````````` +IPv6 addresses can be used instead of IPv4 addresses or hostnames. This option +is normally set in an inventory. Ansible will attempt to parse the address +using the `ipaddress <https://docs.python.org/3/library/ipaddress.html>`_ +package and pass to pywinrm correctly. + +When defining a host using an IPv6 address, just add the IPv6 address as you +would an IPv4 address or hostname: + +.. code-block:: ini + + [windows-server] + 2001:db8::1 + + [windows-server:vars] + ansible_user=username + ansible_password=password + ansible_connection=winrm + + +.. Note:: The ipaddress library is only included by default in Python 3.x. 
To + use IPv6 addresses in Python 2.7, make sure to run ``pip install ipaddress`` which installs + a backported package. + +HTTPS Certificate Validation +```````````````````````````` +As part of the TLS protocol, the certificate is validated to ensure the host +matches the subject and the client trusts the issuer of the server certificate. +When using a self-signed certificate or setting +``ansible_winrm_server_cert_validation: ignore`` these security mechanisms are +bypassed. While self signed certificates will always need the ``ignore`` flag, +certificates that have been issued from a certificate authority can still be +validated. + +One of the more common ways of setting up a HTTPS listener in a domain +environment is to use Active Directory Certificate Service (AD CS). AD CS is +used to generate signed certificates from a Certificate Signing Request (CSR). +If the WinRM HTTPS listener is using a certificate that has been signed by +another authority, like AD CS, then Ansible can be set up to trust that +issuer as part of the TLS handshake. + +To get Ansible to trust a Certificate Authority (CA) like AD CS, the issuer +certificate of the CA can be exported as a PEM encoded certificate. This +certificate can then be copied locally to the Ansible controller and used as a +source of certificate validation, otherwise known as a CA chain. + +The CA chain can contain a single or multiple issuer certificates and each +entry is contained on a new line. To then use the custom CA chain as part of +the validation process, set ``ansible_winrm_ca_trust_path`` to the path of the +file. If this variable is not set, the default CA chain is used instead which +is located in the install path of the Python package +`certifi <https://github.com/certifi/python-certifi>`_. + +.. Note:: Each HTTP call is done by the Python requests library which does not + use the systems built-in certificate store as a trust authority. 
+ Certificate validation will fail if the server's certificate issuer is + only added to the system's truststore. + +.. _winrm_tls12: + +TLS 1.2 Support +``````````````` +As WinRM runs over the HTTP protocol, using HTTPS means that the TLS protocol +is used to encrypt the WinRM messages. TLS will automatically attempt to +negotiate the best protocol and cipher suite that is available to both the +client and the server. If a match cannot be found then Ansible will error out +with a message similar to:: + + HTTPSConnectionPool(host='server', port=5986): Max retries exceeded with url: /wsman (Caused by SSLError(SSLError(1, '[SSL: UNSUPPORTED_PROTOCOL] unsupported protocol (_ssl.c:1056)'))) + +Commonly this is when the Windows host has not been configured to support +TLS v1.2 but it could also mean the Ansible controller has an older OpenSSL +version installed. + +Windows 8 and Windows Server 2012 come with TLS v1.2 installed and enabled by +default but older hosts, like Server 2008 R2 and Windows 7, have to be enabled +manually. + +.. Note:: There is a bug with the TLS 1.2 patch for Server 2008 which will stop + Ansible from connecting to the Windows host. This means that Server 2008 + cannot be configured to use TLS 1.2. Server 2008 R2 and Windows 7 are not + affected by this issue and can use TLS 1.2. + +To verify what protocol the Windows host supports, you can run the following +command on the Ansible controller:: + + openssl s_client -connect <hostname>:5986 + +The output will contain information about the TLS session and the ``Protocol`` +line will display the version that was negotiated:: + + New, TLSv1/SSLv3, Cipher is ECDHE-RSA-AES256-SHA + Server public key is 2048 bit + Secure Renegotiation IS supported + Compression: NONE + Expansion: NONE + No ALPN negotiated + SSL-Session: + Protocol : TLSv1 + Cipher : ECDHE-RSA-AES256-SHA + Session-ID: 962A00001C95D2A601BE1CCFA7831B85A7EEE897AECDBF3D9ECD4A3BE4F6AC9B + Session-ID-ctx: + Master-Key: .... 
+
+        Start Time: 1552976474
+        Timeout : 7200 (sec)
+        Verify return code: 21 (unable to verify the first certificate)
+    ---
+
+    New, TLSv1/SSLv3, Cipher is ECDHE-RSA-AES256-GCM-SHA384
+    Server public key is 2048 bit
+    Secure Renegotiation IS supported
+    Compression: NONE
+    Expansion: NONE
+    No ALPN negotiated
+    SSL-Session:
+        Protocol : TLSv1.2
+        Cipher : ECDHE-RSA-AES256-GCM-SHA384
+        Session-ID: AE16000050DA9FD44D03BB8839B64449805D9E43DBD670346D3D9E05D1AEEA84
+        Session-ID-ctx:
+        Master-Key: ....
+        Start Time: 1552976538
+        Timeout : 7200 (sec)
+        Verify return code: 21 (unable to verify the first certificate)
+
+If the host is returning ``TLSv1`` then it should be configured so that
+TLS v1.2 is enabled. You can do this by running the following PowerShell
+script:
+
+.. code-block:: powershell
+
+    Function Enable-TLS12 {
+        param(
+            [ValidateSet("Server", "Client")]
+            [String]$Component = "Server"
+        )
+
+        $protocols_path = 'HKLM:\SYSTEM\CurrentControlSet\Control\SecurityProviders\SCHANNEL\Protocols'
+        New-Item -Path "$protocols_path\TLS 1.2\$Component" -Force
+        New-ItemProperty -Path "$protocols_path\TLS 1.2\$Component" -Name Enabled -Value 1 -Type DWORD -Force
+        New-ItemProperty -Path "$protocols_path\TLS 1.2\$Component" -Name DisabledByDefault -Value 0 -Type DWORD -Force
+    }
+
+    Enable-TLS12 -Component Server
+
+    # Not required but highly recommended to enable the Client side TLS 1.2 components
+    Enable-TLS12 -Component Client
+
+    Restart-Computer
+
+The below Ansible tasks can also be used to enable TLS v1.2:
+
+..
code-block:: yaml+jinja + + - name: enable TLSv1.2 support + win_regedit: + path: HKLM:\SYSTEM\CurrentControlSet\Control\SecurityProviders\SCHANNEL\Protocols\TLS 1.2\{{ item.type }} + name: '{{ item.property }}' + data: '{{ item.value }}' + type: dword + state: present + register: enable_tls12 + loop: + - type: Server + property: Enabled + value: 1 + - type: Server + property: DisabledByDefault + value: 0 + - type: Client + property: Enabled + value: 1 + - type: Client + property: DisabledByDefault + value: 0 + + - name: reboot if TLS config was applied + win_reboot: + when: enable_tls12 is changed + +There are other ways to configure the TLS protocols as well as the cipher +suites that are offered by the Windows host. One tool that can give you a GUI +to manage these settings is `IIS Crypto <https://www.nartac.com/Products/IISCrypto/>`_ +from Nartac Software. + +Limitations +``````````` +Due to the design of the WinRM protocol , there are a few limitations +when using WinRM that can cause issues when creating playbooks for Ansible. +These include: + +* Credentials are not delegated for most authentication types, which causes + authentication errors when accessing network resources or installing certain + programs. + +* Many calls to the Windows Update API are blocked when running over WinRM. + +* Some programs fail to install with WinRM due to no credential delegation or + because they access forbidden Windows API like WUA over WinRM. + +* Commands under WinRM are done under a non-interactive session, which can prevent + certain commands or executables from running. + +* You cannot run a process that interacts with ``DPAPI``, which is used by some + installers (like Microsoft SQL Server). 
+ +Some of these limitations can be mitigated by doing one of the following: + +* Set ``ansible_winrm_transport`` to ``credssp`` or ``kerberos`` (with + ``ansible_winrm_kerberos_delegation=true``) to bypass the double hop issue + and access network resources + +* Use ``become`` to bypass all WinRM restrictions and run a command as it would + locally. Unlike using an authentication transport like ``credssp``, this will + also remove the non-interactive restriction and API restrictions like WUA and + DPAPI + +* Use a scheduled task to run a command which can be created with the + ``win_scheduled_task`` module. Like ``become``, this bypasses all WinRM + restrictions but can only run a command and not modules. + + +.. seealso:: + + :ref:`playbooks_intro` + An introduction to playbooks + :ref:`playbooks_best_practices` + Tips and tricks for playbooks + :ref:`List of Windows Modules <windows_modules>` + Windows specific module list, all implemented in PowerShell + `User Mailing List <https://groups.google.com/group/ansible-project>`_ + Have a question? Stop by the google group! + `irc.freenode.net <http://irc.freenode.net>`_ + #ansible IRC chat channel diff --git a/docs/docsite/sphinx_conf/2.10_conf.py b/docs/docsite/sphinx_conf/2.10_conf.py new file mode 100644 index 00000000..e68b90c8 --- /dev/null +++ b/docs/docsite/sphinx_conf/2.10_conf.py @@ -0,0 +1,300 @@ +# -*- coding: utf-8 -*- +# +# documentation build configuration file, created by +# sphinx-quickstart on Sat Sep 27 13:23:22 2008-2009. +# +# This file is execfile()d with the current directory set to its +# containing dir. +# +# The contents of this file are pickled, so don't put values in the namespace +# that aren't pickleable (module imports are okay, they're removed +# automatically). +# +# All configuration values have a default value; values that are commented out +# serve to show the default value. 
+ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import sys +import os + +# pip install sphinx_rtd_theme +# import sphinx_rtd_theme +# html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] + +# If your extensions are in another directory, add it here. If the directory +# is relative to the documentation root, use os.path.abspath to make it +# absolute, like shown here. +# sys.path.append(os.path.abspath('some/directory')) +# +sys.path.insert(0, os.path.join('ansible', 'lib')) +sys.path.append(os.path.abspath(os.path.join('..', '_extensions'))) + +# We want sphinx to document the ansible modules contained in this repository, +# not those that may happen to be installed in the version +# of Python used to run sphinx. When sphinx loads in order to document, +# the repository version needs to be the one that is loaded: +sys.path.insert(0, os.path.abspath(os.path.join('..', '..', '..', 'lib'))) + +VERSION = '2.10' +AUTHOR = 'Ansible, Inc' + + +# General configuration +# --------------------- + +# Add any Sphinx extension module names here, as strings. +# They can be extensions +# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. +# TEST: 'sphinxcontrib.fulltoc' +extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'pygments_lexer', 'notfound.extension'] + +# Later on, add 'sphinx.ext.viewcode' to the list if you want to have +# colorized code generated too for references. + + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['.templates'] + +# The suffix of source filenames. +source_suffix = '.rst' + +# The master toctree document. +master_doc = 'index' + +# General substitutions. +project = 'Ansible' +copyright = "2021 Red Hat, Inc." + +# The default replacements for |version| and |release|, also used in various +# other places throughout the built documents. +# +# The short X.Y version. 
+version = VERSION +# The full version, including alpha/beta/rc tags. +release = VERSION + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +# today = '' +# Else, today_fmt is used as the format for a strftime call. +today_fmt = '%B %d, %Y' + +# List of documents that shouldn't be included in the build. +# unused_docs = [] + +# List of directories, relative to source directories, that shouldn't be +# searched for source files. +# exclude_dirs = [] + +# A list of glob-style patterns that should be excluded when looking +# for source files. +exclude_patterns = [ + '2.10_index.rst', + 'ansible_index.rst', + 'core_index.rst', + 'porting_guides/core_porting_guides', +] + +# The reST default role (used for this markup: `text`) to use for all +# documents. +# default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +# add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +# add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +# show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +highlight_language = 'YAML+Jinja' + +# Substitutions, variables, entities, & shortcuts for text which do not need to link to anything. +# For titles which should be a link, use the intersphinx anchors set at the index, chapter, and section levels, such as qi_start_: +# |br| is useful for formatting fields inside of tables +# |_| is a nonbreaking space; similarly useful inside of tables +rst_epilog = """ +.. |br| raw:: html + + <br> +.. 
|_| unicode:: 0xA0 + :trim: +""" + + +# Options for HTML output +# ----------------------- + +html_theme_path = ['../_themes'] +html_theme = 'sphinx_rtd_theme' +html_short_title = 'Ansible Documentation' +html_show_sphinx = False + +html_theme_options = { + 'canonical_url': "https://docs.ansible.com/ansible/latest/", + 'vcs_pageview_mode': 'edit' +} + +html_context = { + 'display_github': 'True', + 'github_user': 'ansible', + 'github_repo': 'ansible', + 'github_version': 'devel/docs/docsite/rst/', + 'github_module_version': 'devel/lib/ansible/modules/', + 'github_root_dir': 'devel/lib/ansible', + 'github_cli_version': 'devel/lib/ansible/cli/', + 'current_version': version, + 'latest_version': '3', + # list specifically out of order to make latest work + 'available_versions': ('latest', '2.10', '2.9', '2.9_ja', '2.8', 'devel'), + 'css_files': ('_static/ansible.css', # overrides to the standard theme + ), +} + +# The style sheet to use for HTML and HTML Help pages. A file of that name +# must exist either in Sphinx' static/ path, or in one of the custom paths +# given in html_static_path. +# html_style = 'solar.css' + +# The name for this set of Sphinx documents. If None, it defaults to +# "<project> v<release> documentation". +html_title = 'Ansible Documentation' + +# A shorter title for the navigation bar. Default is the same as html_title. +# html_short_title = None + +# The name of an image file (within the static path) to place at the top of +# the sidebar. +# html_logo = + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +# html_favicon = 'favicon.ico' + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". 
+html_static_path = ['../_static'] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +html_last_updated_fmt = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +# html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +# html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +# html_additional_pages = {} + +# If false, no module index is generated. +# html_use_modindex = True + +# If false, no index is generated. +# html_use_index = True + +# If true, the index is split into individual pages for each letter. +# html_split_index = False + +# If true, the reST sources are included in the HTML build as _sources/<name>. +html_copy_source = False + +# If true, an OpenSearch description file will be output, and all pages will +# contain a <link> tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +# html_use_opensearch = 'https://docs.ansible.com/ansible/latest' + +# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). +# html_file_suffix = '' + +# Output file base name for HTML help builder. 
+htmlhelp_basename = 'Poseidodoc' + +# Configuration for sphinx-notfound-pages +# with no 'notfound_template' and no 'notfound_context' set, +# the extension builds 404.rst into a location-agnostic 404 page +# +# default is `en` - using this for the sub-site: +notfound_default_language = "ansible" +# default is `latest`: +# setting explicitly - docsite serves up /ansible/latest/404.html +# so keep this set to `latest` even on the `devel` branch +# then no maintenance is needed when we branch a new stable_x.x +notfound_default_version = "latest" +# makes default setting explicit: +notfound_no_urls_prefix = False + +# Options for LaTeX output +# ------------------------ + +# The paper size ('letter' or 'a4'). +# latex_paper_size = 'letter' + +# The font size ('10pt', '11pt' or '12pt'). +# latex_font_size = '10pt' + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, author, document class +# [howto/manual]). +latex_documents = [ + ('index', 'ansible.tex', 'Ansible 2.2 Documentation', AUTHOR, 'manual'), +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +# latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +# latex_use_parts = False + +# Additional stuff for the LaTeX preamble. +# latex_preamble = '' + +# Documents to append as an appendix to all manuals. +# latex_appendices = [] + +# If false, no module index is generated. +# latex_use_modindex = True + +autoclass_content = 'both' + +# Note: Our strategy for intersphinx mappings is to have the upstream build location as the +# canonical source and then cached copies of the mapping stored locally in case someone is building +# when disconnected from the internet. We then have a script to update the cached copies. 
+# +# Because of that, each entry in this mapping should have this format: +# name: ('http://UPSTREAM_URL', (None, 'path/to/local/cache.inv')) +# +# The update script depends on this format so deviating from this (for instance, adding a third +# location for the mappning to live) will confuse it. +intersphinx_mapping = {'python': ('https://docs.python.org/2/', (None, '../python2.inv')), + 'python3': ('https://docs.python.org/3/', (None, '../python3.inv')), + 'jinja2': ('http://jinja.palletsprojects.com/', (None, '../jinja2.inv')), + 'ansible_2_10': ('https://docs.ansible.com/ansible/2.10/', (None, '../ansible_2_10.inv')), + 'ansible_2_9': ('https://docs.ansible.com/ansible/2.9/', (None, '../ansible_2_9.inv')), + 'ansible_2_8': ('https://docs.ansible.com/ansible/2.8/', (None, '../ansible_2_8.inv')), + 'ansible_2_7': ('https://docs.ansible.com/ansible/2.7/', (None, '../ansible_2_7.inv')), + 'ansible_2_6': ('https://docs.ansible.com/ansible/2.6/', (None, '../ansible_2_6.inv')), + 'ansible_2_5': ('https://docs.ansible.com/ansible/2.5/', (None, '../ansible_2_5.inv')), + } + +# linckchecker settings +linkcheck_ignore = [ + r'http://irc\.freenode\.net', +] +linkcheck_workers = 25 +# linkcheck_anchors = False diff --git a/docs/docsite/sphinx_conf/ansible_conf.py b/docs/docsite/sphinx_conf/ansible_conf.py new file mode 100644 index 00000000..44af3496 --- /dev/null +++ b/docs/docsite/sphinx_conf/ansible_conf.py @@ -0,0 +1,306 @@ +# -*- coding: utf-8 -*- +# +# documentation build configuration file, created by +# sphinx-quickstart on Sat Sep 27 13:23:22 2008-2009. +# +# This file is execfile()d with the current directory set to its +# containing dir. +# +# The contents of this file are pickled, so don't put values in the namespace +# that aren't pickleable (module imports are okay, they're removed +# automatically). +# +# All configuration values have a default value; values that are commented out +# serve to show the default value. 
+ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import sys +import os + +# pip install sphinx_rtd_theme +# import sphinx_rtd_theme +# html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] + +# If your extensions are in another directory, add it here. If the directory +# is relative to the documentation root, use os.path.abspath to make it +# absolute, like shown here. +# sys.path.append(os.path.abspath('some/directory')) +# +sys.path.insert(0, os.path.join('ansible', 'lib')) +sys.path.append(os.path.abspath(os.path.join('..', '_extensions'))) + +# We want sphinx to document the ansible modules contained in this repository, +# not those that may happen to be installed in the version +# of Python used to run sphinx. When sphinx loads in order to document, +# the repository version needs to be the one that is loaded: +sys.path.insert(0, os.path.abspath(os.path.join('..', '..', '..', 'lib'))) + +VERSION = '3' +AUTHOR = 'Ansible, Inc' + + +# General configuration +# --------------------- + +# Add any Sphinx extension module names here, as strings. +# They can be extensions +# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. +# TEST: 'sphinxcontrib.fulltoc' +extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'pygments_lexer', 'notfound.extension'] + +# Later on, add 'sphinx.ext.viewcode' to the list if you want to have +# colorized code generated too for references. + + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['.templates'] + +# The suffix of source filenames. +source_suffix = '.rst' + +# The master toctree document. +master_doc = 'index' + +# General substitutions. +project = 'Ansible' +copyright = "2021 Red Hat, Inc." + +# The default replacements for |version| and |release|, also used in various +# other places throughout the built documents. +# +# The short X.Y version. +version = VERSION +# The full version, including alpha/beta/rc tags. 
+release = VERSION + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +# today = '' +# Else, today_fmt is used as the format for a strftime call. +today_fmt = '%B %d, %Y' + +# List of documents that shouldn't be included in the build. +# unused_docs = [] + +# List of directories, relative to source directories, that shouldn't be +# searched for source files. +# exclude_dirs = [] + +# A list of glob-style patterns that should be excluded when looking +# for source files. +exclude_patterns = [ + '2.10_index.rst', + 'ansible_index.rst', + 'core_index.rst', + 'porting_guides/core_porting_guides.rst', + 'porting_guides/porting_guide_base_2.10.rst', + 'porting_guides/porting_guide_core_2.11.rst', + 'roadmap/index.rst', + 'roadmap/ansible_base_roadmap_index.rst', + 'roadmap/ROADMAP_2_10.rst', + 'roadmap/ROADMAP_2_11.rst' +] + +# The reST default role (used for this markup: `text`) to use for all +# documents. +# default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +# add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +# add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +# show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +highlight_language = 'YAML+Jinja' + +# Substitutions, variables, entities, & shortcuts for text which do not need to link to anything. +# For titles which should be a link, use the intersphinx anchors set at the index, chapter, and section levels, such as qi_start_: +# |br| is useful for formatting fields inside of tables +# |_| is a nonbreaking space; similarly useful inside of tables +rst_epilog = """ +.. |br| raw:: html + + <br> +.. 
|_| unicode:: 0xA0 + :trim: +""" + + +# Options for HTML output +# ----------------------- + +html_theme_path = ['../_themes'] +html_theme = 'sphinx_rtd_theme' +html_short_title = 'Ansible Documentation' +html_show_sphinx = False + +html_theme_options = { + 'canonical_url': "https://docs.ansible.com/ansible/latest/", + 'vcs_pageview_mode': 'edit' +} + +html_context = { + 'display_github': 'True', + 'github_user': 'ansible', + 'github_repo': 'ansible', + 'github_version': 'devel/docs/docsite/rst/', + 'github_module_version': 'devel/lib/ansible/modules/', + 'github_root_dir': 'devel/lib/ansible', + 'github_cli_version': 'devel/lib/ansible/cli/', + 'current_version': version, + 'latest_version': '3', + # list specifically out of order to make latest work + 'available_versions': ('latest', '2.10', '2.9', '2.9_ja', '2.8', 'devel'), + 'css_files': ('_static/ansible.css', # overrides to the standard theme + ), +} + +# The style sheet to use for HTML and HTML Help pages. A file of that name +# must exist either in Sphinx' static/ path, or in one of the custom paths +# given in html_static_path. +# html_style = 'solar.css' + +# The name for this set of Sphinx documents. If None, it defaults to +# "<project> v<release> documentation". +html_title = 'Ansible Documentation' + +# A shorter title for the navigation bar. Default is the same as html_title. +# html_short_title = None + +# The name of an image file (within the static path) to place at the top of +# the sidebar. +# html_logo = + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +# html_favicon = 'favicon.ico' + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". 
+html_static_path = ['../_static'] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +html_last_updated_fmt = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +# html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +# html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +# html_additional_pages = {} + +# If false, no module index is generated. +# html_use_modindex = True + +# If false, no index is generated. +# html_use_index = True + +# If true, the index is split into individual pages for each letter. +# html_split_index = False + +# If true, the reST sources are included in the HTML build as _sources/<name>. +html_copy_source = False + +# If true, an OpenSearch description file will be output, and all pages will +# contain a <link> tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +# html_use_opensearch = 'https://docs.ansible.com/ansible/latest' + +# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). +# html_file_suffix = '' + +# Output file base name for HTML help builder. 
+htmlhelp_basename = 'Poseidodoc' + +# Configuration for sphinx-notfound-pages +# with no 'notfound_template' and no 'notfound_context' set, +# the extension builds 404.rst into a location-agnostic 404 page +# +# default is `en` - using this for the sub-site: +notfound_default_language = "ansible" +# default is `latest`: +# setting explicitly - docsite serves up /ansible/latest/404.html +# so keep this set to `latest` even on the `devel` branch +# then no maintenance is needed when we branch a new stable_x.x +notfound_default_version = "latest" +# makes default setting explicit: +notfound_no_urls_prefix = False + +# Options for LaTeX output +# ------------------------ + +# The paper size ('letter' or 'a4'). +# latex_paper_size = 'letter' + +# The font size ('10pt', '11pt' or '12pt'). +# latex_font_size = '10pt' + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, author, document class +# [howto/manual]). +latex_documents = [ + ('index', 'ansible.tex', 'Ansible 2.2 Documentation', AUTHOR, 'manual'), +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +# latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +# latex_use_parts = False + +# Additional stuff for the LaTeX preamble. +# latex_preamble = '' + +# Documents to append as an appendix to all manuals. +# latex_appendices = [] + +# If false, no module index is generated. +# latex_use_modindex = True + +autoclass_content = 'both' + +# Note: Our strategy for intersphinx mappings is to have the upstream build location as the +# canonical source and then cached copies of the mapping stored locally in case someone is building +# when disconnected from the internet. We then have a script to update the cached copies. 
+# +# Because of that, each entry in this mapping should have this format: +# name: ('http://UPSTREAM_URL', (None, 'path/to/local/cache.inv')) +# +# The update script depends on this format so deviating from this (for instance, adding a third +# location for the mappning to live) will confuse it. +intersphinx_mapping = {'python': ('https://docs.python.org/2/', (None, '../python2.inv')), + 'python3': ('https://docs.python.org/3/', (None, '../python3.inv')), + 'jinja2': ('http://jinja.palletsprojects.com/', (None, '../jinja2.inv')), + 'ansible_2_10': ('https://docs.ansible.com/ansible/2.10/', (None, '../ansible_2_10.inv')), + 'ansible_2_9': ('https://docs.ansible.com/ansible/2.9/', (None, '../ansible_2_9.inv')), + 'ansible_2_8': ('https://docs.ansible.com/ansible/2.8/', (None, '../ansible_2_8.inv')), + 'ansible_2_7': ('https://docs.ansible.com/ansible/2.7/', (None, '../ansible_2_7.inv')), + 'ansible_2_6': ('https://docs.ansible.com/ansible/2.6/', (None, '../ansible_2_6.inv')), + 'ansible_2_5': ('https://docs.ansible.com/ansible/2.5/', (None, '../ansible_2_5.inv')), + } + +# linckchecker settings +linkcheck_ignore = [ + r'http://irc\.freenode\.net', +] +linkcheck_workers = 25 +# linkcheck_anchors = False diff --git a/docs/docsite/sphinx_conf/core_conf.py b/docs/docsite/sphinx_conf/core_conf.py new file mode 100644 index 00000000..c10fe028 --- /dev/null +++ b/docs/docsite/sphinx_conf/core_conf.py @@ -0,0 +1,314 @@ +# -*- coding: utf-8 -*- +# +# documentation build configuration file, created by +# sphinx-quickstart on Sat Sep 27 13:23:22 2008-2009. +# +# This file is execfile()d with the current directory set to its +# containing dir. +# +# The contents of this file are pickled, so don't put values in the namespace +# that aren't pickleable (module imports are okay, they're removed +# automatically). +# +# All configuration values have a default value; values that are commented out +# serve to show the default value. 
+ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import sys +import os + +# pip install sphinx_rtd_theme +# import sphinx_rtd_theme +# html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] + +# If your extensions are in another directory, add it here. If the directory +# is relative to the documentation root, use os.path.abspath to make it +# absolute, like shown here. +# sys.path.append(os.path.abspath('some/directory')) +# +sys.path.insert(0, os.path.join('ansible', 'lib')) +sys.path.append(os.path.abspath(os.path.join('..', '_extensions'))) + +# We want sphinx to document the ansible modules contained in this repository, +# not those that may happen to be installed in the version +# of Python used to run sphinx. When sphinx loads in order to document, +# the repository version needs to be the one that is loaded: +sys.path.insert(0, os.path.abspath(os.path.join('..', '..', '..', 'lib'))) + +VERSION = '2.10' +AUTHOR = 'Ansible, Inc' + + +# General configuration +# --------------------- + +# Add any Sphinx extension module names here, as strings. +# They can be extensions +# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. +# TEST: 'sphinxcontrib.fulltoc' +extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'pygments_lexer', 'notfound.extension'] + +# Later on, add 'sphinx.ext.viewcode' to the list if you want to have +# colorized code generated too for references. + + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['.templates'] + +# The suffix of source filenames. +source_suffix = '.rst' + +# The master toctree document. +master_doc = 'index' + +# General substitutions. +project = 'Ansible' +copyright = "2021 Red Hat, Inc." + +# The default replacements for |version| and |release|, also used in various +# other places throughout the built documents. +# +# The short X.Y version. 
+version = VERSION +# The full version, including alpha/beta/rc tags. +release = VERSION + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +# today = '' +# Else, today_fmt is used as the format for a strftime call. +today_fmt = '%B %d, %Y' + +# List of documents that shouldn't be included in the build. +# unused_docs = [] + +# List of directories, relative to source directories, that shouldn't be +# searched for source files. +# exclude_dirs = [] + +# A list of glob-style patterns that should be excluded when looking +# for source files. +exclude_patterns = [ + '2.10_index.rst', + 'ansible_index.rst', + 'core_index.rst', + 'galaxy', + 'network', + 'scenario_guides', + 'porting_guides/porting_guides.rst', + 'porting_guides/porting_guide_2*', + 'porting_guides/porting_guide_3*', + 'roadmap/index.rst', + 'roadmap/ansible_roadmap_index.rst', + 'roadmap/old_roadmap_index.rst', + 'roadmap/ROADMAP_2_5.rst', + 'roadmap/ROADMAP_2_6.rst', + 'roadmap/ROADMAP_2_7.rst', + 'roadmap/ROADMAP_2_8.rst', + 'roadmap/ROADMAP_2_9.rst', + 'roadmap/COLLECTIONS*' +] + +# The reST default role (used for this markup: `text`) to use for all +# documents. +# default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +# add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +# add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +# show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +highlight_language = 'YAML+Jinja' + +# Substitutions, variables, entities, & shortcuts for text which do not need to link to anything. 
+# For titles which should be a link, use the intersphinx anchors set at the index, chapter, and section levels, such as qi_start_: +# |br| is useful for formatting fields inside of tables +# |_| is a nonbreaking space; similarly useful inside of tables +rst_epilog = """ +.. |br| raw:: html + + <br> +.. |_| unicode:: 0xA0 + :trim: +""" + + +# Options for HTML output +# ----------------------- + +html_theme_path = ['../_themes'] +html_theme = 'sphinx_rtd_theme' +html_short_title = 'Ansible Core Documentation' +html_show_sphinx = False + +html_theme_options = { + 'canonical_url': "https://docs.ansible.com/ansible/latest/", + 'vcs_pageview_mode': 'edit' +} + +html_context = { + 'display_github': 'True', + 'github_user': 'ansible', + 'github_repo': 'ansible', + 'github_version': 'devel/docs/docsite/rst/', + 'github_module_version': 'devel/lib/ansible/modules/', + 'github_root_dir': 'devel/lib/ansible', + 'github_cli_version': 'devel/lib/ansible/cli/', + 'current_version': version, + 'latest_version': '2.10', + # list specifically out of order to make latest work + 'available_versions': ('2.10', 'devel',), + 'css_files': ('_static/ansible.css', # overrides to the standard theme + ), +} + +# The style sheet to use for HTML and HTML Help pages. A file of that name +# must exist either in Sphinx' static/ path, or in one of the custom paths +# given in html_static_path. +# html_style = 'solar.css' + +# The name for this set of Sphinx documents. If None, it defaults to +# "<project> v<release> documentation". +html_title = 'Ansible Core Documentation' + +# A shorter title for the navigation bar. Default is the same as html_title. +# html_short_title = None + +# The name of an image file (within the static path) to place at the top of +# the sidebar. +# html_logo = + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. 
+# html_favicon = 'favicon.ico' + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['../_static'] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +html_last_updated_fmt = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +# html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +# html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +# html_additional_pages = {} + +# If false, no module index is generated. +# html_use_modindex = True + +# If false, no index is generated. +# html_use_index = True + +# If true, the index is split into individual pages for each letter. +# html_split_index = False + +# If true, the reST sources are included in the HTML build as _sources/<name>. +html_copy_source = False + +# If true, an OpenSearch description file will be output, and all pages will +# contain a <link> tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +# html_use_opensearch = 'https://docs.ansible.com/ansible/latest' + +# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). +# html_file_suffix = '' + +# Output file base name for HTML help builder. 
+htmlhelp_basename = 'Poseidodoc' + +# Configuration for sphinx-notfound-pages +# with no 'notfound_template' and no 'notfound_context' set, +# the extension builds 404.rst into a location-agnostic 404 page +# +# default is `en` - using this for the sub-site: +notfound_default_language = "ansible" +# default is `latest`: +# setting explicitly - docsite serves up /ansible/latest/404.html +# so keep this set to `latest` even on the `devel` branch +# then no maintenance is needed when we branch a new stable_x.x +notfound_default_version = "latest" +# makes default setting explicit: +notfound_no_urls_prefix = False + +# Options for LaTeX output +# ------------------------ + +# The paper size ('letter' or 'a4'). +# latex_paper_size = 'letter' + +# The font size ('10pt', '11pt' or '12pt'). +# latex_font_size = '10pt' + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, author, document class +# [howto/manual]). +latex_documents = [ + ('index', 'ansible.tex', 'Ansible 2.2 Documentation', AUTHOR, 'manual'), +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +# latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +# latex_use_parts = False + +# Additional stuff for the LaTeX preamble. +# latex_preamble = '' + +# Documents to append as an appendix to all manuals. +# latex_appendices = [] + +# If false, no module index is generated. +# latex_use_modindex = True + +autoclass_content = 'both' + +# Note: Our strategy for intersphinx mappings is to have the upstream build location as the +# canonical source and then cached copies of the mapping stored locally in case someone is building +# when disconnected from the internet. We then have a script to update the cached copies. 
+# +# Because of that, each entry in this mapping should have this format: +# name: ('http://UPSTREAM_URL', (None, 'path/to/local/cache.inv')) +# +# The update script depends on this format so deviating from this (for instance, adding a third +# location for the mappning to live) will confuse it. +intersphinx_mapping = {'python': ('https://docs.python.org/2/', (None, '../python2.inv')), + 'python3': ('https://docs.python.org/3/', (None, '../python3.inv')), + 'jinja2': ('http://jinja.palletsprojects.com/', (None, '../jinja2.inv')), + 'ansible_2_10': ('https://docs.ansible.com/ansible/2.10/', (None, '../ansible_2_10.inv')), + 'ansible_2_9': ('https://docs.ansible.com/ansible/2.9/', (None, '../ansible_2_9.inv')), + 'ansible_2_8': ('https://docs.ansible.com/ansible/2.8/', (None, '../ansible_2_8.inv')), + 'ansible_2_7': ('https://docs.ansible.com/ansible/2.7/', (None, '../ansible_2_7.inv')), + 'ansible_2_6': ('https://docs.ansible.com/ansible/2.6/', (None, '../ansible_2_6.inv')), + 'ansible_2_5': ('https://docs.ansible.com/ansible/2.5/', (None, '../ansible_2_5.inv')), + } + +# linckchecker settings +linkcheck_ignore = [ + r'http://irc\.freenode\.net', +] +linkcheck_workers = 25 +# linkcheck_anchors = False diff --git a/docs/docsite/variables.dot b/docs/docsite/variables.dot new file mode 100644 index 00000000..f5860dcb --- /dev/null +++ b/docs/docsite/variables.dot @@ -0,0 +1,38 @@ +digraph G { + + subgraph cluster_0 { + "command line variables" -> "--extra-args" + } + + subgraph cluster_1 { + "role variables" -> "roles/rolename/vars.yml" -> "parameters passed to role" -> "parameters from dependent roles" + } + + subgraph cluster_2 { + "top-level playbook variables" -> "vars: directives" -> "vars_files: directives"; + } + + subgraph cluster_3 { + "inventory variables" -> "group_vars/all" -> "group_vars/grandparent1" -> "group_vars/parent1" -> "host_vars/myhostname"; + "group_vars/all" -> "group_vars/grandparent2"; + "group_vars/grandparent1" -> "group_vars/parent2" + 
"group_vars/grandparent2" -> "host_vars/myhostname"; + "group_vars/parent2" -> "host_vars/myhostname" + } + + subgraph cluster_4 { + "facts" -> "gathered host facts" + "facts" -> "host facts from /etc/ansible/facts.d" + "facts" -> "set_fact" + "facts" -> "include_vars" + } + + subgraph cluster_5 { + "role defaults" -> "roles/rolename/defaults.yml" + } + + "command line variables" -> "role variables" -> "top-level playbook variables" -> "inventory variables" -> "role defaults" -> "facts" + + + +} |