Diffstat (limited to 'test/lib/ansible_test/_internal')
-rw-r--r--  test/lib/ansible_test/_internal/__init__.py | 3
-rw-r--r--  test/lib/ansible_test/_internal/ansible_util.py | 296
-rw-r--r--  test/lib/ansible_test/_internal/cache.py | 35
-rw-r--r--  test/lib/ansible_test/_internal/ci/__init__.py | 227
-rw-r--r--  test/lib/ansible_test/_internal/ci/azp.py | 268
-rw-r--r--  test/lib/ansible_test/_internal/ci/local.py | 217
-rw-r--r--  test/lib/ansible_test/_internal/ci/shippable.py | 269
-rw-r--r--  test/lib/ansible_test/_internal/classification.py | 977
-rw-r--r--  test/lib/ansible_test/_internal/cli.py | 1217
-rw-r--r--  test/lib/ansible_test/_internal/cloud/__init__.py | 429
-rw-r--r--  test/lib/ansible_test/_internal/cloud/acme.py | 193
-rw-r--r--  test/lib/ansible_test/_internal/cloud/aws.py | 124
-rw-r--r--  test/lib/ansible_test/_internal/cloud/azure.py | 213
-rw-r--r--  test/lib/ansible_test/_internal/cloud/cloudscale.py | 80
-rw-r--r--  test/lib/ansible_test/_internal/cloud/cs.py | 300
-rw-r--r--  test/lib/ansible_test/_internal/cloud/fallaxy.py | 177
-rw-r--r--  test/lib/ansible_test/_internal/cloud/foreman.py | 191
-rw-r--r--  test/lib/ansible_test/_internal/cloud/gcp.py | 62
-rw-r--r--  test/lib/ansible_test/_internal/cloud/hcloud.py | 116
-rw-r--r--  test/lib/ansible_test/_internal/cloud/nios.py | 193
-rw-r--r--  test/lib/ansible_test/_internal/cloud/opennebula.py | 66
-rw-r--r--  test/lib/ansible_test/_internal/cloud/openshift.py | 236
-rw-r--r--  test/lib/ansible_test/_internal/cloud/scaleway.py | 72
-rw-r--r--  test/lib/ansible_test/_internal/cloud/tower.py | 255
-rw-r--r--  test/lib/ansible_test/_internal/cloud/vcenter.py | 232
-rw-r--r--  test/lib/ansible_test/_internal/cloud/vultr.py | 71
-rw-r--r--  test/lib/ansible_test/_internal/config.py | 356
-rw-r--r--  test/lib/ansible_test/_internal/constants.py | 10
-rw-r--r--  test/lib/ansible_test/_internal/core_ci.py | 680
-rw-r--r--  test/lib/ansible_test/_internal/coverage/__init__.py | 325
-rw-r--r--  test/lib/ansible_test/_internal/coverage/analyze/__init__.py | 19
-rw-r--r--  test/lib/ansible_test/_internal/coverage/analyze/targets/__init__.py | 154
-rw-r--r--  test/lib/ansible_test/_internal/coverage/analyze/targets/combine.py | 64
-rw-r--r--  test/lib/ansible_test/_internal/coverage/analyze/targets/expand.py | 39
-rw-r--r--  test/lib/ansible_test/_internal/coverage/analyze/targets/filter.py | 104
-rw-r--r--  test/lib/ansible_test/_internal/coverage/analyze/targets/generate.py | 146
-rw-r--r--  test/lib/ansible_test/_internal/coverage/analyze/targets/missing.py | 109
-rw-r--r--  test/lib/ansible_test/_internal/coverage/combine.py | 303
-rw-r--r--  test/lib/ansible_test/_internal/coverage/erase.py | 27
-rw-r--r--  test/lib/ansible_test/_internal/coverage/html.py | 45
-rw-r--r--  test/lib/ansible_test/_internal/coverage/report.py | 156
-rw-r--r--  test/lib/ansible_test/_internal/coverage/xml.py | 191
-rw-r--r--  test/lib/ansible_test/_internal/coverage_util.py | 125
-rw-r--r--  test/lib/ansible_test/_internal/csharp_import_analysis.py | 106
-rw-r--r--  test/lib/ansible_test/_internal/data.py | 200
-rw-r--r--  test/lib/ansible_test/_internal/delegation.py | 667
-rw-r--r--  test/lib/ansible_test/_internal/diff.py | 256
-rw-r--r--  test/lib/ansible_test/_internal/docker_util.py | 409
-rw-r--r--  test/lib/ansible_test/_internal/encoding.py | 41
-rw-r--r--  test/lib/ansible_test/_internal/env.py | 293
-rw-r--r--  test/lib/ansible_test/_internal/executor.py | 2146
-rw-r--r--  test/lib/ansible_test/_internal/git.py | 137
-rw-r--r--  test/lib/ansible_test/_internal/http.py | 181
-rw-r--r--  test/lib/ansible_test/_internal/import_analysis.py | 362
-rw-r--r--  test/lib/ansible_test/_internal/init.py | 16
-rw-r--r--  test/lib/ansible_test/_internal/integration/__init__.py | 349
-rw-r--r--  test/lib/ansible_test/_internal/io.py | 94
-rw-r--r--  test/lib/ansible_test/_internal/manage_ci.py | 335
-rw-r--r--  test/lib/ansible_test/_internal/metadata.py | 151
-rw-r--r--  test/lib/ansible_test/_internal/payload.py | 146
-rw-r--r--  test/lib/ansible_test/_internal/powershell_import_analysis.py | 105
-rw-r--r--  test/lib/ansible_test/_internal/provider/__init__.py | 78
-rw-r--r--  test/lib/ansible_test/_internal/provider/layout/__init__.py | 232
-rw-r--r--  test/lib/ansible_test/_internal/provider/layout/ansible.py | 47
-rw-r--r--  test/lib/ansible_test/_internal/provider/layout/collection.py | 123
-rw-r--r--  test/lib/ansible_test/_internal/provider/source/__init__.py | 18
-rw-r--r--  test/lib/ansible_test/_internal/provider/source/git.py | 72
-rw-r--r--  test/lib/ansible_test/_internal/provider/source/installed.py | 43
-rw-r--r--  test/lib/ansible_test/_internal/provider/source/unversioned.py | 87
-rw-r--r--  test/lib/ansible_test/_internal/sanity/__init__.py | 946
-rw-r--r--  test/lib/ansible_test/_internal/sanity/ansible_doc.py | 144
-rw-r--r--  test/lib/ansible_test/_internal/sanity/bin_symlinks.py | 110
-rw-r--r--  test/lib/ansible_test/_internal/sanity/compile.py | 92
-rw-r--r--  test/lib/ansible_test/_internal/sanity/ignores.py | 89
-rw-r--r--  test/lib/ansible_test/_internal/sanity/import.py | 184
-rw-r--r--  test/lib/ansible_test/_internal/sanity/integration_aliases.py | 399
-rw-r--r--  test/lib/ansible_test/_internal/sanity/pep8.py | 109
-rw-r--r--  test/lib/ansible_test/_internal/sanity/pslint.py | 121
-rw-r--r--  test/lib/ansible_test/_internal/sanity/pylint.py | 289
-rw-r--r--  test/lib/ansible_test/_internal/sanity/rstcheck.py | 95
-rw-r--r--  test/lib/ansible_test/_internal/sanity/sanity_docs.py | 62
-rw-r--r--  test/lib/ansible_test/_internal/sanity/shellcheck.py | 110
-rw-r--r--  test/lib/ansible_test/_internal/sanity/validate_modules.py | 149
-rw-r--r--  test/lib/ansible_test/_internal/sanity/yamllint.py | 136
-rw-r--r--  test/lib/ansible_test/_internal/target.py | 694
-rw-r--r--  test/lib/ansible_test/_internal/test.py | 524
-rw-r--r--  test/lib/ansible_test/_internal/thread.py | 57
-rw-r--r--  test/lib/ansible_test/_internal/types.py | 32
-rw-r--r--  test/lib/ansible_test/_internal/units/__init__.py | 159
-rw-r--r--  test/lib/ansible_test/_internal/util.py | 853
-rw-r--r--  test/lib/ansible_test/_internal/util_common.py | 487
-rw-r--r--  test/lib/ansible_test/_internal/venv.py | 227
92 files changed, 22134 insertions, 0 deletions
diff --git a/test/lib/ansible_test/_internal/__init__.py b/test/lib/ansible_test/_internal/__init__.py
new file mode 100644
index 00000000..35f04422
--- /dev/null
+++ b/test/lib/ansible_test/_internal/__init__.py
@@ -0,0 +1,3 @@
+"""Support code for Ansible testing infrastructure."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
diff --git a/test/lib/ansible_test/_internal/ansible_util.py b/test/lib/ansible_test/_internal/ansible_util.py
new file mode 100644
index 00000000..c1cf8552
--- /dev/null
+++ b/test/lib/ansible_test/_internal/ansible_util.py
@@ -0,0 +1,296 @@
+"""Miscellaneous utility functions and classes specific to ansible cli tools."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+
+from . import types as t
+
+from .constants import (
+ SOFT_RLIMIT_NOFILE,
+)
+
+from .io import (
+ write_text_file,
+)
+
+from .util import (
+ common_environment,
+ display,
+ find_python,
+ ApplicationError,
+ ANSIBLE_LIB_ROOT,
+ ANSIBLE_TEST_DATA_ROOT,
+ ANSIBLE_BIN_PATH,
+ ANSIBLE_SOURCE_ROOT,
+ get_ansible_version,
+)
+
+from .util_common import (
+ create_temp_dir,
+ run_command,
+ ResultType,
+)
+
+from .config import (
+ IntegrationConfig,
+ PosixIntegrationConfig,
+ EnvironmentConfig,
+ CommonConfig,
+)
+
+from .data import (
+ data_context,
+)
+
+CHECK_YAML_VERSIONS = {}
+
+
+def ansible_environment(args, color=True, ansible_config=None):
+ """
+ :type args: CommonConfig
+ :type color: bool
+ :type ansible_config: str | None
+ :rtype: dict[str, str]
+ """
+ env = common_environment()
+ path = env['PATH']
+
+ if not path.startswith(ANSIBLE_BIN_PATH + os.path.pathsep):
+ path = ANSIBLE_BIN_PATH + os.path.pathsep + path
+
+ if not ansible_config:
+ # use the default empty configuration unless one has been provided
+ ansible_config = args.get_ansible_config()
+
+ if not args.explain and not os.path.exists(ansible_config):
+ raise ApplicationError('Configuration not found: %s' % ansible_config)
+
+ ansible = dict(
+ ANSIBLE_PYTHON_MODULE_RLIMIT_NOFILE=str(SOFT_RLIMIT_NOFILE),
+ ANSIBLE_FORCE_COLOR='%s' % 'true' if args.color and color else 'false',
+ ANSIBLE_FORCE_HANDLERS='true', # allow cleanup handlers to run when tests fail
+ ANSIBLE_HOST_PATTERN_MISMATCH='error', # prevent tests from unintentionally passing when hosts are not found
+ ANSIBLE_INVENTORY='/dev/null', # force tests to provide inventory
+ ANSIBLE_DEPRECATION_WARNINGS='false',
+ ANSIBLE_HOST_KEY_CHECKING='false',
+ ANSIBLE_RETRY_FILES_ENABLED='false',
+ ANSIBLE_CONFIG=ansible_config,
+ ANSIBLE_LIBRARY='/dev/null',
+ ANSIBLE_DEVEL_WARNING='false', # Don't show warnings that CI is running devel
+ PYTHONPATH=get_ansible_python_path(args),
+ PAGER='/bin/cat',
+ PATH=path,
+ # give TQM worker processes time to report code coverage results
+ # without this the last task in a play may write no coverage file, an empty file, or an incomplete file
+ # enabled even when not using code coverage to surface warnings when worker processes do not exit cleanly
+ ANSIBLE_WORKER_SHUTDOWN_POLL_COUNT='100',
+ ANSIBLE_WORKER_SHUTDOWN_POLL_DELAY='0.1',
+ )
+
+ if isinstance(args, IntegrationConfig) and args.coverage:
+ # standard path injection is not effective for ansible-connection, instead the location must be configured
+ # ansible-connection only requires the injector for code coverage
+ # the correct python interpreter is already selected using the sys.executable used to invoke ansible
+ ansible.update(dict(
+ ANSIBLE_CONNECTION_PATH=os.path.join(ANSIBLE_TEST_DATA_ROOT, 'injector', 'ansible-connection'),
+ ))
+
+ if isinstance(args, PosixIntegrationConfig):
+ ansible.update(dict(
+ ANSIBLE_PYTHON_INTERPRETER='/set/ansible_python_interpreter/in/inventory', # force tests to set ansible_python_interpreter in inventory
+ ))
+
+ env.update(ansible)
+
+ if args.debug:
+ env.update(dict(
+ ANSIBLE_DEBUG='true',
+ ANSIBLE_LOG_PATH=os.path.join(ResultType.LOGS.name, 'debug.log'),
+ ))
+
+ if data_context().content.collection:
+ env.update(dict(
+ ANSIBLE_COLLECTIONS_PATH=data_context().content.collection.root,
+ ))
+
+ if data_context().content.is_ansible:
+ env.update(configure_plugin_paths(args))
+
+ return env
+
+
+def configure_plugin_paths(args): # type: (CommonConfig) -> t.Dict[str, str]
+ """Return environment variables with paths to plugins relevant for the current command."""
+ if not isinstance(args, IntegrationConfig):
+ return {}
+
+ support_path = os.path.join(ANSIBLE_SOURCE_ROOT, 'test', 'support', args.command)
+
+ # provide private copies of collections for integration tests
+ collection_root = os.path.join(support_path, 'collections')
+
+ env = dict(
+ ANSIBLE_COLLECTIONS_PATH=collection_root,
+ )
+
+ # provide private copies of plugins for integration tests
+ plugin_root = os.path.join(support_path, 'plugins')
+
+ plugin_list = [
+ 'action',
+ 'become',
+ 'cache',
+ 'callback',
+ 'cliconf',
+ 'connection',
+ 'filter',
+ 'httpapi',
+ 'inventory',
+ 'lookup',
+ 'netconf',
+ # 'shell' is not configurable
+ 'strategy',
+ 'terminal',
+ 'test',
+ 'vars',
+ ]
+
+ # most plugins follow a standard naming convention
+ plugin_map = dict(('%s_plugins' % name, name) for name in plugin_list)
+
+ # these plugins do not follow the standard naming convention
+ plugin_map.update(
+ doc_fragment='doc_fragments',
+ library='modules',
+ module_utils='module_utils',
+ )
+
+ env.update(dict(('ANSIBLE_%s' % key.upper(), os.path.join(plugin_root, value)) for key, value in plugin_map.items()))
+
+ # only configure directories which exist
+ env = dict((key, value) for key, value in env.items() if os.path.isdir(value))
+
+ return env
+
+
+def get_ansible_python_path(args): # type: (CommonConfig) -> str
+ """
+ Return a directory usable for PYTHONPATH, containing only the ansible package.
+ If a temporary directory is required, it will be cached for the lifetime of the process and cleaned up at exit.
+ """
+ try:
+ return get_ansible_python_path.python_path
+ except AttributeError:
+ pass
+
+ if ANSIBLE_SOURCE_ROOT:
+ # when running from source there is no need for a temporary directory to isolate the ansible package
+ python_path = os.path.dirname(ANSIBLE_LIB_ROOT)
+ else:
+ # when not running from source the installed directory is unsafe to add to PYTHONPATH
+ # doing so would expose many unwanted packages on sys.path
+ # instead a temporary directory is created which contains only ansible using a symlink
+ python_path = create_temp_dir(prefix='ansible-test-')
+
+ os.symlink(ANSIBLE_LIB_ROOT, os.path.join(python_path, 'ansible'))
+
+ if not args.explain:
+ generate_egg_info(python_path)
+
+ get_ansible_python_path.python_path = python_path
+
+ return python_path
+
+
+def generate_egg_info(path): # type: (str) -> None
+ """Generate an egg-info in the specified base directory."""
+ # minimal PKG-INFO stub following the format defined in PEP 241
+ # required for older setuptools versions to avoid a traceback when importing pkg_resources from packages like cryptography
+ # newer setuptools versions are happy with an empty directory
+ # including a stub here means we don't need to locate the existing file or have setup.py generate it when running from source
+ pkg_info = '''
+Metadata-Version: 1.0
+Name: ansible
+Version: %s
+Platform: UNKNOWN
+Summary: Radically simple IT automation
+Author-email: info@ansible.com
+License: GPLv3+
+''' % get_ansible_version()
+
+ pkg_info_path = os.path.join(path, 'ansible_base.egg-info', 'PKG-INFO')
+
+ if os.path.exists(pkg_info_path):
+ return
+
+ write_text_file(pkg_info_path, pkg_info.lstrip(), create_directories=True)
+
+
+def check_pyyaml(args, version, required=True, quiet=False):
+ """
+ :type args: EnvironmentConfig
+ :type version: str
+ :type required: bool
+ :type quiet: bool
+ """
+ try:
+ return CHECK_YAML_VERSIONS[version]
+ except KeyError:
+ pass
+
+ python = find_python(version)
+ stdout, _dummy = run_command(args, [python, os.path.join(ANSIBLE_TEST_DATA_ROOT, 'yamlcheck.py')],
+ capture=True, always=True)
+
+ result = json.loads(stdout)
+
+ yaml = result['yaml']
+ cloader = result['cloader']
+
+ if yaml or required:
+ # results are cached only if pyyaml is required or present
+ # it is assumed that tests will not uninstall/re-install pyyaml -- if they do, those changes will go undetected
+ CHECK_YAML_VERSIONS[version] = result
+
+ if not quiet:
+ if not yaml and required:
+ display.warning('PyYAML is not installed for interpreter: %s' % python)
+ elif not cloader:
+ display.warning('PyYAML will be slow due to installation without libyaml support for interpreter: %s' % python)
+
+ return result
+
+
+class CollectionDetail:
+ """Collection detail."""
+ def __init__(self): # type: () -> None
+ self.version = None # type: t.Optional[str]
+
+
+class CollectionDetailError(ApplicationError):
+ """An error occurred retrieving collection detail."""
+ def __init__(self, reason): # type: (str) -> None
+ super(CollectionDetailError, self).__init__('Error collecting collection detail: %s' % reason)
+ self.reason = reason
+
+
+def get_collection_detail(args, python): # type: (EnvironmentConfig, str) -> CollectionDetail
+ """Return collection detail."""
+ collection = data_context().content.collection
+ directory = os.path.join(collection.root, collection.directory)
+
+ stdout = run_command(args, [python, os.path.join(ANSIBLE_TEST_DATA_ROOT, 'collection_detail.py'), directory], capture=True, always=True)[0]
+ result = json.loads(stdout)
+ error = result.get('error')
+
+ if error:
+ raise CollectionDetailError(error)
+
+ version = result.get('version')
+
+ detail = CollectionDetail()
+ detail.version = str(version) if version is not None else None
+
+ return detail
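A minimal usage sketch for the module above, assuming `args` is an EnvironmentConfig built by the CLI layer and that util_common.run_command() accepts an env mapping (its signature is not shown in this diff):

    def show_ansible_version(args):  # illustrative helper, not part of the diff
        # build the isolated test environment and run a command inside it
        env = ansible_environment(args, color=False)
        return run_command(args, ['ansible', '--version'], env=env, capture=True)[0]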
diff --git a/test/lib/ansible_test/_internal/cache.py b/test/lib/ansible_test/_internal/cache.py
new file mode 100644
index 00000000..85fdbb1f
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cache.py
@@ -0,0 +1,35 @@
+"""Cache for commonly shared data that is intended to be immutable."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class CommonCache:
+ """Common cache."""
+ def __init__(self, args):
+ """
+ :param args: CommonConfig
+ """
+ self.args = args
+
+ def get(self, key, factory):
+ """
+ :param key: str
+ :param factory: () -> any
+ :rtype: any
+ """
+ if key not in self.args.cache:
+ self.args.cache[key] = factory()
+
+ return self.args.cache[key]
+
+ def get_with_args(self, key, factory):
+ """
+ :param key: str
+ :param factory: (CommonConfig) -> any
+ :rtype: any
+ """
+
+ if key not in self.args.cache:
+ self.args.cache[key] = factory(self.args)
+
+ return self.args.cache[key]
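A small sketch of the intended CommonCache usage, assuming `args` is a CommonConfig whose `cache` attribute is a plain dict; the key name and factory below are hypothetical:

    def get_docker_info(args):  # args: CommonConfig
        def load_docker_info():
            return {'version': '19.03'}  # placeholder for an expensive lookup

        # the factory runs only on the first call for this key; later calls return the cached value
        return CommonCache(args).get('docker_info', load_docker_info)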
diff --git a/test/lib/ansible_test/_internal/ci/__init__.py b/test/lib/ansible_test/_internal/ci/__init__.py
new file mode 100644
index 00000000..d6e2ad6e
--- /dev/null
+++ b/test/lib/ansible_test/_internal/ci/__init__.py
@@ -0,0 +1,227 @@
+"""Support code for CI environments."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import abc
+import base64
+import json
+import os
+import tempfile
+
+
+from .. import types as t
+
+from ..encoding import (
+ to_bytes,
+ to_text,
+)
+
+from ..io import (
+ read_text_file,
+ write_text_file,
+)
+
+from ..config import (
+ CommonConfig,
+ TestConfig,
+)
+
+from ..util import (
+ ABC,
+ ApplicationError,
+ display,
+ get_subclasses,
+ import_plugins,
+ raw_command,
+)
+
+
+class ChangeDetectionNotSupported(ApplicationError):
+ """Exception for cases where change detection is not supported."""
+
+
+class AuthContext:
+ """Context information required for Ansible Core CI authentication."""
+ def __init__(self): # type: () -> None
+ self.region = None # type: t.Optional[str]
+
+
+class CIProvider(ABC):
+ """Base class for CI provider plugins."""
+ priority = 500
+
+ @staticmethod
+ @abc.abstractmethod
+ def is_supported(): # type: () -> bool
+ """Return True if this provider is supported in the current running environment."""
+
+ @property
+ @abc.abstractmethod
+ def code(self): # type: () -> str
+ """Return a unique code representing this provider."""
+
+ @property
+ @abc.abstractmethod
+ def name(self): # type: () -> str
+ """Return descriptive name for this provider."""
+
+ @abc.abstractmethod
+ def generate_resource_prefix(self): # type: () -> str
+ """Return a resource prefix specific to this CI provider."""
+
+ @abc.abstractmethod
+ def get_base_branch(self): # type: () -> str
+ """Return the base branch or an empty string."""
+
+ @abc.abstractmethod
+ def detect_changes(self, args): # type: (TestConfig) -> t.Optional[t.List[str]]
+ """Initialize change detection."""
+
+ @abc.abstractmethod
+ def supports_core_ci_auth(self, context): # type: (AuthContext) -> bool
+ """Return True if Ansible Core CI is supported."""
+
+ @abc.abstractmethod
+ def prepare_core_ci_auth(self, context): # type: (AuthContext) -> t.Dict[str, t.Any]
+ """Return authentication details for Ansible Core CI."""
+
+ @abc.abstractmethod
+ def get_git_details(self, args): # type: (CommonConfig) -> t.Optional[t.Dict[str, t.Any]]
+ """Return details about git in the current environment."""
+
+
+def get_ci_provider(): # type: () -> CIProvider
+ """Return a CI provider instance for the current environment."""
+ try:
+ return get_ci_provider.provider
+ except AttributeError:
+ pass
+
+ provider = None
+
+ import_plugins('ci')
+
+ candidates = sorted(get_subclasses(CIProvider), key=lambda c: (c.priority, c.__name__))
+
+ for candidate in candidates:
+ if candidate.is_supported():
+ provider = candidate()
+ break
+
+ if provider.code:
+ display.info('Detected CI provider: %s' % provider.name)
+
+ get_ci_provider.provider = provider
+
+ return provider
+
+
+class AuthHelper(ABC):
+ """Public key based authentication helper for Ansible Core CI."""
+ def sign_request(self, request): # type: (t.Dict[str, t.Any]) -> None
+ """Sign the given auth request and make the public key available."""
+ payload_bytes = to_bytes(json.dumps(request, sort_keys=True))
+ signature_raw_bytes = self.sign_bytes(payload_bytes)
+ signature = to_text(base64.b64encode(signature_raw_bytes))
+
+ request.update(signature=signature)
+
+ def initialize_private_key(self): # type: () -> str
+ """
+ Initialize and publish a new key pair (if needed) and return the private key.
+ The private key is cached across ansible-test invocations so it is only generated and published once per CI job.
+ """
+ path = os.path.expanduser('~/.ansible-core-ci-private.key')
+
+ if os.path.exists(to_bytes(path)):
+ private_key_pem = read_text_file(path)
+ else:
+ private_key_pem = self.generate_private_key()
+ write_text_file(path, private_key_pem)
+
+ return private_key_pem
+
+ @abc.abstractmethod
+ def sign_bytes(self, payload_bytes): # type: (bytes) -> bytes
+ """Sign the given payload and return the signature, initializing a new key pair if required."""
+
+ @abc.abstractmethod
+ def publish_public_key(self, public_key_pem): # type: (str) -> None
+ """Publish the given public key."""
+
+ @abc.abstractmethod
+ def generate_private_key(self): # type: () -> str
+ """Generate a new key pair, publishing the public key and returning the private key."""
+
+
+class CryptographyAuthHelper(AuthHelper, ABC): # pylint: disable=abstract-method
+ """Cryptography based public key based authentication helper for Ansible Core CI."""
+ def sign_bytes(self, payload_bytes): # type: (bytes) -> bytes
+ """Sign the given payload and return the signature, initializing a new key pair if required."""
+ # import cryptography here to avoid overhead and failures in environments which do not use/provide it
+ from cryptography.hazmat.backends import default_backend
+ from cryptography.hazmat.primitives import hashes
+ from cryptography.hazmat.primitives.asymmetric import ec
+ from cryptography.hazmat.primitives.serialization import load_pem_private_key
+
+ private_key_pem = self.initialize_private_key()
+ private_key = load_pem_private_key(to_bytes(private_key_pem), None, default_backend())
+
+ signature_raw_bytes = private_key.sign(payload_bytes, ec.ECDSA(hashes.SHA256()))
+
+ return signature_raw_bytes
+
+ def generate_private_key(self): # type: () -> str
+ """Generate a new key pair, publishing the public key and returning the private key."""
+ # import cryptography here to avoid overhead and failures in environments which do not use/provide it
+ from cryptography.hazmat.backends import default_backend
+ from cryptography.hazmat.primitives import serialization
+ from cryptography.hazmat.primitives.asymmetric import ec
+
+ private_key = ec.generate_private_key(ec.SECP384R1(), default_backend())
+ public_key = private_key.public_key()
+
+ private_key_pem = to_text(private_key.private_bytes(
+ encoding=serialization.Encoding.PEM,
+ format=serialization.PrivateFormat.PKCS8,
+ encryption_algorithm=serialization.NoEncryption(),
+ ))
+
+ public_key_pem = to_text(public_key.public_bytes(
+ encoding=serialization.Encoding.PEM,
+ format=serialization.PublicFormat.SubjectPublicKeyInfo,
+ ))
+
+ self.publish_public_key(public_key_pem)
+
+ return private_key_pem
+
+
+class OpenSSLAuthHelper(AuthHelper, ABC): # pylint: disable=abstract-method
+ """OpenSSL based public key based authentication helper for Ansible Core CI."""
+ def sign_bytes(self, payload_bytes): # type: (bytes) -> bytes
+ """Sign the given payload and return the signature, initializing a new key pair if required."""
+ private_key_pem = self.initialize_private_key()
+
+ with tempfile.NamedTemporaryFile() as private_key_file:
+ private_key_file.write(to_bytes(private_key_pem))
+ private_key_file.flush()
+
+ with tempfile.NamedTemporaryFile() as payload_file:
+ payload_file.write(payload_bytes)
+ payload_file.flush()
+
+ with tempfile.NamedTemporaryFile() as signature_file:
+ raw_command(['openssl', 'dgst', '-sha256', '-sign', private_key_file.name, '-out', signature_file.name, payload_file.name], capture=True)
+ signature_raw_bytes = signature_file.read()
+
+ return signature_raw_bytes
+
+ def generate_private_key(self): # type: () -> str
+ """Generate a new key pair, publishing the public key and returning the private key."""
+ private_key_pem = raw_command(['openssl', 'ecparam', '-genkey', '-name', 'secp384r1', '-noout'], capture=True)[0]
+ public_key_pem = raw_command(['openssl', 'ec', '-pubout'], data=private_key_pem, capture=True)[0]
+
+ self.publish_public_key(public_key_pem)
+
+ return private_key_pem
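A brief sketch of the request-signing flow defined above, assuming the cryptography package is available; MyAuthHelper is a hypothetical subclass that only needs to provide publish_public_key():

    class MyAuthHelper(CryptographyAuthHelper):
        def publish_public_key(self, public_key_pem):  # type: (str) -> None
            display.info(public_key_pem)  # a real helper would publish the key somewhere durable

    request = dict(run_id='example')
    MyAuthHelper().sign_request(request)
    # request now also contains request['signature']: a base64 ECDSA signature over the sorted-keys JSON payload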
diff --git a/test/lib/ansible_test/_internal/ci/azp.py b/test/lib/ansible_test/_internal/ci/azp.py
new file mode 100644
index 00000000..f2a9d206
--- /dev/null
+++ b/test/lib/ansible_test/_internal/ci/azp.py
@@ -0,0 +1,268 @@
+"""Support code for working with Azure Pipelines."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import re
+import tempfile
+import uuid
+
+from .. import types as t
+
+from ..encoding import (
+ to_bytes,
+)
+
+from ..config import (
+ CommonConfig,
+ TestConfig,
+)
+
+from ..git import (
+ Git,
+)
+
+from ..http import (
+ HttpClient,
+ urlencode,
+)
+
+from ..util import (
+ display,
+ MissingEnvironmentVariable,
+)
+
+from . import (
+ AuthContext,
+ ChangeDetectionNotSupported,
+ CIProvider,
+ CryptographyAuthHelper,
+)
+
+CODE = 'azp'
+
+
+class AzurePipelines(CIProvider):
+ """CI provider implementation for Azure Pipelines."""
+ def __init__(self):
+ self.auth = AzurePipelinesAuthHelper()
+
+ @staticmethod
+ def is_supported(): # type: () -> bool
+ """Return True if this provider is supported in the current running environment."""
+ return os.environ.get('SYSTEM_COLLECTIONURI', '').startswith('https://dev.azure.com/')
+
+ @property
+ def code(self): # type: () -> str
+ """Return a unique code representing this provider."""
+ return CODE
+
+ @property
+ def name(self): # type: () -> str
+ """Return descriptive name for this provider."""
+ return 'Azure Pipelines'
+
+ def generate_resource_prefix(self): # type: () -> str
+ """Return a resource prefix specific to this CI provider."""
+ try:
+ prefix = 'azp-%s-%s-%s' % (
+ os.environ['BUILD_BUILDID'],
+ os.environ['SYSTEM_JOBATTEMPT'],
+ os.environ['SYSTEM_JOBIDENTIFIER'],
+ )
+ except KeyError as ex:
+ raise MissingEnvironmentVariable(name=ex.args[0])
+
+ prefix = re.sub(r'[^a-zA-Z0-9]+', '-', prefix).lower()
+
+ return prefix
+
+ def get_base_branch(self): # type: () -> str
+ """Return the base branch or an empty string."""
+ base_branch = os.environ.get('SYSTEM_PULLREQUEST_TARGETBRANCH') or os.environ.get('BUILD_SOURCEBRANCHNAME')
+
+ if base_branch:
+ base_branch = 'origin/%s' % base_branch
+
+ return base_branch or ''
+
+ def detect_changes(self, args): # type: (TestConfig) -> t.Optional[t.List[str]]
+ """Initialize change detection."""
+ result = AzurePipelinesChanges(args)
+
+ if result.is_pr:
+ job_type = 'pull request'
+ else:
+ job_type = 'merge commit'
+
+ display.info('Processing %s for branch %s commit %s' % (job_type, result.branch, result.commit))
+
+ if not args.metadata.changes:
+ args.metadata.populate_changes(result.diff)
+
+ if result.paths is None:
+ # There are several likely causes of this:
+ # - First run on a new branch.
+ # - Too many pull requests passed since the last merge run passed.
+ display.warning('No successful commit found. All tests will be executed.')
+
+ return result.paths
+
+ def supports_core_ci_auth(self, context): # type: (AuthContext) -> bool
+ """Return True if Ansible Core CI is supported."""
+ return True
+
+ def prepare_core_ci_auth(self, context): # type: (AuthContext) -> t.Dict[str, t.Any]
+ """Return authentication details for Ansible Core CI."""
+ try:
+ request = dict(
+ org_name=os.environ['SYSTEM_COLLECTIONURI'].strip('/').split('/')[-1],
+ project_name=os.environ['SYSTEM_TEAMPROJECT'],
+ build_id=int(os.environ['BUILD_BUILDID']),
+ task_id=str(uuid.UUID(os.environ['SYSTEM_TASKINSTANCEID'])),
+ )
+ except KeyError as ex:
+ raise MissingEnvironmentVariable(name=ex.args[0])
+
+ self.auth.sign_request(request)
+
+ auth = dict(
+ azp=request,
+ )
+
+ return auth
+
+ def get_git_details(self, args): # type: (CommonConfig) -> t.Optional[t.Dict[str, t.Any]]
+ """Return details about git in the current environment."""
+ changes = AzurePipelinesChanges(args)
+
+ details = dict(
+ base_commit=changes.base_commit,
+ commit=changes.commit,
+ )
+
+ return details
+
+
+class AzurePipelinesAuthHelper(CryptographyAuthHelper):
+ """
+ Authentication helper for Azure Pipelines.
+ Based on cryptography since it is provided by the default Azure Pipelines environment.
+ """
+ def publish_public_key(self, public_key_pem): # type: (str) -> None
+ """Publish the given public key."""
+ try:
+ agent_temp_directory = os.environ['AGENT_TEMPDIRECTORY']
+ except KeyError as ex:
+ raise MissingEnvironmentVariable(name=ex.args[0])
+
+ # the temporary file cannot be deleted because we do not know when the agent has processed it
+ # placing the file in the agent's temp directory allows it to be picked up when the job is running in a container
+ with tempfile.NamedTemporaryFile(prefix='public-key-', suffix='.pem', delete=False, dir=agent_temp_directory) as public_key_file:
+ public_key_file.write(to_bytes(public_key_pem))
+ public_key_file.flush()
+
+ # make the agent aware of the public key by declaring it as an attachment
+ vso_add_attachment('ansible-core-ci', 'public-key.pem', public_key_file.name)
+
+
+class AzurePipelinesChanges:
+ """Change information for an Azure Pipelines build."""
+ def __init__(self, args): # type: (CommonConfig) -> None
+ self.args = args
+ self.git = Git()
+
+ try:
+ self.org_uri = os.environ['SYSTEM_COLLECTIONURI'] # ex: https://dev.azure.com/{org}/
+ self.project = os.environ['SYSTEM_TEAMPROJECT']
+ self.repo_type = os.environ['BUILD_REPOSITORY_PROVIDER'] # ex: GitHub
+ self.source_branch = os.environ['BUILD_SOURCEBRANCH']
+ self.source_branch_name = os.environ['BUILD_SOURCEBRANCHNAME']
+ self.pr_branch_name = os.environ.get('SYSTEM_PULLREQUEST_TARGETBRANCH')
+ except KeyError as ex:
+ raise MissingEnvironmentVariable(name=ex.args[0])
+
+ if self.source_branch.startswith('refs/tags/'):
+ raise ChangeDetectionNotSupported('Change detection is not supported for tags.')
+
+ self.org = self.org_uri.strip('/').split('/')[-1]
+ self.is_pr = self.pr_branch_name is not None
+
+ if self.is_pr:
+ # HEAD is a merge commit of the PR branch into the target branch
+ # HEAD^1 is HEAD of the target branch (first parent of merge commit)
+ # HEAD^2 is HEAD of the PR branch (second parent of merge commit)
+ # see: https://git-scm.com/docs/gitrevisions
+ self.branch = self.pr_branch_name
+ self.base_commit = 'HEAD^1'
+ self.commit = 'HEAD^2'
+ else:
+ commits = self.get_successful_merge_run_commits()
+
+ self.branch = self.source_branch_name
+ self.base_commit = self.get_last_successful_commit(commits)
+ self.commit = 'HEAD'
+
+ self.commit = self.git.run_git(['rev-parse', self.commit]).strip()
+
+ if self.base_commit:
+ self.base_commit = self.git.run_git(['rev-parse', self.base_commit]).strip()
+
+ # <commit>...<commit>
+ # This form is to view the changes on the branch containing and up to the second <commit>, starting at a common ancestor of both <commit>.
+ # see: https://git-scm.com/docs/git-diff
+ dot_range = '%s...%s' % (self.base_commit, self.commit)
+
+ self.paths = sorted(self.git.get_diff_names([dot_range]))
+ self.diff = self.git.get_diff([dot_range])
+ else:
+ self.paths = None # act as though change detection not enabled, do not filter targets
+ self.diff = []
+
+ def get_successful_merge_run_commits(self): # type: () -> t.Set[str]
+ """Return a set of recent successsful merge commits from Azure Pipelines."""
+ parameters = dict(
+ maxBuildsPerDefinition=100, # max 5000
+ queryOrder='queueTimeDescending', # assumes under normal circumstances that later queued jobs are for later commits
+ resultFilter='succeeded',
+ reasonFilter='batchedCI', # may miss some non-PR reasons, the alternative is to filter the list after receiving it
+ repositoryType=self.repo_type,
+ repositoryId='%s/%s' % (self.org, self.project),
+ )
+
+ url = '%s%s/build/builds?%s' % (self.org_uri, self.project, urlencode(parameters))
+
+ http = HttpClient(self.args)
+ response = http.get(url)
+
+ # noinspection PyBroadException
+ try:
+ result = response.json()
+ except Exception: # pylint: disable=broad-except
+ # most likely due to a private project, which returns an HTTP 203 response with HTML
+ display.warning('Unable to find project. Cannot determine changes. All tests will be executed.')
+ return set()
+
+ commits = set(build['sourceVersion'] for build in result['value'])
+
+ return commits
+
+ def get_last_successful_commit(self, commits): # type: (t.Set[str]) -> t.Optional[str]
+ """Return the last successful commit from git history that is found in the given commit list, or None."""
+ commit_history = self.git.get_rev_list(max_count=100)
+ ordered_successful_commits = [commit for commit in commit_history if commit in commits]
+ last_successful_commit = ordered_successful_commits[0] if ordered_successful_commits else None
+ return last_successful_commit
+
+
+def vso_add_attachment(file_type, file_name, path): # type: (str, str, str) -> None
+ """Upload and attach a file to the current timeline record."""
+ vso('task.addattachment', dict(type=file_type, name=file_name), path)
+
+
+def vso(name, data, message): # type: (str, t.Dict[str, str], str) -> None
+ """
+ Write a logging command for the Azure Pipelines agent to process.
+ See: https://docs.microsoft.com/en-us/azure/devops/pipelines/scripts/logging-commands?view=azure-devops&tabs=bash
+ """
+ display.info('##vso[%s %s]%s' % (name, ';'.join('='.join((key, value)) for key, value in data.items()), message))
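For reference, the logging command emitted by vso_add_attachment() above takes roughly this shape (the attachment path is illustrative):

    ##vso[task.addattachment type=ansible-core-ci;name=public-key.pem]/agent/_temp/public-key-abc123.pem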
diff --git a/test/lib/ansible_test/_internal/ci/local.py b/test/lib/ansible_test/_internal/ci/local.py
new file mode 100644
index 00000000..5f605c86
--- /dev/null
+++ b/test/lib/ansible_test/_internal/ci/local.py
@@ -0,0 +1,217 @@
+"""Support code for working without a supported CI provider."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import platform
+import random
+import re
+
+from .. import types as t
+
+from ..config import (
+ CommonConfig,
+ TestConfig,
+)
+
+from ..io import (
+ read_text_file,
+)
+
+from ..git import (
+ Git,
+)
+
+from ..util import (
+ ApplicationError,
+ display,
+ is_binary_file,
+ SubprocessError,
+)
+
+from . import (
+ AuthContext,
+ CIProvider,
+)
+
+CODE = '' # not really a CI provider, so use an empty string for the code
+
+
+class Local(CIProvider):
+ """CI provider implementation when not using CI."""
+ priority = 1000
+
+ @staticmethod
+ def is_supported(): # type: () -> bool
+ """Return True if this provider is supported in the current running environment."""
+ return True
+
+ @property
+ def code(self): # type: () -> str
+ """Return a unique code representing this provider."""
+ return CODE
+
+ @property
+ def name(self): # type: () -> str
+ """Return descriptive name for this provider."""
+ return 'Local'
+
+ def generate_resource_prefix(self): # type: () -> str
+ """Return a resource prefix specific to this CI provider."""
+ node = re.sub(r'[^a-zA-Z0-9]+', '-', platform.node().split('.')[0]).lower()
+
+ prefix = 'ansible-test-%s-%d' % (node, random.randint(10000000, 99999999))
+
+ return prefix
+
+ def get_base_branch(self): # type: () -> str
+ """Return the base branch or an empty string."""
+ return ''
+
+ def detect_changes(self, args): # type: (TestConfig) -> t.Optional[t.List[str]]
+ """Initialize change detection."""
+ result = LocalChanges(args)
+
+ display.info('Detected branch %s forked from %s at commit %s' % (
+ result.current_branch, result.fork_branch, result.fork_point))
+
+ if result.untracked and not args.untracked:
+ display.warning('Ignored %s untracked file(s). Use --untracked to include them.' %
+ len(result.untracked))
+
+ if result.committed and not args.committed:
+ display.warning('Ignored %s committed change(s). Omit --ignore-committed to include them.' %
+ len(result.committed))
+
+ if result.staged and not args.staged:
+ display.warning('Ignored %s staged change(s). Omit --ignore-staged to include them.' %
+ len(result.staged))
+
+ if result.unstaged and not args.unstaged:
+ display.warning('Ignored %s unstaged change(s). Omit --ignore-unstaged to include them.' %
+ len(result.unstaged))
+
+ names = set()
+
+ if args.tracked:
+ names |= set(result.tracked)
+ if args.untracked:
+ names |= set(result.untracked)
+ if args.committed:
+ names |= set(result.committed)
+ if args.staged:
+ names |= set(result.staged)
+ if args.unstaged:
+ names |= set(result.unstaged)
+
+ if not args.metadata.changes:
+ args.metadata.populate_changes(result.diff)
+
+ for path in result.untracked:
+ if is_binary_file(path):
+ args.metadata.changes[path] = ((0, 0),)
+ continue
+
+ line_count = len(read_text_file(path).splitlines())
+
+ args.metadata.changes[path] = ((1, line_count),)
+
+ return sorted(names)
+
+ def supports_core_ci_auth(self, context): # type: (AuthContext) -> bool
+ """Return True if Ansible Core CI is supported."""
+ path = self._get_aci_key_path(context)
+ return os.path.exists(path)
+
+ def prepare_core_ci_auth(self, context): # type: (AuthContext) -> t.Dict[str, t.Any]
+ """Return authentication details for Ansible Core CI."""
+ path = self._get_aci_key_path(context)
+ auth_key = read_text_file(path).strip()
+
+ request = dict(
+ key=auth_key,
+ nonce=None,
+ )
+
+ auth = dict(
+ remote=request,
+ )
+
+ return auth
+
+ def get_git_details(self, args): # type: (CommonConfig) -> t.Optional[t.Dict[str, t.Any]]
+ """Return details about git in the current environment."""
+ return None # not yet implemented for local
+
+ def _get_aci_key_path(self, context): # type: (AuthContext) -> str
+ path = os.path.expanduser('~/.ansible-core-ci.key')
+
+ if context.region:
+ path += '.%s' % context.region
+
+ return path
+
+
+class InvalidBranch(ApplicationError):
+ """Exception for invalid branch specification."""
+ def __init__(self, branch, reason): # type: (str, str) -> None
+ message = 'Invalid branch: %s\n%s' % (branch, reason)
+
+ super(InvalidBranch, self).__init__(message)
+
+ self.branch = branch
+
+
+class LocalChanges:
+ """Change information for local work."""
+ def __init__(self, args): # type: (TestConfig) -> None
+ self.args = args
+ self.git = Git()
+
+ self.current_branch = self.git.get_branch()
+
+ if self.is_official_branch(self.current_branch):
+ raise InvalidBranch(branch=self.current_branch,
+ reason='Current branch is not a feature branch.')
+
+ self.fork_branch = None
+ self.fork_point = None
+
+ self.local_branches = sorted(self.git.get_branches())
+ self.official_branches = sorted([b for b in self.local_branches if self.is_official_branch(b)])
+
+ for self.fork_branch in self.official_branches:
+ try:
+ self.fork_point = self.git.get_branch_fork_point(self.fork_branch)
+ break
+ except SubprocessError:
+ pass
+
+ if self.fork_point is None:
+ raise ApplicationError('Unable to auto-detect fork branch and fork point.')
+
+ # tracked files (including unchanged)
+ self.tracked = sorted(self.git.get_file_names(['--cached']))
+ # untracked files (except ignored)
+ self.untracked = sorted(self.git.get_file_names(['--others', '--exclude-standard']))
+ # tracked changes (including deletions) committed since the branch was forked
+ self.committed = sorted(self.git.get_diff_names([self.fork_point, 'HEAD']))
+ # tracked changes (including deletions) which are staged
+ self.staged = sorted(self.git.get_diff_names(['--cached']))
+ # tracked changes (including deletions) which are not staged
+ self.unstaged = sorted(self.git.get_diff_names([]))
+ # diff of all tracked files from fork point to working copy
+ self.diff = self.git.get_diff([self.fork_point])
+
+ def is_official_branch(self, name): # type: (str) -> bool
+ """Return True if the given branch name an official branch for development or releases."""
+ if self.args.base_branch:
+ return name == self.args.base_branch
+
+ if name == 'devel':
+ return True
+
+ if re.match(r'^stable-[0-9]+\.[0-9]+$', name):
+ return True
+
+ return False
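A short sketch of the key lookup performed by the Local provider above, assuming a region-specific key file exists; the region value is illustrative:

    context = AuthContext()
    context.region = 'us-east-1'  # hypothetical region

    provider = Local()
    if provider.supports_core_ci_auth(context):  # checks for ~/.ansible-core-ci.key.us-east-1
        auth = provider.prepare_core_ci_auth(context)  # {'remote': {'key': '<file contents>', 'nonce': None}}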
diff --git a/test/lib/ansible_test/_internal/ci/shippable.py b/test/lib/ansible_test/_internal/ci/shippable.py
new file mode 100644
index 00000000..f9f0a192
--- /dev/null
+++ b/test/lib/ansible_test/_internal/ci/shippable.py
@@ -0,0 +1,269 @@
+"""Support code for working with Shippable."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import re
+import time
+
+from .. import types as t
+
+from ..config import (
+ CommonConfig,
+ TestConfig,
+)
+
+from ..git import (
+ Git,
+)
+
+from ..http import (
+ HttpClient,
+ urlencode,
+)
+
+from ..util import (
+ ApplicationError,
+ display,
+ MissingEnvironmentVariable,
+ SubprocessError,
+)
+
+from . import (
+ AuthContext,
+ ChangeDetectionNotSupported,
+ CIProvider,
+ OpenSSLAuthHelper,
+)
+
+
+CODE = 'shippable'
+
+
+class Shippable(CIProvider):
+ """CI provider implementation for Shippable."""
+ def __init__(self):
+ self.auth = ShippableAuthHelper()
+
+ @staticmethod
+ def is_supported(): # type: () -> bool
+ """Return True if this provider is supported in the current running environment."""
+ return os.environ.get('SHIPPABLE') == 'true'
+
+ @property
+ def code(self): # type: () -> str
+ """Return a unique code representing this provider."""
+ return CODE
+
+ @property
+ def name(self): # type: () -> str
+ """Return descriptive name for this provider."""
+ return 'Shippable'
+
+ def generate_resource_prefix(self): # type: () -> str
+ """Return a resource prefix specific to this CI provider."""
+ try:
+ prefix = 'shippable-%s-%s' % (
+ os.environ['SHIPPABLE_BUILD_NUMBER'],
+ os.environ['SHIPPABLE_JOB_NUMBER'],
+ )
+ except KeyError as ex:
+ raise MissingEnvironmentVariable(name=ex.args[0])
+
+ return prefix
+
+ def get_base_branch(self): # type: () -> str
+ """Return the base branch or an empty string."""
+ base_branch = os.environ.get('BASE_BRANCH')
+
+ if base_branch:
+ base_branch = 'origin/%s' % base_branch
+
+ return base_branch or ''
+
+ def detect_changes(self, args): # type: (TestConfig) -> t.Optional[t.List[str]]
+ """Initialize change detection."""
+ result = ShippableChanges(args)
+
+ if result.is_pr:
+ job_type = 'pull request'
+ elif result.is_tag:
+ job_type = 'tag'
+ else:
+ job_type = 'merge commit'
+
+ display.info('Processing %s for branch %s commit %s' % (job_type, result.branch, result.commit))
+
+ if not args.metadata.changes:
+ args.metadata.populate_changes(result.diff)
+
+ if result.paths is None:
+ # There are several likely causes of this:
+ # - First run on a new branch.
+ # - Too many pull requests passed since the last merge run passed.
+ display.warning('No successful commit found. All tests will be executed.')
+
+ return result.paths
+
+ def supports_core_ci_auth(self, context): # type: (AuthContext) -> bool
+ """Return True if Ansible Core CI is supported."""
+ return True
+
+ def prepare_core_ci_auth(self, context): # type: (AuthContext) -> t.Dict[str, t.Any]
+ """Return authentication details for Ansible Core CI."""
+ try:
+ request = dict(
+ run_id=os.environ['SHIPPABLE_BUILD_ID'],
+ job_number=int(os.environ['SHIPPABLE_JOB_NUMBER']),
+ )
+ except KeyError as ex:
+ raise MissingEnvironmentVariable(name=ex.args[0])
+
+ self.auth.sign_request(request)
+
+ auth = dict(
+ shippable=request,
+ )
+
+ return auth
+
+ def get_git_details(self, args): # type: (CommonConfig) -> t.Optional[t.Dict[str, t.Any]]
+ """Return details about git in the current environment."""
+ commit = os.environ.get('COMMIT')
+ base_commit = os.environ.get('BASE_COMMIT')
+
+ details = dict(
+ base_commit=base_commit,
+ commit=commit,
+ merged_commit=self._get_merged_commit(args, commit),
+ )
+
+ return details
+
+ # noinspection PyUnusedLocal
+ def _get_merged_commit(self, args, commit): # type: (CommonConfig, str) -> t.Optional[str] # pylint: disable=unused-argument
+ """Find the merged commit that should be present."""
+ if not commit:
+ return None
+
+ git = Git()
+
+ try:
+ show_commit = git.run_git(['show', '--no-patch', '--no-abbrev', commit])
+ except SubprocessError as ex:
+ # This should only fail for pull requests where the commit does not exist.
+ # Merge runs would fail much earlier when attempting to checkout the commit.
+ raise ApplicationError('Commit %s was not found:\n\n%s\n\n'
+ 'GitHub may not have fully replicated the commit across their infrastructure.\n'
+ 'It is also possible the commit was removed by a force push between job creation and execution.\n'
+ 'Find the latest run for the pull request and restart failed jobs as needed.'
+ % (commit, ex.stderr.strip()))
+
+ head_commit = git.run_git(['show', '--no-patch', '--no-abbrev', 'HEAD'])
+
+ if show_commit == head_commit:
+ # Commit is HEAD, so this is not a pull request or the base branch for the pull request is up-to-date.
+ return None
+
+ match_merge = re.search(r'^Merge: (?P<parents>[0-9a-f]{40} [0-9a-f]{40})$', head_commit, flags=re.MULTILINE)
+
+ if not match_merge:
+ # The most likely scenarios resulting in a failure here are:
+ # A new run should or does supersede this job, but it wasn't cancelled in time.
+ # A job was superseded and then later restarted.
+ raise ApplicationError('HEAD is not commit %s or a merge commit:\n\n%s\n\n'
+ 'This job has likely been superseded by another run due to additional commits being pushed.\n'
+ 'Find the latest run for the pull request and restart failed jobs as needed.'
+ % (commit, head_commit.strip()))
+
+ parents = set(match_merge.group('parents').split(' '))
+
+ if len(parents) != 2:
+ raise ApplicationError('HEAD is a %d-way octopus merge.' % len(parents))
+
+ if commit not in parents:
+ raise ApplicationError('Commit %s is not a parent of HEAD.' % commit)
+
+ parents.remove(commit)
+
+ last_commit = parents.pop()
+
+ return last_commit
+
+
+class ShippableAuthHelper(OpenSSLAuthHelper):
+ """
+ Authentication helper for Shippable.
+ Based on OpenSSL since cryptography is not provided by the default Shippable environment.
+ """
+ def publish_public_key(self, public_key_pem): # type: (str) -> None
+ """Publish the given public key."""
+ # display the public key as a single line to avoid mangling such as when prefixing each line with a timestamp
+ display.info(public_key_pem.replace('\n', ' '))
+ # allow time for logs to become available to reduce repeated API calls
+ time.sleep(3)
+
+
+class ShippableChanges:
+ """Change information for Shippable build."""
+ def __init__(self, args): # type: (TestConfig) -> None
+ self.args = args
+ self.git = Git()
+
+ try:
+ self.branch = os.environ['BRANCH']
+ self.is_pr = os.environ['IS_PULL_REQUEST'] == 'true'
+ self.is_tag = os.environ['IS_GIT_TAG'] == 'true'
+ self.commit = os.environ['COMMIT']
+ self.project_id = os.environ['PROJECT_ID']
+ self.commit_range = os.environ['SHIPPABLE_COMMIT_RANGE']
+ except KeyError as ex:
+ raise MissingEnvironmentVariable(name=ex.args[0])
+
+ if self.is_tag:
+ raise ChangeDetectionNotSupported('Change detection is not supported for tags.')
+
+ if self.is_pr:
+ self.paths = sorted(self.git.get_diff_names([self.commit_range]))
+ self.diff = self.git.get_diff([self.commit_range])
+ else:
+ commits = self.get_successful_merge_run_commits(self.project_id, self.branch)
+ last_successful_commit = self.get_last_successful_commit(commits)
+
+ if last_successful_commit:
+ self.paths = sorted(self.git.get_diff_names([last_successful_commit, self.commit]))
+ self.diff = self.git.get_diff([last_successful_commit, self.commit])
+ else:
+ # first run for branch
+ self.paths = None # act as though change detection not enabled, do not filter targets
+ self.diff = []
+
+ def get_successful_merge_run_commits(self, project_id, branch): # type: (str, str) -> t.Set[str]
+ """Return a set of recent successsful merge commits from Shippable for the given project and branch."""
+ parameters = dict(
+ isPullRequest='false',
+ projectIds=project_id,
+ branch=branch,
+ )
+
+ url = 'https://api.shippable.com/runs?%s' % urlencode(parameters)
+
+ http = HttpClient(self.args, always=True)
+ response = http.get(url)
+ result = response.json()
+
+ if 'id' in result and result['id'] == 4004:
+ # most likely due to a private project, which returns an HTTP 200 response with JSON
+ display.warning('Unable to find project. Cannot determine changes. All tests will be executed.')
+ return set()
+
+ commits = set(run['commitSha'] for run in result if run['statusCode'] == 30)
+
+ return commits
+
+ def get_last_successful_commit(self, successful_commits): # type: (t.Set[str]) -> t.Optional[str]
+ """Return the last successful commit from git history that is found in the given commit list, or None."""
+ commit_history = self.git.get_rev_list(max_count=100)
+ ordered_successful_commits = [commit for commit in commit_history if commit in successful_commits]
+ last_successful_commit = ordered_successful_commits[0] if ordered_successful_commits else None
+ return last_successful_commit
diff --git a/test/lib/ansible_test/_internal/classification.py b/test/lib/ansible_test/_internal/classification.py
new file mode 100644
index 00000000..52385d1e
--- /dev/null
+++ b/test/lib/ansible_test/_internal/classification.py
@@ -0,0 +1,977 @@
+"""Classify changes in Ansible code."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import collections
+import os
+import re
+import time
+
+from . import types as t
+
+from .target import (
+ walk_module_targets,
+ walk_integration_targets,
+ walk_units_targets,
+ walk_compile_targets,
+ walk_sanity_targets,
+ load_integration_prefixes,
+ analyze_integration_target_dependencies,
+)
+
+from .util import (
+ display,
+ is_subdir,
+)
+
+from .import_analysis import (
+ get_python_module_utils_imports,
+ get_python_module_utils_name,
+)
+
+from .csharp_import_analysis import (
+ get_csharp_module_utils_imports,
+ get_csharp_module_utils_name,
+)
+
+from .powershell_import_analysis import (
+ get_powershell_module_utils_imports,
+ get_powershell_module_utils_name,
+)
+
+from .config import (
+ TestConfig,
+ IntegrationConfig,
+)
+
+from .metadata import (
+ ChangeDescription,
+)
+
+from .data import (
+ data_context,
+)
+
+FOCUSED_TARGET = '__focused__'
+
+
+def categorize_changes(args, paths, verbose_command=None):
+ """
+ :type args: TestConfig
+ :type paths: list[str]
+ :type verbose_command: str
+ :rtype: ChangeDescription
+ """
+ mapper = PathMapper(args)
+
+ commands = {
+ 'sanity': set(),
+ 'units': set(),
+ 'integration': set(),
+ 'windows-integration': set(),
+ 'network-integration': set(),
+ }
+
+ focused_commands = collections.defaultdict(set)
+
+ deleted_paths = set()
+ original_paths = set()
+ additional_paths = set()
+ no_integration_paths = set()
+
+ for path in paths:
+ if not os.path.exists(path):
+ deleted_paths.add(path)
+ continue
+
+ original_paths.add(path)
+
+ dependent_paths = mapper.get_dependent_paths(path)
+
+ if not dependent_paths:
+ continue
+
+ display.info('Expanded "%s" to %d dependent file(s):' % (path, len(dependent_paths)), verbosity=2)
+
+ for dependent_path in dependent_paths:
+ display.info(dependent_path, verbosity=2)
+ additional_paths.add(dependent_path)
+
+ additional_paths -= set(paths) # don't count changed paths as additional paths
+
+ if additional_paths:
+ display.info('Expanded %d changed file(s) into %d additional dependent file(s).' % (len(paths), len(additional_paths)))
+ paths = sorted(set(paths) | additional_paths)
+
+ display.info('Mapping %d changed file(s) to tests.' % len(paths))
+
+ none_count = 0
+
+ for path in paths:
+ tests = mapper.classify(path)
+
+ if tests is None:
+ focused_target = False
+
+ display.info('%s -> all' % path, verbosity=1)
+ tests = all_tests(args) # not categorized, run all tests
+ display.warning('Path not categorized: %s' % path)
+ else:
+ focused_target = tests.pop(FOCUSED_TARGET, False) and path in original_paths
+
+ tests = dict((key, value) for key, value in tests.items() if value)
+
+ if focused_target and not any('integration' in command for command in tests):
+ no_integration_paths.add(path) # path triggers no integration tests
+
+ if verbose_command:
+ result = '%s: %s' % (verbose_command, tests.get(verbose_command) or 'none')
+
+ # identify targeted integration tests (those which only target a single integration command)
+ if 'integration' in verbose_command and tests.get(verbose_command):
+ if not any('integration' in command for command in tests if command != verbose_command):
+ if focused_target:
+ result += ' (focused)'
+
+ result += ' (targeted)'
+ else:
+ result = '%s' % tests
+
+ if not tests.get(verbose_command):
+ # minimize excessive output from potentially thousands of files which do not trigger tests
+ none_count += 1
+ verbosity = 2
+ else:
+ verbosity = 1
+
+ if args.verbosity >= verbosity:
+ display.info('%s -> %s' % (path, result), verbosity=1)
+
+ for command, target in tests.items():
+ commands[command].add(target)
+
+ if focused_target:
+ focused_commands[command].add(target)
+
+ if none_count > 0 and args.verbosity < 2:
+ display.notice('Omitted %d file(s) that triggered no tests.' % none_count)
+
+ for command in commands:
+ commands[command].discard('none')
+
+ if any(target == 'all' for target in commands[command]):
+ commands[command] = set(['all'])
+
+ commands = dict((c, sorted(commands[c])) for c in commands if commands[c])
+ focused_commands = dict((c, sorted(focused_commands[c])) for c in focused_commands)
+
+ for command in commands:
+ if commands[command] == ['all']:
+ commands[command] = [] # changes require testing all targets, do not filter targets
+
+ changes = ChangeDescription()
+ changes.command = verbose_command
+ changes.changed_paths = sorted(original_paths)
+ changes.deleted_paths = sorted(deleted_paths)
+ changes.regular_command_targets = commands
+ changes.focused_command_targets = focused_commands
+ changes.no_integration_paths = sorted(no_integration_paths)
+
+ return changes
+
+
+class PathMapper:
+ """Map file paths to test commands and targets."""
+ def __init__(self, args):
+ """
+ :type args: TestConfig
+ """
+ self.args = args
+ self.integration_all_target = get_integration_all_target(self.args)
+
+ self.integration_targets = list(walk_integration_targets())
+ self.module_targets = list(walk_module_targets())
+ self.compile_targets = list(walk_compile_targets())
+ self.units_targets = list(walk_units_targets())
+ self.sanity_targets = list(walk_sanity_targets())
+ self.powershell_targets = [target for target in self.sanity_targets if os.path.splitext(target.path)[1] in ('.ps1', '.psm1')]
+ self.csharp_targets = [target for target in self.sanity_targets if os.path.splitext(target.path)[1] == '.cs']
+
+ self.units_modules = set(target.module for target in self.units_targets if target.module)
+ self.units_paths = set(a for target in self.units_targets for a in target.aliases)
+ self.sanity_paths = set(target.path for target in self.sanity_targets)
+
+ self.module_names_by_path = dict((target.path, target.module) for target in self.module_targets)
+ self.integration_targets_by_name = dict((target.name, target) for target in self.integration_targets)
+ self.integration_targets_by_alias = dict((a, target) for target in self.integration_targets for a in target.aliases)
+
+ self.posix_integration_by_module = dict((m, target.name) for target in self.integration_targets
+ if 'posix/' in target.aliases for m in target.modules)
+ self.windows_integration_by_module = dict((m, target.name) for target in self.integration_targets
+ if 'windows/' in target.aliases for m in target.modules)
+ self.network_integration_by_module = dict((m, target.name) for target in self.integration_targets
+ if 'network/' in target.aliases for m in target.modules)
+
+ self.prefixes = load_integration_prefixes()
+ self.integration_dependencies = analyze_integration_target_dependencies(self.integration_targets)
+
+ self.python_module_utils_imports = {} # populated on first use to reduce overhead when not needed
+ self.powershell_module_utils_imports = {} # populated on first use to reduce overhead when not needed
+ self.csharp_module_utils_imports = {} # populated on first use to reduce overhead when not needed
+
+ self.paths_to_dependent_targets = {}
+
+ for target in self.integration_targets:
+ for path in target.needs_file:
+ if path not in self.paths_to_dependent_targets:
+ self.paths_to_dependent_targets[path] = set()
+
+ self.paths_to_dependent_targets[path].add(target)
+
+ def get_dependent_paths(self, path):
+ """
+ :type path: str
+ :rtype: list[str]
+ """
+ unprocessed_paths = set(self.get_dependent_paths_non_recursive(path))
+ paths = set()
+
+ while unprocessed_paths:
+ queued_paths = list(unprocessed_paths)
+ paths |= unprocessed_paths
+ unprocessed_paths = set()
+
+ for queued_path in queued_paths:
+ new_paths = self.get_dependent_paths_non_recursive(queued_path)
+
+ for new_path in new_paths:
+ if new_path not in paths:
+ unprocessed_paths.add(new_path)
+
+ return sorted(paths)
+
+ def get_dependent_paths_non_recursive(self, path):
+ """
+ :type path: str
+ :rtype: list[str]
+ """
+ paths = self.get_dependent_paths_internal(path)
+ paths += [target.path + '/' for target in self.paths_to_dependent_targets.get(path, set())]
+ paths = sorted(set(paths))
+
+ return paths
+
+ def get_dependent_paths_internal(self, path):
+ """
+ :type path: str
+ :rtype: list[str]
+ """
+ ext = os.path.splitext(os.path.split(path)[1])[1]
+
+ if is_subdir(path, data_context().content.module_utils_path):
+ if ext == '.py':
+ return self.get_python_module_utils_usage(path)
+
+ if ext == '.psm1':
+ return self.get_powershell_module_utils_usage(path)
+
+ if ext == '.cs':
+ return self.get_csharp_module_utils_usage(path)
+
+ if is_subdir(path, data_context().content.integration_targets_path):
+ return self.get_integration_target_usage(path)
+
+ return []
+
+ def get_python_module_utils_usage(self, path):
+ """
+ :type path: str
+ :rtype: list[str]
+ """
+ if not self.python_module_utils_imports:
+ display.info('Analyzing python module_utils imports...')
+ before = time.time()
+ self.python_module_utils_imports = get_python_module_utils_imports(self.compile_targets)
+ after = time.time()
+ display.info('Processed %d python module_utils in %d second(s).' % (len(self.python_module_utils_imports), after - before))
+
+ name = get_python_module_utils_name(path)
+
+ return sorted(self.python_module_utils_imports[name])
+
+ def get_powershell_module_utils_usage(self, path):
+ """
+ :type path: str
+ :rtype: list[str]
+ """
+ if not self.powershell_module_utils_imports:
+ display.info('Analyzing powershell module_utils imports...')
+ before = time.time()
+ self.powershell_module_utils_imports = get_powershell_module_utils_imports(self.powershell_targets)
+ after = time.time()
+ display.info('Processed %d powershell module_utils in %d second(s).' % (len(self.powershell_module_utils_imports), after - before))
+
+ name = get_powershell_module_utils_name(path)
+
+ return sorted(self.powershell_module_utils_imports[name])
+
+ def get_csharp_module_utils_usage(self, path):
+ """
+ :type path: str
+ :rtype: list[str]
+ """
+ if not self.csharp_module_utils_imports:
+ display.info('Analyzing C# module_utils imports...')
+ before = time.time()
+ self.csharp_module_utils_imports = get_csharp_module_utils_imports(self.powershell_targets, self.csharp_targets)
+ after = time.time()
+ display.info('Processed %d C# module_utils in %d second(s).' % (len(self.csharp_module_utils_imports), after - before))
+
+ name = get_csharp_module_utils_name(path)
+
+ return sorted(self.csharp_module_utils_imports[name])
+
+ def get_integration_target_usage(self, path):
+ """
+ :type path: str
+ :rtype: list[str]
+ """
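+        # the target name is the directory immediately below the integration targets path (e.g. .../targets/<name>/...)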
+ target_name = path.split('/')[3]
+ dependents = [os.path.join(data_context().content.integration_targets_path, target) + os.path.sep
+ for target in sorted(self.integration_dependencies.get(target_name, set()))]
+
+ return dependents
+
+ def classify(self, path):
+ """
+ :type path: str
+ :rtype: dict[str, str] | None
+ """
+ result = self._classify(path)
+
+ # run all tests when no result given
+ if result is None:
+ return None
+
+ # run sanity on path unless result specified otherwise
+ if path in self.sanity_paths and 'sanity' not in result:
+ result['sanity'] = path
+
+ return result
+
+ def _classify(self, path): # type: (str) -> t.Optional[t.Dict[str, str]]
+ """Return the classification for the given path."""
+ if data_context().content.is_ansible:
+ return self._classify_ansible(path)
+
+ if data_context().content.collection:
+ return self._classify_collection(path)
+
+ return None
+
+ def _classify_common(self, path): # type: (str) -> t.Optional[t.Dict[str, str]]
+ """Return the classification for the given path using rules common to all layouts."""
+ dirname = os.path.dirname(path)
+ filename = os.path.basename(path)
+ name, ext = os.path.splitext(filename)
+
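+        # an empty result means no tests need to run for this path; classify() will still add a sanity test for the path when applicable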
+ minimal = {}
+
+ if os.path.sep not in path:
+ if filename in (
+ 'azure-pipelines.yml',
+ 'shippable.yml',
+ ):
+ return all_tests(self.args) # test infrastructure, run all tests
+
+ if is_subdir(path, '.azure-pipelines'):
+ return all_tests(self.args) # test infrastructure, run all tests
+
+ if is_subdir(path, '.github'):
+ return minimal
+
+ if is_subdir(path, data_context().content.integration_targets_path):
+ if not os.path.exists(path):
+ return minimal
+
+ target = self.integration_targets_by_name.get(path.split('/')[3])
+
+ if not target:
+ display.warning('Unexpected non-target found: %s' % path)
+ return minimal
+
+ if 'hidden/' in target.aliases:
+ return minimal # already expanded using get_dependent_paths
+
+ return {
+ 'integration': target.name if 'posix/' in target.aliases else None,
+ 'windows-integration': target.name if 'windows/' in target.aliases else None,
+ 'network-integration': target.name if 'network/' in target.aliases else None,
+ FOCUSED_TARGET: True,
+ }
+
+ if is_subdir(path, data_context().content.integration_path):
+ if dirname == data_context().content.integration_path:
+ for command in (
+ 'integration',
+ 'windows-integration',
+ 'network-integration',
+ ):
+ if name == command and ext == '.cfg':
+ return {
+ command: self.integration_all_target,
+ }
+
+ if name == command + '.requirements' and ext == '.txt':
+ return {
+ command: self.integration_all_target,
+ }
+
+ return {
+ 'integration': self.integration_all_target,
+ 'windows-integration': self.integration_all_target,
+ 'network-integration': self.integration_all_target,
+ }
+
+ if is_subdir(path, data_context().content.sanity_path):
+ return {
+ 'sanity': 'all', # test infrastructure, run all sanity checks
+ }
+
+ if is_subdir(path, data_context().content.unit_path):
+ if path in self.units_paths:
+ return {
+ 'units': path,
+ }
+
+ # changes to files which are not unit tests should trigger tests from the nearest parent directory
+
+ test_path = os.path.dirname(path)
+
+ while test_path:
+ if test_path + '/' in self.units_paths:
+ return {
+ 'units': test_path + '/',
+ }
+
+ test_path = os.path.dirname(test_path)
+
+ if is_subdir(path, data_context().content.module_path):
+ module_name = self.module_names_by_path.get(path)
+
+ if module_name:
+ return {
+ 'units': module_name if module_name in self.units_modules else None,
+ 'integration': self.posix_integration_by_module.get(module_name) if ext == '.py' else None,
+ 'windows-integration': self.windows_integration_by_module.get(module_name) if ext in ['.cs', '.ps1'] else None,
+ 'network-integration': self.network_integration_by_module.get(module_name),
+ FOCUSED_TARGET: True,
+ }
+
+ return minimal
+
+ if is_subdir(path, data_context().content.module_utils_path):
+ if ext == '.cs':
+ return minimal # already expanded using get_dependent_paths
+
+ if ext == '.psm1':
+ return minimal # already expanded using get_dependent_paths
+
+ if ext == '.py':
+ return minimal # already expanded using get_dependent_paths
+
+ if is_subdir(path, data_context().content.plugin_paths['action']):
+ if ext == '.py':
+ if name.startswith('net_'):
+ network_target = 'network/.*_%s' % name[4:]
+
+ if any(re.search(r'^%s$' % network_target, alias) for alias in self.integration_targets_by_alias):
+ return {
+ 'network-integration': network_target,
+ 'units': 'all',
+ }
+
+ return {
+ 'network-integration': self.integration_all_target,
+ 'units': 'all',
+ }
+
+ if self.prefixes.get(name) == 'network':
+ network_platform = name
+ elif name.endswith('_config') and self.prefixes.get(name[:-7]) == 'network':
+ network_platform = name[:-7]
+ elif name.endswith('_template') and self.prefixes.get(name[:-9]) == 'network':
+ network_platform = name[:-9]
+ else:
+ network_platform = None
+
+ if network_platform:
+ network_target = 'network/%s/' % network_platform
+
+ if network_target in self.integration_targets_by_alias:
+ return {
+ 'network-integration': network_target,
+ 'units': 'all',
+ }
+
+ display.warning('Integration tests for "%s" not found.' % network_target, unique=True)
+
+ return {
+ 'units': 'all',
+ }
+
+ if is_subdir(path, data_context().content.plugin_paths['connection']):
+ units_dir = os.path.join(data_context().content.unit_path, 'plugins', 'connection')
+ if name == '__init__':
+ return {
+ 'integration': self.integration_all_target,
+ 'windows-integration': self.integration_all_target,
+ 'network-integration': self.integration_all_target,
+ 'units': units_dir,
+ }
+
+ units_path = os.path.join(units_dir, 'test_%s.py' % name)
+
+ if units_path not in self.units_paths:
+ units_path = None
+
+ integration_name = 'connection_%s' % name
+
+ if integration_name not in self.integration_targets_by_name:
+ integration_name = None
+
+ windows_integration_name = 'connection_windows_%s' % name
+
+ if windows_integration_name not in self.integration_targets_by_name:
+ windows_integration_name = None
+
+ # entire integration test commands depend on these connection plugins
+
+ if name in ['winrm', 'psrp']:
+ return {
+ 'windows-integration': self.integration_all_target,
+ 'units': units_path,
+ }
+
+ if name == 'local':
+ return {
+ 'integration': self.integration_all_target,
+ 'network-integration': self.integration_all_target,
+ 'units': units_path,
+ }
+
+ if name == 'network_cli':
+ return {
+ 'network-integration': self.integration_all_target,
+ 'units': units_path,
+ }
+
+ if name == 'paramiko_ssh':
+ return {
+ 'integration': integration_name,
+ 'network-integration': self.integration_all_target,
+ 'units': units_path,
+ }
+
+ # other connection plugins have isolated integration and unit tests
+
+ return {
+ 'integration': integration_name,
+ 'windows-integration': windows_integration_name,
+ 'units': units_path,
+ }
+
+ if is_subdir(path, data_context().content.plugin_paths['doc_fragments']):
+ return {
+ 'sanity': 'all',
+ }
+
+ if is_subdir(path, data_context().content.plugin_paths['inventory']):
+ if name == '__init__':
+ return all_tests(self.args) # broad impact, run all tests
+
+ # These inventory plugins are enabled by default (see INVENTORY_ENABLED).
+ # Without dedicated integration tests for these we must rely on the incidental coverage from other tests.
+ test_all = [
+ 'host_list',
+ 'script',
+ 'yaml',
+ 'ini',
+ 'auto',
+ ]
+
+ if name in test_all:
+ posix_integration_fallback = get_integration_all_target(self.args)
+ else:
+ posix_integration_fallback = None
+
+ target = self.integration_targets_by_name.get('inventory_%s' % name)
+ units_dir = os.path.join(data_context().content.unit_path, 'plugins', 'inventory')
+ units_path = os.path.join(units_dir, 'test_%s.py' % name)
+
+ if units_path not in self.units_paths:
+ units_path = None
+
+ return {
+ 'integration': target.name if target and 'posix/' in target.aliases else posix_integration_fallback,
+ 'windows-integration': target.name if target and 'windows/' in target.aliases else None,
+ 'network-integration': target.name if target and 'network/' in target.aliases else None,
+ 'units': units_path,
+ FOCUSED_TARGET: target is not None,
+ }
+
+ if is_subdir(path, data_context().content.plugin_paths['filter']):
+ return self._simple_plugin_tests('filter', name)
+
+ if is_subdir(path, data_context().content.plugin_paths['lookup']):
+ return self._simple_plugin_tests('lookup', name)
+
+ if (is_subdir(path, data_context().content.plugin_paths['terminal']) or
+ is_subdir(path, data_context().content.plugin_paths['cliconf']) or
+ is_subdir(path, data_context().content.plugin_paths['netconf'])):
+ if ext == '.py':
+ if name in self.prefixes and self.prefixes[name] == 'network':
+ network_target = 'network/%s/' % name
+
+ if network_target in self.integration_targets_by_alias:
+ return {
+ 'network-integration': network_target,
+ 'units': 'all',
+ }
+
+ display.warning('Integration tests for "%s" not found.' % network_target, unique=True)
+
+ return {
+ 'units': 'all',
+ }
+
+ return {
+ 'network-integration': self.integration_all_target,
+ 'units': 'all',
+ }
+
+ if is_subdir(path, data_context().content.plugin_paths['test']):
+ return self._simple_plugin_tests('test', name)
+
+ return None
+
+ def _classify_collection(self, path): # type: (str) -> t.Optional[t.Dict[str, str]]
+ """Return the classification for the given path using rules specific to collections."""
+ result = self._classify_common(path)
+
+ if result is not None:
+ return result
+
+ filename = os.path.basename(path)
+ dummy, ext = os.path.splitext(filename)
+
+ minimal = {}
+
+ if path.startswith('changelogs/'):
+ return minimal
+
+ if path.startswith('docs/'):
+ return minimal
+
+ if '/' not in path:
+ if path in (
+ '.gitignore',
+ 'COPYING',
+ 'LICENSE',
+ 'Makefile',
+ ):
+ return minimal
+
+ if ext in (
+ '.in',
+ '.md',
+ '.rst',
+ '.toml',
+ '.txt',
+ ):
+ return minimal
+
+ return None
+
+ def _classify_ansible(self, path): # type: (str) -> t.Optional[t.Dict[str, str]]
+ """Return the classification for the given path using rules specific to Ansible."""
+ if path.startswith('test/units/compat/'):
+ return {
+ 'units': 'test/units/',
+ }
+
+ result = self._classify_common(path)
+
+ if result is not None:
+ return result
+
+ dirname = os.path.dirname(path)
+ filename = os.path.basename(path)
+ name, ext = os.path.splitext(filename)
+
+ minimal = {}
+
+ if path.startswith('bin/'):
+ return all_tests(self.args) # broad impact, run all tests
+
+ if path.startswith('changelogs/'):
+ return minimal
+
+ if path.startswith('contrib/'):
+ return {
+ 'units': 'test/units/contrib/'
+ }
+
+ if path.startswith('docs/'):
+ return minimal
+
+ if path.startswith('examples/'):
+ if path == 'examples/scripts/ConfigureRemotingForAnsible.ps1':
+ return {
+ 'windows-integration': 'connection_winrm',
+ }
+
+ return minimal
+
+ if path.startswith('hacking/'):
+ return minimal
+
+ if path.startswith('lib/ansible/executor/powershell/'):
+ units_path = 'test/units/executor/powershell/'
+
+ if units_path not in self.units_paths:
+ units_path = None
+
+ return {
+ 'windows-integration': self.integration_all_target,
+ 'units': units_path,
+ }
+
+ if path.startswith('lib/ansible/'):
+ return all_tests(self.args) # broad impact, run all tests
+
+ if path.startswith('licenses/'):
+ return minimal
+
+ if path.startswith('packaging/'):
+ if path.startswith('packaging/requirements/'):
+ if name.startswith('requirements-') and ext == '.txt':
+ component = name.split('-', 1)[1]
+
+ candidates = (
+ 'cloud/%s/' % component,
+ )
+
+ for candidate in candidates:
+ if candidate in self.integration_targets_by_alias:
+ return {
+ 'integration': candidate,
+ }
+
+ return all_tests(self.args) # broad impact, run all tests
+
+ return minimal
+
+ if path.startswith('test/ansible_test/'):
+ return minimal # these tests are not invoked from ansible-test
+
+ if path.startswith('test/lib/ansible_test/config/'):
+ if name.startswith('cloud-config-'):
+ # noinspection PyTypeChecker
+ cloud_target = 'cloud/%s/' % name.split('-')[2].split('.')[0]
+
+ if cloud_target in self.integration_targets_by_alias:
+ return {
+ 'integration': cloud_target,
+ }
+
+ if path.startswith('test/lib/ansible_test/_data/completion/'):
+ if path == 'test/lib/ansible_test/_data/completion/docker.txt':
+ return all_tests(self.args, force=True) # force all tests due to risk of breaking changes in new test environment
+
+ if path.startswith('test/lib/ansible_test/_internal/cloud/'):
+ cloud_target = 'cloud/%s/' % name
+
+ if cloud_target in self.integration_targets_by_alias:
+ return {
+ 'integration': cloud_target,
+ }
+
+ return all_tests(self.args) # test infrastructure, run all tests
+
+ if path.startswith('test/lib/ansible_test/_internal/sanity/'):
+ return {
+ 'sanity': 'all', # test infrastructure, run all sanity checks
+ 'integration': 'ansible-test', # run ansible-test self tests
+ }
+
+ if path.startswith('test/lib/ansible_test/_data/sanity/'):
+ return {
+ 'sanity': 'all', # test infrastructure, run all sanity checks
+ 'integration': 'ansible-test', # run ansible-test self tests
+ }
+
+ if path.startswith('test/lib/ansible_test/_internal/units/'):
+ return {
+ 'units': 'all', # test infrastructure, run all unit tests
+ 'integration': 'ansible-test', # run ansible-test self tests
+ }
+
+ if path.startswith('test/lib/ansible_test/_data/units/'):
+ return {
+ 'units': 'all', # test infrastructure, run all unit tests
+ 'integration': 'ansible-test', # run ansible-test self tests
+ }
+
+ if path.startswith('test/lib/ansible_test/_data/pytest/'):
+ return {
+ 'units': 'all', # test infrastructure, run all unit tests
+ 'integration': 'ansible-test', # run ansible-test self tests
+ }
+
+ if path.startswith('test/lib/ansible_test/_data/requirements/'):
+ if name in (
+ 'integration',
+ 'network-integration',
+ 'windows-integration',
+ ):
+ return {
+ name: self.integration_all_target,
+ }
+
+ if name in (
+ 'sanity',
+ 'units',
+ ):
+ return {
+ name: 'all',
+ }
+
+ if name.startswith('integration.cloud.'):
+ cloud_target = 'cloud/%s/' % name.split('.')[2]
+
+ if cloud_target in self.integration_targets_by_alias:
+ return {
+ 'integration': cloud_target,
+ }
+
+ if path.startswith('test/lib/'):
+ return all_tests(self.args) # test infrastructure, run all tests
+
+ if path.startswith('test/support/'):
+ return all_tests(self.args) # test infrastructure, run all tests
+
+ if path.startswith('test/utils/shippable/'):
+ if dirname == 'test/utils/shippable':
+ test_map = {
+ 'cloud.sh': 'integration:cloud/',
+ 'linux.sh': 'integration:all',
+ 'network.sh': 'network-integration:all',
+ 'remote.sh': 'integration:all',
+ 'sanity.sh': 'sanity:all',
+ 'units.sh': 'units:all',
+ 'windows.sh': 'windows-integration:all',
+ }
+
+ test_match = test_map.get(filename)
+
+ if test_match:
+ test_command, test_target = test_match.split(':')
+
+ return {
+ test_command: test_target,
+ }
+
+ cloud_target = 'cloud/%s/' % name
+
+ if cloud_target in self.integration_targets_by_alias:
+ return {
+ 'integration': cloud_target,
+ }
+
+ return all_tests(self.args) # test infrastructure, run all tests
+
+ if path.startswith('test/utils/'):
+ return minimal
+
+ if '/' not in path:
+ if path in (
+ '.gitattributes',
+ '.gitignore',
+ '.mailmap',
+ 'COPYING',
+ 'Makefile',
+ ):
+ return minimal
+
+ if path in (
+ 'setup.py',
+ ):
+ return all_tests(self.args) # broad impact, run all tests
+
+ if ext in (
+ '.in',
+ '.md',
+ '.rst',
+ '.toml',
+ '.txt',
+ ):
+ return minimal
+
+ return None # unknown, will result in fall-back to run all tests
+
+ def _simple_plugin_tests(self, plugin_type, plugin_name): # type: (str, str) -> t.Dict[str, t.Optional[str]]
+ """
+ Return tests for the given plugin type and plugin name.
+ This function is useful for plugin types which do not require special processing.
+ """
+ if plugin_name == '__init__':
+ return all_tests(self.args, True)
+
+ integration_target = self.integration_targets_by_name.get('%s_%s' % (plugin_type, plugin_name))
+
+ if integration_target:
+ integration_name = integration_target.name
+ else:
+ integration_name = None
+
+ units_path = os.path.join(data_context().content.unit_path, 'plugins', plugin_type, 'test_%s.py' % plugin_name)
+
+ if units_path not in self.units_paths:
+ units_path = None
+
+ return dict(
+ integration=integration_name,
+ units=units_path,
+ )
+
+
+def all_tests(args, force=False):
+ """
+ :type args: TestConfig
+ :type force: bool
+ :rtype: dict[str, str]
+ """
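+    # when forced, ignore any --changed-all-target override and run every integration target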
+ if force:
+ integration_all_target = 'all'
+ else:
+ integration_all_target = get_integration_all_target(args)
+
+ return {
+ 'sanity': 'all',
+ 'units': 'all',
+ 'integration': integration_all_target,
+ 'windows-integration': integration_all_target,
+ 'network-integration': integration_all_target,
+ }
+
+
+def get_integration_all_target(args):
+ """
+ :type args: TestConfig
+ :rtype: str
+ """
+ if isinstance(args, IntegrationConfig):
+ return args.changed_all_target
+
+ return 'all'
diff --git a/test/lib/ansible_test/_internal/cli.py b/test/lib/ansible_test/_internal/cli.py
new file mode 100644
index 00000000..e406b2dd
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cli.py
@@ -0,0 +1,1217 @@
+"""Test runner for all Ansible tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import errno
+import os
+import sys
+
+# This import should occur as early as possible.
+# It must occur before subprocess has been imported anywhere in the current process.
+from .init import (
+ CURRENT_RLIMIT_NOFILE,
+)
+
+from . import types as t
+
+from .util import (
+ ApplicationError,
+ display,
+ raw_command,
+ generate_pip_command,
+ read_lines_without_comments,
+ MAXFD,
+ ANSIBLE_TEST_DATA_ROOT,
+)
+
+from .delegation import (
+ check_delegation_args,
+ delegate,
+)
+
+from .executor import (
+ command_posix_integration,
+ command_network_integration,
+ command_windows_integration,
+ command_shell,
+ SUPPORTED_PYTHON_VERSIONS,
+ ApplicationWarning,
+ Delegate,
+ generate_pip_install,
+ check_startup,
+)
+
+from .config import (
+ PosixIntegrationConfig,
+ WindowsIntegrationConfig,
+ NetworkIntegrationConfig,
+ SanityConfig,
+ UnitsConfig,
+ ShellConfig,
+)
+
+from .env import (
+ EnvConfig,
+ command_env,
+ configure_timeout,
+)
+
+from .sanity import (
+ command_sanity,
+ sanity_init,
+ sanity_get_tests,
+)
+
+from .units import (
+ command_units,
+)
+
+from .target import (
+ find_target_completion,
+ walk_posix_integration_targets,
+ walk_network_integration_targets,
+ walk_windows_integration_targets,
+ walk_units_targets,
+ walk_sanity_targets,
+)
+
+from .core_ci import (
+ AWS_ENDPOINTS,
+)
+
+from .cloud import (
+ initialize_cloud_plugins,
+)
+
+from .data import (
+ data_context,
+)
+
+from .util_common import (
+ get_docker_completion,
+ get_network_completion,
+ get_remote_completion,
+ CommonConfig,
+)
+
+from .coverage.combine import (
+ command_coverage_combine,
+)
+
+from .coverage.erase import (
+ command_coverage_erase,
+)
+
+from .coverage.html import (
+ command_coverage_html,
+)
+
+from .coverage.report import (
+ command_coverage_report,
+ CoverageReportConfig,
+)
+
+from .coverage.xml import (
+ command_coverage_xml,
+)
+
+from .coverage.analyze.targets.generate import (
+ command_coverage_analyze_targets_generate,
+ CoverageAnalyzeTargetsGenerateConfig,
+)
+
+from .coverage.analyze.targets.expand import (
+ command_coverage_analyze_targets_expand,
+ CoverageAnalyzeTargetsExpandConfig,
+)
+
+from .coverage.analyze.targets.filter import (
+ command_coverage_analyze_targets_filter,
+ CoverageAnalyzeTargetsFilterConfig,
+)
+
+from .coverage.analyze.targets.combine import (
+ command_coverage_analyze_targets_combine,
+ CoverageAnalyzeTargetsCombineConfig,
+)
+
+from .coverage.analyze.targets.missing import (
+ command_coverage_analyze_targets_missing,
+ CoverageAnalyzeTargetsMissingConfig,
+)
+
+from .coverage import (
+ COVERAGE_GROUPS,
+ CoverageConfig,
+)
+
+if t.TYPE_CHECKING:
+ import argparse as argparse_module
+
+
+def main():
+ """Main program function."""
+ try:
+ os.chdir(data_context().content.root)
+ initialize_cloud_plugins()
+ sanity_init()
+ args = parse_args()
+ config = args.config(args) # type: CommonConfig
+ display.verbosity = config.verbosity
+ display.truncate = config.truncate
+ display.redact = config.redact
+ display.color = config.color
+ display.info_stderr = config.info_stderr
+ check_startup()
+ check_delegation_args(config)
+ configure_timeout(config)
+
+ display.info('RLIMIT_NOFILE: %s' % (CURRENT_RLIMIT_NOFILE,), verbosity=2)
+ display.info('MAXFD: %d' % MAXFD, verbosity=2)
+
+ try:
+ args.func(config)
+ delegate_args = None
+ except Delegate as ex:
+ # save delegation args for use once we exit the exception handler
+ delegate_args = (ex.exclude, ex.require, ex.integration_targets)
+
+ if delegate_args:
+ # noinspection PyTypeChecker
+ delegate(config, *delegate_args)
+
+ display.review_warnings()
+ except ApplicationWarning as ex:
+ display.warning(u'%s' % ex)
+ sys.exit(0)
+ except ApplicationError as ex:
+ display.error(u'%s' % ex)
+ sys.exit(1)
+ except KeyboardInterrupt:
+ sys.exit(2)
+ except IOError as ex:
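+        # a broken pipe (for example when output is piped to a command that exits early) gets a distinct exit code instead of a traceback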
+ if ex.errno == errno.EPIPE:
+ sys.exit(3)
+ raise
+
+
+def parse_args():
+ """Parse command line arguments."""
+ try:
+ import argparse
+ except ImportError:
+ if '--requirements' not in sys.argv:
+ raise
+ # install argparse without using constraints since pip may be too old to support them
+ # not using the ansible-test requirements file since this install is for sys.executable rather than the delegated python (which may be different)
+ # argparse has no special requirements, so upgrading pip is not required here
+ raw_command(generate_pip_install(generate_pip_command(sys.executable), '', packages=['argparse'], use_constraints=False))
+ import argparse
+
+ try:
+ import argcomplete
+ except ImportError:
+ argcomplete = None
+
+ if argcomplete:
+ epilog = 'Tab completion available using the "argcomplete" python package.'
+ else:
+ epilog = 'Install the "argcomplete" python package to enable tab completion.'
+
+ def key_value_type(value): # type: (str) -> t.Tuple[str, str]
+ """Wrapper around key_value."""
+ return key_value(argparse, value)
+
+ parser = argparse.ArgumentParser(epilog=epilog)
+
+ common = argparse.ArgumentParser(add_help=False)
+
+ common.add_argument('-e', '--explain',
+ action='store_true',
+ help='explain commands that would be executed')
+
+ common.add_argument('-v', '--verbose',
+ dest='verbosity',
+ action='count',
+ default=0,
+ help='display more output')
+
+ common.add_argument('--color',
+ metavar='COLOR',
+ nargs='?',
+ help='generate color output: %(choices)s',
+ choices=('yes', 'no', 'auto'),
+ const='yes',
+ default='auto')
+
+ common.add_argument('--debug',
+ action='store_true',
+ help='run ansible commands in debug mode')
+
+ # noinspection PyTypeChecker
+ common.add_argument('--truncate',
+ dest='truncate',
+ metavar='COLUMNS',
+ type=int,
+ default=display.columns,
+ help='truncate some long output (0=disabled) (default: auto)')
+
+ common.add_argument('--redact',
+ dest='redact',
+ action='store_true',
+ default=True,
+ help='redact sensitive values in output')
+
+ common.add_argument('--no-redact',
+ dest='redact',
+ action='store_false',
+ default=False,
+ help='show sensitive values in output')
+
+ common.add_argument('--check-python',
+ choices=SUPPORTED_PYTHON_VERSIONS,
+ help=argparse.SUPPRESS)
+
+ test = argparse.ArgumentParser(add_help=False, parents=[common])
+
+ test.add_argument('include',
+ metavar='TARGET',
+ nargs='*',
+ help='test the specified target').completer = complete_target
+
+ test.add_argument('--include',
+ metavar='TARGET',
+ action='append',
+ help='include the specified target').completer = complete_target
+
+ test.add_argument('--exclude',
+ metavar='TARGET',
+ action='append',
+ help='exclude the specified target').completer = complete_target
+
+ test.add_argument('--require',
+ metavar='TARGET',
+ action='append',
+ help='require the specified target').completer = complete_target
+
+ test.add_argument('--coverage',
+ action='store_true',
+ help='analyze code coverage when running tests')
+
+ test.add_argument('--coverage-label',
+ default='',
+ help='label to include in coverage output file names')
+
+ test.add_argument('--coverage-check',
+ action='store_true',
+ help='only verify code coverage can be enabled')
+
+ test.add_argument('--metadata',
+ help=argparse.SUPPRESS)
+
+ test.add_argument('--base-branch',
+ help='base branch used for change detection')
+
+ add_changes(test, argparse)
+ add_environments(test)
+
+ integration = argparse.ArgumentParser(add_help=False, parents=[test])
+
+ integration.add_argument('--python',
+ metavar='VERSION',
+ choices=SUPPORTED_PYTHON_VERSIONS + ('default',),
+ help='python version: %s' % ', '.join(SUPPORTED_PYTHON_VERSIONS))
+
+ integration.add_argument('--start-at',
+ metavar='TARGET',
+ help='start at the specified target').completer = complete_target
+
+ integration.add_argument('--start-at-task',
+ metavar='TASK',
+ help='start at the specified task')
+
+ integration.add_argument('--tags',
+ metavar='TAGS',
+ help='only run plays and tasks tagged with these values')
+
+ integration.add_argument('--skip-tags',
+ metavar='TAGS',
+ help='only run plays and tasks whose tags do not match these values')
+
+ integration.add_argument('--diff',
+ action='store_true',
+ help='show diff output')
+
+ integration.add_argument('--allow-destructive',
+ action='store_true',
+ help='allow destructive tests')
+
+ integration.add_argument('--allow-root',
+ action='store_true',
+ help='allow tests requiring root when not root')
+
+ integration.add_argument('--allow-disabled',
+ action='store_true',
+ help='allow tests which have been marked as disabled')
+
+ integration.add_argument('--allow-unstable',
+ action='store_true',
+ help='allow tests which have been marked as unstable')
+
+ integration.add_argument('--allow-unstable-changed',
+ action='store_true',
+ help='allow tests which have been marked as unstable when focused changes are detected')
+
+ integration.add_argument('--allow-unsupported',
+ action='store_true',
+ help='allow tests which have been marked as unsupported')
+
+ integration.add_argument('--retry-on-error',
+ action='store_true',
+ help='retry failed test with increased verbosity')
+
+ integration.add_argument('--continue-on-error',
+ action='store_true',
+ help='continue after failed test')
+
+ integration.add_argument('--debug-strategy',
+ action='store_true',
+ help='run test playbooks using the debug strategy')
+
+ integration.add_argument('--changed-all-target',
+ metavar='TARGET',
+ default='all',
+ help='target to run when all tests are needed')
+
+ integration.add_argument('--changed-all-mode',
+ metavar='MODE',
+ choices=('default', 'include', 'exclude'),
+ help='include/exclude behavior with --changed-all-target: %(choices)s')
+
+ integration.add_argument('--list-targets',
+ action='store_true',
+ help='list matching targets instead of running tests')
+
+ integration.add_argument('--no-temp-workdir',
+ action='store_true',
+ help='do not run tests from a temporary directory (use only for verifying broken tests)')
+
+ integration.add_argument('--no-temp-unicode',
+ action='store_true',
+ help='avoid unicode characters in temporary directory (use only for verifying broken tests)')
+
+ subparsers = parser.add_subparsers(metavar='COMMAND')
+ subparsers.required = True # work-around for python 3 bug which makes subparsers optional
+
+ posix_integration = subparsers.add_parser('integration',
+ parents=[integration],
+ help='posix integration tests')
+
+ posix_integration.set_defaults(func=command_posix_integration,
+ targets=walk_posix_integration_targets,
+ config=PosixIntegrationConfig)
+
+ add_extra_docker_options(posix_integration)
+ add_httptester_options(posix_integration, argparse)
+
+ network_integration = subparsers.add_parser('network-integration',
+ parents=[integration],
+ help='network integration tests')
+
+ network_integration.set_defaults(func=command_network_integration,
+ targets=walk_network_integration_targets,
+ config=NetworkIntegrationConfig)
+
+ add_extra_docker_options(network_integration, integration=False)
+
+ network_integration.add_argument('--platform',
+ metavar='PLATFORM',
+ action='append',
+ help='network platform/version').completer = complete_network_platform
+
+ network_integration.add_argument('--platform-collection',
+ type=key_value_type,
+ metavar='PLATFORM=COLLECTION',
+ action='append',
+ help='collection used to test platform').completer = complete_network_platform_collection
+
+ network_integration.add_argument('--platform-connection',
+ type=key_value_type,
+ metavar='PLATFORM=CONNECTION',
+ action='append',
+ help='connection used to test platform').completer = complete_network_platform_connection
+
+ network_integration.add_argument('--inventory',
+ metavar='PATH',
+ help='path to inventory used for tests')
+
+ network_integration.add_argument('--testcase',
+ metavar='TESTCASE',
+ help='limit a test to a specified testcase').completer = complete_network_testcase
+
+ windows_integration = subparsers.add_parser('windows-integration',
+ parents=[integration],
+ help='windows integration tests')
+
+ windows_integration.set_defaults(func=command_windows_integration,
+ targets=walk_windows_integration_targets,
+ config=WindowsIntegrationConfig)
+
+ add_extra_docker_options(windows_integration, integration=False)
+ add_httptester_options(windows_integration, argparse)
+
+ windows_integration.add_argument('--windows',
+ metavar='VERSION',
+ action='append',
+ help='windows version').completer = complete_windows
+
+ windows_integration.add_argument('--inventory',
+ metavar='PATH',
+ help='path to inventory used for tests')
+
+ units = subparsers.add_parser('units',
+ parents=[test],
+ help='unit tests')
+
+ units.set_defaults(func=command_units,
+ targets=walk_units_targets,
+ config=UnitsConfig)
+
+ units.add_argument('--python',
+ metavar='VERSION',
+ choices=SUPPORTED_PYTHON_VERSIONS + ('default',),
+ help='python version: %s' % ', '.join(SUPPORTED_PYTHON_VERSIONS))
+
+ units.add_argument('--collect-only',
+ action='store_true',
+ help='collect tests but do not execute them')
+
+ # noinspection PyTypeChecker
+ units.add_argument('--num-workers',
+ type=int,
+ help='number of workers to use (default: auto)')
+
+ units.add_argument('--requirements-mode',
+ choices=('only', 'skip'),
+ help=argparse.SUPPRESS)
+
+ add_extra_docker_options(units, integration=False)
+
+ sanity = subparsers.add_parser('sanity',
+ parents=[test],
+ help='sanity tests')
+
+ sanity.set_defaults(func=command_sanity,
+ targets=walk_sanity_targets,
+ config=SanityConfig)
+
+ sanity.add_argument('--test',
+ metavar='TEST',
+ action='append',
+ choices=[test.name for test in sanity_get_tests()],
+ help='tests to run').completer = complete_sanity_test
+
+ sanity.add_argument('--skip-test',
+ metavar='TEST',
+ action='append',
+ choices=[test.name for test in sanity_get_tests()],
+ help='tests to skip').completer = complete_sanity_test
+
+ sanity.add_argument('--allow-disabled',
+ action='store_true',
+ help='allow tests to run which are disabled by default')
+
+ sanity.add_argument('--list-tests',
+ action='store_true',
+ help='list available tests')
+
+ sanity.add_argument('--python',
+ metavar='VERSION',
+ choices=SUPPORTED_PYTHON_VERSIONS + ('default',),
+ help='python version: %s' % ', '.join(SUPPORTED_PYTHON_VERSIONS))
+
+ sanity.add_argument('--enable-optional-errors',
+ action='store_true',
+ help='enable optional errors')
+
+ add_lint(sanity)
+ add_extra_docker_options(sanity, integration=False)
+
+ shell = subparsers.add_parser('shell',
+ parents=[common],
+ help='open an interactive shell')
+
+ shell.add_argument('--python',
+ metavar='VERSION',
+ choices=SUPPORTED_PYTHON_VERSIONS + ('default',),
+ help='python version: %s' % ', '.join(SUPPORTED_PYTHON_VERSIONS))
+
+ shell.set_defaults(func=command_shell,
+ config=ShellConfig)
+
+ shell.add_argument('--raw',
+ action='store_true',
+ help='direct to shell with no setup')
+
+ add_environments(shell)
+ add_extra_docker_options(shell)
+ add_httptester_options(shell, argparse)
+
+ coverage_common = argparse.ArgumentParser(add_help=False, parents=[common])
+
+ add_environments(coverage_common, isolated_delegation=False)
+
+ coverage = subparsers.add_parser('coverage',
+ help='code coverage management and reporting')
+
+ coverage_subparsers = coverage.add_subparsers(metavar='COMMAND')
+ coverage_subparsers.required = True # work-around for python 3 bug which makes subparsers optional
+
+ add_coverage_analyze(coverage_subparsers, coverage_common)
+
+ coverage_combine = coverage_subparsers.add_parser('combine',
+ parents=[coverage_common],
+ help='combine coverage data and rewrite remote paths')
+
+ coverage_combine.set_defaults(func=command_coverage_combine,
+ config=CoverageConfig)
+
+ coverage_combine.add_argument('--export',
+ help='directory to export combined coverage files to')
+
+ add_extra_coverage_options(coverage_combine)
+
+ coverage_erase = coverage_subparsers.add_parser('erase',
+ parents=[coverage_common],
+ help='erase coverage data files')
+
+ coverage_erase.set_defaults(func=command_coverage_erase,
+ config=CoverageConfig)
+
+ coverage_report = coverage_subparsers.add_parser('report',
+ parents=[coverage_common],
+ help='generate console coverage report')
+
+ coverage_report.set_defaults(func=command_coverage_report,
+ config=CoverageReportConfig)
+
+ coverage_report.add_argument('--show-missing',
+ action='store_true',
+ help='show line numbers of statements not executed')
+ coverage_report.add_argument('--include',
+ metavar='PAT1,PAT2,...',
+ help='include only files whose paths match one of these '
+ 'patterns. Accepts shell-style wildcards, which must be '
+ 'quoted.')
+ coverage_report.add_argument('--omit',
+ metavar='PAT1,PAT2,...',
+ help='omit files whose paths match one of these patterns. '
+ 'Accepts shell-style wildcards, which must be quoted.')
+
+ add_extra_coverage_options(coverage_report)
+
+ coverage_html = coverage_subparsers.add_parser('html',
+ parents=[coverage_common],
+ help='generate html coverage report')
+
+ coverage_html.set_defaults(func=command_coverage_html,
+ config=CoverageConfig)
+
+ add_extra_coverage_options(coverage_html)
+
+ coverage_xml = coverage_subparsers.add_parser('xml',
+ parents=[coverage_common],
+ help='generate xml coverage report')
+
+ coverage_xml.set_defaults(func=command_coverage_xml,
+ config=CoverageConfig)
+
+ add_extra_coverage_options(coverage_xml)
+
+ env = subparsers.add_parser('env',
+ parents=[common],
+ help='show information about the test environment')
+
+ env.set_defaults(func=command_env,
+ config=EnvConfig)
+
+ env.add_argument('--show',
+ action='store_true',
+ help='show environment on stdout')
+
+ env.add_argument('--dump',
+ action='store_true',
+ help='dump environment to disk')
+
+ env.add_argument('--list-files',
+ action='store_true',
+ help='list files on stdout')
+
+ # noinspection PyTypeChecker
+ env.add_argument('--timeout',
+ type=int,
+ metavar='MINUTES',
+ help='timeout for future ansible-test commands (0 clears)')
+
+ if argcomplete:
+ argcomplete.autocomplete(parser, always_complete_options=False, validator=lambda i, k: True)
+
+ args = parser.parse_args()
+
+ if args.explain and not args.verbosity:
+ args.verbosity = 1
+
+ if args.color == 'yes':
+ args.color = True
+ elif args.color == 'no':
+ args.color = False
+ else:
+ args.color = sys.stdout.isatty()
+
+ return args
+
+
+def key_value(argparse, value): # type: (argparse_module, str) -> t.Tuple[str, str]
+ """Type parsing and validation for argparse key/value pairs separated by an '=' character."""
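+    # e.g. 'platform=namespace.collection' -> ('platform', 'namespace.collection'); anything without exactly one '=' is rejected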
+ parts = value.split('=')
+
+ if len(parts) != 2:
+ raise argparse.ArgumentTypeError('"%s" must be in the format "key=value"' % value)
+
+ return parts[0], parts[1]
+
+
+# noinspection PyProtectedMember
+def add_coverage_analyze(coverage_subparsers, coverage_common): # type: (argparse_module._SubParsersAction, argparse_module.ArgumentParser) -> None
+ """Add the `coverage analyze` subcommand."""
+ analyze = coverage_subparsers.add_parser(
+ 'analyze',
+ help='analyze collected coverage data',
+ )
+
+ analyze_subparsers = analyze.add_subparsers(metavar='COMMAND')
+ analyze_subparsers.required = True # work-around for python 3 bug which makes subparsers optional
+
+ targets = analyze_subparsers.add_parser(
+ 'targets',
+ help='analyze integration test target coverage',
+ )
+
+ targets_subparsers = targets.add_subparsers(metavar='COMMAND')
+ targets_subparsers.required = True # work-around for python 3 bug which makes subparsers optional
+
+ targets_generate = targets_subparsers.add_parser(
+ 'generate',
+ parents=[coverage_common],
+ help='aggregate coverage by integration test target',
+ )
+
+ targets_generate.set_defaults(
+ func=command_coverage_analyze_targets_generate,
+ config=CoverageAnalyzeTargetsGenerateConfig,
+ )
+
+ targets_generate.add_argument(
+ 'input_dir',
+ nargs='?',
+ help='directory to read coverage from',
+ )
+
+ targets_generate.add_argument(
+ 'output_file',
+ help='output file for aggregated coverage',
+ )
+
+ targets_expand = targets_subparsers.add_parser(
+ 'expand',
+ parents=[coverage_common],
+ help='expand target names from integers in aggregated coverage',
+ )
+
+ targets_expand.set_defaults(
+ func=command_coverage_analyze_targets_expand,
+ config=CoverageAnalyzeTargetsExpandConfig,
+ )
+
+ targets_expand.add_argument(
+ 'input_file',
+ help='input file to read aggregated coverage from',
+ )
+
+ targets_expand.add_argument(
+ 'output_file',
+ help='output file to write expanded coverage to',
+ )
+
+ targets_filter = targets_subparsers.add_parser(
+ 'filter',
+ parents=[coverage_common],
+ help='filter aggregated coverage data',
+ )
+
+ targets_filter.set_defaults(
+ func=command_coverage_analyze_targets_filter,
+ config=CoverageAnalyzeTargetsFilterConfig,
+ )
+
+ targets_filter.add_argument(
+ 'input_file',
+ help='input file to read aggregated coverage from',
+ )
+
+ targets_filter.add_argument(
+ 'output_file',
+        help='output file to write filtered coverage to',
+ )
+
+ targets_filter.add_argument(
+ '--include-target',
+ dest='include_targets',
+ action='append',
+ help='include the specified targets',
+ )
+
+ targets_filter.add_argument(
+ '--exclude-target',
+ dest='exclude_targets',
+ action='append',
+ help='exclude the specified targets',
+ )
+
+ targets_filter.add_argument(
+ '--include-path',
+ help='include paths matching the given regex',
+ )
+
+ targets_filter.add_argument(
+ '--exclude-path',
+ help='exclude paths matching the given regex',
+ )
+
+ targets_combine = targets_subparsers.add_parser(
+ 'combine',
+ parents=[coverage_common],
+ help='combine multiple aggregated coverage files',
+ )
+
+ targets_combine.set_defaults(
+ func=command_coverage_analyze_targets_combine,
+ config=CoverageAnalyzeTargetsCombineConfig,
+ )
+
+ targets_combine.add_argument(
+ 'input_file',
+ nargs='+',
+ help='input file to read aggregated coverage from',
+ )
+
+ targets_combine.add_argument(
+ 'output_file',
+ help='output file to write aggregated coverage to',
+ )
+
+ targets_missing = targets_subparsers.add_parser(
+ 'missing',
+ parents=[coverage_common],
+ help='identify coverage in one file missing in another',
+ )
+
+ targets_missing.set_defaults(
+ func=command_coverage_analyze_targets_missing,
+ config=CoverageAnalyzeTargetsMissingConfig,
+ )
+
+ targets_missing.add_argument(
+ 'from_file',
+ help='input file containing aggregated coverage',
+ )
+
+ targets_missing.add_argument(
+ 'to_file',
+ help='input file containing aggregated coverage',
+ )
+
+ targets_missing.add_argument(
+ 'output_file',
+ help='output file to write aggregated coverage to',
+ )
+
+ targets_missing.add_argument(
+ '--only-gaps',
+ action='store_true',
+ help='report only arcs/lines not hit by any target',
+ )
+
+ targets_missing.add_argument(
+ '--only-exists',
+ action='store_true',
+ help='limit results to files that exist',
+ )
+
+
+def add_lint(parser):
+ """
+ :type parser: argparse.ArgumentParser
+ """
+ parser.add_argument('--lint',
+ action='store_true',
+ help='write lint output to stdout, everything else stderr')
+
+ parser.add_argument('--junit',
+ action='store_true',
+ help='write test failures to junit xml files')
+
+ parser.add_argument('--failure-ok',
+ action='store_true',
+ help='exit successfully on failed tests after saving results')
+
+
+def add_changes(parser, argparse):
+ """
+ :type parser: argparse.ArgumentParser
+ :type argparse: argparse
+ """
+ parser.add_argument('--changed', action='store_true', help='limit targets based on changes')
+
+ changes = parser.add_argument_group(title='change detection arguments')
+
+ changes.add_argument('--tracked', action='store_true', help=argparse.SUPPRESS)
+ changes.add_argument('--untracked', action='store_true', help='include untracked files')
+ changes.add_argument('--ignore-committed', dest='committed', action='store_false', help='exclude committed files')
+ changes.add_argument('--ignore-staged', dest='staged', action='store_false', help='exclude staged files')
+ changes.add_argument('--ignore-unstaged', dest='unstaged', action='store_false', help='exclude unstaged files')
+
+ changes.add_argument('--changed-from', metavar='PATH', help=argparse.SUPPRESS)
+ changes.add_argument('--changed-path', metavar='PATH', action='append', help=argparse.SUPPRESS)
+
+
+def add_environments(parser, isolated_delegation=True):
+ """
+ :type parser: argparse.ArgumentParser
+ :type isolated_delegation: bool
+ """
+ parser.add_argument('--requirements',
+ action='store_true',
+ help='install command requirements')
+
+ parser.add_argument('--python-interpreter',
+ metavar='PATH',
+ default=None,
+ help='path to the docker or remote python interpreter')
+
+ parser.add_argument('--no-pip-check',
+ dest='pip_check',
+ default=True,
+ action='store_false',
+ help='do not run "pip check" to verify requirements')
+
+ environments = parser.add_mutually_exclusive_group()
+
+ environments.add_argument('--local',
+ action='store_true',
+ help='run from the local environment')
+
+ environments.add_argument('--venv',
+ action='store_true',
+ help='run from ansible-test managed virtual environments')
+
+ venv = parser.add_argument_group(title='venv arguments')
+
+ venv.add_argument('--venv-system-site-packages',
+ action='store_true',
+ help='enable system site packages')
+
+ if not isolated_delegation:
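+        # commands which do not support delegation still need these attributes defined on the parsed arguments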
+ environments.set_defaults(
+ docker=None,
+ remote=None,
+ remote_stage=None,
+ remote_provider=None,
+ remote_aws_region=None,
+ remote_terminate=None,
+ remote_endpoint=None,
+ python_interpreter=None,
+ )
+
+ return
+
+ environments.add_argument('--docker',
+ metavar='IMAGE',
+ nargs='?',
+ default=None,
+ const='default',
+ help='run from a docker container').completer = complete_docker
+
+ environments.add_argument('--remote',
+ metavar='PLATFORM',
+ default=None,
+ help='run from a remote instance').completer = complete_remote_shell if parser.prog.endswith(' shell') else complete_remote
+
+ remote = parser.add_argument_group(title='remote arguments')
+
+ remote.add_argument('--remote-stage',
+ metavar='STAGE',
+ help='remote stage to use: prod, dev',
+ default='prod').completer = complete_remote_stage
+
+ remote.add_argument('--remote-provider',
+ metavar='PROVIDER',
+ help='remote provider to use: %(choices)s',
+ choices=['default', 'aws', 'azure', 'parallels', 'ibmvpc', 'ibmps'],
+ default='default')
+
+ remote.add_argument('--remote-endpoint',
+ metavar='ENDPOINT',
+ help='remote provisioning endpoint to use (default: auto)',
+ default=None)
+
+ remote.add_argument('--remote-aws-region',
+ metavar='REGION',
+ help='remote aws region to use: %(choices)s (default: auto)',
+ choices=sorted(AWS_ENDPOINTS),
+ default=None)
+
+ remote.add_argument('--remote-terminate',
+ metavar='WHEN',
+ help='terminate remote instance: %(choices)s (default: %(default)s)',
+ choices=['never', 'always', 'success'],
+ default='never')
+
+
+def add_extra_coverage_options(parser):
+ """
+ :type parser: argparse.ArgumentParser
+ """
+ parser.add_argument('--group-by',
+ metavar='GROUP',
+ action='append',
+ choices=COVERAGE_GROUPS,
+ help='group output by: %s' % ', '.join(COVERAGE_GROUPS))
+
+ parser.add_argument('--all',
+ action='store_true',
+ help='include all python/powershell source files')
+
+ parser.add_argument('--stub',
+ action='store_true',
+ help='generate empty report of all python/powershell source files')
+
+
+def add_httptester_options(parser, argparse):
+ """
+ :type parser: argparse.ArgumentParser
+ :type argparse: argparse
+ """
+ group = parser.add_mutually_exclusive_group()
+
+ group.add_argument('--httptester',
+ metavar='IMAGE',
+ default='quay.io/ansible/http-test-container:1.0.0',
+ help='docker image to use for the httptester container')
+
+ group.add_argument('--disable-httptester',
+ dest='httptester',
+ action='store_const',
+ const='',
+ help='do not use the httptester container')
+
+ parser.add_argument('--inject-httptester',
+ action='store_true',
+ help=argparse.SUPPRESS) # internal use only
+
+
+def add_extra_docker_options(parser, integration=True):
+ """
+ :type parser: argparse.ArgumentParser
+ :type integration: bool
+ """
+ docker = parser.add_argument_group(title='docker arguments')
+
+ docker.add_argument('--docker-no-pull',
+ action='store_false',
+ dest='docker_pull',
+ help='do not explicitly pull the latest docker images')
+
+ if data_context().content.is_ansible:
+ docker.add_argument('--docker-keep-git',
+ action='store_true',
+ help='transfer git related files into the docker container')
+ else:
+ docker.set_defaults(
+ docker_keep_git=False,
+ )
+
+ docker.add_argument('--docker-seccomp',
+ metavar='SC',
+ choices=('default', 'unconfined'),
+ default=None,
+ help='set seccomp confinement for the test container: %(choices)s')
+
+ docker.add_argument('--docker-terminate',
+ metavar='WHEN',
+ help='terminate docker container: %(choices)s (default: %(default)s)',
+ choices=['never', 'always', 'success'],
+ default='always')
+
+ if not integration:
+ return
+
+ docker.add_argument('--docker-privileged',
+ action='store_true',
+ help='run docker container in privileged mode')
+
+ docker.add_argument('--docker-network',
+ help='run using the specified docker network')
+
+ # noinspection PyTypeChecker
+ docker.add_argument('--docker-memory',
+ help='memory limit for docker in bytes', type=int)
+
+
+# noinspection PyUnusedLocal
+def complete_remote_stage(prefix, parsed_args, **_): # pylint: disable=unused-argument
+ """
+ :type prefix: unicode
+ :type parsed_args: any
+ :rtype: list[str]
+ """
+ return [stage for stage in ('prod', 'dev') if stage.startswith(prefix)]
+
+
+def complete_target(prefix, parsed_args, **_):
+ """
+ :type prefix: unicode
+ :type parsed_args: any
+ :rtype: list[str]
+ """
+ return find_target_completion(parsed_args.targets, prefix)
+
+
+# noinspection PyUnusedLocal
+def complete_remote(prefix, parsed_args, **_):
+ """
+ :type prefix: unicode
+ :type parsed_args: any
+ :rtype: list[str]
+ """
+ del parsed_args
+
+ images = sorted(get_remote_completion().keys())
+
+ return [i for i in images if i.startswith(prefix)]
+
+
+# noinspection PyUnusedLocal
+def complete_remote_shell(prefix, parsed_args, **_):
+ """
+ :type prefix: unicode
+ :type parsed_args: any
+ :rtype: list[str]
+ """
+ del parsed_args
+
+ images = sorted(get_remote_completion().keys())
+
+    # 2008 doesn't support SSH, so we do not add it to the list of valid images
+ windows_completion_path = os.path.join(ANSIBLE_TEST_DATA_ROOT, 'completion', 'windows.txt')
+ images.extend(["windows/%s" % i for i in read_lines_without_comments(windows_completion_path, remove_blank_lines=True) if i != '2008'])
+
+ return [i for i in images if i.startswith(prefix)]
+
+
+# noinspection PyUnusedLocal
+def complete_docker(prefix, parsed_args, **_):
+ """
+ :type prefix: unicode
+ :type parsed_args: any
+ :rtype: list[str]
+ """
+ del parsed_args
+
+ images = sorted(get_docker_completion().keys())
+
+ return [i for i in images if i.startswith(prefix)]
+
+
+def complete_windows(prefix, parsed_args, **_):
+ """
+ :type prefix: unicode
+ :type parsed_args: any
+ :rtype: list[str]
+ """
+ images = read_lines_without_comments(os.path.join(ANSIBLE_TEST_DATA_ROOT, 'completion', 'windows.txt'), remove_blank_lines=True)
+
+ return [i for i in images if i.startswith(prefix) and (not parsed_args.windows or i not in parsed_args.windows)]
+
+
+def complete_network_platform(prefix, parsed_args, **_):
+ """
+ :type prefix: unicode
+ :type parsed_args: any
+ :rtype: list[str]
+ """
+ images = sorted(get_network_completion())
+
+ return [i for i in images if i.startswith(prefix) and (not parsed_args.platform or i not in parsed_args.platform)]
+
+
+def complete_network_platform_collection(prefix, parsed_args, **_):
+ """
+ :type prefix: unicode
+ :type parsed_args: any
+ :rtype: list[str]
+ """
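+    # complete only the platform portion (before '='); the trailing '=' lets the collection be typed next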
+ left = prefix.split('=')[0]
+ images = sorted(set(image.split('/')[0] for image in get_network_completion()))
+
+ return [i + '=' for i in images if i.startswith(left) and (not parsed_args.platform_collection or i not in [x[0] for x in parsed_args.platform_collection])]
+
+
+def complete_network_platform_connection(prefix, parsed_args, **_):
+ """
+ :type prefix: unicode
+ :type parsed_args: any
+ :rtype: list[str]
+ """
+ left = prefix.split('=')[0]
+ images = sorted(set(image.split('/')[0] for image in get_network_completion()))
+
+ return [i + '=' for i in images if i.startswith(left) and (not parsed_args.platform_connection or i not in [x[0] for x in parsed_args.platform_connection])]
+
+
+def complete_network_testcase(prefix, parsed_args, **_):
+ """
+ :type prefix: unicode
+ :type parsed_args: any
+ :rtype: list[str]
+ """
+ testcases = []
+
+ # since testcases are module specific, don't autocomplete if more than one
+    # module is specified
+ if len(parsed_args.include) != 1:
+ return []
+
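+    # testcases live in files under the target's tests/<connection>/ directories; only the base name (without extension) is completed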
+ test_dir = os.path.join(data_context().content.integration_targets_path, parsed_args.include[0], 'tests')
+ connection_dirs = data_context().content.get_dirs(test_dir)
+
+ for connection_dir in connection_dirs:
+ for testcase in [os.path.basename(path) for path in data_context().content.get_files(connection_dir)]:
+ if testcase.startswith(prefix):
+ testcases.append(testcase.split('.')[0])
+
+ return testcases
+
+
+# noinspection PyUnusedLocal
+def complete_sanity_test(prefix, parsed_args, **_):
+ """
+ :type prefix: unicode
+ :type parsed_args: any
+ :rtype: list[str]
+ """
+ del parsed_args
+
+ tests = sorted(test.name for test in sanity_get_tests())
+
+ return [i for i in tests if i.startswith(prefix)]
diff --git a/test/lib/ansible_test/_internal/cloud/__init__.py b/test/lib/ansible_test/_internal/cloud/__init__.py
new file mode 100644
index 00000000..04f592c4
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cloud/__init__.py
@@ -0,0 +1,429 @@
+"""Plugin system for cloud providers and environments for use in integration tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import abc
+import atexit
+import datetime
+import time
+import os
+import re
+import tempfile
+
+from .. import types as t
+
+from ..encoding import (
+ to_bytes,
+)
+
+from ..io import (
+ read_text_file,
+)
+
+from ..util import (
+ ApplicationError,
+ display,
+ import_plugins,
+ load_plugins,
+ ABC,
+ ANSIBLE_TEST_CONFIG_ROOT,
+)
+
+from ..util_common import (
+ write_json_test_results,
+ ResultType,
+)
+
+from ..target import (
+ TestTarget,
+)
+
+from ..config import (
+ IntegrationConfig,
+)
+
+from ..ci import (
+ get_ci_provider,
+)
+
+from ..data import (
+ data_context,
+)
+
+PROVIDERS = {}
+ENVIRONMENTS = {}
+
+
+def initialize_cloud_plugins():
+ """Import cloud plugins and load them into the plugin dictionaries."""
+ import_plugins('cloud')
+
+ load_plugins(CloudProvider, PROVIDERS)
+ load_plugins(CloudEnvironment, ENVIRONMENTS)
+
+
+def get_cloud_platforms(args, targets=None):
+ """
+ :type args: TestConfig
+ :type targets: tuple[IntegrationTarget] | None
+ :rtype: list[str]
+ """
+ if isinstance(args, IntegrationConfig):
+ if args.list_targets:
+ return []
+
+ if targets is None:
+ cloud_platforms = set(args.metadata.cloud_config or [])
+ else:
+ cloud_platforms = set(get_cloud_platform(target) for target in targets)
+
+ cloud_platforms.discard(None)
+
+ return sorted(cloud_platforms)
+
+
+def get_cloud_platform(target):
+ """
+ :type target: IntegrationTarget
+ :rtype: str | None
+ """
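+    # cloud platform aliases have the form 'cloud/<platform>/'; a target may declare at most one platform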
+ cloud_platforms = set(a.split('/')[1] for a in target.aliases if a.startswith('cloud/') and a.endswith('/') and a != 'cloud/')
+
+ if not cloud_platforms:
+ return None
+
+ if len(cloud_platforms) == 1:
+ cloud_platform = cloud_platforms.pop()
+
+ if cloud_platform not in PROVIDERS:
+            raise ApplicationError('Target %s aliases contain unknown cloud platform: %s' % (target.name, cloud_platform))
+
+ return cloud_platform
+
+    raise ApplicationError('Target %s aliases contain multiple cloud platforms: %s' % (target.name, ', '.join(sorted(cloud_platforms))))
+
+
+def get_cloud_providers(args, targets=None):
+ """
+ :type args: IntegrationConfig
+ :type targets: tuple[IntegrationTarget] | None
+ :rtype: list[CloudProvider]
+ """
+ return [PROVIDERS[p](args) for p in get_cloud_platforms(args, targets)]
+
+
+def get_cloud_environment(args, target):
+ """
+ :type args: IntegrationConfig
+ :type target: IntegrationTarget
+ :rtype: CloudEnvironment
+ """
+ cloud_platform = get_cloud_platform(target)
+
+ if not cloud_platform:
+ return None
+
+ return ENVIRONMENTS[cloud_platform](args)
+
+
+def cloud_filter(args, targets):
+ """
+ :type args: IntegrationConfig
+ :type targets: tuple[IntegrationTarget]
+ :return: list[str]
+ """
+ if args.metadata.cloud_config is not None:
+ return [] # cloud filter already performed prior to delegation
+
+ exclude = []
+
+ for provider in get_cloud_providers(args, targets):
+ provider.filter(targets, exclude)
+
+ return exclude
+
+
+def cloud_init(args, targets):
+ """
+ :type args: IntegrationConfig
+ :type targets: tuple[IntegrationTarget]
+ """
+ if args.metadata.cloud_config is not None:
+ return # cloud configuration already established prior to delegation
+
+ args.metadata.cloud_config = {}
+
+ results = {}
+
+ for provider in get_cloud_providers(args, targets):
+ args.metadata.cloud_config[provider.platform] = {}
+
+ start_time = time.time()
+ provider.setup()
+ end_time = time.time()
+
+ results[provider.platform] = dict(
+ platform=provider.platform,
+ setup_seconds=int(end_time - start_time),
+ targets=[target.name for target in targets],
+ )
+
+ if not args.explain and results:
+ result_name = '%s-%s.json' % (
+ args.command, re.sub(r'[^0-9]', '-', str(datetime.datetime.utcnow().replace(microsecond=0))))
+
+ data = dict(
+ clouds=results,
+ )
+
+ write_json_test_results(ResultType.DATA, result_name, data)
+
+
+class CloudBase(ABC):
+ """Base class for cloud plugins."""
+ __metaclass__ = abc.ABCMeta
+
+ _CONFIG_PATH = 'config_path'
+ _RESOURCE_PREFIX = 'resource_prefix'
+ _MANAGED = 'managed'
+ _SETUP_EXECUTED = 'setup_executed'
+
+ def __init__(self, args):
+ """
+ :type args: IntegrationConfig
+ """
+ self.args = args
+ self.platform = self.__module__.split('.')[-1]
+
+ def config_callback(files): # type: (t.List[t.Tuple[str, str]]) -> None
+ """Add the config file to the payload file list."""
+ if self._get_cloud_config(self._CONFIG_PATH, ''):
+ pair = (self.config_path, os.path.relpath(self.config_path, data_context().content.root))
+
+ if pair not in files:
+ display.info('Including %s config: %s -> %s' % (self.platform, pair[0], pair[1]), verbosity=3)
+ files.append(pair)
+
+ data_context().register_payload_callback(config_callback)
+
+ @property
+ def setup_executed(self):
+ """
+ :rtype: bool
+ """
+ return self._get_cloud_config(self._SETUP_EXECUTED, False)
+
+ @setup_executed.setter
+ def setup_executed(self, value):
+ """
+ :type value: bool
+ """
+ self._set_cloud_config(self._SETUP_EXECUTED, value)
+
+ @property
+ def config_path(self):
+ """
+ :rtype: str
+ """
+ return os.path.join(data_context().content.root, self._get_cloud_config(self._CONFIG_PATH))
+
+ @config_path.setter
+ def config_path(self, value):
+ """
+ :type value: str
+ """
+ self._set_cloud_config(self._CONFIG_PATH, value)
+
+ @property
+ def resource_prefix(self):
+ """
+ :rtype: str
+ """
+ return self._get_cloud_config(self._RESOURCE_PREFIX)
+
+ @resource_prefix.setter
+ def resource_prefix(self, value):
+ """
+ :type value: str
+ """
+ self._set_cloud_config(self._RESOURCE_PREFIX, value)
+
+ @property
+ def managed(self):
+ """
+ :rtype: bool
+ """
+ return self._get_cloud_config(self._MANAGED)
+
+ @managed.setter
+ def managed(self, value):
+ """
+ :type value: bool
+ """
+ self._set_cloud_config(self._MANAGED, value)
+
+ def _get_cloud_config(self, key, default=None):
+ """
+ :type key: str
+ :type default: str | int | bool | None
+ :rtype: str | int | bool
+ """
+ if default is not None:
+ return self.args.metadata.cloud_config[self.platform].get(key, default)
+
+ return self.args.metadata.cloud_config[self.platform][key]
+
+ def _set_cloud_config(self, key, value):
+ """
+ :type key: str
+ :type value: str | int | bool
+ """
+ self.args.metadata.cloud_config[self.platform][key] = value
+
+
+class CloudProvider(CloudBase):
+ """Base class for cloud provider plugins. Sets up cloud resources before delegation."""
+ def __init__(self, args, config_extension='.ini'):
+ """
+ :type args: IntegrationConfig
+ :type config_extension: str
+ """
+ super(CloudProvider, self).__init__(args)
+
+ self.ci_provider = get_ci_provider()
+ self.remove_config = False
+ self.config_static_name = 'cloud-config-%s%s' % (self.platform, config_extension)
+ self.config_static_path = os.path.join(data_context().content.integration_path, self.config_static_name)
+ self.config_template_path = os.path.join(ANSIBLE_TEST_CONFIG_ROOT, '%s.template' % self.config_static_name)
+ self.config_extension = config_extension
+
+ def filter(self, targets, exclude):
+ """Filter out the cloud tests when the necessary config and resources are not available.
+ :type targets: tuple[TestTarget]
+ :type exclude: list[str]
+ """
+ skip = 'cloud/%s/' % self.platform
+ skipped = [target.name for target in targets if skip in target.aliases]
+
+ if skipped:
+ exclude.append(skip)
+ display.warning('Excluding tests marked "%s" which require config (see "%s"): %s'
+ % (skip.rstrip('/'), self.config_template_path, ', '.join(skipped)))
+
+ def setup(self):
+ """Setup the cloud resource before delegation and register a cleanup callback."""
+ self.resource_prefix = self.ci_provider.generate_resource_prefix()
+
+ atexit.register(self.cleanup)
+
+ def get_remote_ssh_options(self):
+ """Get any additional options needed when delegating tests to a remote instance via SSH.
+ :rtype: list[str]
+ """
+ return []
+
+ def get_docker_run_options(self):
+ """Get any additional options needed when delegating tests to a docker container.
+ :rtype: list[str]
+ """
+ return []
+
+ def cleanup(self):
+ """Clean up the cloud resource and any temporary configuration files after tests complete."""
+ if self.remove_config:
+ os.remove(self.config_path)
+
+ def _use_static_config(self):
+ """
+ :rtype: bool
+ """
+ if os.path.isfile(self.config_static_path):
+ display.info('Using existing %s cloud config: %s' % (self.platform, self.config_static_path), verbosity=1)
+ self.config_path = self.config_static_path
+ static = True
+ else:
+ static = False
+
+ self.managed = not static
+
+ return static
+
+ def _write_config(self, content):
+ """
+ :type content: str
+ """
+ prefix = '%s-' % os.path.splitext(os.path.basename(self.config_static_path))[0]
+
+ with tempfile.NamedTemporaryFile(dir=data_context().content.integration_path, prefix=prefix, suffix=self.config_extension, delete=False) as config_fd:
+ filename = os.path.join(data_context().content.integration_path, os.path.basename(config_fd.name))
+
+ self.config_path = filename
+ self.remove_config = True
+
+ display.info('>>> Config: %s\n%s' % (filename, content.strip()), verbosity=3)
+
+ config_fd.write(to_bytes(content))
+ config_fd.flush()
+
+ def _read_config_template(self):
+ """
+ :rtype: str
+ """
+ lines = read_text_file(self.config_template_path).splitlines()
+ lines = [line for line in lines if not line.startswith('#')]
+ config = '\n'.join(lines).strip() + '\n'
+ return config
+
+ @staticmethod
+ def _populate_config_template(template, values):
+ """
+ :type template: str
+ :type values: dict[str, str]
+ :rtype: str
+ """
+ for key in sorted(values):
+ value = values[key]
+ template = template.replace('@%s' % key, value)
+
+ return template
+
+
+class CloudEnvironment(CloudBase):
+ """Base class for cloud environment plugins. Updates integration test environment after delegation."""
+ def setup_once(self):
+ """Run setup if it has not already been run."""
+ if self.setup_executed:
+ return
+
+ self.setup()
+ self.setup_executed = True
+
+ def setup(self):
+ """Setup which should be done once per environment instead of once per test target."""
+
+ @abc.abstractmethod
+ def get_environment_config(self):
+ """
+ :rtype: CloudEnvironmentConfig
+ """
+
+ def on_failure(self, target, tries):
+ """
+ :type target: IntegrationTarget
+ :type tries: int
+ """
+
+
+class CloudEnvironmentConfig:
+ """Configuration for the environment."""
+ def __init__(self, env_vars=None, ansible_vars=None, module_defaults=None, callback_plugins=None):
+ """
+ :type env_vars: dict[str, str] | None
+ :type ansible_vars: dict[str, any] | None
+ :type module_defaults: dict[str, dict[str, any]] | None
+ :type callback_plugins: list[str] | None
+ """
+ self.env_vars = env_vars
+ self.ansible_vars = ansible_vars
+ self.module_defaults = module_defaults
+ self.callback_plugins = callback_plugins
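Taken together, this module defines a small registry pattern: provider and environment classes are collected into the PROVIDERS and ENVIRONMENTS dictionaries keyed by platform name (derived from the plugin module name), and get_cloud_providers/get_cloud_environment instantiate them on demand. A rough standalone sketch of that idea, with hypothetical names and without the ansible_test imports:

# Illustrative only: mirrors the registry shape, not the real ansible_test plugin loader.
PROVIDERS = {}

class CloudProviderBase:
    platform = None  # the real code derives this from the plugin module name

def register(cls):
    """Register a provider class under its platform name (stand-in for load_plugins)."""
    PROVIDERS[cls.platform] = cls
    return cls

@register
class ExampleProvider(CloudProviderBase):
    platform = 'example'  # hypothetical platform name

    def setup(self):
        print('setting up %s resources before delegation' % self.platform)

provider = PROVIDERS['example']()  # same lookup shape as PROVIDERS[p](args)
provider.setup()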
diff --git a/test/lib/ansible_test/_internal/cloud/acme.py b/test/lib/ansible_test/_internal/cloud/acme.py
new file mode 100644
index 00000000..3d0ace24
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cloud/acme.py
@@ -0,0 +1,193 @@
+"""ACME plugin for integration tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import time
+
+from . import (
+ CloudProvider,
+ CloudEnvironment,
+ CloudEnvironmentConfig,
+)
+
+from ..util import (
+ find_executable,
+ display,
+ ApplicationError,
+ SubprocessError,
+)
+
+from ..http import (
+ HttpClient,
+)
+
+from ..docker_util import (
+ docker_run,
+ docker_rm,
+ docker_inspect,
+ docker_pull,
+ get_docker_container_id,
+ get_docker_hostname,
+ get_docker_container_ip,
+ get_docker_preferred_network_name,
+ is_docker_user_defined_network,
+)
+
+
+class ACMEProvider(CloudProvider):
+ """ACME plugin. Sets up cloud resources for tests."""
+ DOCKER_SIMULATOR_NAME = 'acme-simulator'
+
+ def __init__(self, args):
+ """
+ :type args: TestConfig
+ """
+ super(ACMEProvider, self).__init__(args)
+
+ # The simulator must be pinned to a specific version to guarantee CI passes with the version used.
+ if os.environ.get('ANSIBLE_ACME_CONTAINER'):
+ self.image = os.environ.get('ANSIBLE_ACME_CONTAINER')
+ else:
+ self.image = 'quay.io/ansible/acme-test-container:2.0.0'
+ self.container_name = ''
+
+ def _wait_for_service(self, protocol, acme_host, port, local_part, name):
+ """Wait for an endpoint to accept connections."""
+ if self.args.explain:
+ return
+
+ client = HttpClient(self.args, always=True, insecure=True)
+ endpoint = '%s://%s:%d/%s' % (protocol, acme_host, port, local_part)
+
+ for dummy in range(1, 30):
+ display.info('Waiting for %s: %s' % (name, endpoint), verbosity=1)
+
+ try:
+ client.get(endpoint)
+ return
+ except SubprocessError:
+ pass
+
+ time.sleep(1)
+
+ raise ApplicationError('Timeout waiting for %s.' % name)
+
+ def filter(self, targets, exclude):
+ """Filter out the cloud tests when the necessary config and resources are not available.
+ :type targets: tuple[TestTarget]
+ :type exclude: list[str]
+ """
+ docker = find_executable('docker', required=False)
+
+ if docker:
+ return
+
+ skip = 'cloud/%s/' % self.platform
+ skipped = [target.name for target in targets if skip in target.aliases]
+
+ if skipped:
+ exclude.append(skip)
+ display.warning('Excluding tests marked "%s" which require the "docker" command: %s'
+ % (skip.rstrip('/'), ', '.join(skipped)))
+
+ def setup(self):
+ """Setup the cloud resource before delegation and register a cleanup callback."""
+ super(ACMEProvider, self).setup()
+
+ if self._use_static_config():
+ self._setup_static()
+ else:
+ self._setup_dynamic()
+
+ def get_docker_run_options(self):
+ """Get any additional options needed when delegating tests to a docker container.
+ :rtype: list[str]
+ """
+ network = get_docker_preferred_network_name(self.args)
+
+ if self.managed and not is_docker_user_defined_network(network):
+ return ['--link', self.DOCKER_SIMULATOR_NAME]
+
+ return []
+
+ def cleanup(self):
+ """Clean up the cloud resource and any temporary configuration files after tests complete."""
+ if self.container_name:
+ docker_rm(self.args, self.container_name)
+
+ super(ACMEProvider, self).cleanup()
+
+ def _setup_dynamic(self):
+ """Create a ACME test container using docker."""
+ container_id = get_docker_container_id()
+
+ self.container_name = self.DOCKER_SIMULATOR_NAME
+
+ results = docker_inspect(self.args, self.container_name)
+
+ if results and not results[0].get('State', {}).get('Running'):
+ docker_rm(self.args, self.container_name)
+ results = []
+
+ if results:
+ display.info('Using the existing ACME docker test container.', verbosity=1)
+ else:
+ display.info('Starting a new ACME docker test container.', verbosity=1)
+
+ if not container_id:
+ # publish the simulator ports when not running inside docker
+ publish_ports = [
+ '-p', '5000:5000', # control port for flask app in container
+ '-p', '14000:14000', # Pebble ACME CA
+ ]
+ else:
+ publish_ports = []
+
+ if not os.environ.get('ANSIBLE_ACME_CONTAINER'):
+ docker_pull(self.args, self.image)
+
+ docker_run(
+ self.args,
+ self.image,
+ ['-d', '--name', self.container_name] + publish_ports,
+ )
+
+ if self.args.docker:
+ acme_host = self.DOCKER_SIMULATOR_NAME
+ elif container_id:
+ acme_host = self._get_simulator_address()
+ display.info('Found ACME test container address: %s' % acme_host, verbosity=1)
+ else:
+ acme_host = get_docker_hostname()
+
+ if container_id:
+ acme_host_ip = self._get_simulator_address()
+ else:
+ acme_host_ip = get_docker_hostname()
+
+ self._set_cloud_config('acme_host', acme_host)
+
+ self._wait_for_service('http', acme_host_ip, 5000, '', 'ACME controller')
+ self._wait_for_service('https', acme_host_ip, 14000, 'dir', 'ACME CA endpoint')
+
+ def _get_simulator_address(self):
+ return get_docker_container_ip(self.args, self.container_name)
+
+ def _setup_static(self):
+ raise NotImplementedError()
+
+
+class ACMEEnvironment(CloudEnvironment):
+ """ACME environment plugin. Updates integration test environment after delegation."""
+ def get_environment_config(self):
+ """
+ :rtype: CloudEnvironmentConfig
+ """
+ ansible_vars = dict(
+ acme_host=self._get_cloud_config('acme_host'),
+ )
+
+ return CloudEnvironmentConfig(
+ ansible_vars=ansible_vars,
+ )
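The _wait_for_service helper above is a bounded polling loop: build the endpoint URL, try an HTTP GET, and retry up to roughly thirty times with a one second delay before giving up. A standalone approximation using only the standard library (the real code goes through ansible_test's HttpClient, which can also skip certificate verification for the https check):

import time
import urllib.request

def wait_for_http(url, attempts=30, delay=1):
    """Poll a URL until it answers or the attempt budget is exhausted."""
    for _ in range(attempts):
        try:
            urllib.request.urlopen(url, timeout=5)
            return True
        except OSError:  # URLError is a subclass of OSError
            time.sleep(delay)
    raise RuntimeError('Timeout waiting for %s' % url)

# Hypothetical local endpoint matching the ACME controller port used above.
# wait_for_http('http://127.0.0.1:5000/')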
diff --git a/test/lib/ansible_test/_internal/cloud/aws.py b/test/lib/ansible_test/_internal/cloud/aws.py
new file mode 100644
index 00000000..190ef488
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cloud/aws.py
@@ -0,0 +1,124 @@
+"""AWS plugin for integration tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ..util import (
+ ApplicationError,
+ display,
+ ConfigParser,
+)
+
+from . import (
+ CloudProvider,
+ CloudEnvironment,
+ CloudEnvironmentConfig,
+)
+
+from ..core_ci import (
+ AnsibleCoreCI,
+)
+
+
+class AwsCloudProvider(CloudProvider):
+ """AWS cloud provider plugin. Sets up cloud resources before delegation."""
+ def filter(self, targets, exclude):
+ """Filter out the cloud tests when the necessary config and resources are not available.
+ :type targets: tuple[TestTarget]
+ :type exclude: list[str]
+ """
+ if os.path.isfile(self.config_static_path):
+ return
+
+ aci = self._create_ansible_core_ci()
+
+ if aci.available:
+ return
+
+ super(AwsCloudProvider, self).filter(targets, exclude)
+
+ def setup(self):
+ """Setup the cloud resource before delegation and register a cleanup callback."""
+ super(AwsCloudProvider, self).setup()
+
+ aws_config_path = os.path.expanduser('~/.aws')
+
+ if os.path.exists(aws_config_path) and not self.args.docker and not self.args.remote:
+ raise ApplicationError('Rename "%s" or use the --docker or --remote option to isolate tests.' % aws_config_path)
+
+ if not self._use_static_config():
+ self._setup_dynamic()
+
+ def _setup_dynamic(self):
+ """Request AWS credentials through the Ansible Core CI service."""
+ display.info('Provisioning %s cloud environment.' % self.platform, verbosity=1)
+
+ config = self._read_config_template()
+
+ aci = self._create_ansible_core_ci()
+
+ response = aci.start()
+
+ if not self.args.explain:
+ credentials = response['aws']['credentials']
+
+ values = dict(
+ ACCESS_KEY=credentials['access_key'],
+ SECRET_KEY=credentials['secret_key'],
+ SECURITY_TOKEN=credentials['session_token'],
+ REGION='us-east-1',
+ )
+
+ display.sensitive.add(values['SECRET_KEY'])
+ display.sensitive.add(values['SECURITY_TOKEN'])
+
+ config = self._populate_config_template(config, values)
+
+ self._write_config(config)
+
+ def _create_ansible_core_ci(self):
+ """
+ :rtype: AnsibleCoreCI
+ """
+ return AnsibleCoreCI(self.args, 'aws', 'sts', persist=False, stage=self.args.remote_stage, provider=self.args.remote_provider)
+
+
+class AwsCloudEnvironment(CloudEnvironment):
+ """AWS cloud environment plugin. Updates integration test environment after delegation."""
+ def get_environment_config(self):
+ """
+ :rtype: CloudEnvironmentConfig
+ """
+ parser = ConfigParser()
+ parser.read(self.config_path)
+
+ ansible_vars = dict(
+ resource_prefix=self.resource_prefix,
+ )
+
+ ansible_vars.update(dict(parser.items('default')))
+
+ display.sensitive.add(ansible_vars.get('aws_secret_key'))
+ display.sensitive.add(ansible_vars.get('security_token'))
+
+ if 'aws_cleanup' not in ansible_vars:
+ ansible_vars['aws_cleanup'] = not self.managed
+
+ env_vars = {'ANSIBLE_DEBUG_BOTOCORE_LOGS': 'True'}
+
+ return CloudEnvironmentConfig(
+ env_vars=env_vars,
+ ansible_vars=ansible_vars,
+ callback_plugins=['aws_resource_actions'],
+ )
+
+ def on_failure(self, target, tries):
+ """
+ :type target: TestTarget
+ :type tries: int
+ """
+ if not tries and self.managed:
+ display.notice('If %s failed due to permissions, the IAM test policy may need to be updated. '
+ 'https://docs.ansible.com/ansible/devel/dev_guide/platforms/aws_guidelines.html#aws-permissions-for-integration-tests.'
+ % target.name)
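The AWS provider fills its config through the generic template mechanism defined in the base class: comment lines are stripped from the *.template file and each @KEY placeholder is replaced with a value. A small sketch of that substitution with dummy values (the template text below is illustrative, not the shipped template):

def populate_config_template(template, values):
    """Replace @KEY placeholders, mirroring CloudProvider._populate_config_template."""
    for key in sorted(values):
        template = template.replace('@%s' % key, values[key])
    return template

template = '\n'.join([
    '[default]',
    'aws_access_key: @ACCESS_KEY',
    'aws_secret_key: @SECRET_KEY',
    'security_token: @SECURITY_TOKEN',
    'aws_region: @REGION',
])

# Dummy credentials for illustration only.
print(populate_config_template(template, dict(
    ACCESS_KEY='AKIAEXAMPLE',
    SECRET_KEY='example-secret',
    SECURITY_TOKEN='example-token',
    REGION='us-east-1',
)))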
diff --git a/test/lib/ansible_test/_internal/cloud/azure.py b/test/lib/ansible_test/_internal/cloud/azure.py
new file mode 100644
index 00000000..02465eed
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cloud/azure.py
@@ -0,0 +1,213 @@
+"""Azure plugin for integration tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ..io import (
+ read_text_file,
+)
+
+from ..util import (
+ ApplicationError,
+ display,
+ ConfigParser,
+)
+
+from . import (
+ CloudProvider,
+ CloudEnvironment,
+ CloudEnvironmentConfig,
+)
+
+from ..http import (
+ HttpClient,
+ urlparse,
+ urlunparse,
+ parse_qs,
+)
+
+from ..core_ci import (
+ AnsibleCoreCI,
+)
+
+
+class AzureCloudProvider(CloudProvider):
+ """Azure cloud provider plugin. Sets up cloud resources before delegation."""
+ SHERLOCK_CONFIG_PATH = os.path.expanduser('~/.ansible-sherlock-ci.cfg')
+
+ def __init__(self, args):
+ """
+ :type args: TestConfig
+ """
+ super(AzureCloudProvider, self).__init__(args)
+
+ self.aci = None
+
+ def filter(self, targets, exclude):
+ """Filter out the cloud tests when the necessary config and resources are not available.
+ :type targets: tuple[TestTarget]
+ :type exclude: list[str]
+ """
+ if os.path.isfile(self.config_static_path):
+ return
+
+ aci = self._create_ansible_core_ci()
+
+ if aci.available:
+ return
+
+ if os.path.isfile(self.SHERLOCK_CONFIG_PATH):
+ return
+
+ super(AzureCloudProvider, self).filter(targets, exclude)
+
+ def setup(self):
+ """Setup the cloud resource before delegation and register a cleanup callback."""
+ super(AzureCloudProvider, self).setup()
+
+ if not self._use_static_config():
+ self._setup_dynamic()
+
+ get_config(self.config_path) # check required variables
+
+ def cleanup(self):
+ """Clean up the cloud resource and any temporary configuration files after tests complete."""
+ if self.aci:
+ self.aci.stop()
+
+ super(AzureCloudProvider, self).cleanup()
+
+ def _setup_dynamic(self):
+ """Request Azure credentials through Sherlock."""
+ display.info('Provisioning %s cloud environment.' % self.platform, verbosity=1)
+
+ config = self._read_config_template()
+ response = {}
+
+ if os.path.isfile(self.SHERLOCK_CONFIG_PATH):
+ sherlock_uri = read_text_file(self.SHERLOCK_CONFIG_PATH).splitlines()[0].strip() + '&rgcount=2'
+
+ parts = urlparse(sherlock_uri)
+ query_string = parse_qs(parts.query)
+ base_uri = urlunparse(parts[:4] + ('', ''))
+
+ if 'code' not in query_string:
+ example_uri = 'https://example.azurewebsites.net/api/sandbox-provisioning'
+ raise ApplicationError('The Sherlock URI must include the API key in the query string. Example: %s?code=xxx' % example_uri)
+
+ display.info('Initializing azure/sherlock from: %s' % base_uri, verbosity=1)
+
+ http = HttpClient(self.args)
+ result = http.get(sherlock_uri)
+
+ display.info('Started azure/sherlock from: %s' % base_uri, verbosity=1)
+
+ if not self.args.explain:
+ response = result.json()
+ else:
+ aci = self._create_ansible_core_ci()
+
+ aci_result = aci.start()
+
+ if not self.args.explain:
+ response = aci_result['azure']
+ self.aci = aci
+
+ if not self.args.explain:
+ values = dict(
+ AZURE_CLIENT_ID=response['clientId'],
+ AZURE_SECRET=response['clientSecret'],
+ AZURE_SUBSCRIPTION_ID=response['subscriptionId'],
+ AZURE_TENANT=response['tenantId'],
+ RESOURCE_GROUP=response['resourceGroupNames'][0],
+ RESOURCE_GROUP_SECONDARY=response['resourceGroupNames'][1],
+ )
+
+ display.sensitive.add(values['AZURE_SECRET'])
+
+ config = '\n'.join('%s: %s' % (key, values[key]) for key in sorted(values))
+
+ config = '[default]\n' + config
+
+ self._write_config(config)
+
+ def _create_ansible_core_ci(self):
+ """
+ :rtype: AnsibleCoreCI
+ """
+ return AnsibleCoreCI(self.args, 'azure', 'azure', persist=False, stage=self.args.remote_stage, provider=self.args.remote_provider)
+
+
+class AzureCloudEnvironment(CloudEnvironment):
+ """Azure cloud environment plugin. Updates integration test environment after delegation."""
+ def get_environment_config(self):
+ """
+ :rtype: CloudEnvironmentConfig
+ """
+ env_vars = get_config(self.config_path)
+
+ display.sensitive.add(env_vars.get('AZURE_SECRET'))
+ display.sensitive.add(env_vars.get('AZURE_PASSWORD'))
+
+ ansible_vars = dict(
+ resource_prefix=self.resource_prefix,
+ )
+
+ ansible_vars.update(dict((key.lower(), value) for key, value in env_vars.items()))
+
+ return CloudEnvironmentConfig(
+ env_vars=env_vars,
+ ansible_vars=ansible_vars,
+ )
+
+ def on_failure(self, target, tries):
+ """
+ :type target: TestTarget
+ :type tries: int
+ """
+ if not tries and self.managed:
+ display.notice('If %s failed due to permissions, the test policy may need to be updated. '
+ 'For help, consult @mattclay or @gundalow on GitHub or #ansible-devel on IRC.' % target.name)
+
+
+def get_config(config_path):
+ """
+ :type config_path: str
+ :rtype: dict[str, str]
+ """
+ parser = ConfigParser()
+ parser.read(config_path)
+
+ config = dict((key.upper(), value) for key, value in parser.items('default'))
+
+ rg_vars = (
+ 'RESOURCE_GROUP',
+ 'RESOURCE_GROUP_SECONDARY',
+ )
+
+ sp_vars = (
+ 'AZURE_CLIENT_ID',
+ 'AZURE_SECRET',
+ 'AZURE_SUBSCRIPTION_ID',
+ 'AZURE_TENANT',
+ )
+
+ ad_vars = (
+ 'AZURE_AD_USER',
+ 'AZURE_PASSWORD',
+ 'AZURE_SUBSCRIPTION_ID',
+ )
+
+ rg_ok = all(var in config for var in rg_vars)
+ sp_ok = all(var in config for var in sp_vars)
+ ad_ok = all(var in config for var in ad_vars)
+
+ if not rg_ok:
+ raise ApplicationError('Resource groups must be defined with: %s' % ', '.join(sorted(rg_vars)))
+
+ if not sp_ok and not ad_ok:
+ raise ApplicationError('Credentials must be defined using either:\nService Principal: %s\nActive Directory: %s' % (
+ ', '.join(sorted(sp_vars)), ', '.join(sorted(ad_vars))))
+
+ return config
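get_config above upper-cases the keys from the [default] section and then insists on both resource group names plus either a service principal or an Active Directory credential set. A minimal sketch of a config that would satisfy the service principal branch (all values are placeholders, and the stock configparser is used here instead of the internal ConfigParser wrapper):

import io
from configparser import ConfigParser

sample = """
[default]
azure_client_id: 00000000-0000-0000-0000-000000000000
azure_secret: placeholder-secret
azure_subscription_id: 00000000-0000-0000-0000-000000000000
azure_tenant: 00000000-0000-0000-0000-000000000000
resource_group: ansible-test-rg-one
resource_group_secondary: ansible-test-rg-two
"""

parser = ConfigParser()
parser.read_file(io.StringIO(sample))

# Same normalization as get_config: keys are upper-cased before validation.
config = dict((key.upper(), value) for key, value in parser.items('default'))
print(sorted(config))  # keys match the rg_vars and sp_vars tuples checked above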
diff --git a/test/lib/ansible_test/_internal/cloud/cloudscale.py b/test/lib/ansible_test/_internal/cloud/cloudscale.py
new file mode 100644
index 00000000..8e5885b2
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cloud/cloudscale.py
@@ -0,0 +1,80 @@
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Gaudenz Steinlin <gaudenz.steinlin@cloudscale.ch>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+"""Cloudscale plugin for integration tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from . import (
+ CloudProvider,
+ CloudEnvironment,
+ CloudEnvironmentConfig,
+)
+
+from ..util import ConfigParser, display
+
+
+class CloudscaleCloudProvider(CloudProvider):
+ """Cloudscale cloud provider plugin. Sets up cloud resources before
+ delegation.
+ """
+
+ def __init__(self, args):
+ """
+ :type args: TestConfig
+ """
+ super(CloudscaleCloudProvider, self).__init__(args)
+
+ def filter(self, targets, exclude):
+ """Filter out the cloud tests when the necessary config and resources are not available.
+ :type targets: tuple[TestTarget]
+ :type exclude: list[str]
+ """
+ if os.path.isfile(self.config_static_path):
+ return
+
+ super(CloudscaleCloudProvider, self).filter(targets, exclude)
+
+ def setup(self):
+ """Setup the cloud resource before delegation and register a cleanup callback."""
+ super(CloudscaleCloudProvider, self).setup()
+
+ if os.path.isfile(self.config_static_path):
+ display.info('Using existing %s cloud config: %s'
+ % (self.platform, self.config_static_path),
+ verbosity=1)
+ self.config_path = self.config_static_path
+ self.managed = False
+
+
+class CloudscaleCloudEnvironment(CloudEnvironment):
+ """Cloudscale cloud environment plugin. Updates integration test environment
+ after delegation.
+ """
+ def get_environment_config(self):
+ """
+ :rtype: CloudEnvironmentConfig
+ """
+ parser = ConfigParser()
+ parser.read(self.config_path)
+
+ env_vars = dict(
+ CLOUDSCALE_API_TOKEN=parser.get('default', 'cloudscale_api_token'),
+ )
+
+ display.sensitive.add(env_vars['CLOUDSCALE_API_TOKEN'])
+
+ ansible_vars = dict(
+ cloudscale_resource_prefix=self.resource_prefix,
+ )
+
+ ansible_vars.update(dict((key.lower(), value) for key, value in env_vars.items()))
+
+ return CloudEnvironmentConfig(
+ env_vars=env_vars,
+ ansible_vars=ansible_vars,
+ )
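Like several of the simpler environments, the cloudscale plugin exposes the same credential twice: once upper-cased as a process environment variable and once lower-cased as an Ansible variable, alongside the generated resource prefix. A standalone sketch of that merge (token and prefix values are placeholders):

env_vars = dict(
    CLOUDSCALE_API_TOKEN='placeholder-token',  # read from the [default] section in the real plugin
)

ansible_vars = dict(
    cloudscale_resource_prefix='ansible-test-123',  # normally the CI-generated resource prefix
)

# Same merge as in get_environment_config: lower-case the env var names for Ansible.
ansible_vars.update(dict((key.lower(), value) for key, value in env_vars.items()))

print(ansible_vars)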
diff --git a/test/lib/ansible_test/_internal/cloud/cs.py b/test/lib/ansible_test/_internal/cloud/cs.py
new file mode 100644
index 00000000..d028d9c4
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cloud/cs.py
@@ -0,0 +1,300 @@
+"""CloudStack plugin for integration tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+import re
+import time
+
+from . import (
+ CloudProvider,
+ CloudEnvironment,
+ CloudEnvironmentConfig,
+)
+
+from ..util import (
+ find_executable,
+ ApplicationError,
+ display,
+ SubprocessError,
+ ConfigParser,
+)
+
+from ..http import (
+ HttpClient,
+ HttpError,
+ urlparse,
+)
+
+from ..docker_util import (
+ docker_run,
+ docker_rm,
+ docker_inspect,
+ docker_pull,
+ docker_network_inspect,
+ docker_exec,
+ get_docker_container_id,
+ get_docker_preferred_network_name,
+ get_docker_hostname,
+ is_docker_user_defined_network,
+)
+
+
+class CsCloudProvider(CloudProvider):
+ """CloudStack cloud provider plugin. Sets up cloud resources before delegation."""
+ DOCKER_SIMULATOR_NAME = 'cloudstack-sim'
+
+ def __init__(self, args):
+ """
+ :type args: TestConfig
+ """
+ super(CsCloudProvider, self).__init__(args)
+
+ # The simulator must be pinned to a specific version to guarantee CI passes with the version used.
+ self.image = 'quay.io/ansible/cloudstack-test-container:1.2.0'
+ self.container_name = ''
+ self.endpoint = ''
+ self.host = ''
+ self.port = 0
+
+ def filter(self, targets, exclude):
+ """Filter out the cloud tests when the necessary config and resources are not available.
+ :type targets: tuple[TestTarget]
+ :type exclude: list[str]
+ """
+ if os.path.isfile(self.config_static_path):
+ return
+
+ docker = find_executable('docker', required=False)
+
+ if docker:
+ return
+
+ skip = 'cloud/%s/' % self.platform
+ skipped = [target.name for target in targets if skip in target.aliases]
+
+ if skipped:
+ exclude.append(skip)
+ display.warning('Excluding tests marked "%s" which require the "docker" command or config (see "%s"): %s'
+ % (skip.rstrip('/'), self.config_template_path, ', '.join(skipped)))
+
+ def setup(self):
+ """Setup the cloud resource before delegation and register a cleanup callback."""
+ super(CsCloudProvider, self).setup()
+
+ if self._use_static_config():
+ self._setup_static()
+ else:
+ self._setup_dynamic()
+
+ def get_remote_ssh_options(self):
+ """Get any additional options needed when delegating tests to a remote instance via SSH.
+ :rtype: list[str]
+ """
+ if self.managed:
+ return ['-R', '8888:%s:8888' % get_docker_hostname()]
+
+ return []
+
+ def get_docker_run_options(self):
+ """Get any additional options needed when delegating tests to a docker container.
+ :rtype: list[str]
+ """
+ network = get_docker_preferred_network_name(self.args)
+
+ if self.managed and not is_docker_user_defined_network(network):
+ return ['--link', self.DOCKER_SIMULATOR_NAME]
+
+ return []
+
+ def cleanup(self):
+ """Clean up the cloud resource and any temporary configuration files after tests complete."""
+ if self.container_name:
+ if self.ci_provider.code:
+ docker_rm(self.args, self.container_name)
+ elif not self.args.explain:
+ display.notice('Remember to run `docker rm -f %s` when finished testing.' % self.container_name)
+
+ super(CsCloudProvider, self).cleanup()
+
+ def _setup_static(self):
+ """Configure CloudStack tests for use with static configuration."""
+ parser = ConfigParser()
+ parser.read(self.config_static_path)
+
+ self.endpoint = parser.get('cloudstack', 'endpoint')
+
+ parts = urlparse(self.endpoint)
+
+ self.host = parts.hostname
+
+ if not self.host:
+ raise ApplicationError('Could not determine host from endpoint: %s' % self.endpoint)
+
+ if parts.port:
+ self.port = parts.port
+ elif parts.scheme == 'http':
+ self.port = 80
+ elif parts.scheme == 'https':
+ self.port = 443
+ else:
+ raise ApplicationError('Could not determine port from endpoint: %s' % self.endpoint)
+
+ display.info('Read cs host "%s" and port %d from config: %s' % (self.host, self.port, self.config_static_path), verbosity=1)
+
+ self._wait_for_service()
+
+ def _setup_dynamic(self):
+ """Create a CloudStack simulator using docker."""
+ config = self._read_config_template()
+
+ self.container_name = self.DOCKER_SIMULATOR_NAME
+
+ results = docker_inspect(self.args, self.container_name)
+
+ if results and not results[0]['State']['Running']:
+ docker_rm(self.args, self.container_name)
+ results = []
+
+ if results:
+ display.info('Using the existing CloudStack simulator docker container.', verbosity=1)
+ else:
+ display.info('Starting a new CloudStack simulator docker container.', verbosity=1)
+ docker_pull(self.args, self.image)
+ docker_run(self.args, self.image, ['-d', '-p', '8888:8888', '--name', self.container_name])
+
+ # apply work-around for OverlayFS issue
+ # https://github.com/docker/for-linux/issues/72#issuecomment-319904698
+ docker_exec(self.args, self.container_name, ['find', '/var/lib/mysql', '-type', 'f', '-exec', 'touch', '{}', ';'])
+
+ if not self.args.explain:
+ display.notice('The CloudStack simulator will probably be ready in 2 - 4 minutes.')
+
+ container_id = get_docker_container_id()
+
+ if container_id:
+ self.host = self._get_simulator_address()
+ display.info('Found CloudStack simulator container address: %s' % self.host, verbosity=1)
+ else:
+ self.host = get_docker_hostname()
+
+ self.port = 8888
+ self.endpoint = 'http://%s:%d' % (self.host, self.port)
+
+ self._wait_for_service()
+
+ if self.args.explain:
+ values = dict(
+ HOST=self.host,
+ PORT=str(self.port),
+ )
+ else:
+ credentials = self._get_credentials()
+
+ if self.args.docker:
+ host = self.DOCKER_SIMULATOR_NAME
+ elif self.args.remote:
+ host = 'localhost'
+ else:
+ host = self.host
+
+ values = dict(
+ HOST=host,
+ PORT=str(self.port),
+ KEY=credentials['apikey'],
+ SECRET=credentials['secretkey'],
+ )
+
+ display.sensitive.add(values['SECRET'])
+
+ config = self._populate_config_template(config, values)
+
+ self._write_config(config)
+
+ def _get_simulator_address(self):
+ current_network = get_docker_preferred_network_name(self.args)
+ networks = docker_network_inspect(self.args, current_network)
+
+ try:
+ network = [network for network in networks if network['Name'] == current_network][0]
+ containers = network['Containers']
+ container = [containers[container] for container in containers if containers[container]['Name'] == self.DOCKER_SIMULATOR_NAME][0]
+ return re.sub(r'/[0-9]+$', '', container['IPv4Address'])
+ except Exception:
+ display.error('Failed to process the following docker network inspect output:\n%s' %
+ json.dumps(networks, indent=4, sort_keys=True))
+ raise
+
+ def _wait_for_service(self):
+ """Wait for the CloudStack service endpoint to accept connections."""
+ if self.args.explain:
+ return
+
+ client = HttpClient(self.args, always=True)
+ endpoint = self.endpoint
+
+ for _iteration in range(1, 30):
+ display.info('Waiting for CloudStack service: %s' % endpoint, verbosity=1)
+
+ try:
+ client.get(endpoint)
+ return
+ except SubprocessError:
+ pass
+
+ time.sleep(10)
+
+ raise ApplicationError('Timeout waiting for CloudStack service.')
+
+ def _get_credentials(self):
+ """Wait for the CloudStack simulator to return credentials.
+ :rtype: dict[str, str]
+ """
+ client = HttpClient(self.args, always=True)
+ endpoint = '%s/admin.json' % self.endpoint
+
+ for _iteration in range(1, 30):
+ display.info('Waiting for CloudStack credentials: %s' % endpoint, verbosity=1)
+
+ response = client.get(endpoint)
+
+ if response.status_code == 200:
+ try:
+ return response.json()
+ except HttpError as ex:
+ display.error(ex)
+
+ time.sleep(10)
+
+ raise ApplicationError('Timeout waiting for CloudStack credentials.')
+
+
+class CsCloudEnvironment(CloudEnvironment):
+ """CloudStack cloud environment plugin. Updates integration test environment after delegation."""
+ def get_environment_config(self):
+ """
+ :rtype: CloudEnvironmentConfig
+ """
+ parser = ConfigParser()
+ parser.read(self.config_path)
+
+ config = dict(parser.items('default'))
+
+ env_vars = dict(
+ CLOUDSTACK_ENDPOINT=config['endpoint'],
+ CLOUDSTACK_KEY=config['key'],
+ CLOUDSTACK_SECRET=config['secret'],
+ CLOUDSTACK_TIMEOUT=config['timeout'],
+ )
+
+ display.sensitive.add(env_vars['CLOUDSTACK_SECRET'])
+
+ ansible_vars = dict(
+ cs_resource_prefix=self.resource_prefix,
+ )
+
+ return CloudEnvironmentConfig(
+ env_vars=env_vars,
+ ansible_vars=ansible_vars,
+ )
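_setup_static derives the simulator host and port from the configured endpoint: an explicit port wins, otherwise the URL scheme selects 80 or 443. A standalone sketch of that derivation (the example endpoints are hypothetical):

from urllib.parse import urlparse  # the module above imports urlparse via ..http

def endpoint_host_port(endpoint):
    """Return (host, port) for an endpoint URL, defaulting the port from the scheme."""
    parts = urlparse(endpoint)
    if not parts.hostname:
        raise ValueError('Could not determine host from endpoint: %s' % endpoint)
    if parts.port:
        return parts.hostname, parts.port
    if parts.scheme == 'http':
        return parts.hostname, 80
    if parts.scheme == 'https':
        return parts.hostname, 443
    raise ValueError('Could not determine port from endpoint: %s' % endpoint)

print(endpoint_host_port('http://cloudstack.example.com:8888'))  # ('cloudstack.example.com', 8888)
print(endpoint_host_port('https://cloudstack.example.com'))      # ('cloudstack.example.com', 443)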
diff --git a/test/lib/ansible_test/_internal/cloud/fallaxy.py b/test/lib/ansible_test/_internal/cloud/fallaxy.py
new file mode 100644
index 00000000..504094bd
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cloud/fallaxy.py
@@ -0,0 +1,177 @@
+"""Fallaxy (ansible-galaxy) plugin for integration tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import uuid
+
+from . import (
+ CloudProvider,
+ CloudEnvironment,
+ CloudEnvironmentConfig,
+)
+
+from ..util import (
+ find_executable,
+ display,
+)
+
+from ..docker_util import (
+ docker_run,
+ docker_rm,
+ docker_inspect,
+ docker_pull,
+ get_docker_container_id,
+)
+
+
+class FallaxyProvider(CloudProvider):
+ """Fallaxy plugin.
+
+ Sets up Fallaxy (ansible-galaxy) stub server for tests.
+
+    Its source resides at: https://github.com/ansible/fallaxy-test-container
+ """
+
+ DOCKER_SIMULATOR_NAME = 'fallaxy-stub'
+
+ def __init__(self, args):
+ """
+ :type args: TestConfig
+ """
+ super(FallaxyProvider, self).__init__(args)
+
+ if os.environ.get('ANSIBLE_FALLAXY_CONTAINER'):
+ self.image = os.environ.get('ANSIBLE_FALLAXY_CONTAINER')
+ else:
+ self.image = 'quay.io/ansible/fallaxy-test-container:2.0.1'
+ self.container_name = ''
+
+ def filter(self, targets, exclude):
+ """Filter out the tests with the necessary config and res unavailable.
+
+ :type targets: tuple[TestTarget]
+ :type exclude: list[str]
+ """
+ docker_cmd = 'docker'
+ docker = find_executable(docker_cmd, required=False)
+
+ if docker:
+ return
+
+ skip = 'cloud/%s/' % self.platform
+ skipped = [target.name for target in targets if skip in target.aliases]
+
+ if skipped:
+ exclude.append(skip)
+ display.warning('Excluding tests marked "%s" which require the "%s" command: %s'
+ % (skip.rstrip('/'), docker_cmd, ', '.join(skipped)))
+
+ def setup(self):
+ """Setup cloud resource before delegation and reg cleanup callback."""
+ super(FallaxyProvider, self).setup()
+
+ if self._use_static_config():
+ self._setup_static()
+ else:
+ self._setup_dynamic()
+
+ def get_docker_run_options(self):
+ """Get additional options needed when delegating tests to a container.
+
+ :rtype: list[str]
+ """
+ return ['--link', self.DOCKER_SIMULATOR_NAME] if self.managed else []
+
+ def cleanup(self):
+ """Clean up the resource and temporary configs files after tests."""
+ if self.container_name:
+ docker_rm(self.args, self.container_name)
+
+ super(FallaxyProvider, self).cleanup()
+
+ def _setup_dynamic(self):
+ container_id = get_docker_container_id()
+
+ if container_id:
+ display.info('Running in docker container: %s' % container_id, verbosity=1)
+
+ self.container_name = self.DOCKER_SIMULATOR_NAME
+
+ results = docker_inspect(self.args, self.container_name)
+
+ if results and not results[0].get('State', {}).get('Running'):
+ docker_rm(self.args, self.container_name)
+ results = []
+
+ display.info('%s Fallaxy simulator docker container.'
+ % ('Using the existing' if results else 'Starting a new'),
+ verbosity=1)
+
+ fallaxy_port = 8080
+ fallaxy_token = str(uuid.uuid4()).replace('-', '')
+
+ if not results:
+ if self.args.docker or container_id:
+ publish_ports = []
+ else:
+ # publish the simulator ports when not running inside docker
+ publish_ports = [
+ '-p', ':'.join((str(fallaxy_port),) * 2),
+ ]
+
+ if not os.environ.get('ANSIBLE_FALLAXY_CONTAINER'):
+ docker_pull(self.args, self.image)
+
+ docker_run(
+ self.args,
+ self.image,
+ ['-d', '--name', self.container_name, '-e', 'FALLAXY_TOKEN=%s' % fallaxy_token] + publish_ports,
+ )
+
+ if self.args.docker:
+ fallaxy_host = self.DOCKER_SIMULATOR_NAME
+ elif container_id:
+ fallaxy_host = self._get_simulator_address()
+ display.info('Found Fallaxy simulator container address: %s' % fallaxy_host, verbosity=1)
+ else:
+ fallaxy_host = 'localhost'
+
+ self._set_cloud_config('FALLAXY_HOST', fallaxy_host)
+ self._set_cloud_config('FALLAXY_PORT', str(fallaxy_port))
+ self._set_cloud_config('FALLAXY_TOKEN', fallaxy_token)
+
+ def _get_simulator_address(self):
+ results = docker_inspect(self.args, self.container_name)
+ ipaddress = results[0]['NetworkSettings']['IPAddress']
+ return ipaddress
+
+ def _setup_static(self):
+ raise NotImplementedError()
+
+
+class FallaxyEnvironment(CloudEnvironment):
+ """Fallaxy environment plugin.
+
+ Updates integration test environment after delegation.
+ """
+ def get_environment_config(self):
+ """
+ :rtype: CloudEnvironmentConfig
+ """
+ fallaxy_token = self._get_cloud_config('FALLAXY_TOKEN')
+ fallaxy_host = self._get_cloud_config('FALLAXY_HOST')
+ fallaxy_port = self._get_cloud_config('FALLAXY_PORT')
+
+ return CloudEnvironmentConfig(
+ ansible_vars=dict(
+ fallaxy_token=fallaxy_token,
+ fallaxy_galaxy_server='http://%s:%s/api/' % (fallaxy_host, fallaxy_port),
+ fallaxy_ah_server='http://%s:%s/api/automation-hub/' % (fallaxy_host, fallaxy_port),
+ ),
+ env_vars=dict(
+ FALLAXY_TOKEN=fallaxy_token,
+ FALLAXY_GALAXY_SERVER='http://%s:%s/api/' % (fallaxy_host, fallaxy_port),
+ FALLAXY_AH_SERVER='http://%s:%s/api/automation-hub/' % (fallaxy_host, fallaxy_port),
+ ),
+ )
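The Fallaxy provider generates a random API token up front and later turns the stored host and port into the Galaxy and Automation Hub API endpoints consumed by the tests. A rough sketch of those two pieces (the host and port values below are illustrative):

import uuid

# Same token shape as the provider: a UUID4 with the dashes removed.
fallaxy_token = str(uuid.uuid4()).replace('-', '')

fallaxy_host = 'localhost'  # the provider stores the simulator address here
fallaxy_port = 8080

galaxy_server = 'http://%s:%s/api/' % (fallaxy_host, fallaxy_port)
ah_server = 'http://%s:%s/api/automation-hub/' % (fallaxy_host, fallaxy_port)

print(fallaxy_token, galaxy_server, ah_server)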
diff --git a/test/lib/ansible_test/_internal/cloud/foreman.py b/test/lib/ansible_test/_internal/cloud/foreman.py
new file mode 100644
index 00000000..7517f1f6
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cloud/foreman.py
@@ -0,0 +1,191 @@
+"""Foreman plugin for integration tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from . import (
+ CloudProvider,
+ CloudEnvironment,
+ CloudEnvironmentConfig,
+)
+
+from ..util import (
+ find_executable,
+ display,
+)
+
+from ..docker_util import (
+ docker_run,
+ docker_rm,
+ docker_inspect,
+ docker_pull,
+ get_docker_container_id,
+ get_docker_hostname,
+ get_docker_container_ip,
+ get_docker_preferred_network_name,
+ is_docker_user_defined_network,
+)
+
+
+class ForemanProvider(CloudProvider):
+ """Foreman plugin.
+
+ Sets up Foreman stub server for tests.
+ """
+
+ DOCKER_SIMULATOR_NAME = 'foreman-stub'
+
+ DOCKER_IMAGE = 'quay.io/ansible/foreman-test-container:1.4.0'
+ """Default image to run Foreman stub from.
+
+ The simulator must be pinned to a specific version
+ to guarantee CI passes with the version used.
+
+    Its source resides at:
+ https://github.com/ansible/foreman-test-container
+ """
+
+ def __init__(self, args):
+ """Set up container references for provider.
+
+ :type args: TestConfig
+ """
+ super(ForemanProvider, self).__init__(args)
+
+ self.__container_from_env = os.environ.get('ANSIBLE_FRMNSIM_CONTAINER')
+ """Overrides target container, might be used for development.
+
+ Use ANSIBLE_FRMNSIM_CONTAINER=whatever_you_want if you want
+ to use other image. Omit/empty otherwise.
+ """
+
+ self.image = self.__container_from_env or self.DOCKER_IMAGE
+ self.container_name = ''
+
+ def filter(self, targets, exclude):
+ """Filter out the tests with the necessary config and res unavailable.
+
+ :type targets: tuple[TestTarget]
+ :type exclude: list[str]
+ """
+ docker_cmd = 'docker'
+ docker = find_executable(docker_cmd, required=False)
+
+ if docker:
+ return
+
+ skip = 'cloud/%s/' % self.platform
+ skipped = [target.name for target in targets if skip in target.aliases]
+
+ if skipped:
+ exclude.append(skip)
+ display.warning(
+ 'Excluding tests marked "%s" '
+ 'which require the "%s" command: %s'
+ % (skip.rstrip('/'), docker_cmd, ', '.join(skipped))
+ )
+
+ def setup(self):
+ """Setup cloud resource before delegation and reg cleanup callback."""
+ super(ForemanProvider, self).setup()
+
+ if self._use_static_config():
+ self._setup_static()
+ else:
+ self._setup_dynamic()
+
+ def get_docker_run_options(self):
+ """Get additional options needed when delegating tests to a container.
+
+ :rtype: list[str]
+ """
+ network = get_docker_preferred_network_name(self.args)
+
+ if self.managed and not is_docker_user_defined_network(network):
+ return ['--link', self.DOCKER_SIMULATOR_NAME]
+
+ return []
+
+ def cleanup(self):
+ """Clean up the resource and temporary configs files after tests."""
+ if self.container_name:
+ docker_rm(self.args, self.container_name)
+
+ super(ForemanProvider, self).cleanup()
+
+ def _setup_dynamic(self):
+ """Spawn a Foreman stub within docker container."""
+ foreman_port = 8080
+ container_id = get_docker_container_id()
+
+ self.container_name = self.DOCKER_SIMULATOR_NAME
+
+ results = docker_inspect(self.args, self.container_name)
+
+ if results and not results[0].get('State', {}).get('Running'):
+ docker_rm(self.args, self.container_name)
+ results = []
+
+ display.info(
+ '%s Foreman simulator docker container.'
+ % ('Using the existing' if results else 'Starting a new'),
+ verbosity=1,
+ )
+
+ if not results:
+ if self.args.docker or container_id:
+ publish_ports = []
+ else:
+ # publish the simulator ports when not running inside docker
+ publish_ports = [
+ '-p', ':'.join((str(foreman_port), ) * 2),
+ ]
+
+ if not self.__container_from_env:
+ docker_pull(self.args, self.image)
+
+ docker_run(
+ self.args,
+ self.image,
+ ['-d', '--name', self.container_name] + publish_ports,
+ )
+
+ if self.args.docker:
+ foreman_host = self.DOCKER_SIMULATOR_NAME
+ elif container_id:
+ foreman_host = self._get_simulator_address()
+ display.info(
+ 'Found Foreman simulator container address: %s'
+ % foreman_host, verbosity=1
+ )
+ else:
+ foreman_host = get_docker_hostname()
+
+ self._set_cloud_config('FOREMAN_HOST', foreman_host)
+ self._set_cloud_config('FOREMAN_PORT', str(foreman_port))
+
+ def _get_simulator_address(self):
+ return get_docker_container_ip(self.args, self.container_name)
+
+ def _setup_static(self):
+ raise NotImplementedError
+
+
+class ForemanEnvironment(CloudEnvironment):
+ """Foreman environment plugin.
+
+ Updates integration test environment after delegation.
+ """
+ def get_environment_config(self):
+ """
+ :rtype: CloudEnvironmentConfig
+ """
+ env_vars = dict(
+ FOREMAN_HOST=self._get_cloud_config('FOREMAN_HOST'),
+ FOREMAN_PORT=self._get_cloud_config('FOREMAN_PORT'),
+ )
+
+ return CloudEnvironmentConfig(
+ env_vars=env_vars,
+ )
diff --git a/test/lib/ansible_test/_internal/cloud/gcp.py b/test/lib/ansible_test/_internal/cloud/gcp.py
new file mode 100644
index 00000000..c8de1835
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cloud/gcp.py
@@ -0,0 +1,62 @@
+# Copyright: (c) 2018, Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""GCP plugin for integration tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ..util import (
+ display,
+ ConfigParser,
+)
+
+from . import (
+ CloudProvider,
+ CloudEnvironment,
+ CloudEnvironmentConfig,
+)
+
+
+class GcpCloudProvider(CloudProvider):
+ """GCP cloud provider plugin. Sets up cloud resources before delegation."""
+
+ def filter(self, targets, exclude):
+ """Filter out the cloud tests when the necessary config and resources are not available.
+ :type targets: tuple[TestTarget]
+ :type exclude: list[str]
+ """
+
+ if os.path.isfile(self.config_static_path):
+ return
+
+ super(GcpCloudProvider, self).filter(targets, exclude)
+
+ def setup(self):
+ """Setup the cloud resource before delegation and register a cleanup callback."""
+ super(GcpCloudProvider, self).setup()
+
+ if not self._use_static_config():
+ display.notice(
+                'Static configuration could not be used. Are you missing a template file?'
+ )
+
+
+class GcpCloudEnvironment(CloudEnvironment):
+ """GCP cloud environment plugin. Updates integration test environment after delegation."""
+ def get_environment_config(self):
+ """
+ :rtype: CloudEnvironmentConfig
+ """
+ parser = ConfigParser()
+ parser.read(self.config_path)
+
+ ansible_vars = dict(
+ resource_prefix=self.resource_prefix,
+ )
+
+ ansible_vars.update(dict(parser.items('default')))
+
+ return CloudEnvironmentConfig(
+ ansible_vars=ansible_vars,
+ )
diff --git a/test/lib/ansible_test/_internal/cloud/hcloud.py b/test/lib/ansible_test/_internal/cloud/hcloud.py
new file mode 100644
index 00000000..5902b566
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cloud/hcloud.py
@@ -0,0 +1,116 @@
+"""Hetzner Cloud plugin for integration tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ..util import (
+ display,
+ ConfigParser,
+)
+
+from . import (
+ CloudProvider,
+ CloudEnvironment,
+ CloudEnvironmentConfig,
+)
+
+from ..core_ci import (
+ AnsibleCoreCI,
+)
+
+
+class HcloudCloudProvider(CloudProvider):
+ """Hetzner Cloud provider plugin. Sets up cloud resources before
+ delegation.
+ """
+
+ def __init__(self, args):
+ """
+ :type args: TestConfig
+ """
+ super(HcloudCloudProvider, self).__init__(args)
+
+ def filter(self, targets, exclude):
+ """Filter out the cloud tests when the necessary config and resources are not available.
+ :type targets: tuple[TestTarget]
+ :type exclude: list[str]
+ """
+ if os.path.isfile(self.config_static_path):
+ return
+
+ aci = self._create_ansible_core_ci()
+
+ if aci.available:
+ return
+
+ super(HcloudCloudProvider, self).filter(targets, exclude)
+
+ def setup(self):
+ """Setup the cloud resource before delegation and register a cleanup callback."""
+ super(HcloudCloudProvider, self).setup()
+
+ if not self._use_static_config():
+ self._setup_dynamic()
+
+ def _setup_dynamic(self):
+ """Request Hetzner credentials through the Ansible Core CI service."""
+ display.info('Provisioning %s cloud environment.' % self.platform, verbosity=1)
+
+ config = self._read_config_template()
+
+ aci = self._create_ansible_core_ci()
+
+ response = aci.start()
+
+ if not self.args.explain:
+ token = response['hetzner']['token']
+
+ display.sensitive.add(token)
+ display.info('Hetzner Cloud Token: %s' % token, verbosity=1)
+
+ values = dict(
+ TOKEN=token,
+ )
+
+ display.sensitive.add(values['TOKEN'])
+
+ config = self._populate_config_template(config, values)
+
+ self._write_config(config)
+
+ def _create_ansible_core_ci(self):
+ """
+ :rtype: AnsibleCoreCI
+ """
+ return AnsibleCoreCI(self.args, 'hetzner', 'hetzner', persist=False, stage=self.args.remote_stage, provider=self.args.remote_provider)
+
+
+class HcloudCloudEnvironment(CloudEnvironment):
+ """Hetzner Cloud cloud environment plugin. Updates integration test environment
+ after delegation.
+ """
+
+ def get_environment_config(self):
+ """
+ :rtype: CloudEnvironmentConfig
+ """
+ parser = ConfigParser()
+ parser.read(self.config_path)
+
+ env_vars = dict(
+ HCLOUD_TOKEN=parser.get('default', 'hcloud_api_token'),
+ )
+
+ display.sensitive.add(env_vars['HCLOUD_TOKEN'])
+
+ ansible_vars = dict(
+ hcloud_prefix=self.resource_prefix,
+ )
+
+ ansible_vars.update(dict((key.lower(), value) for key, value in env_vars.items()))
+
+ return CloudEnvironmentConfig(
+ env_vars=env_vars,
+ ansible_vars=ansible_vars,
+ )
diff --git a/test/lib/ansible_test/_internal/cloud/nios.py b/test/lib/ansible_test/_internal/cloud/nios.py
new file mode 100644
index 00000000..b9a1a4e4
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cloud/nios.py
@@ -0,0 +1,193 @@
+"""NIOS plugin for integration tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from . import (
+ CloudProvider,
+ CloudEnvironment,
+ CloudEnvironmentConfig,
+)
+
+from ..util import (
+ find_executable,
+ display,
+)
+
+from ..docker_util import (
+ docker_run,
+ docker_rm,
+ docker_inspect,
+ docker_pull,
+ get_docker_container_id,
+ get_docker_hostname,
+ get_docker_container_ip,
+ get_docker_preferred_network_name,
+ is_docker_user_defined_network,
+)
+
+
+class NiosProvider(CloudProvider):
+ """Nios plugin.
+
+ Sets up NIOS mock server for tests.
+ """
+
+ DOCKER_SIMULATOR_NAME = 'nios-simulator'
+
+ DOCKER_IMAGE = 'quay.io/ansible/nios-test-container:1.3.0'
+ """Default image to run the nios simulator.
+
+ The simulator must be pinned to a specific version
+ to guarantee CI passes with the version used.
+
+    Its source resides at:
+ https://github.com/ansible/nios-test-container
+ """
+
+ def __init__(self, args):
+ """Set up container references for provider.
+
+ :type args: TestConfig
+ """
+ super(NiosProvider, self).__init__(args)
+
+ self.__container_from_env = os.environ.get('ANSIBLE_NIOSSIM_CONTAINER')
+ """Overrides target container, might be used for development.
+
+ Use ANSIBLE_NIOSSIM_CONTAINER=whatever_you_want if you want
+ to use other image. Omit/empty otherwise.
+ """
+
+ self.image = self.__container_from_env or self.DOCKER_IMAGE
+ self.container_name = ''
+
+ def filter(self, targets, exclude):
+ """Filter out the tests with the necessary config and res unavailable.
+
+ :type targets: tuple[TestTarget]
+ :type exclude: list[str]
+ """
+ docker_cmd = 'docker'
+ docker = find_executable(docker_cmd, required=False)
+
+ if docker:
+ return
+
+ skip = 'cloud/%s/' % self.platform
+ skipped = [target.name for target in targets if skip in target.aliases]
+
+ if skipped:
+ exclude.append(skip)
+ display.warning(
+ 'Excluding tests marked "%s" '
+ 'which require the "%s" command: %s'
+ % (skip.rstrip('/'), docker_cmd, ', '.join(skipped))
+ )
+
+ def setup(self):
+ """Setup cloud resource before delegation and reg cleanup callback."""
+ super(NiosProvider, self).setup()
+
+ if self._use_static_config():
+ self._setup_static()
+ else:
+ self._setup_dynamic()
+
+ def get_docker_run_options(self):
+ """Get additional options needed when delegating tests to a container.
+
+ :rtype: list[str]
+ """
+ network = get_docker_preferred_network_name(self.args)
+
+ if self.managed and not is_docker_user_defined_network(network):
+ return ['--link', self.DOCKER_SIMULATOR_NAME]
+
+ return []
+
+ def cleanup(self):
+ """Clean up the resource and temporary configs files after tests."""
+ if self.container_name:
+ docker_rm(self.args, self.container_name)
+
+ super(NiosProvider, self).cleanup()
+
+ def _setup_dynamic(self):
+ """Spawn a NIOS simulator within docker container."""
+ nios_port = 443
+ container_id = get_docker_container_id()
+
+ self.container_name = self.DOCKER_SIMULATOR_NAME
+
+ results = docker_inspect(self.args, self.container_name)
+
+ if results and not results[0].get('State', {}).get('Running'):
+ docker_rm(self.args, self.container_name)
+ results = []
+
+ display.info(
+ '%s NIOS simulator docker container.'
+ % ('Using the existing' if results else 'Starting a new'),
+ verbosity=1,
+ )
+
+ if not results:
+ if self.args.docker or container_id:
+ publish_ports = []
+ else:
+ # publish the simulator ports when not running inside docker
+ publish_ports = [
+ '-p', ':'.join((str(nios_port), ) * 2),
+ ]
+
+ if not self.__container_from_env:
+ docker_pull(self.args, self.image)
+
+ docker_run(
+ self.args,
+ self.image,
+ ['-d', '--name', self.container_name] + publish_ports,
+ )
+
+ if self.args.docker:
+ nios_host = self.DOCKER_SIMULATOR_NAME
+ elif container_id:
+ nios_host = self._get_simulator_address()
+ display.info(
+ 'Found NIOS simulator container address: %s'
+ % nios_host, verbosity=1
+ )
+ else:
+ nios_host = get_docker_hostname()
+
+ self._set_cloud_config('NIOS_HOST', nios_host)
+
+ def _get_simulator_address(self):
+ return get_docker_container_ip(self.args, self.container_name)
+
+ def _setup_static(self):
+ raise NotImplementedError
+
+
+class NiosEnvironment(CloudEnvironment):
+ """NIOS environment plugin.
+
+ Updates integration test environment after delegation.
+ """
+ def get_environment_config(self):
+ """
+ :rtype: CloudEnvironmentConfig
+ """
+ ansible_vars = dict(
+ nios_provider=dict(
+ host=self._get_cloud_config('NIOS_HOST'),
+ username='admin',
+ password='infoblox',
+ ),
+ )
+
+ return CloudEnvironmentConfig(
+ ansible_vars=ansible_vars,
+ )
diff --git a/test/lib/ansible_test/_internal/cloud/opennebula.py b/test/lib/ansible_test/_internal/cloud/opennebula.py
new file mode 100644
index 00000000..559093e3
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cloud/opennebula.py
@@ -0,0 +1,66 @@
+"""OpenNebula plugin for integration tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from . import (
+ CloudProvider,
+ CloudEnvironment,
+ CloudEnvironmentConfig,
+)
+
+from ..util import (
+ display,
+ ConfigParser,
+)
+
+
+class OpenNebulaCloudProvider(CloudProvider):
+ """Checks if a configuration file has been passed or fixtures are going to be used for testing"""
+
+ def filter(self, targets, exclude):
+ """ no need to filter modules, they can either run from config file or from fixtures"""
+
+ def setup(self):
+ """Setup the cloud resource before delegation and register a cleanup callback."""
+ super(OpenNebulaCloudProvider, self).setup()
+
+ if not self._use_static_config():
+ self._setup_dynamic()
+
+ def _setup_dynamic(self):
+        display.info('No config file provided, tests will run from fixtures')
+
+ config = self._read_config_template()
+ values = dict(
+ URL="http://localhost/RPC2",
+ USERNAME='oneadmin',
+ PASSWORD='onepass',
+ FIXTURES='true',
+ REPLAY='true',
+ )
+ config = self._populate_config_template(config, values)
+ self._write_config(config)
+
+
+class OpenNebulaCloudEnvironment(CloudEnvironment):
+ """
+    Updates the integration test environment after delegation. Sets up the config file as a parameter.
+ """
+ def get_environment_config(self):
+ """
+ :rtype: CloudEnvironmentConfig
+ """
+ parser = ConfigParser()
+ parser.read(self.config_path)
+
+ ansible_vars = dict(
+ resource_prefix=self.resource_prefix,
+ )
+
+ ansible_vars.update(dict(parser.items('default')))
+
+ display.sensitive.add(ansible_vars.get('opennebula_password'))
+
+ return CloudEnvironmentConfig(
+ ansible_vars=ansible_vars,
+ )
diff --git a/test/lib/ansible_test/_internal/cloud/openshift.py b/test/lib/ansible_test/_internal/cloud/openshift.py
new file mode 100644
index 00000000..450816bf
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cloud/openshift.py
@@ -0,0 +1,236 @@
+"""OpenShift plugin for integration tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+import re
+import time
+
+from . import (
+ CloudProvider,
+ CloudEnvironment,
+ CloudEnvironmentConfig,
+)
+
+from ..io import (
+ read_text_file,
+)
+
+from ..util import (
+ find_executable,
+ ApplicationError,
+ display,
+ SubprocessError,
+)
+
+from ..http import (
+ HttpClient,
+)
+
+from ..docker_util import (
+ docker_exec,
+ docker_run,
+ docker_rm,
+ docker_inspect,
+ docker_pull,
+ docker_network_inspect,
+ get_docker_container_id,
+ get_docker_preferred_network_name,
+ get_docker_hostname,
+ is_docker_user_defined_network,
+)
+
+
+class OpenShiftCloudProvider(CloudProvider):
+ """OpenShift cloud provider plugin. Sets up cloud resources before delegation."""
+ DOCKER_CONTAINER_NAME = 'openshift-origin'
+
+ def __init__(self, args):
+ """
+ :type args: TestConfig
+ """
+ super(OpenShiftCloudProvider, self).__init__(args, config_extension='.kubeconfig')
+
+ # The image must be pinned to a specific version to guarantee CI passes with the version used.
+ self.image = 'openshift/origin:v3.9.0'
+ self.container_name = ''
+
+ def filter(self, targets, exclude):
+ """Filter out the cloud tests when the necessary config and resources are not available.
+ :type targets: tuple[TestTarget]
+ :type exclude: list[str]
+ """
+ if os.path.isfile(self.config_static_path):
+ return
+
+ docker = find_executable('docker', required=False)
+
+ if docker:
+ return
+
+ skip = 'cloud/%s/' % self.platform
+ skipped = [target.name for target in targets if skip in target.aliases]
+
+ if skipped:
+ exclude.append(skip)
+ display.warning('Excluding tests marked "%s" which require the "docker" command or config (see "%s"): %s'
+ % (skip.rstrip('/'), self.config_template_path, ', '.join(skipped)))
+
+ def setup(self):
+ """Setup the cloud resource before delegation and register a cleanup callback."""
+ super(OpenShiftCloudProvider, self).setup()
+
+ if self._use_static_config():
+ self._setup_static()
+ else:
+ self._setup_dynamic()
+
+ def get_remote_ssh_options(self):
+ """Get any additional options needed when delegating tests to a remote instance via SSH.
+ :rtype: list[str]
+ """
+ if self.managed:
+ return ['-R', '8443:%s:8443' % get_docker_hostname()]
+
+ return []
+
+ def get_docker_run_options(self):
+ """Get any additional options needed when delegating tests to a docker container.
+ :rtype: list[str]
+ """
+ network = get_docker_preferred_network_name(self.args)
+
+ if self.managed and not is_docker_user_defined_network(network):
+ return ['--link', self.DOCKER_CONTAINER_NAME]
+
+ return []
+
+ def cleanup(self):
+ """Clean up the cloud resource and any temporary configuration files after tests complete."""
+ if self.container_name:
+ docker_rm(self.args, self.container_name)
+
+ super(OpenShiftCloudProvider, self).cleanup()
+
+ def _setup_static(self):
+ """Configure OpenShift tests for use with static configuration."""
+ config = read_text_file(self.config_static_path)
+
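+ # Extract the API server address from the kubeconfig so the service availability check can use it.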
+ match = re.search(r'^ *server: (?P<server>.*)$', config, flags=re.MULTILINE)
+
+ if match:
+ endpoint = match.group('server')
+ self._wait_for_service(endpoint)
+ else:
+ display.warning('Could not find OpenShift endpoint in kubeconfig. Skipping check for OpenShift service availability.')
+
+ def _setup_dynamic(self):
+ """Create a OpenShift container using docker."""
+ self.container_name = self.DOCKER_CONTAINER_NAME
+
+ results = docker_inspect(self.args, self.container_name)
+
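+ # Remove any existing container that is no longer running so a fresh one can be started.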
+ if results and not results[0]['State']['Running']:
+ docker_rm(self.args, self.container_name)
+ results = []
+
+ if results:
+ display.info('Using the existing OpenShift docker container.', verbosity=1)
+ else:
+ display.info('Starting a new OpenShift docker container.', verbosity=1)
+ docker_pull(self.args, self.image)
+ cmd = ['start', 'master', '--listen', 'https://0.0.0.0:8443']
+ docker_run(self.args, self.image, ['-d', '-p', '8443:8443', '--name', self.container_name], cmd)
+
+ container_id = get_docker_container_id()
+
+ if container_id:
+ host = self._get_container_address()
+ display.info('Found OpenShift container address: %s' % host, verbosity=1)
+ else:
+ host = get_docker_hostname()
+
+ port = 8443
+ endpoint = 'https://%s:%s/' % (host, port)
+
+ self._wait_for_service(endpoint)
+
+ if self.args.explain:
+ config = '# Unknown'
+ else:
+ if self.args.docker:
+ host = self.DOCKER_CONTAINER_NAME
+ elif self.args.remote:
+ host = 'localhost'
+
+ server = 'https://%s:%s' % (host, port)
+ config = self._get_config(server)
+
+ self._write_config(config)
+
+ def _get_container_address(self):
+ current_network = get_docker_preferred_network_name(self.args)
+ networks = docker_network_inspect(self.args, current_network)
+
+ try:
+ network = [network for network in networks if network['Name'] == current_network][0]
+ containers = network['Containers']
+ container = [containers[container] for container in containers if containers[container]['Name'] == self.DOCKER_CONTAINER_NAME][0]
+ return re.sub(r'/[0-9]+$', '', container['IPv4Address'])
+ except Exception:
+ display.error('Failed to process the following docker network inspect output:\n%s' %
+ json.dumps(networks, indent=4, sort_keys=True))
+ raise
+
+ def _wait_for_service(self, endpoint):
+ """Wait for the OpenShift service endpoint to accept connections.
+ :type endpoint: str
+ """
+ if self.args.explain:
+ return
+
+ client = HttpClient(self.args, always=True, insecure=True)
+
+ for dummy in range(1, 30):
+ display.info('Waiting for OpenShift service: %s' % endpoint, verbosity=1)
+
+ try:
+ client.get(endpoint)
+ return
+ except SubprocessError:
+ pass
+
+ time.sleep(10)
+
+ raise ApplicationError('Timeout waiting for OpenShift service.')
+
+ def _get_config(self, server):
+ """Get OpenShift config from container.
+ :type server: str
+ :rtype: dict[str, str]
+ """
+ cmd = ['cat', '/var/lib/origin/openshift.local.config/master/admin.kubeconfig']
+
+ stdout, dummy = docker_exec(self.args, self.container_name, cmd, capture=True)
+
+ config = stdout
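+ # Disable TLS verification and rewrite the server address so the kubeconfig is usable from the test environment.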
+ config = re.sub(r'^( *)certificate-authority-data: .*$', r'\1insecure-skip-tls-verify: true', config, flags=re.MULTILINE)
+ config = re.sub(r'^( *)server: .*$', r'\1server: %s' % server, config, flags=re.MULTILINE)
+
+ return config
+
+
+class OpenShiftCloudEnvironment(CloudEnvironment):
+ """OpenShift cloud environment plugin. Updates integration test environment after delegation."""
+ def get_environment_config(self):
+ """
+ :rtype: CloudEnvironmentConfig
+ """
+ env_vars = dict(
+ K8S_AUTH_KUBECONFIG=self.config_path,
+ )
+
+ return CloudEnvironmentConfig(
+ env_vars=env_vars,
+ )
diff --git a/test/lib/ansible_test/_internal/cloud/scaleway.py b/test/lib/ansible_test/_internal/cloud/scaleway.py
new file mode 100644
index 00000000..22abe197
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cloud/scaleway.py
@@ -0,0 +1,72 @@
+"""Scaleway plugin for integration tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from . import (
+ CloudProvider,
+ CloudEnvironment,
+ CloudEnvironmentConfig,
+)
+
+from ..util import (
+ ConfigParser,
+ display,
+)
+
+
+class ScalewayCloudProvider(CloudProvider):
+ """Checks if a configuration file has been passed or fixtures are going to be used for testing"""
+
+ def __init__(self, args):
+ """
+ :type args: TestConfig
+ """
+ super(ScalewayCloudProvider, self).__init__(args)
+
+ def filter(self, targets, exclude):
+ """Filter out the cloud tests when the necessary config and resources are not available.
+ :type targets: tuple[TestTarget]
+ :type exclude: list[str]
+ """
+ if os.path.isfile(self.config_static_path):
+ return
+
+ super(ScalewayCloudProvider, self).filter(targets, exclude)
+
+ def setup(self):
+ """Setup the cloud resource before delegation and register a cleanup callback."""
+ super(ScalewayCloudProvider, self).setup()
+
+ if os.path.isfile(self.config_static_path):
+ self.config_path = self.config_static_path
+ self.managed = False
+
+
+class ScalewayCloudEnvironment(CloudEnvironment):
+ """
+ Updates the integration test environment after delegation. Values from the config file are exposed as environment and Ansible variables.
+ """
+ def get_environment_config(self):
+ """
+ :rtype: CloudEnvironmentConfig
+ """
+ parser = ConfigParser()
+ parser.read(self.config_path)
+
+ env_vars = dict(
+ SCW_API_KEY=parser.get('default', 'key'),
+ SCW_ORG=parser.get('default', 'org')
+ )
+
+ display.sensitive.add(env_vars['SCW_API_KEY'])
+
+ ansible_vars = dict(
+ scw_org=parser.get('default', 'org'),
+ )
+
+ return CloudEnvironmentConfig(
+ env_vars=env_vars,
+ ansible_vars=ansible_vars,
+ )
diff --git a/test/lib/ansible_test/_internal/cloud/tower.py b/test/lib/ansible_test/_internal/cloud/tower.py
new file mode 100644
index 00000000..227d170c
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cloud/tower.py
@@ -0,0 +1,255 @@
+"""Tower plugin for integration tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import time
+
+from ..util import (
+ display,
+ ApplicationError,
+ SubprocessError,
+ ConfigParser,
+)
+
+from ..util_common import (
+ run_command,
+)
+
+from . import (
+ CloudProvider,
+ CloudEnvironment,
+ CloudEnvironmentConfig,
+)
+
+from ..core_ci import (
+ AnsibleCoreCI,
+)
+
+
+class TowerCloudProvider(CloudProvider):
+ """Tower cloud provider plugin. Sets up cloud resources before delegation."""
+ def __init__(self, args):
+ """
+ :type args: TestConfig
+ """
+ super(TowerCloudProvider, self).__init__(args)
+
+ self.aci = None
+ self.version = ''
+
+ def filter(self, targets, exclude):
+ """Filter out the cloud tests when the necessary config and resources are not available.
+ :type targets: tuple[TestTarget]
+ :type exclude: list[str]
+ """
+ if os.path.isfile(self.config_static_path):
+ return
+
+ aci = get_tower_aci(self.args)
+
+ if aci.available:
+ return
+
+ super(TowerCloudProvider, self).filter(targets, exclude)
+
+ def setup(self):
+ """Setup the cloud resource before delegation and register a cleanup callback."""
+ super(TowerCloudProvider, self).setup()
+
+ if self._use_static_config():
+ self._setup_static()
+ else:
+ self._setup_dynamic()
+
+ def check_tower_version(self, fallback=None):
+ """Check the Tower version being tested and determine the correct CLI version to use.
+ :type fallback: str | None
+ """
+ tower_cli_version_map = {
+ '3.1.5': '3.1.8',
+ '3.2.3': '3.3.0',
+ '3.3.5': '3.3.3',
+ '3.4.3': '3.3.3',
+ '3.6.3': '3.3.8',
+ }
+
+ cli_version = tower_cli_version_map.get(self.version, fallback)
+
+ if not cli_version:
+ raise ApplicationError('Mapping to ansible-tower-cli version required for Tower version: %s' % self.version)
+
+ self._set_cloud_config('tower_cli_version', cli_version)
+
+ def cleanup(self):
+ """Clean up the cloud resource and any temporary configuration files after tests complete."""
+ # cleanup on success or failure is not yet supported due to how cleanup is called
+ if self.aci and self.args.remote_terminate == 'always':
+ self.aci.stop()
+
+ super(TowerCloudProvider, self).cleanup()
+
+ def _setup_static(self):
+ config = TowerConfig.parse(self.config_static_path)
+
+ self.version = config.version
+ self.check_tower_version()
+
+ def _setup_dynamic(self):
+ """Request Tower credentials through the Ansible Core CI service."""
+ display.info('Provisioning %s cloud environment.' % self.platform, verbosity=1)
+
+ # temporary solution to allow version selection
+ self.version = os.environ.get('TOWER_VERSION', '3.6.3')
+ self.check_tower_version(os.environ.get('TOWER_CLI_VERSION'))
+
+ aci = get_tower_aci(self.args, self.version)
+ aci.start()
+ aci.wait()
+
+ connection = aci.get()
+
+ config = self._read_config_template()
+
+ if not self.args.explain:
+ self.aci = aci
+
+ values = dict(
+ VERSION=self.version,
+ HOST=connection.hostname,
+ USERNAME=connection.username,
+ PASSWORD=connection.password,
+ )
+
+ display.sensitive.add(values['PASSWORD'])
+
+ config = self._populate_config_template(config, values)
+
+ self._write_config(config)
+
+
+class TowerCloudEnvironment(CloudEnvironment):
+ """Tower cloud environment plugin. Updates integration test environment after delegation."""
+ def setup(self):
+ """Setup which should be done once per environment instead of once per test target."""
+ self.setup_cli()
+ self.disable_pendo()
+
+ def setup_cli(self):
+ """Install the correct Tower CLI for the version of Tower being tested."""
+ tower_cli_version = self._get_cloud_config('tower_cli_version')
+
+ display.info('Installing Tower CLI version: %s' % tower_cli_version)
+
+ cmd = self.args.pip_command + ['install', '--disable-pip-version-check', 'ansible-tower-cli==%s' % tower_cli_version]
+
+ run_command(self.args, cmd)
+
+ cmd = ['tower-cli', 'config', 'verify_ssl', 'false']
+ run_command(self.args, cmd, capture=True)
+
+ def disable_pendo(self):
+ """Disable Pendo tracking."""
+ display.info('Disabling Pendo tracking')
+
+ config = TowerConfig.parse(self.config_path)
+
+ # tower-cli does not recognize TOWER_ environment variables
+ cmd = ['tower-cli', 'setting', 'modify', 'PENDO_TRACKING_STATE', 'off',
+ '-h', config.host, '-u', config.username, '-p', config.password]
+
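+ # Tower may not be ready to accept requests yet, so retry for up to 5 minutes (60 attempts, 5 seconds apart).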
+ attempts = 60
+
+ while True:
+ attempts -= 1
+
+ try:
+ run_command(self.args, cmd, capture=True)
+ return
+ except SubprocessError as ex:
+ if not attempts:
+ raise ApplicationError('Timed out trying to disable Pendo tracking:\n%s' % ex)
+
+ time.sleep(5)
+
+ def get_environment_config(self):
+ """
+ :rtype: CloudEnvironmentConfig
+ """
+ config = TowerConfig.parse(self.config_path)
+
+ env_vars = config.environment
+
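+ # Expose the same values as lowercase Ansible variables (e.g. TOWER_HOST becomes tower_host).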
+ ansible_vars = dict((key.lower(), value) for key, value in env_vars.items())
+
+ return CloudEnvironmentConfig(
+ env_vars=env_vars,
+ ansible_vars=ansible_vars,
+ )
+
+
+class TowerConfig:
+ """Tower settings."""
+ def __init__(self, values):
+ self.version = values.get('version')
+ self.host = values.get('host')
+ self.username = values.get('username')
+ self.password = values.get('password')
+
+ if self.password:
+ display.sensitive.add(self.password)
+
+ @property
+ def environment(self):
+ """Tower settings as environment variables.
+ :rtype: dict[str, str]
+ """
+ env = dict(
+ TOWER_VERSION=self.version,
+ TOWER_HOST=self.host,
+ TOWER_USERNAME=self.username,
+ TOWER_PASSWORD=self.password,
+ )
+
+ return env
+
+ @staticmethod
+ def parse(path):
+ """
+ :type path: str
+ :rtype: TowerConfig
+ """
+ parser = ConfigParser()
+ parser.read(path)
+
+ keys = (
+ 'version',
+ 'host',
+ 'username',
+ 'password',
+ )
+
+ values = dict((k, parser.get('default', k)) for k in keys)
+ config = TowerConfig(values)
+
+ missing = [k for k in keys if not values.get(k)]
+
+ if missing:
+ raise ApplicationError('Missing or empty Tower configuration value(s): %s' % ', '.join(missing))
+
+ return config
+
+
+def get_tower_aci(args, version=None):
+ """
+ :type args: EnvironmentConfig
+ :type version: str | None
+ :rtype: AnsibleCoreCI
+ """
+ if version:
+ persist = True
+ else:
+ version = ''
+ persist = False
+
+ return AnsibleCoreCI(args, 'tower', version, persist=persist, stage=args.remote_stage, provider=args.remote_provider)
diff --git a/test/lib/ansible_test/_internal/cloud/vcenter.py b/test/lib/ansible_test/_internal/cloud/vcenter.py
new file mode 100644
index 00000000..3b38a19e
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cloud/vcenter.py
@@ -0,0 +1,232 @@
+"""VMware vCenter plugin for integration tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from . import (
+ CloudProvider,
+ CloudEnvironment,
+ CloudEnvironmentConfig,
+)
+
+from ..util import (
+ find_executable,
+ display,
+ ConfigParser,
+ ApplicationError,
+)
+
+from ..docker_util import (
+ docker_run,
+ docker_rm,
+ docker_inspect,
+ docker_pull,
+ get_docker_container_id,
+ get_docker_hostname,
+ get_docker_container_ip,
+ get_docker_preferred_network_name,
+ is_docker_user_defined_network,
+)
+
+
+class VcenterProvider(CloudProvider):
+ """VMware vcenter/esx plugin. Sets up cloud resources for tests."""
+ DOCKER_SIMULATOR_NAME = 'vcenter-simulator'
+
+ def __init__(self, args):
+ """
+ :type args: TestConfig
+ """
+ super(VcenterProvider, self).__init__(args)
+
+ # The simulator must be pinned to a specific version to guarantee CI passes with the version used.
+ if os.environ.get('ANSIBLE_VCSIM_CONTAINER'):
+ self.image = os.environ.get('ANSIBLE_VCSIM_CONTAINER')
+ else:
+ self.image = 'quay.io/ansible/vcenter-test-container:1.7.0'
+ self.container_name = ''
+
+ # VMware tests can be run on govcsim or BYO with a static config file.
+ # The simulator is the default if no config is provided.
+ self.vmware_test_platform = os.environ.get('VMWARE_TEST_PLATFORM', 'govcsim')
+ self.insecure = False
+ self.proxy = None
+ self.platform = 'vcenter'
+
+ def filter(self, targets, exclude):
+ """Filter out the cloud tests when the necessary config and resources are not available.
+ :type targets: tuple[TestTarget]
+ :type exclude: list[str]
+ """
+ if self.vmware_test_platform == 'govcsim' or (self.vmware_test_platform == '' and not os.path.isfile(self.config_static_path)):
+ docker = find_executable('docker', required=False)
+
+ if docker:
+ return
+
+ skip = 'cloud/%s/' % self.platform
+ skipped = [target.name for target in targets if skip in target.aliases]
+
+ if skipped:
+ exclude.append(skip)
+ display.warning('Excluding tests marked "%s" which require the "docker" command or config (see "%s"): %s'
+ % (skip.rstrip('/'), self.config_template_path, ', '.join(skipped)))
+ elif self.vmware_test_platform == 'static':
+ if os.path.isfile(self.config_static_path):
+ return
+
+ super(VcenterProvider, self).filter(targets, exclude)
+
+ def setup(self):
+ """Setup the cloud resource before delegation and register a cleanup callback."""
+ super(VcenterProvider, self).setup()
+
+ self._set_cloud_config('vmware_test_platform', self.vmware_test_platform)
+ if self.vmware_test_platform == 'govcsim':
+ self._setup_dynamic_simulator()
+ self.managed = True
+ elif self.vmware_test_platform == 'static':
+ self._use_static_config()
+ self._setup_static()
+ else:
+ raise ApplicationError('Unknown vmware_test_platform: %s' % self.vmware_test_platform)
+
+ def get_docker_run_options(self):
+ """Get any additional options needed when delegating tests to a docker container.
+ :rtype: list[str]
+ """
+ network = get_docker_preferred_network_name(self.args)
+
+ if self.managed and not is_docker_user_defined_network(network):
+ return ['--link', self.DOCKER_SIMULATOR_NAME]
+
+ return []
+
+ def cleanup(self):
+ """Clean up the cloud resource and any temporary configuration files after tests complete."""
+ if self.container_name:
+ docker_rm(self.args, self.container_name)
+
+ super(VcenterProvider, self).cleanup()
+
+ def _setup_dynamic_simulator(self):
+ """Create a vcenter simulator using docker."""
+ container_id = get_docker_container_id()
+
+ self.container_name = self.DOCKER_SIMULATOR_NAME
+
+ results = docker_inspect(self.args, self.container_name)
+
+ if results and not results[0].get('State', {}).get('Running'):
+ docker_rm(self.args, self.container_name)
+ results = []
+
+ if results:
+ display.info('Using the existing vCenter simulator docker container.', verbosity=1)
+ else:
+ display.info('Starting a new vCenter simulator docker container.', verbosity=1)
+
+ if not self.args.docker and not container_id:
+ # publish the simulator ports when not running inside docker
+ publish_ports = [
+ '-p', '1443:443',
+ '-p', '8080:8080',
+ '-p', '8989:8989',
+ '-p', '5000:5000', # control port for flask app in simulator
+ ]
+ else:
+ publish_ports = []
+
+ if not os.environ.get('ANSIBLE_VCSIM_CONTAINER'):
+ docker_pull(self.args, self.image)
+
+ docker_run(
+ self.args,
+ self.image,
+ ['-d', '--name', self.container_name] + publish_ports,
+ )
+
+ if self.args.docker:
+ vcenter_hostname = self.DOCKER_SIMULATOR_NAME
+ elif container_id:
+ vcenter_hostname = self._get_simulator_address()
+ display.info('Found vCenter simulator container address: %s' % vcenter_hostname, verbosity=1)
+ else:
+ vcenter_hostname = get_docker_hostname()
+
+ self._set_cloud_config('vcenter_hostname', vcenter_hostname)
+
+ def _get_simulator_address(self):
+ return get_docker_container_ip(self.args, self.container_name)
+
+ def _setup_static(self):
+ if not os.path.exists(self.config_static_path):
+ raise ApplicationError('Configuration file does not exist: %s' % self.config_static_path)
+
+ parser = ConfigParser({
+ 'vcenter_port': '443',
+ 'vmware_proxy_host': '',
+ 'vmware_proxy_port': '8080'})
+ parser.read(self.config_static_path)
+
+ if parser.get('DEFAULT', 'vmware_validate_certs').lower() in ('no', 'false'):
+ self.insecure = True
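+ # Build the proxy URL only when both a proxy host and port have been configured.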
+ proxy_host = parser.get('DEFAULT', 'vmware_proxy_host')
+ proxy_port = int(parser.get('DEFAULT', 'vmware_proxy_port'))
+ if proxy_host and proxy_port:
+ self.proxy = 'http://%s:%d' % (proxy_host, proxy_port)
+
+
+class VcenterEnvironment(CloudEnvironment):
+ """VMware vcenter/esx environment plugin. Updates integration test environment after delegation."""
+ def get_environment_config(self):
+ """
+ :rtype: CloudEnvironmentConfig
+ """
+ try:
+ # We may be running in a container, so we cannot rely on the VMWARE_TEST_PLATFORM environment variable being available.
+ # Use a try/except instead and fall back to the govcsim settings on failure.
+ parser = ConfigParser()
+ parser.read(self.config_path) # static
+
+ env_vars = dict()
+ ansible_vars = dict(
+ resource_prefix=self.resource_prefix,
+ )
+ ansible_vars.update(dict(parser.items('DEFAULT', raw=True)))
+ except KeyError: # govcsim
+ env_vars = dict(
+ VCENTER_HOSTNAME=self._get_cloud_config('vcenter_hostname'),
+ VCENTER_USERNAME='user',
+ VCENTER_PASSWORD='pass',
+ )
+
+ ansible_vars = dict(
+ vcsim=self._get_cloud_config('vcenter_hostname'),
+ vcenter_hostname=self._get_cloud_config('vcenter_hostname'),
+ vcenter_username='user',
+ vcenter_password='pass',
+ )
+ # Shippable starts ansible-test from within an existing container,
+ # and in this case, we don't have to change the vcenter port.
+ if not self.args.docker and not get_docker_container_id():
+ ansible_vars['vcenter_port'] = '1443'
+
+ for key, value in ansible_vars.items():
+ if key.endswith('_password'):
+ display.sensitive.add(value)
+
+ return CloudEnvironmentConfig(
+ env_vars=env_vars,
+ ansible_vars=ansible_vars,
+ module_defaults={
+ 'group/vmware': {
+ 'hostname': ansible_vars['vcenter_hostname'],
+ 'username': ansible_vars['vcenter_username'],
+ 'password': ansible_vars['vcenter_password'],
+ 'port': ansible_vars.get('vcenter_port', '443'),
+ 'validate_certs': ansible_vars.get('vmware_validate_certs', 'no'),
+ },
+ },
+ )
diff --git a/test/lib/ansible_test/_internal/cloud/vultr.py b/test/lib/ansible_test/_internal/cloud/vultr.py
new file mode 100644
index 00000000..ce6184f7
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cloud/vultr.py
@@ -0,0 +1,71 @@
+"""Vultr plugin for integration tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from . import (
+ CloudProvider,
+ CloudEnvironment,
+ CloudEnvironmentConfig,
+)
+
+from ..util import (
+ ConfigParser,
+ display,
+)
+
+
+class VultrCloudProvider(CloudProvider):
+ """Checks if a configuration file has been passed or fixtures are going to be used for testing"""
+
+ def __init__(self, args):
+ """
+ :type args: TestConfig
+ """
+ super(VultrCloudProvider, self).__init__(args)
+
+ def filter(self, targets, exclude):
+ """Filter out the cloud tests when the necessary config and resources are not available.
+ :type targets: tuple[TestTarget]
+ :type exclude: list[str]
+ """
+ if os.path.isfile(self.config_static_path):
+ return
+
+ super(VultrCloudProvider, self).filter(targets, exclude)
+
+ def setup(self):
+ """Setup the cloud resource before delegation and register a cleanup callback."""
+ super(VultrCloudProvider, self).setup()
+
+ if os.path.isfile(self.config_static_path):
+ self.config_path = self.config_static_path
+ self.managed = False
+
+
+class VultrCloudEnvironment(CloudEnvironment):
+ """
+ Updates the integration test environment after delegation. Values from the config file are exposed as environment and Ansible variables.
+ """
+ def get_environment_config(self):
+ """
+ :rtype: CloudEnvironmentConfig
+ """
+ parser = ConfigParser()
+ parser.read(self.config_path)
+
+ env_vars = dict(
+ VULTR_API_KEY=parser.get('default', 'key'),
+ )
+
+ display.sensitive.add(env_vars['VULTR_API_KEY'])
+
+ ansible_vars = dict(
+ vultr_resource_prefix=self.resource_prefix,
+ )
+
+ return CloudEnvironmentConfig(
+ env_vars=env_vars,
+ ansible_vars=ansible_vars,
+ )
diff --git a/test/lib/ansible_test/_internal/config.py b/test/lib/ansible_test/_internal/config.py
new file mode 100644
index 00000000..a3c31959
--- /dev/null
+++ b/test/lib/ansible_test/_internal/config.py
@@ -0,0 +1,356 @@
+"""Configuration classes."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import sys
+
+from . import types as t
+
+from .util import (
+ find_python,
+ generate_pip_command,
+ ApplicationError,
+)
+
+from .util_common import (
+ docker_qualify_image,
+ get_docker_completion,
+ get_remote_completion,
+ CommonConfig,
+)
+
+from .metadata import (
+ Metadata,
+)
+
+from .data import (
+ data_context,
+)
+
+try:
+ TIntegrationConfig = t.TypeVar('TIntegrationConfig', bound='IntegrationConfig')
+except AttributeError:
+ TIntegrationConfig = None # pylint: disable=invalid-name
+
+
+class ParsedRemote:
+ """A parsed version of a "remote" string."""
+ def __init__(self, arch, platform, version): # type: (t.Optional[str], str, str) -> None
+ self.arch = arch
+ self.platform = platform
+ self.version = version
+
+ @staticmethod
+ def parse(value): # type: (str) -> t.Optional['ParsedRemote']
+ """Return a ParsedRemote from the given value or None if the syntax is invalid."""
+ parts = value.split('/')
+
+ if len(parts) == 2:
+ arch = None
+ platform, version = parts
+ elif len(parts) == 3:
+ arch, platform, version = parts
+ else:
+ return None
+
+ return ParsedRemote(arch, platform, version)
+
+
+class EnvironmentConfig(CommonConfig):
+ """Configuration common to all commands which execute in an environment."""
+ def __init__(self, args, command):
+ """
+ :type args: any
+ :type command: str
+ """
+ super(EnvironmentConfig, self).__init__(args, command)
+
+ self.local = args.local is True
+ self.venv = args.venv
+ self.venv_system_site_packages = args.venv_system_site_packages
+
+ self.python = args.python if 'python' in args else None # type: str
+
+ self.docker = docker_qualify_image(args.docker) # type: str
+ self.docker_raw = args.docker # type: str
+ self.remote = args.remote # type: str
+
+ if self.remote:
+ self.parsed_remote = ParsedRemote.parse(self.remote)
+
+ if not self.parsed_remote or not self.parsed_remote.platform or not self.parsed_remote.version:
+ raise ApplicationError('Unrecognized remote "%s" syntax. Use "platform/version" or "arch/platform/version".' % self.remote)
+ else:
+ self.parsed_remote = None
+
+ self.docker_privileged = args.docker_privileged if 'docker_privileged' in args else False # type: bool
+ self.docker_pull = args.docker_pull if 'docker_pull' in args else False # type: bool
+ self.docker_keep_git = args.docker_keep_git if 'docker_keep_git' in args else False # type: bool
+ self.docker_seccomp = args.docker_seccomp if 'docker_seccomp' in args else None # type: str
+ self.docker_memory = args.docker_memory if 'docker_memory' in args else None
+ self.docker_terminate = args.docker_terminate if 'docker_terminate' in args else None # type: str
+ self.docker_network = args.docker_network if 'docker_network' in args else None # type: str
+
+ if self.docker_seccomp is None:
+ self.docker_seccomp = get_docker_completion().get(self.docker_raw, {}).get('seccomp', 'default')
+
+ self.remote_stage = args.remote_stage # type: str
+ self.remote_provider = args.remote_provider # type: str
+ self.remote_endpoint = args.remote_endpoint # type: t.Optional[str]
+ self.remote_aws_region = args.remote_aws_region # type: str
+ self.remote_terminate = args.remote_terminate # type: str
+
+ if self.remote_provider == 'default':
+ self.remote_provider = None
+
+ self.requirements = args.requirements # type: bool
+
+ if self.python == 'default':
+ self.python = None
+
+ actual_major_minor = '.'.join(str(i) for i in sys.version_info[:2])
+
+ self.python_version = self.python or actual_major_minor
+ self.python_interpreter = args.python_interpreter
+
+ self.pip_check = args.pip_check
+
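+ # Tests are delegated whenever a docker container, remote instance or virtualenv is requested.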
+ self.delegate = self.docker or self.remote or self.venv
+ self.delegate_args = [] # type: t.List[str]
+
+ if self.delegate:
+ self.requirements = True
+
+ self.inject_httptester = args.inject_httptester if 'inject_httptester' in args else False # type: bool
+ self.httptester = docker_qualify_image(args.httptester if 'httptester' in args else '') # type: str
+
+ if self.get_delegated_completion().get('httptester', 'enabled') == 'disabled':
+ self.httptester = False
+
+ if self.get_delegated_completion().get('pip-check', 'enabled') == 'disabled':
+ self.pip_check = False
+
+ if args.check_python and args.check_python != actual_major_minor:
+ raise ApplicationError('Running under Python %s instead of Python %s as expected.' % (actual_major_minor, args.check_python))
+
+ if self.docker_keep_git:
+ def git_callback(files): # type: (t.List[t.Tuple[str, str]]) -> None
+ """Add files from the content root .git directory to the payload file list."""
+ for dirpath, _dirnames, filenames in os.walk(os.path.join(data_context().content.root, '.git')):
+ paths = [os.path.join(dirpath, filename) for filename in filenames]
+ files.extend((path, os.path.relpath(path, data_context().content.root)) for path in paths)
+
+ data_context().register_payload_callback(git_callback)
+
+ @property
+ def python_executable(self):
+ """
+ :rtype: str
+ """
+ return find_python(self.python_version)
+
+ @property
+ def pip_command(self):
+ """
+ :rtype: list[str]
+ """
+ return generate_pip_command(self.python_executable)
+
+ def get_delegated_completion(self):
+ """Returns a dictionary of settings specific to the selected delegation system, if any. Otherwise returns an empty dictionary.
+ :rtype: dict[str, str]
+ """
+ if self.docker:
+ return get_docker_completion().get(self.docker_raw, {})
+
+ if self.remote:
+ return get_remote_completion().get(self.remote, {})
+
+ return {}
+
+
+class TestConfig(EnvironmentConfig):
+ """Configuration common to all test commands."""
+ def __init__(self, args, command):
+ """
+ :type args: any
+ :type command: str
+ """
+ super(TestConfig, self).__init__(args, command)
+
+ self.coverage = args.coverage # type: bool
+ self.coverage_label = args.coverage_label # type: str
+ self.coverage_check = args.coverage_check # type: bool
+ self.coverage_config_base_path = None # type: t.Optional[str]
+ self.include = args.include or [] # type: t.List[str]
+ self.exclude = args.exclude or [] # type: t.List[str]
+ self.require = args.require or [] # type: t.List[str]
+
+ self.changed = args.changed # type: bool
+ self.tracked = args.tracked # type: bool
+ self.untracked = args.untracked # type: bool
+ self.committed = args.committed # type: bool
+ self.staged = args.staged # type: bool
+ self.unstaged = args.unstaged # type: bool
+ self.changed_from = args.changed_from # type: str
+ self.changed_path = args.changed_path # type: t.List[str]
+ self.base_branch = args.base_branch # type: str
+
+ self.lint = args.lint if 'lint' in args else False # type: bool
+ self.junit = args.junit if 'junit' in args else False # type: bool
+ self.failure_ok = args.failure_ok if 'failure_ok' in args else False # type: bool
+
+ self.metadata = Metadata.from_file(args.metadata) if args.metadata else Metadata()
+ self.metadata_path = None
+
+ if self.coverage_check:
+ self.coverage = True
+
+ def metadata_callback(files): # type: (t.List[t.Tuple[str, str]]) -> None
+ """Add the metadata file to the payload file list."""
+ config = self
+
+ if self.metadata_path:
+ files.append((os.path.abspath(config.metadata_path), config.metadata_path))
+
+ data_context().register_payload_callback(metadata_callback)
+
+
+class ShellConfig(EnvironmentConfig):
+ """Configuration for the shell command."""
+ def __init__(self, args):
+ """
+ :type args: any
+ """
+ super(ShellConfig, self).__init__(args, 'shell')
+
+ self.raw = args.raw # type: bool
+
+ if self.raw:
+ self.httptester = False
+
+
+class SanityConfig(TestConfig):
+ """Configuration for the sanity command."""
+ def __init__(self, args):
+ """
+ :type args: any
+ """
+ super(SanityConfig, self).__init__(args, 'sanity')
+
+ self.test = args.test # type: t.List[str]
+ self.skip_test = args.skip_test # type: t.List[str]
+ self.list_tests = args.list_tests # type: bool
+ self.allow_disabled = args.allow_disabled # type: bool
+ self.enable_optional_errors = args.enable_optional_errors # type: bool
+ self.info_stderr = self.lint
+
+
+class IntegrationConfig(TestConfig):
+ """Configuration for the integration command."""
+ def __init__(self, args, command):
+ """
+ :type args: any
+ :type command: str
+ """
+ super(IntegrationConfig, self).__init__(args, command)
+
+ self.start_at = args.start_at # type: str
+ self.start_at_task = args.start_at_task # type: str
+ self.allow_destructive = args.allow_destructive # type: bool
+ self.allow_root = args.allow_root # type: bool
+ self.allow_disabled = args.allow_disabled # type: bool
+ self.allow_unstable = args.allow_unstable # type: bool
+ self.allow_unstable_changed = args.allow_unstable_changed # type: bool
+ self.allow_unsupported = args.allow_unsupported # type: bool
+ self.retry_on_error = args.retry_on_error # type: bool
+ self.continue_on_error = args.continue_on_error # type: bool
+ self.debug_strategy = args.debug_strategy # type: bool
+ self.changed_all_target = args.changed_all_target # type: str
+ self.changed_all_mode = args.changed_all_mode # type: str
+ self.list_targets = args.list_targets # type: bool
+ self.tags = args.tags
+ self.skip_tags = args.skip_tags
+ self.diff = args.diff
+ self.no_temp_workdir = args.no_temp_workdir
+ self.no_temp_unicode = args.no_temp_unicode
+
+ if self.get_delegated_completion().get('temp-unicode', 'enabled') == 'disabled':
+ self.no_temp_unicode = True
+
+ if self.list_targets:
+ self.explain = True
+ self.info_stderr = True
+
+ def get_ansible_config(self): # type: () -> str
+ """Return the path to the Ansible config for the given config."""
+ ansible_config_relative_path = os.path.join(data_context().content.integration_path, '%s.cfg' % self.command)
+ ansible_config_path = os.path.join(data_context().content.root, ansible_config_relative_path)
+
+ if not os.path.exists(ansible_config_path):
+ # use the default empty configuration unless one has been provided
+ ansible_config_path = super(IntegrationConfig, self).get_ansible_config()
+
+ return ansible_config_path
+
+
+class PosixIntegrationConfig(IntegrationConfig):
+ """Configuration for the posix integration command."""
+
+ def __init__(self, args):
+ """
+ :type args: any
+ """
+ super(PosixIntegrationConfig, self).__init__(args, 'integration')
+
+
+class WindowsIntegrationConfig(IntegrationConfig):
+ """Configuration for the windows integration command."""
+
+ def __init__(self, args):
+ """
+ :type args: any
+ """
+ super(WindowsIntegrationConfig, self).__init__(args, 'windows-integration')
+
+ self.windows = args.windows # type: t.List[str]
+ self.inventory = args.inventory # type: str
+
+ if self.windows:
+ self.allow_destructive = True
+
+
+class NetworkIntegrationConfig(IntegrationConfig):
+ """Configuration for the network integration command."""
+
+ def __init__(self, args):
+ """
+ :type args: any
+ """
+ super(NetworkIntegrationConfig, self).__init__(args, 'network-integration')
+
+ self.platform = args.platform # type: t.List[str]
+ self.platform_collection = dict(args.platform_collection or []) # type: t.Dict[str, str]
+ self.platform_connection = dict(args.platform_connection or []) # type: t.Dict[str, str]
+ self.inventory = args.inventory # type: str
+ self.testcase = args.testcase # type: str
+
+
+class UnitsConfig(TestConfig):
+ """Configuration for the units command."""
+ def __init__(self, args):
+ """
+ :type args: any
+ """
+ super(UnitsConfig, self).__init__(args, 'units')
+
+ self.collect_only = args.collect_only # type: bool
+ self.num_workers = args.num_workers # type: int
+
+ self.requirements_mode = args.requirements_mode if 'requirements_mode' in args else ''
+
+ if self.requirements_mode == 'only':
+ self.requirements = True
+ elif self.requirements_mode == 'skip':
+ self.requirements = False
diff --git a/test/lib/ansible_test/_internal/constants.py b/test/lib/ansible_test/_internal/constants.py
new file mode 100644
index 00000000..f4307822
--- /dev/null
+++ b/test/lib/ansible_test/_internal/constants.py
@@ -0,0 +1,10 @@
+"""Constants used by ansible-test. Imports should not be used in this file."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+# Setting a low soft RLIMIT_NOFILE value will improve the performance of subprocess.Popen on Python 2.x when close_fds=True.
+# This will affect all Python subprocesses. It will also affect the current Python process if set before subprocess is imported for the first time.
+SOFT_RLIMIT_NOFILE = 1024
+
+# File used to track the ansible-test test execution timeout.
+TIMEOUT_PATH = '.ansible-test-timeout.json'
diff --git a/test/lib/ansible_test/_internal/core_ci.py b/test/lib/ansible_test/_internal/core_ci.py
new file mode 100644
index 00000000..c984f4fe
--- /dev/null
+++ b/test/lib/ansible_test/_internal/core_ci.py
@@ -0,0 +1,680 @@
+"""Access Ansible Core CI remote services."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+import re
+import traceback
+import uuid
+import errno
+import time
+
+from . import types as t
+
+from .http import (
+ HttpClient,
+ HttpResponse,
+ HttpError,
+)
+
+from .io import (
+ make_dirs,
+ read_text_file,
+ write_json_file,
+ write_text_file,
+)
+
+from .util import (
+ ApplicationError,
+ display,
+ ANSIBLE_TEST_DATA_ROOT,
+)
+
+from .util_common import (
+ run_command,
+ ResultType,
+)
+
+from .config import (
+ EnvironmentConfig,
+)
+
+from .ci import (
+ AuthContext,
+ get_ci_provider,
+)
+
+from .data import (
+ data_context,
+)
+
+AWS_ENDPOINTS = {
+ 'us-east-1': 'https://ansible-core-ci.testing.ansible.com',
+}
+
+
+class AnsibleCoreCI:
+ """Client for Ansible Core CI services."""
+ def __init__(self, args, platform, version, stage='prod', persist=True, load=True, provider=None, arch=None):
+ """
+ :type args: EnvironmentConfig
+ :type platform: str
+ :type version: str
+ :type stage: str
+ :type persist: bool
+ :type load: bool
+ :type provider: str | None
+ :type arch: str | None
+ """
+ self.args = args
+ self.arch = arch
+ self.platform = platform
+ self.version = version
+ self.stage = stage
+ self.client = HttpClient(args)
+ self.connection = None
+ self.instance_id = None
+ self.endpoint = None
+ self.max_threshold = 1
+ self.retries = 3
+ self.ci_provider = get_ci_provider()
+ self.auth_context = AuthContext()
+
+ if self.arch:
+ self.name = '%s-%s-%s' % (self.arch, self.platform, self.version)
+ else:
+ self.name = '%s-%s' % (self.platform, self.version)
+
+ # Assign each supported platform to one provider.
+ # This is used to determine the provider from the platform when no provider is specified.
+ providers = dict(
+ aws=(
+ 'aws',
+ 'windows',
+ 'freebsd',
+ 'vyos',
+ 'junos',
+ 'ios',
+ 'tower',
+ 'rhel',
+ 'hetzner',
+ ),
+ azure=(
+ 'azure',
+ ),
+ ibmps=(
+ 'aix',
+ 'ibmi',
+ ),
+ ibmvpc=(
+ 'centos arch=power', # only match centos when the power arch is explicitly requested, so plain centos does not default to ibmvpc
+ ),
+ parallels=(
+ 'macos',
+ 'osx',
+ ),
+ )
+
+ # Currently ansible-core-ci has no concept of arch selection. This effectively means each provider only supports one arch.
+ # The list below identifies which platforms accept an arch, and which one. These platforms can only be used with the specified arch.
+ provider_arches = dict(
+ ibmvpc='power',
+ )
+
+ if provider:
+ # override default provider selection (not all combinations are valid)
+ self.provider = provider
+ else:
+ self.provider = None
+
+ for candidate in providers:
+ choices = [
+ platform,
+ '%s arch=%s' % (platform, arch),
+ ]
+
+ if any(choice in providers[candidate] for choice in choices):
+ # assign default provider based on platform
+ self.provider = candidate
+ break
+
+ # If a provider has been selected, make sure the correct arch (or none) has been selected.
+ if self.provider:
+ required_arch = provider_arches.get(self.provider)
+
+ if self.arch != required_arch:
+ if required_arch:
+ if self.arch:
+ raise ApplicationError('Provider "%s" requires the "%s" arch instead of "%s".' % (self.provider, required_arch, self.arch))
+
+ raise ApplicationError('Provider "%s" requires the "%s" arch.' % (self.provider, required_arch))
+
+ raise ApplicationError('Provider "%s" does not support specification of an arch.' % self.provider)
+
+ self.path = os.path.expanduser('~/.ansible/test/instances/%s-%s-%s' % (self.name, self.provider, self.stage))
+
+ if self.provider in ('aws', 'azure', 'ibmps', 'ibmvpc'):
+ if args.remote_aws_region:
+ display.warning('The --remote-aws-region option is obsolete and will be removed in a future version of ansible-test.')
+ # permit command-line override of region selection
+ region = args.remote_aws_region
+ # use a dedicated CI key when overriding the region selection
+ self.auth_context.region = args.remote_aws_region
+ else:
+ region = 'us-east-1'
+
+ self.path = "%s-%s" % (self.path, region)
+
+ if self.args.remote_endpoint:
+ self.endpoints = (self.args.remote_endpoint,)
+ else:
+ self.endpoints = (AWS_ENDPOINTS[region],)
+
+ self.ssh_key = SshKey(args)
+
+ if self.platform == 'windows':
+ self.port = 5986
+ else:
+ self.port = 22
+
+ if self.provider == 'ibmps':
+ # Additional retries are needed to accommodate images transitioning
+ # to the active state in the IBM cloud. This operation can take up to
+ # 90 seconds.
+ self.retries = 7
+ elif self.provider == 'parallels':
+ if self.args.remote_endpoint:
+ self.endpoints = (self.args.remote_endpoint,)
+ else:
+ self.endpoints = (AWS_ENDPOINTS['us-east-1'],)
+
+ self.ssh_key = SshKey(args)
+ self.port = None
+ else:
+ if self.arch:
+ raise ApplicationError('Provider not detected for platform "%s" on arch "%s".' % (self.platform, self.arch))
+
+ raise ApplicationError('Provider not detected for platform "%s" with no arch specified.' % self.platform)
+
+ if persist and load and self._load():
+ try:
+ display.info('Checking existing %s/%s instance %s.' % (self.platform, self.version, self.instance_id),
+ verbosity=1)
+
+ self.connection = self.get(always_raise_on=[404])
+
+ display.info('Loaded existing %s/%s from: %s' % (self.platform, self.version, self._uri), verbosity=1)
+ except HttpError as ex:
+ if ex.status != 404:
+ raise
+
+ self._clear()
+
+ display.info('Cleared stale %s/%s instance %s.' % (self.platform, self.version, self.instance_id),
+ verbosity=1)
+
+ self.instance_id = None
+ self.endpoint = None
+ elif not persist:
+ self.instance_id = None
+ self.endpoint = None
+ self._clear()
+
+ if self.instance_id:
+ self.started = True
+ else:
+ self.started = False
+ self.instance_id = str(uuid.uuid4())
+ self.endpoint = None
+
+ display.sensitive.add(self.instance_id)
+
+ def _get_parallels_endpoints(self):
+ """
+ :rtype: tuple[str]
+ """
+ client = HttpClient(self.args, always=True)
+ display.info('Getting available endpoints...', verbosity=1)
+ sleep = 3
+
+ for _iteration in range(1, 10):
+ response = client.get('https://ansible-ci-files.s3.amazonaws.com/ansible-test/parallels-endpoints.txt')
+
+ if response.status_code == 200:
+ endpoints = tuple(response.response.splitlines())
+ display.info('Available endpoints (%d):\n%s' % (len(endpoints), '\n'.join(' - %s' % endpoint for endpoint in endpoints)), verbosity=1)
+ return endpoints
+
+ display.warning('HTTP %d error getting endpoints, trying again in %d seconds.' % (response.status_code, sleep))
+ time.sleep(sleep)
+
+ raise ApplicationError('Unable to get available endpoints.')
+
+ @property
+ def available(self):
+ """Return True if Ansible Core CI is supported."""
+ return self.ci_provider.supports_core_ci_auth(self.auth_context)
+
+ def start(self):
+ """Start instance."""
+ if self.started:
+ display.info('Skipping started %s/%s instance %s.' % (self.platform, self.version, self.instance_id),
+ verbosity=1)
+ return None
+
+ return self._start(self.ci_provider.prepare_core_ci_auth(self.auth_context))
+
+ def stop(self):
+ """Stop instance."""
+ if not self.started:
+ display.info('Skipping invalid %s/%s instance %s.' % (self.platform, self.version, self.instance_id),
+ verbosity=1)
+ return
+
+ response = self.client.delete(self._uri)
+
+ if response.status_code == 404:
+ self._clear()
+ display.info('Cleared invalid %s/%s instance %s.' % (self.platform, self.version, self.instance_id),
+ verbosity=1)
+ return
+
+ if response.status_code == 200:
+ self._clear()
+ display.info('Stopped running %s/%s instance %s.' % (self.platform, self.version, self.instance_id),
+ verbosity=1)
+ return
+
+ raise self._create_http_error(response)
+
+ def get(self, tries=3, sleep=15, always_raise_on=None):
+ """
+ Get instance connection information.
+ :type tries: int
+ :type sleep: int
+ :type always_raise_on: list[int] | None
+ :rtype: InstanceConnection
+ """
+ if not self.started:
+ display.info('Skipping invalid %s/%s instance %s.' % (self.platform, self.version, self.instance_id),
+ verbosity=1)
+ return None
+
+ if not always_raise_on:
+ always_raise_on = []
+
+ if self.connection and self.connection.running:
+ return self.connection
+
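+ # Poll the service until a successful response is received or the available tries are exhausted.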
+ while True:
+ tries -= 1
+ response = self.client.get(self._uri)
+
+ if response.status_code == 200:
+ break
+
+ error = self._create_http_error(response)
+
+ if not tries or response.status_code in always_raise_on:
+ raise error
+
+ display.warning('%s. Trying again after %d seconds.' % (error, sleep))
+ time.sleep(sleep)
+
+ if self.args.explain:
+ self.connection = InstanceConnection(
+ running=True,
+ hostname='cloud.example.com',
+ port=self.port or 12345,
+ username='username',
+ password='password' if self.platform == 'windows' else None,
+ )
+ else:
+ response_json = response.json()
+ status = response_json['status']
+ con = response_json.get('connection')
+
+ if con:
+ self.connection = InstanceConnection(
+ running=status == 'running',
+ hostname=con['hostname'],
+ port=int(con.get('port', self.port)),
+ username=con['username'],
+ password=con.get('password'),
+ response_json=response_json,
+ )
+ else:
+ self.connection = InstanceConnection(
+ running=status == 'running',
+ response_json=response_json,
+ )
+
+ if self.connection.password:
+ display.sensitive.add(str(self.connection.password))
+
+ status = 'running' if self.connection.running else 'starting'
+
+ display.info('Status update: %s/%s on instance %s is %s.' %
+ (self.platform, self.version, self.instance_id, status),
+ verbosity=1)
+
+ return self.connection
+
+ def wait(self, iterations=90): # type: (t.Optional[int]) -> None
+ """Wait for the instance to become ready."""
+ for _iteration in range(1, iterations):
+ if self.get().running:
+ return
+ time.sleep(10)
+
+ raise ApplicationError('Timeout waiting for %s/%s instance %s.' %
+ (self.platform, self.version, self.instance_id))
+
+ @property
+ def _uri(self):
+ return '%s/%s/%s/%s' % (self.endpoint, self.stage, self.provider, self.instance_id)
+
+ def _start(self, auth):
+ """Start instance."""
+ display.info('Initializing new %s/%s instance %s.' % (self.platform, self.version, self.instance_id), verbosity=1)
+
+ if self.platform == 'windows':
+ winrm_config = read_text_file(os.path.join(ANSIBLE_TEST_DATA_ROOT, 'setup', 'ConfigureRemotingForAnsible.ps1'))
+ else:
+ winrm_config = None
+
+ data = dict(
+ config=dict(
+ platform=self.platform,
+ version=self.version,
+ public_key=self.ssh_key.pub_contents if self.ssh_key else None,
+ query=False,
+ winrm_config=winrm_config,
+ )
+ )
+
+ data.update(dict(auth=auth))
+
+ headers = {
+ 'Content-Type': 'application/json',
+ }
+
+ response = self._start_try_endpoints(data, headers)
+
+ self.started = True
+ self._save()
+
+ display.info('Started %s/%s from: %s' % (self.platform, self.version, self._uri), verbosity=1)
+
+ if self.args.explain:
+ return {}
+
+ return response.json()
+
+ def _start_try_endpoints(self, data, headers):
+ """
+ :type data: dict[str, any]
+ :type headers: dict[str, str]
+ :rtype: HttpResponse
+ """
+ threshold = 1
+
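+ # Try every endpoint at the current threshold before increasing the threshold and trying again.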
+ while threshold <= self.max_threshold:
+ for self.endpoint in self.endpoints:
+ try:
+ return self._start_at_threshold(data, headers, threshold)
+ except CoreHttpError as ex:
+ if ex.status == 503:
+ display.info('Service Unavailable: %s' % ex.remote_message, verbosity=1)
+ continue
+ display.error(ex.remote_message)
+ except HttpError as ex:
+ display.error(u'%s' % ex)
+
+ time.sleep(3)
+
+ threshold += 1
+
+ raise ApplicationError('Maximum threshold reached and all endpoints exhausted.')
+
+ def _start_at_threshold(self, data, headers, threshold):
+ """
+ :type data: dict[str, any]
+ :type headers: dict[str, str]
+ :type threshold: int
+ :rtype: HttpResponse | None
+ """
+ tries = self.retries
+ sleep = 15
+
+ data['threshold'] = threshold
+
+ display.info('Trying endpoint: %s (threshold %d)' % (self.endpoint, threshold), verbosity=1)
+
+ while True:
+ tries -= 1
+ response = self.client.put(self._uri, data=json.dumps(data), headers=headers)
+
+ if response.status_code == 200:
+ return response
+
+ error = self._create_http_error(response)
+
+ if response.status_code == 503:
+ raise error
+
+ if not tries:
+ raise error
+
+ display.warning('%s. Trying again after %d seconds.' % (error, sleep))
+ time.sleep(sleep)
+
+ def _clear(self):
+ """Clear instance information."""
+ try:
+ self.connection = None
+ os.remove(self.path)
+ except OSError as ex:
+ if ex.errno != errno.ENOENT:
+ raise
+
+ def _load(self):
+ """Load instance information."""
+ try:
+ data = read_text_file(self.path)
+ except IOError as ex:
+ if ex.errno != errno.ENOENT:
+ raise
+
+ return False
+
+ if not data.startswith('{'):
+ return False # legacy format
+
+ config = json.loads(data)
+
+ return self.load(config)
+
+ def load(self, config):
+ """
+ :type config: dict[str, str]
+ :rtype: bool
+ """
+ self.instance_id = str(config['instance_id'])
+ self.endpoint = config['endpoint']
+ self.started = True
+
+ display.sensitive.add(self.instance_id)
+
+ return True
+
+ def _save(self):
+ """Save instance information."""
+ if self.args.explain:
+ return
+
+ config = self.save()
+
+ write_json_file(self.path, config, create_directories=True)
+
+ def save(self):
+ """
+ :rtype: dict[str, str]
+ """
+ return dict(
+ platform_version='%s/%s' % (self.platform, self.version),
+ instance_id=self.instance_id,
+ endpoint=self.endpoint,
+ )
+
+ @staticmethod
+ def _create_http_error(response):
+ """
+ :type response: HttpResponse
+ :rtype: ApplicationError
+ """
+ response_json = response.json()
+ stack_trace = ''
+
+ if 'message' in response_json:
+ message = response_json['message']
+ elif 'errorMessage' in response_json:
+ message = response_json['errorMessage'].strip()
+ if 'stackTrace' in response_json:
+ traceback_lines = response_json['stackTrace']
+
+ # AWS Lambda on Python 2.7 returns a list of tuples
+ # AWS Lambda on Python 3.7 returns a list of strings
+ if traceback_lines and isinstance(traceback_lines[0], list):
+ traceback_lines = traceback.format_list(traceback_lines)
+
+ trace = '\n'.join([x.rstrip() for x in traceback_lines])
+ stack_trace = ('\nTraceback (from remote server):\n%s' % trace)
+ else:
+ message = str(response_json)
+
+ return CoreHttpError(response.status_code, message, stack_trace)
+
+
+class CoreHttpError(HttpError):
+ """HTTP response as an error."""
+ def __init__(self, status, remote_message, remote_stack_trace):
+ """
+ :type status: int
+ :type remote_message: str
+ :type remote_stack_trace: str
+ """
+ super(CoreHttpError, self).__init__(status, '%s%s' % (remote_message, remote_stack_trace))
+
+ self.remote_message = remote_message
+ self.remote_stack_trace = remote_stack_trace
+
+
+class SshKey:
+ """Container for SSH key used to connect to remote instances."""
+ KEY_NAME = 'id_rsa'
+ PUB_NAME = 'id_rsa.pub'
+
+ def __init__(self, args):
+ """
+ :type args: EnvironmentConfig
+ """
+ key_pair = self.get_key_pair()
+
+ if not key_pair:
+ key_pair = self.generate_key_pair(args)
+
+ key, pub = key_pair
+ key_dst, pub_dst = self.get_in_tree_key_pair_paths()
+
+ def ssh_key_callback(files): # type: (t.List[t.Tuple[str, str]]) -> None
+ """
+ Add the SSH keys to the payload file list.
+ They are either outside the source tree or in the cache dir which is ignored by default.
+ """
+ files.append((key, os.path.relpath(key_dst, data_context().content.root)))
+ files.append((pub, os.path.relpath(pub_dst, data_context().content.root)))
+
+ data_context().register_payload_callback(ssh_key_callback)
+
+ self.key, self.pub = key, pub
+
+ if args.explain:
+ self.pub_contents = None
+ else:
+ self.pub_contents = read_text_file(self.pub).strip()
+
+ def get_in_tree_key_pair_paths(self): # type: () -> t.Optional[t.Tuple[str, str]]
+ """Return the ansible-test SSH key pair paths from the content tree."""
+ temp_dir = ResultType.TMP.path
+
+ key = os.path.join(temp_dir, self.KEY_NAME)
+ pub = os.path.join(temp_dir, self.PUB_NAME)
+
+ return key, pub
+
+ def get_source_key_pair_paths(self): # type: () -> t.Optional[t.Tuple[str, str]]
+ """Return the ansible-test SSH key pair paths for the current user."""
+ base_dir = os.path.expanduser('~/.ansible/test/')
+
+ key = os.path.join(base_dir, self.KEY_NAME)
+ pub = os.path.join(base_dir, self.PUB_NAME)
+
+ return key, pub
+
+ def get_key_pair(self): # type: () -> t.Optional[t.Tuple[str, str]]
+ """Return the ansible-test SSH key pair paths if present, otherwise return None."""
+ key, pub = self.get_in_tree_key_pair_paths()
+
+ if os.path.isfile(key) and os.path.isfile(pub):
+ return key, pub
+
+ key, pub = self.get_source_key_pair_paths()
+
+ if os.path.isfile(key) and os.path.isfile(pub):
+ return key, pub
+
+ return None
+
+ def generate_key_pair(self, args): # type: (EnvironmentConfig) -> t.Tuple[str, str]
+ """Generate an SSH key pair for use by all ansible-test invocations for the current user."""
+ key, pub = self.get_source_key_pair_paths()
+
+ if not args.explain:
+ make_dirs(os.path.dirname(key))
+
+ if not os.path.isfile(key) or not os.path.isfile(pub):
+ run_command(args, ['ssh-keygen', '-m', 'PEM', '-q', '-t', 'rsa', '-N', '', '-f', key])
+
+ # newer ssh-keygen PEM output (such as on RHEL 8.1) is not recognized by paramiko
+ key_contents = read_text_file(key)
+ key_contents = re.sub(r'(BEGIN|END) PRIVATE KEY', r'\1 RSA PRIVATE KEY', key_contents)
+
+ write_text_file(key, key_contents)
+
+ return key, pub
+
+
+class InstanceConnection:
+ """Container for remote instance status and connection details."""
+ def __init__(self,
+ running, # type: bool
+ hostname=None, # type: t.Optional[str]
+ port=None, # type: t.Optional[int]
+ username=None, # type: t.Optional[str]
+ password=None, # type: t.Optional[str]
+ response_json=None, # type: t.Optional[t.Dict[str, t.Any]]
+ ): # type: (...) -> None
+ self.running = running
+ self.hostname = hostname
+ self.port = port
+ self.username = username
+ self.password = password
+ self.response_json = response_json or {}
+
+ def __str__(self):
+ if self.password:
+ return '%s:%s [%s:%s]' % (self.hostname, self.port, self.username, self.password)
+
+ return '%s:%s [%s]' % (self.hostname, self.port, self.username)
diff --git a/test/lib/ansible_test/_internal/coverage/__init__.py b/test/lib/ansible_test/_internal/coverage/__init__.py
new file mode 100644
index 00000000..ebb1ca22
--- /dev/null
+++ b/test/lib/ansible_test/_internal/coverage/__init__.py
@@ -0,0 +1,325 @@
+"""Common logic for the coverage subcommand."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import re
+
+from .. import types as t
+
+from ..encoding import (
+ to_bytes,
+)
+
+from ..io import (
+ open_binary_file,
+ read_json_file,
+)
+
+from ..util import (
+ ApplicationError,
+ common_environment,
+ display,
+ ANSIBLE_TEST_DATA_ROOT,
+)
+
+from ..util_common import (
+ intercept_command,
+ ResultType,
+)
+
+from ..config import (
+ EnvironmentConfig,
+)
+
+from ..executor import (
+ Delegate,
+ install_command_requirements,
+)
+
+from .. target import (
+ walk_module_targets,
+)
+
+from ..data import (
+ data_context,
+)
+
+if t.TYPE_CHECKING:
+ import coverage as coverage_module
+
+COVERAGE_GROUPS = ('command', 'target', 'environment', 'version')
+COVERAGE_CONFIG_PATH = os.path.join(ANSIBLE_TEST_DATA_ROOT, 'coveragerc')
+COVERAGE_OUTPUT_FILE_NAME = 'coverage'
+
+
+class CoverageConfig(EnvironmentConfig):
+ """Configuration for the coverage command."""
+ def __init__(self, args): # type: (t.Any) -> None
+ super(CoverageConfig, self).__init__(args, 'coverage')
+
+ self.group_by = frozenset(args.group_by) if 'group_by' in args and args.group_by else set() # type: t.FrozenSet[str]
+ self.all = args.all if 'all' in args else False # type: bool
+ self.stub = args.stub if 'stub' in args else False # type: bool
+ self.export = args.export if 'export' in args else None # type: str
+ self.coverage = False # temporary work-around to support intercept_command in cover.py
+
+
+def initialize_coverage(args): # type: (CoverageConfig) -> coverage_module
+ """Delegate execution if requested, install requirements, then import and return the coverage module. Raises an exception if coverage is not available."""
+ if args.delegate:
+ raise Delegate()
+
+ if args.requirements:
+ install_command_requirements(args)
+
+ try:
+ import coverage
+ except ImportError:
+ coverage = None
+
+ if not coverage:
+ raise ApplicationError('You must install the "coverage" python module to use this command.')
+
+ coverage_version_string = coverage.__version__
+ coverage_version = tuple(int(v) for v in coverage_version_string.split('.'))
+
+ min_version = (4, 2)
+ max_version = (5, 0)
+
+ supported_version = True
+ recommended_version = '4.5.4'
+
+ if coverage_version < min_version or coverage_version >= max_version:
+ supported_version = False
+
+ if not supported_version:
+ raise ApplicationError('Version %s of "coverage" is not supported. Version %s is known to work and is recommended.' % (
+ coverage_version_string, recommended_version))
+
+ return coverage
+
+
+def run_coverage(args, output_file, command, cmd): # type: (CoverageConfig, str, str, t.List[str]) -> None
+ """Run the coverage cli tool with the specified options."""
+ env = common_environment()
+ env.update(dict(COVERAGE_FILE=output_file))
+
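+ # run the coverage module as a script using the ansible-test coverage config, writing results to the file given by COVERAGE_FILE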
+ cmd = ['python', '-m', 'coverage.__main__', command, '--rcfile', COVERAGE_CONFIG_PATH] + cmd
+
+ intercept_command(args, target_name='coverage', env=env, cmd=cmd, disable_coverage=True)
+
+
+def get_python_coverage_files(path=None): # type: (t.Optional[str]) -> t.List[str]
+ """Return the list of Python coverage file paths."""
+ return get_coverage_files('python', path)
+
+
+def get_powershell_coverage_files(path=None): # type: (t.Optional[str]) -> t.List[str]
+ """Return the list of PowerShell coverage file paths."""
+ return get_coverage_files('powershell', path)
+
+
+def get_coverage_files(language, path=None): # type: (str, t.Optional[str]) -> t.List[str]
+ """Return the list of coverage file paths for the given language."""
+ coverage_dir = path or ResultType.COVERAGE.path
+ coverage_files = [os.path.join(coverage_dir, f) for f in os.listdir(coverage_dir)
+ if '=coverage.' in f and '=%s' % language in f]
+
+ return coverage_files
+
+
+def get_collection_path_regexes(): # type: () -> t.Tuple[t.Optional[t.Pattern], t.Optional[t.Pattern]]
+ """Return a pair of regexes used for identifying and manipulating collection paths."""
+ if data_context().content.collection:
+ collection_search_re = re.compile(r'/%s/' % data_context().content.collection.directory)
+ collection_sub_re = re.compile(r'^.*?/%s/' % data_context().content.collection.directory)
+ else:
+ collection_search_re = None
+ collection_sub_re = None
+
+ return collection_search_re, collection_sub_re
+
+
+def get_python_modules(): # type: () -> t.Dict[str, str]
+ """Return a dictionary of Ansible module names and their paths."""
+ return dict((target.module, target.path) for target in list(walk_module_targets()) if target.path.endswith('.py'))
+
+
+def enumerate_python_arcs(
+ path, # type: str
+ coverage, # type: coverage_module
+ modules, # type: t.Dict[str, str]
+ collection_search_re, # type: t.Optional[t.Pattern]
+ collection_sub_re, # type: t.Optional[t.Pattern]
+): # type: (...) -> t.Generator[t.Tuple[str, t.Set[t.Tuple[int, int]]]]
+ """Enumerate Python code coverage arcs in the given file."""
+ if os.path.getsize(path) == 0:
+ display.warning('Empty coverage file: %s' % path, verbosity=2)
+ return
+
+ original = coverage.CoverageData()
+
+ try:
+ original.read_file(path)
+ except Exception as ex: # pylint: disable=locally-disabled, broad-except
+ with open_binary_file(path) as file:
+ header = file.read(6)
+
+ if header == b'SQLite':
+ display.error('File created by "coverage" 5.0+: %s' % os.path.relpath(path))
+ else:
+ display.error(u'%s' % ex)
+
+ return
+
+ for filename in original.measured_files():
+ arcs = original.arcs(filename)
+
+ if not arcs:
+ # This is most likely due to using an unsupported version of coverage.
+ display.warning('No arcs found for "%s" in coverage file: %s' % (filename, path))
+ continue
+
+ filename = sanitize_filename(filename, modules=modules, collection_search_re=collection_search_re, collection_sub_re=collection_sub_re)
+
+ if not filename:
+ continue
+
+ yield filename, set(arcs)
+
+
+def enumerate_powershell_lines(
+ path, # type: str
+ collection_search_re, # type: t.Optional[t.Pattern]
+ collection_sub_re, # type: t.Optional[t.Pattern]
+): # type: (...) -> t.Generator[t.Tuple[str, t.Dict[int, int]]]
+ """Enumerate PowerShell code coverage lines in the given file."""
+ if os.path.getsize(path) == 0:
+ display.warning('Empty coverage file: %s' % path, verbosity=2)
+ return
+
+ try:
+ coverage_run = read_json_file(path)
+ except Exception as ex: # pylint: disable=locally-disabled, broad-except
+ display.error(u'%s' % ex)
+ return
+
+ for filename, hits in coverage_run.items():
+ filename = sanitize_filename(filename, collection_search_re=collection_search_re, collection_sub_re=collection_sub_re)
+
+ if not filename:
+ continue
+
+ if isinstance(hits, dict) and not hits.get('Line'):
+ # Input data was previously aggregated and thus uses the standard ansible-test output format for PowerShell coverage.
+ # This format differs from the more verbose format of raw coverage data from the remote Windows hosts.
+ hits = dict((int(key), value) for key, value in hits.items())
+
+ yield filename, hits
+ continue
+
+ # PowerShell unpacks single-element arrays, so this is a defensive check for that case
+ if not isinstance(hits, list):
+ hits = [hits]
+
+ hits = dict((hit['Line'], hit['HitCount']) for hit in hits if hit)
+
+ yield filename, hits
+
+
+def sanitize_filename(
+ filename, # type: str
+ modules=None, # type: t.Optional[t.Dict[str, str]]
+ collection_search_re=None, # type: t.Optional[t.Pattern]
+ collection_sub_re=None, # type: t.Optional[t.Pattern]
+): # type: (...) -> t.Optional[str]
+ """Convert the given code coverage path to a local absolute path and return its, or None if the path is not valid."""
+ ansible_path = os.path.abspath('lib/ansible/') + '/'
+ root_path = data_context().content.root + '/'
+ integration_temp_path = os.path.sep + os.path.join(ResultType.TMP.relative_path, 'integration') + os.path.sep
+
+ if modules is None:
+ modules = {}
+
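+ # each branch below rewrites a path reported from a remote host, container or temporary directory back to the corresponding controller path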
+ if '/ansible_modlib.zip/ansible/' in filename:
+ # Rewrite the module_utils path from the remote host to match the controller. Ansible 2.6 and earlier.
+ new_name = re.sub('^.*/ansible_modlib.zip/ansible/', ansible_path, filename)
+ display.info('%s -> %s' % (filename, new_name), verbosity=3)
+ filename = new_name
+ elif collection_search_re and collection_search_re.search(filename):
+ new_name = os.path.abspath(collection_sub_re.sub('', filename))
+ display.info('%s -> %s' % (filename, new_name), verbosity=3)
+ filename = new_name
+ elif re.search(r'/ansible_[^/]+_payload\.zip/ansible/', filename):
+ # Rewrite the module_utils path from the remote host to match the controller. Ansible 2.7 and later.
+ new_name = re.sub(r'^.*/ansible_[^/]+_payload\.zip/ansible/', ansible_path, filename)
+ display.info('%s -> %s' % (filename, new_name), verbosity=3)
+ filename = new_name
+ elif '/ansible_module_' in filename:
+ # Rewrite the module path from the remote host to match the controller. Ansible 2.6 and earlier.
+ module_name = re.sub('^.*/ansible_module_(?P<module>.*).py$', '\\g<module>', filename)
+ if module_name not in modules:
+ display.warning('Skipping coverage of unknown module: %s' % module_name)
+ return None
+ new_name = os.path.abspath(modules[module_name])
+ display.info('%s -> %s' % (filename, new_name), verbosity=3)
+ filename = new_name
+ elif re.search(r'/ansible_[^/]+_payload(_[^/]+|\.zip)/__main__\.py$', filename):
+ # Rewrite the module path from the remote host to match the controller. Ansible 2.7 and later.
+ # AnsiballZ versions using zipimporter will match the `.zip` portion of the regex.
+ # AnsiballZ versions not using zipimporter will match the `_[^/]+` portion of the regex.
+ module_name = re.sub(r'^.*/ansible_(?P<module>[^/]+)_payload(_[^/]+|\.zip)/__main__\.py$',
+ '\\g<module>', filename).rstrip('_')
+ if module_name not in modules:
+ display.warning('Skipping coverage of unknown module: %s' % module_name)
+ return None
+ new_name = os.path.abspath(modules[module_name])
+ display.info('%s -> %s' % (filename, new_name), verbosity=3)
+ filename = new_name
+ elif re.search('^(/.*?)?/root/ansible/', filename):
+ # Rewrite the path of code running on a remote host or in a docker container as root.
+ new_name = re.sub('^(/.*?)?/root/ansible/', root_path, filename)
+ display.info('%s -> %s' % (filename, new_name), verbosity=3)
+ filename = new_name
+ elif integration_temp_path in filename:
+ # Rewrite the path of code running from an integration test temporary directory.
+ new_name = re.sub(r'^.*' + re.escape(integration_temp_path) + '[^/]+/', root_path, filename)
+ display.info('%s -> %s' % (filename, new_name), verbosity=3)
+ filename = new_name
+
+ filename = os.path.abspath(filename) # make sure path is absolute (will be relative if previously exported)
+
+ return filename
+
+
+class PathChecker:
+ """Checks code coverage paths to verify they are valid and reports on the findings."""
+ def __init__(self, args, collection_search_re=None): # type: (CoverageConfig, t.Optional[t.Pattern]) -> None
+ self.args = args
+ self.collection_search_re = collection_search_re
+ self.invalid_paths = []
+ self.invalid_path_chars = 0
+
+ def check_path(self, path): # type: (str) -> bool
+ """Return True if the given coverage path is valid, otherwise display a warning and return False."""
+ if os.path.isfile(to_bytes(path)):
+ return True
+
+ if self.collection_search_re and self.collection_search_re.search(path) and os.path.basename(path) == '__init__.py':
+ # the collection loader uses implicit namespace packages, so __init__.py does not need to exist on disk
+ # coverage is still reported for these non-existent files, but warnings are not needed
+ return False
+
+ self.invalid_paths.append(path)
+ self.invalid_path_chars += len(path)
+
+ if self.args.verbosity > 1:
+ display.warning('Invalid coverage path: %s' % path)
+
+ return False
+
+ def report(self): # type: () -> None
+ """Display a warning regarding invalid paths if any were found."""
+ if self.invalid_paths:
+ display.warning('Ignored %d characters from %d invalid coverage path(s).' % (self.invalid_path_chars, len(self.invalid_paths)))
diff --git a/test/lib/ansible_test/_internal/coverage/analyze/__init__.py b/test/lib/ansible_test/_internal/coverage/analyze/__init__.py
new file mode 100644
index 00000000..45770373
--- /dev/null
+++ b/test/lib/ansible_test/_internal/coverage/analyze/__init__.py
@@ -0,0 +1,19 @@
+"""Common logic for the `coverage analyze` subcommand."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ... import types as t
+
+from .. import (
+ CoverageConfig,
+)
+
+
+class CoverageAnalyzeConfig(CoverageConfig):
+ """Configuration for the `coverage analyze` command."""
+ def __init__(self, args): # type: (t.Any) -> None
+ super(CoverageAnalyzeConfig, self).__init__(args)
+
+ # avoid mixing log messages with file output when using `/dev/stdout` for the output file on commands
+ # this may be worth considering as the default behavior in the future, instead of being dependent on the command or options used
+ self.info_stderr = True
diff --git a/test/lib/ansible_test/_internal/coverage/analyze/targets/__init__.py b/test/lib/ansible_test/_internal/coverage/analyze/targets/__init__.py
new file mode 100644
index 00000000..8fe571b8
--- /dev/null
+++ b/test/lib/ansible_test/_internal/coverage/analyze/targets/__init__.py
@@ -0,0 +1,154 @@
+"""Analyze integration test target code coverage."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from .... import types as t
+
+from ....io import (
+ read_json_file,
+ write_json_file,
+)
+
+from ....util import (
+ ApplicationError,
+ display,
+)
+
+from .. import (
+ CoverageAnalyzeConfig,
+)
+
+if t.TYPE_CHECKING:
+ TargetKey = t.TypeVar('TargetKey', int, t.Tuple[int, int])
+ NamedPoints = t.Dict[str, t.Dict[TargetKey, t.Set[str]]]
+ IndexedPoints = t.Dict[str, t.Dict[TargetKey, t.Set[int]]]
+ Arcs = t.Dict[str, t.Dict[t.Tuple[int, int], t.Set[int]]]
+ Lines = t.Dict[str, t.Dict[int, t.Set[int]]]
+ TargetIndexes = t.Dict[str, int]
+ TargetSetIndexes = t.Dict[t.FrozenSet[int], int]
+
+
+class CoverageAnalyzeTargetsConfig(CoverageAnalyzeConfig):
+ """Configuration for the `coverage analyze targets` command."""
+ def __init__(self, args): # type: (t.Any) -> None
+ super(CoverageAnalyzeTargetsConfig, self).__init__(args)
+
+ self.info_stderr = True
+
+
+def make_report(target_indexes, arcs, lines): # type: (TargetIndexes, Arcs, Lines) -> t.Dict[str, t.Any]
+ """Condense target indexes, arcs and lines into a compact report."""
+ set_indexes = {}
+ arc_refs = dict((path, dict((format_arc(arc), get_target_set_index(indexes, set_indexes)) for arc, indexes in data.items())) for path, data in arcs.items())
+ line_refs = dict((path, dict((line, get_target_set_index(indexes, set_indexes)) for line, indexes in data.items())) for path, data in lines.items())
+
+ report = dict(
+ targets=[name for name, index in sorted(target_indexes.items(), key=lambda kvp: kvp[1])],
+ target_sets=[sorted(data) for data, index in sorted(set_indexes.items(), key=lambda kvp: kvp[1])],
+ arcs=arc_refs,
+ lines=line_refs,
+ )
+
+ return report
+
+
+def load_report(report): # type: (t.Dict[str, t.Any]) -> t.Tuple[t.List[str], Arcs, Lines]
+ """Extract target indexes, arcs and lines from an existing report."""
+ try:
+ target_indexes = report['targets'] # type: t.List[str]
+ target_sets = report['target_sets'] # type: t.List[t.List[int]]
+ arc_data = report['arcs'] # type: t.Dict[str, t.Dict[str, int]]
+ line_data = report['lines'] # type: t.Dict[str, t.Dict[int, int]]
+ except KeyError as ex:
+ raise ApplicationError('Document is missing key "%s".' % ex.args)
+ except TypeError:
+ raise ApplicationError('Document is type "%s" instead of "dict".' % type(report).__name__)
+
+ arcs = dict((path, dict((parse_arc(arc), set(target_sets[index])) for arc, index in data.items())) for path, data in arc_data.items())
+ lines = dict((path, dict((int(line), set(target_sets[index])) for line, index in data.items())) for path, data in line_data.items())
+
+ return target_indexes, arcs, lines
+
+
+def read_report(path): # type: (str) -> t.Tuple[t.List[str], Arcs, Lines]
+ """Read a JSON report from disk."""
+ try:
+ report = read_json_file(path)
+ except Exception as ex:
+ raise ApplicationError('File "%s" is not valid JSON: %s' % (path, ex))
+
+ try:
+ return load_report(report)
+ except ApplicationError as ex:
+ raise ApplicationError('File "%s" is not an aggregated coverage data file. %s' % (path, ex))
+
+
+def write_report(args, report, path): # type: (CoverageAnalyzeTargetsConfig, t.Dict[str, t.Any], str) -> None
+ """Write a JSON report to disk."""
+ if args.explain:
+ return
+
+ write_json_file(path, report, formatted=False)
+
+ display.info('Generated %d byte report with %d targets covering %d files.' % (
+ os.path.getsize(path), len(report['targets']), len(set(report['arcs'].keys()) | set(report['lines'].keys())),
+ ), verbosity=1)
+
+
+def format_arc(value): # type: (t.Tuple[int, int]) -> str
+ """Format an arc tuple as a string."""
+ return '%d:%d' % value
+
+
+def parse_arc(value): # type: (str) -> t.Tuple[int, int]
+ """Parse an arc string into a tuple."""
+ first, last = tuple(map(int, value.split(':')))
+ return first, last
+
+
+def get_target_set_index(data, target_set_indexes): # type: (t.Set[int], TargetSetIndexes) -> int
+ """Find or add the target set in the result set and return the target set index."""
+ return target_set_indexes.setdefault(frozenset(data), len(target_set_indexes))
+
+
+def get_target_index(name, target_indexes): # type: (str, TargetIndexes) -> int
+ """Find or add the target in the result set and return the target index."""
+ return target_indexes.setdefault(name, len(target_indexes))
+
+
+def expand_indexes(
+ source_data, # type: IndexedPoints
+ source_index, # type: t.List[str]
+ format_func, # type: t.Callable[[t.Any], str]
+): # type: (...) -> NamedPoints
+ """Expand indexes from the source into target names for easier processing of the data (arcs or lines)."""
+ combined_data = {} # type: t.Dict[str, t.Dict[t.Any, t.Set[str]]]
+
+ for covered_path, covered_points in source_data.items():
+ combined_points = combined_data.setdefault(covered_path, {})
+
+ for covered_point, covered_target_indexes in covered_points.items():
+ combined_point = combined_points.setdefault(format_func(covered_point), set())
+
+ for covered_target_index in covered_target_indexes:
+ combined_point.add(source_index[covered_target_index])
+
+ return combined_data
+
+
+def generate_indexes(target_indexes, data): # type: (TargetIndexes, NamedPoints) -> IndexedPoints
+ """Return an indexed version of the given data (arcs or points)."""
+ results = {} # type: IndexedPoints
+
+ for path, points in data.items():
+ result_points = results[path] = {}
+
+ for point, target_names in points.items():
+ result_point = result_points[point] = set()
+
+ for target_name in target_names:
+ result_point.add(get_target_index(target_name, target_indexes))
+
+ return results
diff --git a/test/lib/ansible_test/_internal/coverage/analyze/targets/combine.py b/test/lib/ansible_test/_internal/coverage/analyze/targets/combine.py
new file mode 100644
index 00000000..35148ff6
--- /dev/null
+++ b/test/lib/ansible_test/_internal/coverage/analyze/targets/combine.py
@@ -0,0 +1,64 @@
+"""Combine integration test target code coverage reports."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from .... import types as t
+
+from . import (
+ CoverageAnalyzeTargetsConfig,
+ get_target_index,
+ make_report,
+ read_report,
+ write_report,
+)
+
+if t.TYPE_CHECKING:
+ from . import (
+ Arcs,
+ IndexedPoints,
+ Lines,
+ TargetIndexes,
+ )
+
+
+class CoverageAnalyzeTargetsCombineConfig(CoverageAnalyzeTargetsConfig):
+ """Configuration for the `coverage analyze targets combine` command."""
+ def __init__(self, args): # type: (t.Any) -> None
+ super(CoverageAnalyzeTargetsCombineConfig, self).__init__(args)
+
+ self.input_files = args.input_file # type: t.List[str]
+ self.output_file = args.output_file # type: str
+
+
+def command_coverage_analyze_targets_combine(args): # type: (CoverageAnalyzeTargetsCombineConfig) -> None
+ """Combine integration test target code coverage reports."""
+ combined_target_indexes = {} # type: TargetIndexes
+ combined_path_arcs = {} # type: Arcs
+ combined_path_lines = {} # type: Lines
+
+ for report_path in args.input_files:
+ covered_targets, covered_path_arcs, covered_path_lines = read_report(report_path)
+
+ merge_indexes(covered_path_arcs, covered_targets, combined_path_arcs, combined_target_indexes)
+ merge_indexes(covered_path_lines, covered_targets, combined_path_lines, combined_target_indexes)
+
+ report = make_report(combined_target_indexes, combined_path_arcs, combined_path_lines)
+
+ write_report(args, report, args.output_file)
+
+
+def merge_indexes(
+ source_data, # type: IndexedPoints
+ source_index, # type: t.List[str]
+ combined_data, # type: IndexedPoints
+ combined_index, # type: TargetIndexes
+): # type: (...) -> None
+ """Merge indexes from the source into the combined data set (arcs or lines)."""
+ for covered_path, covered_points in source_data.items():
+ combined_points = combined_data.setdefault(covered_path, {})
+
+ for covered_point, covered_target_indexes in covered_points.items():
+ combined_point = combined_points.setdefault(covered_point, set())
+
+ for covered_target_index in covered_target_indexes:
+ combined_point.add(get_target_index(source_index[covered_target_index], combined_index))
diff --git a/test/lib/ansible_test/_internal/coverage/analyze/targets/expand.py b/test/lib/ansible_test/_internal/coverage/analyze/targets/expand.py
new file mode 100644
index 00000000..388dd6cb
--- /dev/null
+++ b/test/lib/ansible_test/_internal/coverage/analyze/targets/expand.py
@@ -0,0 +1,39 @@
+"""Expand target names in an aggregated coverage file."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from .... import types as t
+
+from ....io import (
+ SortedSetEncoder,
+ write_json_file,
+)
+
+from . import (
+ CoverageAnalyzeTargetsConfig,
+ expand_indexes,
+ format_arc,
+ read_report,
+)
+
+
+class CoverageAnalyzeTargetsExpandConfig(CoverageAnalyzeTargetsConfig):
+ """Configuration for the `coverage analyze targets expand` command."""
+ def __init__(self, args): # type: (t.Any) -> None
+ super(CoverageAnalyzeTargetsExpandConfig, self).__init__(args)
+
+ self.input_file = args.input_file # type: str
+ self.output_file = args.output_file # type: str
+
+
+def command_coverage_analyze_targets_expand(args): # type: (CoverageAnalyzeTargetsExpandConfig) -> None
+ """Expand target names in an aggregated coverage file."""
+ covered_targets, covered_path_arcs, covered_path_lines = read_report(args.input_file)
+
+ report = dict(
+ arcs=expand_indexes(covered_path_arcs, covered_targets, format_arc),
+ lines=expand_indexes(covered_path_lines, covered_targets, str),
+ )
+
+ if not args.explain:
+ write_json_file(args.output_file, report, encoder=SortedSetEncoder)
diff --git a/test/lib/ansible_test/_internal/coverage/analyze/targets/filter.py b/test/lib/ansible_test/_internal/coverage/analyze/targets/filter.py
new file mode 100644
index 00000000..e90fb227
--- /dev/null
+++ b/test/lib/ansible_test/_internal/coverage/analyze/targets/filter.py
@@ -0,0 +1,104 @@
+"""Filter an aggregated coverage file, keeping only the specified targets."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+
+from .... import types as t
+
+from . import (
+ CoverageAnalyzeTargetsConfig,
+ expand_indexes,
+ generate_indexes,
+ make_report,
+ read_report,
+ write_report,
+)
+
+if t.TYPE_CHECKING:
+ from . import (
+ NamedPoints,
+ TargetIndexes,
+ )
+
+
+class CoverageAnalyzeTargetsFilterConfig(CoverageAnalyzeTargetsConfig):
+ """Configuration for the `coverage analyze targets filter` command."""
+ def __init__(self, args): # type: (t.Any) -> None
+ super(CoverageAnalyzeTargetsFilterConfig, self).__init__(args)
+
+ self.input_file = args.input_file # type: str
+ self.output_file = args.output_file # type: str
+ self.include_targets = args.include_targets # type: t.List[str]
+ self.exclude_targets = args.exclude_targets # type: t.List[str]
+ self.include_path = args.include_path # type: t.Optional[str]
+ self.exclude_path = args.exclude_path # type: t.Optional[str]
+
+
+def command_coverage_analyze_targets_filter(args): # type: (CoverageAnalyzeTargetsFilterConfig) -> None
+ """Filter target names in an aggregated coverage file."""
+ covered_targets, covered_path_arcs, covered_path_lines = read_report(args.input_file)
+
+ filtered_path_arcs = expand_indexes(covered_path_arcs, covered_targets, lambda v: v)
+ filtered_path_lines = expand_indexes(covered_path_lines, covered_targets, lambda v: v)
+
+ include_targets = set(args.include_targets) if args.include_targets else None
+ exclude_targets = set(args.exclude_targets) if args.exclude_targets else None
+
+ include_path = re.compile(args.include_path) if args.include_path else None
+ exclude_path = re.compile(args.exclude_path) if args.exclude_path else None
+
+ def path_filter_func(path):
+ if include_path and not re.search(include_path, path):
+ return False
+
+ if exclude_path and re.search(exclude_path, path):
+ return False
+
+ return True
+
+ def target_filter_func(targets):
+ if include_targets:
+ targets &= include_targets
+
+ if exclude_targets:
+ targets -= exclude_targets
+
+ return targets
+
+ filtered_path_arcs = filter_data(filtered_path_arcs, path_filter_func, target_filter_func)
+ filtered_path_lines = filter_data(filtered_path_lines, path_filter_func, target_filter_func)
+
+ target_indexes = {} # type: TargetIndexes
+ indexed_path_arcs = generate_indexes(target_indexes, filtered_path_arcs)
+ indexed_path_lines = generate_indexes(target_indexes, filtered_path_lines)
+
+ report = make_report(target_indexes, indexed_path_arcs, indexed_path_lines)
+
+ write_report(args, report, args.output_file)
+
+
+def filter_data(
+ data, # type: NamedPoints
+ path_filter_func, # type: t.Callable[[str], bool]
+ target_filter_func, # type: t.Callable[[t.Set[str]], t.Set[str]]
+): # type: (...) -> NamedPoints
+ """Filter the data set using the specified filter function."""
+ result = {} # type: NamedPoints
+
+ for src_path, src_points in data.items():
+ if not path_filter_func(src_path):
+ continue
+
+ dst_points = {}
+
+ for src_point, src_targets in src_points.items():
+ dst_targets = target_filter_func(src_targets)
+
+ if dst_targets:
+ dst_points[src_point] = dst_targets
+
+ if dst_points:
+ result[src_path] = dst_points
+
+ return result
diff --git a/test/lib/ansible_test/_internal/coverage/analyze/targets/generate.py b/test/lib/ansible_test/_internal/coverage/analyze/targets/generate.py
new file mode 100644
index 00000000..a14b6f55
--- /dev/null
+++ b/test/lib/ansible_test/_internal/coverage/analyze/targets/generate.py
@@ -0,0 +1,146 @@
+"""Analyze code coverage data to determine which integration test targets provide coverage for each arc or line."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from .... import types as t
+
+from ....encoding import (
+ to_text,
+)
+
+from ....data import (
+ data_context,
+)
+
+from ....util_common import (
+ ResultType,
+)
+
+from ... import (
+ enumerate_powershell_lines,
+ enumerate_python_arcs,
+ get_collection_path_regexes,
+ get_powershell_coverage_files,
+ get_python_coverage_files,
+ get_python_modules,
+ initialize_coverage,
+ PathChecker,
+)
+
+from . import (
+ CoverageAnalyzeTargetsConfig,
+ get_target_index,
+ make_report,
+ write_report,
+)
+
+if t.TYPE_CHECKING:
+ from . import (
+ Arcs,
+ Lines,
+ TargetIndexes,
+ )
+
+
+class CoverageAnalyzeTargetsGenerateConfig(CoverageAnalyzeTargetsConfig):
+ """Configuration for the `coverage analyze targets generate` command."""
+ def __init__(self, args): # type: (t.Any) -> None
+ super(CoverageAnalyzeTargetsGenerateConfig, self).__init__(args)
+
+ self.input_dir = args.input_dir or ResultType.COVERAGE.path # type: str
+ self.output_file = args.output_file # type: str
+
+
+def command_coverage_analyze_targets_generate(args): # type: (CoverageAnalyzeTargetsGenerateConfig) -> None
+ """Analyze code coverage data to determine which integration test targets provide coverage for each arc or line."""
+ root = data_context().content.root
+ target_indexes = {}
+ arcs = dict((os.path.relpath(path, root), data) for path, data in analyze_python_coverage(args, args.input_dir, target_indexes).items())
+ lines = dict((os.path.relpath(path, root), data) for path, data in analyze_powershell_coverage(args, args.input_dir, target_indexes).items())
+ report = make_report(target_indexes, arcs, lines)
+ write_report(args, report, args.output_file)
+
+
+def analyze_python_coverage(
+ args, # type: CoverageAnalyzeTargetsGenerateConfig
+ path, # type: str
+ target_indexes, # type: TargetIndexes
+): # type: (...) -> Arcs
+ """Analyze Python code coverage."""
+ results = {} # type: Arcs
+ collection_search_re, collection_sub_re = get_collection_path_regexes()
+ modules = get_python_modules()
+ python_files = get_python_coverage_files(path)
+ coverage = initialize_coverage(args)
+
+ for python_file in python_files:
+ if not is_integration_coverage_file(python_file):
+ continue
+
+ target_name = get_target_name(python_file)
+ target_index = get_target_index(target_name, target_indexes)
+
+ for filename, covered_arcs in enumerate_python_arcs(python_file, coverage, modules, collection_search_re, collection_sub_re):
+ arcs = results.setdefault(filename, {})
+
+ for covered_arc in covered_arcs:
+ arc = arcs.setdefault(covered_arc, set())
+ arc.add(target_index)
+
+ prune_invalid_filenames(args, results, collection_search_re=collection_search_re)
+
+ return results
+
+
+def analyze_powershell_coverage(
+ args, # type: CoverageAnalyzeTargetsGenerateConfig
+ path, # type: str
+ target_indexes, # type: TargetIndexes
+): # type: (...) -> Lines
+ """Analyze PowerShell code coverage"""
+ results = {} # type: Lines
+ collection_search_re, collection_sub_re = get_collection_path_regexes()
+ powershell_files = get_powershell_coverage_files(path)
+
+ for powershell_file in powershell_files:
+ if not is_integration_coverage_file(powershell_file):
+ continue
+
+ target_name = get_target_name(powershell_file)
+ target_index = get_target_index(target_name, target_indexes)
+
+ for filename, hits in enumerate_powershell_lines(powershell_file, collection_search_re, collection_sub_re):
+ lines = results.setdefault(filename, {})
+
+ for covered_line in hits:
+ line = lines.setdefault(covered_line, set())
+ line.add(target_index)
+
+ prune_invalid_filenames(args, results)
+
+ return results
+
+
+def prune_invalid_filenames(
+ args, # type: CoverageAnalyzeTargetsGenerateConfig
+ results, # type: t.Dict[str, t.Any]
+ collection_search_re=None, # type: t.Optional[t.Pattern]
+): # type: (...) -> None
+ """Remove invalid filenames from the given result set."""
+ path_checker = PathChecker(args, collection_search_re)
+
+ for path in list(results.keys()):
+ if not path_checker.check_path(path):
+ del results[path]
+
+
+def get_target_name(path): # type: (str) -> str
+ """Extract the test target name from the given coverage path."""
+ return to_text(os.path.basename(path).split('=')[1])
+
+
+def is_integration_coverage_file(path): # type: (str) -> bool
+ """Returns True if the coverage file came from integration tests, otherwise False."""
+ return os.path.basename(path).split('=')[0] in ('integration', 'windows-integration', 'network-integration')
diff --git a/test/lib/ansible_test/_internal/coverage/analyze/targets/missing.py b/test/lib/ansible_test/_internal/coverage/analyze/targets/missing.py
new file mode 100644
index 00000000..613a0ef2
--- /dev/null
+++ b/test/lib/ansible_test/_internal/coverage/analyze/targets/missing.py
@@ -0,0 +1,109 @@
+"""Identify aggregated coverage in one file missing from another."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from .... import types as t
+
+from ....encoding import (
+ to_bytes,
+)
+
+from . import (
+ CoverageAnalyzeTargetsConfig,
+ get_target_index,
+ make_report,
+ read_report,
+ write_report,
+)
+
+if t.TYPE_CHECKING:
+ from . import (
+ TargetIndexes,
+ IndexedPoints,
+ )
+
+
+class CoverageAnalyzeTargetsMissingConfig(CoverageAnalyzeTargetsConfig):
+ """Configuration for the `coverage analyze targets missing` command."""
+ def __init__(self, args): # type: (t.Any) -> None
+ super(CoverageAnalyzeTargetsMissingConfig, self).__init__(args)
+
+ self.from_file = args.from_file # type: str
+ self.to_file = args.to_file # type: str
+ self.output_file = args.output_file # type: str
+
+ self.only_gaps = args.only_gaps # type: bool
+ self.only_exists = args.only_exists # type: bool
+
+
+def command_coverage_analyze_targets_missing(args): # type: (CoverageAnalyzeTargetsMissingConfig) -> None
+ """Identify aggregated coverage in one file missing from another."""
+ from_targets, from_path_arcs, from_path_lines = read_report(args.from_file)
+ to_targets, to_path_arcs, to_path_lines = read_report(args.to_file)
+ target_indexes = {}
+
+ if args.only_gaps:
+ arcs = find_gaps(from_path_arcs, from_targets, to_path_arcs, target_indexes, args.only_exists)
+ lines = find_gaps(from_path_lines, from_targets, to_path_lines, target_indexes, args.only_exists)
+ else:
+ arcs = find_missing(from_path_arcs, from_targets, to_path_arcs, to_targets, target_indexes, args.only_exists)
+ lines = find_missing(from_path_lines, from_targets, to_path_lines, to_targets, target_indexes, args.only_exists)
+
+ report = make_report(target_indexes, arcs, lines)
+ write_report(args, report, args.output_file)
+
+
+def find_gaps(
+ from_data, # type: IndexedPoints
+ from_index, # type: t.List[str]
+ to_data, # type: IndexedPoints
+ target_indexes, # type: TargetIndexes
+ only_exists, # type: bool
+): # type: (...) -> IndexedPoints
+ """Find gaps in coverage between the from and to data sets."""
+ target_data = {}
+
+ for from_path, from_points in from_data.items():
+ if only_exists and not os.path.isfile(to_bytes(from_path)):
+ continue
+
+ to_points = to_data.get(from_path, {})
+
+ gaps = set(from_points.keys()) - set(to_points.keys())
+
+ if gaps:
+ gap_points = dict((key, value) for key, value in from_points.items() if key in gaps)
+ target_data[from_path] = dict((gap, set(get_target_index(from_index[i], target_indexes) for i in indexes)) for gap, indexes in gap_points.items())
+
+ return target_data
+
+
+def find_missing(
+ from_data, # type: IndexedPoints
+ from_index, # type: t.List[str]
+ to_data, # type: IndexedPoints
+ to_index, # type: t.List[str]
+ target_indexes, # type: TargetIndexes
+ only_exists, # type: bool
+): # type: (...) -> IndexedPoints
+ """Find coverage in from_data not present in to_data (arcs or lines)."""
+ target_data = {}
+
+ for from_path, from_points in from_data.items():
+ if only_exists and not os.path.isfile(to_bytes(from_path)):
+ continue
+
+ to_points = to_data.get(from_path, {})
+
+ for from_point, from_target_indexes in from_points.items():
+ to_target_indexes = to_points.get(from_point, set())
+
+ remaining_targets = set(from_index[i] for i in from_target_indexes) - set(to_index[i] for i in to_target_indexes)
+
+ if remaining_targets:
+ target_index = target_data.setdefault(from_path, {}).setdefault(from_point, set())
+ target_index.update(get_target_index(name, target_indexes) for name in remaining_targets)
+
+ return target_data
diff --git a/test/lib/ansible_test/_internal/coverage/combine.py b/test/lib/ansible_test/_internal/coverage/combine.py
new file mode 100644
index 00000000..7f726267
--- /dev/null
+++ b/test/lib/ansible_test/_internal/coverage/combine.py
@@ -0,0 +1,303 @@
+"""Combine code coverage files."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ..target import (
+ walk_compile_targets,
+ walk_powershell_targets,
+)
+
+from ..io import (
+ read_text_file,
+)
+
+from ..util import (
+ display,
+)
+
+from ..util_common import (
+ ResultType,
+ write_json_file,
+ write_json_test_results,
+)
+
+from . import (
+ enumerate_python_arcs,
+ enumerate_powershell_lines,
+ get_collection_path_regexes,
+ get_python_coverage_files,
+ get_python_modules,
+ get_powershell_coverage_files,
+ initialize_coverage,
+ COVERAGE_OUTPUT_FILE_NAME,
+ COVERAGE_GROUPS,
+ CoverageConfig,
+ PathChecker,
+)
+
+
+def command_coverage_combine(args):
+ """Patch paths in coverage files and merge into a single file.
+ :type args: CoverageConfig
+ :rtype: list[str]
+ """
+ paths = _command_coverage_combine_powershell(args) + _command_coverage_combine_python(args)
+
+ for path in paths:
+ display.info('Generated combined output: %s' % path, verbosity=1)
+
+ return paths
+
+
+def _command_coverage_combine_python(args):
+ """
+ :type args: CoverageConfig
+ :rtype: list[str]
+ """
+ coverage = initialize_coverage(args)
+
+ modules = get_python_modules()
+
+ coverage_files = get_python_coverage_files()
+
+ counter = 0
+ sources = _get_coverage_targets(args, walk_compile_targets)
+ groups = _build_stub_groups(args, sources, lambda line_count: set())
+
+ collection_search_re, collection_sub_re = get_collection_path_regexes()
+
+ for coverage_file in coverage_files:
+ counter += 1
+ display.info('[%4d/%4d] %s' % (counter, len(coverage_files), coverage_file), verbosity=2)
+
+ group = get_coverage_group(args, coverage_file)
+
+ if group is None:
+ display.warning('Unexpected name for coverage file: %s' % coverage_file)
+ continue
+
+ for filename, arcs in enumerate_python_arcs(coverage_file, coverage, modules, collection_search_re, collection_sub_re):
+ if args.export:
+ filename = os.path.relpath(filename) # exported paths must be relative since absolute paths may differ between systems
+
+ if group not in groups:
+ groups[group] = {}
+
+ arc_data = groups[group]
+
+ if filename not in arc_data:
+ arc_data[filename] = set()
+
+ arc_data[filename].update(arcs)
+
+ output_files = []
+
+ if args.export:
+ coverage_file = os.path.join(args.export, '')
+ suffix = '=coverage.combined'
+ else:
+ coverage_file = os.path.join(ResultType.COVERAGE.path, COVERAGE_OUTPUT_FILE_NAME)
+ suffix = ''
+
+ path_checker = PathChecker(args, collection_search_re)
+
+ for group in sorted(groups):
+ arc_data = groups[group]
+
+ updated = coverage.CoverageData()
+
+ for filename in arc_data:
+ if not path_checker.check_path(filename):
+ continue
+
+ updated.add_arcs({filename: list(arc_data[filename])})
+
+ if args.all:
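+ # record every source file, even those with no arcs, so untested files appear in the combined data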
+ updated.add_arcs(dict((source[0], []) for source in sources))
+
+ if not args.explain:
+ output_file = coverage_file + group + suffix
+ updated.write_file(output_file) # always write files to make sure stale files do not exist
+
+ if updated:
+ # only report files which are non-empty to prevent coverage from reporting errors
+ output_files.append(output_file)
+
+ path_checker.report()
+
+ return sorted(output_files)
+
+
+def _command_coverage_combine_powershell(args):
+ """
+ :type args: CoverageConfig
+ :rtype: list[str]
+ """
+ coverage_files = get_powershell_coverage_files()
+
+ def _default_stub_value(lines):
+ val = {}
+ for line in range(lines):
+ val[line] = 0
+ return val
+
+ counter = 0
+ sources = _get_coverage_targets(args, walk_powershell_targets)
+ groups = _build_stub_groups(args, sources, _default_stub_value)
+
+ collection_search_re, collection_sub_re = get_collection_path_regexes()
+
+ for coverage_file in coverage_files:
+ counter += 1
+ display.info('[%4d/%4d] %s' % (counter, len(coverage_files), coverage_file), verbosity=2)
+
+ group = get_coverage_group(args, coverage_file)
+
+ if group is None:
+ display.warning('Unexpected name for coverage file: %s' % coverage_file)
+ continue
+
+ for filename, hits in enumerate_powershell_lines(coverage_file, collection_search_re, collection_sub_re):
+ if args.export:
+ filename = os.path.relpath(filename) # exported paths must be relative since absolute paths may differ between systems
+
+ if group not in groups:
+ groups[group] = {}
+
+ coverage_data = groups[group]
+
+ if filename not in coverage_data:
+ coverage_data[filename] = {}
+
+ file_coverage = coverage_data[filename]
+
+ for line_no, hit_count in hits.items():
+ file_coverage[line_no] = file_coverage.get(line_no, 0) + hit_count
+
+ output_files = []
+
+ path_checker = PathChecker(args)
+
+ for group in sorted(groups):
+ coverage_data = dict((filename, data) for filename, data in groups[group].items() if path_checker.check_path(filename))
+
+ if args.all:
+ # Add 0 line entries for files not in coverage_data
+ for source, source_line_count in sources:
+ if source in coverage_data:
+ continue
+
+ coverage_data[source] = _default_stub_value(source_line_count)
+
+ if not args.explain:
+ if args.export:
+ output_file = os.path.join(args.export, group + '=coverage.combined')
+ write_json_file(output_file, coverage_data, formatted=False)
+ output_files.append(output_file)
+ continue
+
+ output_file = COVERAGE_OUTPUT_FILE_NAME + group + '-powershell'
+
+ write_json_test_results(ResultType.COVERAGE, output_file, coverage_data, formatted=False)
+
+ output_files.append(os.path.join(ResultType.COVERAGE.path, output_file))
+
+ path_checker.report()
+
+ return sorted(output_files)
+
+
+def _get_coverage_targets(args, walk_func):
+ """
+ :type args: CoverageConfig
+ :type walk_func: Func
+ :rtype: list[tuple[str, int]]
+ """
+ sources = []
+
+ if args.all or args.stub:
+ # excludes symlinks of regular files to avoid reporting on the same file multiple times
+ # in the future it would be nice to merge any coverage for symlinks into the real files
+ for target in walk_func(include_symlinks=False):
+ target_path = os.path.abspath(target.path)
+
+ target_lines = len(read_text_file(target_path).splitlines())
+
+ sources.append((target_path, target_lines))
+
+ sources.sort()
+
+ return sources
+
+
+def _build_stub_groups(args, sources, default_stub_value):
+ """
+ :type args: CoverageConfig
+ :type sources: List[tuple[str, int]]
+ :type default_stub_value: Func[int]
+ :rtype: dict
+ """
+ groups = {}
+
+ if args.stub:
+ stub_group = []
+ stub_groups = [stub_group]
+ stub_line_limit = 500000
+ stub_line_count = 0
+
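+ # start a new group once the accumulated source line count passes the limit, keeping each stub coverage file a manageable size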
+ for source, source_line_count in sources:
+ stub_group.append((source, source_line_count))
+ stub_line_count += source_line_count
+
+ if stub_line_count > stub_line_limit:
+ stub_line_count = 0
+ stub_group = []
+ stub_groups.append(stub_group)
+
+ for stub_index, stub_group in enumerate(stub_groups):
+ if not stub_group:
+ continue
+
+ groups['=stub-%02d' % (stub_index + 1)] = dict((source, default_stub_value(line_count))
+ for source, line_count in stub_group)
+
+ return groups
+
+
+def get_coverage_group(args, coverage_file):
+ """
+ :type args: CoverageConfig
+ :type coverage_file: str
+ :rtype: str
+ """
+ parts = os.path.basename(coverage_file).split('=', 4)
+
+ # noinspection PyTypeChecker
+ if len(parts) != 5 or not parts[4].startswith('coverage.'):
+ return None
+
+ names = dict(
+ command=parts[0],
+ target=parts[1],
+ environment=parts[2],
+ version=parts[3],
+ )
+
+ export_names = dict(
+ version=parts[3],
+ )
+
+ group = ''
+
+ for part in COVERAGE_GROUPS:
+ if part in args.group_by:
+ group += '=%s' % names[part]
+ elif args.export:
+ group += '=%s' % export_names.get(part, 'various')
+
+ if args.export:
+ group = group.lstrip('=')
+
+ return group
diff --git a/test/lib/ansible_test/_internal/coverage/erase.py b/test/lib/ansible_test/_internal/coverage/erase.py
new file mode 100644
index 00000000..92d241c7
--- /dev/null
+++ b/test/lib/ansible_test/_internal/coverage/erase.py
@@ -0,0 +1,27 @@
+"""Erase code coverage files."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ..util_common import (
+ ResultType,
+)
+
+from . import (
+ CoverageConfig,
+)
+
+
+def command_coverage_erase(args): # type: (CoverageConfig) -> None
+ """Erase code coverage data files collected during test runs."""
+ coverage_dir = ResultType.COVERAGE.path
+
+ for name in os.listdir(coverage_dir):
+ if not name.startswith('coverage') and '=coverage.' not in name:
+ continue
+
+ path = os.path.join(coverage_dir, name)
+
+ if not args.explain:
+ os.remove(path)
diff --git a/test/lib/ansible_test/_internal/coverage/html.py b/test/lib/ansible_test/_internal/coverage/html.py
new file mode 100644
index 00000000..63956a19
--- /dev/null
+++ b/test/lib/ansible_test/_internal/coverage/html.py
@@ -0,0 +1,45 @@
+"""Generate HTML code coverage reports."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ..io import (
+ make_dirs,
+)
+
+from ..util import (
+ display,
+)
+
+from ..util_common import (
+ ResultType,
+)
+
+from .combine import (
+ command_coverage_combine,
+)
+
+from . import (
+ run_coverage,
+ CoverageConfig,
+)
+
+
+def command_coverage_html(args):
+ """
+ :type args: CoverageConfig
+ """
+ output_files = command_coverage_combine(args)
+
+ for output_file in output_files:
+ if output_file.endswith('-powershell'):
+ # coverage.py does not support non-Python files so we just skip the local html report.
+ display.info("Skipping output file %s in html generation" % output_file, verbosity=3)
+ continue
+
+ dir_name = os.path.join(ResultType.REPORTS.path, os.path.basename(output_file))
+ make_dirs(dir_name)
+ run_coverage(args, output_file, 'html', ['-i', '-d', dir_name])
+
+ display.info('HTML report generated: file:///%s' % os.path.join(dir_name, 'index.html'))
diff --git a/test/lib/ansible_test/_internal/coverage/report.py b/test/lib/ansible_test/_internal/coverage/report.py
new file mode 100644
index 00000000..24efa637
--- /dev/null
+++ b/test/lib/ansible_test/_internal/coverage/report.py
@@ -0,0 +1,156 @@
+"""Generate console code coverage reports."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ..io import (
+ read_json_file,
+)
+
+from ..util import (
+ display,
+)
+
+from ..data import (
+ data_context,
+)
+
+from .combine import (
+ command_coverage_combine,
+)
+
+from . import (
+ run_coverage,
+ CoverageConfig,
+)
+
+
+def command_coverage_report(args):
+ """
+ :type args: CoverageReportConfig
+ """
+ output_files = command_coverage_combine(args)
+
+ for output_file in output_files:
+ if args.group_by or args.stub:
+ display.info('>>> Coverage Group: %s' % ' '.join(os.path.basename(output_file).split('=')[1:]))
+
+ if output_file.endswith('-powershell'):
+ display.info(_generate_powershell_output_report(args, output_file))
+ else:
+ options = []
+
+ if args.show_missing:
+ options.append('--show-missing')
+
+ if args.include:
+ options.extend(['--include', args.include])
+
+ if args.omit:
+ options.extend(['--omit', args.omit])
+
+ run_coverage(args, output_file, 'report', options)
+
+
+def _generate_powershell_output_report(args, coverage_file):
+ """
+ :type args: CoverageReportConfig
+ :type coverage_file: str
+ :rtype: str
+ """
+ coverage_info = read_json_file(coverage_file)
+
+ root_path = data_context().content.root + '/'
+
+ name_padding = 7
+ cover_padding = 8
+
+ file_report = []
+ total_stmts = 0
+ total_miss = 0
+
+ for filename in sorted(coverage_info.keys()):
+ hit_info = coverage_info[filename]
+
+ if filename.startswith(root_path):
+ filename = filename[len(root_path):]
+
+ if args.omit and filename in args.omit:
+ continue
+ if args.include and filename not in args.include:
+ continue
+
+ stmts = len(hit_info)
+ miss = len([c for c in hit_info.values() if c == 0])
+
+ name_padding = max(name_padding, len(filename) + 3)
+
+ total_stmts += stmts
+ total_miss += miss
+
+ cover = "{0}%".format(int((stmts - miss) / stmts * 100))
+
+ missing = []
+ current_missing = None
+ sorted_lines = sorted([int(x) for x in hit_info.keys()])
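+ # collapse consecutive missed lines into 'start-end' ranges for the Lines Missing column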
+ for idx, line in enumerate(sorted_lines):
+ hit = hit_info[str(line)]
+ if hit == 0 and current_missing is None:
+ current_missing = line
+ elif hit != 0 and current_missing is not None:
+ end_line = sorted_lines[idx - 1]
+ if current_missing == end_line:
+ missing.append(str(current_missing))
+ else:
+ missing.append('%s-%s' % (current_missing, end_line))
+ current_missing = None
+
+ if current_missing is not None:
+ end_line = sorted_lines[-1]
+ if current_missing == end_line:
+ missing.append(str(current_missing))
+ else:
+ missing.append('%s-%s' % (current_missing, end_line))
+
+ file_report.append({'name': filename, 'stmts': stmts, 'miss': miss, 'cover': cover, 'missing': missing})
+
+ if total_stmts == 0:
+ return ''
+
+ total_percent = '{0}%'.format(int((total_stmts - total_miss) / total_stmts * 100))
+ stmts_padding = max(8, len(str(total_stmts)))
+ miss_padding = max(7, len(str(total_miss)))
+
+ line_length = name_padding + stmts_padding + miss_padding + cover_padding
+
+ header = 'Name'.ljust(name_padding) + 'Stmts'.rjust(stmts_padding) + 'Miss'.rjust(miss_padding) + \
+ 'Cover'.rjust(cover_padding)
+
+ if args.show_missing:
+ header += 'Lines Missing'.rjust(16)
+ line_length += 16
+
+ line_break = '-' * line_length
+ lines = ['%s%s%s%s%s' % (f['name'].ljust(name_padding), str(f['stmts']).rjust(stmts_padding),
+ str(f['miss']).rjust(miss_padding), f['cover'].rjust(cover_padding),
+ ' ' + ', '.join(f['missing']) if args.show_missing else '')
+ for f in file_report]
+ totals = 'TOTAL'.ljust(name_padding) + str(total_stmts).rjust(stmts_padding) + \
+ str(total_miss).rjust(miss_padding) + total_percent.rjust(cover_padding)
+
+ report = '{0}\n{1}\n{2}\n{1}\n{3}'.format(header, line_break, "\n".join(lines), totals)
+ return report
+
+
+class CoverageReportConfig(CoverageConfig):
+ """Configuration for the coverage report command."""
+ def __init__(self, args):
+ """
+ :type args: any
+ """
+ super(CoverageReportConfig, self).__init__(args)
+
+ self.show_missing = args.show_missing # type: bool
+ self.include = args.include # type: str
+ self.omit = args.omit # type: str
diff --git a/test/lib/ansible_test/_internal/coverage/xml.py b/test/lib/ansible_test/_internal/coverage/xml.py
new file mode 100644
index 00000000..94b5abc5
--- /dev/null
+++ b/test/lib/ansible_test/_internal/coverage/xml.py
@@ -0,0 +1,191 @@
+"""Generate XML code coverage reports."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import time
+
+from xml.etree.ElementTree import (
+ Comment,
+ Element,
+ SubElement,
+ tostring,
+)
+
+from xml.dom import (
+ minidom,
+)
+
+from ..io import (
+ make_dirs,
+ read_json_file,
+)
+
+from ..util_common import (
+ ResultType,
+ write_text_test_results,
+)
+
+from ..env import (
+ get_ansible_version,
+)
+
+from ..data import (
+ data_context,
+)
+
+from .combine import (
+ command_coverage_combine,
+)
+
+from . import (
+ run_coverage,
+ CoverageConfig,
+)
+
+
+def command_coverage_xml(args):
+ """
+ :type args: CoverageConfig
+ """
+ output_files = command_coverage_combine(args)
+
+ for output_file in output_files:
+ xml_name = '%s.xml' % os.path.basename(output_file)
+ if output_file.endswith('-powershell'):
+ report = _generate_powershell_xml(output_file)
+
+ rough_string = tostring(report, 'utf-8')
+ reparsed = minidom.parseString(rough_string)
+ pretty = reparsed.toprettyxml(indent=' ')
+
+ write_text_test_results(ResultType.REPORTS, xml_name, pretty)
+ else:
+ xml_path = os.path.join(ResultType.REPORTS.path, xml_name)
+ make_dirs(ResultType.REPORTS.path)
+ run_coverage(args, output_file, 'xml', ['-i', '-o', xml_path])
+
+
+def _generate_powershell_xml(coverage_file):
+ """
+ :type coverage_file: str
+ :rtype: Element
+ """
+ coverage_info = read_json_file(coverage_file)
+
+ content_root = data_context().content.root
+ is_ansible = data_context().content.is_ansible
+
+ packages = {}
+ for path, results in coverage_info.items():
+ filename = os.path.splitext(os.path.basename(path))[0]
+
+ if filename.startswith('Ansible.ModuleUtils'):
+ package = 'ansible.module_utils'
+ elif is_ansible:
+ package = 'ansible.modules'
+ else:
+ rel_path = path[len(content_root) + 1:]
+ plugin_type = "modules" if rel_path.startswith("plugins/modules") else "module_utils"
+ package = 'ansible_collections.%splugins.%s' % (data_context().content.collection.prefix, plugin_type)
+
+ if package not in packages:
+ packages[package] = {}
+
+ packages[package][path] = results
+
+ elem_coverage = Element('coverage')
+ elem_coverage.append(
+ Comment(' Generated by ansible-test from the Ansible project: https://www.ansible.com/ '))
+ elem_coverage.append(
+ Comment(' Based on https://raw.githubusercontent.com/cobertura/web/master/htdocs/xml/coverage-04.dtd '))
+
+ elem_sources = SubElement(elem_coverage, 'sources')
+
+ elem_source = SubElement(elem_sources, 'source')
+ elem_source.text = data_context().content.root
+
+ elem_packages = SubElement(elem_coverage, 'packages')
+
+ total_lines_hit = 0
+ total_line_count = 0
+
+ for package_name, package_data in packages.items():
+ lines_hit, line_count = _add_cobertura_package(elem_packages, package_name, package_data)
+
+ total_lines_hit += lines_hit
+ total_line_count += line_count
+
+ elem_coverage.attrib.update({
+ 'branch-rate': '0',
+ 'branches-covered': '0',
+ 'branches-valid': '0',
+ 'complexity': '0',
+ 'line-rate': str(round(total_lines_hit / total_line_count, 4)) if total_line_count else "0",
+ 'lines-covered': str(total_lines_hit),
+ 'lines-valid': str(total_line_count),
+ 'timestamp': str(int(time.time())),
+ 'version': get_ansible_version(),
+ })
+
+ return elem_coverage
+
+
+def _add_cobertura_package(packages, package_name, package_data):
+ """
+ :type packages: SubElement
+ :type package_name: str
+ :type package_data: Dict[str, Dict[str, int]]
+ :rtype: Tuple[int, int]
+ """
+ elem_package = SubElement(packages, 'package')
+ elem_classes = SubElement(elem_package, 'classes')
+
+ total_lines_hit = 0
+ total_line_count = 0
+
+ for path, results in package_data.items():
+ lines_hit = len([True for hits in results.values() if hits])
+ line_count = len(results)
+
+ total_lines_hit += lines_hit
+ total_line_count += line_count
+
+ elem_class = SubElement(elem_classes, 'class')
+
+ class_name = os.path.splitext(os.path.basename(path))[0]
+ if class_name.startswith("Ansible.ModuleUtils"):
+ class_name = class_name[20:]
+
+ content_root = data_context().content.root
+ filename = path
+ if filename.startswith(content_root):
+ filename = filename[len(content_root) + 1:]
+
+ elem_class.attrib.update({
+ 'branch-rate': '0',
+ 'complexity': '0',
+ 'filename': filename,
+ 'line-rate': str(round(lines_hit / line_count, 4)) if line_count else "0",
+ 'name': class_name,
+ })
+
+ SubElement(elem_class, 'methods')
+
+ elem_lines = SubElement(elem_class, 'lines')
+
+ for number, hits in results.items():
+ elem_line = SubElement(elem_lines, 'line')
+ elem_line.attrib.update(
+ hits=str(hits),
+ number=str(number),
+ )
+
+ elem_package.attrib.update({
+ 'branch-rate': '0',
+ 'complexity': '0',
+ 'line-rate': str(round(total_lines_hit / total_line_count, 4)) if total_line_count else "0",
+ 'name': package_name,
+ })
+
+ return total_lines_hit, total_line_count
diff --git a/test/lib/ansible_test/_internal/coverage_util.py b/test/lib/ansible_test/_internal/coverage_util.py
new file mode 100644
index 00000000..e5434231
--- /dev/null
+++ b/test/lib/ansible_test/_internal/coverage_util.py
@@ -0,0 +1,125 @@
+"""Utility code for facilitating collection of code coverage when running tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import contextlib
+import os
+import tempfile
+
+from .config import (
+ IntegrationConfig,
+ SanityConfig,
+ TestConfig,
+)
+
+from .io import (
+ write_text_file,
+)
+
+from .util import (
+ COVERAGE_CONFIG_NAME,
+ remove_tree,
+)
+
+from .data import (
+ data_context,
+)
+
+
+@contextlib.contextmanager
+def coverage_context(args): # type: (TestConfig) -> None
+ """Content to set up and clean up code coverage configuration for tests."""
+ coverage_setup(args)
+
+ try:
+ yield
+ finally:
+ coverage_cleanup(args)
+
+
+def coverage_setup(args): # type: (TestConfig) -> None
+ """Set up code coverage configuration before running tests."""
+ if not args.coverage:
+ return
+
+ coverage_config = generate_coverage_config(args)
+
+ if args.explain:
+ args.coverage_config_base_path = '/tmp/coverage-temp-dir'
+ else:
+ args.coverage_config_base_path = tempfile.mkdtemp()
+
+ write_text_file(os.path.join(args.coverage_config_base_path, COVERAGE_CONFIG_NAME), coverage_config)
+
+
+def coverage_cleanup(args): # type: (TestConfig) -> None
+ """Clean up code coverage configuration after tests have finished."""
+ if args.coverage_config_base_path and not args.explain:
+ remove_tree(args.coverage_config_base_path)
+ args.coverage_config_base_path = None
+
+
+def generate_coverage_config(args): # type: (TestConfig) -> str
+ """Generate code coverage configuration for tests."""
+ if data_context().content.collection:
+ coverage_config = generate_collection_coverage_config(args)
+ else:
+ coverage_config = generate_ansible_coverage_config()
+
+ return coverage_config
+
+
+def generate_ansible_coverage_config(): # type: () -> str
+ """Generate code coverage configuration for Ansible tests."""
+ coverage_config = '''
+[run]
+branch = True
+concurrency = multiprocessing
+parallel = True
+
+omit =
+ */python*/dist-packages/*
+ */python*/site-packages/*
+ */python*/distutils/*
+ */pyshared/*
+ */pytest
+ */AnsiballZ_*.py
+ */test/results/*
+'''
+
+ return coverage_config
+
+
+def generate_collection_coverage_config(args): # type: (TestConfig) -> str
+ """Generate code coverage configuration for Ansible Collection tests."""
+ coverage_config = '''
+[run]
+branch = True
+concurrency = multiprocessing
+parallel = True
+disable_warnings =
+ no-data-collected
+'''
+
+ if isinstance(args, IntegrationConfig):
+ coverage_config += '''
+include =
+ %s/*
+ */%s/*
+''' % (data_context().content.root, data_context().content.collection.directory)
+ elif isinstance(args, SanityConfig):
+ # temporary work-around for import sanity test
+ coverage_config += '''
+include =
+ %s/*
+
+omit =
+ %s/*
+''' % (data_context().content.root, os.path.join(data_context().content.root, data_context().content.results_path))
+ else:
+ coverage_config += '''
+include =
+ %s/*
+''' % data_context().content.root
+
+ return coverage_config
diff --git a/test/lib/ansible_test/_internal/csharp_import_analysis.py b/test/lib/ansible_test/_internal/csharp_import_analysis.py
new file mode 100644
index 00000000..daa8892c
--- /dev/null
+++ b/test/lib/ansible_test/_internal/csharp_import_analysis.py
@@ -0,0 +1,106 @@
+"""Analyze C# import statements."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import re
+
+from .io import (
+ open_text_file,
+)
+
+from .util import (
+ display,
+)
+
+from .util_common import (
+ resolve_csharp_ps_util,
+)
+
+from .data import (
+ data_context,
+)
+
+
+def get_csharp_module_utils_imports(powershell_targets, csharp_targets):
+ """Return a dictionary of module_utils names mapped to sets of powershell file paths.
+ :type powershell_targets: list[TestTarget] - C# files
+ :type csharp_targets: list[TestTarget] - PS files
+ :rtype: dict[str, set[str]]
+ """
+
+ module_utils = enumerate_module_utils()
+
+ imports_by_target_path = {}
+
+ for target in powershell_targets:
+ imports_by_target_path[target.path] = extract_csharp_module_utils_imports(target.path, module_utils, False)
+
+ for target in csharp_targets:
+ imports_by_target_path[target.path] = extract_csharp_module_utils_imports(target.path, module_utils, True)
+
+ imports = dict([(module_util, set()) for module_util in module_utils])
+
+ for target_path in imports_by_target_path:
+ for module_util in imports_by_target_path[target_path]:
+ imports[module_util].add(target_path)
+
+ for module_util in sorted(imports):
+ if not imports[module_util]:
+ display.warning('No imports found which use the "%s" module_util.' % module_util)
+
+ return imports
+
+
+def get_csharp_module_utils_name(path): # type: (str) -> str
+ """Return a namespace and name from the given module_utils path."""
+ base_path = data_context().content.module_utils_csharp_path
+
+ if data_context().content.collection:
+ prefix = 'ansible_collections.' + data_context().content.collection.prefix + 'plugins.module_utils.'
+ else:
+ prefix = ''
+
+ name = prefix + os.path.splitext(os.path.relpath(path, base_path))[0].replace(os.path.sep, '.')
+
+ return name
+
+
+def enumerate_module_utils():
+ """Return a list of available module_utils imports.
+ :rtype: set[str]
+ """
+ return set(get_csharp_module_utils_name(p)
+ for p in data_context().content.walk_files(data_context().content.module_utils_csharp_path)
+ if os.path.splitext(p)[1] == '.cs')
+
+
+def extract_csharp_module_utils_imports(path, module_utils, is_pure_csharp):
+ """Return a list of module_utils imports found in the specified source file.
+ :type path: str
+ :type module_utils: set[str]
+ :type is_pure_csharp: bool
+ :rtype: set[str]
+ """
+ imports = set()
+ if is_pure_csharp:
+ pattern = re.compile(r'(?i)^using\s((?:Ansible|AnsibleCollections)\..+);$')
+ else:
+ pattern = re.compile(r'(?i)^#\s*ansiblerequires\s+-csharputil\s+((?:Ansible|ansible.collections|\.)\..+)')
+
+ with open_text_file(path) as module_file:
+ for line_number, line in enumerate(module_file, 1):
+ match = re.search(pattern, line)
+
+ if not match:
+ continue
+
+ import_name = resolve_csharp_ps_util(match.group(1), path)
+
+ if import_name in module_utils:
+ imports.add(import_name)
+ elif data_context().content.is_ansible or \
+ import_name.startswith('ansible_collections.%s' % data_context().content.prefix):
+ display.warning('%s:%d Invalid module_utils import: %s' % (path, line_number, import_name))
+
+ return imports
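
The two import patterns above can be exercised in isolation; a small sketch with illustrative source lines (the module_util name is only an example):

import re

pure_csharp = re.compile(r'(?i)^using\s((?:Ansible|AnsibleCollections)\..+);$')
powershell = re.compile(r'(?i)^#\s*ansiblerequires\s+-csharputil\s+((?:Ansible|ansible.collections|\.)\..+)')

print(pure_csharp.search('using Ansible.Basic;').group(1))                        # Ansible.Basic
print(powershell.search('#AnsibleRequires -CSharpUtil Ansible.Basic').group(1))   # Ansible.Basic
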
diff --git a/test/lib/ansible_test/_internal/data.py b/test/lib/ansible_test/_internal/data.py
new file mode 100644
index 00000000..38ae6d21
--- /dev/null
+++ b/test/lib/ansible_test/_internal/data.py
@@ -0,0 +1,200 @@
+"""Context information for the current invocation of ansible-test."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from . import types as t
+
+from .util import (
+ ApplicationError,
+ import_plugins,
+ is_subdir,
+ ANSIBLE_LIB_ROOT,
+ ANSIBLE_TEST_ROOT,
+ ANSIBLE_SOURCE_ROOT,
+ display,
+)
+
+from .provider import (
+ find_path_provider,
+ get_path_provider_classes,
+ ProviderNotFoundForPath,
+)
+
+from .provider.source import (
+ SourceProvider,
+)
+
+from .provider.source.unversioned import (
+ UnversionedSource,
+)
+
+from .provider.source.installed import (
+ InstalledSource,
+)
+
+from .provider.layout import (
+ ContentLayout,
+ LayoutProvider,
+)
+
+
+class DataContext:
+ """Data context providing details about the current execution environment for ansible-test."""
+ def __init__(self):
+ content_path = os.environ.get('ANSIBLE_TEST_CONTENT_ROOT')
+ current_path = os.getcwd()
+
+ layout_providers = get_path_provider_classes(LayoutProvider)
+ source_providers = get_path_provider_classes(SourceProvider)
+
+ self.__layout_providers = layout_providers
+ self.__source_providers = source_providers
+ self.__ansible_source = None # type: t.Optional[t.Tuple[t.Tuple[str, str], ...]]
+
+ self.payload_callbacks = [] # type: t.List[t.Callable[[t.List[t.Tuple[str, str]]], None]]
+
+ if content_path:
+ content = self.__create_content_layout(layout_providers, source_providers, content_path, False)
+ elif ANSIBLE_SOURCE_ROOT and is_subdir(current_path, ANSIBLE_SOURCE_ROOT):
+ content = self.__create_content_layout(layout_providers, source_providers, ANSIBLE_SOURCE_ROOT, False)
+ else:
+ content = self.__create_content_layout(layout_providers, source_providers, current_path, True)
+
+ self.content = content # type: ContentLayout
+
+ def create_collection_layouts(self): # type: () -> t.List[ContentLayout]
+ """
+ Return a list of collection layouts, one for each collection in the same collection root as the current collection layout.
+ An empty list is returned if the current content layout is not a collection layout.
+ """
+ layout = self.content
+ collection = layout.collection
+
+ if not collection:
+ return []
+
+ root_path = os.path.join(collection.root, 'ansible_collections')
+ display.info('Scanning collection root: %s' % root_path, verbosity=1)
+ namespace_names = sorted(name for name in os.listdir(root_path) if os.path.isdir(os.path.join(root_path, name)))
+ collections = []
+
+ for namespace_name in namespace_names:
+ namespace_path = os.path.join(root_path, namespace_name)
+ collection_names = sorted(name for name in os.listdir(namespace_path) if os.path.isdir(os.path.join(namespace_path, name)))
+
+ for collection_name in collection_names:
+ collection_path = os.path.join(namespace_path, collection_name)
+
+ if collection_path == os.path.join(collection.root, collection.directory):
+ collection_layout = layout
+ else:
+ collection_layout = self.__create_content_layout(self.__layout_providers, self.__source_providers, collection_path, False)
+
+ file_count = len(collection_layout.all_files())
+
+ if not file_count:
+ continue
+
+ display.info('Including collection: %s (%d files)' % (collection_layout.collection.full_name, file_count), verbosity=1)
+ collections.append(collection_layout)
+
+ return collections
+
+ @staticmethod
+ def __create_content_layout(layout_providers, # type: t.List[t.Type[LayoutProvider]]
+ source_providers, # type: t.List[t.Type[SourceProvider]]
+ root, # type: str
+ walk, # type: bool
+ ): # type: (...) -> ContentLayout
+ """Create a content layout using the given providers and root path."""
+ layout_provider = find_path_provider(LayoutProvider, layout_providers, root, walk)
+
+ try:
+ # Begin the search for the source provider at the layout provider root.
+ # This intentionally ignores version control within subdirectories of the layout root, a condition which was previously an error.
+ # Doing so allows support for older git versions for which it is difficult to distinguish between a super project and a sub project.
+ # It also provides a better user experience, since the solution for the user would effectively be the same -- to remove the nested version control.
+ source_provider = find_path_provider(SourceProvider, source_providers, layout_provider.root, walk)
+ except ProviderNotFoundForPath:
+ source_provider = UnversionedSource(layout_provider.root)
+
+ layout = layout_provider.create(layout_provider.root, source_provider.get_paths(layout_provider.root))
+
+ return layout
+
+ def __create_ansible_source(self):
+ """Return a tuple of Ansible source files with both absolute and relative paths."""
+ if not ANSIBLE_SOURCE_ROOT:
+ sources = []
+
+ source_provider = InstalledSource(ANSIBLE_LIB_ROOT)
+ sources.extend((os.path.join(source_provider.root, path), os.path.join('lib', 'ansible', path))
+ for path in source_provider.get_paths(source_provider.root))
+
+ source_provider = InstalledSource(ANSIBLE_TEST_ROOT)
+ sources.extend((os.path.join(source_provider.root, path), os.path.join('test', 'lib', 'ansible_test', path))
+ for path in source_provider.get_paths(source_provider.root))
+
+ return tuple(sources)
+
+ if self.content.is_ansible:
+ return tuple((os.path.join(self.content.root, path), path) for path in self.content.all_files())
+
+ try:
+ source_provider = find_path_provider(SourceProvider, self.__source_providers, ANSIBLE_SOURCE_ROOT, False)
+ except ProviderNotFoundForPath:
+ source_provider = UnversionedSource(ANSIBLE_SOURCE_ROOT)
+
+ return tuple((os.path.join(source_provider.root, path), path) for path in source_provider.get_paths(source_provider.root))
+
+ @property
+ def ansible_source(self): # type: () -> t.Tuple[t.Tuple[str, str], ...]
+ """Return a tuple of Ansible source files with both absolute and relative paths."""
+ if not self.__ansible_source:
+ self.__ansible_source = self.__create_ansible_source()
+
+ return self.__ansible_source
+
+ def register_payload_callback(self, callback): # type: (t.Callable[[t.List[t.Tuple[str, str]]], None]) -> None
+ """Register the given payload callback."""
+ self.payload_callbacks.append(callback)
+
+
+def data_init(): # type: () -> DataContext
+ """Initialize provider plugins."""
+ provider_types = (
+ 'layout',
+ 'source',
+ )
+
+ for provider_type in provider_types:
+ import_plugins('provider/%s' % provider_type)
+
+ try:
+ context = DataContext()
+ except ProviderNotFoundForPath:
+ options = [
+ ' - an Ansible collection: {...}/ansible_collections/{namespace}/{collection}/',
+ ]
+
+ if ANSIBLE_SOURCE_ROOT:
+ options.insert(0, ' - the Ansible source: %s/' % ANSIBLE_SOURCE_ROOT)
+
+ raise ApplicationError('''The current working directory must be at or below:
+
+%s
+
+Current working directory: %s''' % ('\n'.join(options), os.getcwd()))
+
+ return context
+
+
+def data_context(): # type: () -> DataContext
+ """Return the current data context."""
+ try:
+ return data_context.instance
+ except AttributeError:
+ data_context.instance = data_init()
+ return data_context.instance
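
data_context() caches its result as a function attribute so the DataContext is built only once. A minimal standalone sketch of that lazy-singleton idiom (Context and get_context are stand-in names, not ansible-test APIs):

class Context:
    """Trivial stand-in for the expensive-to-build DataContext."""
    def __init__(self):
        print('initialization runs once')


def get_context():
    """Return the shared Context, creating it on first use."""
    try:
        return get_context.instance
    except AttributeError:
        get_context.instance = Context()
        return get_context.instance


assert get_context() is get_context()  # the second call reuses the cached instance
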
diff --git a/test/lib/ansible_test/_internal/delegation.py b/test/lib/ansible_test/_internal/delegation.py
new file mode 100644
index 00000000..3262dd51
--- /dev/null
+++ b/test/lib/ansible_test/_internal/delegation.py
@@ -0,0 +1,667 @@
+"""Delegate test execution to another environment."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import re
+import sys
+import tempfile
+
+from . import types as t
+
+from .io import (
+ make_dirs,
+)
+
+from .executor import (
+ SUPPORTED_PYTHON_VERSIONS,
+ HTTPTESTER_HOSTS,
+ create_shell_command,
+ run_httptester,
+ start_httptester,
+ get_python_interpreter,
+ get_python_version,
+)
+
+from .config import (
+ TestConfig,
+ EnvironmentConfig,
+ IntegrationConfig,
+ WindowsIntegrationConfig,
+ NetworkIntegrationConfig,
+ ShellConfig,
+ SanityConfig,
+ UnitsConfig,
+)
+
+from .core_ci import (
+ AnsibleCoreCI,
+)
+
+from .manage_ci import (
+ ManagePosixCI,
+ ManageWindowsCI,
+)
+
+from .util import (
+ ApplicationError,
+ common_environment,
+ display,
+ ANSIBLE_BIN_PATH,
+ ANSIBLE_TEST_DATA_ROOT,
+ ANSIBLE_LIB_ROOT,
+ ANSIBLE_TEST_ROOT,
+ tempdir,
+)
+
+from .util_common import (
+ run_command,
+ ResultType,
+ create_interpreter_wrapper,
+ get_docker_completion,
+ get_remote_completion,
+)
+
+from .docker_util import (
+ docker_exec,
+ docker_get,
+ docker_pull,
+ docker_put,
+ docker_rm,
+ docker_run,
+ docker_available,
+ docker_network_disconnect,
+ get_docker_networks,
+ get_docker_preferred_network_name,
+ get_docker_hostname,
+ is_docker_user_defined_network,
+)
+
+from .cloud import (
+ get_cloud_providers,
+)
+
+from .target import (
+ IntegrationTarget,
+)
+
+from .data import (
+ data_context,
+)
+
+from .payload import (
+ create_payload,
+)
+
+from .venv import (
+ create_virtual_environment,
+)
+
+from .ci import (
+ get_ci_provider,
+)
+
+
+def check_delegation_args(args):
+ """
+ :type args: CommonConfig
+ """
+ if not isinstance(args, EnvironmentConfig):
+ return
+
+ if args.docker:
+ get_python_version(args, get_docker_completion(), args.docker_raw)
+ elif args.remote:
+ get_python_version(args, get_remote_completion(), args.remote)
+
+
+def delegate(args, exclude, require, integration_targets):
+ """
+ :type args: EnvironmentConfig
+ :type exclude: list[str]
+ :type require: list[str]
+ :type integration_targets: tuple[IntegrationTarget]
+ :rtype: bool
+ """
+ if isinstance(args, TestConfig):
+ args.metadata.ci_provider = get_ci_provider().code
+
+ make_dirs(ResultType.TMP.path)
+
+ with tempfile.NamedTemporaryFile(prefix='metadata-', suffix='.json', dir=ResultType.TMP.path) as metadata_fd:
+ args.metadata_path = os.path.join(ResultType.TMP.relative_path, os.path.basename(metadata_fd.name))
+ args.metadata.to_file(args.metadata_path)
+
+ try:
+ return delegate_command(args, exclude, require, integration_targets)
+ finally:
+ args.metadata_path = None
+ else:
+ return delegate_command(args, exclude, require, integration_targets)
+
+
+def delegate_command(args, exclude, require, integration_targets):
+ """
+ :type args: EnvironmentConfig
+ :type exclude: list[str]
+ :type require: list[str]
+ :type integration_targets: tuple[IntegrationTarget]
+ :rtype: bool
+ """
+ if args.venv:
+ delegate_venv(args, exclude, require, integration_targets)
+ return True
+
+ if args.docker:
+ delegate_docker(args, exclude, require, integration_targets)
+ return True
+
+ if args.remote:
+ delegate_remote(args, exclude, require, integration_targets)
+ return True
+
+ return False
+
+
+def delegate_venv(args, # type: EnvironmentConfig
+ exclude, # type: t.List[str]
+ require, # type: t.List[str]
+ integration_targets, # type: t.Tuple[IntegrationTarget, ...]
+ ): # type: (...) -> None
+ """Delegate ansible-test execution to a virtual environment using venv or virtualenv."""
+ if args.python:
+ versions = (args.python_version,)
+ else:
+ versions = SUPPORTED_PYTHON_VERSIONS
+
+ if args.httptester:
+ needs_httptester = sorted(target.name for target in integration_targets if 'needs/httptester/' in target.aliases)
+
+ if needs_httptester:
+ display.warning('Use --docker or --remote to enable httptester for tests marked "needs/httptester": %s' % ', '.join(needs_httptester))
+
+ if args.venv_system_site_packages:
+ suffix = '-ssp'
+ else:
+ suffix = ''
+
+ venvs = dict((version, os.path.join(ResultType.TMP.path, 'delegation', 'python%s%s' % (version, suffix))) for version in versions)
+ venvs = dict((version, path) for version, path in venvs.items() if create_virtual_environment(args, version, path, args.venv_system_site_packages))
+
+ if not venvs:
+ raise ApplicationError('No usable virtual environment support found.')
+
+ options = {
+ '--venv': 0,
+ '--venv-system-site-packages': 0,
+ }
+
+ with tempdir() as inject_path:
+ for version, path in venvs.items():
+ create_interpreter_wrapper(os.path.join(path, 'bin', 'python'), os.path.join(inject_path, 'python%s' % version))
+
+ python_interpreter = os.path.join(inject_path, 'python%s' % args.python_version)
+
+ cmd = generate_command(args, python_interpreter, ANSIBLE_BIN_PATH, data_context().content.root, options, exclude, require)
+
+ if isinstance(args, TestConfig):
+ if args.coverage and not args.coverage_label:
+ cmd += ['--coverage-label', 'venv']
+
+ env = common_environment()
+
+ with tempdir() as library_path:
+ # expose ansible and ansible_test to the virtual environment (only required when running from an install)
+ os.symlink(ANSIBLE_LIB_ROOT, os.path.join(library_path, 'ansible'))
+ os.symlink(ANSIBLE_TEST_ROOT, os.path.join(library_path, 'ansible_test'))
+
+ env.update(
+ PATH=inject_path + os.path.pathsep + env['PATH'],
+ PYTHONPATH=library_path,
+ )
+
+ run_command(args, cmd, env=env)
+
+
+def delegate_docker(args, exclude, require, integration_targets):
+ """
+ :type args: EnvironmentConfig
+ :type exclude: list[str]
+ :type require: list[str]
+ :type integration_targets: tuple[IntegrationTarget]
+ """
+ test_image = args.docker
+ privileged = args.docker_privileged
+
+ if isinstance(args, ShellConfig):
+ use_httptester = args.httptester
+ else:
+ use_httptester = args.httptester and any('needs/httptester/' in target.aliases for target in integration_targets)
+
+ if use_httptester:
+ docker_pull(args, args.httptester)
+
+ docker_pull(args, test_image)
+
+ httptester_id = None
+ test_id = None
+ success = False
+
+ options = {
+ '--docker': 1,
+ '--docker-privileged': 0,
+ '--docker-util': 1,
+ }
+
+ python_interpreter = get_python_interpreter(args, get_docker_completion(), args.docker_raw)
+
+ pwd = '/root'
+ ansible_root = os.path.join(pwd, 'ansible')
+
+ if data_context().content.collection:
+ content_root = os.path.join(pwd, data_context().content.collection.directory)
+ else:
+ content_root = ansible_root
+
+ remote_results_root = os.path.join(content_root, data_context().content.results_path)
+
+ cmd = generate_command(args, python_interpreter, os.path.join(ansible_root, 'bin'), content_root, options, exclude, require)
+
+ if isinstance(args, TestConfig):
+ if args.coverage and not args.coverage_label:
+ image_label = args.docker_raw
+ image_label = re.sub('[^a-zA-Z0-9]+', '-', image_label)
+ cmd += ['--coverage-label', 'docker-%s' % image_label]
+
+ if isinstance(args, IntegrationConfig):
+ if not args.allow_destructive:
+ cmd.append('--allow-destructive')
+
+ cmd_options = []
+
+ if isinstance(args, ShellConfig) or (isinstance(args, IntegrationConfig) and args.debug_strategy):
+ cmd_options.append('-it')
+
+ with tempfile.NamedTemporaryFile(prefix='ansible-source-', suffix='.tgz') as local_source_fd:
+ try:
+ create_payload(args, local_source_fd.name)
+
+ if use_httptester:
+ httptester_id = run_httptester(args)
+ else:
+ httptester_id = None
+
+ test_options = [
+ '--detach',
+ '--volume', '/sys/fs/cgroup:/sys/fs/cgroup:ro',
+ '--privileged=%s' % str(privileged).lower(),
+ ]
+
+ if args.docker_memory:
+ test_options.extend([
+ '--memory=%d' % args.docker_memory,
+ '--memory-swap=%d' % args.docker_memory,
+ ])
+
+ docker_socket = '/var/run/docker.sock'
+
+ if args.docker_seccomp != 'default':
+ test_options += ['--security-opt', 'seccomp=%s' % args.docker_seccomp]
+
+ if get_docker_hostname() != 'localhost' or os.path.exists(docker_socket):
+ test_options += ['--volume', '%s:%s' % (docker_socket, docker_socket)]
+
+ if httptester_id:
+ test_options += ['--env', 'HTTPTESTER=1']
+
+ network = get_docker_preferred_network_name(args)
+
+ if not is_docker_user_defined_network(network):
+ # legacy links are required when using the default bridge network instead of user-defined networks
+ for host in HTTPTESTER_HOSTS:
+ test_options += ['--link', '%s:%s' % (httptester_id, host)]
+
+ if isinstance(args, IntegrationConfig):
+ cloud_platforms = get_cloud_providers(args)
+
+ for cloud_platform in cloud_platforms:
+ test_options += cloud_platform.get_docker_run_options()
+
+ test_id = docker_run(args, test_image, options=test_options)[0]
+
+ if args.explain:
+ test_id = 'test_id'
+ else:
+ test_id = test_id.strip()
+
+ # write temporary files to /root since /tmp isn't ready immediately on container start
+ docker_put(args, test_id, os.path.join(ANSIBLE_TEST_DATA_ROOT, 'setup', 'docker.sh'), '/root/docker.sh')
+ docker_exec(args, test_id, ['/bin/bash', '/root/docker.sh'])
+ docker_put(args, test_id, local_source_fd.name, '/root/test.tgz')
+ docker_exec(args, test_id, ['tar', 'oxzf', '/root/test.tgz', '-C', '/root'])
+
+ # docker images are only expected to have a single python version available
+ if isinstance(args, UnitsConfig) and not args.python:
+ cmd += ['--python', 'default']
+
+ # run unit tests unprivileged to prevent stray writes to the source tree
+ # also disconnect from the network once requirements have been installed
+ if isinstance(args, UnitsConfig):
+ writable_dirs = [
+ os.path.join(content_root, ResultType.JUNIT.relative_path),
+ os.path.join(content_root, ResultType.COVERAGE.relative_path),
+ ]
+
+ docker_exec(args, test_id, ['mkdir', '-p'] + writable_dirs)
+ docker_exec(args, test_id, ['chmod', '777'] + writable_dirs)
+ docker_exec(args, test_id, ['chmod', '755', '/root'])
+ docker_exec(args, test_id, ['chmod', '644', os.path.join(content_root, args.metadata_path)])
+
+ docker_exec(args, test_id, ['useradd', 'pytest', '--create-home'])
+
+ docker_exec(args, test_id, cmd + ['--requirements-mode', 'only'], options=cmd_options)
+
+ networks = get_docker_networks(args, test_id)
+
+ if networks is not None:
+ for network in networks:
+ docker_network_disconnect(args, test_id, network)
+ else:
+ display.warning('Network disconnection is not supported (this is normal under podman). '
+ 'Tests will not be isolated from the network. Network-related tests may misbehave.')
+
+ cmd += ['--requirements-mode', 'skip']
+
+ cmd_options += ['--user', 'pytest']
+
+ try:
+ docker_exec(args, test_id, cmd, options=cmd_options)
+ # docker_exec will throw SubprocessError if not successful
+ # If we make it here, all the prep work earlier and the docker_exec line above were all successful.
+ success = True
+ finally:
+ local_test_root = os.path.dirname(os.path.join(data_context().content.root, data_context().content.results_path))
+
+ remote_test_root = os.path.dirname(remote_results_root)
+ remote_results_name = os.path.basename(remote_results_root)
+ remote_temp_file = os.path.join('/root', remote_results_name + '.tgz')
+
+ make_dirs(local_test_root) # make sure directory exists for collections which have no tests
+
+ with tempfile.NamedTemporaryFile(prefix='ansible-result-', suffix='.tgz') as local_result_fd:
+ docker_exec(args, test_id, ['tar', 'czf', remote_temp_file, '--exclude', ResultType.TMP.name, '-C', remote_test_root, remote_results_name])
+ docker_get(args, test_id, remote_temp_file, local_result_fd.name)
+ run_command(args, ['tar', 'oxzf', local_result_fd.name, '-C', local_test_root])
+ finally:
+ if httptester_id:
+ docker_rm(args, httptester_id)
+
+ if test_id:
+ if args.docker_terminate == 'always' or (args.docker_terminate == 'success' and success):
+ docker_rm(args, test_id)
+
+
+def delegate_remote(args, exclude, require, integration_targets):
+ """
+ :type args: EnvironmentConfig
+ :type exclude: list[str]
+ :type require: list[str]
+ :type integration_targets: tuple[IntegrationTarget]
+ """
+ remote = args.parsed_remote
+
+ core_ci = AnsibleCoreCI(args, remote.platform, remote.version, stage=args.remote_stage, provider=args.remote_provider, arch=remote.arch)
+ success = False
+ raw = False
+
+ if isinstance(args, ShellConfig):
+ use_httptester = args.httptester
+ raw = args.raw
+ else:
+ use_httptester = args.httptester and any('needs/httptester/' in target.aliases for target in integration_targets)
+
+ if use_httptester and not docker_available():
+ display.warning('Assuming --disable-httptester since `docker` is not available.')
+ use_httptester = False
+
+ httptester_id = None
+ ssh_options = []
+ content_root = None
+
+ try:
+ core_ci.start()
+
+ if use_httptester:
+ httptester_id, ssh_options = start_httptester(args)
+
+ core_ci.wait()
+
+ python_version = get_python_version(args, get_remote_completion(), args.remote)
+
+ if remote.platform == 'windows':
+ # Windows doesn't need the ansible-test fluff, just run the SSH command
+ manage = ManageWindowsCI(core_ci)
+ manage.setup(python_version)
+
+ cmd = ['powershell.exe']
+ elif raw:
+ manage = ManagePosixCI(core_ci)
+ manage.setup(python_version)
+
+ cmd = create_shell_command(['bash'])
+ else:
+ manage = ManagePosixCI(core_ci)
+ pwd = manage.setup(python_version)
+
+ options = {
+ '--remote': 1,
+ }
+
+ python_interpreter = get_python_interpreter(args, get_remote_completion(), args.remote)
+
+ ansible_root = os.path.join(pwd, 'ansible')
+
+ if data_context().content.collection:
+ content_root = os.path.join(pwd, data_context().content.collection.directory)
+ else:
+ content_root = ansible_root
+
+ cmd = generate_command(args, python_interpreter, os.path.join(ansible_root, 'bin'), content_root, options, exclude, require)
+
+ if httptester_id:
+ cmd += ['--inject-httptester']
+
+ if isinstance(args, TestConfig):
+ if args.coverage and not args.coverage_label:
+ cmd += ['--coverage-label', 'remote-%s-%s' % (remote.platform, remote.version)]
+
+ if isinstance(args, IntegrationConfig):
+ if not args.allow_destructive:
+ cmd.append('--allow-destructive')
+
+ # remote instances are only expected to have a single python version available
+ if isinstance(args, UnitsConfig) and not args.python:
+ cmd += ['--python', 'default']
+
+ if isinstance(args, IntegrationConfig):
+ cloud_platforms = get_cloud_providers(args)
+
+ for cloud_platform in cloud_platforms:
+ ssh_options += cloud_platform.get_remote_ssh_options()
+
+ try:
+ manage.ssh(cmd, ssh_options)
+ success = True
+ finally:
+ download = False
+
+ if remote.platform != 'windows':
+ download = True
+
+ if isinstance(args, ShellConfig):
+ if args.raw:
+ download = False
+
+ if download and content_root:
+ local_test_root = os.path.dirname(os.path.join(data_context().content.root, data_context().content.results_path))
+
+ remote_results_root = os.path.join(content_root, data_context().content.results_path)
+ remote_results_name = os.path.basename(remote_results_root)
+ remote_temp_path = os.path.join('/tmp', remote_results_name)
+
+ # AIX cp and GNU cp take different options; no common invocation was found that
+ # achieves the same result on both platforms.
+ cp_opts = '-hr' if remote.platform in ['aix', 'ibmi'] else '-a'
+
+ manage.ssh('rm -rf {0} && mkdir {0} && cp {1} {2}/* {0}/ && chmod -R a+r {0}'.format(remote_temp_path, cp_opts, remote_results_root))
+ manage.download(remote_temp_path, local_test_root)
+ finally:
+ if args.remote_terminate == 'always' or (args.remote_terminate == 'success' and success):
+ core_ci.stop()
+
+ if httptester_id:
+ docker_rm(args, httptester_id)
+
+
+def generate_command(args, python_interpreter, ansible_bin_path, content_root, options, exclude, require):
+ """
+ :type args: EnvironmentConfig
+ :type python_interpreter: str | None
+ :type ansible_bin_path: str
+ :type content_root: str
+ :type options: dict[str, int]
+ :type exclude: list[str]
+ :type require: list[str]
+ :rtype: list[str]
+ """
+ options['--color'] = 1
+
+ cmd = [os.path.join(ansible_bin_path, 'ansible-test')]
+
+ if python_interpreter:
+ cmd = [python_interpreter] + cmd
+
+ # Force the encoding used during delegation.
+ # This is only needed because ansible-test relies on Python's file system encoding.
+ # Environments that do not have the locale configured are thus unable to work with unicode file paths.
+ # Examples include FreeBSD and some Linux containers.
+ env_vars = dict(
+ LC_ALL='en_US.UTF-8',
+ ANSIBLE_TEST_CONTENT_ROOT=content_root,
+ )
+
+ env_args = ['%s=%s' % (key, env_vars[key]) for key in sorted(env_vars)]
+
+ cmd = ['/usr/bin/env'] + env_args + cmd
+
+ cmd += list(filter_options(args, sys.argv[1:], options, exclude, require))
+ cmd += ['--color', 'yes' if args.color else 'no']
+
+ if args.requirements:
+ cmd += ['--requirements']
+
+ if isinstance(args, ShellConfig):
+ cmd = create_shell_command(cmd)
+ elif isinstance(args, SanityConfig):
+ base_branch = args.base_branch or get_ci_provider().get_base_branch()
+
+ if base_branch:
+ cmd += ['--base-branch', base_branch]
+
+ return cmd
+
+
+def filter_options(args, argv, options, exclude, require):
+ """
+ :type args: EnvironmentConfig
+ :type argv: list[str]
+ :type options: dict[str, int]
+ :type exclude: list[str]
+ :type require: list[str]
+ :rtype: collections.Iterable[str]
+ """
+ options = options.copy()
+
+ options['--requirements'] = 0
+ options['--truncate'] = 1
+ options['--redact'] = 0
+ options['--no-redact'] = 0
+
+ if isinstance(args, TestConfig):
+ options.update({
+ '--changed': 0,
+ '--tracked': 0,
+ '--untracked': 0,
+ '--ignore-committed': 0,
+ '--ignore-staged': 0,
+ '--ignore-unstaged': 0,
+ '--changed-from': 1,
+ '--changed-path': 1,
+ '--metadata': 1,
+ '--exclude': 1,
+ '--require': 1,
+ })
+ elif isinstance(args, SanityConfig):
+ options.update({
+ '--base-branch': 1,
+ })
+
+ if isinstance(args, IntegrationConfig):
+ options.update({
+ '--no-temp-unicode': 0,
+ '--no-pip-check': 0,
+ })
+
+ if isinstance(args, (NetworkIntegrationConfig, WindowsIntegrationConfig)):
+ options.update({
+ '--inventory': 1,
+ })
+
+ remaining = 0
+
+ for arg in argv:
+ if not arg.startswith('-') and remaining:
+ remaining -= 1
+ continue
+
+ remaining = 0
+
+ parts = arg.split('=', 1)
+ key = parts[0]
+
+ if key in options:
+ remaining = options[key] - len(parts) + 1
+ continue
+
+ yield arg
+
+ for arg in args.delegate_args:
+ yield arg
+
+ for target in exclude:
+ yield '--exclude'
+ yield target
+
+ for target in require:
+ yield '--require'
+ yield target
+
+ if isinstance(args, TestConfig):
+ if args.metadata_path:
+ yield '--metadata'
+ yield args.metadata_path
+
+ yield '--truncate'
+ yield '%d' % args.truncate
+
+ if args.redact:
+ yield '--redact'
+ else:
+ yield '--no-redact'
+
+ if isinstance(args, IntegrationConfig):
+ if args.no_temp_unicode:
+ yield '--no-temp-unicode'
+
+ if not args.pip_check:
+ yield '--no-pip-check'
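
The option-skipping loop in filter_options can be shown in isolation; a reduced sketch under the same rules, where the option map gives the number of values each option consumes (filter_args and the sample argv are hypothetical):

def filter_args(argv, options):
    """Yield argv entries not consumed by the given options (option name -> value count)."""
    remaining = 0

    for arg in argv:
        if not arg.startswith('-') and remaining:
            remaining -= 1  # this is the value of a previously matched option
            continue

        remaining = 0
        parts = arg.split('=', 1)
        key = parts[0]

        if key in options:
            remaining = options[key] - len(parts) + 1  # consume the option and its value(s)
            continue

        yield arg


print(list(filter_args(['units', '--docker', 'centos8', '--color', 'path'], {'--docker': 1, '--color': 0})))
# -> ['units', 'path']
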
diff --git a/test/lib/ansible_test/_internal/diff.py b/test/lib/ansible_test/_internal/diff.py
new file mode 100644
index 00000000..1e2038b9
--- /dev/null
+++ b/test/lib/ansible_test/_internal/diff.py
@@ -0,0 +1,256 @@
+"""Diff parsing functions and classes."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+import textwrap
+import traceback
+
+from . import types as t
+
+from .util import (
+ ApplicationError,
+)
+
+
+def parse_diff(lines):
+ """
+ :type lines: list[str]
+ :rtype: list[FileDiff]
+ """
+ return DiffParser(lines).files
+
+
+class FileDiff:
+ """Parsed diff for a single file."""
+ def __init__(self, old_path, new_path):
+ """
+ :type old_path: str
+ :type new_path: str
+ """
+ self.old = DiffSide(old_path, new=False)
+ self.new = DiffSide(new_path, new=True)
+ self.headers = [] # type: t.List[str]
+ self.binary = False
+
+ def append_header(self, line):
+ """
+ :type line: str
+ """
+ self.headers.append(line)
+
+ @property
+ def is_complete(self):
+ """
+ :rtype: bool
+ """
+ return self.old.is_complete and self.new.is_complete
+
+
+class DiffSide:
+ """Parsed diff for a single 'side' of a single file."""
+ def __init__(self, path, new):
+ """
+ :type path: str
+ :type new: bool
+ """
+ self.path = path
+ self.new = new
+ self.prefix = '+' if self.new else '-'
+ self.eof_newline = True
+ self.exists = True
+
+ self.lines = [] # type: t.List[t.Tuple[int, str]]
+ self.lines_and_context = [] # type: t.List[t.Tuple[int, str]]
+ self.ranges = [] # type: t.List[t.Tuple[int, int]]
+
+ self._next_line_number = 0
+ self._lines_remaining = 0
+ self._range_start = 0
+
+ def set_start(self, line_start, line_count):
+ """
+ :type line_start: int
+ :type line_count: int
+ """
+ self._next_line_number = line_start
+ self._lines_remaining = line_count
+ self._range_start = 0
+
+ def append(self, line):
+ """
+ :type line: str
+ """
+ if self._lines_remaining <= 0:
+ raise Exception('Diff range overflow.')
+
+ entry = self._next_line_number, line
+
+ if line.startswith(' '):
+ pass
+ elif line.startswith(self.prefix):
+ self.lines.append(entry)
+
+ if not self._range_start:
+ self._range_start = self._next_line_number
+ else:
+ raise Exception('Unexpected diff content prefix.')
+
+ self.lines_and_context.append(entry)
+
+ self._lines_remaining -= 1
+
+ if self._range_start:
+ if self.is_complete:
+ range_end = self._next_line_number
+ elif line.startswith(' '):
+ range_end = self._next_line_number - 1
+ else:
+ range_end = 0
+
+ if range_end:
+ self.ranges.append((self._range_start, range_end))
+ self._range_start = 0
+
+ self._next_line_number += 1
+
+ @property
+ def is_complete(self):
+ """
+ :rtype: bool
+ """
+ return self._lines_remaining == 0
+
+ def format_lines(self, context=True):
+ """
+ :type context: bool
+ :rtype: list[str]
+ """
+ if context:
+ lines = self.lines_and_context
+ else:
+ lines = self.lines
+
+ return ['%s:%4d %s' % (self.path, line[0], line[1]) for line in lines]
+
+
+class DiffParser:
+ """Parse diff lines."""
+ def __init__(self, lines):
+ """
+ :type lines: list[str]
+ """
+ self.lines = lines
+ self.files = [] # type: t.List[FileDiff]
+
+ self.action = self.process_start
+ self.line_number = 0
+ self.previous_line = None # type: t.Optional[str]
+ self.line = None # type: t.Optional[str]
+ self.file = None # type: t.Optional[FileDiff]
+
+ for self.line in self.lines:
+ self.line_number += 1
+
+ try:
+ self.action()
+ except Exception as ex:
+ message = textwrap.dedent('''
+ %s
+
+ Line: %d
+ Previous: %s
+ Current: %s
+ %s
+ ''').strip() % (
+ ex,
+ self.line_number,
+ self.previous_line or '',
+ self.line or '',
+ traceback.format_exc(),
+ )
+
+ raise ApplicationError(message.strip())
+
+ self.previous_line = self.line
+
+ self.complete_file()
+
+ def process_start(self):
+ """Process a diff start line."""
+ self.complete_file()
+
+ match = re.search(r'^diff --git "?a/(?P<old_path>.*)"? "?b/(?P<new_path>.*)"?$', self.line)
+
+ if not match:
+ raise Exception('Unexpected diff start line.')
+
+ self.file = FileDiff(match.group('old_path'), match.group('new_path'))
+ self.action = self.process_continue
+
+ def process_range(self):
+ """Process a diff range line."""
+ match = re.search(r'^@@ -((?P<old_start>[0-9]+),)?(?P<old_count>[0-9]+) \+((?P<new_start>[0-9]+),)?(?P<new_count>[0-9]+) @@', self.line)
+
+ if not match:
+ raise Exception('Unexpected diff range line.')
+
+ self.file.old.set_start(int(match.group('old_start') or 1), int(match.group('old_count')))
+ self.file.new.set_start(int(match.group('new_start') or 1), int(match.group('new_count')))
+ self.action = self.process_content
+
+ def process_continue(self):
+ """Process a diff start, range or header line."""
+ if self.line.startswith('diff '):
+ self.process_start()
+ elif self.line.startswith('@@ '):
+ self.process_range()
+ else:
+ self.process_header()
+
+ def process_header(self):
+ """Process a diff header line."""
+ if self.line.startswith('Binary files '):
+ self.file.binary = True
+ elif self.line == '--- /dev/null':
+ self.file.old.exists = False
+ elif self.line == '+++ /dev/null':
+ self.file.new.exists = False
+ else:
+ self.file.append_header(self.line)
+
+ def process_content(self):
+ """Process a diff content line."""
+ if self.line == r'\ No newline at end of file':
+ if self.previous_line.startswith(' '):
+ self.file.old.eof_newline = False
+ self.file.new.eof_newline = False
+ elif self.previous_line.startswith('-'):
+ self.file.old.eof_newline = False
+ elif self.previous_line.startswith('+'):
+ self.file.new.eof_newline = False
+ else:
+ raise Exception('Unexpected previous diff content line.')
+
+ return
+
+ if self.file.is_complete:
+ self.process_continue()
+ return
+
+ if self.line.startswith(' '):
+ self.file.old.append(self.line)
+ self.file.new.append(self.line)
+ elif self.line.startswith('-'):
+ self.file.old.append(self.line)
+ elif self.line.startswith('+'):
+ self.file.new.append(self.line)
+ else:
+ raise Exception('Unexpected diff content line.')
+
+ def complete_file(self):
+ """Complete processing of the current file, if any."""
+ if not self.file:
+ return
+
+ self.files.append(self.file)
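
The hunk-header regex used by process_range can be exercised on its own; a small sketch with a made-up header line:

import re

pattern = r'^@@ -((?P<old_start>[0-9]+),)?(?P<old_count>[0-9]+) \+((?P<new_start>[0-9]+),)?(?P<new_count>[0-9]+) @@'
match = re.search(pattern, '@@ -10,7 +10,8 @@ def example():')

print(match.group('old_start'), match.group('old_count'))  # 10 7
print(match.group('new_start'), match.group('new_count'))  # 10 8
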
diff --git a/test/lib/ansible_test/_internal/docker_util.py b/test/lib/ansible_test/_internal/docker_util.py
new file mode 100644
index 00000000..1b47364d
--- /dev/null
+++ b/test/lib/ansible_test/_internal/docker_util.py
@@ -0,0 +1,409 @@
+"""Functions for accessing docker via the docker cli."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+import time
+
+from .io import (
+ open_binary_file,
+ read_text_file,
+)
+
+from .util import (
+ ApplicationError,
+ common_environment,
+ display,
+ find_executable,
+ SubprocessError,
+)
+
+from .http import (
+ urlparse,
+)
+
+from .util_common import (
+ run_command,
+)
+
+from .config import (
+ EnvironmentConfig,
+)
+
+BUFFER_SIZE = 256 * 256
+
+
+def docker_available():
+ """
+ :rtype: bool
+ """
+ return bool(find_executable('docker', required=False))
+
+
+def get_docker_hostname(): # type: () -> str
+ """Return the hostname of the Docker service."""
+ try:
+ return get_docker_hostname.hostname
+ except AttributeError:
+ pass
+
+ docker_host = os.environ.get('DOCKER_HOST')
+
+ if docker_host and docker_host.startswith('tcp://'):
+ try:
+ hostname = urlparse(docker_host)[1].split(':')[0]
+ display.info('Detected Docker host: %s' % hostname, verbosity=1)
+ except ValueError:
+ hostname = 'localhost'
+ display.warning('Could not parse DOCKER_HOST environment variable "%s", falling back to localhost.' % docker_host)
+ else:
+ hostname = 'localhost'
+ display.info('Assuming Docker is available on localhost.', verbosity=1)
+
+ get_docker_hostname.hostname = hostname
+
+ return hostname
+
+
+def get_docker_container_id():
+ """
+ :rtype: str | None
+ """
+ try:
+ return get_docker_container_id.container_id
+ except AttributeError:
+ pass
+
+ path = '/proc/self/cpuset'
+ container_id = None
+
+ if os.path.exists(path):
+ # File content varies based on the environment:
+ # No Container: /
+ # Docker: /docker/c86f3732b5ba3d28bb83b6e14af767ab96abbc52de31313dcb1176a62d91a507
+ # Azure Pipelines (Docker): /azpl_job/0f2edfed602dd6ec9f2e42c867f4d5ee640ebf4c058e6d3196d4393bb8fd0891
+ # Podman: /../../../../../..
+ contents = read_text_file(path)
+
+ cgroup_path, cgroup_name = os.path.split(contents.strip())
+
+ if cgroup_path in ('/docker', '/azpl_job'):
+ container_id = cgroup_name
+
+ get_docker_container_id.container_id = container_id
+
+ if container_id:
+ display.info('Detected execution in Docker container: %s' % container_id, verbosity=1)
+
+ return container_id
+
+
+def get_docker_container_ip(args, container_id):
+ """
+ :type args: EnvironmentConfig
+ :type container_id: str
+ :rtype: str
+ """
+ results = docker_inspect(args, container_id)
+ network_settings = results[0]['NetworkSettings']
+ networks = network_settings.get('Networks')
+
+ if networks:
+ network_name = get_docker_preferred_network_name(args)
+ ipaddress = networks[network_name]['IPAddress']
+ else:
+ # podman doesn't provide Networks, fall back to using IPAddress
+ ipaddress = network_settings['IPAddress']
+
+ if not ipaddress:
+ raise ApplicationError('Cannot retrieve IP address for container: %s' % container_id)
+
+ return ipaddress
+
+
+def get_docker_network_name(args, container_id): # type: (EnvironmentConfig, str) -> str
+ """
+ Return the network name of the specified container.
+ Raises an exception if zero or more than one network is found.
+ """
+ networks = get_docker_networks(args, container_id)
+
+ if not networks:
+ raise ApplicationError('No network found for Docker container: %s.' % container_id)
+
+ if len(networks) > 1:
+ raise ApplicationError('Found multiple networks for Docker container %s instead of only one: %s' % (container_id, ', '.join(networks)))
+
+ return networks[0]
+
+
+def get_docker_preferred_network_name(args): # type: (EnvironmentConfig) -> str
+ """
+ Return the preferred network name for use with Docker. The selection logic is:
+ - the network selected by the user with `--docker-network`
+ - the network of the currently running docker container (if any)
+ - the default docker network (returns None)
+ """
+ network = None
+
+ if args.docker_network:
+ network = args.docker_network
+ else:
+ current_container_id = get_docker_container_id()
+
+ if current_container_id:
+ # Make sure any additional containers we launch use the same network as the current container we're running in.
+ # This is needed when ansible-test is running in a container that is not connected to Docker's default network.
+ network = get_docker_network_name(args, current_container_id)
+
+ return network
+
+
+def is_docker_user_defined_network(network): # type: (str) -> bool
+ """Return True if the network being used is a user-defined network."""
+ return network and network != 'bridge'
+
+
+def get_docker_networks(args, container_id):
+ """
+ :param args: EnvironmentConfig
+ :param container_id: str
+ :rtype: list[str]
+ """
+ results = docker_inspect(args, container_id)
+ # podman doesn't return Networks; just silently return None if it's missing
+ networks = results[0]['NetworkSettings'].get('Networks')
+ if networks is None:
+ return None
+ return sorted(networks)
+
+
+def docker_pull(args, image):
+ """
+ :type args: EnvironmentConfig
+ :type image: str
+ """
+ if ('@' in image or ':' in image) and docker_images(args, image):
+ display.info('Skipping docker pull of existing image with tag or digest: %s' % image, verbosity=2)
+ return
+
+ if not args.docker_pull:
+ display.warning('Skipping docker pull for "%s". Image may be out-of-date.' % image)
+ return
+
+ for _iteration in range(1, 10):
+ try:
+ docker_command(args, ['pull', image])
+ return
+ except SubprocessError:
+ display.warning('Failed to pull docker image "%s". Waiting a few seconds before trying again.' % image)
+ time.sleep(3)
+
+ raise ApplicationError('Failed to pull docker image "%s".' % image)
+
+
+def docker_put(args, container_id, src, dst):
+ """
+ :type args: EnvironmentConfig
+ :type container_id: str
+ :type src: str
+ :type dst: str
+ """
+ # avoid 'docker cp' due to a bug which causes 'docker rm' to fail
+ with open_binary_file(src) as src_fd:
+ docker_exec(args, container_id, ['dd', 'of=%s' % dst, 'bs=%s' % BUFFER_SIZE],
+ options=['-i'], stdin=src_fd, capture=True)
+
+
+def docker_get(args, container_id, src, dst):
+ """
+ :type args: EnvironmentConfig
+ :type container_id: str
+ :type src: str
+ :type dst: str
+ """
+ # avoid 'docker cp' due to a bug which causes 'docker rm' to fail
+ with open_binary_file(dst, 'wb') as dst_fd:
+ docker_exec(args, container_id, ['dd', 'if=%s' % src, 'bs=%s' % BUFFER_SIZE],
+ options=['-i'], stdout=dst_fd, capture=True)
+
+
+def docker_run(args, image, options, cmd=None):
+ """
+ :type args: EnvironmentConfig
+ :type image: str
+ :type options: list[str] | None
+ :type cmd: list[str] | None
+ :rtype: str | None, str | None
+ """
+ if not options:
+ options = []
+
+ if not cmd:
+ cmd = []
+
+ network = get_docker_preferred_network_name(args)
+
+ if is_docker_user_defined_network(network):
+ # Only when the network is not the default bridge network.
+ # Using this with the default bridge network results in an error when using --link: links are only supported for user-defined networks
+ options.extend(['--network', network])
+
+ for _iteration in range(1, 3):
+ try:
+ return docker_command(args, ['run'] + options + [image] + cmd, capture=True)
+ except SubprocessError as ex:
+ display.error(ex)
+ display.warning('Failed to run docker image "%s". Waiting a few seconds before trying again.' % image)
+ time.sleep(3)
+
+ raise ApplicationError('Failed to run docker image "%s".' % image)
+
+
+def docker_images(args, image):
+ """
+ :param args: CommonConfig
+ :param image: str
+ :rtype: list[dict[str, any]]
+ """
+ try:
+ stdout, _dummy = docker_command(args, ['images', image, '--format', '{{json .}}'], capture=True, always=True)
+ except SubprocessError as ex:
+ if 'no such image' in ex.stderr:
+ return [] # podman does not handle this gracefully, exits 125
+
+ if 'function "json" not defined' in ex.stderr:
+ # podman > 2 && < 2.2.0 breaks with --format {{json .}}, and requires --format json
+ # So we try this as a fallback. If it fails again, we just raise the exception and bail.
+ stdout, _dummy = docker_command(args, ['images', image, '--format', 'json'], capture=True, always=True)
+ else:
+ raise ex
+
+ if stdout.startswith('['):
+ # modern podman outputs a pretty-printed json list. Just load the whole thing.
+ return json.loads(stdout)
+
+ # docker outputs one json object per line (jsonl)
+ return [json.loads(line) for line in stdout.splitlines()]
+
+
+def docker_rm(args, container_id):
+ """
+ :type args: EnvironmentConfig
+ :type container_id: str
+ """
+ try:
+ docker_command(args, ['rm', '-f', container_id], capture=True)
+ except SubprocessError as ex:
+ if 'no such container' in ex.stderr:
+ pass # podman does not handle this gracefully, exits 1
+ else:
+ raise ex
+
+
+def docker_inspect(args, container_id):
+ """
+ :type args: EnvironmentConfig
+ :type container_id: str
+ :rtype: list[dict]
+ """
+ if args.explain:
+ return []
+
+ try:
+ stdout = docker_command(args, ['inspect', container_id], capture=True)[0]
+ return json.loads(stdout)
+ except SubprocessError as ex:
+ if 'no such image' in ex.stderr:
+ return [] # podman does not handle this gracefully, exits 125
+ try:
+ return json.loads(ex.stdout)
+ except Exception:
+ raise ex
+
+
+def docker_network_disconnect(args, container_id, network):
+ """
+ :param args: EnvironmentConfig
+ :param container_id: str
+ :param network: str
+ """
+ docker_command(args, ['network', 'disconnect', network, container_id], capture=True)
+
+
+def docker_network_inspect(args, network):
+ """
+ :type args: EnvironmentConfig
+ :type network: str
+ :rtype: list[dict]
+ """
+ if args.explain:
+ return []
+
+ try:
+ stdout = docker_command(args, ['network', 'inspect', network], capture=True)[0]
+ return json.loads(stdout)
+ except SubprocessError as ex:
+ try:
+ return json.loads(ex.stdout)
+ except Exception:
+ raise ex
+
+
+def docker_exec(args, container_id, cmd, options=None, capture=False, stdin=None, stdout=None):
+ """
+ :type args: EnvironmentConfig
+ :type container_id: str
+ :type cmd: list[str]
+ :type options: list[str] | None
+ :type capture: bool
+ :type stdin: BinaryIO | None
+ :type stdout: BinaryIO | None
+ :rtype: str | None, str | None
+ """
+ if not options:
+ options = []
+
+ return docker_command(args, ['exec'] + options + [container_id] + cmd, capture=capture, stdin=stdin, stdout=stdout)
+
+
+def docker_info(args):
+ """
+ :param args: CommonConfig
+ :rtype: dict[str, any]
+ """
+ stdout, _dummy = docker_command(args, ['info', '--format', '{{json .}}'], capture=True, always=True)
+ return json.loads(stdout)
+
+
+def docker_version(args):
+ """
+ :param args: CommonConfig
+ :rtype: dict[str, any]
+ """
+ stdout, _dummy = docker_command(args, ['version', '--format', '{{json .}}'], capture=True, always=True)
+ return json.loads(stdout)
+
+
+def docker_command(args, cmd, capture=False, stdin=None, stdout=None, always=False):
+ """
+ :type args: CommonConfig
+ :type cmd: list[str]
+ :type capture: bool
+ :type stdin: file | None
+ :type stdout: file | None
+ :type always: bool
+ :rtype: str | None, str | None
+ """
+ env = docker_environment()
+ return run_command(args, ['docker'] + cmd, env=env, capture=capture, stdin=stdin, stdout=stdout, always=always)
+
+
+def docker_environment():
+ """
+ :rtype: dict[str, str]
+ """
+ env = common_environment()
+ env.update(dict((key, os.environ[key]) for key in os.environ if key.startswith('DOCKER_')))
+ return env
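
docker_images must accept either JSON-lines output (docker) or a single JSON list (newer podman). A standalone sketch of that parsing strategy, where parse_images and the image names are placeholders:

import json


def parse_images(stdout):
    """Parse `docker images --format '{{json .}}'` style output from either engine."""
    if stdout.startswith('['):
        return json.loads(stdout)  # newer podman: one pretty-printed JSON list

    return [json.loads(line) for line in stdout.splitlines()]  # docker: one JSON object per line


print(parse_images('{"Repository": "example/test-image"}\n'))
print(parse_images('[{"Repository": "example/test-image"}]'))
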
diff --git a/test/lib/ansible_test/_internal/encoding.py b/test/lib/ansible_test/_internal/encoding.py
new file mode 100644
index 00000000..8e014794
--- /dev/null
+++ b/test/lib/ansible_test/_internal/encoding.py
@@ -0,0 +1,41 @@
+"""Functions for encoding and decoding strings."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from . import types as t
+
+ENCODING = 'utf-8'
+
+Text = type(u'')
+
+
+def to_optional_bytes(value, errors='strict'): # type: (t.Optional[t.AnyStr], str) -> t.Optional[bytes]
+ """Return the given value as bytes encoded using UTF-8 if not already bytes, or None if the value is None."""
+ return None if value is None else to_bytes(value, errors)
+
+
+def to_optional_text(value, errors='strict'): # type: (t.Optional[t.AnyStr], str) -> t.Optional[t.Text]
+ """Return the given value as text decoded using UTF-8 if not already text, or None if the value is None."""
+ return None if value is None else to_text(value, errors)
+
+
+def to_bytes(value, errors='strict'): # type: (t.AnyStr, str) -> bytes
+ """Return the given value as bytes encoded using UTF-8 if not already bytes."""
+ if isinstance(value, bytes):
+ return value
+
+ if isinstance(value, Text):
+ return value.encode(ENCODING, errors)
+
+ raise Exception('value is not bytes or text: %s' % type(value))
+
+
+def to_text(value, errors='strict'): # type: (t.AnyStr, str) -> t.Text
+ """Return the given value as text decoded using UTF-8 if not already text."""
+ if isinstance(value, bytes):
+ return value.decode(ENCODING, errors)
+
+ if isinstance(value, Text):
+ return value
+
+ raise Exception('value is not bytes or text: %s' % type(value))
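
The helpers above are strict UTF-8 wrappers; a short sketch of the round-trip behavior they are expected to provide (the sample value is illustrative):

value = u'ansible-test'

encoded = value.encode('utf-8')    # to_bytes(value) is expected to return this
decoded = encoded.decode('utf-8')  # to_text(encoded) is expected to return this

assert decoded == value and isinstance(encoded, bytes)
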
diff --git a/test/lib/ansible_test/_internal/env.py b/test/lib/ansible_test/_internal/env.py
new file mode 100644
index 00000000..60c0245e
--- /dev/null
+++ b/test/lib/ansible_test/_internal/env.py
@@ -0,0 +1,293 @@
+"""Show information about the test environment."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import datetime
+import functools
+import os
+import platform
+import signal
+import sys
+import time
+
+from .config import (
+ CommonConfig,
+ TestConfig,
+)
+
+from .io import (
+ write_json_file,
+ read_json_file,
+)
+
+from .util import (
+ display,
+ find_executable,
+ SubprocessError,
+ ApplicationError,
+ get_ansible_version,
+ get_available_python_versions,
+)
+
+from .util_common import (
+ data_context,
+ write_json_test_results,
+ ResultType,
+)
+
+from .docker_util import (
+ docker_info,
+ docker_version
+)
+
+from .thread import (
+ WrappedThread,
+)
+
+from .constants import (
+ TIMEOUT_PATH,
+)
+
+from .test import (
+ TestTimeout,
+)
+
+from .executor import (
+ SUPPORTED_PYTHON_VERSIONS,
+)
+
+from .ci import (
+ get_ci_provider,
+)
+
+
+class EnvConfig(CommonConfig):
+ """Configuration for the tools command."""
+ def __init__(self, args):
+ """
+ :type args: any
+ """
+ super(EnvConfig, self).__init__(args, 'env')
+
+ self.show = args.show
+ self.dump = args.dump
+ self.timeout = args.timeout
+ self.list_files = args.list_files
+
+ if not self.show and not self.dump and self.timeout is None and not self.list_files:
+ # default to --show if no options were given
+ self.show = True
+
+
+def command_env(args):
+ """
+ :type args: EnvConfig
+ """
+ show_dump_env(args)
+ list_files_env(args)
+ set_timeout(args)
+
+
+def show_dump_env(args):
+ """
+ :type args: EnvConfig
+ """
+ if not args.show and not args.dump:
+ return
+
+ data = dict(
+ ansible=dict(
+ version=get_ansible_version(),
+ ),
+ docker=get_docker_details(args),
+ environ=os.environ.copy(),
+ location=dict(
+ pwd=os.environ.get('PWD', None),
+ cwd=os.getcwd(),
+ ),
+ git=get_ci_provider().get_git_details(args),
+ platform=dict(
+ datetime=datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'),
+ platform=platform.platform(),
+ uname=platform.uname(),
+ ),
+ python=dict(
+ executable=sys.executable,
+ version=platform.python_version(),
+ ),
+ interpreters=get_available_python_versions(SUPPORTED_PYTHON_VERSIONS),
+ )
+
+ if args.show:
+ verbose = {
+ 'docker': 3,
+ 'docker.executable': 0,
+ 'environ': 2,
+ 'platform.uname': 1,
+ }
+
+ show_dict(data, verbose)
+
+ if args.dump and not args.explain:
+ write_json_test_results(ResultType.BOT, 'data-environment.json', data)
+
+
+def list_files_env(args): # type: (EnvConfig) -> None
+ """List files on stdout."""
+ if not args.list_files:
+ return
+
+ for path in data_context().content.all_files():
+ display.info(path)
+
+
+def set_timeout(args):
+ """
+ :type args: EnvConfig
+ """
+ if args.timeout is None:
+ return
+
+ if args.timeout:
+ deadline = (datetime.datetime.utcnow() + datetime.timedelta(minutes=args.timeout)).strftime('%Y-%m-%dT%H:%M:%SZ')
+
+ display.info('Setting a %d minute test timeout which will end at: %s' % (args.timeout, deadline), verbosity=1)
+ else:
+ deadline = None
+
+ display.info('Clearing existing test timeout.', verbosity=1)
+
+ if args.explain:
+ return
+
+ if deadline:
+ data = dict(
+ duration=args.timeout,
+ deadline=deadline,
+ )
+
+ write_json_file(TIMEOUT_PATH, data)
+ elif os.path.exists(TIMEOUT_PATH):
+ os.remove(TIMEOUT_PATH)
+
+
+def get_timeout():
+ """
+ :rtype: dict[str, any] | None
+ """
+ if not os.path.exists(TIMEOUT_PATH):
+ return None
+
+ data = read_json_file(TIMEOUT_PATH)
+ data['deadline'] = datetime.datetime.strptime(data['deadline'], '%Y-%m-%dT%H:%M:%SZ')
+
+ return data
+
+
+def configure_timeout(args):
+ """
+ :type args: CommonConfig
+ """
+ if isinstance(args, TestConfig):
+ configure_test_timeout(args) # only tests are subject to the timeout
+
+
+def configure_test_timeout(args):
+ """
+ :type args: TestConfig
+ """
+ timeout = get_timeout()
+
+ if not timeout:
+ return
+
+ timeout_start = datetime.datetime.utcnow()
+ timeout_duration = timeout['duration']
+ timeout_deadline = timeout['deadline']
+ timeout_remaining = timeout_deadline - timeout_start
+
+ test_timeout = TestTimeout(timeout_duration)
+
+ if timeout_remaining <= datetime.timedelta():
+ test_timeout.write(args)
+
+ raise ApplicationError('The %d minute test timeout expired %s ago at %s.' % (
+ timeout_duration, timeout_remaining * -1, timeout_deadline))
+
+ display.info('The %d minute test timeout expires in %s at %s.' % (
+ timeout_duration, timeout_remaining, timeout_deadline), verbosity=1)
+
+ def timeout_handler(_dummy1, _dummy2):
+ """Runs when SIGUSR1 is received."""
+ test_timeout.write(args)
+
+ raise ApplicationError('Tests aborted after exceeding the %d minute time limit.' % timeout_duration)
+
+ def timeout_waiter(timeout_seconds):
+ """
+ :type timeout_seconds: int
+ """
+ time.sleep(timeout_seconds)
+ os.kill(os.getpid(), signal.SIGUSR1)
+
+ signal.signal(signal.SIGUSR1, timeout_handler)
+
+ instance = WrappedThread(functools.partial(timeout_waiter, timeout_remaining.seconds))
+ instance.daemon = True
+ instance.start()
+
+
+def show_dict(data, verbose, root_verbosity=0, path=None):
+ """
+ :type data: dict[str, any]
+ :type verbose: dict[str, int]
+ :type root_verbosity: int
+ :type path: list[str] | None
+ """
+ path = path if path else []
+
+ for key, value in sorted(data.items()):
+ indent = ' ' * len(path)
+ key_path = path + [key]
+ key_name = '.'.join(key_path)
+ verbosity = verbose.get(key_name, root_verbosity)
+
+ if isinstance(value, (tuple, list)):
+ display.info(indent + '%s:' % key, verbosity=verbosity)
+ for item in value:
+ display.info(indent + ' - %s' % item, verbosity=verbosity)
+ elif isinstance(value, dict):
+ min_verbosity = min([verbosity] + [v for k, v in verbose.items() if k.startswith('%s.' % key)])
+ display.info(indent + '%s:' % key, verbosity=min_verbosity)
+ show_dict(value, verbose, verbosity, key_path)
+ else:
+ display.info(indent + '%s: %s' % (key, value), verbosity=verbosity)
+
+
+def get_docker_details(args):
+ """
+ :type args: CommonConfig
+ :rtype: dict[str, any]
+ """
+ docker = find_executable('docker', required=False)
+ info = None
+ version = None
+
+ if docker:
+ try:
+ info = docker_info(args)
+ except SubprocessError as ex:
+ display.warning('Failed to collect docker info:\n%s' % ex)
+
+ try:
+ version = docker_version(args)
+ except SubprocessError as ex:
+ display.warning('Failed to collect docker version:\n%s' % ex)
+
+ docker_details = dict(
+ executable=docker,
+ info=info,
+ version=version,
+ )
+
+ return docker_details
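
set_timeout and get_timeout round-trip the deadline through a fixed UTC timestamp format; a minimal sketch of that round trip (the 45 minute duration is arbitrary):

import datetime

deadline = (datetime.datetime.utcnow() + datetime.timedelta(minutes=45)).strftime('%Y-%m-%dT%H:%M:%SZ')
parsed = datetime.datetime.strptime(deadline, '%Y-%m-%dT%H:%M:%SZ')

print(deadline)                              # e.g. 2020-11-17T21:04:05Z
print(parsed > datetime.datetime.utcnow())   # True while the timeout has not yet expired
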
diff --git a/test/lib/ansible_test/_internal/executor.py b/test/lib/ansible_test/_internal/executor.py
new file mode 100644
index 00000000..4f613049
--- /dev/null
+++ b/test/lib/ansible_test/_internal/executor.py
@@ -0,0 +1,2146 @@
+"""Execute Ansible tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+import datetime
+import re
+import time
+import textwrap
+import functools
+import hashlib
+import difflib
+import filecmp
+import random
+import string
+import shutil
+
+from . import types as t
+
+from .thread import (
+ WrappedThread,
+)
+
+from .core_ci import (
+ AnsibleCoreCI,
+ SshKey,
+)
+
+from .manage_ci import (
+ ManageWindowsCI,
+ ManageNetworkCI,
+)
+
+from .cloud import (
+ cloud_filter,
+ cloud_init,
+ get_cloud_environment,
+ get_cloud_platforms,
+ CloudEnvironmentConfig,
+)
+
+from .io import (
+ make_dirs,
+ open_text_file,
+ read_binary_file,
+ read_text_file,
+ write_text_file,
+)
+
+from .util import (
+ ApplicationWarning,
+ ApplicationError,
+ SubprocessError,
+ display,
+ remove_tree,
+ find_executable,
+ raw_command,
+ get_available_port,
+ generate_pip_command,
+ find_python,
+ cmd_quote,
+ ANSIBLE_LIB_ROOT,
+ ANSIBLE_TEST_DATA_ROOT,
+ ANSIBLE_TEST_CONFIG_ROOT,
+ get_ansible_version,
+ tempdir,
+ open_zipfile,
+ SUPPORTED_PYTHON_VERSIONS,
+ str_to_version,
+ version_to_str,
+)
+
+from .util_common import (
+ get_docker_completion,
+ get_network_settings,
+ get_remote_completion,
+ get_python_path,
+ intercept_command,
+ named_temporary_file,
+ run_command,
+ write_json_test_results,
+ ResultType,
+ handle_layout_messages,
+)
+
+from .docker_util import (
+ docker_pull,
+ docker_run,
+ docker_available,
+ docker_rm,
+ get_docker_container_id,
+ get_docker_container_ip,
+ get_docker_hostname,
+ get_docker_preferred_network_name,
+ is_docker_user_defined_network,
+)
+
+from .ansible_util import (
+ ansible_environment,
+ check_pyyaml,
+)
+
+from .target import (
+ IntegrationTarget,
+ walk_internal_targets,
+ walk_posix_integration_targets,
+ walk_network_integration_targets,
+ walk_windows_integration_targets,
+ TIntegrationTarget,
+)
+
+from .ci import (
+ get_ci_provider,
+)
+
+from .classification import (
+ categorize_changes,
+)
+
+from .config import (
+ TestConfig,
+ EnvironmentConfig,
+ IntegrationConfig,
+ NetworkIntegrationConfig,
+ PosixIntegrationConfig,
+ ShellConfig,
+ WindowsIntegrationConfig,
+ TIntegrationConfig,
+)
+
+from .metadata import (
+ ChangeDescription,
+)
+
+from .integration import (
+ integration_test_environment,
+ integration_test_config_file,
+ setup_common_temp_dir,
+ get_inventory_relative_path,
+ check_inventory,
+ delegate_inventory,
+)
+
+from .data import (
+ data_context,
+)
+
+HTTPTESTER_HOSTS = (
+ 'ansible.http.tests',
+ 'sni1.ansible.http.tests',
+ 'fail.ansible.http.tests',
+)
+
+
+def check_startup():
+ """Checks to perform at startup before running commands."""
+ check_legacy_modules()
+
+
+def check_legacy_modules():
+ """Detect conflicts with legacy core/extras module directories to avoid problems later."""
+ for directory in 'core', 'extras':
+ path = 'lib/ansible/modules/%s' % directory
+
+ for root, _dir_names, file_names in os.walk(path):
+ if file_names:
+ # the directory shouldn't exist, but if it does, it must contain no files
+ raise ApplicationError('Files prohibited in "%s". '
+ 'These are most likely legacy modules from version 2.2 or earlier.' % root)
+
+
+def create_shell_command(command):
+ """
+ :type command: list[str]
+ :rtype: list[str]
+ """
+ optional_vars = (
+ 'TERM',
+ )
+
+ cmd = ['/usr/bin/env']
+ cmd += ['%s=%s' % (var, os.environ[var]) for var in optional_vars if var in os.environ]
+ cmd += command
+
+ return cmd
+
+
+def get_openssl_version(args, python, python_version): # type: (EnvironmentConfig, str, str) -> t.Optional[t.Tuple[int, ...]]
+ """Return the openssl version."""
+ if not python_version.startswith('2.'):
+ # OpenSSL version checking only works on Python 3.x.
+ # This should be the most accurate, since it is the Python we will be using.
+ version = json.loads(run_command(args, [python, os.path.join(ANSIBLE_TEST_DATA_ROOT, 'sslcheck.py')], capture=True, always=True)[0])['version']
+
+ if version:
+ display.info('Detected OpenSSL version %s under Python %s.' % (version_to_str(version), python_version), verbosity=1)
+
+ return tuple(version)
+
+ # Fall back to detecting the OpenSSL version from the CLI.
+ # This should provide an adequate solution on Python 2.x.
+ openssl_path = find_executable('openssl', required=False)
+
+ if openssl_path:
+ try:
+ result = raw_command([openssl_path, 'version'], capture=True)[0]
+ except SubprocessError:
+ result = ''
+
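+        # example output this matches (illustrative): "OpenSSL 1.1.1f  31 Mar 2020" -> (1, 1, 1)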
+ match = re.search(r'^OpenSSL (?P<version>[0-9]+\.[0-9]+\.[0-9]+)', result)
+
+ if match:
+ version = str_to_version(match.group('version'))
+
+ display.info('Detected OpenSSL version %s using the openssl CLI.' % version_to_str(version), verbosity=1)
+
+ return version
+
+ display.info('Unable to detect OpenSSL version.', verbosity=1)
+
+ return None
+
+
+def get_setuptools_version(args, python): # type: (EnvironmentConfig, str) -> t.Tuple[int, ...]
+ """Return the setuptools version for the given python."""
+ try:
+ return str_to_version(raw_command([python, '-c', 'import setuptools; print(setuptools.__version__)'], capture=True)[0])
+ except SubprocessError:
+ if args.explain:
+            return tuple() # ignore errors in explain mode in case setuptools is not already installed
+
+ raise
+
+
+def get_cryptography_requirement(args, python_version): # type: (EnvironmentConfig, str) -> str
+ """
+ Return the correct cryptography requirement for the given python version.
+ The version of cryptography installed depends on the python version, setuptools version and openssl version.
+ """
+ python = find_python(python_version)
+ setuptools_version = get_setuptools_version(args, python)
+ openssl_version = get_openssl_version(args, python, python_version)
+
+ if setuptools_version >= (18, 5):
+ if python_version == '2.6':
+ # cryptography 2.2+ requires python 2.7+
+ # see https://github.com/pyca/cryptography/blob/master/CHANGELOG.rst#22---2018-03-19
+ cryptography = 'cryptography < 2.2'
+ elif openssl_version and openssl_version < (1, 1, 0):
+ # cryptography 3.2 requires openssl 1.1.x or later
+ # see https://cryptography.io/en/latest/changelog.html#v3-2
+ cryptography = 'cryptography < 3.2'
+ else:
+ # cryptography 3.4+ fails to install on many systems
+ # this is a temporary work-around until a more permanent solution is available
+ cryptography = 'cryptography < 3.4'
+ else:
+ # cryptography 2.1+ requires setuptools 18.5+
+ # see https://github.com/pyca/cryptography/blob/62287ae18383447585606b9d0765c0f1b8a9777c/setup.py#L26
+ cryptography = 'cryptography < 2.1'
+
+ return cryptography
+
+
+def install_command_requirements(args, python_version=None, context=None, enable_pyyaml_check=False):
+ """
+ :type args: EnvironmentConfig
+ :type python_version: str | None
+ :type context: str | None
+ :type enable_pyyaml_check: bool
+ """
+ if not args.explain:
+ make_dirs(ResultType.COVERAGE.path)
+ make_dirs(ResultType.DATA.path)
+
+ if isinstance(args, ShellConfig):
+ if args.raw:
+ return
+
+ if not args.requirements:
+ return
+
+ if isinstance(args, ShellConfig):
+ return
+
+ packages = []
+
+ if isinstance(args, TestConfig):
+ if args.coverage:
+ packages.append('coverage')
+ if args.junit:
+ packages.append('junit-xml')
+
+ if not python_version:
+ python_version = args.python_version
+
+ pip = generate_pip_command(find_python(python_version))
+
+    # skip packages which have already been installed for python_version
+
+ try:
+ package_cache = install_command_requirements.package_cache
+ except AttributeError:
+ package_cache = install_command_requirements.package_cache = {}
+
+ installed_packages = package_cache.setdefault(python_version, set())
+ skip_packages = [package for package in packages if package in installed_packages]
+
+ for package in skip_packages:
+ packages.remove(package)
+
+ installed_packages.update(packages)
+
+ if args.command != 'sanity':
+ install_ansible_test_requirements(args, pip)
+
+ # make sure setuptools is available before trying to install cryptography
+ # the installed version of setuptools affects the version of cryptography to install
+ run_command(args, generate_pip_install(pip, '', packages=['setuptools']))
+
+ # install the latest cryptography version that the current requirements can support
+ # use a custom constraints file to avoid the normal constraints file overriding the chosen version of cryptography
+    # if not installed here, later install commands may try to install an unsupported version due to the presence of older setuptools
+ # this is done instead of upgrading setuptools to allow tests to function with older distribution provided versions of setuptools
+ run_command(args, generate_pip_install(pip, '',
+ packages=[get_cryptography_requirement(args, python_version)],
+ constraints=os.path.join(ANSIBLE_TEST_DATA_ROOT, 'cryptography-constraints.txt')))
+
+ commands = [generate_pip_install(pip, args.command, packages=packages, context=context)]
+
+ if isinstance(args, IntegrationConfig):
+ for cloud_platform in get_cloud_platforms(args):
+ commands.append(generate_pip_install(pip, '%s.cloud.%s' % (args.command, cloud_platform)))
+
+ commands = [cmd for cmd in commands if cmd]
+
+ if not commands:
+ return # no need to detect changes or run pip check since we are not making any changes
+
+ # only look for changes when more than one requirements file is needed
+ detect_pip_changes = len(commands) > 1
+
+ # first pass to install requirements, changes expected unless environment is already set up
+ install_ansible_test_requirements(args, pip)
+ changes = run_pip_commands(args, pip, commands, detect_pip_changes)
+
+ if changes:
+ # second pass to check for conflicts in requirements, changes are not expected here
+ changes = run_pip_commands(args, pip, commands, detect_pip_changes)
+
+ if changes:
+ raise ApplicationError('Conflicts detected in requirements. The following commands reported changes during verification:\n%s' %
+ '\n'.join((' '.join(cmd_quote(c) for c in cmd) for cmd in changes)))
+
+ if args.pip_check:
+ # ask pip to check for conflicts between installed packages
+ try:
+ run_command(args, pip + ['check', '--disable-pip-version-check'], capture=True)
+ except SubprocessError as ex:
+ if ex.stderr.strip() == 'ERROR: unknown command "check"':
+ display.warning('Cannot check pip requirements for conflicts because "pip check" is not supported.')
+ else:
+ raise
+
+ if enable_pyyaml_check:
+ # pyyaml may have been one of the requirements that was installed, so perform an optional check for it
+ check_pyyaml(args, python_version, required=False)
+
+
+def install_ansible_test_requirements(args, pip): # type: (EnvironmentConfig, t.List[str]) -> None
+ """Install requirements for ansible-test for the given pip if not already installed."""
+ try:
+ installed = install_command_requirements.installed
+ except AttributeError:
+ installed = install_command_requirements.installed = set()
+
+ if tuple(pip) in installed:
+ return
+
+ # make sure basic ansible-test requirements are met, including making sure that pip is recent enough to support constraints
+ # virtualenvs created by older distributions may include very old pip versions, such as those created in the centos6 test container (pip 6.0.8)
+ run_command(args, generate_pip_install(pip, 'ansible-test', use_constraints=False))
+
+ installed.add(tuple(pip))
+
+
+def run_pip_commands(args, pip, commands, detect_pip_changes=False):
+ """
+ :type args: EnvironmentConfig
+ :type pip: list[str]
+ :type commands: list[list[str]]
+ :type detect_pip_changes: bool
+ :rtype: list[list[str]]
+ """
+ changes = []
+
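+    # changes are detected by comparing the output of `pip list` before and after each command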
+ after_list = pip_list(args, pip) if detect_pip_changes else None
+
+ for cmd in commands:
+ if not cmd:
+ continue
+
+ before_list = after_list
+
+ run_command(args, cmd)
+
+ after_list = pip_list(args, pip) if detect_pip_changes else None
+
+ if before_list != after_list:
+ changes.append(cmd)
+
+ return changes
+
+
+def pip_list(args, pip):
+ """
+ :type args: EnvironmentConfig
+ :type pip: list[str]
+ :rtype: str
+ """
+ stdout = run_command(args, pip + ['list'], capture=True)[0]
+ return stdout
+
+
+def generate_pip_install(pip, command, packages=None, constraints=None, use_constraints=True, context=None):
+ """
+ :type pip: list[str]
+ :type command: str
+ :type packages: list[str] | None
+ :type constraints: str | None
+ :type use_constraints: bool
+ :type context: str | None
+ :rtype: list[str] | None
+ """
+ constraints = constraints or os.path.join(ANSIBLE_TEST_DATA_ROOT, 'requirements', 'constraints.txt')
+ requirements = os.path.join(ANSIBLE_TEST_DATA_ROOT, 'requirements', '%s.txt' % ('%s.%s' % (command, context) if context else command))
+ content_constraints = None
+
+ options = []
+
+ if os.path.exists(requirements) and os.path.getsize(requirements):
+ options += ['-r', requirements]
+
+ if command == 'sanity' and data_context().content.is_ansible:
+ requirements = os.path.join(data_context().content.sanity_path, 'code-smell', '%s.requirements.txt' % context)
+
+ if os.path.exists(requirements) and os.path.getsize(requirements):
+ options += ['-r', requirements]
+
+ if command == 'units':
+ requirements = os.path.join(data_context().content.unit_path, 'requirements.txt')
+
+ if os.path.exists(requirements) and os.path.getsize(requirements):
+ options += ['-r', requirements]
+
+ content_constraints = os.path.join(data_context().content.unit_path, 'constraints.txt')
+
+ if command in ('integration', 'windows-integration', 'network-integration'):
+ requirements = os.path.join(data_context().content.integration_path, 'requirements.txt')
+
+ if os.path.exists(requirements) and os.path.getsize(requirements):
+ options += ['-r', requirements]
+
+ requirements = os.path.join(data_context().content.integration_path, '%s.requirements.txt' % command)
+
+ if os.path.exists(requirements) and os.path.getsize(requirements):
+ options += ['-r', requirements]
+
+ content_constraints = os.path.join(data_context().content.integration_path, 'constraints.txt')
+
+ if command.startswith('integration.cloud.'):
+ content_constraints = os.path.join(data_context().content.integration_path, 'constraints.txt')
+
+ if packages:
+ options += packages
+
+ if not options:
+ return None
+
+ if use_constraints:
+ if content_constraints and os.path.exists(content_constraints) and os.path.getsize(content_constraints):
+ # listing content constraints first gives them priority over constraints provided by ansible-test
+ options.extend(['-c', content_constraints])
+
+ options.extend(['-c', constraints])
+
+ return pip + ['install', '--disable-pip-version-check'] + options
+
+
+def command_shell(args):
+ """
+ :type args: ShellConfig
+ """
+ if args.delegate:
+ raise Delegate()
+
+ install_command_requirements(args)
+
+ if args.inject_httptester:
+ inject_httptester(args)
+
+ cmd = create_shell_command(['bash', '-i'])
+ run_command(args, cmd)
+
+
+def command_posix_integration(args):
+ """
+ :type args: PosixIntegrationConfig
+ """
+ handle_layout_messages(data_context().content.integration_messages)
+
+ inventory_relative_path = get_inventory_relative_path(args)
+ inventory_path = os.path.join(ANSIBLE_TEST_DATA_ROOT, os.path.basename(inventory_relative_path))
+
+ all_targets = tuple(walk_posix_integration_targets(include_hidden=True))
+ internal_targets = command_integration_filter(args, all_targets)
+ command_integration_filtered(args, internal_targets, all_targets, inventory_path)
+
+
+def command_network_integration(args):
+ """
+ :type args: NetworkIntegrationConfig
+ """
+ handle_layout_messages(data_context().content.integration_messages)
+
+ inventory_relative_path = get_inventory_relative_path(args)
+ template_path = os.path.join(ANSIBLE_TEST_CONFIG_ROOT, os.path.basename(inventory_relative_path)) + '.template'
+
+ if args.inventory:
+ inventory_path = os.path.join(data_context().content.root, data_context().content.integration_path, args.inventory)
+ else:
+ inventory_path = os.path.join(data_context().content.root, inventory_relative_path)
+
+ if args.no_temp_workdir:
+ # temporary solution to keep DCI tests working
+ inventory_exists = os.path.exists(inventory_path)
+ else:
+ inventory_exists = os.path.isfile(inventory_path)
+
+ if not args.explain and not args.platform and not inventory_exists:
+ raise ApplicationError(
+ 'Inventory not found: %s\n'
+ 'Use --inventory to specify the inventory path.\n'
+ 'Use --platform to provision resources and generate an inventory file.\n'
+ 'See also inventory template: %s' % (inventory_path, template_path)
+ )
+
+ check_inventory(args, inventory_path)
+ delegate_inventory(args, inventory_path)
+
+ all_targets = tuple(walk_network_integration_targets(include_hidden=True))
+ internal_targets = command_integration_filter(args, all_targets, init_callback=network_init)
+ instances = [] # type: t.List[WrappedThread]
+
+ if args.platform:
+ get_python_path(args, args.python_executable) # initialize before starting threads
+
+ configs = dict((config['platform_version'], config) for config in args.metadata.instance_config)
+
+ for platform_version in args.platform:
+ platform, version = platform_version.split('/', 1)
+ config = configs.get(platform_version)
+
+ if not config:
+ continue
+
+ instance = WrappedThread(functools.partial(network_run, args, platform, version, config))
+ instance.daemon = True
+ instance.start()
+ instances.append(instance)
+
+ while any(instance.is_alive() for instance in instances):
+ time.sleep(1)
+
+ remotes = [instance.wait_for_result() for instance in instances]
+ inventory = network_inventory(remotes)
+
+ display.info('>>> Inventory: %s\n%s' % (inventory_path, inventory.strip()), verbosity=3)
+
+ if not args.explain:
+ write_text_file(inventory_path, inventory)
+
+ success = False
+
+ try:
+ command_integration_filtered(args, internal_targets, all_targets, inventory_path)
+ success = True
+ finally:
+ if args.remote_terminate == 'always' or (args.remote_terminate == 'success' and success):
+ for instance in instances:
+ instance.result.stop()
+
+
+def network_init(args, internal_targets): # type: (NetworkIntegrationConfig, t.Tuple[IntegrationTarget, ...]) -> None
+ """Initialize platforms for network integration tests."""
+ if not args.platform:
+ return
+
+ if args.metadata.instance_config is not None:
+ return
+
+ platform_targets = set(a for target in internal_targets for a in target.aliases if a.startswith('network/'))
+
+ instances = [] # type: t.List[WrappedThread]
+
+ # generate an ssh key (if needed) up front once, instead of for each instance
+ SshKey(args)
+
+ for platform_version in args.platform:
+ platform, version = platform_version.split('/', 1)
+ platform_target = 'network/%s/' % platform
+
+ if platform_target not in platform_targets:
+ display.warning('Skipping "%s" because selected tests do not target the "%s" platform.' % (
+ platform_version, platform))
+ continue
+
+ instance = WrappedThread(functools.partial(network_start, args, platform, version))
+ instance.daemon = True
+ instance.start()
+ instances.append(instance)
+
+ while any(instance.is_alive() for instance in instances):
+ time.sleep(1)
+
+ args.metadata.instance_config = [instance.wait_for_result() for instance in instances]
+
+
+def network_start(args, platform, version):
+ """
+ :type args: NetworkIntegrationConfig
+ :type platform: str
+ :type version: str
+ :rtype: AnsibleCoreCI
+ """
+ core_ci = AnsibleCoreCI(args, platform, version, stage=args.remote_stage, provider=args.remote_provider)
+ core_ci.start()
+
+ return core_ci.save()
+
+
+def network_run(args, platform, version, config):
+ """
+ :type args: NetworkIntegrationConfig
+ :type platform: str
+ :type version: str
+ :type config: dict[str, str]
+ :rtype: AnsibleCoreCI
+ """
+ core_ci = AnsibleCoreCI(args, platform, version, stage=args.remote_stage, provider=args.remote_provider, load=False)
+ core_ci.load(config)
+ core_ci.wait()
+
+ manage = ManageNetworkCI(core_ci)
+ manage.wait()
+
+ return core_ci
+
+
+def network_inventory(remotes):
+ """
+ :type remotes: list[AnsibleCoreCI]
+ :rtype: str
+ """
+ groups = dict([(remote.platform, []) for remote in remotes])
+ net = []
+
+ for remote in remotes:
+ options = dict(
+ ansible_host=remote.connection.hostname,
+ ansible_user=remote.connection.username,
+ ansible_ssh_private_key_file=os.path.abspath(remote.ssh_key.key),
+ )
+
+ settings = get_network_settings(remote.args, remote.platform, remote.version)
+
+ options.update(settings.inventory_vars)
+
+ groups[remote.platform].append(
+ '%s %s' % (
+ remote.name.replace('.', '-'),
+ ' '.join('%s="%s"' % (k, options[k]) for k in sorted(options)),
+ )
+ )
+
+ net.append(remote.platform)
+
+ groups['net:children'] = net
+
+ template = ''
+
+ for group in groups:
+ hosts = '\n'.join(groups[group])
+
+ template += textwrap.dedent("""
+ [%s]
+ %s
+ """) % (group, hosts)
+
+ inventory = template
+
+ return inventory
+
+
+def command_windows_integration(args):
+ """
+ :type args: WindowsIntegrationConfig
+ """
+ handle_layout_messages(data_context().content.integration_messages)
+
+ inventory_relative_path = get_inventory_relative_path(args)
+ template_path = os.path.join(ANSIBLE_TEST_CONFIG_ROOT, os.path.basename(inventory_relative_path)) + '.template'
+
+ if args.inventory:
+ inventory_path = os.path.join(data_context().content.root, data_context().content.integration_path, args.inventory)
+ else:
+ inventory_path = os.path.join(data_context().content.root, inventory_relative_path)
+
+ if not args.explain and not args.windows and not os.path.isfile(inventory_path):
+ raise ApplicationError(
+ 'Inventory not found: %s\n'
+ 'Use --inventory to specify the inventory path.\n'
+ 'Use --windows to provision resources and generate an inventory file.\n'
+ 'See also inventory template: %s' % (inventory_path, template_path)
+ )
+
+ check_inventory(args, inventory_path)
+ delegate_inventory(args, inventory_path)
+
+ all_targets = tuple(walk_windows_integration_targets(include_hidden=True))
+ internal_targets = command_integration_filter(args, all_targets, init_callback=windows_init)
+ instances = [] # type: t.List[WrappedThread]
+ pre_target = None
+ post_target = None
+ httptester_id = None
+
+ if args.windows:
+ get_python_path(args, args.python_executable) # initialize before starting threads
+
+ configs = dict((config['platform_version'], config) for config in args.metadata.instance_config)
+
+ for version in args.windows:
+ config = configs['windows/%s' % version]
+
+ instance = WrappedThread(functools.partial(windows_run, args, version, config))
+ instance.daemon = True
+ instance.start()
+ instances.append(instance)
+
+ while any(instance.is_alive() for instance in instances):
+ time.sleep(1)
+
+ remotes = [instance.wait_for_result() for instance in instances]
+ inventory = windows_inventory(remotes)
+
+ display.info('>>> Inventory: %s\n%s' % (inventory_path, inventory.strip()), verbosity=3)
+
+ if not args.explain:
+ write_text_file(inventory_path, inventory)
+
+ use_httptester = args.httptester and any('needs/httptester/' in target.aliases for target in internal_targets)
+ # if running under Docker delegation, the httptester may have already been started
+ docker_httptester = bool(os.environ.get("HTTPTESTER", False))
+
+ if use_httptester and not docker_available() and not docker_httptester:
+ display.warning('Assuming --disable-httptester since `docker` is not available.')
+ elif use_httptester:
+ if docker_httptester:
+            # we are running in a Docker container that is linked to the httptester container, so we just need to
+            # forward these requests to the linked hostname
+ first_host = HTTPTESTER_HOSTS[0]
+ ssh_options = ["-R", "8080:%s:80" % first_host, "-R", "8443:%s:443" % first_host]
+ else:
+            # we are running directly and need to start the httptester container ourselves and forward the ports from there
+            # args.inject_httptester is set manually so the HTTPTESTER env var is set during the run
+ args.inject_httptester = True
+ httptester_id, ssh_options = start_httptester(args)
+
+        # to get this SSH command to run in the background we need to run it in the background (-f) and disable
+        # pty allocation (-T)
+ ssh_options.insert(0, "-fT")
+
+        # create a script that will continue to run in the background until the script is deleted; deleting it will
+        # clean up and close the connection
+ def forward_ssh_ports(target):
+ """
+ :type target: IntegrationTarget
+ """
+ if 'needs/httptester/' not in target.aliases:
+ return
+
+ for remote in [r for r in remotes if r.version != '2008']:
+ manage = ManageWindowsCI(remote)
+ manage.upload(os.path.join(ANSIBLE_TEST_DATA_ROOT, 'setup', 'windows-httptester.ps1'), watcher_path)
+
+ # We cannot pass an array of string with -File so we just use a delimiter for multiple values
+ script = "powershell.exe -NoProfile -ExecutionPolicy Bypass -File .\\%s -Hosts \"%s\"" \
+ % (watcher_path, "|".join(HTTPTESTER_HOSTS))
+ if args.verbosity > 3:
+ script += " -Verbose"
+ manage.ssh(script, options=ssh_options, force_pty=False)
+
+ def cleanup_ssh_ports(target):
+ """
+ :type target: IntegrationTarget
+ """
+ if 'needs/httptester/' not in target.aliases:
+ return
+
+ for remote in [r for r in remotes if r.version != '2008']:
+ # delete the tmp file that keeps the http-tester alive
+ manage = ManageWindowsCI(remote)
+ manage.ssh("cmd.exe /c \"del %s /F /Q\"" % watcher_path, force_pty=False)
+
+ watcher_path = "ansible-test-http-watcher-%s.ps1" % time.time()
+ pre_target = forward_ssh_ports
+ post_target = cleanup_ssh_ports
+
+ def run_playbook(playbook, run_playbook_vars): # type: (str, t.Dict[str, t.Any]) -> None
+ playbook_path = os.path.join(ANSIBLE_TEST_DATA_ROOT, 'playbooks', playbook)
+ command = ['ansible-playbook', '-i', inventory_path, playbook_path, '-e', json.dumps(run_playbook_vars)]
+ if args.verbosity:
+ command.append('-%s' % ('v' * args.verbosity))
+
+ env = ansible_environment(args)
+ intercept_command(args, command, '', env, disable_coverage=True)
+
+ remote_temp_path = None
+
+ if args.coverage and not args.coverage_check:
+ # Create the remote directory that is writable by everyone. Use Ansible to talk to the remote host.
+ remote_temp_path = 'C:\\ansible_test_coverage_%s' % time.time()
+ playbook_vars = {'remote_temp_path': remote_temp_path}
+ run_playbook('windows_coverage_setup.yml', playbook_vars)
+
+ success = False
+
+ try:
+ command_integration_filtered(args, internal_targets, all_targets, inventory_path, pre_target=pre_target,
+ post_target=post_target, remote_temp_path=remote_temp_path)
+ success = True
+ finally:
+ if httptester_id:
+ docker_rm(args, httptester_id)
+
+ if remote_temp_path:
+            # Zip up the coverage files that were generated and fetch them back to localhost.
+ with tempdir() as local_temp_path:
+ playbook_vars = {'remote_temp_path': remote_temp_path, 'local_temp_path': local_temp_path}
+ run_playbook('windows_coverage_teardown.yml', playbook_vars)
+
+ for filename in os.listdir(local_temp_path):
+ with open_zipfile(os.path.join(local_temp_path, filename)) as coverage_zip:
+ coverage_zip.extractall(ResultType.COVERAGE.path)
+
+ if args.remote_terminate == 'always' or (args.remote_terminate == 'success' and success):
+ for instance in instances:
+ instance.result.stop()
+
+
+# noinspection PyUnusedLocal
+def windows_init(args, internal_targets): # pylint: disable=locally-disabled, unused-argument
+ """
+ :type args: WindowsIntegrationConfig
+ :type internal_targets: tuple[IntegrationTarget]
+ """
+ if not args.windows:
+ return
+
+ if args.metadata.instance_config is not None:
+ return
+
+ instances = [] # type: t.List[WrappedThread]
+
+ for version in args.windows:
+ instance = WrappedThread(functools.partial(windows_start, args, version))
+ instance.daemon = True
+ instance.start()
+ instances.append(instance)
+
+ while any(instance.is_alive() for instance in instances):
+ time.sleep(1)
+
+ args.metadata.instance_config = [instance.wait_for_result() for instance in instances]
+
+
+def windows_start(args, version):
+ """
+ :type args: WindowsIntegrationConfig
+ :type version: str
+ :rtype: AnsibleCoreCI
+ """
+ core_ci = AnsibleCoreCI(args, 'windows', version, stage=args.remote_stage, provider=args.remote_provider)
+ core_ci.start()
+
+ return core_ci.save()
+
+
+def windows_run(args, version, config):
+ """
+ :type args: WindowsIntegrationConfig
+ :type version: str
+ :type config: dict[str, str]
+ :rtype: AnsibleCoreCI
+ """
+ core_ci = AnsibleCoreCI(args, 'windows', version, stage=args.remote_stage, provider=args.remote_provider, load=False)
+ core_ci.load(config)
+ core_ci.wait()
+
+ manage = ManageWindowsCI(core_ci)
+ manage.wait()
+
+ return core_ci
+
+
+def windows_inventory(remotes):
+ """
+ :type remotes: list[AnsibleCoreCI]
+ :rtype: str
+ """
+ hosts = []
+
+ for remote in remotes:
+ options = dict(
+ ansible_host=remote.connection.hostname,
+ ansible_user=remote.connection.username,
+ ansible_password=remote.connection.password,
+ ansible_port=remote.connection.port,
+ )
+
+ # used for the connection_windows_ssh test target
+ if remote.ssh_key:
+ options["ansible_ssh_private_key_file"] = os.path.abspath(remote.ssh_key.key)
+
+ if remote.name == 'windows-2008':
+ options.update(
+ # force 2008 to use PSRP for the connection plugin
+ ansible_connection='psrp',
+ ansible_psrp_auth='basic',
+ ansible_psrp_cert_validation='ignore',
+ )
+ elif remote.name == 'windows-2016':
+ options.update(
+ # force 2016 to use NTLM + HTTP message encryption
+ ansible_connection='winrm',
+ ansible_winrm_server_cert_validation='ignore',
+ ansible_winrm_transport='ntlm',
+ ansible_winrm_scheme='http',
+ ansible_port='5985',
+ )
+ else:
+ options.update(
+ ansible_connection='winrm',
+ ansible_winrm_server_cert_validation='ignore',
+ )
+
+ hosts.append(
+ '%s %s' % (
+ remote.name.replace('/', '_'),
+ ' '.join('%s="%s"' % (k, options[k]) for k in sorted(options)),
+ )
+ )
+
+ template = """
+ [windows]
+ %s
+
+ # support winrm binary module tests (temporary solution)
+ [testhost:children]
+ windows
+ """
+
+ template = textwrap.dedent(template)
+ inventory = template % ('\n'.join(hosts))
+
+ return inventory
+
+
+def command_integration_filter(args, # type: TIntegrationConfig
+ targets, # type: t.Iterable[TIntegrationTarget]
+ init_callback=None, # type: t.Callable[[TIntegrationConfig, t.Tuple[TIntegrationTarget, ...]], None]
+ ): # type: (...) -> t.Tuple[TIntegrationTarget, ...]
+ """Filter the given integration test targets."""
+ targets = tuple(target for target in targets if 'hidden/' not in target.aliases)
+ changes = get_changes_filter(args)
+
+ # special behavior when the --changed-all-target target is selected based on changes
+ if args.changed_all_target in changes:
+ # act as though the --changed-all-target target was in the include list
+ if args.changed_all_mode == 'include' and args.changed_all_target not in args.include:
+ args.include.append(args.changed_all_target)
+ args.delegate_args += ['--include', args.changed_all_target]
+ # act as though the --changed-all-target target was in the exclude list
+ elif args.changed_all_mode == 'exclude' and args.changed_all_target not in args.exclude:
+ args.exclude.append(args.changed_all_target)
+
+ require = args.require + changes
+ exclude = args.exclude
+
+ internal_targets = walk_internal_targets(targets, args.include, exclude, require)
+ environment_exclude = get_integration_filter(args, internal_targets)
+
+ environment_exclude += cloud_filter(args, internal_targets)
+
+ if environment_exclude:
+ exclude += environment_exclude
+ internal_targets = walk_internal_targets(targets, args.include, exclude, require)
+
+ if not internal_targets:
+ raise AllTargetsSkipped()
+
+ if args.start_at and not any(target.name == args.start_at for target in internal_targets):
+ raise ApplicationError('Start at target matches nothing: %s' % args.start_at)
+
+ if init_callback:
+ init_callback(args, internal_targets)
+
+ cloud_init(args, internal_targets)
+
+ vars_file_src = os.path.join(data_context().content.root, data_context().content.integration_vars_path)
+
+ if os.path.exists(vars_file_src):
+ def integration_config_callback(files): # type: (t.List[t.Tuple[str, str]]) -> None
+ """
+ Add the integration config vars file to the payload file list.
+ This will preserve the file during delegation even if the file is ignored by source control.
+ """
+ files.append((vars_file_src, data_context().content.integration_vars_path))
+
+ data_context().register_payload_callback(integration_config_callback)
+
+ if args.delegate:
+ raise Delegate(require=require, exclude=exclude, integration_targets=internal_targets)
+
+ install_command_requirements(args)
+
+ return internal_targets
+
+
+def command_integration_filtered(args, targets, all_targets, inventory_path, pre_target=None, post_target=None,
+ remote_temp_path=None):
+ """
+ :type args: IntegrationConfig
+ :type targets: tuple[IntegrationTarget]
+ :type all_targets: tuple[IntegrationTarget]
+ :type inventory_path: str
+ :type pre_target: (IntegrationTarget) -> None | None
+ :type post_target: (IntegrationTarget) -> None | None
+ :type remote_temp_path: str | None
+ """
+ found = False
+ passed = []
+ failed = []
+
+ targets_iter = iter(targets)
+ all_targets_dict = dict((target.name, target) for target in all_targets)
+
+ setup_errors = []
+ setup_targets_executed = set()
+
+ for target in all_targets:
+ for setup_target in target.setup_once + target.setup_always:
+ if setup_target not in all_targets_dict:
+ setup_errors.append('Target "%s" contains invalid setup target: %s' % (target.name, setup_target))
+
+ if setup_errors:
+ raise ApplicationError('Found %d invalid setup aliases:\n%s' % (len(setup_errors), '\n'.join(setup_errors)))
+
+ check_pyyaml(args, args.python_version)
+
+ test_dir = os.path.join(ResultType.TMP.path, 'output_dir')
+
+ if not args.explain and any('needs/ssh/' in target.aliases for target in targets):
+ max_tries = 20
+ display.info('SSH service required for tests. Checking to make sure we can connect.')
+ for i in range(1, max_tries + 1):
+ try:
+ run_command(args, ['ssh', '-o', 'BatchMode=yes', 'localhost', 'id'], capture=True)
+ display.info('SSH service responded.')
+ break
+ except SubprocessError:
+ if i == max_tries:
+ raise
+ seconds = 3
+ display.warning('SSH service not responding. Waiting %d second(s) before checking again.' % seconds)
+ time.sleep(seconds)
+
+ # Windows is different as Ansible execution is done locally but the host is remote
+ if args.inject_httptester and not isinstance(args, WindowsIntegrationConfig):
+ inject_httptester(args)
+
+ start_at_task = args.start_at_task
+
+ results = {}
+
+ current_environment = None # type: t.Optional[EnvironmentDescription]
+
+ # common temporary directory path that will be valid on both the controller and the remote
+ # it must be common because it will be referenced in environment variables that are shared across multiple hosts
+ common_temp_path = '/tmp/ansible-test-%s' % ''.join(random.choice(string.ascii_letters + string.digits) for _idx in range(8))
+
+ setup_common_temp_dir(args, common_temp_path)
+
+ try:
+ for target in targets_iter:
+ if args.start_at and not found:
+ found = target.name == args.start_at
+
+ if not found:
+ continue
+
+ if args.list_targets:
+ print(target.name)
+ continue
+
+ tries = 2 if args.retry_on_error else 1
+ verbosity = args.verbosity
+
+ cloud_environment = get_cloud_environment(args, target)
+
+ original_environment = current_environment if current_environment else EnvironmentDescription(args)
+ current_environment = None
+
+ display.info('>>> Environment Description\n%s' % original_environment, verbosity=3)
+
+ try:
+ while tries:
+ tries -= 1
+
+ try:
+ if cloud_environment:
+ cloud_environment.setup_once()
+
+ run_setup_targets(args, test_dir, target.setup_once, all_targets_dict, setup_targets_executed, inventory_path, common_temp_path, False)
+
+ start_time = time.time()
+
+ run_setup_targets(args, test_dir, target.setup_always, all_targets_dict, setup_targets_executed, inventory_path, common_temp_path, True)
+
+ if not args.explain:
+ # create a fresh test directory for each test target
+ remove_tree(test_dir)
+ make_dirs(test_dir)
+
+ if pre_target:
+ pre_target(target)
+
+ try:
+ if target.script_path:
+ command_integration_script(args, target, test_dir, inventory_path, common_temp_path,
+ remote_temp_path=remote_temp_path)
+ else:
+ command_integration_role(args, target, start_at_task, test_dir, inventory_path,
+ common_temp_path, remote_temp_path=remote_temp_path)
+ start_at_task = None
+ finally:
+ if post_target:
+ post_target(target)
+
+ end_time = time.time()
+
+ results[target.name] = dict(
+ name=target.name,
+ type=target.type,
+ aliases=target.aliases,
+ modules=target.modules,
+ run_time_seconds=int(end_time - start_time),
+ setup_once=target.setup_once,
+ setup_always=target.setup_always,
+ coverage=args.coverage,
+ coverage_label=args.coverage_label,
+ python_version=args.python_version,
+ )
+
+ break
+ except SubprocessError:
+ if cloud_environment:
+ cloud_environment.on_failure(target, tries)
+
+ if not original_environment.validate(target.name, throw=False):
+ raise
+
+ if not tries:
+ raise
+
+ display.warning('Retrying test target "%s" with maximum verbosity.' % target.name)
+ display.verbosity = args.verbosity = 6
+
+ start_time = time.time()
+ current_environment = EnvironmentDescription(args)
+ end_time = time.time()
+
+ EnvironmentDescription.check(original_environment, current_environment, target.name, throw=True)
+
+ results[target.name]['validation_seconds'] = int(end_time - start_time)
+
+ passed.append(target)
+ except Exception as ex:
+ failed.append(target)
+
+ if args.continue_on_error:
+ display.error(ex)
+ continue
+
+ display.notice('To resume at this test target, use the option: --start-at %s' % target.name)
+
+ next_target = next(targets_iter, None)
+
+ if next_target:
+ display.notice('To resume after this test target, use the option: --start-at %s' % next_target.name)
+
+ raise
+ finally:
+ display.verbosity = args.verbosity = verbosity
+
+ finally:
+ if not args.explain:
+ if args.coverage:
+ coverage_temp_path = os.path.join(common_temp_path, ResultType.COVERAGE.name)
+ coverage_save_path = ResultType.COVERAGE.path
+
+ for filename in os.listdir(coverage_temp_path):
+ shutil.copy(os.path.join(coverage_temp_path, filename), os.path.join(coverage_save_path, filename))
+
+ remove_tree(common_temp_path)
+
+ result_name = '%s-%s.json' % (
+ args.command, re.sub(r'[^0-9]', '-', str(datetime.datetime.utcnow().replace(microsecond=0))))
+
+ data = dict(
+ targets=results,
+ )
+
+ write_json_test_results(ResultType.DATA, result_name, data)
+
+ if failed:
+ raise ApplicationError('The %d integration test(s) listed below (out of %d) failed. See error output above for details:\n%s' % (
+ len(failed), len(passed) + len(failed), '\n'.join(target.name for target in failed)))
+
+
+def start_httptester(args):
+ """
+ :type args: EnvironmentConfig
+ :rtype: str, list[str]
+ """
+
+ # map ports from remote -> localhost -> container
+ # passing through localhost is only used when ansible-test is not already running inside a docker container
+ ports = [
+ dict(
+ remote=8080,
+ container=80,
+ ),
+ dict(
+ remote=8443,
+ container=443,
+ ),
+ ]
+
+ container_id = get_docker_container_id()
+
+ if not container_id:
+ for item in ports:
+ item['localhost'] = get_available_port()
+
+ docker_pull(args, args.httptester)
+
+ httptester_id = run_httptester(args, dict((port['localhost'], port['container']) for port in ports if 'localhost' in port))
+
+ if container_id:
+ container_host = get_docker_container_ip(args, httptester_id)
+ display.info('Found httptester container address: %s' % container_host, verbosity=1)
+ else:
+ container_host = get_docker_hostname()
+
+ ssh_options = []
+
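+    # each mapping becomes an SSH remote forward, e.g. (illustrative, when already inside a container):
+    #   ['-R', '8080:<httptester ip>:80', '-R', '8443:<httptester ip>:443']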
+ for port in ports:
+ ssh_options += ['-R', '%d:%s:%d' % (port['remote'], container_host, port.get('localhost', port['container']))]
+
+ return httptester_id, ssh_options
+
+
+def run_httptester(args, ports=None):
+ """
+ :type args: EnvironmentConfig
+ :type ports: dict[int, int] | None
+ :rtype: str
+ """
+ options = [
+ '--detach',
+ ]
+
+ if ports:
+ for localhost_port, container_port in ports.items():
+ options += ['-p', '%d:%d' % (localhost_port, container_port)]
+
+ network = get_docker_preferred_network_name(args)
+
+ if is_docker_user_defined_network(network):
+ # network-scoped aliases are only supported for containers in user defined networks
+ for alias in HTTPTESTER_HOSTS:
+ options.extend(['--network-alias', alias])
+
+ httptester_id = docker_run(args, args.httptester, options=options)[0]
+
+ if args.explain:
+ httptester_id = 'httptester_id'
+ else:
+ httptester_id = httptester_id.strip()
+
+ return httptester_id
+
+
+def inject_httptester(args):
+ """
+ :type args: CommonConfig
+ """
+ comment = ' # ansible-test httptester\n'
+ append_lines = ['127.0.0.1 %s%s' % (host, comment) for host in HTTPTESTER_HOSTS]
+ hosts_path = '/etc/hosts'
+
+ original_lines = read_text_file(hosts_path).splitlines(True)
+
+ if not any(line.endswith(comment) for line in original_lines):
+ write_text_file(hosts_path, ''.join(original_lines + append_lines))
+
+ # determine which forwarding mechanism to use
+ pfctl = find_executable('pfctl', required=False)
+ iptables = find_executable('iptables', required=False)
+
+ if pfctl:
+ kldload = find_executable('kldload', required=False)
+
+ if kldload:
+ try:
+ run_command(args, ['kldload', 'pf'], capture=True)
+ except SubprocessError:
+ pass # already loaded
+
+ rules = '''
+rdr pass inet proto tcp from any to any port 80 -> 127.0.0.1 port 8080
+rdr pass inet proto tcp from any to any port 443 -> 127.0.0.1 port 8443
+'''
+ cmd = ['pfctl', '-ef', '-']
+
+ try:
+ run_command(args, cmd, capture=True, data=rules)
+ except SubprocessError:
+ pass # non-zero exit status on success
+
+ elif iptables:
+ ports = [
+ (80, 8080),
+ (443, 8443),
+ ]
+
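+        # each pair becomes a loopback redirect, e.g.: iptables -t nat -A OUTPUT -o lo -p tcp --dport 80 -j REDIRECT --to-port 8080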
+ for src, dst in ports:
+ rule = ['-o', 'lo', '-p', 'tcp', '--dport', str(src), '-j', 'REDIRECT', '--to-port', str(dst)]
+
+ try:
+ # check for existing rule
+ cmd = ['iptables', '-t', 'nat', '-C', 'OUTPUT'] + rule
+ run_command(args, cmd, capture=True)
+ except SubprocessError:
+ # append rule when it does not exist
+ cmd = ['iptables', '-t', 'nat', '-A', 'OUTPUT'] + rule
+ run_command(args, cmd, capture=True)
+ else:
+ raise ApplicationError('No supported port forwarding mechanism detected.')
+
+
+def run_setup_targets(args, test_dir, target_names, targets_dict, targets_executed, inventory_path, temp_path, always):
+ """
+ :type args: IntegrationConfig
+ :type test_dir: str
+ :type target_names: list[str]
+ :type targets_dict: dict[str, IntegrationTarget]
+ :type targets_executed: set[str]
+ :type inventory_path: str
+ :type temp_path: str
+ :type always: bool
+ """
+ for target_name in target_names:
+ if not always and target_name in targets_executed:
+ continue
+
+ target = targets_dict[target_name]
+
+ if not args.explain:
+ # create a fresh test directory for each test target
+ remove_tree(test_dir)
+ make_dirs(test_dir)
+
+ if target.script_path:
+ command_integration_script(args, target, test_dir, inventory_path, temp_path)
+ else:
+ command_integration_role(args, target, None, test_dir, inventory_path, temp_path)
+
+ targets_executed.add(target_name)
+
+
+def integration_environment(args, target, test_dir, inventory_path, ansible_config, env_config):
+ """
+ :type args: IntegrationConfig
+ :type target: IntegrationTarget
+ :type test_dir: str
+ :type inventory_path: str
+ :type ansible_config: str | None
+ :type env_config: CloudEnvironmentConfig | None
+ :rtype: dict[str, str]
+ """
+ env = ansible_environment(args, ansible_config=ansible_config)
+
+ if args.inject_httptester:
+ env.update(dict(
+ HTTPTESTER='1',
+ ))
+
+ callback_plugins = ['junit'] + (env_config.callback_plugins or [] if env_config else [])
+
+ integration = dict(
+ JUNIT_OUTPUT_DIR=ResultType.JUNIT.path,
+ ANSIBLE_CALLBACK_WHITELIST=','.join(sorted(set(callback_plugins))),
+ ANSIBLE_TEST_CI=args.metadata.ci_provider or get_ci_provider().code,
+ ANSIBLE_TEST_COVERAGE='check' if args.coverage_check else ('yes' if args.coverage else ''),
+ OUTPUT_DIR=test_dir,
+ INVENTORY_PATH=os.path.abspath(inventory_path),
+ )
+
+ if args.debug_strategy:
+ env.update(dict(ANSIBLE_STRATEGY='debug'))
+
+ if 'non_local/' in target.aliases:
+ if args.coverage:
+ display.warning('Skipping coverage reporting on Ansible modules for non-local test: %s' % target.name)
+
+ env.update(dict(ANSIBLE_TEST_REMOTE_INTERPRETER=''))
+
+ env.update(integration)
+
+ return env
+
+
+def command_integration_script(args, target, test_dir, inventory_path, temp_path, remote_temp_path=None):
+ """
+ :type args: IntegrationConfig
+ :type target: IntegrationTarget
+ :type test_dir: str
+ :type inventory_path: str
+ :type temp_path: str
+ :type remote_temp_path: str | None
+ """
+ display.info('Running %s integration test script' % target.name)
+
+ env_config = None
+
+ if isinstance(args, PosixIntegrationConfig):
+ cloud_environment = get_cloud_environment(args, target)
+
+ if cloud_environment:
+ env_config = cloud_environment.get_environment_config()
+
+ with integration_test_environment(args, target, inventory_path) as test_env:
+ cmd = ['./%s' % os.path.basename(target.script_path)]
+
+ if args.verbosity:
+ cmd.append('-' + ('v' * args.verbosity))
+
+ env = integration_environment(args, target, test_dir, test_env.inventory_path, test_env.ansible_config, env_config)
+ cwd = os.path.join(test_env.targets_dir, target.relative_path)
+
+ env.update(dict(
+            # support use of ad hoc ansible commands in collections without specifying the fully qualified collection name
+ ANSIBLE_PLAYBOOK_DIR=cwd,
+ ))
+
+ if env_config and env_config.env_vars:
+ env.update(env_config.env_vars)
+
+ with integration_test_config_file(args, env_config, test_env.integration_dir) as config_path:
+ if config_path:
+ cmd += ['-e', '@%s' % config_path]
+
+ module_coverage = 'non_local/' not in target.aliases
+ intercept_command(args, cmd, target_name=target.name, env=env, cwd=cwd, temp_path=temp_path,
+ remote_temp_path=remote_temp_path, module_coverage=module_coverage)
+
+
+def command_integration_role(args, target, start_at_task, test_dir, inventory_path, temp_path, remote_temp_path=None):
+ """
+ :type args: IntegrationConfig
+ :type target: IntegrationTarget
+ :type start_at_task: str | None
+ :type test_dir: str
+ :type inventory_path: str
+ :type temp_path: str
+ :type remote_temp_path: str | None
+ """
+ display.info('Running %s integration test role' % target.name)
+
+ env_config = None
+
+ vars_files = []
+ variables = dict(
+ output_dir=test_dir,
+ )
+
+ if isinstance(args, WindowsIntegrationConfig):
+ hosts = 'windows'
+ gather_facts = False
+ variables.update(dict(
+ win_output_dir=r'C:\ansible_testing',
+ ))
+ elif isinstance(args, NetworkIntegrationConfig):
+ hosts = target.network_platform
+ gather_facts = False
+ else:
+ hosts = 'testhost'
+ gather_facts = True
+
+ cloud_environment = get_cloud_environment(args, target)
+
+ if cloud_environment:
+ env_config = cloud_environment.get_environment_config()
+
+ with integration_test_environment(args, target, inventory_path) as test_env:
+ if os.path.exists(test_env.vars_file):
+ vars_files.append(os.path.relpath(test_env.vars_file, test_env.integration_dir))
+
+ play = dict(
+ hosts=hosts,
+ gather_facts=gather_facts,
+ vars_files=vars_files,
+ vars=variables,
+ roles=[
+ target.name,
+ ],
+ )
+
+ if env_config:
+ if env_config.ansible_vars:
+ variables.update(env_config.ansible_vars)
+
+ play.update(dict(
+ environment=env_config.env_vars,
+ module_defaults=env_config.module_defaults,
+ ))
+
+ playbook = json.dumps([play], indent=4, sort_keys=True)
+
+ with named_temporary_file(args=args, directory=test_env.integration_dir, prefix='%s-' % target.name, suffix='.yml', content=playbook) as playbook_path:
+ filename = os.path.basename(playbook_path)
+
+ display.info('>>> Playbook: %s\n%s' % (filename, playbook.strip()), verbosity=3)
+
+ cmd = ['ansible-playbook', filename, '-i', os.path.relpath(test_env.inventory_path, test_env.integration_dir)]
+
+ if start_at_task:
+ cmd += ['--start-at-task', start_at_task]
+
+ if args.tags:
+ cmd += ['--tags', args.tags]
+
+ if args.skip_tags:
+ cmd += ['--skip-tags', args.skip_tags]
+
+ if args.diff:
+ cmd += ['--diff']
+
+ if isinstance(args, NetworkIntegrationConfig):
+ if args.testcase:
+ cmd += ['-e', 'testcase=%s' % args.testcase]
+
+ if args.verbosity:
+ cmd.append('-' + ('v' * args.verbosity))
+
+ env = integration_environment(args, target, test_dir, test_env.inventory_path, test_env.ansible_config, env_config)
+ cwd = test_env.integration_dir
+
+ env.update(dict(
+            # support use of ad hoc ansible commands in collections without specifying the fully qualified collection name
+ ANSIBLE_PLAYBOOK_DIR=cwd,
+ ))
+
+ env['ANSIBLE_ROLES_PATH'] = test_env.targets_dir
+
+ module_coverage = 'non_local/' not in target.aliases
+ intercept_command(args, cmd, target_name=target.name, env=env, cwd=cwd, temp_path=temp_path,
+ remote_temp_path=remote_temp_path, module_coverage=module_coverage)
+
+
+def get_changes_filter(args):
+ """
+ :type args: TestConfig
+ :rtype: list[str]
+ """
+ paths = detect_changes(args)
+
+ if not args.metadata.change_description:
+ if paths:
+ changes = categorize_changes(args, paths, args.command)
+ else:
+ changes = ChangeDescription()
+
+ args.metadata.change_description = changes
+
+ if paths is None:
+ return [] # change detection not enabled, do not filter targets
+
+ if not paths:
+ raise NoChangesDetected()
+
+ if args.metadata.change_description.targets is None:
+ raise NoTestsForChanges()
+
+ return args.metadata.change_description.targets
+
+
+def detect_changes(args):
+ """
+ :type args: TestConfig
+ :rtype: list[str] | None
+ """
+ if args.changed:
+ paths = get_ci_provider().detect_changes(args)
+ elif args.changed_from or args.changed_path:
+ paths = args.changed_path or []
+ if args.changed_from:
+ paths += read_text_file(args.changed_from).splitlines()
+ else:
+ return None # change detection not enabled
+
+ if paths is None:
+ return None # act as though change detection not enabled, do not filter targets
+
+ display.info('Detected changes in %d file(s).' % len(paths))
+
+ for path in paths:
+ display.info(path, verbosity=1)
+
+ return paths
+
+
+def get_integration_filter(args, targets):
+ """
+ :type args: IntegrationConfig
+ :type targets: tuple[IntegrationTarget]
+ :rtype: list[str]
+ """
+ if args.docker:
+ return get_integration_docker_filter(args, targets)
+
+ if args.remote:
+ return get_integration_remote_filter(args, targets)
+
+ return get_integration_local_filter(args, targets)
+
+
+def common_integration_filter(args, targets, exclude):
+ """
+ :type args: IntegrationConfig
+ :type targets: tuple[IntegrationTarget]
+ :type exclude: list[str]
+ """
+ override_disabled = set(target for target in args.include if target.startswith('disabled/'))
+
+ if not args.allow_disabled:
+ skip = 'disabled/'
+ override = [target.name for target in targets if override_disabled & set(target.aliases)]
+ skipped = [target.name for target in targets if skip in target.aliases and target.name not in override]
+ if skipped:
+ exclude.extend(skipped)
+ display.warning('Excluding tests marked "%s" which require --allow-disabled or prefixing with "disabled/": %s'
+ % (skip.rstrip('/'), ', '.join(skipped)))
+
+ override_unsupported = set(target for target in args.include if target.startswith('unsupported/'))
+
+ if not args.allow_unsupported:
+ skip = 'unsupported/'
+ override = [target.name for target in targets if override_unsupported & set(target.aliases)]
+ skipped = [target.name for target in targets if skip in target.aliases and target.name not in override]
+ if skipped:
+ exclude.extend(skipped)
+ display.warning('Excluding tests marked "%s" which require --allow-unsupported or prefixing with "unsupported/": %s'
+ % (skip.rstrip('/'), ', '.join(skipped)))
+
+ override_unstable = set(target for target in args.include if target.startswith('unstable/'))
+
+ if args.allow_unstable_changed:
+ override_unstable |= set(args.metadata.change_description.focused_targets or [])
+
+ if not args.allow_unstable:
+ skip = 'unstable/'
+ override = [target.name for target in targets if override_unstable & set(target.aliases)]
+ skipped = [target.name for target in targets if skip in target.aliases and target.name not in override]
+ if skipped:
+ exclude.extend(skipped)
+ display.warning('Excluding tests marked "%s" which require --allow-unstable or prefixing with "unstable/": %s'
+ % (skip.rstrip('/'), ', '.join(skipped)))
+
+ # only skip a Windows test if using --windows and all the --windows versions are defined in the aliases as skip/windows/%s
+ if isinstance(args, WindowsIntegrationConfig) and args.windows:
+ all_skipped = []
+ not_skipped = []
+
+ for target in targets:
+ if "skip/windows/" not in target.aliases:
+ continue
+
+ skip_valid = []
+ skip_missing = []
+ for version in args.windows:
+ if "skip/windows/%s/" % version in target.aliases:
+ skip_valid.append(version)
+ else:
+ skip_missing.append(version)
+
+ if skip_missing and skip_valid:
+ not_skipped.append((target.name, skip_valid, skip_missing))
+ elif skip_valid:
+ all_skipped.append(target.name)
+
+ if all_skipped:
+ exclude.extend(all_skipped)
+ skip_aliases = ["skip/windows/%s/" % w for w in args.windows]
+ display.warning('Excluding tests marked "%s" which are set to skip with --windows %s: %s'
+ % ('", "'.join(skip_aliases), ', '.join(args.windows), ', '.join(all_skipped)))
+
+ if not_skipped:
+ for target, skip_valid, skip_missing in not_skipped:
+ # warn when failing to skip due to lack of support for skipping only some versions
+ display.warning('Including test "%s" which was marked to skip for --windows %s but not %s.'
+ % (target, ', '.join(skip_valid), ', '.join(skip_missing)))
+
+
+def get_integration_local_filter(args, targets):
+ """
+ :type args: IntegrationConfig
+ :type targets: tuple[IntegrationTarget]
+ :rtype: list[str]
+ """
+ exclude = []
+
+ common_integration_filter(args, targets, exclude)
+
+ if not args.allow_root and os.getuid() != 0:
+ skip = 'needs/root/'
+ skipped = [target.name for target in targets if skip in target.aliases]
+ if skipped:
+ exclude.append(skip)
+ display.warning('Excluding tests marked "%s" which require --allow-root or running as root: %s'
+ % (skip.rstrip('/'), ', '.join(skipped)))
+
+ override_destructive = set(target for target in args.include if target.startswith('destructive/'))
+
+ if not args.allow_destructive:
+ skip = 'destructive/'
+ override = [target.name for target in targets if override_destructive & set(target.aliases)]
+ skipped = [target.name for target in targets if skip in target.aliases and target.name not in override]
+ if skipped:
+ exclude.extend(skipped)
+ display.warning('Excluding tests marked "%s" which require --allow-destructive or prefixing with "destructive/" to run locally: %s'
+ % (skip.rstrip('/'), ', '.join(skipped)))
+
+ exclude_targets_by_python_version(targets, args.python_version, exclude)
+
+ return exclude
+
+
+def get_integration_docker_filter(args, targets):
+ """
+ :type args: IntegrationConfig
+ :type targets: tuple[IntegrationTarget]
+ :rtype: list[str]
+ """
+ exclude = []
+
+ common_integration_filter(args, targets, exclude)
+
+ skip = 'skip/docker/'
+ skipped = [target.name for target in targets if skip in target.aliases]
+ if skipped:
+ exclude.append(skip)
+ display.warning('Excluding tests marked "%s" which cannot run under docker: %s'
+ % (skip.rstrip('/'), ', '.join(skipped)))
+
+ if not args.docker_privileged:
+ skip = 'needs/privileged/'
+ skipped = [target.name for target in targets if skip in target.aliases]
+ if skipped:
+ exclude.append(skip)
+ display.warning('Excluding tests marked "%s" which require --docker-privileged to run under docker: %s'
+ % (skip.rstrip('/'), ', '.join(skipped)))
+
+ python_version = get_python_version(args, get_docker_completion(), args.docker_raw)
+
+ exclude_targets_by_python_version(targets, python_version, exclude)
+
+ return exclude
+
+
+def get_integration_remote_filter(args, targets):
+ """
+ :type args: IntegrationConfig
+ :type targets: tuple[IntegrationTarget]
+ :rtype: list[str]
+ """
+ remote = args.parsed_remote
+
+ exclude = []
+
+ common_integration_filter(args, targets, exclude)
+
+ skips = {
+ 'skip/%s' % remote.platform: remote.platform,
+ 'skip/%s/%s' % (remote.platform, remote.version): '%s %s' % (remote.platform, remote.version),
+ 'skip/%s%s' % (remote.platform, remote.version): '%s %s' % (remote.platform, remote.version), # legacy syntax, use above format
+ }
+
+ if remote.arch:
+ skips.update({
+ 'skip/%s/%s' % (remote.arch, remote.platform): '%s on %s' % (remote.platform, remote.arch),
+ 'skip/%s/%s/%s' % (remote.arch, remote.platform, remote.version): '%s %s on %s' % (remote.platform, remote.version, remote.arch),
+ })
+
+ for skip, description in skips.items():
+ skipped = [target.name for target in targets if skip in target.skips]
+ if skipped:
+ exclude.append(skip + '/')
+ display.warning('Excluding tests marked "%s" which are not supported on %s: %s' % (skip, description, ', '.join(skipped)))
+
+ python_version = get_python_version(args, get_remote_completion(), args.remote)
+
+ exclude_targets_by_python_version(targets, python_version, exclude)
+
+ return exclude
+
+
+def exclude_targets_by_python_version(targets, python_version, exclude):
+ """
+ :type targets: tuple[IntegrationTarget]
+ :type python_version: str
+ :type exclude: list[str]
+ """
+ if not python_version:
+ display.warning('Python version unknown. Unable to skip tests based on Python version.')
+ return
+
+ python_major_version = python_version.split('.')[0]
+
+ skip = 'skip/python%s/' % python_version
+ skipped = [target.name for target in targets if skip in target.aliases]
+ if skipped:
+ exclude.append(skip)
+ display.warning('Excluding tests marked "%s" which are not supported on python %s: %s'
+ % (skip.rstrip('/'), python_version, ', '.join(skipped)))
+
+ skip = 'skip/python%s/' % python_major_version
+ skipped = [target.name for target in targets if skip in target.aliases]
+ if skipped:
+ exclude.append(skip)
+ display.warning('Excluding tests marked "%s" which are not supported on python %s: %s'
+ % (skip.rstrip('/'), python_version, ', '.join(skipped)))
+
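+# Illustrative example for exclude_targets_by_python_version(): given python_version '2.7',
+# a target carrying the alias 'skip/python2.7/' or 'skip/python2/' has the matching skip alias
+# appended to the exclude list, with the affected target names reported in a warning;
+# targets without such aliases are unaffected.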
+
+def get_python_version(args, configs, name):
+ """
+ :type args: EnvironmentConfig
+ :type configs: dict[str, dict[str, str]]
+ :type name: str
+ """
+ config = configs.get(name, {})
+ config_python = config.get('python')
+
+ if not config or not config_python:
+ if args.python:
+ return args.python
+
+ display.warning('No Python version specified. '
+ 'Use completion config or the --python option to specify one.', unique=True)
+
+ return '' # failure to provide a version may result in failures or reduced functionality later
+
+ supported_python_versions = config_python.split(',')
+ default_python_version = supported_python_versions[0]
+
+ if args.python and args.python not in supported_python_versions:
+ raise ApplicationError('Python %s is not supported by %s. Supported Python version(s) are: %s' % (
+ args.python, name, ', '.join(sorted(supported_python_versions))))
+
+ python_version = args.python or default_python_version
+
+ return python_version
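+
+# Illustrative example for get_python_version(): a completion entry whose 'python' field is
+# '3.6,2.7' yields supported versions ['3.6', '2.7'], with '3.6' used as the default when the
+# --python option is not given; requesting an unsupported version raises ApplicationError.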
+
+
+def get_python_interpreter(args, configs, name):
+ """
+ :type args: EnvironmentConfig
+ :type configs: dict[str, dict[str, str]]
+ :type name: str
+ """
+ if args.python_interpreter:
+ return args.python_interpreter
+
+ config = configs.get(name, {})
+
+ if not config:
+ if args.python:
+ guess = 'python%s' % args.python
+ else:
+ guess = 'python'
+
+ display.warning('Using "%s" as the Python interpreter. '
+ 'Use completion config or the --python-interpreter option to specify the path.' % guess, unique=True)
+
+ return guess
+
+ python_version = get_python_version(args, configs, name)
+
+ python_dir = config.get('python_dir', '/usr/bin')
+ python_interpreter = os.path.join(python_dir, 'python%s' % python_version)
+ python_interpreter = config.get('python%s' % python_version, python_interpreter)
+
+ return python_interpreter
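+
+# Illustrative example for get_python_interpreter(): with a completion entry of
+# {'python': '3.6', 'python_dir': '/usr/bin'} and no --python-interpreter option, the resolved
+# interpreter is '/usr/bin/python3.6', unless the entry provides an explicit 'python3.6' path.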
+
+
+class EnvironmentDescription:
+ """Description of current running environment."""
+ def __init__(self, args):
+ """Initialize snapshot of environment configuration.
+ :type args: IntegrationConfig
+ """
+ self.args = args
+
+ if self.args.explain:
+ self.data = {}
+ return
+
+ warnings = []
+
+ versions = ['']
+ versions += SUPPORTED_PYTHON_VERSIONS
+ versions += list(set(v.split('.')[0] for v in SUPPORTED_PYTHON_VERSIONS))
+
+ version_check = os.path.join(ANSIBLE_TEST_DATA_ROOT, 'versions.py')
+ python_paths = dict((v, find_executable('python%s' % v, required=False)) for v in sorted(versions))
+ pip_paths = dict((v, find_executable('pip%s' % v, required=False)) for v in sorted(versions))
+ program_versions = dict((v, self.get_version([python_paths[v], version_check], warnings)) for v in sorted(python_paths) if python_paths[v])
+ pip_interpreters = dict((v, self.get_shebang(pip_paths[v])) for v in sorted(pip_paths) if pip_paths[v])
+ known_hosts_hash = self.get_hash(os.path.expanduser('~/.ssh/known_hosts'))
+
+ for version in sorted(versions):
+ self.check_python_pip_association(version, python_paths, pip_paths, pip_interpreters, warnings)
+
+ for warning in warnings:
+ display.warning(warning, unique=True)
+
+ self.data = dict(
+ python_paths=python_paths,
+ pip_paths=pip_paths,
+ program_versions=program_versions,
+ pip_interpreters=pip_interpreters,
+ known_hosts_hash=known_hosts_hash,
+ warnings=warnings,
+ )
+
+ @staticmethod
+ def check_python_pip_association(version, python_paths, pip_paths, pip_interpreters, warnings):
+ """
+ :type version: str
+ :param python_paths: dict[str, str]
+ :param pip_paths: dict[str, str]
+ :param pip_interpreters: dict[str, str]
+ :param warnings: list[str]
+ """
+ python_label = 'Python%s' % (' %s' % version if version else '')
+
+ pip_path = pip_paths.get(version)
+ python_path = python_paths.get(version)
+
+ if not python_path and not pip_path:
+            # neither python nor pip is present for this version
+ return
+
+ if not python_path:
+ warnings.append('A %s interpreter was not found, yet a matching pip was found at "%s".' % (python_label, pip_path))
+ return
+
+ if not pip_path:
+ warnings.append('A %s interpreter was found at "%s", yet a matching pip was not found.' % (python_label, python_path))
+ return
+
+ pip_shebang = pip_interpreters.get(version)
+
+ match = re.search(r'#!\s*(?P<command>[^\s]+)', pip_shebang)
+
+ if not match:
+ warnings.append('A %s pip was found at "%s", but it does not have a valid shebang: %s' % (python_label, pip_path, pip_shebang))
+ return
+
+ pip_interpreter = os.path.realpath(match.group('command'))
+ python_interpreter = os.path.realpath(python_path)
+
+ if pip_interpreter == python_interpreter:
+ return
+
+ try:
+ identical = filecmp.cmp(pip_interpreter, python_interpreter)
+ except OSError:
+ identical = False
+
+ if identical:
+ return
+
+ warnings.append('A %s pip was found at "%s", but it uses interpreter "%s" instead of "%s".' % (
+ python_label, pip_path, pip_interpreter, python_interpreter))
+
+ def __str__(self):
+ """
+ :rtype: str
+ """
+ return json.dumps(self.data, sort_keys=True, indent=4)
+
+ def validate(self, target_name, throw):
+ """
+ :type target_name: str
+ :type throw: bool
+ :rtype: bool
+ """
+ current = EnvironmentDescription(self.args)
+
+ return self.check(self, current, target_name, throw)
+
+ @staticmethod
+ def check(original, current, target_name, throw):
+ """
+ :type original: EnvironmentDescription
+ :type current: EnvironmentDescription
+ :type target_name: str
+ :type throw: bool
+ :rtype: bool
+ """
+ original_json = str(original)
+ current_json = str(current)
+
+ if original_json == current_json:
+ return True
+
+ unified_diff = '\n'.join(difflib.unified_diff(
+ a=original_json.splitlines(),
+ b=current_json.splitlines(),
+ fromfile='original.json',
+ tofile='current.json',
+ lineterm='',
+ ))
+
+ message = ('Test target "%s" has changed the test environment!\n'
+ 'If these changes are necessary, they must be reverted before the test finishes.\n'
+ '>>> Original Environment\n'
+ '%s\n'
+ '>>> Current Environment\n'
+ '%s\n'
+ '>>> Environment Diff\n'
+ '%s'
+ % (target_name, original_json, current_json, unified_diff))
+
+ if throw:
+ raise ApplicationError(message)
+
+ display.error(message)
+
+ return False
+
+ @staticmethod
+ def get_version(command, warnings):
+ """
+ :type command: list[str]
+        :type warnings: list[str]
+        :rtype: list[str] | None
+ """
+ try:
+ stdout, stderr = raw_command(command, capture=True, cmd_verbosity=2)
+ except SubprocessError as ex:
+ warnings.append(u'%s' % ex)
+ return None # all failures are equal, we don't care why it failed, only that it did
+
+ return [line.strip() for line in ((stdout or '').strip() + (stderr or '').strip()).splitlines()]
+
+ @staticmethod
+ def get_shebang(path):
+ """
+ :type path: str
+ :rtype: str
+ """
+ with open_text_file(path) as script_fd:
+ return script_fd.readline().strip()
+
+ @staticmethod
+ def get_hash(path):
+ """
+ :type path: str
+ :rtype: str | None
+ """
+ if not os.path.exists(path):
+ return None
+
+ file_hash = hashlib.md5()
+
+ file_hash.update(read_binary_file(path))
+
+ return file_hash.hexdigest()
+
+
+class NoChangesDetected(ApplicationWarning):
+ """Exception when change detection was performed, but no changes were found."""
+ def __init__(self):
+ super(NoChangesDetected, self).__init__('No changes detected.')
+
+
+class NoTestsForChanges(ApplicationWarning):
+ """Exception when changes detected, but no tests trigger as a result."""
+ def __init__(self):
+ super(NoTestsForChanges, self).__init__('No tests found for detected changes.')
+
+
+class Delegate(Exception):
+ """Trigger command delegation."""
+ def __init__(self, exclude=None, require=None, integration_targets=None):
+ """
+ :type exclude: list[str] | None
+ :type require: list[str] | None
+ :type integration_targets: tuple[IntegrationTarget] | None
+ """
+ super(Delegate, self).__init__()
+
+ self.exclude = exclude or []
+ self.require = require or []
+ self.integration_targets = integration_targets or tuple()
+
+
+class AllTargetsSkipped(ApplicationWarning):
+ """All targets skipped."""
+ def __init__(self):
+ super(AllTargetsSkipped, self).__init__('All targets skipped.')
diff --git a/test/lib/ansible_test/_internal/git.py b/test/lib/ansible_test/_internal/git.py
new file mode 100644
index 00000000..acc39f3f
--- /dev/null
+++ b/test/lib/ansible_test/_internal/git.py
@@ -0,0 +1,137 @@
+"""Wrapper around git command-line tools."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+
+from . import types as t
+
+from .util import (
+ SubprocessError,
+ raw_command,
+)
+
+
+class Git:
+ """Wrapper around git command-line tools."""
+ def __init__(self, root=None): # type: (t.Optional[str]) -> None
+ self.git = 'git'
+ self.root = root
+
+ def get_diff(self, args, git_options=None):
+ """
+ :type args: list[str]
+ :type git_options: list[str] | None
+ :rtype: list[str]
+ """
+ cmd = ['diff'] + args
+ if git_options is None:
+ git_options = ['-c', 'core.quotePath=']
+ return self.run_git_split(git_options + cmd, '\n', str_errors='replace')
+
+ def get_diff_names(self, args):
+ """
+ :type args: list[str]
+ :rtype: list[str]
+ """
+ cmd = ['diff', '--name-only', '--no-renames', '-z'] + args
+ return self.run_git_split(cmd, '\0')
+
+ def get_submodule_paths(self): # type: () -> t.List[str]
+ """Return a list of submodule paths recursively."""
+ cmd = ['submodule', 'status', '--recursive']
+ output = self.run_git_split(cmd, '\n')
+ submodule_paths = [re.search(r'^.[0-9a-f]+ (?P<path>[^ ]+)', line).group('path') for line in output]
+
+ # status is returned for all submodules in the current git repository relative to the current directory
+ # when the current directory is not the root of the git repository this can yield relative paths which are not below the current directory
+ # this can occur when multiple collections are in a git repo and some collections are submodules when others are not
+ # specifying "." as the path to enumerate would limit results to the current directory, but can cause the git command to fail with the error:
+ # error: pathspec '.' did not match any file(s) known to git
+ # this can occur when the current directory contains no files tracked by git
+ # instead we'll filter out the relative paths, since we're only interested in those at or below the current directory
+ submodule_paths = [path for path in submodule_paths if not path.startswith('../')]
+
+ return submodule_paths
+
+ def get_file_names(self, args):
+ """
+ :type args: list[str]
+ :rtype: list[str]
+ """
+ cmd = ['ls-files', '-z'] + args
+ return self.run_git_split(cmd, '\0')
+
+ def get_branches(self):
+ """
+ :rtype: list[str]
+ """
+ cmd = ['for-each-ref', 'refs/heads/', '--format', '%(refname:strip=2)']
+ return self.run_git_split(cmd)
+
+ def get_branch(self):
+ """
+ :rtype: str
+ """
+ cmd = ['symbolic-ref', '--short', 'HEAD']
+ return self.run_git(cmd).strip()
+
+ def get_rev_list(self, commits=None, max_count=None):
+ """
+ :type commits: list[str] | None
+ :type max_count: int | None
+ :rtype: list[str]
+ """
+ cmd = ['rev-list']
+
+ if commits:
+ cmd += commits
+ else:
+ cmd += ['HEAD']
+
+ if max_count:
+ cmd += ['--max-count', '%s' % max_count]
+
+ return self.run_git_split(cmd)
+
+ def get_branch_fork_point(self, branch):
+ """
+ :type branch: str
+ :rtype: str
+ """
+ cmd = ['merge-base', '--fork-point', branch]
+ return self.run_git(cmd).strip()
+
+ def is_valid_ref(self, ref):
+ """
+ :type ref: str
+ :rtype: bool
+ """
+ cmd = ['show', ref]
+ try:
+ self.run_git(cmd, str_errors='replace')
+ return True
+ except SubprocessError:
+ return False
+
+ def run_git_split(self, cmd, separator=None, str_errors='strict'):
+ """
+ :type cmd: list[str]
+ :type separator: str | None
+ :type str_errors: str
+ :rtype: list[str]
+ """
+ output = self.run_git(cmd, str_errors=str_errors).strip(separator)
+
+ if not output:
+ return []
+
+ return output.split(separator)
+
+ def run_git(self, cmd, str_errors='strict'):
+ """
+ :type cmd: list[str]
+ :type str_errors: str
+ :rtype: str
+ """
+ return raw_command([self.git] + cmd, cwd=self.root, capture=True, str_errors=str_errors)[0]
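+
+# Illustrative usage of the wrapper above (the ref name is an example only):
+#
+#   git = Git()
+#   if git.is_valid_ref('origin/devel'):
+#       changed_paths = git.get_diff_names(['origin/devel'])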
diff --git a/test/lib/ansible_test/_internal/http.py b/test/lib/ansible_test/_internal/http.py
new file mode 100644
index 00000000..6607a10b
--- /dev/null
+++ b/test/lib/ansible_test/_internal/http.py
@@ -0,0 +1,181 @@
+"""
+Primitive replacement for requests to avoid extra dependency.
+Avoids use of urllib2 due to lack of SNI support.
+"""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import time
+
+try:
+ from urllib import urlencode
+except ImportError:
+ # noinspection PyCompatibility, PyUnresolvedReferences
+ from urllib.parse import urlencode # pylint: disable=locally-disabled, import-error, no-name-in-module
+
+try:
+ # noinspection PyCompatibility
+ from urlparse import urlparse, urlunparse, parse_qs
+except ImportError:
+ # noinspection PyCompatibility, PyUnresolvedReferences
+ from urllib.parse import urlparse, urlunparse, parse_qs # pylint: disable=locally-disabled, ungrouped-imports
+
+from .util import (
+ ApplicationError,
+ SubprocessError,
+ display,
+)
+
+from .util_common import (
+ CommonConfig,
+ run_command,
+)
+
+
+class HttpClient:
+ """Make HTTP requests via curl."""
+ def __init__(self, args, always=False, insecure=False, proxy=None):
+ """
+ :type args: CommonConfig
+ :type always: bool
+ :type insecure: bool
+ """
+ self.args = args
+ self.always = always
+ self.insecure = insecure
+ self.proxy = proxy
+
+ self.username = None
+ self.password = None
+
+ def get(self, url):
+ """
+ :type url: str
+ :rtype: HttpResponse
+ """
+ return self.request('GET', url)
+
+ def delete(self, url):
+ """
+ :type url: str
+ :rtype: HttpResponse
+ """
+ return self.request('DELETE', url)
+
+ def put(self, url, data=None, headers=None):
+ """
+ :type url: str
+ :type data: str | None
+ :type headers: dict[str, str] | None
+ :rtype: HttpResponse
+ """
+ return self.request('PUT', url, data, headers)
+
+ def request(self, method, url, data=None, headers=None):
+ """
+ :type method: str
+ :type url: str
+ :type data: str | None
+ :type headers: dict[str, str] | None
+ :rtype: HttpResponse
+ """
+ cmd = ['curl', '-s', '-S', '-i', '-X', method]
+
+ if self.insecure:
+ cmd += ['--insecure']
+
+ if headers is None:
+ headers = {}
+
+ headers['Expect'] = '' # don't send expect continue header
+
+ if self.username:
+ if self.password:
+ display.sensitive.add(self.password)
+ cmd += ['-u', '%s:%s' % (self.username, self.password)]
+ else:
+ cmd += ['-u', self.username]
+
+ for header in headers.keys():
+ cmd += ['-H', '%s: %s' % (header, headers[header])]
+
+ if data is not None:
+ cmd += ['-d', data]
+
+ if self.proxy:
+ cmd += ['-x', self.proxy]
+
+ cmd += [url]
+
+ attempts = 0
+ max_attempts = 3
+ sleep_seconds = 3
+
+ # curl error codes which are safe to retry (request never sent to server)
+ retry_on_status = (
+ 6, # CURLE_COULDNT_RESOLVE_HOST
+ )
+
+ stdout = ''
+
+ while True:
+ attempts += 1
+
+ try:
+ stdout = run_command(self.args, cmd, capture=True, always=self.always, cmd_verbosity=2)[0]
+ break
+ except SubprocessError as ex:
+ if ex.status in retry_on_status and attempts < max_attempts:
+ display.warning(u'%s' % ex)
+ time.sleep(sleep_seconds)
+ continue
+
+ raise
+
+ if self.args.explain and not self.always:
+ return HttpResponse(method, url, 200, '')
+
+ header, body = stdout.split('\r\n\r\n', 1)
+
+ response_headers = header.split('\r\n')
+ first_line = response_headers[0]
+ http_response = first_line.split(' ')
+ status_code = int(http_response[1])
+
+ return HttpResponse(method, url, status_code, body)
+
+
+class HttpResponse:
+ """HTTP response from curl."""
+ def __init__(self, method, url, status_code, response):
+ """
+ :type method: str
+ :type url: str
+ :type status_code: int
+ :type response: str
+ """
+ self.method = method
+ self.url = url
+ self.status_code = status_code
+ self.response = response
+
+ def json(self):
+ """
+ :rtype: any
+ """
+ try:
+ return json.loads(self.response)
+ except ValueError:
+ raise HttpError(self.status_code, 'Cannot parse response to %s %s as JSON:\n%s' % (self.method, self.url, self.response))
+
+
+class HttpError(ApplicationError):
+ """HTTP response as an error."""
+ def __init__(self, status, message):
+ """
+ :type status: int
+ :type message: str
+ """
+ super(HttpError, self).__init__('%s: %s' % (status, message))
+ self.status = status
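+
+# Illustrative usage (the URL is an example only):
+#
+#   client = HttpClient(args, insecure=True)
+#   response = client.get('https://example.com/api/status')
+#   if response.status_code == 200:
+#       data = response.json()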
diff --git a/test/lib/ansible_test/_internal/import_analysis.py b/test/lib/ansible_test/_internal/import_analysis.py
new file mode 100644
index 00000000..9cc5376f
--- /dev/null
+++ b/test/lib/ansible_test/_internal/import_analysis.py
@@ -0,0 +1,362 @@
+"""Analyze python import statements."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import ast
+import os
+import re
+
+from . import types as t
+
+from .io import (
+ read_binary_file,
+)
+
+from .util import (
+ display,
+ ApplicationError,
+ is_subdir,
+)
+
+from .data import (
+ data_context,
+)
+
+VIRTUAL_PACKAGES = set([
+ 'ansible.module_utils.six',
+])
+
+
+def get_python_module_utils_imports(compile_targets):
+ """Return a dictionary of module_utils names mapped to sets of python file paths.
+ :type compile_targets: list[TestTarget]
+ :rtype: dict[str, set[str]]
+ """
+
+ module_utils = enumerate_module_utils()
+
+ virtual_utils = set(m for m in module_utils if any(m.startswith('%s.' % v) for v in VIRTUAL_PACKAGES))
+ module_utils -= virtual_utils
+
+ imports_by_target_path = {}
+
+ for target in compile_targets:
+ imports_by_target_path[target.path] = extract_python_module_utils_imports(target.path, module_utils)
+
+ def recurse_import(import_name, depth=0, seen=None): # type: (str, int, t.Optional[t.Set[str]]) -> t.Set[str]
+ """Recursively expand module_utils imports from module_utils files."""
+ display.info('module_utils import: %s%s' % (' ' * depth, import_name), verbosity=4)
+
+ if seen is None:
+ seen = set([import_name])
+
+ results = set([import_name])
+
+ # virtual packages depend on the modules they contain instead of the reverse
+ if import_name in VIRTUAL_PACKAGES:
+ for sub_import in sorted(virtual_utils):
+ if sub_import.startswith('%s.' % import_name):
+ if sub_import in seen:
+ continue
+
+ seen.add(sub_import)
+
+ matches = sorted(recurse_import(sub_import, depth + 1, seen))
+
+ for result in matches:
+ results.add(result)
+
+ import_path = get_import_path(import_name)
+
+ if import_path not in imports_by_target_path:
+ import_path = get_import_path(import_name, package=True)
+
+ if import_path not in imports_by_target_path:
+ raise ApplicationError('Cannot determine path for module_utils import: %s' % import_name)
+
+ # process imports in reverse so the deepest imports come first
+ for name in sorted(imports_by_target_path[import_path], reverse=True):
+ if name in virtual_utils:
+ continue
+
+ if name in seen:
+ continue
+
+ seen.add(name)
+
+ matches = sorted(recurse_import(name, depth + 1, seen))
+
+ for result in matches:
+ results.add(result)
+
+ return results
+
+ for module_util in module_utils:
+ # recurse over module_utils imports while excluding self
+ module_util_imports = recurse_import(module_util)
+ module_util_imports.remove(module_util)
+
+ # add recursive imports to all path entries which import this module_util
+ for target_path in imports_by_target_path:
+ if module_util in imports_by_target_path[target_path]:
+ for module_util_import in sorted(module_util_imports):
+ if module_util_import not in imports_by_target_path[target_path]:
+ display.info('%s inherits import %s via %s' % (target_path, module_util_import, module_util), verbosity=6)
+ imports_by_target_path[target_path].add(module_util_import)
+
+ imports = dict([(module_util, set()) for module_util in module_utils | virtual_utils])
+
+ for target_path in imports_by_target_path:
+ for module_util in imports_by_target_path[target_path]:
+ imports[module_util].add(target_path)
+
+ # for purposes of mapping module_utils to paths, treat imports of virtual utils the same as the parent package
+ for virtual_util in virtual_utils:
+ parent_package = '.'.join(virtual_util.split('.')[:-1])
+ imports[virtual_util] = imports[parent_package]
+ display.info('%s reports imports from parent package %s' % (virtual_util, parent_package), verbosity=6)
+
+ for module_util in sorted(imports):
+ if not imports[module_util]:
+ package_path = get_import_path(module_util, package=True)
+
+ if os.path.exists(package_path) and not os.path.getsize(package_path):
+ continue # ignore empty __init__.py files
+
+ display.warning('No imports found which use the "%s" module_util.' % module_util)
+
+ return imports
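+
+# Illustrative shape of the mapping returned above (the paths are examples only):
+#
+#   {'ansible.module_utils.basic': {'lib/ansible/modules/ping.py', ...}, ...}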
+
+
+def get_python_module_utils_name(path): # type: (str) -> str
+ """Return a namespace and name from the given module_utils path."""
+ base_path = data_context().content.module_utils_path
+
+ if data_context().content.collection:
+ prefix = 'ansible_collections.' + data_context().content.collection.prefix + 'plugins.module_utils'
+ else:
+ prefix = 'ansible.module_utils'
+
+ if path.endswith('/__init__.py'):
+ path = os.path.dirname(path)
+
+ if path == base_path:
+ name = prefix
+ else:
+ name = prefix + '.' + os.path.splitext(os.path.relpath(path, base_path))[0].replace(os.path.sep, '.')
+
+ return name
+
+
+def enumerate_module_utils():
+ """Return a list of available module_utils imports.
+ :rtype: set[str]
+ """
+ module_utils = []
+
+ for path in data_context().content.walk_files(data_context().content.module_utils_path):
+ ext = os.path.splitext(path)[1]
+
+ if ext != '.py':
+ continue
+
+ module_utils.append(get_python_module_utils_name(path))
+
+ return set(module_utils)
+
+
+def extract_python_module_utils_imports(path, module_utils):
+ """Return a list of module_utils imports found in the specified source file.
+ :type path: str
+ :type module_utils: set[str]
+ :rtype: set[str]
+ """
+ # Python code must be read as bytes to avoid a SyntaxError when the source uses comments to declare the file encoding.
+ # See: https://www.python.org/dev/peps/pep-0263
+ # Specifically: If a Unicode string with a coding declaration is passed to compile(), a SyntaxError will be raised.
+ code = read_binary_file(path)
+
+ try:
+ tree = ast.parse(code)
+ except SyntaxError as ex:
+        # Treat this error as a warning so tests can be executed as well as possible.
+ # The compile test will detect and report this syntax error.
+ display.warning('%s:%s Syntax error extracting module_utils imports: %s' % (path, ex.lineno, ex.msg))
+ return set()
+
+ finder = ModuleUtilFinder(path, module_utils)
+ finder.visit(tree)
+ return finder.imports
+
+
+def get_import_path(name, package=False): # type: (str, bool) -> str
+ """Return a path from an import name."""
+ if package:
+ filename = os.path.join(name.replace('.', '/'), '__init__.py')
+ else:
+ filename = '%s.py' % name.replace('.', '/')
+
+ if name.startswith('ansible.module_utils.') or name == 'ansible.module_utils':
+ path = os.path.join('lib', filename)
+ elif data_context().content.collection and (
+ name.startswith('ansible_collections.%s.plugins.module_utils.' % data_context().content.collection.full_name) or
+ name == 'ansible_collections.%s.plugins.module_utils' % data_context().content.collection.full_name):
+ path = '/'.join(filename.split('/')[3:])
+ else:
+ raise Exception('Unexpected import name: %s' % name)
+
+ return path
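+
+# Illustrative example for get_import_path(): the name 'ansible.module_utils.basic' maps to
+# 'lib/ansible/module_utils/basic.py', while the same name with package=True maps to
+# 'lib/ansible/module_utils/basic/__init__.py'.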
+
+
+def path_to_module(path): # type: (str) -> str
+ """Convert the given path to a module name."""
+ module = os.path.splitext(path)[0].replace(os.path.sep, '.')
+
+ if module.endswith('.__init__'):
+ module = module[:-9]
+
+ return module
+
+
+def relative_to_absolute(name, level, module, path, lineno): # type: (str, int, str, str, int) -> str
+ """Convert a relative import to an absolute import."""
+ if level <= 0:
+ absolute_name = name
+ elif not module:
+ display.warning('Cannot resolve relative import "%s%s" in unknown module at %s:%d' % ('.' * level, name, path, lineno))
+ absolute_name = 'relative.nomodule'
+ else:
+ parts = module.split('.')
+
+ if level >= len(parts):
+ display.warning('Cannot resolve relative import "%s%s" above module "%s" at %s:%d' % ('.' * level, name, module, path, lineno))
+ absolute_name = 'relative.abovelevel'
+ else:
+ absolute_name = '.'.join(parts[:-level] + [name])
+
+ return absolute_name
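+
+# Worked example for relative_to_absolute(): a relative import '.basic' (name='basic', level=1)
+# in module 'ansible.module_utils.facts.system' resolves to 'ansible.module_utils.facts.basic',
+# while a level greater than or equal to the module depth yields the 'relative.abovelevel' placeholder.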
+
+
+class ModuleUtilFinder(ast.NodeVisitor):
+ """AST visitor to find valid module_utils imports."""
+ def __init__(self, path, module_utils):
+ """Return a list of module_utils imports found in the specified source file.
+ :type path: str
+ :type module_utils: set[str]
+ """
+ self.path = path
+ self.module_utils = module_utils
+ self.imports = set()
+
+ # implicitly import parent package
+
+ if path.endswith('/__init__.py'):
+ path = os.path.split(path)[0]
+
+ if path.startswith('lib/ansible/module_utils/'):
+ package = os.path.split(path)[0].replace('/', '.')[4:]
+
+ if package != 'ansible.module_utils' and package not in VIRTUAL_PACKAGES:
+ self.add_import(package, 0)
+
+ self.module = None
+
+ if data_context().content.is_ansible:
+            # Various parts of the Ansible source tree execute within different modules.
+ # To support import analysis, each file which uses relative imports must reside under a path defined here.
+ # The mapping is a tuple consisting of a path pattern to match and a replacement path.
+            # During analysis, any relative imports not covered here will result in warnings, which can be fixed by adding the appropriate entry.
+ path_map = (
+ ('^hacking/build_library/build_ansible/', 'build_ansible/'),
+ ('^lib/ansible/', 'ansible/'),
+ ('^test/lib/ansible_test/_data/sanity/validate-modules/', 'validate_modules/'),
+ ('^test/units/', 'test/units/'),
+ ('^test/lib/ansible_test/_internal/', 'ansible_test/_internal/'),
+ ('^test/integration/targets/.*/ansible_collections/(?P<ns>[^/]*)/(?P<col>[^/]*)/', r'ansible_collections/\g<ns>/\g<col>/'),
+ ('^test/integration/targets/.*/library/', 'ansible/modules/'),
+ )
+
+ for pattern, replacement in path_map:
+ if re.search(pattern, self.path):
+ revised_path = re.sub(pattern, replacement, self.path)
+ self.module = path_to_module(revised_path)
+ break
+ else:
+ # This assumes that all files within the collection are executed by Ansible as part of the collection.
+ # While that will usually be true, there are exceptions which will result in this resolution being incorrect.
+ self.module = path_to_module(os.path.join(data_context().content.collection.directory, self.path))
+
+ # noinspection PyPep8Naming
+ # pylint: disable=locally-disabled, invalid-name
+ def visit_Import(self, node):
+ """
+ :type node: ast.Import
+ """
+ self.generic_visit(node)
+
+ # import ansible.module_utils.MODULE[.MODULE]
+        # import ansible_collections.{ns}.{col}.plugins.module_utils.MODULE[.MODULE]
+ self.add_imports([alias.name for alias in node.names], node.lineno)
+
+ # noinspection PyPep8Naming
+ # pylint: disable=locally-disabled, invalid-name
+ def visit_ImportFrom(self, node):
+ """
+ :type node: ast.ImportFrom
+ """
+ self.generic_visit(node)
+
+ if not node.module:
+ return
+
+ module = relative_to_absolute(node.module, node.level, self.module, self.path, node.lineno)
+
+ if not module.startswith('ansible'):
+ return
+
+ # from ansible.module_utils import MODULE[, MODULE]
+ # from ansible.module_utils.MODULE[.MODULE] import MODULE[, MODULE]
+ # from ansible_collections.{ns}.{col}.plugins.module_utils import MODULE[, MODULE]
+ # from ansible_collections.{ns}.{col}.plugins.module_utils.MODULE[.MODULE] import MODULE[, MODULE]
+ self.add_imports(['%s.%s' % (module, alias.name) for alias in node.names], node.lineno)
+
+ def add_import(self, name, line_number):
+ """
+ :type name: str
+ :type line_number: int
+ """
+ import_name = name
+
+ while self.is_module_util_name(name):
+ if name in self.module_utils:
+ if name not in self.imports:
+ display.info('%s:%d imports module_utils: %s' % (self.path, line_number, name), verbosity=5)
+ self.imports.add(name)
+
+ return # duplicate imports are ignored
+
+ name = '.'.join(name.split('.')[:-1])
+
+ if is_subdir(self.path, data_context().content.test_path):
+ return # invalid imports in tests are ignored
+
+        # Treat this error as a warning so tests can be executed as well as possible.
+ # This error should be detected by unit or integration tests.
+ display.warning('%s:%d Invalid module_utils import: %s' % (self.path, line_number, import_name))
+
+ def add_imports(self, names, line_no): # type: (t.List[str], int) -> None
+ """Add the given import names if they are module_utils imports."""
+ for name in names:
+ if self.is_module_util_name(name):
+ self.add_import(name, line_no)
+
+ @staticmethod
+ def is_module_util_name(name): # type: (str) -> bool
+ """Return True if the given name is a module_util name for the content under test. External module_utils are ignored."""
+ if data_context().content.is_ansible and name.startswith('ansible.module_utils.'):
+ return True
+
+ if data_context().content.collection and name.startswith('ansible_collections.%s.plugins.module_utils.' % data_context().content.collection.full_name):
+ return True
+
+ return False
diff --git a/test/lib/ansible_test/_internal/init.py b/test/lib/ansible_test/_internal/init.py
new file mode 100644
index 00000000..682e6b0c
--- /dev/null
+++ b/test/lib/ansible_test/_internal/init.py
@@ -0,0 +1,16 @@
+"""Early initialization for ansible-test before most other imports have been performed."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import resource
+
+from .constants import (
+ SOFT_RLIMIT_NOFILE,
+)
+
+CURRENT_RLIMIT_NOFILE = resource.getrlimit(resource.RLIMIT_NOFILE)
+DESIRED_RLIMIT_NOFILE = (SOFT_RLIMIT_NOFILE, CURRENT_RLIMIT_NOFILE[1])
+
+if DESIRED_RLIMIT_NOFILE < CURRENT_RLIMIT_NOFILE:
+ resource.setrlimit(resource.RLIMIT_NOFILE, DESIRED_RLIMIT_NOFILE)
+ CURRENT_RLIMIT_NOFILE = DESIRED_RLIMIT_NOFILE
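+
+# Illustrative example (assuming SOFT_RLIMIT_NOFILE were 16384): a current limit of
+# (1048576, 1048576) gives a desired value of (16384, 1048576), which compares lower, so the
+# soft limit is reduced; a current limit of (1024, 4096) is left unchanged because the desired
+# value (16384, 4096) does not compare lower.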
diff --git a/test/lib/ansible_test/_internal/integration/__init__.py b/test/lib/ansible_test/_internal/integration/__init__.py
new file mode 100644
index 00000000..f7be34e7
--- /dev/null
+++ b/test/lib/ansible_test/_internal/integration/__init__.py
@@ -0,0 +1,349 @@
+"""Ansible integration test infrastructure."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import contextlib
+import json
+import os
+import shutil
+import tempfile
+
+from .. import types as t
+
+from ..encoding import (
+ to_bytes,
+)
+
+from ..target import (
+ analyze_integration_target_dependencies,
+ walk_integration_targets,
+)
+
+from ..config import (
+ IntegrationConfig,
+ NetworkIntegrationConfig,
+ PosixIntegrationConfig,
+ WindowsIntegrationConfig,
+)
+
+from ..io import (
+ make_dirs,
+ write_text_file,
+ read_text_file,
+)
+
+from ..util import (
+ ApplicationError,
+ display,
+ COVERAGE_CONFIG_NAME,
+ MODE_DIRECTORY,
+ MODE_DIRECTORY_WRITE,
+ MODE_FILE,
+)
+
+from ..util_common import (
+ named_temporary_file,
+ ResultType,
+)
+
+from ..coverage_util import (
+ generate_coverage_config,
+)
+
+from ..cache import (
+ CommonCache,
+)
+
+from ..cloud import (
+ CloudEnvironmentConfig,
+)
+
+from ..data import (
+ data_context,
+)
+
+
+def setup_common_temp_dir(args, path):
+ """
+ :type args: IntegrationConfig
+ :type path: str
+ """
+ if args.explain:
+ return
+
+ os.mkdir(path)
+ os.chmod(path, MODE_DIRECTORY)
+
+ if args.coverage:
+ coverage_config_path = os.path.join(path, COVERAGE_CONFIG_NAME)
+
+ coverage_config = generate_coverage_config(args)
+
+ write_text_file(coverage_config_path, coverage_config)
+
+ os.chmod(coverage_config_path, MODE_FILE)
+
+ coverage_output_path = os.path.join(path, ResultType.COVERAGE.name)
+
+ os.mkdir(coverage_output_path)
+ os.chmod(coverage_output_path, MODE_DIRECTORY_WRITE)
+
+
+def generate_dependency_map(integration_targets):
+ """
+ :type integration_targets: list[IntegrationTarget]
+ :rtype: dict[str, set[IntegrationTarget]]
+ """
+ targets_dict = dict((target.name, target) for target in integration_targets)
+ target_dependencies = analyze_integration_target_dependencies(integration_targets)
+ dependency_map = {}
+
+ invalid_targets = set()
+
+ for dependency, dependents in target_dependencies.items():
+ dependency_target = targets_dict.get(dependency)
+
+ if not dependency_target:
+ invalid_targets.add(dependency)
+ continue
+
+ for dependent in dependents:
+ if dependent not in dependency_map:
+ dependency_map[dependent] = set()
+
+ dependency_map[dependent].add(dependency_target)
+
+ if invalid_targets:
+ raise ApplicationError('Non-existent target dependencies: %s' % ', '.join(sorted(invalid_targets)))
+
+ return dependency_map
+
+
+def get_files_needed(target_dependencies):
+ """
+ :type target_dependencies: list[IntegrationTarget]
+ :rtype: list[str]
+ """
+ files_needed = []
+
+ for target_dependency in target_dependencies:
+ files_needed += target_dependency.needs_file
+
+ files_needed = sorted(set(files_needed))
+
+ invalid_paths = [path for path in files_needed if not os.path.isfile(path)]
+
+ if invalid_paths:
+ raise ApplicationError('Invalid "needs/file/*" aliases:\n%s' % '\n'.join(invalid_paths))
+
+ return files_needed
+
+
+def check_inventory(args, inventory_path): # type: (IntegrationConfig, str) -> None
+ """Check the given inventory for issues."""
+ if args.docker or args.remote:
+ if os.path.exists(inventory_path):
+ inventory = read_text_file(inventory_path)
+
+ if 'ansible_ssh_private_key_file' in inventory:
+ display.warning('Use of "ansible_ssh_private_key_file" in inventory with the --docker or --remote option is unsupported and will likely fail.')
+
+
+def get_inventory_relative_path(args): # type: (IntegrationConfig) -> str
+ """Return the inventory path used for the given integration configuration relative to the content root."""
+ inventory_names = {
+ PosixIntegrationConfig: 'inventory',
+ WindowsIntegrationConfig: 'inventory.winrm',
+ NetworkIntegrationConfig: 'inventory.networking',
+ } # type: t.Dict[t.Type[IntegrationConfig], str]
+
+ return os.path.join(data_context().content.integration_path, inventory_names[type(args)])
+
+
+def delegate_inventory(args, inventory_path_src): # type: (IntegrationConfig, str) -> None
+ """Make the given inventory available during delegation."""
+ if isinstance(args, PosixIntegrationConfig):
+ return
+
+ def inventory_callback(files): # type: (t.List[t.Tuple[str, str]]) -> None
+ """
+ Add the inventory file to the payload file list.
+ This will preserve the file during delegation even if it is ignored or is outside the content and install roots.
+ """
+ inventory_path = get_inventory_relative_path(args)
+ inventory_tuple = inventory_path_src, inventory_path
+
+ if os.path.isfile(inventory_path_src) and inventory_tuple not in files:
+ originals = [item for item in files if item[1] == inventory_path]
+
+ if originals:
+ for original in originals:
+ files.remove(original)
+
+ display.warning('Overriding inventory file "%s" with "%s".' % (inventory_path, inventory_path_src))
+ else:
+ display.notice('Sourcing inventory file "%s" from "%s".' % (inventory_path, inventory_path_src))
+
+ files.append(inventory_tuple)
+
+ data_context().register_payload_callback(inventory_callback)
+
+
+@contextlib.contextmanager
+def integration_test_environment(args, target, inventory_path_src):
+ """
+ :type args: IntegrationConfig
+ :type target: IntegrationTarget
+ :type inventory_path_src: str
+ """
+ ansible_config_src = args.get_ansible_config()
+ ansible_config_relative = os.path.join(data_context().content.integration_path, '%s.cfg' % args.command)
+
+ if args.no_temp_workdir or 'no/temp_workdir/' in target.aliases:
+ display.warning('Disabling the temp work dir is a temporary debugging feature that may be removed in the future without notice.')
+
+ integration_dir = os.path.join(data_context().content.root, data_context().content.integration_path)
+ targets_dir = os.path.join(data_context().content.root, data_context().content.integration_targets_path)
+ inventory_path = inventory_path_src
+ ansible_config = ansible_config_src
+ vars_file = os.path.join(data_context().content.root, data_context().content.integration_vars_path)
+
+ yield IntegrationEnvironment(integration_dir, targets_dir, inventory_path, ansible_config, vars_file)
+ return
+
+ # When testing a collection, the temporary directory must reside within the collection.
+ # This is necessary to enable support for the default collection for non-collection content (playbooks and roles).
+ root_temp_dir = os.path.join(ResultType.TMP.path, 'integration')
+
+ prefix = '%s-' % target.name
+ suffix = u'-\u00c5\u00d1\u015a\u00cc\u03b2\u0141\u00c8'
+
+ if args.no_temp_unicode or 'no/temp_unicode/' in target.aliases:
+ display.warning('Disabling unicode in the temp work dir is a temporary debugging feature that may be removed in the future without notice.')
+ suffix = '-ansible'
+
+ if args.explain:
+ temp_dir = os.path.join(root_temp_dir, '%stemp%s' % (prefix, suffix))
+ else:
+ make_dirs(root_temp_dir)
+ temp_dir = tempfile.mkdtemp(prefix=prefix, suffix=suffix, dir=root_temp_dir)
+
+ try:
+ display.info('Preparing temporary directory: %s' % temp_dir, verbosity=2)
+
+ inventory_relative_path = get_inventory_relative_path(args)
+ inventory_path = os.path.join(temp_dir, inventory_relative_path)
+
+ cache = IntegrationCache(args)
+
+ target_dependencies = sorted([target] + list(cache.dependency_map.get(target.name, set())))
+
+ files_needed = get_files_needed(target_dependencies)
+
+ integration_dir = os.path.join(temp_dir, data_context().content.integration_path)
+ targets_dir = os.path.join(temp_dir, data_context().content.integration_targets_path)
+ ansible_config = os.path.join(temp_dir, ansible_config_relative)
+
+ vars_file_src = os.path.join(data_context().content.root, data_context().content.integration_vars_path)
+ vars_file = os.path.join(temp_dir, data_context().content.integration_vars_path)
+
+ file_copies = [
+ (ansible_config_src, ansible_config),
+ (inventory_path_src, inventory_path),
+ ]
+
+ if os.path.exists(vars_file_src):
+ file_copies.append((vars_file_src, vars_file))
+
+ file_copies += [(path, os.path.join(temp_dir, path)) for path in files_needed]
+
+ integration_targets_relative_path = data_context().content.integration_targets_path
+
+ directory_copies = [
+ (
+ os.path.join(integration_targets_relative_path, target.relative_path),
+ os.path.join(temp_dir, integration_targets_relative_path, target.relative_path)
+ )
+ for target in target_dependencies
+ ]
+
+ directory_copies = sorted(set(directory_copies))
+ file_copies = sorted(set(file_copies))
+
+ if not args.explain:
+ make_dirs(integration_dir)
+
+ for dir_src, dir_dst in directory_copies:
+ display.info('Copying %s/ to %s/' % (dir_src, dir_dst), verbosity=2)
+
+ if not args.explain:
+ shutil.copytree(to_bytes(dir_src), to_bytes(dir_dst), symlinks=True)
+
+ for file_src, file_dst in file_copies:
+ display.info('Copying %s to %s' % (file_src, file_dst), verbosity=2)
+
+ if not args.explain:
+ make_dirs(os.path.dirname(file_dst))
+ shutil.copy2(file_src, file_dst)
+
+ yield IntegrationEnvironment(integration_dir, targets_dir, inventory_path, ansible_config, vars_file)
+ finally:
+ if not args.explain:
+ shutil.rmtree(temp_dir)
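+
+# Illustrative usage of the context manager above (names are examples only):
+#
+#   with integration_test_environment(args, target, inventory_path) as test_env:
+#       ...  # use test_env.integration_dir, test_env.inventory_path, test_env.ansible_config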
+
+
+@contextlib.contextmanager
+def integration_test_config_file(args, env_config, integration_dir):
+ """
+ :type args: IntegrationConfig
+ :type env_config: CloudEnvironmentConfig
+ :type integration_dir: str
+ """
+ if not env_config:
+ yield None
+ return
+
+ config_vars = (env_config.ansible_vars or {}).copy()
+
+ config_vars.update(dict(
+ ansible_test=dict(
+ environment=env_config.env_vars,
+ module_defaults=env_config.module_defaults,
+ )
+ ))
+
+ config_file = json.dumps(config_vars, indent=4, sort_keys=True)
+
+ with named_temporary_file(args, 'config-file-', '.json', integration_dir, config_file) as path:
+ filename = os.path.relpath(path, integration_dir)
+
+ display.info('>>> Config File: %s\n%s' % (filename, config_file), verbosity=3)
+
+ yield path
+
+
+class IntegrationEnvironment:
+ """Details about the integration environment."""
+ def __init__(self, integration_dir, targets_dir, inventory_path, ansible_config, vars_file):
+ self.integration_dir = integration_dir
+ self.targets_dir = targets_dir
+ self.inventory_path = inventory_path
+ self.ansible_config = ansible_config
+ self.vars_file = vars_file
+
+
+class IntegrationCache(CommonCache):
+ """Integration cache."""
+ @property
+ def integration_targets(self):
+ """
+ :rtype: list[IntegrationTarget]
+ """
+ return self.get('integration_targets', lambda: list(walk_integration_targets()))
+
+ @property
+ def dependency_map(self):
+ """
+ :rtype: dict[str, set[IntegrationTarget]]
+ """
+ return self.get('dependency_map', lambda: generate_dependency_map(self.integration_targets))
diff --git a/test/lib/ansible_test/_internal/io.py b/test/lib/ansible_test/_internal/io.py
new file mode 100644
index 00000000..0f61cd2d
--- /dev/null
+++ b/test/lib/ansible_test/_internal/io.py
@@ -0,0 +1,94 @@
+"""Functions for disk IO."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import errno
+import io
+import json
+import os
+
+from . import types as t
+
+from .encoding import (
+ ENCODING,
+ to_bytes,
+ to_text,
+)
+
+
+def read_json_file(path): # type: (t.AnyStr) -> t.Any
+ """Parse and return the json content from the specified path."""
+ return json.loads(read_text_file(path))
+
+
+def read_text_file(path): # type: (t.AnyStr) -> t.Text
+ """Return the contents of the specified path as text."""
+ return to_text(read_binary_file(path))
+
+
+def read_binary_file(path): # type: (t.AnyStr) -> bytes
+ """Return the contents of the specified path as bytes."""
+ with open_binary_file(path) as file:
+ return file.read()
+
+
+def make_dirs(path): # type: (str) -> None
+ """Create a directory at path, including any necessary parent directories."""
+ try:
+ os.makedirs(to_bytes(path))
+ except OSError as ex:
+ if ex.errno != errno.EEXIST:
+ raise
+
+
+def write_json_file(path, # type: str
+ content, # type: t.Union[t.List[t.Any], t.Dict[str, t.Any]]
+ create_directories=False, # type: bool
+ formatted=True, # type: bool
+ encoder=None, # type: t.Optional[t.Callable[[t.Any], t.Any]]
+ ): # type: (...) -> None
+ """Write the given json content to the specified path, optionally creating missing directories."""
+ text_content = json.dumps(content,
+ sort_keys=formatted,
+ indent=4 if formatted else None,
+ separators=(', ', ': ') if formatted else (',', ':'),
+ cls=encoder,
+ ) + '\n'
+
+ write_text_file(path, text_content, create_directories=create_directories)
+
+
+def write_text_file(path, content, create_directories=False): # type: (str, str, bool) -> None
+ """Write the given text content to the specified path, optionally creating missing directories."""
+ if create_directories:
+ make_dirs(os.path.dirname(path))
+
+ with open_binary_file(path, 'wb') as file:
+ file.write(to_bytes(content))
+
+
+def open_text_file(path, mode='r'): # type: (str, str) -> t.TextIO
+ """Open the given path for text access."""
+ if 'b' in mode:
+ raise Exception('mode cannot include "b" for text files: %s' % mode)
+
+ # noinspection PyTypeChecker
+ return io.open(to_bytes(path), mode, encoding=ENCODING)
+
+
+def open_binary_file(path, mode='rb'): # type: (str, str) -> t.BinaryIO
+ """Open the given path for binary access."""
+ if 'b' not in mode:
+ raise Exception('mode must include "b" for binary files: %s' % mode)
+
+ # noinspection PyTypeChecker
+ return io.open(to_bytes(path), mode)
+
+
+class SortedSetEncoder(json.JSONEncoder):
+ """Encode sets as sorted lists."""
+ def default(self, obj): # pylint: disable=method-hidden, arguments-differ
+ if isinstance(obj, set):
+ return sorted(obj)
+
+        return super(SortedSetEncoder, self).default(obj)
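+
+# Illustrative usage of SortedSetEncoder (the path is an example only):
+#
+#   write_json_file('/tmp/example.json', dict(targets=set(['b', 'a'])), encoder=SortedSetEncoder)
+#
+# The set is serialized as the sorted list ["a", "b"].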
diff --git a/test/lib/ansible_test/_internal/manage_ci.py b/test/lib/ansible_test/_internal/manage_ci.py
new file mode 100644
index 00000000..e81dad68
--- /dev/null
+++ b/test/lib/ansible_test/_internal/manage_ci.py
@@ -0,0 +1,335 @@
+"""Access Ansible Core CI remote services."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import tempfile
+import time
+
+from .util import (
+ SubprocessError,
+ ApplicationError,
+ cmd_quote,
+ display,
+ ANSIBLE_TEST_DATA_ROOT,
+)
+
+from .util_common import (
+ intercept_command,
+ get_network_settings,
+ run_command,
+)
+
+from .core_ci import (
+ AnsibleCoreCI,
+)
+
+from .ansible_util import (
+ ansible_environment,
+)
+
+from .config import (
+ ShellConfig,
+)
+
+from .payload import (
+ create_payload,
+)
+
+
+class ManageWindowsCI:
+ """Manage access to a Windows instance provided by Ansible Core CI."""
+ def __init__(self, core_ci):
+ """
+ :type core_ci: AnsibleCoreCI
+ """
+ self.core_ci = core_ci
+ self.ssh_args = ['-i', self.core_ci.ssh_key.key]
+
+ ssh_options = dict(
+ BatchMode='yes',
+ StrictHostKeyChecking='no',
+ UserKnownHostsFile='/dev/null',
+ ServerAliveInterval=15,
+ ServerAliveCountMax=4,
+ )
+
+ for ssh_option in sorted(ssh_options):
+ self.ssh_args += ['-o', '%s=%s' % (ssh_option, ssh_options[ssh_option])]
+
+ def setup(self, python_version):
+ """Used in delegate_remote to setup the host, no action is required for Windows.
+ :type python_version: str
+ """
+
+ def wait(self):
+ """Wait for instance to respond to ansible ping."""
+ extra_vars = [
+ 'ansible_connection=winrm',
+ 'ansible_host=%s' % self.core_ci.connection.hostname,
+ 'ansible_user=%s' % self.core_ci.connection.username,
+ 'ansible_password=%s' % self.core_ci.connection.password,
+ 'ansible_port=%s' % self.core_ci.connection.port,
+ 'ansible_winrm_server_cert_validation=ignore',
+ ]
+
+ name = 'windows_%s' % self.core_ci.version
+
+ env = ansible_environment(self.core_ci.args)
+ cmd = ['ansible', '-m', 'ansible.windows.win_ping', '-i', '%s,' % name, name, '-e', ' '.join(extra_vars)]
+
+ for dummy in range(1, 120):
+ try:
+ intercept_command(self.core_ci.args, cmd, 'ping', env=env, disable_coverage=True)
+ return
+ except SubprocessError:
+ time.sleep(10)
+
+ raise ApplicationError('Timeout waiting for %s/%s instance %s.' %
+ (self.core_ci.platform, self.core_ci.version, self.core_ci.instance_id))
+
+ def download(self, remote, local):
+ """
+ :type remote: str
+ :type local: str
+ """
+ self.scp('%s@%s:%s' % (self.core_ci.connection.username, self.core_ci.connection.hostname, remote), local)
+
+ def upload(self, local, remote):
+ """
+ :type local: str
+ :type remote: str
+ """
+ self.scp(local, '%s@%s:%s' % (self.core_ci.connection.username, self.core_ci.connection.hostname, remote))
+
+ def ssh(self, command, options=None, force_pty=True):
+ """
+ :type command: str | list[str]
+ :type options: list[str] | None
+ :type force_pty: bool
+ """
+ if not options:
+ options = []
+ if force_pty:
+ options.append('-tt')
+
+ if isinstance(command, list):
+ command = ' '.join(cmd_quote(c) for c in command)
+
+ run_command(self.core_ci.args,
+ ['ssh', '-q'] + self.ssh_args +
+ options +
+ ['-p', '22',
+ '%s@%s' % (self.core_ci.connection.username, self.core_ci.connection.hostname)] +
+ [command])
+
+ def scp(self, src, dst):
+ """
+ :type src: str
+ :type dst: str
+ """
+ for dummy in range(1, 10):
+ try:
+ run_command(self.core_ci.args,
+ ['scp'] + self.ssh_args +
+ ['-P', '22', '-q', '-r', src, dst])
+ return
+ except SubprocessError:
+ time.sleep(10)
+
+ raise ApplicationError('Failed transfer: %s -> %s' % (src, dst))
+
+
+class ManageNetworkCI:
+ """Manage access to a network instance provided by Ansible Core CI."""
+ def __init__(self, core_ci):
+ """
+ :type core_ci: AnsibleCoreCI
+ """
+ self.core_ci = core_ci
+
+ def wait(self):
+ """Wait for instance to respond to ansible ping."""
+ settings = get_network_settings(self.core_ci.args, self.core_ci.platform, self.core_ci.version)
+
+ extra_vars = [
+ 'ansible_host=%s' % self.core_ci.connection.hostname,
+ 'ansible_port=%s' % self.core_ci.connection.port,
+ 'ansible_ssh_private_key_file=%s' % self.core_ci.ssh_key.key,
+ ] + [
+ '%s=%s' % (key, value) for key, value in settings.inventory_vars.items()
+ ]
+
+ name = '%s-%s' % (self.core_ci.platform, self.core_ci.version.replace('.', '-'))
+
+ env = ansible_environment(self.core_ci.args)
+ cmd = [
+ 'ansible',
+ '-m', '%s%s_command' % (settings.collection + '.' if settings.collection else '', self.core_ci.platform),
+ '-a', 'commands=?',
+ '-u', self.core_ci.connection.username,
+ '-i', '%s,' % name,
+ '-e', ' '.join(extra_vars),
+ name,
+ ]
+
+ for dummy in range(1, 90):
+ try:
+ intercept_command(self.core_ci.args, cmd, 'ping', env=env, disable_coverage=True)
+ return
+ except SubprocessError:
+ time.sleep(10)
+
+ raise ApplicationError('Timeout waiting for %s/%s instance %s.' %
+ (self.core_ci.platform, self.core_ci.version, self.core_ci.instance_id))
+
+
+class ManagePosixCI:
+ """Manage access to a POSIX instance provided by Ansible Core CI."""
+ def __init__(self, core_ci):
+ """
+ :type core_ci: AnsibleCoreCI
+ """
+ self.core_ci = core_ci
+ self.ssh_args = ['-i', self.core_ci.ssh_key.key]
+
+ ssh_options = dict(
+ BatchMode='yes',
+ StrictHostKeyChecking='no',
+ UserKnownHostsFile='/dev/null',
+ ServerAliveInterval=15,
+ ServerAliveCountMax=4,
+ )
+
+ for ssh_option in sorted(ssh_options):
+ self.ssh_args += ['-o', '%s=%s' % (ssh_option, ssh_options[ssh_option])]
+
+ if self.core_ci.platform == 'freebsd':
+ if self.core_ci.provider == 'aws':
+ self.become = ['su', '-l', 'root', '-c']
+ elif self.core_ci.provider == 'azure':
+ self.become = ['sudo', '-in', 'sh', '-c']
+ else:
+ raise NotImplementedError('provider %s has not been implemented' % self.core_ci.provider)
+ elif self.core_ci.platform == 'macos':
+ self.become = ['sudo', '-in', 'PATH=/usr/local/bin:$PATH', 'sh', '-c']
+ elif self.core_ci.platform == 'osx':
+ self.become = ['sudo', '-in', 'PATH=/usr/local/bin:$PATH']
+ elif self.core_ci.platform == 'rhel' or self.core_ci.platform == 'centos':
+ self.become = ['sudo', '-in', 'bash', '-c']
+ elif self.core_ci.platform in ['aix', 'ibmi']:
+ self.become = []
+
+ def setup(self, python_version):
+ """Start instance and wait for it to become ready and respond to an ansible ping.
+ :type python_version: str
+ :rtype: str
+ """
+ pwd = self.wait()
+
+ display.info('Remote working directory: %s' % pwd, verbosity=1)
+
+ if isinstance(self.core_ci.args, ShellConfig):
+ if self.core_ci.args.raw:
+ return pwd
+
+ self.configure(python_version)
+ self.upload_source()
+
+ return pwd
+
+ def wait(self): # type: () -> str
+ """Wait for instance to respond to SSH."""
+ for dummy in range(1, 90):
+ try:
+ stdout = self.ssh('pwd', capture=True)[0]
+
+ if self.core_ci.args.explain:
+ return '/pwd'
+
+ pwd = stdout.strip().splitlines()[-1]
+
+ if not pwd.startswith('/'):
+ raise Exception('Unexpected current working directory "%s" from "pwd" command output:\n%s' % (pwd, stdout))
+
+ return pwd
+ except SubprocessError:
+ time.sleep(10)
+
+ raise ApplicationError('Timeout waiting for %s/%s instance %s.' %
+ (self.core_ci.platform, self.core_ci.version, self.core_ci.instance_id))
+
+ def configure(self, python_version):
+ """Configure remote host for testing.
+ :type python_version: str
+ """
+ self.upload(os.path.join(ANSIBLE_TEST_DATA_ROOT, 'setup', 'remote.sh'), '/tmp')
+ self.ssh('chmod +x /tmp/remote.sh && /tmp/remote.sh %s %s' % (self.core_ci.platform, python_version))
+
+ def upload_source(self):
+ """Upload and extract source."""
+ with tempfile.NamedTemporaryFile(prefix='ansible-source-', suffix='.tgz') as local_source_fd:
+ remote_source_dir = '/tmp'
+ remote_source_path = os.path.join(remote_source_dir, os.path.basename(local_source_fd.name))
+
+ create_payload(self.core_ci.args, local_source_fd.name)
+
+ self.upload(local_source_fd.name, remote_source_dir)
+            # AIX does not provide GNU tar, so the available options differ and -z is not
+            # recognized. This pattern works with both versions of tar.
+ self.ssh(
+ 'rm -rf ~/ansible ~/ansible_collections && cd ~/ && gunzip --stdout %s | tar oxf - && rm %s' %
+ (remote_source_path, remote_source_path)
+ )
+
+ def download(self, remote, local):
+ """
+ :type remote: str
+ :type local: str
+ """
+ self.scp('%s@%s:%s' % (self.core_ci.connection.username, self.core_ci.connection.hostname, remote), local)
+
+ def upload(self, local, remote):
+ """
+ :type local: str
+ :type remote: str
+ """
+ self.scp(local, '%s@%s:%s' % (self.core_ci.connection.username, self.core_ci.connection.hostname, remote))
+
+ def ssh(self, command, options=None, capture=False):
+ """
+ :type command: str | list[str]
+ :type options: list[str] | None
+ :type capture: bool
+ :rtype: str | None, str | None
+ """
+ if not options:
+ options = []
+
+ if isinstance(command, list):
+ command = ' '.join(cmd_quote(c) for c in command)
+
+ command = cmd_quote(command) if self.become else command
+ return run_command(self.core_ci.args,
+ ['ssh', '-tt', '-q'] + self.ssh_args +
+ options +
+ ['-p', str(self.core_ci.connection.port),
+ '%s@%s' % (self.core_ci.connection.username, self.core_ci.connection.hostname)] +
+ self.become + [command], capture=capture)
+
+ def scp(self, src, dst):
+ """
+ :type src: str
+ :type dst: str
+ """
+ for dummy in range(1, 10):
+ try:
+ run_command(self.core_ci.args,
+ ['scp'] + self.ssh_args +
+ ['-P', str(self.core_ci.connection.port), '-q', '-r', src, dst])
+ return
+ except SubprocessError:
+ time.sleep(10)
+
+ raise ApplicationError('Failed transfer: %s -> %s' % (src, dst))
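+
+# Illustrative usage (core_ci is an AnsibleCoreCI instance provisioned elsewhere; the Python
+# version and command are examples only):
+#
+#   manager = ManagePosixCI(core_ci)
+#   remote_pwd = manager.setup(python_version='3.6')
+#   manager.ssh('uname -a')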
diff --git a/test/lib/ansible_test/_internal/metadata.py b/test/lib/ansible_test/_internal/metadata.py
new file mode 100644
index 00000000..36575d0c
--- /dev/null
+++ b/test/lib/ansible_test/_internal/metadata.py
@@ -0,0 +1,151 @@
+"""Test metadata for passing data to delegated tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from . import types as t
+
+from .util import (
+ display,
+)
+
+from .io import (
+ write_json_file,
+ read_json_file,
+)
+
+from .diff import (
+ parse_diff,
+ FileDiff,
+)
+
+
+class Metadata:
+ """Metadata object for passing data to delegated tests."""
+ def __init__(self):
+ """Initialize metadata."""
+ self.changes = {} # type: t.Dict[str, t.Tuple[t.Tuple[int, int]]]
+ self.cloud_config = None # type: t.Optional[t.Dict[str, str]]
+ self.instance_config = None # type: t.Optional[t.List[t.Dict[str, str]]]
+ self.change_description = None # type: t.Optional[ChangeDescription]
+ self.ci_provider = None # type: t.Optional[str]
+
+ def populate_changes(self, diff):
+ """
+ :type diff: list[str] | None
+ """
+ patches = parse_diff(diff)
+ patches = sorted(patches, key=lambda k: k.new.path) # type: t.List[FileDiff]
+
+ self.changes = dict((patch.new.path, tuple(patch.new.ranges)) for patch in patches)
+
+ renames = [patch.old.path for patch in patches if patch.old.path != patch.new.path and patch.old.exists and patch.new.exists]
+ deletes = [patch.old.path for patch in patches if not patch.new.exists]
+
+ # make sure old paths which were renamed or deleted are registered in changes
+ for path in renames + deletes:
+ if path in self.changes:
+ # old path was replaced with another file
+ continue
+
+ # failed tests involving deleted files should be using line 0 since there is no content remaining
+ self.changes[path] = ((0, 0),)
+
+ def to_dict(self):
+ """
+ :rtype: dict[str, any]
+ """
+ return dict(
+ changes=self.changes,
+ cloud_config=self.cloud_config,
+ instance_config=self.instance_config,
+ ci_provider=self.ci_provider,
+ change_description=self.change_description.to_dict(),
+ )
+
+ def to_file(self, path):
+ """
+        :type path: str
+ """
+ data = self.to_dict()
+
+ display.info('>>> Metadata: %s\n%s' % (path, data), verbosity=3)
+
+ write_json_file(path, data)
+
+ @staticmethod
+ def from_file(path):
+ """
+ :type path: str
+ :rtype: Metadata
+ """
+ data = read_json_file(path)
+ return Metadata.from_dict(data)
+
+ @staticmethod
+ def from_dict(data):
+ """
+ :type data: dict[str, any]
+ :rtype: Metadata
+ """
+ metadata = Metadata()
+ metadata.changes = data['changes']
+ metadata.cloud_config = data['cloud_config']
+ metadata.instance_config = data['instance_config']
+ metadata.ci_provider = data['ci_provider']
+ metadata.change_description = ChangeDescription.from_dict(data['change_description'])
+
+ return metadata
+
+
+class ChangeDescription:
+ """Description of changes."""
+ def __init__(self):
+ self.command = '' # type: str
+ self.changed_paths = [] # type: t.List[str]
+ self.deleted_paths = [] # type: t.List[str]
+ self.regular_command_targets = {} # type: t.Dict[str, t.List[str]]
+ self.focused_command_targets = {} # type: t.Dict[str, t.List[str]]
+ self.no_integration_paths = [] # type: t.List[str]
+
+ @property
+ def targets(self):
+ """
+ :rtype: list[str] | None
+ """
+ return self.regular_command_targets.get(self.command)
+
+ @property
+ def focused_targets(self):
+ """
+ :rtype: list[str] | None
+ """
+ return self.focused_command_targets.get(self.command)
+
+ def to_dict(self):
+ """
+ :rtype: dict[str, any]
+ """
+ return dict(
+ command=self.command,
+ changed_paths=self.changed_paths,
+ deleted_paths=self.deleted_paths,
+ regular_command_targets=self.regular_command_targets,
+ focused_command_targets=self.focused_command_targets,
+ no_integration_paths=self.no_integration_paths,
+ )
+
+ @staticmethod
+ def from_dict(data):
+ """
+ :param data: dict[str, any]
+ :rtype: ChangeDescription
+ """
+ changes = ChangeDescription()
+ changes.command = data['command']
+ changes.changed_paths = data['changed_paths']
+ changes.deleted_paths = data['deleted_paths']
+ changes.regular_command_targets = data['regular_command_targets']
+ changes.focused_command_targets = data['focused_command_targets']
+ changes.no_integration_paths = data['no_integration_paths']
+
+ return changes
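
The metadata file written by to_file() above is plain JSON. Below is a minimal sketch of the round trip, using an illustrative payload rather than real test data (note that tuples such as the change ranges come back as lists after JSON decoding):

    import json

    # Hypothetical shape of Metadata.to_dict(); values are illustrative only.
    metadata = {
        'changes': {'lib/ansible/modules/ping.py': ((1, 5),)},
        'cloud_config': None,
        'instance_config': None,
        'ci_provider': 'shippable',
        'change_description': {
            'command': 'integration',
            'changed_paths': ['lib/ansible/modules/ping.py'],
            'deleted_paths': [],
            'regular_command_targets': {'integration': ['ping']},
            'focused_command_targets': {},
            'no_integration_paths': [],
        },
    }

    with open('metadata.json', 'w') as metadata_fd:
        json.dump(metadata, metadata_fd)  # analogous to Metadata.to_file()

    with open('metadata.json') as metadata_fd:
        restored = json.load(metadata_fd)  # analogous to Metadata.from_file()
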
diff --git a/test/lib/ansible_test/_internal/payload.py b/test/lib/ansible_test/_internal/payload.py
new file mode 100644
index 00000000..161faba0
--- /dev/null
+++ b/test/lib/ansible_test/_internal/payload.py
@@ -0,0 +1,146 @@
+"""Payload management for sending Ansible files and test content to other systems (VMs, containers)."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import atexit
+import os
+import stat
+import tarfile
+import tempfile
+import time
+
+from . import types as t
+
+from .config import (
+ IntegrationConfig,
+ ShellConfig,
+)
+
+from .util import (
+ display,
+ ANSIBLE_SOURCE_ROOT,
+ remove_tree,
+ is_subdir,
+)
+
+from .data import (
+ data_context,
+)
+
+from .util_common import (
+ CommonConfig,
+)
+
+# improve performance by disabling uid/gid lookups
+tarfile.pwd = None
+tarfile.grp = None
+
+# this bin symlink map must exactly match the contents of the bin directory
+# it is necessary for payload creation to reconstruct the bin directory when running ansible-test from an installed version of ansible
+ANSIBLE_BIN_SYMLINK_MAP = {
+ 'ansible': '../lib/ansible/cli/scripts/ansible_cli_stub.py',
+ 'ansible-config': 'ansible',
+ 'ansible-connection': '../lib/ansible/cli/scripts/ansible_connection_cli_stub.py',
+ 'ansible-console': 'ansible',
+ 'ansible-doc': 'ansible',
+ 'ansible-galaxy': 'ansible',
+ 'ansible-inventory': 'ansible',
+ 'ansible-playbook': 'ansible',
+ 'ansible-pull': 'ansible',
+ 'ansible-test': '../test/lib/ansible_test/_data/cli/ansible_test_cli_stub.py',
+ 'ansible-vault': 'ansible',
+}
+
+
+def create_payload(args, dst_path): # type: (CommonConfig, str) -> None
+ """Create a payload for delegation."""
+ if args.explain:
+ return
+
+ files = list(data_context().ansible_source)
+ filters = {}
+
+ def make_executable(tar_info): # type: (tarfile.TarInfo) -> t.Optional[tarfile.TarInfo]
+ """Make the given file executable."""
+ tar_info.mode |= stat.S_IXUSR | stat.S_IXOTH | stat.S_IXGRP
+ return tar_info
+
+ if not ANSIBLE_SOURCE_ROOT:
+ # reconstruct the bin directory which is not available when running from an ansible install
+ files.extend(create_temporary_bin_files(args))
+ filters.update(dict((os.path.join('ansible', path[3:]), make_executable) for path in ANSIBLE_BIN_SYMLINK_MAP.values() if path.startswith('../')))
+
+ if not data_context().content.is_ansible:
+ # exclude unnecessary files when not testing ansible itself
+ files = [f for f in files if
+ is_subdir(f[1], 'bin/') or
+ is_subdir(f[1], 'lib/ansible/') or
+ is_subdir(f[1], 'test/lib/ansible_test/')]
+
+ if not isinstance(args, (ShellConfig, IntegrationConfig)):
+ # exclude built-in ansible modules when they are not needed
+ files = [f for f in files if not is_subdir(f[1], 'lib/ansible/modules/') or f[1] == 'lib/ansible/modules/__init__.py']
+
+ collection_layouts = data_context().create_collection_layouts()
+
+ content_files = []
+ extra_files = []
+
+ for layout in collection_layouts:
+ if layout == data_context().content:
+ # include files from the current collection (layout.collection.directory will be added later)
+ content_files.extend((os.path.join(layout.root, path), path) for path in data_context().content.all_files())
+ else:
+ # include files from each collection in the same collection root as the content being tested
+ extra_files.extend((os.path.join(layout.root, path), os.path.join(layout.collection.directory, path)) for path in layout.all_files())
+ else:
+ # when testing ansible itself the ansible source is the content
+ content_files = files
+ # there are no extra files when testing ansible itself
+ extra_files = []
+
+ for callback in data_context().payload_callbacks:
+ # execute callbacks only on the content paths
+ # this is done before placing them in the appropriate subdirectory (see below)
+ callback(content_files)
+
+ # place ansible source files under the 'ansible' directory on the delegated host
+ files = [(src, os.path.join('ansible', dst)) for src, dst in files]
+
+ if data_context().content.collection:
+ # place collection files under the 'ansible_collections/{namespace}/{collection}' directory on the delegated host
+ files.extend((src, os.path.join(data_context().content.collection.directory, dst)) for src, dst in content_files)
+ # extra files already have the correct destination path
+ files.extend(extra_files)
+
+ # maintain predictable file order
+ files = sorted(set(files))
+
+ display.info('Creating a payload archive containing %d files...' % len(files), verbosity=1)
+
+ start = time.time()
+
+ with tarfile.TarFile.open(dst_path, mode='w:gz', compresslevel=4, format=tarfile.GNU_FORMAT) as tar:
+ for src, dst in files:
+ display.info('%s -> %s' % (src, dst), verbosity=4)
+ tar.add(src, dst, filter=filters.get(dst))
+
+ duration = time.time() - start
+ payload_size_bytes = os.path.getsize(dst_path)
+
+ display.info('Created a %d byte payload archive containing %d files in %d seconds.' % (payload_size_bytes, len(files), duration), verbosity=1)
+
+
+def create_temporary_bin_files(args): # type: (CommonConfig) -> t.Tuple[t.Tuple[str, str], ...]
+ """Create a temporary ansible bin directory populated using the symlink map."""
+ if args.explain:
+ temp_path = '/tmp/ansible-tmp-bin'
+ else:
+ temp_path = tempfile.mkdtemp(prefix='ansible', suffix='bin')
+ atexit.register(remove_tree, temp_path)
+
+ for name, dest in ANSIBLE_BIN_SYMLINK_MAP.items():
+ path = os.path.join(temp_path, name)
+ os.symlink(dest, path)
+
+ return tuple((os.path.join(temp_path, name), os.path.join('bin', name)) for name in sorted(ANSIBLE_BIN_SYMLINK_MAP))
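
The payload built above is an ordinary gzip tarball in which selected entries are marked executable via a tarfile filter. A minimal standalone sketch of that technique, with a temporary file standing in for a real source path:

    import os
    import stat
    import tarfile
    import tempfile

    def make_executable(tar_info):
        """Give the archived entry execute permission, as the payload filter above does."""
        tar_info.mode |= stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
        return tar_info

    src = tempfile.mkstemp()[1]  # stand-in for a real source file

    with tarfile.open('payload.tgz', mode='w:gz', compresslevel=4, format=tarfile.GNU_FORMAT) as tar:
        # the second argument controls where the file lands when the payload is extracted remotely
        tar.add(src, 'ansible/bin/example', filter=make_executable)

    os.remove(src)
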
diff --git a/test/lib/ansible_test/_internal/powershell_import_analysis.py b/test/lib/ansible_test/_internal/powershell_import_analysis.py
new file mode 100644
index 00000000..cfc61859
--- /dev/null
+++ b/test/lib/ansible_test/_internal/powershell_import_analysis.py
@@ -0,0 +1,105 @@
+"""Analyze powershell import statements."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import re
+
+from .io import (
+ read_text_file,
+)
+
+from .util import (
+ display,
+)
+
+from .util_common import (
+ resolve_csharp_ps_util,
+)
+
+from .data import (
+ data_context,
+)
+
+
+def get_powershell_module_utils_imports(powershell_targets):
+ """Return a dictionary of module_utils names mapped to sets of powershell file paths.
+ :type powershell_targets: list[TestTarget]
+ :rtype: dict[str, set[str]]
+ """
+
+ module_utils = enumerate_module_utils()
+
+ imports_by_target_path = {}
+
+ for target in powershell_targets:
+ imports_by_target_path[target.path] = extract_powershell_module_utils_imports(target.path, module_utils)
+
+ imports = dict([(module_util, set()) for module_util in module_utils])
+
+ for target_path in imports_by_target_path:
+ for module_util in imports_by_target_path[target_path]:
+ imports[module_util].add(target_path)
+
+ for module_util in sorted(imports):
+ if not imports[module_util]:
+ display.warning('No imports found which use the "%s" module_util.' % module_util)
+
+ return imports
+
+
+def get_powershell_module_utils_name(path): # type: (str) -> str
+ """Return a namespace and name from the given module_utils path."""
+ base_path = data_context().content.module_utils_powershell_path
+
+ if data_context().content.collection:
+ prefix = 'ansible_collections.' + data_context().content.collection.prefix + 'plugins.module_utils.'
+ else:
+ prefix = ''
+
+ name = prefix + os.path.splitext(os.path.relpath(path, base_path))[0].replace(os.path.sep, '.')
+
+ return name
+
+
+def enumerate_module_utils():
+ """Return a list of available module_utils imports.
+ :rtype: set[str]
+ """
+ return set(get_powershell_module_utils_name(p)
+ for p in data_context().content.walk_files(data_context().content.module_utils_powershell_path)
+ if os.path.splitext(p)[1] == '.psm1')
+
+
+def extract_powershell_module_utils_imports(path, module_utils):
+ """Return a list of module_utils imports found in the specified source file.
+ :type path: str
+ :type module_utils: set[str]
+ :rtype: set[str]
+ """
+ imports = set()
+
+ code = read_text_file(path)
+
+ if data_context().content.is_ansible and '# POWERSHELL_COMMON' in code:
+ imports.add('Ansible.ModuleUtils.Legacy')
+
+ lines = code.splitlines()
+ line_number = 0
+
+ for line in lines:
+ line_number += 1
+ match = re.search(r'(?i)^#\s*(?:requires\s+-module(?:s?)|ansiblerequires\s+-powershell)\s*((?:Ansible|ansible_collections|\.)\..+)', line)
+
+ if not match:
+ continue
+
+ import_name = resolve_csharp_ps_util(match.group(1), path)
+
+ if import_name in module_utils:
+ imports.add(import_name)
+ elif data_context().content.is_ansible or \
+ import_name.startswith('ansible_collections.%s' % data_context().content.prefix):
+ display.warning('%s:%d Invalid module_utils import: %s' % (path, line_number, import_name))
+
+ return imports
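
The import detection above is regex driven. This small sketch applies the same pattern to a couple of illustrative PowerShell lines to show what group(1) captures:

    import re

    # The same pattern used above to find module_utils imports in PowerShell source.
    pattern = r'(?i)^#\s*(?:requires\s+-module(?:s?)|ansiblerequires\s+-powershell)\s*((?:Ansible|ansible_collections|\.)\..+)'

    # Hypothetical module source lines; only the requires lines should match.
    lines = [
        '#Requires -Module Ansible.ModuleUtils.Legacy',
        '#AnsibleRequires -PowerShell ansible_collections.ns.col.plugins.module_utils.helper',
        '$ErrorActionPreference = "Stop"',
    ]

    for line in lines:
        match = re.search(pattern, line)
        if match:
            print(match.group(1))  # the module_utils name to resolve
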
diff --git a/test/lib/ansible_test/_internal/provider/__init__.py b/test/lib/ansible_test/_internal/provider/__init__.py
new file mode 100644
index 00000000..6e034b53
--- /dev/null
+++ b/test/lib/ansible_test/_internal/provider/__init__.py
@@ -0,0 +1,78 @@
+"""Provider (plugin) infrastructure for ansible-test."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import abc
+import os
+
+from .. import types as t
+
+from ..util import (
+ ABC,
+ ApplicationError,
+ get_subclasses,
+)
+
+
+try:
+ TPathProvider = t.TypeVar('TPathProvider', bound='PathProvider')
+except AttributeError:
+ TPathProvider = None # pylint: disable=invalid-name
+
+
+def get_path_provider_classes(provider_type): # type: (t.Type[TPathProvider]) -> t.List[t.Type[TPathProvider]]
+ """Return a list of path provider classes of the given type."""
+ return sorted(get_subclasses(provider_type), key=lambda c: (c.priority, c.__name__))
+
+
+def find_path_provider(provider_type, # type: t.Type[TPathProvider]
+ provider_classes, # type: t.List[t.Type[TPathProvider]]
+ path, # type: str
+ walk, # type: bool
+ ): # type: (...) -> TPathProvider
+ """Return the first found path provider of the given type for the given path."""
+ sequences = sorted(set(pc.sequence for pc in provider_classes if pc.sequence > 0))
+
+ for sequence in sequences:
+ candidate_path = path
+ tier_classes = [pc for pc in provider_classes if pc.sequence == sequence]
+
+ while True:
+ for provider_class in tier_classes:
+ if provider_class.is_content_root(candidate_path):
+ return provider_class(candidate_path)
+
+ if not walk:
+ break
+
+ parent_path = os.path.dirname(candidate_path)
+
+ if parent_path == candidate_path:
+ break
+
+ candidate_path = parent_path
+
+ raise ProviderNotFoundForPath(provider_type, path)
+
+
+class ProviderNotFoundForPath(ApplicationError):
+ """Exception generated when a path based provider cannot be found for a given path."""
+ def __init__(self, provider_type, path): # type: (t.Type, str) -> None
+ super(ProviderNotFoundForPath, self).__init__('No %s found for path: %s' % (provider_type.__name__, path))
+
+ self.provider_type = provider_type
+ self.path = path
+
+
+class PathProvider(ABC):
+ """Base class for provider plugins that are path based."""
+ sequence = 500
+ priority = 500
+
+ def __init__(self, root): # type: (str) -> None
+ self.root = root
+
+ @staticmethod
+ @abc.abstractmethod
+ def is_content_root(path): # type: (str) -> bool
+ """Return True if the given path is a content root for this provider."""
diff --git a/test/lib/ansible_test/_internal/provider/layout/__init__.py b/test/lib/ansible_test/_internal/provider/layout/__init__.py
new file mode 100644
index 00000000..03d596fc
--- /dev/null
+++ b/test/lib/ansible_test/_internal/provider/layout/__init__.py
@@ -0,0 +1,232 @@
+"""Code for finding content."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import abc
+import collections
+import os
+
+from ... import types as t
+
+from ...util import (
+ ANSIBLE_SOURCE_ROOT,
+)
+
+from .. import (
+ PathProvider,
+)
+
+
+class Layout:
+ """Description of content locations and helper methods to access content."""
+ def __init__(self,
+ root, # type: str
+ paths, # type: t.List[str]
+ ): # type: (...) -> None
+ self.root = root
+
+ self.__paths = paths # contains both file paths and symlinked directory paths (ending with os.path.sep)
+ self.__files = [path for path in paths if not path.endswith(os.path.sep)] # contains only file paths
+ self.__paths_tree = paths_to_tree(self.__paths)
+ self.__files_tree = paths_to_tree(self.__files)
+
+ def all_files(self, include_symlinked_directories=False): # type: (bool) -> t.List[str]
+ """Return a list of all file paths."""
+ if include_symlinked_directories:
+ return self.__paths
+
+ return self.__files
+
+ def walk_files(self, directory, include_symlinked_directories=False): # type: (str, bool) -> t.List[str]
+ """Return a list of file paths found recursively under the given directory."""
+ if include_symlinked_directories:
+ tree = self.__paths_tree
+ else:
+ tree = self.__files_tree
+
+ parts = directory.rstrip(os.path.sep).split(os.path.sep)
+ item = get_tree_item(tree, parts)
+
+ if not item:
+ return []
+
+ directories = collections.deque(item[0].values())
+
+ files = list(item[1])
+
+ while directories:
+ item = directories.pop()
+ directories.extend(item[0].values())
+ files.extend(item[1])
+
+ return files
+
+ def get_dirs(self, directory): # type: (str) -> t.List[str]
+ """Return a list directory paths found directly under the given directory."""
+ parts = directory.rstrip(os.path.sep).split(os.path.sep)
+ item = get_tree_item(self.__files_tree, parts)
+ return [os.path.join(directory, key) for key in item[0].keys()] if item else []
+
+ def get_files(self, directory): # type: (str) -> t.List[str]
+ """Return a list of file paths found directly under the given directory."""
+ parts = directory.rstrip(os.path.sep).split(os.path.sep)
+ item = get_tree_item(self.__files_tree, parts)
+ return item[1] if item else []
+
+
+class ContentLayout(Layout):
+ """Information about the current Ansible content being tested."""
+ def __init__(self,
+ root, # type: str
+ paths, # type: t.List[str]
+ plugin_paths, # type: t.Dict[str, str]
+ collection, # type: t.Optional[CollectionDetail]
+ test_path, # type: str
+ results_path, # type: str
+ sanity_path, # type: str
+ sanity_messages, # type: t.Optional[LayoutMessages]
+ integration_path, # type: str
+ integration_targets_path, # type: str
+ integration_vars_path, # type: str
+ integration_messages, # type: t.Optional[LayoutMessages]
+ unit_path, # type: str
+ unit_module_path, # type: str
+ unit_module_utils_path, # type: str
+ unit_messages, # type: t.Optional[LayoutMessages]
+ ): # type: (...) -> None
+ super(ContentLayout, self).__init__(root, paths)
+
+ self.plugin_paths = plugin_paths
+ self.collection = collection
+ self.test_path = test_path
+ self.results_path = results_path
+ self.sanity_path = sanity_path
+ self.sanity_messages = sanity_messages
+ self.integration_path = integration_path
+ self.integration_targets_path = integration_targets_path
+ self.integration_vars_path = integration_vars_path
+ self.integration_messages = integration_messages
+ self.unit_path = unit_path
+ self.unit_module_path = unit_module_path
+ self.unit_module_utils_path = unit_module_utils_path
+ self.unit_messages = unit_messages
+
+ self.is_ansible = root == ANSIBLE_SOURCE_ROOT
+
+ @property
+ def prefix(self): # type: () -> str
+ """Return the collection prefix or an empty string if not a collection."""
+ if self.collection:
+ return self.collection.prefix
+
+ return ''
+
+ @property
+ def module_path(self): # type: () -> t.Optional[str]
+ """Return the path where modules are found, if any."""
+ return self.plugin_paths.get('modules')
+
+ @property
+ def module_utils_path(self): # type: () -> t.Optional[str]
+ """Return the path where module_utils are found, if any."""
+ return self.plugin_paths.get('module_utils')
+
+ @property
+ def module_utils_powershell_path(self): # type: () -> t.Optional[str]
+ """Return the path where powershell module_utils are found, if any."""
+ if self.is_ansible:
+ return os.path.join(self.plugin_paths['module_utils'], 'powershell')
+
+ return self.plugin_paths.get('module_utils')
+
+ @property
+ def module_utils_csharp_path(self): # type: () -> t.Optional[str]
+ """Return the path where csharp module_utils are found, if any."""
+ if self.is_ansible:
+ return os.path.join(self.plugin_paths['module_utils'], 'csharp')
+
+ return self.plugin_paths.get('module_utils')
+
+
+class LayoutMessages:
+ """Messages generated during layout creation that should be deferred for later display."""
+ def __init__(self):
+ self.info = [] # type: t.List[str]
+ self.warning = [] # type: t.List[str]
+ self.error = [] # type: t.List[str]
+
+
+class CollectionDetail:
+ """Details about the layout of the current collection."""
+ def __init__(self,
+ name, # type: str
+ namespace, # type: str
+ root, # type: str
+ ): # type: (...) -> None
+ self.name = name
+ self.namespace = namespace
+ self.root = root
+ self.full_name = '%s.%s' % (namespace, name)
+ self.prefix = '%s.' % self.full_name
+ self.directory = os.path.join('ansible_collections', namespace, name)
+
+
+class LayoutProvider(PathProvider):
+ """Base class for layout providers."""
+ PLUGIN_TYPES = (
+ 'action',
+ 'become',
+ 'cache',
+ 'callback',
+ 'cliconf',
+ 'connection',
+ 'doc_fragments',
+ 'filter',
+ 'httpapi',
+ 'inventory',
+ 'lookup',
+ 'module_utils',
+ 'modules',
+ 'netconf',
+ 'shell',
+ 'strategy',
+ 'terminal',
+ 'test',
+ 'vars',
+ )
+
+ @abc.abstractmethod
+ def create(self, root, paths): # type: (str, t.List[str]) -> ContentLayout
+ """Create a layout using the given root and paths."""
+
+
+def paths_to_tree(paths): # type: (t.List[str]) -> t.Tuple[t.Dict[str, t.Any], t.List[str]]
+ """Return a filesystem tree from the given list of paths."""
+ tree = {}, []
+
+ for path in paths:
+ parts = path.split(os.path.sep)
+ root = tree
+
+ for part in parts[:-1]:
+ if part not in root[0]:
+ root[0][part] = {}, []
+
+ root = root[0][part]
+
+ root[1].append(path)
+
+ return tree
+
+
+def get_tree_item(tree, parts): # type: (t.Tuple[t.Dict[str, t.Any], t.List[str]], t.List[str]) -> t.Optional[t.Tuple[t.Dict[str, t.Any], t.List[str]]]
+ """Return the portion of the tree found under the path given by parts, or None if it does not exist."""
+ root = tree
+
+ for part in parts:
+ root = root[0].get(part)
+
+ if not root:
+ return None
+
+ return root
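
The Layout class above indexes paths with a nested (directories, files) tuple tree. The sketch below duplicates paths_to_tree() with a few illustrative paths to show the resulting structure:

    import os

    def paths_to_tree(paths):
        """Build the same (directories, files) tuple tree used by Layout above."""
        tree = {}, []
        for path in paths:
            parts = path.split(os.path.sep)
            root = tree
            for part in parts[:-1]:
                if part not in root[0]:
                    root[0][part] = {}, []
                root = root[0][part]
            root[1].append(path)
        return tree

    tree = paths_to_tree(['plugins/modules/ping.py', 'plugins/modules/copy.py', 'README.md'])
    # tree[0] maps directory names to sub-trees, tree[1] lists files directly at this level
    print(sorted(tree[0]))                      # ['plugins']
    print(tree[1])                              # ['README.md']
    print(tree[0]['plugins'][0]['modules'][1])  # the two module paths
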
diff --git a/test/lib/ansible_test/_internal/provider/layout/ansible.py b/test/lib/ansible_test/_internal/provider/layout/ansible.py
new file mode 100644
index 00000000..49ca482b
--- /dev/null
+++ b/test/lib/ansible_test/_internal/provider/layout/ansible.py
@@ -0,0 +1,47 @@
+"""Layout provider for Ansible source."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ... import types as t
+
+from . import (
+ ContentLayout,
+ LayoutProvider,
+)
+
+
+class AnsibleLayout(LayoutProvider):
+ """Layout provider for Ansible source."""
+ @staticmethod
+ def is_content_root(path): # type: (str) -> bool
+ """Return True if the given path is a content root for this provider."""
+ return os.path.exists(os.path.join(path, 'setup.py')) and os.path.exists(os.path.join(path, 'bin/ansible-test'))
+
+ def create(self, root, paths): # type: (str, t.List[str]) -> ContentLayout
+ """Create a Layout using the given root and paths."""
+ plugin_paths = dict((p, os.path.join('lib/ansible/plugins', p)) for p in self.PLUGIN_TYPES)
+
+ plugin_paths.update(dict(
+ modules='lib/ansible/modules',
+ module_utils='lib/ansible/module_utils',
+ ))
+
+ return ContentLayout(root,
+ paths,
+ plugin_paths=plugin_paths,
+ collection=None,
+ test_path='test',
+ results_path='test/results',
+ sanity_path='test/sanity',
+ sanity_messages=None,
+ integration_path='test/integration',
+ integration_targets_path='test/integration/targets',
+ integration_vars_path='test/integration/integration_config.yml',
+ integration_messages=None,
+ unit_path='test/units',
+ unit_module_path='test/units/modules',
+ unit_module_utils_path='test/units/module_utils',
+ unit_messages=None,
+ )
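
AnsibleLayout.create() builds its plugin_paths map by pointing every plugin type into lib/ansible/plugins and then overriding modules and module_utils. A small illustration with an abbreviated set of plugin types:

    import os

    plugin_types = ('action', 'filter', 'lookup')  # abbreviated from LayoutProvider.PLUGIN_TYPES above

    plugin_paths = dict((p, os.path.join('lib/ansible/plugins', p)) for p in plugin_types)
    plugin_paths.update(dict(modules='lib/ansible/modules', module_utils='lib/ansible/module_utils'))

    print(plugin_paths['filter'])   # lib/ansible/plugins/filter
    print(plugin_paths['modules'])  # lib/ansible/modules
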
diff --git a/test/lib/ansible_test/_internal/provider/layout/collection.py b/test/lib/ansible_test/_internal/provider/layout/collection.py
new file mode 100644
index 00000000..ffad29f2
--- /dev/null
+++ b/test/lib/ansible_test/_internal/provider/layout/collection.py
@@ -0,0 +1,123 @@
+"""Layout provider for Ansible collections."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ... import types as t
+
+from . import (
+ ContentLayout,
+ LayoutProvider,
+ CollectionDetail,
+ LayoutMessages,
+)
+
+
+class CollectionLayout(LayoutProvider):
+ """Layout provider for Ansible collections."""
+ __module_path = 'plugins/modules'
+ __unit_path = 'test/unit'
+
+ @staticmethod
+ def is_content_root(path): # type: (str) -> bool
+ """Return True if the given path is a content root for this provider."""
+ if os.path.basename(os.path.dirname(os.path.dirname(path))) == 'ansible_collections':
+ return True
+
+ return False
+
+ def create(self, root, paths): # type: (str, t.List[str]) -> ContentLayout
+ """Create a Layout using the given root and paths."""
+ plugin_paths = dict((p, os.path.join('plugins', p)) for p in self.PLUGIN_TYPES)
+
+ collection_root = os.path.dirname(os.path.dirname(root))
+ collection_dir = os.path.relpath(root, collection_root)
+ collection_namespace, collection_name = collection_dir.split(os.path.sep)
+
+ collection_root = os.path.dirname(collection_root)
+
+ sanity_messages = LayoutMessages()
+ integration_messages = LayoutMessages()
+ unit_messages = LayoutMessages()
+
+ # these apply to all test commands
+ self.__check_test_path(paths, sanity_messages)
+ self.__check_test_path(paths, integration_messages)
+ self.__check_test_path(paths, unit_messages)
+
+ # these apply to specific test commands
+ integration_targets_path = self.__check_integration_path(paths, integration_messages)
+ self.__check_unit_path(paths, unit_messages)
+
+ return ContentLayout(root,
+ paths,
+ plugin_paths=plugin_paths,
+ collection=CollectionDetail(
+ name=collection_name,
+ namespace=collection_namespace,
+ root=collection_root,
+ ),
+ test_path='tests',
+ results_path='tests/output',
+ sanity_path='tests/sanity',
+ sanity_messages=sanity_messages,
+ integration_path='tests/integration',
+ integration_targets_path=integration_targets_path.rstrip(os.path.sep),
+ integration_vars_path='tests/integration/integration_config.yml',
+ integration_messages=integration_messages,
+ unit_path='tests/unit',
+ unit_module_path='tests/unit/plugins/modules',
+ unit_module_utils_path='tests/unit/plugins/module_utils',
+ unit_messages=unit_messages,
+ )
+
+ @staticmethod
+ def __check_test_path(paths, messages): # type: (t.List[str], LayoutMessages) -> None
+ modern_test_path = 'tests/'
+ modern_test_path_found = any(path.startswith(modern_test_path) for path in paths)
+ legacy_test_path = 'test/'
+ legacy_test_path_found = any(path.startswith(legacy_test_path) for path in paths)
+
+ if modern_test_path_found and legacy_test_path_found:
+ messages.warning.append('Ignoring tests in "%s" in favor of "%s".' % (legacy_test_path, modern_test_path))
+ elif legacy_test_path_found:
+ messages.warning.append('Ignoring tests in "%s" that should be in "%s".' % (legacy_test_path, modern_test_path))
+
+ @staticmethod
+ def __check_integration_path(paths, messages): # type: (t.List[str], LayoutMessages) -> str
+ modern_integration_path = 'roles/test/'
+ modern_integration_path_found = any(path.startswith(modern_integration_path) for path in paths)
+ legacy_integration_path = 'tests/integration/targets/'
+ legacy_integration_path_found = any(path.startswith(legacy_integration_path) for path in paths)
+
+ if modern_integration_path_found and legacy_integration_path_found:
+ messages.warning.append('Ignoring tests in "%s" in favor of "%s".' % (legacy_integration_path, modern_integration_path))
+ integration_targets_path = modern_integration_path
+ elif legacy_integration_path_found:
+ messages.info.append('Falling back to tests in "%s" because "%s" was not found.' % (legacy_integration_path, modern_integration_path))
+ integration_targets_path = legacy_integration_path
+ elif modern_integration_path_found:
+ messages.info.append('Loading tests from "%s".' % modern_integration_path)
+ integration_targets_path = modern_integration_path
+ else:
+ messages.error.append('Cannot run integration tests without "%s" or "%s".' % (modern_integration_path, legacy_integration_path))
+ integration_targets_path = modern_integration_path
+
+ return integration_targets_path
+
+ @staticmethod
+ def __check_unit_path(paths, messages): # type: (t.List[str], LayoutMessages) -> None
+ modern_unit_path = 'tests/unit/'
+ modern_unit_path_found = any(path.startswith(modern_unit_path) for path in paths)
+ legacy_unit_path = 'tests/units/' # test/units/ will be covered by the warnings for test/ vs tests/
+ legacy_unit_path_found = any(path.startswith(legacy_unit_path) for path in paths)
+
+ if modern_unit_path_found and legacy_unit_path_found:
+ messages.warning.append('Ignoring tests in "%s" in favor of "%s".' % (legacy_unit_path, modern_unit_path))
+ elif legacy_unit_path_found:
+ messages.warning.append('Rename "%s" to "%s" to run unit tests.' % (legacy_unit_path, modern_unit_path))
+ elif modern_unit_path_found:
+ pass # unit tests only run from one directory so no message is needed
+ else:
+ messages.error.append('Cannot run unit tests without "%s".' % modern_unit_path)
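
CollectionLayout.create() derives the collection namespace, name and root purely from the directory structure under ansible_collections. A small sketch of that split with a hypothetical path:

    import os

    def describe_collection(path):
        """Split an ansible_collections checkout path the way CollectionLayout.create() does."""
        collection_root = os.path.dirname(os.path.dirname(path))  # .../ansible_collections
        collection_dir = os.path.relpath(path, collection_root)   # namespace/name
        namespace, name = collection_dir.split(os.path.sep)
        return os.path.dirname(collection_root), namespace, name

    # hypothetical path, purely for illustration
    print(describe_collection('/srv/collections/ansible_collections/my_ns/my_col'))
    # ('/srv/collections', 'my_ns', 'my_col')
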
diff --git a/test/lib/ansible_test/_internal/provider/source/__init__.py b/test/lib/ansible_test/_internal/provider/source/__init__.py
new file mode 100644
index 00000000..fab28b09
--- /dev/null
+++ b/test/lib/ansible_test/_internal/provider/source/__init__.py
@@ -0,0 +1,18 @@
+"""Common code for source providers."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import abc
+
+from ... import types as t
+
+from .. import (
+ PathProvider,
+)
+
+
+class SourceProvider(PathProvider):
+ """Base class for source providers."""
+ @abc.abstractmethod
+ def get_paths(self, path): # type: (str) -> t.List[str]
+ """Return the list of available content paths under the given path."""
diff --git a/test/lib/ansible_test/_internal/provider/source/git.py b/test/lib/ansible_test/_internal/provider/source/git.py
new file mode 100644
index 00000000..0bf81a1c
--- /dev/null
+++ b/test/lib/ansible_test/_internal/provider/source/git.py
@@ -0,0 +1,72 @@
+"""Source provider for a content root managed by git version control."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ... import types as t
+
+from ...git import (
+ Git,
+)
+
+from ...encoding import (
+ to_bytes,
+)
+
+from ...util import (
+ SubprocessError,
+)
+
+from . import (
+ SourceProvider,
+)
+
+
+class GitSource(SourceProvider):
+ """Source provider for a content root managed by git version control."""
+ @staticmethod
+ def is_content_root(path): # type: (str) -> bool
+ """Return True if the given path is a content root for this provider."""
+ return os.path.exists(os.path.join(path, '.git'))
+
+ def get_paths(self, path): # type: (str) -> t.List[str]
+ """Return the list of available content paths under the given path."""
+ paths = self.__get_paths(path)
+
+ try:
+ submodule_paths = Git(path).get_submodule_paths()
+ except SubprocessError:
+ if path == self.root:
+ raise
+
+ # older versions of git require submodule commands to be executed from the top level of the working tree
+ # git version 2.18.1 (centos8) does not have this restriction
+ # git version 1.8.3.1 (centos7) does
+ # fall back to using the top level directory of the working tree only when needed
+ # this avoids penalizing newer git versions with a potentially slower analysis due to additional submodules
+ rel_path = os.path.relpath(path, self.root) + os.path.sep
+
+ submodule_paths = Git(self.root).get_submodule_paths()
+ submodule_paths = [os.path.relpath(p, rel_path) for p in submodule_paths if p.startswith(rel_path)]
+
+ for submodule_path in submodule_paths:
+ paths.extend(os.path.join(submodule_path, p) for p in self.__get_paths(os.path.join(path, submodule_path)))
+
+ # git reports submodule directories as regular files
+ paths = [p for p in paths if p not in submodule_paths]
+
+ return paths
+
+ @staticmethod
+ def __get_paths(path): # type: (str) -> t.List[str]
+ """Return the list of available content paths under the given path."""
+ git = Git(path)
+ paths = git.get_file_names(['--cached', '--others', '--exclude-standard'])
+ deleted_paths = git.get_file_names(['--deleted'])
+ paths = sorted(set(paths) - set(deleted_paths))
+
+ # directory symlinks are reported by git as regular files but they need to be treated as directories
+ paths = [path + os.path.sep if os.path.isdir(to_bytes(path)) else path for path in paths]
+
+ return paths
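
GitSource relies on git ls-files to enumerate content: tracked plus untracked-but-not-ignored files, minus anything deleted from the working tree. A rough standalone equivalent of that enumeration (assumes it is run inside a git working tree):

    import subprocess

    def git_file_names(path, args):
        """Rough equivalent of Git(path).get_file_names(args): run git ls-files and split the output."""
        output = subprocess.check_output(['git', 'ls-files'] + args, cwd=path, universal_newlines=True)
        return output.splitlines()

    repo = '.'  # assumes the current directory is a git working tree
    paths = set(git_file_names(repo, ['--cached', '--others', '--exclude-standard']))
    paths -= set(git_file_names(repo, ['--deleted']))
    print(sorted(paths)[:5])
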
diff --git a/test/lib/ansible_test/_internal/provider/source/installed.py b/test/lib/ansible_test/_internal/provider/source/installed.py
new file mode 100644
index 00000000..d24a6e3d
--- /dev/null
+++ b/test/lib/ansible_test/_internal/provider/source/installed.py
@@ -0,0 +1,43 @@
+"""Source provider for content which has been installed."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ... import types as t
+
+from . import (
+ SourceProvider,
+)
+
+
+class InstalledSource(SourceProvider):
+ """Source provider for content which has been installed."""
+ sequence = 0 # disable automatic detection
+
+ @staticmethod
+ def is_content_root(path): # type: (str) -> bool
+ """Return True if the given path is a content root for this provider."""
+ return False
+
+ def get_paths(self, path): # type: (str) -> t.List[str]
+ """Return the list of available content paths under the given path."""
+ paths = []
+
+ kill_extensions = (
+ '.pyc',
+ '.pyo',
+ )
+
+ for root, _dummy, file_names in os.walk(path):
+ rel_root = os.path.relpath(root, path)
+
+ if rel_root == '.':
+ rel_root = ''
+
+ paths.extend([os.path.join(rel_root, file_name) for file_name in file_names
+ if not os.path.splitext(file_name)[1] in kill_extensions])
+
+ # NOTE: directory symlinks are ignored as there should be no directory symlinks for an install
+
+ return paths
diff --git a/test/lib/ansible_test/_internal/provider/source/unversioned.py b/test/lib/ansible_test/_internal/provider/source/unversioned.py
new file mode 100644
index 00000000..09105789
--- /dev/null
+++ b/test/lib/ansible_test/_internal/provider/source/unversioned.py
@@ -0,0 +1,87 @@
+"""Fallback source provider when no other provider matches the content root."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ... import types as t
+
+from ...constants import (
+ TIMEOUT_PATH,
+)
+
+from ...encoding import (
+ to_bytes,
+)
+
+from . import (
+ SourceProvider,
+)
+
+
+class UnversionedSource(SourceProvider):
+ """Fallback source provider when no other provider matches the content root."""
+ sequence = 0 # disable automatic detection
+
+ @staticmethod
+ def is_content_root(path): # type: (str) -> bool
+ """Return True if the given path is a content root for this provider."""
+ return False
+
+ def get_paths(self, path): # type: (str) -> t.List[str]
+ """Return the list of available content paths under the given path."""
+ paths = []
+
+ kill_any_dir = (
+ '.idea',
+ '.pytest_cache',
+ '__pycache__',
+ 'ansible.egg-info',
+ 'ansible_base.egg-info',
+ )
+
+ kill_sub_dir = {
+ 'test': (
+ 'results',
+ 'cache',
+ 'output',
+ ),
+ 'tests': (
+ 'output',
+ ),
+ 'docs/docsite': (
+ '_build',
+ ),
+ }
+
+ kill_sub_file = {
+ '': (
+ TIMEOUT_PATH,
+ ),
+ }
+
+ kill_extensions = (
+ '.pyc',
+ '.pyo',
+ '.retry',
+ )
+
+ for root, dir_names, file_names in os.walk(path):
+ rel_root = os.path.relpath(root, path)
+
+ if rel_root == '.':
+ rel_root = ''
+
+ for kill in kill_any_dir + kill_sub_dir.get(rel_root, ()):
+ if kill in dir_names:
+ dir_names.remove(kill)
+
+ kill_files = kill_sub_file.get(rel_root, ())
+
+ paths.extend([os.path.join(rel_root, file_name) for file_name in file_names
+ if not os.path.splitext(file_name)[1] in kill_extensions and file_name not in kill_files])
+
+ # include directory symlinks since they will not be traversed and would otherwise go undetected
+ paths.extend([os.path.join(rel_root, dir_name) + os.path.sep for dir_name in dir_names if os.path.islink(to_bytes(dir_name))])
+
+ return paths
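
UnversionedSource prunes unwanted directories by mutating the dir_names list that os.walk() yields, which prevents descent into them. A minimal sketch of that pruning technique with an abbreviated kill list:

    import os

    kill_any_dir = ('.idea', '__pycache__')        # directory names pruned everywhere, as above
    kill_extensions = ('.pyc', '.pyo', '.retry')

    paths = []

    for root, dir_names, file_names in os.walk('.'):
        rel_root = os.path.relpath(root, '.')
        if rel_root == '.':
            rel_root = ''

        # removing entries from dir_names in place stops os.walk() from descending into them
        for kill in kill_any_dir:
            if kill in dir_names:
                dir_names.remove(kill)

        paths.extend(os.path.join(rel_root, file_name) for file_name in file_names
                     if os.path.splitext(file_name)[1] not in kill_extensions)

    print(len(paths))
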
diff --git a/test/lib/ansible_test/_internal/sanity/__init__.py b/test/lib/ansible_test/_internal/sanity/__init__.py
new file mode 100644
index 00000000..976bbb2f
--- /dev/null
+++ b/test/lib/ansible_test/_internal/sanity/__init__.py
@@ -0,0 +1,946 @@
+"""Execute Ansible sanity tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import abc
+import glob
+import os
+import re
+import collections
+
+from .. import types as t
+
+from ..io import (
+ read_json_file,
+)
+
+from ..util import (
+ ApplicationError,
+ SubprocessError,
+ display,
+ import_plugins,
+ load_plugins,
+ parse_to_list_of_dict,
+ ABC,
+ ANSIBLE_TEST_DATA_ROOT,
+ is_binary_file,
+ read_lines_without_comments,
+ get_available_python_versions,
+ find_python,
+ is_subdir,
+ paths_to_dirs,
+ get_ansible_version,
+ str_to_version,
+)
+
+from ..util_common import (
+ run_command,
+ intercept_command,
+ handle_layout_messages,
+)
+
+from ..ansible_util import (
+ ansible_environment,
+)
+
+from ..target import (
+ walk_internal_targets,
+ walk_sanity_targets,
+ TestTarget,
+)
+
+from ..executor import (
+ get_changes_filter,
+ AllTargetsSkipped,
+ Delegate,
+ install_command_requirements,
+ SUPPORTED_PYTHON_VERSIONS,
+)
+
+from ..config import (
+ SanityConfig,
+)
+
+from ..test import (
+ TestSuccess,
+ TestFailure,
+ TestSkipped,
+ TestMessage,
+ calculate_best_confidence,
+)
+
+from ..data import (
+ data_context,
+)
+
+COMMAND = 'sanity'
+SANITY_ROOT = os.path.join(ANSIBLE_TEST_DATA_ROOT, 'sanity')
+
+
+def command_sanity(args):
+ """
+ :type args: SanityConfig
+ """
+ handle_layout_messages(data_context().content.sanity_messages)
+
+ changes = get_changes_filter(args)
+ require = args.require + changes
+ targets = SanityTargets.create(args.include, args.exclude, require)
+
+ if not targets.include:
+ raise AllTargetsSkipped()
+
+ if args.delegate:
+ raise Delegate(require=changes, exclude=args.exclude)
+
+ tests = sanity_get_tests()
+
+ if args.test:
+ tests = [target for target in tests if target.name in args.test]
+ else:
+ disabled = [target.name for target in tests if not target.enabled and not args.allow_disabled]
+ tests = [target for target in tests if target.enabled or args.allow_disabled]
+
+ if disabled:
+ display.warning('Skipping tests disabled by default without --allow-disabled: %s' % ', '.join(sorted(disabled)))
+
+ if args.skip_test:
+ tests = [target for target in tests if target.name not in args.skip_test]
+
+ total = 0
+ failed = []
+
+ for test in tests:
+ if args.list_tests:
+ display.info(test.name)
+ continue
+
+ available_versions = sorted(get_available_python_versions(SUPPORTED_PYTHON_VERSIONS).keys())
+
+ if args.python:
+ # specific version selected
+ versions = (args.python,)
+ elif isinstance(test, SanityMultipleVersion):
+ # try all supported versions for multi-version tests when a specific version has not been selected
+ versions = test.supported_python_versions
+ elif not test.supported_python_versions or args.python_version in test.supported_python_versions:
+ # the test works with any version or the version we're already running
+ versions = (args.python_version,)
+ else:
+ # available versions supported by the test
+ versions = tuple(sorted(set(available_versions) & set(test.supported_python_versions)))
+ # use the lowest available version supported by the test or the current version as a fallback (which will be skipped)
+ versions = versions[:1] or (args.python_version,)
+
+ for version in versions:
+ if isinstance(test, SanityMultipleVersion):
+ skip_version = version
+ else:
+ skip_version = None
+
+ options = ''
+
+ if test.supported_python_versions and version not in test.supported_python_versions:
+ display.warning("Skipping sanity test '%s' on unsupported Python %s." % (test.name, version))
+ result = SanitySkipped(test.name, skip_version)
+ elif not args.python and version not in available_versions:
+ display.warning("Skipping sanity test '%s' on Python %s due to missing interpreter." % (test.name, version))
+ result = SanitySkipped(test.name, skip_version)
+ else:
+ if test.supported_python_versions:
+ display.info("Running sanity test '%s' with Python %s" % (test.name, version))
+ else:
+ display.info("Running sanity test '%s'" % test.name)
+
+ if isinstance(test, SanityCodeSmellTest):
+ settings = test.load_processor(args)
+ elif isinstance(test, SanityMultipleVersion):
+ settings = test.load_processor(args, version)
+ elif isinstance(test, SanitySingleVersion):
+ settings = test.load_processor(args)
+ elif isinstance(test, SanityVersionNeutral):
+ settings = test.load_processor(args)
+ else:
+ raise Exception('Unsupported test type: %s' % type(test))
+
+ all_targets = targets.targets
+
+ if test.all_targets:
+ usable_targets = targets.targets
+ elif test.no_targets:
+ usable_targets = tuple()
+ else:
+ usable_targets = targets.include
+
+ all_targets = SanityTargets.filter_and_inject_targets(test, all_targets)
+ usable_targets = SanityTargets.filter_and_inject_targets(test, usable_targets)
+
+ usable_targets = sorted(test.filter_targets(list(usable_targets)))
+ usable_targets = settings.filter_skipped_targets(usable_targets)
+ sanity_targets = SanityTargets(tuple(all_targets), tuple(usable_targets))
+
+ if usable_targets or test.no_targets:
+ install_command_requirements(args, version, context=test.name, enable_pyyaml_check=True)
+
+ if isinstance(test, SanityCodeSmellTest):
+ result = test.test(args, sanity_targets, version)
+ elif isinstance(test, SanityMultipleVersion):
+ result = test.test(args, sanity_targets, version)
+ options = ' --python %s' % version
+ elif isinstance(test, SanitySingleVersion):
+ result = test.test(args, sanity_targets, version)
+ elif isinstance(test, SanityVersionNeutral):
+ result = test.test(args, sanity_targets)
+ else:
+ raise Exception('Unsupported test type: %s' % type(test))
+ else:
+ result = SanitySkipped(test.name, skip_version)
+
+ result.write(args)
+
+ total += 1
+
+ if isinstance(result, SanityFailure):
+ failed.append(result.test + options)
+
+ if failed:
+ message = 'The %d sanity test(s) listed below (out of %d) failed. See error output above for details.\n%s' % (
+ len(failed), total, '\n'.join(failed))
+
+ if args.failure_ok:
+ display.error(message)
+ else:
+ raise ApplicationError(message)
+
+
+def collect_code_smell_tests(): # type: () -> t.Tuple[SanityFunc, ...]
+ """Return a tuple of available code smell sanity tests."""
+ paths = glob.glob(os.path.join(SANITY_ROOT, 'code-smell', '*.py'))
+
+ if data_context().content.is_ansible:
+ # include Ansible specific code-smell tests which are not configured to be skipped
+ ansible_code_smell_root = os.path.join(data_context().content.root, 'test', 'sanity', 'code-smell')
+ skip_tests = read_lines_without_comments(os.path.join(ansible_code_smell_root, 'skip.txt'), remove_blank_lines=True, optional=True)
+ paths.extend(path for path in glob.glob(os.path.join(ansible_code_smell_root, '*.py')) if os.path.basename(path) not in skip_tests)
+
+ paths = sorted(p for p in paths if os.access(p, os.X_OK) and os.path.isfile(p))
+ tests = tuple(SanityCodeSmellTest(p) for p in paths)
+
+ return tests
+
+
+def sanity_get_tests():
+ """
+ :rtype: tuple[SanityFunc]
+ """
+ return SANITY_TESTS
+
+
+class SanityIgnoreParser:
+ """Parser for the consolidated sanity test ignore file."""
+ NO_CODE = '_'
+
+ def __init__(self, args): # type: (SanityConfig) -> None
+ if data_context().content.collection:
+ ansible_version = '%s.%s' % tuple(get_ansible_version().split('.')[:2])
+
+ ansible_label = 'Ansible %s' % ansible_version
+ file_name = 'ignore-%s.txt' % ansible_version
+ else:
+ ansible_label = 'Ansible'
+ file_name = 'ignore.txt'
+
+ self.args = args
+ self.relative_path = os.path.join(data_context().content.sanity_path, file_name)
+ self.path = os.path.join(data_context().content.root, self.relative_path)
+ self.ignores = collections.defaultdict(lambda: collections.defaultdict(dict)) # type: t.Dict[str, t.Dict[str, t.Dict[str, int]]]
+ self.skips = collections.defaultdict(lambda: collections.defaultdict(int)) # type: t.Dict[str, t.Dict[str, int]]
+ self.parse_errors = [] # type: t.List[t.Tuple[int, int, str]]
+ self.file_not_found_errors = [] # type: t.List[t.Tuple[int, str]]
+
+ lines = read_lines_without_comments(self.path, optional=True)
+ targets = SanityTargets.get_targets()
+ paths = set(target.path for target in targets)
+ tests_by_name = {} # type: t.Dict[str, SanityTest]
+ versioned_test_names = set() # type: t.Set[str]
+ unversioned_test_names = {} # type: t.Dict[str, str]
+ directories = paths_to_dirs(list(paths))
+ paths_by_test = {} # type: t.Dict[str, t.Set[str]]
+
+ display.info('Read %d sanity test ignore line(s) for %s from: %s' % (len(lines), ansible_label, self.relative_path), verbosity=1)
+
+ for test in sanity_get_tests():
+ test_targets = SanityTargets.filter_and_inject_targets(test, targets)
+
+ paths_by_test[test.name] = set(target.path for target in test.filter_targets(test_targets))
+
+ if isinstance(test, SanityMultipleVersion):
+ versioned_test_names.add(test.name)
+ tests_by_name.update(dict(('%s-%s' % (test.name, python_version), test) for python_version in test.supported_python_versions))
+ else:
+ unversioned_test_names.update(dict(('%s-%s' % (test.name, python_version), test.name) for python_version in SUPPORTED_PYTHON_VERSIONS))
+ tests_by_name[test.name] = test
+
+ for line_no, line in enumerate(lines, start=1):
+ if not line:
+ self.parse_errors.append((line_no, 1, "Line cannot be empty or contain only a comment"))
+ continue
+
+ parts = line.split(' ')
+ path = parts[0]
+ codes = parts[1:]
+
+ if not path:
+ self.parse_errors.append((line_no, 1, "Line cannot start with a space"))
+ continue
+
+ if path.endswith(os.path.sep):
+ if path not in directories:
+ self.file_not_found_errors.append((line_no, path))
+ continue
+ else:
+ if path not in paths:
+ self.file_not_found_errors.append((line_no, path))
+ continue
+
+ if not codes:
+ self.parse_errors.append((line_no, len(path), "Error code required after path"))
+ continue
+
+ code = codes[0]
+
+ if not code:
+ self.parse_errors.append((line_no, len(path) + 1, "Error code after path cannot be empty"))
+ continue
+
+ if len(codes) > 1:
+ self.parse_errors.append((line_no, len(path) + len(code) + 2, "Error code cannot contain spaces"))
+ continue
+
+ parts = code.split('!')
+ code = parts[0]
+ commands = parts[1:]
+
+ parts = code.split(':')
+ test_name = parts[0]
+ error_codes = parts[1:]
+
+ test = tests_by_name.get(test_name)
+
+ if not test:
+ unversioned_name = unversioned_test_names.get(test_name)
+
+ if unversioned_name:
+ self.parse_errors.append((line_no, len(path) + len(unversioned_name) + 2, "Sanity test '%s' cannot use a Python version like '%s'" % (
+ unversioned_name, test_name)))
+ elif test_name in versioned_test_names:
+ self.parse_errors.append((line_no, len(path) + len(test_name) + 1, "Sanity test '%s' requires a Python version like '%s-%s'" % (
+ test_name, test_name, args.python_version)))
+ else:
+ self.parse_errors.append((line_no, len(path) + 2, "Sanity test '%s' does not exist" % test_name))
+
+ continue
+
+ if path.endswith(os.path.sep) and not test.include_directories:
+ self.parse_errors.append((line_no, 1, "Sanity test '%s' does not support directory paths" % test_name))
+ continue
+
+ if path not in paths_by_test[test.name] and not test.no_targets:
+ self.parse_errors.append((line_no, 1, "Sanity test '%s' does not test path '%s'" % (test_name, path)))
+ continue
+
+ if commands and error_codes:
+ self.parse_errors.append((line_no, len(path) + len(test_name) + 2, "Error code cannot contain both '!' and ':' characters"))
+ continue
+
+ if commands:
+ command = commands[0]
+
+ if len(commands) > 1:
+ self.parse_errors.append((line_no, len(path) + len(test_name) + len(command) + 3, "Error code cannot contain multiple '!' characters"))
+ continue
+
+ if command == 'skip':
+ if not test.can_skip:
+ self.parse_errors.append((line_no, len(path) + len(test_name) + 2, "Sanity test '%s' cannot be skipped" % test_name))
+ continue
+
+ existing_line_no = self.skips.get(test_name, {}).get(path)
+
+ if existing_line_no:
+ self.parse_errors.append((line_no, 1, "Duplicate '%s' skip for path '%s' first found on line %d" % (test_name, path, existing_line_no)))
+ continue
+
+ self.skips[test_name][path] = line_no
+ continue
+
+ self.parse_errors.append((line_no, len(path) + len(test_name) + 2, "Command '!%s' not recognized" % command))
+ continue
+
+ if not test.can_ignore:
+ self.parse_errors.append((line_no, len(path) + 1, "Sanity test '%s' cannot be ignored" % test_name))
+ continue
+
+ if test.error_code:
+ if not error_codes:
+ self.parse_errors.append((line_no, len(path) + len(test_name) + 1, "Sanity test '%s' requires an error code" % test_name))
+ continue
+
+ error_code = error_codes[0]
+
+ if len(error_codes) > 1:
+ self.parse_errors.append((line_no, len(path) + len(test_name) + len(error_code) + 3, "Error code cannot contain multiple ':' characters"))
+ continue
+
+ if error_code in test.optional_error_codes:
+ self.parse_errors.append((line_no, len(path) + len(test_name) + 3, "Optional error code '%s' cannot be ignored" % (
+ error_code)))
+ continue
+ else:
+ if error_codes:
+ self.parse_errors.append((line_no, len(path) + len(test_name) + 2, "Sanity test '%s' does not support error codes" % test_name))
+ continue
+
+ error_code = self.NO_CODE
+
+ existing = self.ignores.get(test_name, {}).get(path, {}).get(error_code)
+
+ if existing:
+ if test.error_code:
+ self.parse_errors.append((line_no, 1, "Duplicate '%s' ignore for error code '%s' for path '%s' first found on line %d" % (
+ test_name, error_code, path, existing)))
+ else:
+ self.parse_errors.append((line_no, 1, "Duplicate '%s' ignore for path '%s' first found on line %d" % (
+ test_name, path, existing)))
+
+ continue
+
+ self.ignores[test_name][path][error_code] = line_no
+
+ @staticmethod
+ def load(args): # type: (SanityConfig) -> SanityIgnoreParser
+ """Return the current SanityIgnore instance, initializing it if needed."""
+ try:
+ return SanityIgnoreParser.instance
+ except AttributeError:
+ pass
+
+ SanityIgnoreParser.instance = SanityIgnoreParser(args)
+ return SanityIgnoreParser.instance
+
+
+class SanityIgnoreProcessor:
+ """Processor for sanity test ignores for a single run of one sanity test."""
+ def __init__(self,
+ args, # type: SanityConfig
+ test, # type: SanityTest
+ python_version, # type: t.Optional[str]
+ ): # type: (...) -> None
+ name = test.name
+ code = test.error_code
+
+ if python_version:
+ full_name = '%s-%s' % (name, python_version)
+ else:
+ full_name = name
+
+ self.args = args
+ self.test = test
+ self.code = code
+ self.parser = SanityIgnoreParser.load(args)
+ self.ignore_entries = self.parser.ignores.get(full_name, {})
+ self.skip_entries = self.parser.skips.get(full_name, {})
+ self.used_line_numbers = set() # type: t.Set[int]
+
+ def filter_skipped_targets(self, targets): # type: (t.List[TestTarget]) -> t.List[TestTarget]
+ """Return the given targets, with any skipped paths filtered out."""
+ return sorted(target for target in targets if target.path not in self.skip_entries)
+
+ def process_errors(self, errors, paths): # type: (t.List[SanityMessage], t.List[str]) -> t.List[SanityMessage]
+ """Return the given errors filtered for ignores and with any settings related errors included."""
+ errors = self.filter_messages(errors)
+ errors.extend(self.get_errors(paths))
+
+ errors = sorted(set(errors))
+
+ return errors
+
+ def filter_messages(self, messages): # type: (t.List[SanityMessage]) -> t.List[SanityMessage]
+ """Return a filtered list of the given messages using the entries that have been loaded."""
+ filtered = []
+
+ for message in messages:
+ if message.code in self.test.optional_error_codes and not self.args.enable_optional_errors:
+ continue
+
+ path_entry = self.ignore_entries.get(message.path)
+
+ if path_entry:
+ code = message.code if self.code else SanityIgnoreParser.NO_CODE
+ line_no = path_entry.get(code)
+
+ if line_no:
+ self.used_line_numbers.add(line_no)
+ continue
+
+ filtered.append(message)
+
+ return filtered
+
+ def get_errors(self, paths): # type: (t.List[str]) -> t.List[SanityMessage]
+ """Return error messages related to issues with the file."""
+ messages = []
+
+ # unused errors
+
+ unused = [] # type: t.List[t.Tuple[int, str, str]]
+
+ if self.test.no_targets or self.test.all_targets:
+ # tests which do not accept a target list, or which use all targets, always return all possible errors, so all ignores can be checked
+ targets = SanityTargets.get_targets()
+ test_targets = SanityTargets.filter_and_inject_targets(self.test, targets)
+ paths = [target.path for target in test_targets]
+
+ for path in paths:
+ path_entry = self.ignore_entries.get(path)
+
+ if not path_entry:
+ continue
+
+ unused.extend((line_no, path, code) for code, line_no in path_entry.items() if line_no not in self.used_line_numbers)
+
+ messages.extend(SanityMessage(
+ code=self.code,
+ message="Ignoring '%s' on '%s' is unnecessary" % (code, path) if self.code else "Ignoring '%s' is unnecessary" % path,
+ path=self.parser.relative_path,
+ line=line,
+ column=1,
+ confidence=calculate_best_confidence(((self.parser.path, line), (path, 0)), self.args.metadata) if self.args.metadata.changes else None,
+ ) for line, path, code in unused)
+
+ return messages
+
+
+class SanitySuccess(TestSuccess):
+ """Sanity test success."""
+ def __init__(self, test, python_version=None):
+ """
+ :type test: str
+ :type python_version: str
+ """
+ super(SanitySuccess, self).__init__(COMMAND, test, python_version)
+
+
+class SanitySkipped(TestSkipped):
+ """Sanity test skipped."""
+ def __init__(self, test, python_version=None):
+ """
+ :type test: str
+ :type python_version: str
+ """
+ super(SanitySkipped, self).__init__(COMMAND, test, python_version)
+
+
+class SanityFailure(TestFailure):
+ """Sanity test failure."""
+ def __init__(self, test, python_version=None, messages=None, summary=None):
+ """
+ :type test: str
+ :type python_version: str
+ :type messages: list[SanityMessage]
+ :type summary: unicode
+ """
+ super(SanityFailure, self).__init__(COMMAND, test, python_version, messages, summary)
+
+
+class SanityMessage(TestMessage):
+ """Single sanity test message for one file."""
+
+
+class SanityTargets:
+ """Sanity test target information."""
+ def __init__(self, targets, include): # type: (t.Tuple[TestTarget], t.Tuple[TestTarget]) -> None
+ self.targets = targets
+ self.include = include
+
+ @staticmethod
+ def create(include, exclude, require): # type: (t.List[str], t.List[str], t.List[str]) -> SanityTargets
+ """Create a SanityTargets instance from the given include, exclude and require lists."""
+ _targets = SanityTargets.get_targets()
+ _include = walk_internal_targets(_targets, include, exclude, require)
+ return SanityTargets(_targets, _include)
+
+ @staticmethod
+ def filter_and_inject_targets(test, targets): # type: (SanityTest, t.Iterable[TestTarget]) -> t.List[TestTarget]
+ """Filter and inject targets based on test requirements and the given target list."""
+ test_targets = list(targets)
+
+ if not test.include_symlinks:
+ # remove all symlinks unless supported by the test
+ test_targets = [target for target in test_targets if not target.symlink]
+
+ if not test.include_directories or not test.include_symlinks:
+ # exclude symlinked directories unless supported by the test
+ test_targets = [target for target in test_targets if not target.path.endswith(os.path.sep)]
+
+ if test.include_directories:
+ # include directories containing any of the included files
+ test_targets += tuple(TestTarget(path, None, None, '') for path in paths_to_dirs([target.path for target in test_targets]))
+
+ if not test.include_symlinks:
+ # remove all directory symlinks unless supported by the test
+ test_targets = [target for target in test_targets if not target.symlink]
+
+ return test_targets
+
+ @staticmethod
+ def get_targets(): # type: () -> t.Tuple[TestTarget, ...]
+ """Return a tuple of sanity test targets. Uses a cached version when available."""
+ try:
+ return SanityTargets.get_targets.targets
+ except AttributeError:
+ SanityTargets.get_targets.targets = tuple(sorted(walk_sanity_targets()))
+
+ return SanityTargets.get_targets.targets
+
+
+class SanityTest(ABC):
+ """Sanity test base class."""
+ __metaclass__ = abc.ABCMeta
+
+ ansible_only = False
+
+ def __init__(self, name):
+ self.name = name
+ self.enabled = True
+
+ # Optional error codes represent errors which spontaneously occur without changes to the content under test, such as those based on the current date.
+ # Because these errors can be unpredictable they behave differently than normal error codes:
+ # * They are not reported by default. The `--enable-optional-errors` option must be used to display these errors.
+ # * They cannot be ignored. This is done to maintain the integrity of the ignore system.
+ self.optional_error_codes = set()
+
+ @property
+ def error_code(self): # type: () -> t.Optional[str]
+ """Error code for ansible-test matching the format used by the underlying test program, or None if the program does not use error codes."""
+ return None
+
+ @property
+ def can_ignore(self): # type: () -> bool
+ """True if the test supports ignore entries."""
+ return True
+
+ @property
+ def can_skip(self): # type: () -> bool
+ """True if the test supports skip entries."""
+ return not self.all_targets and not self.no_targets
+
+ @property
+ def all_targets(self): # type: () -> bool
+ """True if test targets will not be filtered using includes, excludes, requires or changes. Mutually exclusive with no_targets."""
+ return False
+
+ @property
+ def no_targets(self): # type: () -> bool
+ """True if the test does not use test targets. Mutually exclusive with all_targets."""
+ return False
+
+ @property
+ def include_directories(self): # type: () -> bool
+ """True if the test targets should include directories."""
+ return False
+
+ @property
+ def include_symlinks(self): # type: () -> bool
+ """True if the test targets should include symlinks."""
+ return False
+
+ @property
+ def supported_python_versions(self): # type: () -> t.Optional[t.Tuple[str, ...]]
+ """A tuple of supported Python versions or None if the test does not depend on specific Python versions."""
+ return tuple(python_version for python_version in SUPPORTED_PYTHON_VERSIONS if python_version.startswith('3.'))
+
+ def filter_targets(self, targets): # type: (t.List[TestTarget]) -> t.List[TestTarget] # pylint: disable=unused-argument
+ """Return the given list of test targets, filtered to include only those relevant for the test."""
+ if self.no_targets:
+ return []
+
+ raise NotImplementedError('Sanity test "%s" must implement "filter_targets" or set "no_targets" to True.' % self.name)
+
+
+class SanityCodeSmellTest(SanityTest):
+ """Sanity test script."""
+ def __init__(self, path):
+ name = os.path.splitext(os.path.basename(path))[0]
+ config_path = os.path.splitext(path)[0] + '.json'
+
+ super(SanityCodeSmellTest, self).__init__(name)
+
+ self.path = path
+ self.config_path = config_path if os.path.exists(config_path) else None
+ self.config = None
+
+ if self.config_path:
+ self.config = read_json_file(self.config_path)
+
+ if self.config:
+ self.enabled = not self.config.get('disabled')
+
+ self.output = self.config.get('output') # type: t.Optional[str]
+ self.extensions = self.config.get('extensions') # type: t.List[str]
+ self.prefixes = self.config.get('prefixes') # type: t.List[str]
+ self.files = self.config.get('files') # type: t.List[str]
+ self.text = self.config.get('text') # type: t.Optional[bool]
+ self.ignore_self = self.config.get('ignore_self') # type: bool
+ self.intercept = self.config.get('intercept') # type: bool
+ self.minimum_python_version = self.config.get('minimum_python_version') # type: t.Optional[str]
+
+ self.__all_targets = self.config.get('all_targets') # type: bool
+ self.__no_targets = self.config.get('no_targets') # type: bool
+ self.__include_directories = self.config.get('include_directories') # type: bool
+ self.__include_symlinks = self.config.get('include_symlinks') # type: bool
+ else:
+ self.output = None
+ self.extensions = []
+ self.prefixes = []
+ self.files = []
+ self.text = None # type: t.Optional[bool]
+ self.ignore_self = False
+ self.intercept = False
+ self.minimum_python_version = None # type: t.Optional[str]
+
+ self.__all_targets = False
+ self.__no_targets = True
+ self.__include_directories = False
+ self.__include_symlinks = False
+
+ if self.no_targets:
+ mutually_exclusive = (
+ 'extensions',
+ 'prefixes',
+ 'files',
+ 'text',
+ 'ignore_self',
+ 'all_targets',
+ 'include_directories',
+ 'include_symlinks',
+ )
+
+ problems = sorted(name for name in mutually_exclusive if getattr(self, name))
+
+ if problems:
+ raise ApplicationError('Sanity test "%s" option "no_targets" is mutually exclusive with options: %s' % (self.name, ', '.join(problems)))
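+
+        # Illustrative sketch of a code smell test config; the keys match those read above, while
+        # the test name and values are hypothetical:
+        #
+        #   no-foo.json: {"output": "path-line-column-message", "extensions": [".py"],
+        #                 "prefixes": ["lib/ansible/"], "ignore_self": true}
+        #
+        # The matching no-foo.py script receives the newline-separated target paths on stdin and
+        # reports findings in the declared output format.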
+
+ @property
+ def all_targets(self): # type: () -> bool
+ """True if test targets will not be filtered using includes, excludes, requires or changes. Mutually exclusive with no_targets."""
+ return self.__all_targets
+
+ @property
+ def no_targets(self): # type: () -> bool
+ """True if the test does not use test targets. Mutually exclusive with all_targets."""
+ return self.__no_targets
+
+ @property
+ def include_directories(self): # type: () -> bool
+ """True if the test targets should include directories."""
+ return self.__include_directories
+
+ @property
+ def include_symlinks(self): # type: () -> bool
+ """True if the test targets should include symlinks."""
+ return self.__include_symlinks
+
+ def filter_targets(self, targets): # type: (t.List[TestTarget]) -> t.List[TestTarget]
+ """Return the given list of test targets, filtered to include only those relevant for the test."""
+ if self.no_targets:
+ return []
+
+ if self.text is not None:
+ if self.text:
+ targets = [target for target in targets if not is_binary_file(target.path)]
+ else:
+ targets = [target for target in targets if is_binary_file(target.path)]
+
+ if self.extensions:
+ targets = [target for target in targets if os.path.splitext(target.path)[1] in self.extensions
+ or (is_subdir(target.path, 'bin') and '.py' in self.extensions)]
+
+ if self.prefixes:
+ targets = [target for target in targets if any(target.path.startswith(pre) for pre in self.prefixes)]
+
+ if self.files:
+ targets = [target for target in targets if os.path.basename(target.path) in self.files]
+
+ if self.ignore_self and data_context().content.is_ansible:
+ relative_self_path = os.path.relpath(self.path, data_context().content.root)
+ targets = [target for target in targets if target.path != relative_self_path]
+
+ return targets
+
+ def test(self, args, targets, python_version):
+ """
+ :type args: SanityConfig
+ :type targets: SanityTargets
+ :type python_version: str
+ :rtype: TestResult
+ """
+ if self.minimum_python_version:
+ if str_to_version(python_version) < str_to_version(self.minimum_python_version):
+ display.warning("Skipping sanity test '%s' on unsupported Python %s; requires Python %s or newer." % (
+ self.name, python_version, self.minimum_python_version))
+ return SanitySkipped(self.name, 'Test requires Python %s or newer' % (self.minimum_python_version, ))
+
+ cmd = [find_python(python_version), self.path]
+
+ env = ansible_environment(args, color=False)
+
+ pattern = None
+ data = None
+
+ settings = self.load_processor(args)
+
+ paths = [target.path for target in targets.include]
+
+ if self.config:
+ if self.output == 'path-line-column-message':
+ pattern = '^(?P<path>[^:]*):(?P<line>[0-9]+):(?P<column>[0-9]+): (?P<message>.*)$'
+ elif self.output == 'path-message':
+ pattern = '^(?P<path>[^:]*): (?P<message>.*)$'
+ else:
+                raise ApplicationError('Unsupported output type: %s' % self.output)
+
+ if not self.no_targets:
+ data = '\n'.join(paths)
+
+ if data:
+ display.info(data, verbosity=4)
+
+ try:
+ if self.intercept:
+ stdout, stderr = intercept_command(args, cmd, target_name='sanity.%s' % self.name, data=data, env=env, capture=True, disable_coverage=True)
+ else:
+ stdout, stderr = run_command(args, cmd, data=data, env=env, capture=True)
+
+ status = 0
+ except SubprocessError as ex:
+ stdout = ex.stdout
+ stderr = ex.stderr
+ status = ex.status
+
+ if args.explain:
+ return SanitySuccess(self.name)
+
+ if stdout and not stderr:
+ if pattern:
+ matches = parse_to_list_of_dict(pattern, stdout)
+
+ messages = [SanityMessage(
+ message=m['message'],
+ path=m['path'],
+ line=int(m.get('line', 0)),
+ column=int(m.get('column', 0)),
+ ) for m in matches]
+
+ messages = settings.process_errors(messages, paths)
+
+ if not messages:
+ return SanitySuccess(self.name)
+
+ return SanityFailure(self.name, messages=messages)
+
+ if stderr or status:
+ summary = u'%s' % SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout)
+ return SanityFailure(self.name, summary=summary)
+
+ messages = settings.process_errors([], paths)
+
+ if messages:
+ return SanityFailure(self.name, messages=messages)
+
+ return SanitySuccess(self.name)
+
+ def load_processor(self, args): # type: (SanityConfig) -> SanityIgnoreProcessor
+ """Load the ignore processor for this sanity test."""
+ return SanityIgnoreProcessor(args, self, None)
+
+
+class SanityFunc(SanityTest):
+ """Base class for sanity test plugins."""
+ def __init__(self):
+ name = self.__class__.__name__
+ name = re.sub(r'Test$', '', name) # drop Test suffix
+ name = re.sub(r'(.)([A-Z][a-z]+)', r'\1-\2', name).lower() # use dashes instead of capitalization
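+        # e.g. (illustrative) "AnsibleDocTest" -> "ansible-doc", "BinSymlinksTest" -> "bin-symlinks"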
+
+ super(SanityFunc, self).__init__(name)
+
+
+class SanityVersionNeutral(SanityFunc):
+    """Base class for sanity test plugins which are independent of the python version being used."""
+ @abc.abstractmethod
+ def test(self, args, targets):
+ """
+ :type args: SanityConfig
+ :type targets: SanityTargets
+ :rtype: TestResult
+ """
+
+ def load_processor(self, args): # type: (SanityConfig) -> SanityIgnoreProcessor
+ """Load the ignore processor for this sanity test."""
+ return SanityIgnoreProcessor(args, self, None)
+
+ @property
+ def supported_python_versions(self): # type: () -> t.Optional[t.Tuple[str, ...]]
+ """A tuple of supported Python versions or None if the test does not depend on specific Python versions."""
+ return None
+
+
+class SanitySingleVersion(SanityFunc):
+ """Base class for sanity test plugins which should run on a single python version."""
+ @abc.abstractmethod
+ def test(self, args, targets, python_version):
+ """
+ :type args: SanityConfig
+ :type targets: SanityTargets
+ :type python_version: str
+ :rtype: TestResult
+ """
+
+ def load_processor(self, args): # type: (SanityConfig) -> SanityIgnoreProcessor
+ """Load the ignore processor for this sanity test."""
+ return SanityIgnoreProcessor(args, self, None)
+
+
+class SanityMultipleVersion(SanityFunc):
+ """Base class for sanity test plugins which should run on multiple python versions."""
+ @abc.abstractmethod
+ def test(self, args, targets, python_version):
+ """
+ :type args: SanityConfig
+ :type targets: SanityTargets
+ :type python_version: str
+ :rtype: TestResult
+ """
+
+ def load_processor(self, args, python_version): # type: (SanityConfig, str) -> SanityIgnoreProcessor
+ """Load the ignore processor for this sanity test."""
+ return SanityIgnoreProcessor(args, self, python_version)
+
+ @property
+ def supported_python_versions(self): # type: () -> t.Optional[t.Tuple[str, ...]]
+ """A tuple of supported Python versions or None if the test does not depend on specific Python versions."""
+ return SUPPORTED_PYTHON_VERSIONS
+
+
+SANITY_TESTS = (
+)
+
+
+def sanity_init():
+ """Initialize full sanity test list (includes code-smell scripts determined at runtime)."""
+ import_plugins('sanity')
+ sanity_plugins = {} # type: t.Dict[str, t.Type[SanityFunc]]
+ load_plugins(SanityFunc, sanity_plugins)
+ sanity_tests = tuple([plugin() for plugin in sanity_plugins.values() if data_context().content.is_ansible or not plugin.ansible_only])
+ global SANITY_TESTS # pylint: disable=locally-disabled, global-statement
+ SANITY_TESTS = tuple(sorted(sanity_tests + collect_code_smell_tests(), key=lambda k: k.name))
diff --git a/test/lib/ansible_test/_internal/sanity/ansible_doc.py b/test/lib/ansible_test/_internal/sanity/ansible_doc.py
new file mode 100644
index 00000000..c6b997cf
--- /dev/null
+++ b/test/lib/ansible_test/_internal/sanity/ansible_doc.py
@@ -0,0 +1,144 @@
+"""Sanity test for ansible-doc."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import collections
+import os
+import re
+
+from .. import types as t
+
+from ..sanity import (
+ SanitySingleVersion,
+ SanityFailure,
+ SanitySuccess,
+)
+
+from ..target import (
+ TestTarget,
+)
+
+from ..util import (
+ SubprocessError,
+ display,
+ is_subdir,
+)
+
+from ..util_common import (
+ intercept_command,
+)
+
+from ..ansible_util import (
+ ansible_environment,
+)
+
+from ..config import (
+ SanityConfig,
+)
+
+from ..data import (
+ data_context,
+)
+
+from ..coverage_util import (
+ coverage_context,
+)
+
+
+class AnsibleDocTest(SanitySingleVersion):
+ """Sanity test for ansible-doc."""
+ def filter_targets(self, targets): # type: (t.List[TestTarget]) -> t.List[TestTarget]
+ """Return the given list of test targets, filtered to include only those relevant for the test."""
+ # This should use documentable plugins from constants instead
+ unsupported_plugin_types = set([
+ # not supported by ansible-doc
+ 'action',
+ 'doc_fragments',
+ 'filter',
+ 'module_utils',
+ 'terminal',
+ 'test',
+ ])
+
+ plugin_paths = [plugin_path for plugin_type, plugin_path in data_context().content.plugin_paths.items() if plugin_type not in unsupported_plugin_types]
+
+ return [target for target in targets
+ if os.path.splitext(target.path)[1] == '.py'
+ and os.path.basename(target.path) != '__init__.py'
+ and any(is_subdir(target.path, path) for path in plugin_paths)
+ ]
+
+ def test(self, args, targets, python_version):
+ """
+ :type args: SanityConfig
+ :type targets: SanityTargets
+ :type python_version: str
+ :rtype: TestResult
+ """
+ settings = self.load_processor(args)
+
+ paths = [target.path for target in targets.include]
+
+ doc_targets = collections.defaultdict(list)
+ target_paths = collections.defaultdict(dict)
+
+ remap_types = dict(
+ modules='module',
+ )
+
+ for plugin_type, plugin_path in data_context().content.plugin_paths.items():
+ plugin_type = remap_types.get(plugin_type, plugin_type)
+
+ for plugin_file_path in [target.name for target in targets.include if is_subdir(target.path, plugin_path)]:
+ plugin_name = os.path.splitext(os.path.basename(plugin_file_path))[0]
+
+ if plugin_name.startswith('_'):
+ plugin_name = plugin_name[1:]
+
+ doc_targets[plugin_type].append(data_context().content.prefix + plugin_name)
+ target_paths[plugin_type][data_context().content.prefix + plugin_name] = plugin_file_path
+
+ env = ansible_environment(args, color=False)
+ error_messages = []
+
+ for doc_type in sorted(doc_targets):
+ for format_option in [None, '--json']:
+ cmd = ['ansible-doc', '-t', doc_type]
+ if format_option is not None:
+ cmd.append(format_option)
+ cmd.extend(sorted(doc_targets[doc_type]))
+
+ try:
+ with coverage_context(args):
+ stdout, stderr = intercept_command(args, cmd, target_name='ansible-doc', env=env, capture=True, python_version=python_version)
+
+ status = 0
+ except SubprocessError as ex:
+ stdout = ex.stdout
+ stderr = ex.stderr
+ status = ex.status
+
+ if status:
+ summary = u'%s' % SubprocessError(cmd=cmd, status=status, stderr=stderr)
+ return SanityFailure(self.name, summary=summary)
+
+ if stdout:
+ display.info(stdout.strip(), verbosity=3)
+
+ if stderr:
+ # ignore removed module/plugin warnings
+ stderr = re.sub(r'\[WARNING\]: [^ ]+ [^ ]+ has been removed\n', '', stderr).strip()
+
+ if stderr:
+ summary = u'Output on stderr from ansible-doc is considered an error.\n\n%s' % SubprocessError(cmd, stderr=stderr)
+ return SanityFailure(self.name, summary=summary)
+
+ if args.explain:
+ return SanitySuccess(self.name)
+
+ error_messages = settings.process_errors(error_messages, paths)
+
+ if error_messages:
+ return SanityFailure(self.name, messages=error_messages)
+
+ return SanitySuccess(self.name)
diff --git a/test/lib/ansible_test/_internal/sanity/bin_symlinks.py b/test/lib/ansible_test/_internal/sanity/bin_symlinks.py
new file mode 100644
index 00000000..bd0ba58e
--- /dev/null
+++ b/test/lib/ansible_test/_internal/sanity/bin_symlinks.py
@@ -0,0 +1,110 @@
+"""Sanity test for symlinks in the bin directory."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from .. import types as t
+
+from ..sanity import (
+ SanityVersionNeutral,
+ SanityMessage,
+ SanityFailure,
+ SanitySuccess,
+)
+
+from ..config import (
+ SanityConfig,
+)
+
+from ..data import (
+ data_context,
+)
+
+from ..payload import (
+ ANSIBLE_BIN_SYMLINK_MAP,
+ __file__ as symlink_map_full_path,
+)
+
+from ..util import (
+ ANSIBLE_BIN_PATH,
+ ANSIBLE_TEST_DATA_ROOT,
+)
+
+
+class BinSymlinksTest(SanityVersionNeutral):
+ """Sanity test for symlinks in the bin directory."""
+ ansible_only = True
+
+ @property
+ def can_ignore(self): # type: () -> bool
+ """True if the test supports ignore entries."""
+ return False
+
+ @property
+ def no_targets(self): # type: () -> bool
+ """True if the test does not use test targets. Mutually exclusive with all_targets."""
+ return True
+
+ # noinspection PyUnusedLocal
+ def test(self, args, targets): # pylint: disable=locally-disabled, unused-argument
+ """
+ :type args: SanityConfig
+ :type targets: SanityTargets
+ :rtype: TestResult
+ """
+ bin_root = ANSIBLE_BIN_PATH
+ bin_names = os.listdir(bin_root)
+ bin_paths = sorted(os.path.join(bin_root, path) for path in bin_names)
+
+ injector_root = os.path.join(ANSIBLE_TEST_DATA_ROOT, 'injector')
+ injector_names = os.listdir(injector_root)
+
+ errors = [] # type: t.List[t.Tuple[str, str]]
+
+ symlink_map_path = os.path.relpath(symlink_map_full_path, data_context().content.root)
+
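+        # ANSIBLE_BIN_SYMLINK_MAP maps each expected bin/ entry to its link target, e.g.
+        # (illustrative) "ansible-playbook" -> "ansible"; the loops below check the actual bin/
+        # contents and the test injector scripts against that map.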
+ for bin_path in bin_paths:
+ if not os.path.islink(bin_path):
+ errors.append((bin_path, 'not a symbolic link'))
+ continue
+
+ dest = os.readlink(bin_path)
+
+ if not os.path.exists(bin_path):
+ errors.append((bin_path, 'points to non-existent path "%s"' % dest))
+ continue
+
+ if not os.path.isfile(bin_path):
+ errors.append((bin_path, 'points to non-file "%s"' % dest))
+ continue
+
+ map_dest = ANSIBLE_BIN_SYMLINK_MAP.get(os.path.basename(bin_path))
+
+ if not map_dest:
+ errors.append((bin_path, 'missing from ANSIBLE_BIN_SYMLINK_MAP in file "%s"' % symlink_map_path))
+ continue
+
+ if dest != map_dest:
+ errors.append((bin_path, 'points to "%s" instead of "%s" from ANSIBLE_BIN_SYMLINK_MAP in file "%s"' % (dest, map_dest, symlink_map_path)))
+ continue
+
+ if not os.access(bin_path, os.X_OK):
+ errors.append((bin_path, 'points to non-executable file "%s"' % dest))
+ continue
+
+ for bin_name, dest in ANSIBLE_BIN_SYMLINK_MAP.items():
+ if bin_name not in bin_names:
+ bin_path = os.path.join(bin_root, bin_name)
+ errors.append((bin_path, 'missing symlink to "%s" defined in ANSIBLE_BIN_SYMLINK_MAP in file "%s"' % (dest, symlink_map_path)))
+
+ if bin_name not in injector_names:
+ injector_path = os.path.join(injector_root, bin_name)
+ errors.append((injector_path, 'missing symlink to "python.py"'))
+
+ messages = [SanityMessage(message=message, path=os.path.relpath(path, data_context().content.root), confidence=100) for path, message in errors]
+
+ if errors:
+ return SanityFailure(self.name, messages=messages)
+
+ return SanitySuccess(self.name)
diff --git a/test/lib/ansible_test/_internal/sanity/compile.py b/test/lib/ansible_test/_internal/sanity/compile.py
new file mode 100644
index 00000000..5a517272
--- /dev/null
+++ b/test/lib/ansible_test/_internal/sanity/compile.py
@@ -0,0 +1,92 @@
+"""Sanity test for proper python syntax."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from .. import types as t
+
+from ..sanity import (
+ SanityMultipleVersion,
+ SanityMessage,
+ SanityFailure,
+ SanitySuccess,
+ SanityTargets,
+ SANITY_ROOT,
+)
+
+from ..target import (
+ TestTarget,
+)
+
+from ..util import (
+ SubprocessError,
+ display,
+ find_python,
+ parse_to_list_of_dict,
+ is_subdir,
+)
+
+from ..util_common import (
+ run_command,
+)
+
+from ..config import (
+ SanityConfig,
+)
+
+
+class CompileTest(SanityMultipleVersion):
+ """Sanity test for proper python syntax."""
+ def filter_targets(self, targets): # type: (t.List[TestTarget]) -> t.List[TestTarget]
+ """Return the given list of test targets, filtered to include only those relevant for the test."""
+ return [target for target in targets if os.path.splitext(target.path)[1] == '.py' or is_subdir(target.path, 'bin')]
+
+ def test(self, args, targets, python_version):
+ """
+ :type args: SanityConfig
+ :type targets: SanityTargets
+ :type python_version: str
+ :rtype: TestResult
+ """
+ settings = self.load_processor(args, python_version)
+
+ paths = [target.path for target in targets.include]
+
+ cmd = [find_python(python_version), os.path.join(SANITY_ROOT, 'compile', 'compile.py')]
+
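+        # The compile.py helper reads the newline-separated paths from stdin and reports syntax
+        # errors as "path:line:column: message" lines, which the pattern below parses back out.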
+ data = '\n'.join(paths)
+
+ display.info(data, verbosity=4)
+
+ try:
+ stdout, stderr = run_command(args, cmd, data=data, capture=True)
+ status = 0
+ except SubprocessError as ex:
+ stdout = ex.stdout
+ stderr = ex.stderr
+ status = ex.status
+
+ if stderr:
+ raise SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout)
+
+ if args.explain:
+ return SanitySuccess(self.name, python_version=python_version)
+
+ pattern = r'^(?P<path>[^:]*):(?P<line>[0-9]+):(?P<column>[0-9]+): (?P<message>.*)$'
+
+ results = parse_to_list_of_dict(pattern, stdout)
+
+ results = [SanityMessage(
+ message=r['message'],
+ path=r['path'].replace('./', ''),
+ line=int(r['line']),
+ column=int(r['column']),
+ ) for r in results]
+
+ results = settings.process_errors(results, paths)
+
+ if results:
+ return SanityFailure(self.name, messages=results, python_version=python_version)
+
+ return SanitySuccess(self.name, python_version=python_version)
diff --git a/test/lib/ansible_test/_internal/sanity/ignores.py b/test/lib/ansible_test/_internal/sanity/ignores.py
new file mode 100644
index 00000000..8b6df50c
--- /dev/null
+++ b/test/lib/ansible_test/_internal/sanity/ignores.py
@@ -0,0 +1,89 @@
+"""Sanity test for the sanity ignore file."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ..sanity import (
+ SanityFailure,
+ SanityIgnoreParser,
+ SanityVersionNeutral,
+ SanitySuccess,
+ SanityMessage,
+)
+
+from ..test import (
+ calculate_confidence,
+ calculate_best_confidence,
+)
+
+from ..config import (
+ SanityConfig,
+)
+
+
+class IgnoresTest(SanityVersionNeutral):
+ """Sanity test for sanity test ignore entries."""
+ @property
+ def can_ignore(self): # type: () -> bool
+ """True if the test supports ignore entries."""
+ return False
+
+ @property
+ def no_targets(self): # type: () -> bool
+ """True if the test does not use test targets. Mutually exclusive with all_targets."""
+ return True
+
+ # noinspection PyUnusedLocal
+ def test(self, args, targets): # pylint: disable=locally-disabled, unused-argument
+ """
+ :type args: SanityConfig
+ :type targets: SanityTargets
+ :rtype: TestResult
+ """
+ sanity_ignore = SanityIgnoreParser.load(args)
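+
+        # Ignore file entries (illustrative) take the form "path test-name:error-code" for ignores
+        # and "path test-name!skip" for skips; the checks below report malformed lines, entries for
+        # missing files, and ignores made redundant by a matching skip.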
+
+ messages = []
+
+ # parse errors
+
+ messages.extend(SanityMessage(
+ message=message,
+ path=sanity_ignore.relative_path,
+ line=line,
+ column=column,
+ confidence=calculate_confidence(sanity_ignore.path, line, args.metadata) if args.metadata.changes else None,
+ ) for line, column, message in sanity_ignore.parse_errors)
+
+ # file not found errors
+
+ messages.extend(SanityMessage(
+ message="%s '%s' does not exist" % ("Directory" if path.endswith(os.path.sep) else "File", path),
+ path=sanity_ignore.relative_path,
+ line=line,
+ column=1,
+ confidence=calculate_best_confidence(((sanity_ignore.path, line), (path, 0)), args.metadata) if args.metadata.changes else None,
+ ) for line, path in sanity_ignore.file_not_found_errors)
+
+ # conflicting ignores and skips
+
+ for test_name, ignores in sanity_ignore.ignores.items():
+ for ignore_path, ignore_entry in ignores.items():
+ skip_line_no = sanity_ignore.skips.get(test_name, {}).get(ignore_path)
+
+ if not skip_line_no:
+ continue
+
+ for ignore_line_no in ignore_entry.values():
+ messages.append(SanityMessage(
+ message="Ignoring '%s' is unnecessary due to skip entry on line %d" % (ignore_path, skip_line_no),
+ path=sanity_ignore.relative_path,
+ line=ignore_line_no,
+ column=1,
+ confidence=calculate_confidence(sanity_ignore.path, ignore_line_no, args.metadata) if args.metadata.changes else None,
+ ))
+
+ if messages:
+ return SanityFailure(self.name, messages=messages)
+
+ return SanitySuccess(self.name)
diff --git a/test/lib/ansible_test/_internal/sanity/import.py b/test/lib/ansible_test/_internal/sanity/import.py
new file mode 100644
index 00000000..7d4776ae
--- /dev/null
+++ b/test/lib/ansible_test/_internal/sanity/import.py
@@ -0,0 +1,184 @@
+"""Sanity test for proper import exception handling."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from .. import types as t
+
+from ..sanity import (
+ SanityMultipleVersion,
+ SanityMessage,
+ SanityFailure,
+ SanitySuccess,
+ SanitySkipped,
+ SANITY_ROOT,
+)
+
+from ..target import (
+ TestTarget,
+)
+
+from ..util import (
+ SubprocessError,
+ remove_tree,
+ display,
+ parse_to_list_of_dict,
+ is_subdir,
+ generate_pip_command,
+ find_python,
+)
+
+from ..util_common import (
+ intercept_command,
+ run_command,
+ ResultType,
+)
+
+from ..ansible_util import (
+ ansible_environment,
+)
+
+from ..executor import (
+ generate_pip_install,
+)
+
+from ..config import (
+ SanityConfig,
+)
+
+from ..coverage_util import (
+ coverage_context,
+)
+
+from ..venv import (
+ create_virtual_environment,
+)
+
+from ..data import (
+ data_context,
+)
+
+
+class ImportTest(SanityMultipleVersion):
+ """Sanity test for proper import exception handling."""
+ def filter_targets(self, targets): # type: (t.List[TestTarget]) -> t.List[TestTarget]
+ """Return the given list of test targets, filtered to include only those relevant for the test."""
+ return [target for target in targets if os.path.splitext(target.path)[1] == '.py' and
+ (is_subdir(target.path, data_context().content.module_path) or is_subdir(target.path, data_context().content.module_utils_path))]
+
+ def test(self, args, targets, python_version):
+ """
+ :type args: SanityConfig
+ :type targets: SanityTargets
+ :type python_version: str
+ :rtype: TestResult
+ """
+ capture_pip = args.verbosity < 2
+
+ python = find_python(python_version)
+
+ if python_version.startswith('2.') and args.requirements:
+ # hack to make sure that virtualenv is available under Python 2.x
+ # on Python 3.x we can use the built-in venv
+ pip = generate_pip_command(python)
+ run_command(args, generate_pip_install(pip, '', packages=['virtualenv']), capture=capture_pip)
+
+ settings = self.load_processor(args, python_version)
+
+ paths = [target.path for target in targets.include]
+
+ env = ansible_environment(args, color=False)
+
+ temp_root = os.path.join(ResultType.TMP.path, 'sanity', 'import')
+
+ # create a clean virtual environment to minimize the available imports beyond the python standard library
+ virtual_environment_path = os.path.join(temp_root, 'minimal-py%s' % python_version.replace('.', ''))
+ virtual_environment_bin = os.path.join(virtual_environment_path, 'bin')
+
+ remove_tree(virtual_environment_path)
+
+ if not create_virtual_environment(args, python_version, virtual_environment_path):
+ display.warning("Skipping sanity test '%s' on Python %s due to missing virtual environment support." % (self.name, python_version))
+ return SanitySkipped(self.name, python_version)
+
+ # add the importer to our virtual environment so it can be accessed through the coverage injector
+ importer_path = os.path.join(virtual_environment_bin, 'importer.py')
+ yaml_to_json_path = os.path.join(virtual_environment_bin, 'yaml_to_json.py')
+ if not args.explain:
+ os.symlink(os.path.abspath(os.path.join(SANITY_ROOT, 'import', 'importer.py')), importer_path)
+ os.symlink(os.path.abspath(os.path.join(SANITY_ROOT, 'import', 'yaml_to_json.py')), yaml_to_json_path)
+
+ # activate the virtual environment
+ env['PATH'] = '%s:%s' % (virtual_environment_bin, env['PATH'])
+
+ env.update(
+ SANITY_TEMP_PATH=ResultType.TMP.path,
+ )
+
+ if data_context().content.collection:
+ env.update(
+ SANITY_COLLECTION_FULL_NAME=data_context().content.collection.full_name,
+ SANITY_EXTERNAL_PYTHON=python,
+ )
+
+ virtualenv_python = os.path.join(virtual_environment_bin, 'python')
+ virtualenv_pip = generate_pip_command(virtualenv_python)
+
+ # make sure coverage is available in the virtual environment if needed
+ if args.coverage:
+ run_command(args, generate_pip_install(virtualenv_pip, '', packages=['setuptools']), env=env, capture=capture_pip)
+ run_command(args, generate_pip_install(virtualenv_pip, '', packages=['coverage']), env=env, capture=capture_pip)
+
+ try:
+ # In some environments pkg_resources is installed as a separate pip package which needs to be removed.
+ # For example, using Python 3.8 on Ubuntu 18.04 a virtualenv is created with only pip and setuptools.
+ # However, a venv is created with an additional pkg-resources package which is independent of setuptools.
+ # Making sure pkg-resources is removed preserves the import test consistency between venv and virtualenv.
+ # Additionally, in the above example, the pyparsing package vendored with pkg-resources is out-of-date and generates deprecation warnings.
+ # Thus it is important to remove pkg-resources to prevent system installed packages from generating deprecation warnings.
+ run_command(args, virtualenv_pip + ['uninstall', '--disable-pip-version-check', '-y', 'pkg-resources'], env=env, capture=capture_pip)
+ except SubprocessError:
+ pass
+
+ run_command(args, virtualenv_pip + ['uninstall', '--disable-pip-version-check', '-y', 'setuptools'], env=env, capture=capture_pip)
+ run_command(args, virtualenv_pip + ['uninstall', '--disable-pip-version-check', '-y', 'pip'], env=env, capture=capture_pip)
+
+ cmd = ['importer.py']
+
+ data = '\n'.join(paths)
+
+ display.info(data, verbosity=4)
+
+ results = []
+
+ try:
+ with coverage_context(args):
+ stdout, stderr = intercept_command(args, cmd, self.name, env, capture=True, data=data, python_version=python_version,
+ virtualenv=virtualenv_python)
+
+ if stdout or stderr:
+ raise SubprocessError(cmd, stdout=stdout, stderr=stderr)
+ except SubprocessError as ex:
+ if ex.status != 10 or ex.stderr or not ex.stdout:
+ raise
+
+ pattern = r'^(?P<path>[^:]*):(?P<line>[0-9]+):(?P<column>[0-9]+): (?P<message>.*)$'
+
+ results = parse_to_list_of_dict(pattern, ex.stdout)
+
+ relative_temp_root = os.path.relpath(temp_root, data_context().content.root) + os.path.sep
+
+ results = [SanityMessage(
+ message=r['message'],
+ path=os.path.relpath(r['path'], relative_temp_root) if r['path'].startswith(relative_temp_root) else r['path'],
+ line=int(r['line']),
+ column=int(r['column']),
+ ) for r in results]
+
+ results = settings.process_errors(results, paths)
+
+ if results:
+ return SanityFailure(self.name, messages=results, python_version=python_version)
+
+ return SanitySuccess(self.name, python_version=python_version)
diff --git a/test/lib/ansible_test/_internal/sanity/integration_aliases.py b/test/lib/ansible_test/_internal/sanity/integration_aliases.py
new file mode 100644
index 00000000..e21c093a
--- /dev/null
+++ b/test/lib/ansible_test/_internal/sanity/integration_aliases.py
@@ -0,0 +1,399 @@
+"""Sanity test to check integration test aliases."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import textwrap
+import os
+
+from .. import types as t
+
+from ..sanity import (
+ SanityVersionNeutral,
+ SanityMessage,
+ SanityFailure,
+ SanitySuccess,
+ SanityTargets,
+ SANITY_ROOT,
+)
+
+from ..config import (
+ SanityConfig,
+)
+
+from ..target import (
+ filter_targets,
+ walk_posix_integration_targets,
+ walk_windows_integration_targets,
+ walk_integration_targets,
+ walk_module_targets,
+)
+
+from ..cloud import (
+ get_cloud_platforms,
+)
+
+from ..io import (
+ read_text_file,
+)
+
+from ..util import (
+ display,
+ find_python,
+ raw_command,
+)
+
+from ..util_common import (
+ write_json_test_results,
+ ResultType,
+)
+
+
+class IntegrationAliasesTest(SanityVersionNeutral):
+ """Sanity test to evaluate integration test aliases."""
+ CI_YML = '.azure-pipelines/azure-pipelines.yml'
+ TEST_ALIAS_PREFIX = 'shippable' # this will be changed at some point in the future
+
+ DISABLED = 'disabled/'
+ UNSTABLE = 'unstable/'
+ UNSUPPORTED = 'unsupported/'
+
+ EXPLAIN_URL = 'https://docs.ansible.com/ansible/devel/dev_guide/testing/sanity/integration-aliases.html'
+
+ TEMPLATE_DISABLED = """
+ The following integration tests are **disabled** [[explain]({explain_url}#disabled)]:
+
+ {tests}
+
+ Consider fixing the integration tests before or alongside changes.
+ """
+
+ TEMPLATE_UNSTABLE = """
+ The following integration tests are **unstable** [[explain]({explain_url}#unstable)]:
+
+ {tests}
+
+ Tests may need to be restarted due to failures unrelated to changes.
+ """
+
+ TEMPLATE_UNSUPPORTED = """
+ The following integration tests are **unsupported** [[explain]({explain_url}#unsupported)]:
+
+ {tests}
+
+ Consider running the tests manually or extending test infrastructure to add support.
+ """
+
+ TEMPLATE_UNTESTED = """
+ The following modules have **no integration tests** [[explain]({explain_url}#untested)]:
+
+ {tests}
+
+ Consider adding integration tests before or alongside changes.
+ """
+
+ ansible_only = True
+
+ def __init__(self):
+ super(IntegrationAliasesTest, self).__init__()
+
+ self._ci_config = {} # type: t.Dict[str, t.Any]
+ self._ci_test_groups = {} # type: t.Dict[str, t.List[int]]
+
+ @property
+ def can_ignore(self): # type: () -> bool
+ """True if the test supports ignore entries."""
+ return False
+
+ @property
+ def no_targets(self): # type: () -> bool
+ """True if the test does not use test targets. Mutually exclusive with all_targets."""
+ return True
+
+ def load_ci_config(self, args): # type: (SanityConfig) -> t.Dict[str, t.Any]
+ """Load and return the CI YAML configuration."""
+ if not self._ci_config:
+ self._ci_config = self.load_yaml(args, self.CI_YML)
+
+ return self._ci_config
+
+ @property
+ def ci_test_groups(self): # type: () -> t.Dict[str, t.List[int]]
+ """Return a dictionary of CI test names and their group(s)."""
+ if not self._ci_test_groups:
+ test_groups = {}
+
+ for stage in self._ci_config['stages']:
+ for job in stage['jobs']:
+ if job.get('template') != 'templates/matrix.yml':
+ continue
+
+ parameters = job['parameters']
+
+ groups = parameters.get('groups', [])
+ test_format = parameters.get('testFormat', '{0}')
+ test_group_format = parameters.get('groupFormat', '{0}/{{1}}')
+
+ for target in parameters['targets']:
+ test = target.get('test') or target.get('name')
+
+ if groups:
+ tests_formatted = [test_group_format.format(test_format).format(test, group) for group in groups]
+ else:
+ tests_formatted = [test_format.format(test)]
+
+ for test_formatted in tests_formatted:
+ parts = test_formatted.split('/')
+ key = parts[0]
+
+ if key in ('sanity', 'units'):
+ continue
+
+ try:
+ group = int(parts[-1])
+ except ValueError:
+ continue
+
+ if group < 1 or group > 99:
+ continue
+
+ group_set = test_groups.setdefault(key, set())
+ group_set.add(group)
+
+ self._ci_test_groups = dict((key, sorted(value)) for key, value in test_groups.items())
+
+ return self._ci_test_groups
+
+ def format_test_group_alias(self, name, fallback=''):
+ """
+ :type name: str
+ :type fallback: str
+ :rtype: str
+ """
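+        # Illustrative return values (assuming the CI config defines these groups):
+        #   groups 1-3 for "linux" -> "shippable/linux/group[1-3]/"
+        #   a single group 1       -> "shippable/linux/group1/"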
+ group_numbers = self.ci_test_groups.get(name, None)
+
+ if group_numbers:
+ if min(group_numbers) != 1:
+ display.warning('Min test group "%s" in %s is %d instead of 1.' % (name, self.CI_YML, min(group_numbers)), unique=True)
+
+ if max(group_numbers) != len(group_numbers):
+ display.warning('Max test group "%s" in %s is %d instead of %d.' % (name, self.CI_YML, max(group_numbers), len(group_numbers)), unique=True)
+
+ if max(group_numbers) > 9:
+ alias = '%s/%s/group(%s)/' % (self.TEST_ALIAS_PREFIX, name, '|'.join(str(i) for i in range(min(group_numbers), max(group_numbers) + 1)))
+ elif len(group_numbers) > 1:
+ alias = '%s/%s/group[%d-%d]/' % (self.TEST_ALIAS_PREFIX, name, min(group_numbers), max(group_numbers))
+ else:
+ alias = '%s/%s/group%d/' % (self.TEST_ALIAS_PREFIX, name, min(group_numbers))
+ elif fallback:
+ alias = '%s/%s/group%d/' % (self.TEST_ALIAS_PREFIX, fallback, 1)
+ else:
+ raise Exception('cannot find test group "%s" in %s' % (name, self.CI_YML))
+
+ return alias
+
+ def load_yaml(self, args, path): # type: (SanityConfig, str) -> t.Dict[str, t.Any]
+ """Load the specified YAML file and return the contents."""
+ yaml_to_json_path = os.path.join(SANITY_ROOT, self.name, 'yaml_to_json.py')
+ python = find_python(args.python_version)
+
+ return json.loads(raw_command([python, yaml_to_json_path], data=read_text_file(path), capture=True)[0])
+
+ def test(self, args, targets):
+ """
+ :type args: SanityConfig
+ :type targets: SanityTargets
+ :rtype: TestResult
+ """
+ if args.explain:
+ return SanitySuccess(self.name)
+
+ if not os.path.isfile(self.CI_YML):
+ return SanityFailure(self.name, messages=[SanityMessage(
+ message='file missing',
+ path=self.CI_YML,
+ )])
+
+ results = dict(
+ comments=[],
+ labels={},
+ )
+
+ self.load_ci_config(args)
+ self.check_changes(args, results)
+
+ write_json_test_results(ResultType.BOT, 'data-sanity-ci.json', results)
+
+ messages = []
+
+ messages += self.check_posix_targets(args)
+ messages += self.check_windows_targets()
+
+ if messages:
+ return SanityFailure(self.name, messages=messages)
+
+ return SanitySuccess(self.name)
+
+ def check_posix_targets(self, args):
+ """
+ :type args: SanityConfig
+ :rtype: list[SanityMessage]
+ """
+ posix_targets = tuple(walk_posix_integration_targets())
+
+ clouds = get_cloud_platforms(args, posix_targets)
+ cloud_targets = ['cloud/%s/' % cloud for cloud in clouds]
+
+ all_cloud_targets = tuple(filter_targets(posix_targets, ['cloud/'], include=True, directories=False, errors=False))
+ invalid_cloud_targets = tuple(filter_targets(all_cloud_targets, cloud_targets, include=False, directories=False, errors=False))
+
+ messages = []
+
+ for target in invalid_cloud_targets:
+ for alias in target.aliases:
+ if alias.startswith('cloud/') and alias != 'cloud/':
+ if any(alias.startswith(cloud_target) for cloud_target in cloud_targets):
+ continue
+
+ messages.append(SanityMessage('invalid alias `%s`' % alias, '%s/aliases' % target.path))
+
+ messages += self.check_ci_group(
+ targets=tuple(filter_targets(posix_targets, ['cloud/', '%s/generic/' % self.TEST_ALIAS_PREFIX], include=False,
+ directories=False, errors=False)),
+ find=self.format_test_group_alias('linux').replace('linux', 'posix'),
+ find_incidental=['%s/posix/incidental/' % self.TEST_ALIAS_PREFIX],
+ )
+
+ messages += self.check_ci_group(
+ targets=tuple(filter_targets(posix_targets, ['%s/generic/' % self.TEST_ALIAS_PREFIX], include=True, directories=False,
+ errors=False)),
+ find=self.format_test_group_alias('generic'),
+ )
+
+ for cloud in clouds:
+ messages += self.check_ci_group(
+ targets=tuple(filter_targets(posix_targets, ['cloud/%s/' % cloud], include=True, directories=False, errors=False)),
+ find=self.format_test_group_alias(cloud, 'cloud'),
+ find_incidental=['%s/%s/incidental/' % (self.TEST_ALIAS_PREFIX, cloud), '%s/cloud/incidental/' % self.TEST_ALIAS_PREFIX],
+ )
+
+ return messages
+
+ def check_windows_targets(self):
+ """
+ :rtype: list[SanityMessage]
+ """
+ windows_targets = tuple(walk_windows_integration_targets())
+
+ messages = []
+
+ messages += self.check_ci_group(
+ targets=windows_targets,
+ find=self.format_test_group_alias('windows'),
+ find_incidental=['%s/windows/incidental/' % self.TEST_ALIAS_PREFIX],
+ )
+
+ return messages
+
+ def check_ci_group(self, targets, find, find_incidental=None):
+ """
+ :type targets: tuple[CompletionTarget]
+ :type find: str
+ :type find_incidental: list[str] | None
+ :rtype: list[SanityMessage]
+ """
+ all_paths = set(target.path for target in targets)
+ supported_paths = set(target.path for target in filter_targets(targets, [find], include=True, directories=False, errors=False))
+ unsupported_paths = set(target.path for target in filter_targets(targets, [self.UNSUPPORTED], include=True, directories=False, errors=False))
+
+ if find_incidental:
+ incidental_paths = set(target.path for target in filter_targets(targets, find_incidental, include=True, directories=False, errors=False))
+ else:
+ incidental_paths = set()
+
+ unassigned_paths = all_paths - supported_paths - unsupported_paths - incidental_paths
+ conflicting_paths = supported_paths & unsupported_paths
+
+ unassigned_message = 'missing alias `%s` or `%s`' % (find.strip('/'), self.UNSUPPORTED.strip('/'))
+ conflicting_message = 'conflicting alias `%s` and `%s`' % (find.strip('/'), self.UNSUPPORTED.strip('/'))
+
+ messages = []
+
+ for path in unassigned_paths:
+ messages.append(SanityMessage(unassigned_message, '%s/aliases' % path))
+
+ for path in conflicting_paths:
+ messages.append(SanityMessage(conflicting_message, '%s/aliases' % path))
+
+ return messages
+
+ def check_changes(self, args, results):
+ """
+ :type args: SanityConfig
+ :type results: dict[str, any]
+ """
+ integration_targets = list(walk_integration_targets())
+ module_targets = list(walk_module_targets())
+
+ integration_targets_by_name = dict((target.name, target) for target in integration_targets)
+ module_names_by_path = dict((target.path, target.module) for target in module_targets)
+
+ disabled_targets = []
+ unstable_targets = []
+ unsupported_targets = []
+
+ for command in [command for command in args.metadata.change_description.focused_command_targets if 'integration' in command]:
+ for target in args.metadata.change_description.focused_command_targets[command]:
+ if self.DISABLED in integration_targets_by_name[target].aliases:
+ disabled_targets.append(target)
+ elif self.UNSTABLE in integration_targets_by_name[target].aliases:
+ unstable_targets.append(target)
+ elif self.UNSUPPORTED in integration_targets_by_name[target].aliases:
+ unsupported_targets.append(target)
+
+ untested_modules = []
+
+ for path in args.metadata.change_description.no_integration_paths:
+ module = module_names_by_path.get(path)
+
+ if module:
+ untested_modules.append(module)
+
+ comments = [
+ self.format_comment(self.TEMPLATE_DISABLED, disabled_targets),
+ self.format_comment(self.TEMPLATE_UNSTABLE, unstable_targets),
+ self.format_comment(self.TEMPLATE_UNSUPPORTED, unsupported_targets),
+ self.format_comment(self.TEMPLATE_UNTESTED, untested_modules),
+ ]
+
+ comments = [comment for comment in comments if comment]
+
+ labels = dict(
+ needs_tests=bool(untested_modules),
+ disabled_tests=bool(disabled_targets),
+ unstable_tests=bool(unstable_targets),
+ unsupported_tests=bool(unsupported_targets),
+ )
+
+ results['comments'] += comments
+ results['labels'].update(labels)
+
+ def format_comment(self, template, targets):
+ """
+ :type template: str
+ :type targets: list[str]
+ :rtype: str | None
+ """
+ if not targets:
+ return None
+
+ tests = '\n'.join('- %s' % target for target in targets)
+
+ data = dict(
+ explain_url=self.EXPLAIN_URL,
+ tests=tests,
+ )
+
+ message = textwrap.dedent(template).strip().format(**data)
+
+ return message
diff --git a/test/lib/ansible_test/_internal/sanity/pep8.py b/test/lib/ansible_test/_internal/sanity/pep8.py
new file mode 100644
index 00000000..9eb40dbc
--- /dev/null
+++ b/test/lib/ansible_test/_internal/sanity/pep8.py
@@ -0,0 +1,109 @@
+"""Sanity test for PEP 8 style guidelines using pycodestyle."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from .. import types as t
+
+from ..sanity import (
+ SanitySingleVersion,
+ SanityMessage,
+ SanityFailure,
+ SanitySuccess,
+ SANITY_ROOT,
+)
+
+from ..target import (
+ TestTarget,
+)
+
+from ..util import (
+ SubprocessError,
+ read_lines_without_comments,
+ parse_to_list_of_dict,
+ find_python,
+ is_subdir,
+)
+
+from ..util_common import (
+ run_command,
+)
+
+from ..config import (
+ SanityConfig,
+)
+
+
+class Pep8Test(SanitySingleVersion):
+ """Sanity test for PEP 8 style guidelines using pycodestyle."""
+ @property
+ def error_code(self): # type: () -> t.Optional[str]
+ """Error code for ansible-test matching the format used by the underlying test program, or None if the program does not use error codes."""
+ return 'A100'
+
+ def filter_targets(self, targets): # type: (t.List[TestTarget]) -> t.List[TestTarget]
+ """Return the given list of test targets, filtered to include only those relevant for the test."""
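+        # The ansible-doc invocations below use these names, e.g. (illustrative for a collection)
+        # "ansible-doc -t module ns.col.ping", once as plain text and once again with "--json".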
+ return [target for target in targets if os.path.splitext(target.path)[1] == '.py' or is_subdir(target.path, 'bin')]
+
+ def test(self, args, targets, python_version):
+ """
+ :type args: SanityConfig
+ :type targets: SanityTargets
+ :type python_version: str
+ :rtype: TestResult
+ """
+ current_ignore_file = os.path.join(SANITY_ROOT, 'pep8', 'current-ignore.txt')
+ current_ignore = sorted(read_lines_without_comments(current_ignore_file, remove_blank_lines=True))
+
+ settings = self.load_processor(args)
+
+ paths = [target.path for target in targets.include]
+
+ cmd = [
+ find_python(python_version),
+ '-m', 'pycodestyle',
+ '--max-line-length', '160',
+ '--config', '/dev/null',
+ '--ignore', ','.join(sorted(current_ignore)),
+ ] + paths
+
+ if paths:
+ try:
+ stdout, stderr = run_command(args, cmd, capture=True)
+ status = 0
+ except SubprocessError as ex:
+ stdout = ex.stdout
+ stderr = ex.stderr
+ status = ex.status
+
+ if stderr:
+ raise SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout)
+ else:
+ stdout = None
+
+ if args.explain:
+ return SanitySuccess(self.name)
+
+ if stdout:
+ pattern = '^(?P<path>[^:]*):(?P<line>[0-9]+):(?P<column>[0-9]+): (?P<code>[WE][0-9]{3}) (?P<message>.*)$'
+
+ results = parse_to_list_of_dict(pattern, stdout)
+ else:
+ results = []
+
+ results = [SanityMessage(
+ message=r['message'],
+ path=r['path'],
+ line=int(r['line']),
+ column=int(r['column']),
+ level='warning' if r['code'].startswith('W') else 'error',
+ code=r['code'],
+ ) for r in results]
+
+ errors = settings.process_errors(results, paths)
+
+ if errors:
+ return SanityFailure(self.name, messages=errors)
+
+ return SanitySuccess(self.name)
diff --git a/test/lib/ansible_test/_internal/sanity/pslint.py b/test/lib/ansible_test/_internal/sanity/pslint.py
new file mode 100644
index 00000000..256eee04
--- /dev/null
+++ b/test/lib/ansible_test/_internal/sanity/pslint.py
@@ -0,0 +1,121 @@
+"""Sanity test using PSScriptAnalyzer."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+import re
+
+from .. import types as t
+
+from ..sanity import (
+ SanityVersionNeutral,
+ SanityMessage,
+ SanityFailure,
+ SanitySuccess,
+ SanitySkipped,
+ SANITY_ROOT,
+)
+
+from ..target import (
+ TestTarget,
+)
+
+from ..util import (
+ SubprocessError,
+ find_executable,
+ ANSIBLE_TEST_DATA_ROOT,
+)
+
+from ..util_common import (
+ run_command,
+)
+
+from ..config import (
+ SanityConfig,
+)
+
+from ..data import (
+ data_context,
+)
+
+
+class PslintTest(SanityVersionNeutral):
+ """Sanity test using PSScriptAnalyzer."""
+ @property
+ def error_code(self): # type: () -> t.Optional[str]
+ """Error code for ansible-test matching the format used by the underlying test program, or None if the program does not use error codes."""
+ return 'AnsibleTest'
+
+ def filter_targets(self, targets): # type: (t.List[TestTarget]) -> t.List[TestTarget]
+ """Return the given list of test targets, filtered to include only those relevant for the test."""
+ return [target for target in targets if os.path.splitext(target.path)[1] in ('.ps1', '.psm1', '.psd1')]
+
+ def test(self, args, targets):
+ """
+ :type args: SanityConfig
+ :type targets: SanityTargets
+ :rtype: TestResult
+ """
+ settings = self.load_processor(args)
+
+ paths = [target.path for target in targets.include]
+
+ if not find_executable('pwsh', required='warning'):
+ return SanitySkipped(self.name)
+
+ cmds = []
+
+ if args.requirements:
+ cmds.append([os.path.join(ANSIBLE_TEST_DATA_ROOT, 'requirements', 'sanity.ps1')])
+
+ cmds.append([os.path.join(SANITY_ROOT, 'pslint', 'pslint.ps1')] + paths)
+
+ stdout = ''
+
+ for cmd in cmds:
+ try:
+ stdout, stderr = run_command(args, cmd, capture=True)
+ status = 0
+ except SubprocessError as ex:
+ stdout = ex.stdout
+ stderr = ex.stderr
+ status = ex.status
+
+ if stderr:
+ raise SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout)
+
+ if args.explain:
+ return SanitySuccess(self.name)
+
+ severity = [
+ 'Information',
+ 'Warning',
+ 'Error',
+ 'ParseError',
+ ]
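+
+        # PSScriptAnalyzer reports Severity as an integer index; the list above maps it back to a
+        # name (illustrative: Severity 1 -> "Warning") for the SanityMessage level set below.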
+
+ cwd = data_context().content.root + '/'
+
+ # replace unicode smart quotes and ellipsis with ascii versions
+ stdout = re.sub(u'[\u2018\u2019]', "'", stdout)
+ stdout = re.sub(u'[\u201c\u201d]', '"', stdout)
+ stdout = re.sub(u'[\u2026]', '...', stdout)
+
+ messages = json.loads(stdout)
+
+ errors = [SanityMessage(
+ code=m['RuleName'],
+ message=m['Message'],
+ path=m['ScriptPath'].replace(cwd, ''),
+ line=m['Line'] or 0,
+ column=m['Column'] or 0,
+ level=severity[m['Severity']],
+ ) for m in messages]
+
+ errors = settings.process_errors(errors, paths)
+
+ if errors:
+ return SanityFailure(self.name, messages=errors)
+
+ return SanitySuccess(self.name)
diff --git a/test/lib/ansible_test/_internal/sanity/pylint.py b/test/lib/ansible_test/_internal/sanity/pylint.py
new file mode 100644
index 00000000..324e5873
--- /dev/null
+++ b/test/lib/ansible_test/_internal/sanity/pylint.py
@@ -0,0 +1,289 @@
+"""Sanity test using pylint."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import itertools
+import json
+import os
+import datetime
+
+from .. import types as t
+
+from ..sanity import (
+ SanitySingleVersion,
+ SanityMessage,
+ SanityFailure,
+ SanitySuccess,
+ SANITY_ROOT,
+)
+
+from ..target import (
+ TestTarget,
+)
+
+from ..util import (
+ SubprocessError,
+ display,
+ ConfigParser,
+ is_subdir,
+ find_python,
+)
+
+from ..util_common import (
+ run_command,
+)
+
+from ..ansible_util import (
+ ansible_environment,
+ get_collection_detail,
+ CollectionDetail,
+ CollectionDetailError,
+)
+
+from ..config import (
+ SanityConfig,
+)
+
+from ..data import (
+ data_context,
+)
+
+
+class PylintTest(SanitySingleVersion):
+ """Sanity test using pylint."""
+
+ def __init__(self):
+ super(PylintTest, self).__init__()
+ self.optional_error_codes.update([
+ 'ansible-deprecated-date',
+ 'too-complex',
+ ])
+
+ @property
+ def error_code(self): # type: () -> t.Optional[str]
+ """Error code for ansible-test matching the format used by the underlying test program, or None if the program does not use error codes."""
+ return 'ansible-test'
+
+ @property
+ def supported_python_versions(self): # type: () -> t.Optional[t.Tuple[str, ...]]
+ """A tuple of supported Python versions or None if the test does not depend on specific Python versions."""
+        # Python 3.9 is not supported by pylint < 2.5.0.
+ # Unfortunately pylint 2.5.0 and later include an unfixed regression.
+ # See: https://github.com/PyCQA/pylint/issues/3701
+ return tuple(python_version for python_version in super(PylintTest, self).supported_python_versions if python_version not in ('3.9',))
+
+ def filter_targets(self, targets): # type: (t.List[TestTarget]) -> t.List[TestTarget]
+ """Return the given list of test targets, filtered to include only those relevant for the test."""
+ return [target for target in targets if os.path.splitext(target.path)[1] == '.py' or is_subdir(target.path, 'bin')]
+
+ def test(self, args, targets, python_version):
+ """
+ :type args: SanityConfig
+ :type targets: SanityTargets
+ :type python_version: str
+ :rtype: TestResult
+ """
+ plugin_dir = os.path.join(SANITY_ROOT, 'pylint', 'plugins')
+ plugin_names = sorted(p[0] for p in [
+ os.path.splitext(p) for p in os.listdir(plugin_dir)] if p[1] == '.py' and p[0] != '__init__')
+
+ settings = self.load_processor(args)
+
+ paths = [target.path for target in targets.include]
+
+ module_paths = [os.path.relpath(p, data_context().content.module_path).split(os.path.sep) for p in
+ paths if is_subdir(p, data_context().content.module_path)]
+ module_dirs = sorted(set([p[0] for p in module_paths if len(p) > 1]))
+
+ large_module_group_threshold = 500
+ large_module_groups = [key for key, value in
+ itertools.groupby(module_paths, lambda p: p[0] if len(p) > 1 else '') if len(list(value)) > large_module_group_threshold]
+
+ large_module_group_paths = [os.path.relpath(p, data_context().content.module_path).split(os.path.sep) for p in paths
+ if any(is_subdir(p, os.path.join(data_context().content.module_path, g)) for g in large_module_groups)]
+ large_module_group_dirs = sorted(set([os.path.sep.join(p[:2]) for p in large_module_group_paths if len(p) > 2]))
+
+ contexts = []
+ remaining_paths = set(paths)
+
+ def add_context(available_paths, context_name, context_filter):
+ """
+ :type available_paths: set[str]
+ :type context_name: str
+ :type context_filter: (str) -> bool
+ """
+ filtered_paths = set(p for p in available_paths if context_filter(p))
+ contexts.append((context_name, sorted(filtered_paths)))
+ available_paths -= filtered_paths
+
+ def filter_path(path_filter=None):
+ """
+ :type path_filter: str
+ :rtype: (str) -> bool
+ """
+ def context_filter(path_to_filter):
+ """
+ :type path_to_filter: str
+ :rtype: bool
+ """
+ return is_subdir(path_to_filter, path_filter)
+
+ return context_filter
+
+ for large_module_dir in large_module_group_dirs:
+ add_context(remaining_paths, 'modules/%s' % large_module_dir, filter_path(os.path.join(data_context().content.module_path, large_module_dir)))
+
+ for module_dir in module_dirs:
+ add_context(remaining_paths, 'modules/%s' % module_dir, filter_path(os.path.join(data_context().content.module_path, module_dir)))
+
+ add_context(remaining_paths, 'modules', filter_path(data_context().content.module_path))
+ add_context(remaining_paths, 'module_utils', filter_path(data_context().content.module_utils_path))
+
+ add_context(remaining_paths, 'units', filter_path(data_context().content.unit_path))
+
+ if data_context().content.collection:
+ add_context(remaining_paths, 'collection', lambda p: True)
+ else:
+ add_context(remaining_paths, 'validate-modules', filter_path('test/lib/ansible_test/_data/sanity/validate-modules/'))
+ add_context(remaining_paths, 'validate-modules-unit', filter_path('test/lib/ansible_test/tests/validate-modules-unit/'))
+ add_context(remaining_paths, 'sanity', filter_path('test/lib/ansible_test/_data/sanity/'))
+ add_context(remaining_paths, 'ansible-test', filter_path('test/lib/'))
+ add_context(remaining_paths, 'test', filter_path('test/'))
+ add_context(remaining_paths, 'hacking', filter_path('hacking/'))
+ add_context(remaining_paths, 'ansible', lambda p: True)
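+
+        # Each path is claimed by the first matching context above; e.g. (illustrative) a path under
+        # "lib/ansible/modules/" lands in one of the modules contexts and is later checked with the
+        # matching "<context>.cfg" pylint config (falling back to default.cfg) in pylint() below.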
+
+ messages = []
+ context_times = []
+
+ python = find_python(python_version)
+
+ collection_detail = None
+
+ if data_context().content.collection:
+ try:
+ collection_detail = get_collection_detail(args, python)
+
+ if not collection_detail.version:
+ display.warning('Skipping pylint collection version checks since no collection version was found.')
+ except CollectionDetailError as ex:
+ display.warning('Skipping pylint collection version checks since collection detail loading failed: %s' % ex.reason)
+
+ test_start = datetime.datetime.utcnow()
+
+ for context, context_paths in sorted(contexts):
+ if not context_paths:
+ continue
+
+ context_start = datetime.datetime.utcnow()
+ messages += self.pylint(args, context, context_paths, plugin_dir, plugin_names, python, collection_detail)
+ context_end = datetime.datetime.utcnow()
+
+ context_times.append('%s: %d (%s)' % (context, len(context_paths), context_end - context_start))
+
+ test_end = datetime.datetime.utcnow()
+
+ for context_time in context_times:
+ display.info(context_time, verbosity=4)
+
+ display.info('total: %d (%s)' % (len(paths), test_end - test_start), verbosity=4)
+
+ errors = [SanityMessage(
+ message=m['message'].replace('\n', ' '),
+ path=m['path'],
+ line=int(m['line']),
+ column=int(m['column']),
+ level=m['type'],
+ code=m['symbol'],
+ ) for m in messages]
+
+ if args.explain:
+ return SanitySuccess(self.name)
+
+ errors = settings.process_errors(errors, paths)
+
+ if errors:
+ return SanityFailure(self.name, messages=errors)
+
+ return SanitySuccess(self.name)
+
+ @staticmethod
+ def pylint(
+ args, # type: SanityConfig
+ context, # type: str
+ paths, # type: t.List[str]
+ plugin_dir, # type: str
+ plugin_names, # type: t.List[str]
+ python, # type: str
+ collection_detail, # type: CollectionDetail
+ ): # type: (...) -> t.List[t.Dict[str, str]]
+ """Run pylint using the config specified by the context on the specified paths."""
+ rcfile = os.path.join(SANITY_ROOT, 'pylint', 'config', context.split('/')[0] + '.cfg')
+
+ if not os.path.exists(rcfile):
+ if data_context().content.collection:
+ rcfile = os.path.join(SANITY_ROOT, 'pylint', 'config', 'collection.cfg')
+ else:
+ rcfile = os.path.join(SANITY_ROOT, 'pylint', 'config', 'default.cfg')
+
+ parser = ConfigParser()
+ parser.read(rcfile)
+
+ if parser.has_section('ansible-test'):
+ config = dict(parser.items('ansible-test'))
+ else:
+ config = dict()
+
+ disable_plugins = set(i.strip() for i in config.get('disable-plugins', '').split(',') if i)
+ load_plugins = set(plugin_names + ['pylint.extensions.mccabe']) - disable_plugins
+
+ cmd = [
+ python,
+ '-m', 'pylint',
+ '--jobs', '0',
+ '--reports', 'n',
+ '--max-line-length', '160',
+ '--max-complexity', '20',
+ '--rcfile', rcfile,
+ '--output-format', 'json',
+ '--load-plugins', ','.join(load_plugins),
+ ] + paths
+
+ if data_context().content.collection:
+ cmd.extend(['--collection-name', data_context().content.collection.full_name])
+
+ if collection_detail and collection_detail.version:
+ cmd.extend(['--collection-version', collection_detail.version])
+
+ append_python_path = [plugin_dir]
+
+ if data_context().content.collection:
+ append_python_path.append(data_context().content.collection.root)
+
+ env = ansible_environment(args)
+ env['PYTHONPATH'] += os.path.pathsep + os.path.pathsep.join(append_python_path)
+
+ # expose plugin paths for use in custom plugins
+ env.update(dict(('ANSIBLE_TEST_%s_PATH' % k.upper(), os.path.abspath(v) + os.path.sep) for k, v in data_context().content.plugin_paths.items()))
+
+ if paths:
+ display.info('Checking %d file(s) in context "%s" with config: %s' % (len(paths), context, rcfile), verbosity=1)
+
+ try:
+ stdout, stderr = run_command(args, cmd, env=env, capture=True)
+ status = 0
+ except SubprocessError as ex:
+ stdout = ex.stdout
+ stderr = ex.stderr
+ status = ex.status
+
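+            # pylint exit codes below 32 are bit flags describing the categories of messages emitted; 32 or higher indicates a usage error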
+ if stderr or status >= 32:
+ raise SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout)
+ else:
+ stdout = None
+
+ if not args.explain and stdout:
+ messages = json.loads(stdout)
+ else:
+ messages = []
+
+ return messages
diff --git a/test/lib/ansible_test/_internal/sanity/rstcheck.py b/test/lib/ansible_test/_internal/sanity/rstcheck.py
new file mode 100644
index 00000000..2d8a01d5
--- /dev/null
+++ b/test/lib/ansible_test/_internal/sanity/rstcheck.py
@@ -0,0 +1,95 @@
+"""Sanity test using rstcheck."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from .. import types as t
+
+from ..sanity import (
+ SanitySingleVersion,
+ SanityMessage,
+ SanityFailure,
+ SanitySuccess,
+ SANITY_ROOT,
+)
+
+from ..target import (
+ TestTarget,
+)
+
+from ..util import (
+ SubprocessError,
+ parse_to_list_of_dict,
+ read_lines_without_comments,
+ find_python,
+)
+
+from ..util_common import (
+ run_command,
+)
+
+from ..config import (
+ SanityConfig,
+)
+
+
+class RstcheckTest(SanitySingleVersion):
+ """Sanity test using rstcheck."""
+ def filter_targets(self, targets): # type: (t.List[TestTarget]) -> t.List[TestTarget]
+ """Return the given list of test targets, filtered to include only those relevant for the test."""
+ return [target for target in targets if os.path.splitext(target.path)[1] in ('.rst',)]
+
+ def test(self, args, targets, python_version):
+ """
+ :type args: SanityConfig
+ :type targets: SanityTargets
+ :type python_version: str
+ :rtype: TestResult
+ """
+ ignore_file = os.path.join(SANITY_ROOT, 'rstcheck', 'ignore-substitutions.txt')
+ ignore_substitutions = sorted(set(read_lines_without_comments(ignore_file, remove_blank_lines=True)))
+
+ settings = self.load_processor(args)
+
+ paths = [target.path for target in targets.include]
+
+ cmd = [
+ find_python(python_version),
+ '-m', 'rstcheck',
+ '--report', 'warning',
+ '--ignore-substitutions', ','.join(ignore_substitutions),
+ ] + paths
+
+ try:
+ stdout, stderr = run_command(args, cmd, capture=True)
+ status = 0
+ except SubprocessError as ex:
+ stdout = ex.stdout
+ stderr = ex.stderr
+ status = ex.status
+
+ if stdout:
+ raise SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout)
+
+ if args.explain:
+ return SanitySuccess(self.name)
+
+ pattern = r'^(?P<path>[^:]*):(?P<line>[0-9]+): \((?P<level>INFO|WARNING|ERROR|SEVERE)/[0-4]\) (?P<message>.*)$'
+
+ results = parse_to_list_of_dict(pattern, stderr)
+
+ results = [SanityMessage(
+ message=r['message'],
+ path=r['path'],
+ line=int(r['line']),
+ column=0,
+ level=r['level'],
+ ) for r in results]
+
+        results = settings.process_errors(results, paths)
+
+ if results:
+ return SanityFailure(self.name, messages=results)
+
+ return SanitySuccess(self.name)
diff --git a/test/lib/ansible_test/_internal/sanity/sanity_docs.py b/test/lib/ansible_test/_internal/sanity/sanity_docs.py
new file mode 100644
index 00000000..44638075
--- /dev/null
+++ b/test/lib/ansible_test/_internal/sanity/sanity_docs.py
@@ -0,0 +1,62 @@
+"""Sanity test for documentation of sanity tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ..sanity import (
+ SanityVersionNeutral,
+ SanityMessage,
+ SanityFailure,
+ SanitySuccess,
+ sanity_get_tests,
+)
+
+from ..config import (
+ SanityConfig,
+)
+
+from ..data import (
+ data_context,
+)
+
+
+class SanityDocsTest(SanityVersionNeutral):
+ """Sanity test for documentation of sanity tests."""
+ ansible_only = True
+
+ @property
+ def can_ignore(self): # type: () -> bool
+ """True if the test supports ignore entries."""
+ return False
+
+ @property
+ def no_targets(self): # type: () -> bool
+ """True if the test does not use test targets. Mutually exclusive with all_targets."""
+ return True
+
+ # noinspection PyUnusedLocal
+ def test(self, args, targets): # pylint: disable=locally-disabled, unused-argument
+ """
+ :type args: SanityConfig
+ :type targets: SanityTargets
+ :rtype: TestResult
+ """
+ sanity_dir = 'docs/docsite/rst/dev_guide/testing/sanity'
+ sanity_docs = set(part[0] for part in (os.path.splitext(os.path.basename(path)) for path in data_context().content.get_files(sanity_dir))
+ if part[1] == '.rst')
+ sanity_tests = set(sanity_test.name for sanity_test in sanity_get_tests())
+
+ missing = sanity_tests - sanity_docs
+
+ results = []
+
+ results += [SanityMessage(
+ message='missing docs for ansible-test sanity --test %s' % r,
+ path=os.path.join(sanity_dir, '%s.rst' % r),
+ ) for r in sorted(missing)]
+
+ if results:
+ return SanityFailure(self.name, messages=results)
+
+ return SanitySuccess(self.name)
diff --git a/test/lib/ansible_test/_internal/sanity/shellcheck.py b/test/lib/ansible_test/_internal/sanity/shellcheck.py
new file mode 100644
index 00000000..82689ced
--- /dev/null
+++ b/test/lib/ansible_test/_internal/sanity/shellcheck.py
@@ -0,0 +1,110 @@
+"""Sanity test using shellcheck."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from xml.etree.ElementTree import (
+ fromstring,
+ Element,
+)
+
+from .. import types as t
+
+from ..sanity import (
+ SanityVersionNeutral,
+ SanityMessage,
+ SanityFailure,
+ SanitySuccess,
+ SanitySkipped,
+ SANITY_ROOT,
+)
+
+from ..target import (
+ TestTarget,
+)
+
+from ..util import (
+ SubprocessError,
+ read_lines_without_comments,
+ find_executable,
+)
+
+from ..util_common import (
+ run_command,
+)
+
+from ..config import (
+ SanityConfig,
+)
+
+
+class ShellcheckTest(SanityVersionNeutral):
+ """Sanity test using shellcheck."""
+ @property
+ def error_code(self): # type: () -> t.Optional[str]
+ """Error code for ansible-test matching the format used by the underlying test program, or None if the program does not use error codes."""
+ return 'AT1000'
+
+ def filter_targets(self, targets): # type: (t.List[TestTarget]) -> t.List[TestTarget]
+ """Return the given list of test targets, filtered to include only those relevant for the test."""
+ return [target for target in targets if os.path.splitext(target.path)[1] == '.sh']
+
+ def test(self, args, targets):
+ """
+ :type args: SanityConfig
+ :type targets: SanityTargets
+ :rtype: TestResult
+ """
+ exclude_file = os.path.join(SANITY_ROOT, 'shellcheck', 'exclude.txt')
+ exclude = set(read_lines_without_comments(exclude_file, remove_blank_lines=True, optional=True))
+
+ settings = self.load_processor(args)
+
+ paths = [target.path for target in targets.include]
+
+ if not find_executable('shellcheck', required='warning'):
+ return SanitySkipped(self.name)
+
+ cmd = [
+ 'shellcheck',
+ '-e', ','.join(sorted(exclude)),
+ '--format', 'checkstyle',
+ ] + paths
+
+ try:
+ stdout, stderr = run_command(args, cmd, capture=True)
+ status = 0
+ except SubprocessError as ex:
+ stdout = ex.stdout
+ stderr = ex.stderr
+ status = ex.status
+
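+        # shellcheck exits with status 1 when it reports issues; any stderr output or a higher status means the tool itself failed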
+ if stderr or status > 1:
+ raise SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout)
+
+ if args.explain:
+ return SanitySuccess(self.name)
+
+ # json output is missing file paths in older versions of shellcheck, so we'll use xml instead
+ root = fromstring(stdout) # type: Element
+
+ results = []
+
+ for item in root: # type: Element
+ for entry in item: # type: Element
+ results.append(SanityMessage(
+ message=entry.attrib['message'],
+ path=item.attrib['name'],
+ line=int(entry.attrib['line']),
+ column=int(entry.attrib['column']),
+ level=entry.attrib['severity'],
+ code=entry.attrib['source'].replace('ShellCheck.', ''),
+ ))
+
+ results = settings.process_errors(results, paths)
+
+ if results:
+ return SanityFailure(self.name, messages=results)
+
+ return SanitySuccess(self.name)
diff --git a/test/lib/ansible_test/_internal/sanity/validate_modules.py b/test/lib/ansible_test/_internal/sanity/validate_modules.py
new file mode 100644
index 00000000..add3cdc7
--- /dev/null
+++ b/test/lib/ansible_test/_internal/sanity/validate_modules.py
@@ -0,0 +1,149 @@
+"""Sanity test using validate-modules."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+
+from .. import types as t
+
+from ..sanity import (
+ SanitySingleVersion,
+ SanityMessage,
+ SanityFailure,
+ SanitySuccess,
+ SANITY_ROOT,
+)
+
+from ..target import (
+ TestTarget,
+)
+
+from ..util import (
+ SubprocessError,
+ display,
+ find_python,
+)
+
+from ..util_common import (
+ run_command,
+)
+
+from ..ansible_util import (
+ ansible_environment,
+ get_collection_detail,
+ CollectionDetailError,
+)
+
+from ..config import (
+ SanityConfig,
+)
+
+from ..ci import (
+ get_ci_provider,
+)
+
+from ..data import (
+ data_context,
+)
+
+
+class ValidateModulesTest(SanitySingleVersion):
+ """Sanity test using validate-modules."""
+
+ def __init__(self):
+ super(ValidateModulesTest, self).__init__()
+ self.optional_error_codes.update([
+ 'deprecated-date',
+ ])
+
+ @property
+ def error_code(self): # type: () -> t.Optional[str]
+ """Error code for ansible-test matching the format used by the underlying test program, or None if the program does not use error codes."""
+ return 'A100'
+
+ def filter_targets(self, targets): # type: (t.List[TestTarget]) -> t.List[TestTarget]
+ """Return the given list of test targets, filtered to include only those relevant for the test."""
+ return [target for target in targets if target.module]
+
+ def test(self, args, targets, python_version):
+ """
+ :type args: SanityConfig
+ :type targets: SanityTargets
+ :type python_version: str
+ :rtype: TestResult
+ """
+ env = ansible_environment(args, color=False)
+
+ settings = self.load_processor(args)
+
+ paths = [target.path for target in targets.include]
+
+ python = find_python(python_version)
+
+ cmd = [
+ python,
+ os.path.join(SANITY_ROOT, 'validate-modules', 'validate-modules'),
+ '--format', 'json',
+ '--arg-spec',
+ ] + paths
+
+ if data_context().content.collection:
+ cmd.extend(['--collection', data_context().content.collection.directory])
+
+ try:
+ collection_detail = get_collection_detail(args, python)
+
+ if collection_detail.version:
+ cmd.extend(['--collection-version', collection_detail.version])
+ else:
+ display.warning('Skipping validate-modules collection version checks since no collection version was found.')
+ except CollectionDetailError as ex:
+ display.warning('Skipping validate-modules collection version checks since collection detail loading failed: %s' % ex.reason)
+ else:
+ base_branch = args.base_branch or get_ci_provider().get_base_branch()
+
+ if base_branch:
+ cmd.extend([
+ '--base-branch', base_branch,
+ ])
+ else:
+ display.warning('Cannot perform module comparison against the base branch because the base branch was not detected.')
+
+ try:
+ stdout, stderr = run_command(args, cmd, env=env, capture=True)
+ status = 0
+ except SubprocessError as ex:
+ stdout = ex.stdout
+ stderr = ex.stderr
+ status = ex.status
+
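+        # validate-modules exits with status 3 when it reports findings; treat any other non-zero status or stderr output as a tool failure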
+ if stderr or status not in (0, 3):
+ raise SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout)
+
+ if args.explain:
+ return SanitySuccess(self.name)
+
+ messages = json.loads(stdout)
+
+ errors = []
+
+ for filename in messages:
+ output = messages[filename]
+
+ for item in output['errors']:
+ errors.append(SanityMessage(
+ path=filename,
+ line=int(item['line']) if 'line' in item else 0,
+ column=int(item['column']) if 'column' in item else 0,
+ level='error',
+ code='%s' % item['code'],
+ message=item['msg'],
+ ))
+
+ errors = settings.process_errors(errors, paths)
+
+ if errors:
+ return SanityFailure(self.name, messages=errors)
+
+ return SanitySuccess(self.name)
diff --git a/test/lib/ansible_test/_internal/sanity/yamllint.py b/test/lib/ansible_test/_internal/sanity/yamllint.py
new file mode 100644
index 00000000..85a576d0
--- /dev/null
+++ b/test/lib/ansible_test/_internal/sanity/yamllint.py
@@ -0,0 +1,136 @@
+"""Sanity test using yamllint."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+
+from .. import types as t
+
+from .. import ansible_util
+
+from ..sanity import (
+ SanitySingleVersion,
+ SanityMessage,
+ SanityFailure,
+ SanitySkipped,
+ SanitySuccess,
+ SANITY_ROOT,
+)
+
+from ..target import (
+ TestTarget,
+)
+
+from ..util import (
+ SubprocessError,
+ display,
+ is_subdir,
+ find_python,
+)
+
+from ..util_common import (
+ run_command,
+)
+
+from ..config import (
+ SanityConfig,
+)
+
+from ..data import (
+ data_context,
+)
+
+
+class YamllintTest(SanitySingleVersion):
+ """Sanity test using yamllint."""
+ @property
+ def error_code(self): # type: () -> t.Optional[str]
+ """Error code for ansible-test matching the format used by the underlying test program, or None if the program does not use error codes."""
+ return 'ansible-test'
+
+ def filter_targets(self, targets): # type: (t.List[TestTarget]) -> t.List[TestTarget]
+ """Return the given list of test targets, filtered to include only those relevant for the test."""
+ yaml_targets = [target for target in targets if os.path.splitext(target.path)[1] in ('.yml', '.yaml')]
+
+ for plugin_type, plugin_path in sorted(data_context().content.plugin_paths.items()):
+ if plugin_type == 'module_utils':
+ continue
+
+ yaml_targets.extend([target for target in targets if
+ os.path.splitext(target.path)[1] == '.py' and
+ os.path.basename(target.path) != '__init__.py' and
+ is_subdir(target.path, plugin_path)])
+
+ return yaml_targets
+
+ def test(self, args, targets, python_version):
+ """
+ :type args: SanityConfig
+ :type targets: SanityTargets
+ :type python_version: str
+ :rtype: TestResult
+ """
+ pyyaml_presence = ansible_util.check_pyyaml(args, python_version, quiet=True)
+ if not pyyaml_presence['cloader']:
+ display.warning("Skipping sanity test '%s' due to missing libyaml support in PyYAML."
+ % self.name)
+ return SanitySkipped(self.name)
+
+ settings = self.load_processor(args)
+
+ paths = [target.path for target in targets.include]
+
+ python = find_python(python_version)
+
+ results = self.test_paths(args, paths, python)
+ results = settings.process_errors(results, paths)
+
+ if results:
+ return SanityFailure(self.name, messages=results)
+
+ return SanitySuccess(self.name)
+
+ @staticmethod
+ def test_paths(args, paths, python):
+ """
+ :type args: SanityConfig
+ :type paths: list[str]
+ :type python: str
+ :rtype: list[SanityMessage]
+ """
+ cmd = [
+ python,
+ os.path.join(SANITY_ROOT, 'yamllint', 'yamllinter.py'),
+ ]
+
+ data = '\n'.join(paths)
+
+ display.info(data, verbosity=4)
+
+ try:
+ stdout, stderr = run_command(args, cmd, data=data, capture=True)
+ status = 0
+ except SubprocessError as ex:
+ stdout = ex.stdout
+ stderr = ex.stderr
+ status = ex.status
+
+ if stderr:
+ raise SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout)
+
+ if args.explain:
+ return []
+
+ results = json.loads(stdout)['messages']
+
+ results = [SanityMessage(
+ code=r['code'],
+ message=r['message'],
+ path=r['path'],
+ line=int(r['line']),
+ column=int(r['column']),
+ level=r['level'],
+ ) for r in results]
+
+ return results
diff --git a/test/lib/ansible_test/_internal/target.py b/test/lib/ansible_test/_internal/target.py
new file mode 100644
index 00000000..7bafd717
--- /dev/null
+++ b/test/lib/ansible_test/_internal/target.py
@@ -0,0 +1,694 @@
+"""Test target identification, iteration and inclusion/exclusion."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import collections
+import os
+import re
+import itertools
+import abc
+
+from . import types as t
+
+from .encoding import (
+ to_bytes,
+ to_text,
+)
+
+from .io import (
+ read_text_file,
+)
+
+from .util import (
+ ApplicationError,
+ display,
+ read_lines_without_comments,
+ is_subdir,
+)
+
+from .data import (
+ data_context,
+)
+
+MODULE_EXTENSIONS = '.py', '.ps1'
+
+try:
+ TCompletionTarget = t.TypeVar('TCompletionTarget', bound='CompletionTarget')
+except AttributeError:
+ TCompletionTarget = None # pylint: disable=invalid-name
+
+try:
+ TIntegrationTarget = t.TypeVar('TIntegrationTarget', bound='IntegrationTarget')
+except AttributeError:
+ TIntegrationTarget = None # pylint: disable=invalid-name
+
+
+def find_target_completion(target_func, prefix):
+ """
+ :type target_func: () -> collections.Iterable[CompletionTarget]
+ :type prefix: unicode
+ :rtype: list[str]
+ """
+ try:
+ targets = target_func()
+ short = os.environ.get('COMP_TYPE') == '63' # double tab completion from bash
+ matches = walk_completion_targets(targets, prefix, short)
+ return matches
+ except Exception as ex: # pylint: disable=locally-disabled, broad-except
+ return [u'%s' % ex]
+
+
+def walk_completion_targets(targets, prefix, short=False):
+ """
+ :type targets: collections.Iterable[CompletionTarget]
+ :type prefix: str
+ :type short: bool
+ :rtype: tuple[str]
+ """
+ aliases = set(alias for target in targets for alias in target.aliases)
+
+ if prefix.endswith('/') and prefix in aliases:
+ aliases.remove(prefix)
+
+ matches = [alias for alias in aliases if alias.startswith(prefix) and '/' not in alias[len(prefix):-1]]
+
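+    # for double-tab completion, show matches relative to the directory portion of the prefix when more than one match remains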
+ if short:
+ offset = len(os.path.dirname(prefix))
+ if offset:
+ offset += 1
+ relative_matches = [match[offset:] for match in matches if len(match) > offset]
+ if len(relative_matches) > 1:
+ matches = relative_matches
+
+ return tuple(sorted(matches))
+
+
+def walk_internal_targets(targets, includes=None, excludes=None, requires=None):
+ """
+ :type targets: collections.Iterable[T <= CompletionTarget]
+ :type includes: list[str]
+ :type excludes: list[str]
+ :type requires: list[str]
+ :rtype: tuple[T <= CompletionTarget]
+ """
+ targets = tuple(targets)
+
+ include_targets = sorted(filter_targets(targets, includes, errors=True, directories=False), key=lambda include_target: include_target.name)
+
+ if requires:
+ require_targets = set(filter_targets(targets, requires, errors=True, directories=False))
+ include_targets = [require_target for require_target in include_targets if require_target in require_targets]
+
+ if excludes:
+ list(filter_targets(targets, excludes, errors=True, include=False, directories=False))
+
+ internal_targets = set(filter_targets(include_targets, excludes, errors=False, include=False, directories=False))
+ return tuple(sorted(internal_targets, key=lambda sort_target: sort_target.name))
+
+
+def filter_targets(targets, # type: t.Iterable[TCompletionTarget]
+ patterns, # type: t.List[str]
+ include=True, # type: bool
+ directories=True, # type: bool
+ errors=True, # type: bool
+ ): # type: (...) -> t.Iterable[TCompletionTarget]
+ """Iterate over the given targets and filter them based on the supplied arguments."""
+ unmatched = set(patterns or ())
+ compiled_patterns = dict((p, re.compile('^%s$' % p)) for p in patterns) if patterns else None
+
+ for target in targets:
+ matched_directories = set()
+ match = False
+
+ if patterns:
+ for alias in target.aliases:
+ for pattern in patterns:
+ if compiled_patterns[pattern].match(alias):
+ match = True
+
+ try:
+ unmatched.remove(pattern)
+ except KeyError:
+ pass
+
+ if alias.endswith('/'):
+ if target.base_path and len(target.base_path) > len(alias):
+ matched_directories.add(target.base_path)
+ else:
+ matched_directories.add(alias)
+ elif include:
+ match = True
+ if not target.base_path:
+ matched_directories.add('.')
+ for alias in target.aliases:
+ if alias.endswith('/'):
+ if target.base_path and len(target.base_path) > len(alias):
+ matched_directories.add(target.base_path)
+ else:
+ matched_directories.add(alias)
+
+ if match != include:
+ continue
+
+ if directories and matched_directories:
+ yield DirectoryTarget(to_text(sorted(matched_directories, key=len)[0]), target.modules)
+ else:
+ yield target
+
+ if errors:
+ if unmatched:
+ raise TargetPatternsNotMatched(unmatched)
+
+
+def walk_module_targets():
+ """
+ :rtype: collections.Iterable[TestTarget]
+ """
+ for target in walk_test_targets(path=data_context().content.module_path, module_path=data_context().content.module_path, extensions=MODULE_EXTENSIONS):
+ if not target.module:
+ continue
+
+ yield target
+
+
+def walk_units_targets():
+ """
+ :rtype: collections.Iterable[TestTarget]
+ """
+ return walk_test_targets(path=data_context().content.unit_path, module_path=data_context().content.unit_module_path, extensions=('.py',), prefix='test_')
+
+
+def walk_compile_targets(include_symlinks=True):
+ """
+ :type include_symlinks: bool
+ :rtype: collections.Iterable[TestTarget]
+ """
+ return walk_test_targets(module_path=data_context().content.module_path, extensions=('.py',), extra_dirs=('bin',), include_symlinks=include_symlinks)
+
+
+def walk_powershell_targets(include_symlinks=True):
+ """
+ :rtype: collections.Iterable[TestTarget]
+ """
+ return walk_test_targets(module_path=data_context().content.module_path, extensions=('.ps1', '.psm1'), include_symlinks=include_symlinks)
+
+
+def walk_sanity_targets():
+ """
+ :rtype: collections.Iterable[TestTarget]
+ """
+ return walk_test_targets(module_path=data_context().content.module_path, include_symlinks=True, include_symlinked_directories=True)
+
+
+def walk_posix_integration_targets(include_hidden=False):
+ """
+ :type include_hidden: bool
+ :rtype: collections.Iterable[IntegrationTarget]
+ """
+ for target in walk_integration_targets():
+ if 'posix/' in target.aliases or (include_hidden and 'hidden/posix/' in target.aliases):
+ yield target
+
+
+def walk_network_integration_targets(include_hidden=False):
+ """
+ :type include_hidden: bool
+ :rtype: collections.Iterable[IntegrationTarget]
+ """
+ for target in walk_integration_targets():
+ if 'network/' in target.aliases or (include_hidden and 'hidden/network/' in target.aliases):
+ yield target
+
+
+def walk_windows_integration_targets(include_hidden=False):
+ """
+ :type include_hidden: bool
+ :rtype: collections.Iterable[IntegrationTarget]
+ """
+ for target in walk_integration_targets():
+ if 'windows/' in target.aliases or (include_hidden and 'hidden/windows/' in target.aliases):
+ yield target
+
+
+def walk_integration_targets():
+ """
+ :rtype: collections.Iterable[IntegrationTarget]
+ """
+ path = data_context().content.integration_targets_path
+ modules = frozenset(target.module for target in walk_module_targets())
+ paths = data_context().content.walk_files(path)
+ prefixes = load_integration_prefixes()
+ targets_path_tuple = tuple(path.split(os.path.sep))
+
+ entry_dirs = (
+ 'defaults',
+ 'files',
+ 'handlers',
+ 'meta',
+ 'tasks',
+ 'templates',
+ 'vars',
+ )
+
+ entry_files = (
+ 'main.yml',
+ 'main.yaml',
+ )
+
+ entry_points = []
+
+ for entry_dir in entry_dirs:
+ for entry_file in entry_files:
+ entry_points.append(os.path.join(os.path.sep, entry_dir, entry_file))
+
+ # any directory with at least one file is a target
+ path_tuples = set(tuple(os.path.dirname(p).split(os.path.sep))
+ for p in paths)
+
+ # also detect targets which are ansible roles, looking for standard entry points
+ path_tuples.update(tuple(os.path.dirname(os.path.dirname(p)).split(os.path.sep))
+ for p in paths if any(p.endswith(entry_point) for entry_point in entry_points))
+
+ # remove the top-level directory if it was included
+ if targets_path_tuple in path_tuples:
+ path_tuples.remove(targets_path_tuple)
+
+ previous_path_tuple = None
+ paths = []
+
+ for path_tuple in sorted(path_tuples):
+ if previous_path_tuple and previous_path_tuple == path_tuple[:len(previous_path_tuple)]:
+ # ignore nested directories
+ continue
+
+ previous_path_tuple = path_tuple
+ paths.append(os.path.sep.join(path_tuple))
+
+ for path in paths:
+ yield IntegrationTarget(to_text(path), modules, prefixes)
+
+
+def load_integration_prefixes():
+ """
+ :rtype: dict[str, str]
+ """
+ path = data_context().content.integration_path
+ file_paths = sorted(f for f in data_context().content.get_files(path) if os.path.splitext(os.path.basename(f))[0] == 'target-prefixes')
+ prefixes = {}
+
+ for file_path in file_paths:
+ prefix = os.path.splitext(file_path)[1][1:]
+ prefixes.update(dict((k, prefix) for k in read_text_file(file_path).splitlines()))
+
+ return prefixes
+
+
+def walk_test_targets(path=None, module_path=None, extensions=None, prefix=None, extra_dirs=None, include_symlinks=False, include_symlinked_directories=False):
+ """
+ :type path: str | None
+ :type module_path: str | None
+ :type extensions: tuple[str] | None
+ :type prefix: str | None
+ :type extra_dirs: tuple[str] | None
+ :type include_symlinks: bool
+ :type include_symlinked_directories: bool
+ :rtype: collections.Iterable[TestTarget]
+ """
+ if path:
+ file_paths = data_context().content.walk_files(path, include_symlinked_directories=include_symlinked_directories)
+ else:
+ file_paths = data_context().content.all_files(include_symlinked_directories=include_symlinked_directories)
+
+ for file_path in file_paths:
+ name, ext = os.path.splitext(os.path.basename(file_path))
+
+ if extensions and ext not in extensions:
+ continue
+
+ if prefix and not name.startswith(prefix):
+ continue
+
+ symlink = os.path.islink(to_bytes(file_path.rstrip(os.path.sep)))
+
+ if symlink and not include_symlinks:
+ continue
+
+ yield TestTarget(to_text(file_path), module_path, prefix, path, symlink)
+
+ file_paths = []
+
+ if extra_dirs:
+ for extra_dir in extra_dirs:
+ for file_path in data_context().content.get_files(extra_dir):
+ file_paths.append(file_path)
+
+ for file_path in file_paths:
+ symlink = os.path.islink(to_bytes(file_path.rstrip(os.path.sep)))
+
+ if symlink and not include_symlinks:
+ continue
+
+ yield TestTarget(file_path, module_path, prefix, path, symlink)
+
+
+def analyze_integration_target_dependencies(integration_targets):
+ """
+ :type integration_targets: list[IntegrationTarget]
+ :rtype: dict[str,set[str]]
+ """
+ real_target_root = os.path.realpath(data_context().content.integration_targets_path) + '/'
+
+ role_targets = [target for target in integration_targets if target.type == 'role']
+ hidden_role_target_names = set(target.name for target in role_targets if 'hidden/' in target.aliases)
+
+ dependencies = collections.defaultdict(set)
+
+ # handle setup dependencies
+ for target in integration_targets:
+ for setup_target_name in target.setup_always + target.setup_once:
+ dependencies[setup_target_name].add(target.name)
+
+ # handle target dependencies
+ for target in integration_targets:
+ for need_target in target.needs_target:
+ dependencies[need_target].add(target.name)
+
+ # handle symlink dependencies between targets
+ # this use case is supported, but discouraged
+ for target in integration_targets:
+ for path in data_context().content.walk_files(target.path):
+ if not os.path.islink(to_bytes(path.rstrip(os.path.sep))):
+ continue
+
+ real_link_path = os.path.realpath(path)
+
+ if not real_link_path.startswith(real_target_root):
+ continue
+
+ link_target = real_link_path[len(real_target_root):].split('/')[0]
+
+ if link_target == target.name:
+ continue
+
+ dependencies[link_target].add(target.name)
+
+ # intentionally primitive analysis of role meta to avoid a dependency on pyyaml
+ # script based targets are scanned as they may execute a playbook with role dependencies
+ for target in integration_targets:
+ meta_dir = os.path.join(target.path, 'meta')
+
+ if not os.path.isdir(meta_dir):
+ continue
+
+ meta_paths = data_context().content.get_files(meta_dir)
+
+ for meta_path in meta_paths:
+ if os.path.exists(meta_path):
+ # try and decode the file as a utf-8 string, skip if it contains invalid chars (binary file)
+ try:
+ meta_lines = read_text_file(meta_path).splitlines()
+ except UnicodeDecodeError:
+ continue
+
+ for meta_line in meta_lines:
+ if re.search(r'^ *#.*$', meta_line):
+ continue
+
+ if not meta_line.strip():
+ continue
+
+ for hidden_target_name in hidden_role_target_names:
+ if hidden_target_name in meta_line:
+ dependencies[hidden_target_name].add(target.name)
+
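+    # expand the dependency map transitively so indirect consumers are also recorded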
+ while True:
+ changes = 0
+
+ for dummy, dependent_target_names in dependencies.items():
+ for dependent_target_name in list(dependent_target_names):
+ new_target_names = dependencies.get(dependent_target_name)
+
+ if new_target_names:
+ for new_target_name in new_target_names:
+ if new_target_name not in dependent_target_names:
+ dependent_target_names.add(new_target_name)
+ changes += 1
+
+ if not changes:
+ break
+
+ for target_name in sorted(dependencies):
+ consumers = dependencies[target_name]
+
+ if not consumers:
+ continue
+
+ display.info('%s:' % target_name, verbosity=4)
+
+ for consumer in sorted(consumers):
+ display.info(' %s' % consumer, verbosity=4)
+
+ return dependencies
+
+
+class CompletionTarget:
+ """Command-line argument completion target base class."""
+ __metaclass__ = abc.ABCMeta
+
+ def __init__(self):
+ self.name = None
+ self.path = None
+ self.base_path = None
+ self.modules = tuple()
+ self.aliases = tuple()
+
+ def __eq__(self, other):
+ if isinstance(other, CompletionTarget):
+ return self.__repr__() == other.__repr__()
+
+ return False
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __lt__(self, other):
+ return self.name.__lt__(other.name)
+
+ def __gt__(self, other):
+ return self.name.__gt__(other.name)
+
+ def __hash__(self):
+ return hash(self.__repr__())
+
+ def __repr__(self):
+ if self.modules:
+ return '%s (%s)' % (self.name, ', '.join(self.modules))
+
+ return self.name
+
+
+class DirectoryTarget(CompletionTarget):
+ """Directory target."""
+ def __init__(self, path, modules):
+ """
+ :type path: str
+ :type modules: tuple[str]
+ """
+ super(DirectoryTarget, self).__init__()
+
+ self.name = path
+ self.path = path
+ self.modules = modules
+
+
+class TestTarget(CompletionTarget):
+ """Generic test target."""
+ def __init__(self, path, module_path, module_prefix, base_path, symlink=None):
+ """
+ :type path: str
+ :type module_path: str | None
+ :type module_prefix: str | None
+ :type base_path: str
+ :type symlink: bool | None
+ """
+ super(TestTarget, self).__init__()
+
+ if symlink is None:
+ symlink = os.path.islink(to_bytes(path.rstrip(os.path.sep)))
+
+ self.name = path
+ self.path = path
+ self.base_path = base_path + '/' if base_path else None
+ self.symlink = symlink
+
+ name, ext = os.path.splitext(os.path.basename(self.path))
+
+ if module_path and is_subdir(path, module_path) and name != '__init__' and ext in MODULE_EXTENSIONS:
+ self.module = name[len(module_prefix or ''):].lstrip('_')
+ self.modules = (self.module,)
+ else:
+ self.module = None
+ self.modules = tuple()
+
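+        # aliases include the target path, the module name (if any) and every parent directory of the path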
+ aliases = [self.path, self.module]
+ parts = self.path.split('/')
+
+ for i in range(1, len(parts)):
+ alias = '%s/' % '/'.join(parts[:i])
+ aliases.append(alias)
+
+ aliases = [a for a in aliases if a]
+
+ self.aliases = tuple(sorted(aliases))
+
+
+class IntegrationTarget(CompletionTarget):
+ """Integration test target."""
+ non_posix = frozenset((
+ 'network',
+ 'windows',
+ ))
+
+ categories = frozenset(non_posix | frozenset((
+ 'posix',
+ 'module',
+ 'needs',
+ 'skip',
+ )))
+
+ def __init__(self, path, modules, prefixes):
+ """
+ :type path: str
+ :type modules: frozenset[str]
+ :type prefixes: dict[str, str]
+ """
+ super(IntegrationTarget, self).__init__()
+
+ self.relative_path = os.path.relpath(path, data_context().content.integration_targets_path)
+ self.name = self.relative_path.replace(os.path.sep, '.')
+ self.path = path
+
+ # script_path and type
+
+ file_paths = data_context().content.get_files(path)
+ runme_path = os.path.join(path, 'runme.sh')
+
+ if runme_path in file_paths:
+ self.type = 'script'
+ self.script_path = runme_path
+ else:
+ self.type = 'role' # ansible will consider these empty roles, so ansible-test should as well
+ self.script_path = None
+
+ # static_aliases
+
+ aliases_path = os.path.join(path, 'aliases')
+
+ if aliases_path in file_paths:
+ static_aliases = tuple(read_lines_without_comments(aliases_path, remove_blank_lines=True))
+ else:
+ static_aliases = tuple()
+
+ # modules
+
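+        # a target whose name matches a module, optionally with the win_ prefix used by Windows modules, is associated with that module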
+ if self.name in modules:
+ module_name = self.name
+ elif self.name.startswith('win_') and self.name[4:] in modules:
+ module_name = self.name[4:]
+ else:
+ module_name = None
+
+ self.modules = tuple(sorted(a for a in static_aliases + tuple([module_name]) if a in modules))
+
+ # groups
+
+ groups = [self.type]
+ groups += [a for a in static_aliases if a not in modules]
+ groups += ['module/%s' % m for m in self.modules]
+
+ if not self.modules:
+ groups.append('non_module')
+
+ if 'destructive' not in groups:
+ groups.append('non_destructive')
+
+ if '_' in self.name:
+ prefix = self.name[:self.name.find('_')]
+ else:
+ prefix = None
+
+ if prefix in prefixes:
+ group = prefixes[prefix]
+
+ if group != prefix:
+ group = '%s/%s' % (group, prefix)
+
+ groups.append(group)
+
+ if self.name.startswith('win_'):
+ groups.append('windows')
+
+ if self.name.startswith('connection_'):
+ groups.append('connection')
+
+ if self.name.startswith('setup_') or self.name.startswith('prepare_'):
+ groups.append('hidden')
+
+ if self.type not in ('script', 'role'):
+ groups.append('hidden')
+
+ targets_relative_path = data_context().content.integration_targets_path
+
+ # Collect skip entries before group expansion to avoid registering more specific skip entries as less specific versions.
+ self.skips = tuple(g for g in groups if g.startswith('skip/'))
+
+ # Collect file paths before group expansion to avoid including the directories.
+ # Ignore references to test targets, as those must be defined using `needs/target/*` or other target references.
+ self.needs_file = tuple(sorted(set('/'.join(g.split('/')[2:]) for g in groups if
+ g.startswith('needs/file/') and not g.startswith('needs/file/%s/' % targets_relative_path))))
+
+ # network platform
+ networks = [g.split('/')[1] for g in groups if g.startswith('network/')]
+ self.network_platform = networks[0] if networks else None
+
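+        # expand each group into its parent groups; islice limits iteration to the original entries while new ones are appended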
+ for group in itertools.islice(groups, 0, len(groups)):
+ if '/' in group:
+ parts = group.split('/')
+ for i in range(1, len(parts)):
+ groups.append('/'.join(parts[:i]))
+
+ if not any(g in self.non_posix for g in groups):
+ groups.append('posix')
+
+ # aliases
+
+ aliases = [self.name] + \
+ ['%s/' % g for g in groups] + \
+ ['%s/%s' % (g, self.name) for g in groups if g not in self.categories]
+
+ if 'hidden/' in aliases:
+ aliases = ['hidden/'] + ['hidden/%s' % a for a in aliases if not a.startswith('hidden/')]
+
+ self.aliases = tuple(sorted(set(aliases)))
+
+ # configuration
+
+ self.setup_once = tuple(sorted(set(g.split('/')[2] for g in groups if g.startswith('setup/once/'))))
+ self.setup_always = tuple(sorted(set(g.split('/')[2] for g in groups if g.startswith('setup/always/'))))
+ self.needs_target = tuple(sorted(set(g.split('/')[2] for g in groups if g.startswith('needs/target/'))))
+
+
+class TargetPatternsNotMatched(ApplicationError):
+ """One or more targets were not matched when a match was required."""
+ def __init__(self, patterns):
+ """
+ :type patterns: set[str]
+ """
+ self.patterns = sorted(patterns)
+
+ if len(patterns) > 1:
+ message = 'Target patterns not matched:\n%s' % '\n'.join(self.patterns)
+ else:
+ message = 'Target pattern not matched: %s' % self.patterns[0]
+
+ super(TargetPatternsNotMatched, self).__init__(message)
diff --git a/test/lib/ansible_test/_internal/test.py b/test/lib/ansible_test/_internal/test.py
new file mode 100644
index 00000000..8d9629a9
--- /dev/null
+++ b/test/lib/ansible_test/_internal/test.py
@@ -0,0 +1,524 @@
+"""Classes for storing and processing test results."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import datetime
+import re
+
+from . import types as t
+
+from .util import (
+ display,
+ get_ansible_version,
+)
+
+from .util_common import (
+ write_text_test_results,
+ write_json_test_results,
+ ResultType,
+)
+
+from .config import (
+ TestConfig,
+)
+
+
+def calculate_best_confidence(choices, metadata):
+ """
+ :type choices: tuple[tuple[str, int]]
+ :type metadata: Metadata
+ :rtype: int
+ """
+ best_confidence = 0
+
+ for path, line in choices:
+ confidence = calculate_confidence(path, line, metadata)
+ best_confidence = max(confidence, best_confidence)
+
+ return best_confidence
+
+
+def calculate_confidence(path, line, metadata):
+ """
+ :type path: str
+ :type line: int
+ :type metadata: Metadata
+ :rtype: int
+ """
+ ranges = metadata.changes.get(path)
+
+ # no changes were made to the file
+ if not ranges:
+ return 0
+
+ # changes were made to the same file and line
+    if any(r[0] <= line <= r[1] for r in ranges):
+ return 100
+
+ # changes were made to the same file and the line number is unknown
+ if line == 0:
+ return 75
+
+ # changes were made to the same file and the line number is different
+ return 50
+
+
+class TestResult:
+ """Base class for test results."""
+ def __init__(self, command, test, python_version=None):
+ """
+ :type command: str
+ :type test: str
+ :type python_version: str
+ """
+ self.command = command
+ self.test = test
+ self.python_version = python_version
+ self.name = self.test or self.command
+
+ if self.python_version:
+ self.name += '-python-%s' % self.python_version
+
+ try:
+ import junit_xml
+ except ImportError:
+ junit_xml = None
+
+ self.junit = junit_xml
+
+ def write(self, args):
+ """
+ :type args: TestConfig
+ """
+ self.write_console()
+ self.write_bot(args)
+
+ if args.lint:
+ self.write_lint()
+
+ if args.junit:
+ if self.junit:
+ self.write_junit(args)
+ else:
+ display.warning('Skipping junit xml output because the `junit-xml` python package was not found.', unique=True)
+
+ def write_console(self):
+ """Write results to console."""
+
+ def write_lint(self):
+ """Write lint results to stdout."""
+
+ def write_bot(self, args):
+ """
+ :type args: TestConfig
+ """
+
+ def write_junit(self, args):
+ """
+ :type args: TestConfig
+ """
+
+ def create_result_name(self, extension):
+ """
+ :type extension: str
+ :rtype: str
+ """
+ name = 'ansible-test-%s' % self.command
+
+ if self.test:
+ name += '-%s' % self.test
+
+ if self.python_version:
+ name += '-python-%s' % self.python_version
+
+ name += extension
+
+ return name
+
+ def save_junit(self, args, test_case, properties=None):
+ """
+ :type args: TestConfig
+ :type test_case: junit_xml.TestCase
+ :type properties: dict[str, str] | None
+ :rtype: str | None
+ """
+ test_suites = [
+ self.junit.TestSuite(
+ name='ansible-test',
+ test_cases=[test_case],
+ timestamp=datetime.datetime.utcnow().replace(microsecond=0).isoformat(),
+ properties=properties,
+ ),
+ ]
+
+ # the junit_xml API is changing in version 2.0.0
+ # TestSuite.to_xml_string is being replaced with to_xml_report_string
+ # see: https://github.com/kyrus/python-junit-xml/blob/63db26da353790500642fd02cae1543eb41aab8b/junit_xml/__init__.py#L249-L261
+ try:
+ to_xml_string = self.junit.to_xml_report_string
+ except AttributeError:
+ to_xml_string = self.junit.TestSuite.to_xml_string
+
+ report = to_xml_string(test_suites=test_suites, prettyprint=True, encoding='utf-8')
+
+ if args.explain:
+ return
+
+ write_text_test_results(ResultType.JUNIT, self.create_result_name('.xml'), report)
+
+
+class TestTimeout(TestResult):
+ """Test timeout."""
+ def __init__(self, timeout_duration):
+ """
+ :type timeout_duration: int
+ """
+ super(TestTimeout, self).__init__(command='timeout', test='')
+
+ self.timeout_duration = timeout_duration
+
+ def write(self, args):
+ """
+ :type args: TestConfig
+ """
+ message = 'Tests were aborted after exceeding the %d minute time limit.' % self.timeout_duration
+
+ # Include a leading newline to improve readability on Shippable "Tests" tab.
+ # Without this, the first line becomes indented.
+ output = '''
+One or more of the following situations may be responsible:
+
+- Code changes have resulted in tests that hang or run for an excessive amount of time.
+- Tests have been added which exceed the time limit when combined with existing tests.
+- Test infrastructure and/or external dependencies are operating slower than normal.'''
+
+ if args.coverage:
+ output += '\n- Additional overhead from collecting code coverage has resulted in tests exceeding the time limit.'
+
+ output += '\n\nConsult the console log for additional details on where the timeout occurred.'
+
+ timestamp = datetime.datetime.utcnow().replace(microsecond=0).isoformat()
+
+ # hack to avoid requiring junit-xml, which may not be pre-installed outside our test containers
+ xml = '''
+<?xml version="1.0" encoding="utf-8"?>
+<testsuites disabled="0" errors="1" failures="0" tests="1" time="0.0">
+\t<testsuite disabled="0" errors="1" failures="0" file="None" log="None" name="ansible-test" skipped="0" tests="1" time="0" timestamp="%s" url="None">
+\t\t<testcase classname="timeout" name="timeout">
+\t\t\t<error message="%s" type="error">%s</error>
+\t\t</testcase>
+\t</testsuite>
+</testsuites>
+''' % (timestamp, message, output)
+
+ write_text_test_results(ResultType.JUNIT, self.create_result_name('.xml'), xml.lstrip())
+
+
+class TestSuccess(TestResult):
+ """Test success."""
+ def write_junit(self, args):
+ """
+ :type args: TestConfig
+ """
+ test_case = self.junit.TestCase(classname=self.command, name=self.name)
+
+ self.save_junit(args, test_case)
+
+
+class TestSkipped(TestResult):
+ """Test skipped."""
+ def write_console(self):
+ """Write results to console."""
+ display.info('No tests applicable.', verbosity=1)
+
+ def write_junit(self, args):
+ """
+ :type args: TestConfig
+ """
+ test_case = self.junit.TestCase(classname=self.command, name=self.name)
+ test_case.add_skipped_info('No tests applicable.')
+
+ self.save_junit(args, test_case)
+
+
+class TestFailure(TestResult):
+ """Test failure."""
+ def __init__(self, command, test, python_version=None, messages=None, summary=None):
+ """
+ :type command: str
+ :type test: str
+ :type python_version: str | None
+ :type messages: list[TestMessage] | None
+ :type summary: unicode | None
+ """
+ super(TestFailure, self).__init__(command, test, python_version)
+
+ if messages:
+ messages = sorted(messages)
+ else:
+ messages = []
+
+ self.messages = messages
+ self.summary = summary
+
+ def write(self, args):
+ """
+ :type args: TestConfig
+ """
+ if args.metadata.changes:
+ self.populate_confidence(args.metadata)
+
+ super(TestFailure, self).write(args)
+
+ def write_console(self):
+ """Write results to console."""
+ if self.summary:
+ display.error(self.summary)
+ else:
+ if self.python_version:
+ specifier = ' on python %s' % self.python_version
+ else:
+ specifier = ''
+
+ display.error('Found %d %s issue(s)%s which need to be resolved:' % (len(self.messages), self.test or self.command, specifier))
+
+ for message in self.messages:
+ display.error(message.format(show_confidence=True))
+
+ doc_url = self.find_docs()
+ if doc_url:
+ display.info('See documentation for help: %s' % doc_url)
+
+ def write_lint(self):
+ """Write lint results to stdout."""
+ if self.summary:
+ command = self.format_command()
+ message = 'The test `%s` failed. See stderr output for details.' % command
+ path = ''
+ message = TestMessage(message, path)
+ print(message)
+ else:
+ for message in self.messages:
+ print(message)
+
+ def write_junit(self, args):
+ """
+ :type args: TestConfig
+ """
+ title = self.format_title()
+ output = self.format_block()
+
+ test_case = self.junit.TestCase(classname=self.command, name=self.name)
+
+ # Include a leading newline to improve readability on Shippable "Tests" tab.
+ # Without this, the first line becomes indented.
+ test_case.add_failure_info(message=title, output='\n%s' % output)
+
+ self.save_junit(args, test_case)
+
+ def write_bot(self, args):
+ """
+ :type args: TestConfig
+ """
+ docs = self.find_docs()
+ message = self.format_title(help_link=docs)
+ output = self.format_block()
+
+ if self.messages:
+ verified = all((m.confidence or 0) >= 50 for m in self.messages)
+ else:
+ verified = False
+
+ bot_data = dict(
+ verified=verified,
+ docs=docs,
+ results=[
+ dict(
+ message=message,
+ output=output,
+ ),
+ ],
+ )
+
+ if args.explain:
+ return
+
+ write_json_test_results(ResultType.BOT, self.create_result_name('.json'), bot_data)
+
+ def populate_confidence(self, metadata):
+ """
+ :type metadata: Metadata
+ """
+ for message in self.messages:
+ if message.confidence is None:
+ message.confidence = calculate_confidence(message.path, message.line, metadata)
+
+ def format_command(self):
+ """
+ :rtype: str
+ """
+ command = 'ansible-test %s' % self.command
+
+ if self.test:
+ command += ' --test %s' % self.test
+
+ if self.python_version:
+ command += ' --python %s' % self.python_version
+
+ return command
+
+ def find_docs(self):
+ """
+ :rtype: str
+ """
+ if self.command != 'sanity':
+ return None # only sanity tests have docs links
+
+        # Use the major.minor version for the URL only if this is a release that
+ # matches the pattern 2.4.0, otherwise, use 'devel'
+ ansible_version = get_ansible_version()
+ url_version = 'devel'
+ if re.search(r'^[0-9.]+$', ansible_version):
+ url_version = '.'.join(ansible_version.split('.')[:2])
+
+ testing_docs_url = 'https://docs.ansible.com/ansible/%s/dev_guide/testing' % url_version
+
+ url = '%s/%s/' % (testing_docs_url, self.command)
+
+ if self.test:
+ url += '%s.html' % self.test
+
+ return url
+
+ def format_title(self, help_link=None):
+ """
+ :type help_link: str | None
+ :rtype: str
+ """
+ command = self.format_command()
+
+ if self.summary:
+ reason = 'the error'
+ else:
+ reason = '1 error' if len(self.messages) == 1 else '%d errors' % len(self.messages)
+
+ if help_link:
+ help_link_markup = ' [[explain](%s)]' % help_link
+ else:
+ help_link_markup = ''
+
+ title = 'The test `%s`%s failed with %s:' % (command, help_link_markup, reason)
+
+ return title
+
+ def format_block(self):
+ """
+ :rtype: str
+ """
+ if self.summary:
+ block = self.summary
+ else:
+ block = '\n'.join(m.format() for m in self.messages)
+
+ message = block.strip()
+
+ # Hack to remove ANSI color reset code from SubprocessError messages.
+ message = message.replace(display.clear, '')
+
+ return message
+
+
+class TestMessage:
+ """Single test message for one file."""
+ def __init__(self, message, path, line=0, column=0, level='error', code=None, confidence=None):
+ """
+ :type message: str
+ :type path: str
+ :type line: int
+ :type column: int
+ :type level: str
+ :type code: str | None
+ :type confidence: int | None
+ """
+ self.__path = path
+ self.__line = line
+ self.__column = column
+ self.__level = level
+ self.__code = code
+ self.__message = message
+
+ self.confidence = confidence
+
+ @property
+ def path(self): # type: () -> str
+ """Return the path."""
+ return self.__path
+
+ @property
+ def line(self): # type: () -> int
+ """Return the line number, or 0 if none is available."""
+ return self.__line
+
+ @property
+ def column(self): # type: () -> int
+ """Return the column number, or 0 if none is available."""
+ return self.__column
+
+ @property
+ def level(self): # type: () -> str
+ """Return the level."""
+ return self.__level
+
+ @property
+ def code(self): # type: () -> t.Optional[str]
+ """Return the code, if any."""
+ return self.__code
+
+ @property
+ def message(self): # type: () -> str
+ """Return the message."""
+ return self.__message
+
+ @property
+ def tuple(self): # type: () -> t.Tuple[str, int, int, str, t.Optional[str], str]
+ """Return a tuple with all the immutable values of this test message."""
+ return self.__path, self.__line, self.__column, self.__level, self.__code, self.__message
+
+ def __lt__(self, other):
+ return self.tuple < other.tuple
+
+ def __le__(self, other):
+ return self.tuple <= other.tuple
+
+ def __eq__(self, other):
+ return self.tuple == other.tuple
+
+ def __ne__(self, other):
+ return self.tuple != other.tuple
+
+ def __gt__(self, other):
+ return self.tuple > other.tuple
+
+ def __ge__(self, other):
+ return self.tuple >= other.tuple
+
+ def __hash__(self):
+ return hash(self.tuple)
+
+ def __str__(self):
+ return self.format()
+
+ def format(self, show_confidence=False):
+ """
+ :type show_confidence: bool
+ :rtype: str
+ """
+ if self.__code:
+ msg = '%s: %s' % (self.__code, self.__message)
+ else:
+ msg = self.__message
+
+ if show_confidence and self.confidence is not None:
+ msg += ' (%d%%)' % self.confidence
+
+ return '%s:%s:%s: %s' % (self.__path, self.__line, self.__column, msg)
diff --git a/test/lib/ansible_test/_internal/thread.py b/test/lib/ansible_test/_internal/thread.py
new file mode 100644
index 00000000..49fbc1ba
--- /dev/null
+++ b/test/lib/ansible_test/_internal/thread.py
@@ -0,0 +1,57 @@
+"""Python threading tools."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import threading
+import sys
+
+try:
+ # noinspection PyPep8Naming
+ import Queue as queue
+except ImportError:
+ # noinspection PyUnresolvedReferences
+ import queue # pylint: disable=locally-disabled, import-error
+
+
+class WrappedThread(threading.Thread):
+ """Wrapper around Thread which captures results and exceptions."""
+ def __init__(self, action):
+ """
+ :type action: () -> any
+ """
+ # noinspection PyOldStyleClasses
+ super(WrappedThread, self).__init__()
+ self._result = queue.Queue()
+ self.action = action
+ self.result = None
+
+ def run(self):
+ """
+ Run action and capture results or exception.
+ Do not override. Do not call directly. Executed by the start() method.
+ """
+        # We truly want to catch anything that the worker thread might do, including calls to sys.exit().
+ # Therefore we catch *everything* (including old-style class exceptions)
+ # noinspection PyBroadException, PyPep8
+ try:
+ self._result.put((self.action(), None))
+ # pylint: disable=locally-disabled, bare-except
+ except: # noqa
+ self._result.put((None, sys.exc_info()))
+
+ def wait_for_result(self):
+ """
+ Wait for thread to exit and return the result or raise an exception.
+ :rtype: any
+ """
+ result, exception = self._result.get()
+
+ if exception:
+ if sys.version_info[0] > 2:
+ raise exception[1].with_traceback(exception[2])
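+            # the three-argument raise needed on Python 2 is a syntax error on Python 3, so it must go through exec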
+ # noinspection PyRedundantParentheses
+ exec('raise exception[0], exception[1], exception[2]') # pylint: disable=locally-disabled, exec-used
+
+ self.result = result
+
+ return result
diff --git a/test/lib/ansible_test/_internal/types.py b/test/lib/ansible_test/_internal/types.py
new file mode 100644
index 00000000..46ef7066
--- /dev/null
+++ b/test/lib/ansible_test/_internal/types.py
@@ -0,0 +1,32 @@
+"""Import wrapper for type hints when available."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+TYPE_CHECKING = False
+
+try:
+ from typing import (
+ Any,
+ AnyStr,
+ BinaryIO,
+ Callable,
+ Dict,
+ FrozenSet,
+ Generator,
+ IO,
+ Iterable,
+ Iterator,
+ List,
+ Optional,
+ Pattern,
+ Set,
+ Text,
+ TextIO,
+ Tuple,
+ Type,
+ TYPE_CHECKING,
+ TypeVar,
+ Union,
+ )
+except ImportError:
+ pass
diff --git a/test/lib/ansible_test/_internal/units/__init__.py b/test/lib/ansible_test/_internal/units/__init__.py
new file mode 100644
index 00000000..22145431
--- /dev/null
+++ b/test/lib/ansible_test/_internal/units/__init__.py
@@ -0,0 +1,159 @@
+"""Execute unit tests using pytest."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import sys
+
+from ..util import (
+ ANSIBLE_TEST_DATA_ROOT,
+ display,
+ get_available_python_versions,
+ is_subdir,
+ SubprocessError,
+ REMOTE_ONLY_PYTHON_VERSIONS,
+)
+
+from ..util_common import (
+ intercept_command,
+ ResultType,
+ handle_layout_messages,
+)
+
+from ..ansible_util import (
+ ansible_environment,
+ check_pyyaml,
+)
+
+from ..target import (
+ walk_internal_targets,
+ walk_units_targets,
+)
+
+from ..config import (
+ UnitsConfig,
+)
+
+from ..coverage_util import (
+ coverage_context,
+)
+
+from ..data import (
+ data_context,
+)
+
+from ..executor import (
+ AllTargetsSkipped,
+ Delegate,
+ get_changes_filter,
+ install_command_requirements,
+ SUPPORTED_PYTHON_VERSIONS,
+)
+
+
+def command_units(args):
+ """
+ :type args: UnitsConfig
+ """
+ handle_layout_messages(data_context().content.unit_messages)
+
+ changes = get_changes_filter(args)
+ require = args.require + changes
+ include = walk_internal_targets(walk_units_targets(), args.include, args.exclude, require)
+
+ paths = [target.path for target in include]
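+    # module and module_utils tests are the only ones run on remote-only Python versions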
+ remote_paths = [path for path in paths
+ if is_subdir(path, data_context().content.unit_module_path)
+ or is_subdir(path, data_context().content.unit_module_utils_path)]
+
+ if not paths:
+ raise AllTargetsSkipped()
+
+ if args.python and args.python in REMOTE_ONLY_PYTHON_VERSIONS and not remote_paths:
+ raise AllTargetsSkipped()
+
+ if args.delegate:
+ raise Delegate(require=changes, exclude=args.exclude)
+
+ version_commands = []
+
+ available_versions = sorted(get_available_python_versions(list(SUPPORTED_PYTHON_VERSIONS)).keys())
+
+ for version in SUPPORTED_PYTHON_VERSIONS:
+ # run all versions unless version given, in which case run only that version
+ if args.python and version != args.python_version:
+ continue
+
+ if not args.python and version not in available_versions:
+ display.warning("Skipping unit tests on Python %s due to missing interpreter." % version)
+ continue
+
+ if args.requirements_mode != 'skip':
+ install_command_requirements(args, version)
+
+ env = ansible_environment(args)
+
+ cmd = [
+ 'pytest',
+ '--boxed',
+ '-r', 'a',
+ '-n', str(args.num_workers) if args.num_workers else 'auto',
+ '--color',
+ 'yes' if args.color else 'no',
+ '-p', 'no:cacheprovider',
+ '-c', os.path.join(ANSIBLE_TEST_DATA_ROOT, 'pytest.ini'),
+ '--junit-xml', os.path.join(ResultType.JUNIT.path, 'python%s-units.xml' % version),
+ ]
+
+ if not data_context().content.collection:
+ cmd.append('--durations=25')
+
+ if version != '2.6':
+ # added in pytest 4.5.0, which requires python 2.7+
+ cmd.append('--strict-markers')
+
+ plugins = []
+
+ if args.coverage:
+ plugins.append('ansible_pytest_coverage')
+
+ if data_context().content.collection:
+ plugins.append('ansible_pytest_collections')
+
+ if plugins:
+ env['PYTHONPATH'] += ':%s' % os.path.join(ANSIBLE_TEST_DATA_ROOT, 'pytest/plugins')
+ env['PYTEST_PLUGINS'] = ','.join(plugins)
+
+ if args.collect_only:
+ cmd.append('--collect-only')
+
+ if args.verbosity:
+ cmd.append('-' + ('v' * args.verbosity))
+
+ if version in REMOTE_ONLY_PYTHON_VERSIONS:
+ test_paths = remote_paths
+ else:
+ test_paths = paths
+
+ if not test_paths:
+ continue
+
+ cmd.extend(test_paths)
+
+ version_commands.append((version, cmd, env))
+
+ if args.requirements_mode == 'only':
+ sys.exit()
+
+ for version, command, env in version_commands:
+ check_pyyaml(args, version)
+
+ display.info('Unit test with Python %s' % version)
+
+ try:
+ with coverage_context(args):
+ intercept_command(args, command, target_name='units', env=env, python_version=version)
+ except SubprocessError as ex:
+ # pytest exits with status code 5 when all tests are skipped, which isn't an error for our use case
+ if ex.status != 5:
+ raise
diff --git a/test/lib/ansible_test/_internal/util.py b/test/lib/ansible_test/_internal/util.py
new file mode 100644
index 00000000..005c3e05
--- /dev/null
+++ b/test/lib/ansible_test/_internal/util.py
@@ -0,0 +1,853 @@
+"""Miscellaneous utility functions and classes."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import contextlib
+import errno
+import fcntl
+import inspect
+import os
+import pkgutil
+import random
+import re
+import shutil
+import socket
+import stat
+import string
+import subprocess
+import sys
+import tempfile
+import time
+import zipfile
+
+from struct import unpack, pack
+from termios import TIOCGWINSZ
+
+try:
+ from abc import ABC
+except ImportError:
+ from abc import ABCMeta
+ ABC = ABCMeta('ABC', (), {})
+
+try:
+ # noinspection PyCompatibility
+ from configparser import ConfigParser
+except ImportError:
+ # noinspection PyCompatibility,PyUnresolvedReferences
+ from ConfigParser import SafeConfigParser as ConfigParser
+
+try:
+ # noinspection PyProtectedMember
+ from shlex import quote as cmd_quote
+except ImportError:
+ # noinspection PyProtectedMember
+ from pipes import quote as cmd_quote
+
+from . import types as t
+
+from .encoding import (
+ to_bytes,
+ to_optional_bytes,
+ to_optional_text,
+)
+
+from .io import (
+ open_binary_file,
+ read_text_file,
+)
+
+try:
+ C = t.TypeVar('C')
+except AttributeError:
+ C = None
+
+
+PYTHON_PATHS = {} # type: t.Dict[str, str]
+
+try:
+ # noinspection PyUnresolvedReferences
+ MAXFD = subprocess.MAXFD
+except AttributeError:
+ MAXFD = -1
+
+COVERAGE_CONFIG_NAME = 'coveragerc'
+
+ANSIBLE_TEST_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+
+# assume running from install
+ANSIBLE_ROOT = os.path.dirname(ANSIBLE_TEST_ROOT)
+ANSIBLE_BIN_PATH = os.path.dirname(os.path.abspath(sys.argv[0]))
+ANSIBLE_LIB_ROOT = os.path.join(ANSIBLE_ROOT, 'ansible')
+ANSIBLE_SOURCE_ROOT = None
+
+if not os.path.exists(ANSIBLE_LIB_ROOT):
+ # running from source
+ ANSIBLE_ROOT = os.path.dirname(os.path.dirname(os.path.dirname(ANSIBLE_TEST_ROOT)))
+ ANSIBLE_BIN_PATH = os.path.join(ANSIBLE_ROOT, 'bin')
+ ANSIBLE_LIB_ROOT = os.path.join(ANSIBLE_ROOT, 'lib', 'ansible')
+ ANSIBLE_SOURCE_ROOT = ANSIBLE_ROOT
+
+ANSIBLE_TEST_DATA_ROOT = os.path.join(ANSIBLE_TEST_ROOT, '_data')
+ANSIBLE_TEST_CONFIG_ROOT = os.path.join(ANSIBLE_TEST_ROOT, 'config')
+
+# Modes are set to allow all users the same level of access.
+# This permits files to be used in tests that change users.
+# The only exception is write access to directories for the user creating them.
+# This avoids having to modify the directory permissions a second time.
+
+MODE_READ = stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH
+
+MODE_FILE = MODE_READ
+MODE_FILE_EXECUTE = MODE_FILE | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
+MODE_FILE_WRITE = MODE_FILE | stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH
+
+MODE_DIRECTORY = MODE_READ | stat.S_IWUSR | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
+MODE_DIRECTORY_WRITE = MODE_DIRECTORY | stat.S_IWGRP | stat.S_IWOTH
+
+REMOTE_ONLY_PYTHON_VERSIONS = (
+ '2.6',
+)
+
+SUPPORTED_PYTHON_VERSIONS = (
+ '2.6',
+ '2.7',
+ '3.5',
+ '3.6',
+ '3.7',
+ '3.8',
+ '3.9',
+)
+
+
+def remove_file(path):
+ """
+ :type path: str
+ """
+ if os.path.isfile(path):
+ os.remove(path)
+
+
+def read_lines_without_comments(path, remove_blank_lines=False, optional=False): # type: (str, bool, bool) -> t.List[str]
+ """
+ Returns lines from the specified text file with comments removed.
+ Comments are any content from a hash symbol to the end of a line.
+ Any spaces immediately before a comment are also removed.
+ """
+ if optional and not os.path.exists(path):
+ return []
+
+ lines = read_text_file(path).splitlines()
+
+ lines = [re.sub(r' *#.*$', '', line) for line in lines]
+
+ if remove_blank_lines:
+ lines = [line for line in lines if line]
+
+ return lines
+
+
+def find_executable(executable, cwd=None, path=None, required=True):
+ """
+ :type executable: str
+ :type cwd: str
+ :type path: str
+ :type required: bool | str
+ :rtype: str | None
+ """
+ match = None
+ real_cwd = os.getcwd()
+
+ if not cwd:
+ cwd = real_cwd
+
+ if os.path.dirname(executable):
+ target = os.path.join(cwd, executable)
+ if os.path.exists(target) and os.access(target, os.F_OK | os.X_OK):
+ match = executable
+ else:
+ if path is None:
+ path = os.environ.get('PATH', os.path.defpath)
+
+ if path:
+ path_dirs = path.split(os.path.pathsep)
+ seen_dirs = set()
+
+ for path_dir in path_dirs:
+ if path_dir in seen_dirs:
+ continue
+
+ seen_dirs.add(path_dir)
+
+ if os.path.abspath(path_dir) == real_cwd:
+ path_dir = cwd
+
+ candidate = os.path.join(path_dir, executable)
+
+ if os.path.exists(candidate) and os.access(candidate, os.F_OK | os.X_OK):
+ match = candidate
+ break
+
+ if not match and required:
+ message = 'Required program "%s" not found.' % executable
+
+ if required != 'warning':
+ raise ApplicationError(message)
+
+ display.warning(message)
+
+ return match
+
+
+def find_python(version, path=None, required=True):
+ """
+ :type version: str
+ :type path: str | None
+ :type required: bool
+ :rtype: str
+ """
+ version_info = tuple(int(n) for n in version.split('.'))
+
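+ # if the requested version matches the currently running interpreter, prefer sys.executable over a PATH search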
+ if not path and version_info == sys.version_info[:len(version_info)]:
+ python_bin = sys.executable
+ else:
+ python_bin = find_executable('python%s' % version, path=path, required=required)
+
+ return python_bin
+
+
+def get_ansible_version(): # type: () -> str
+ """Return the Ansible version."""
+ try:
+ return get_ansible_version.version
+ except AttributeError:
+ pass
+
+ # ansible may not be in our sys.path
+ # avoids a symlink to release.py since ansible placement relative to ansible-test may change during delegation
+ load_module(os.path.join(ANSIBLE_LIB_ROOT, 'release.py'), 'ansible_release')
+
+ # noinspection PyUnresolvedReferences
+ from ansible_release import __version__ as ansible_version # pylint: disable=import-error
+
+ get_ansible_version.version = ansible_version
+
+ return ansible_version
+
+
+def get_available_python_versions(versions): # type: (t.List[str]) -> t.Dict[str, str]
+ """Return a dictionary indicating which of the requested Python versions are available."""
+ try:
+ return get_available_python_versions.result
+ except AttributeError:
+ pass
+
+ get_available_python_versions.result = dict((version, path) for version, path in
+ ((version, find_python(version, required=False)) for version in versions) if path)
+
+ return get_available_python_versions.result
+
+
+def generate_pip_command(python):
+ """
+ :type python: str
+ :rtype: list[str]
+ """
+ return [python, os.path.join(ANSIBLE_TEST_DATA_ROOT, 'quiet_pip.py')]
+
+
+def raw_command(cmd, capture=False, env=None, data=None, cwd=None, explain=False, stdin=None, stdout=None,
+ cmd_verbosity=1, str_errors='strict'):
+ """
+ :type cmd: collections.Iterable[str]
+ :type capture: bool
+ :type env: dict[str, str] | None
+ :type data: str | None
+ :type cwd: str | None
+ :type explain: bool
+ :type stdin: file | None
+ :type stdout: file | None
+ :type cmd_verbosity: int
+ :type str_errors: str
+ :rtype: str | None, str | None
+ """
+ if not cwd:
+ cwd = os.getcwd()
+
+ if not env:
+ env = common_environment()
+
+ cmd = list(cmd)
+
+ escaped_cmd = ' '.join(cmd_quote(c) for c in cmd)
+
+ display.info('Run command: %s' % escaped_cmd, verbosity=cmd_verbosity, truncate=True)
+ display.info('Working directory: %s' % cwd, verbosity=2)
+
+ program = find_executable(cmd[0], cwd=cwd, path=env['PATH'], required='warning')
+
+ if program:
+ display.info('Program found: %s' % program, verbosity=2)
+
+ for key in sorted(env.keys()):
+ display.info('%s=%s' % (key, env[key]), verbosity=2)
+
+ if explain:
+ return None, None
+
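+ # communicate() is required whenever data is piped in or output is captured; otherwise wait() is sufficient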
+ communicate = False
+
+ if stdin is not None:
+ data = None
+ communicate = True
+ elif data is not None:
+ stdin = subprocess.PIPE
+ communicate = True
+
+ if stdout:
+ communicate = True
+
+ if capture:
+ stdout = stdout or subprocess.PIPE
+ stderr = subprocess.PIPE
+ communicate = True
+ else:
+ stderr = None
+
+ start = time.time()
+ process = None
+
+ try:
+ try:
+ cmd_bytes = [to_bytes(c) for c in cmd]
+ env_bytes = dict((to_bytes(k), to_bytes(v)) for k, v in env.items())
+ process = subprocess.Popen(cmd_bytes, env=env_bytes, stdin=stdin, stdout=stdout, stderr=stderr, cwd=cwd)
+ except OSError as ex:
+ if ex.errno == errno.ENOENT:
+ raise ApplicationError('Required program "%s" not found.' % cmd[0])
+ raise
+
+ if communicate:
+ data_bytes = to_optional_bytes(data)
+ stdout_bytes, stderr_bytes = process.communicate(data_bytes)
+ stdout_text = to_optional_text(stdout_bytes, str_errors) or u''
+ stderr_text = to_optional_text(stderr_bytes, str_errors) or u''
+ else:
+ process.wait()
+ stdout_text, stderr_text = None, None
+ finally:
+ if process and process.returncode is None:
+ process.kill()
+ display.info('') # the process we're interrupting may have completed a partial line of output
+ display.notice('Killed command to avoid an orphaned child process during handling of an unexpected exception.')
+
+ status = process.returncode
+ runtime = time.time() - start
+
+ display.info('Command exited with status %s after %s seconds.' % (status, runtime), verbosity=4)
+
+ if status == 0:
+ return stdout_text, stderr_text
+
+ raise SubprocessError(cmd, status, stdout_text, stderr_text, runtime)
+
+
+def common_environment():
+ """Common environment used for executing all programs."""
+ env = dict(
+ LC_ALL='en_US.UTF-8',
+ PATH=os.environ.get('PATH', os.path.defpath),
+ )
+
+ required = (
+ 'HOME',
+ )
+
+ optional = (
+ 'HTTPTESTER',
+ 'LD_LIBRARY_PATH',
+ 'SSH_AUTH_SOCK',
+ # macOS High Sierra Compatibility
+ # http://sealiesoftware.com/blog/archive/2017/6/5/Objective-C_and_fork_in_macOS_1013.html
+ # Example configuration for macOS:
+ # export OBJC_DISABLE_INITIALIZE_FORK_SAFETY=YES
+ 'OBJC_DISABLE_INITIALIZE_FORK_SAFETY',
+ 'ANSIBLE_KEEP_REMOTE_FILES',
+ # macOS Homebrew Compatibility
+ # https://cryptography.io/en/latest/installation/#building-cryptography-on-macos
+ # This may also be required to install pyyaml with libyaml support when installed in non-standard locations.
+ # Example configuration for brew on macOS:
+ # export LDFLAGS="-L$(brew --prefix openssl)/lib/ -L$(brew --prefix libyaml)/lib/"
+ # export CFLAGS="-I$(brew --prefix openssl)/include/ -I$(brew --prefix libyaml)/include/"
+ # However, this is not adequate for PyYAML 3.13, which is the latest version supported on Python 2.6.
+ # For that version the standard location must be used, or `pip install` must be invoked with additional options:
+ # --global-option=build_ext --global-option=-L{path_to_lib_dir}
+ 'LDFLAGS',
+ 'CFLAGS',
+ )
+
+ env.update(pass_vars(required=required, optional=optional))
+
+ return env
+
+
+def pass_vars(required, optional):
+ """
+ :type required: collections.Iterable[str]
+ :type optional: collections.Iterable[str]
+ :rtype: dict[str, str]
+ """
+ env = {}
+
+ for name in required:
+ if name not in os.environ:
+ raise MissingEnvironmentVariable(name)
+ env[name] = os.environ[name]
+
+ for name in optional:
+ if name not in os.environ:
+ continue
+ env[name] = os.environ[name]
+
+ return env
+
+
+def deepest_path(path_a, path_b):
+ """Return the deepest of two paths, or None if the paths are unrelated.
+ :type path_a: str
+ :type path_b: str
+ :rtype: str | None
+ """
+ if path_a == '.':
+ path_a = ''
+
+ if path_b == '.':
+ path_b = ''
+
+ if path_a.startswith(path_b):
+ return path_a or '.'
+
+ if path_b.startswith(path_a):
+ return path_b or '.'
+
+ return None
+
+
+def remove_tree(path):
+ """
+ :type path: str
+ """
+ try:
+ shutil.rmtree(to_bytes(path))
+ except OSError as ex:
+ if ex.errno != errno.ENOENT:
+ raise
+
+
+def is_binary_file(path):
+ """
+ :type path: str
+ :rtype: bool
+ """
+ assume_text = set([
+ '.cfg',
+ '.conf',
+ '.crt',
+ '.cs',
+ '.css',
+ '.html',
+ '.ini',
+ '.j2',
+ '.js',
+ '.json',
+ '.md',
+ '.pem',
+ '.ps1',
+ '.psm1',
+ '.py',
+ '.rst',
+ '.sh',
+ '.txt',
+ '.xml',
+ '.yaml',
+ '.yml',
+ ])
+
+ assume_binary = set([
+ '.bin',
+ '.eot',
+ '.gz',
+ '.ico',
+ '.iso',
+ '.jpg',
+ '.otf',
+ '.p12',
+ '.png',
+ '.pyc',
+ '.rpm',
+ '.ttf',
+ '.woff',
+ '.woff2',
+ '.zip',
+ ])
+
+ ext = os.path.splitext(path)[1]
+
+ if ext in assume_text:
+ return False
+
+ if ext in assume_binary:
+ return True
+
+ with open_binary_file(path) as path_fd:
+ # noinspection PyTypeChecker
+ return b'\0' in path_fd.read(4096)
+
+
+def generate_password():
+ """Generate a random password.
+ :rtype: str
+ """
+ chars = [
+ string.ascii_letters,
+ string.digits,
+ string.ascii_letters,
+ string.digits,
+ '-',
+ ] * 4
+
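+ # choose one character from each class; chars[:-1] drops the trailing '-' so the 19 character password never ends with a hyphen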
+ password = ''.join([random.choice(char) for char in chars[:-1]])
+
+ display.sensitive.add(password)
+
+ return password
+
+
+class Display:
+ """Manages color console output."""
+ clear = '\033[0m'
+ red = '\033[31m'
+ green = '\033[32m'
+ yellow = '\033[33m'
+ blue = '\033[34m'
+ purple = '\033[35m'
+ cyan = '\033[36m'
+
+ verbosity_colors = {
+ 0: None,
+ 1: green,
+ 2: blue,
+ 3: cyan,
+ }
+
+ def __init__(self):
+ self.verbosity = 0
+ self.color = sys.stdout.isatty()
+ self.warnings = []
+ self.warnings_unique = set()
+ self.info_stderr = False
+ self.rows = 0
+ self.columns = 0
+ self.truncate = 0
+ self.redact = True
+ self.sensitive = set()
+
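+ # when stdin is a tty, query the terminal size with the TIOCGWINSZ ioctl (rows and columns are the first two fields)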
+ if os.isatty(0):
+ self.rows, self.columns = unpack('HHHH', fcntl.ioctl(0, TIOCGWINSZ, pack('HHHH', 0, 0, 0, 0)))[:2]
+
+ def __warning(self, message):
+ """
+ :type message: str
+ """
+ self.print_message('WARNING: %s' % message, color=self.purple, fd=sys.stderr)
+
+ def review_warnings(self):
+ """Review all warnings which previously occurred."""
+ if not self.warnings:
+ return
+
+ self.__warning('Reviewing previous %d warning(s):' % len(self.warnings))
+
+ for warning in self.warnings:
+ self.__warning(warning)
+
+ def warning(self, message, unique=False, verbosity=0):
+ """
+ :type message: str
+ :type unique: bool
+ :type verbosity: int
+ """
+ if verbosity > self.verbosity:
+ return
+
+ if unique:
+ if message in self.warnings_unique:
+ return
+
+ self.warnings_unique.add(message)
+
+ self.__warning(message)
+ self.warnings.append(message)
+
+ def notice(self, message):
+ """
+ :type message: str
+ """
+ self.print_message('NOTICE: %s' % message, color=self.purple, fd=sys.stderr)
+
+ def error(self, message):
+ """
+ :type message: str
+ """
+ self.print_message('ERROR: %s' % message, color=self.red, fd=sys.stderr)
+
+ def info(self, message, verbosity=0, truncate=False):
+ """
+ :type message: str
+ :type verbosity: int
+ :type truncate: bool
+ """
+ if self.verbosity >= verbosity:
+ color = self.verbosity_colors.get(verbosity, self.yellow)
+ self.print_message(message, color=color, fd=sys.stderr if self.info_stderr else sys.stdout, truncate=truncate)
+
+ def print_message(self, message, color=None, fd=sys.stdout, truncate=False): # pylint: disable=locally-disabled, invalid-name
+ """
+ :type message: str
+ :type color: str | None
+ :type fd: file
+ :type truncate: bool
+ """
+ if self.redact and self.sensitive:
+ for item in self.sensitive:
+ if not item:
+ continue
+
+ message = message.replace(item, '*' * len(item))
+
+ if truncate:
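+ # only truncate when a limit is configured and it leaves room for the ' ...' suffix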
+ if len(message) > self.truncate > 5:
+ message = message[:self.truncate - 5] + ' ...'
+
+ if color and self.color:
+ # convert color resets in message to desired color
+ message = message.replace(self.clear, color)
+ message = '%s%s%s' % (color, message, self.clear)
+
+ if sys.version_info[0] == 2:
+ message = to_bytes(message)
+
+ print(message, file=fd)
+ fd.flush()
+
+
+class ApplicationError(Exception):
+ """General application error."""
+
+
+class ApplicationWarning(Exception):
+ """General application warning which interrupts normal program flow."""
+
+
+class SubprocessError(ApplicationError):
+ """Error resulting from failed subprocess execution."""
+ def __init__(self, cmd, status=0, stdout=None, stderr=None, runtime=None):
+ """
+ :type cmd: list[str]
+ :type status: int
+ :type stdout: str | None
+ :type stderr: str | None
+ :type runtime: float | None
+ """
+ message = 'Command "%s" returned exit status %s.\n' % (' '.join(cmd_quote(c) for c in cmd), status)
+
+ if stderr:
+ message += '>>> Standard Error\n'
+ message += '%s%s\n' % (stderr.strip(), Display.clear)
+
+ if stdout:
+ message += '>>> Standard Output\n'
+ message += '%s%s\n' % (stdout.strip(), Display.clear)
+
+ message = message.strip()
+
+ super(SubprocessError, self).__init__(message)
+
+ self.cmd = cmd
+ self.message = message
+ self.status = status
+ self.stdout = stdout
+ self.stderr = stderr
+ self.runtime = runtime
+
+
+class MissingEnvironmentVariable(ApplicationError):
+ """Error caused by missing environment variable."""
+ def __init__(self, name):
+ """
+ :type name: str
+ """
+ super(MissingEnvironmentVariable, self).__init__('Missing environment variable: %s' % name)
+
+ self.name = name
+
+
+def parse_to_list_of_dict(pattern, value):
+ """
+ :type pattern: str
+ :type value: str
+ :rtype: list[dict[str, str]]
+ """
+ matched = []
+ unmatched = []
+
+ for line in value.splitlines():
+ match = re.search(pattern, line)
+
+ if match:
+ matched.append(match.groupdict())
+ else:
+ unmatched.append(line)
+
+ if unmatched:
+ raise Exception('Pattern "%s" did not match values:\n%s' % (pattern, '\n'.join(unmatched)))
+
+ return matched
+
+
+def get_available_port():
+ """
+ :rtype: int
+ """
+ # this relies on the kernel not reusing previously assigned ports immediately
+ socket_fd = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+
+ with contextlib.closing(socket_fd):
+ socket_fd.bind(('', 0))
+ return socket_fd.getsockname()[1]
+
+
+def get_subclasses(class_type): # type: (t.Type[C]) -> t.Set[t.Type[C]]
+ """Returns the set of types that are concrete subclasses of the given type."""
+ subclasses = set() # type: t.Set[t.Type[C]]
+ queue = [class_type] # type: t.List[t.Type[C]]
+
+ while queue:
+ parent = queue.pop()
+
+ for child in parent.__subclasses__():
+ if child not in subclasses:
+ if not inspect.isabstract(child):
+ subclasses.add(child)
+ queue.append(child)
+
+ return subclasses
+
+
+def is_subdir(candidate_path, path): # type: (str, str) -> bool
+ """Returns true if candidate_path is path or a subdirectory of path."""
+ if not path.endswith(os.path.sep):
+ path += os.path.sep
+
+ if not candidate_path.endswith(os.path.sep):
+ candidate_path += os.path.sep
+
+ return candidate_path.startswith(path)
+
+
+def paths_to_dirs(paths): # type: (t.List[str]) -> t.List[str]
+ """Returns a list of directories extracted from the given list of paths."""
+ dir_names = set()
+
+ for path in paths:
+ while True:
+ path = os.path.dirname(path)
+
+ if not path or path == os.path.sep:
+ break
+
+ dir_names.add(path + os.path.sep)
+
+ return sorted(dir_names)
+
+
+def str_to_version(version): # type: (str) -> t.Tuple[int, ...]
+ """Return a version tuple from a version string."""
+ return tuple(int(n) for n in version.split('.'))
+
+
+def version_to_str(version): # type: (t.Tuple[int, ...]) -> str
+ """Return a version string from a version tuple."""
+ return '.'.join(str(n) for n in version)
+
+
+def import_plugins(directory, root=None): # type: (str, t.Optional[str]) -> None
+ """
+ Import plugins from the given directory relative to the given root.
+ If the root is not provided, the 'lib' directory for the test runner will be used.
+ """
+ if root is None:
+ root = os.path.dirname(__file__)
+
+ path = os.path.join(root, directory)
+ package = __name__.rsplit('.', 1)[0]
+ prefix = '%s.%s.' % (package, directory.replace(os.path.sep, '.'))
+
+ for (_module_loader, name, _ispkg) in pkgutil.iter_modules([path], prefix=prefix):
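+ # convert the fully qualified module name back into a file path relative to the root before loading it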
+ module_path = os.path.join(root, name[len(package) + 1:].replace('.', os.path.sep) + '.py')
+ load_module(module_path, name)
+
+
+def load_plugins(base_type, database): # type: (t.Type[C], t.Dict[str, t.Type[C]]) -> None
+ """
+ Load plugins of the specified type and track them in the specified database.
+ Only plugins which have already been imported will be loaded.
+ """
+ plugins = dict((sc.__module__.rsplit('.', 1)[1], sc) for sc in get_subclasses(base_type)) # type: t.Dict[str, t.Type[C]]
+
+ for plugin in plugins:
+ database[plugin] = plugins[plugin]
+
+
+def load_module(path, name): # type: (str, str) -> None
+ """Load a Python module using the given name and path."""
+ if name in sys.modules:
+ return
+
+ if sys.version_info >= (3, 4):
+ # noinspection PyUnresolvedReferences
+ import importlib.util
+
+ # noinspection PyUnresolvedReferences
+ spec = importlib.util.spec_from_file_location(name, path)
+ # noinspection PyUnresolvedReferences
+ module = importlib.util.module_from_spec(spec)
+ spec.loader.exec_module(module)
+
+ sys.modules[name] = module
+ else:
+ # noinspection PyDeprecation
+ import imp
+
+ # load_source (and thus load_module) require a file opened with `open` in text mode
+ with open(to_bytes(path)) as module_file:
+ # noinspection PyDeprecation
+ imp.load_module(name, module_file, path, ('.py', 'r', imp.PY_SOURCE))
+
+
+@contextlib.contextmanager
+def tempdir(): # type: () -> str
+ """Creates a temporary directory that is deleted outside the context scope."""
+ temp_path = tempfile.mkdtemp()
+ yield temp_path
+ shutil.rmtree(temp_path)
+
+
+@contextlib.contextmanager
+def open_zipfile(path, mode='r'):
+ """Opens a zip file and closes the file automatically."""
+ zip_obj = zipfile.ZipFile(path, mode=mode)
+ yield zip_obj
+ zip_obj.close()
+
+
+display = Display() # pylint: disable=locally-disabled, invalid-name
diff --git a/test/lib/ansible_test/_internal/util_common.py b/test/lib/ansible_test/_internal/util_common.py
new file mode 100644
index 00000000..1ac2e60d
--- /dev/null
+++ b/test/lib/ansible_test/_internal/util_common.py
@@ -0,0 +1,487 @@
+"""Common utility code that depends on CommonConfig."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import atexit
+import contextlib
+import os
+import shutil
+import sys
+import tempfile
+import textwrap
+
+from . import types as t
+
+from .encoding import (
+ to_bytes,
+)
+
+from .util import (
+ common_environment,
+ COVERAGE_CONFIG_NAME,
+ display,
+ find_python,
+ remove_tree,
+ MODE_DIRECTORY,
+ MODE_FILE_EXECUTE,
+ PYTHON_PATHS,
+ raw_command,
+ read_lines_without_comments,
+ ANSIBLE_TEST_DATA_ROOT,
+ ApplicationError,
+)
+
+from .io import (
+ write_text_file,
+ write_json_file,
+)
+
+from .data import (
+ data_context,
+)
+
+from .provider.layout import (
+ LayoutMessages,
+)
+
+DOCKER_COMPLETION = {} # type: t.Dict[str, t.Dict[str, str]]
+REMOTE_COMPLETION = {} # type: t.Dict[str, t.Dict[str, str]]
+NETWORK_COMPLETION = {} # type: t.Dict[str, t.Dict[str, str]]
+
+
+class ResultType:
+ """Test result type."""
+ BOT = None # type: ResultType
+ COVERAGE = None # type: ResultType
+ DATA = None # type: ResultType
+ JUNIT = None # type: ResultType
+ LOGS = None # type: ResultType
+ REPORTS = None # type: ResultType
+ TMP = None # type: ResultType
+
+ @staticmethod
+ def _populate():
+ ResultType.BOT = ResultType('bot')
+ ResultType.COVERAGE = ResultType('coverage')
+ ResultType.DATA = ResultType('data')
+ ResultType.JUNIT = ResultType('junit')
+ ResultType.LOGS = ResultType('logs')
+ ResultType.REPORTS = ResultType('reports')
+ ResultType.TMP = ResultType('.tmp')
+
+ def __init__(self, name): # type: (str) -> None
+ self.name = name
+
+ @property
+ def relative_path(self): # type: () -> str
+ """The content relative path to the results."""
+ return os.path.join(data_context().content.results_path, self.name)
+
+ @property
+ def path(self): # type: () -> str
+ """The absolute path to the results."""
+ return os.path.join(data_context().content.root, self.relative_path)
+
+ def __str__(self): # type: () -> str
+ return self.name
+
+
+# noinspection PyProtectedMember
+ResultType._populate() # pylint: disable=protected-access
+
+
+class CommonConfig:
+ """Configuration common to all commands."""
+ def __init__(self, args, command):
+ """
+ :type args: any
+ :type command: str
+ """
+ self.command = command
+
+ self.color = args.color # type: bool
+ self.explain = args.explain # type: bool
+ self.verbosity = args.verbosity # type: int
+ self.debug = args.debug # type: bool
+ self.truncate = args.truncate # type: int
+ self.redact = args.redact # type: bool
+
+ self.info_stderr = False # type: bool
+
+ self.cache = {}
+
+ def get_ansible_config(self): # type: () -> str
+ """Return the path to the Ansible config for the given config."""
+ return os.path.join(ANSIBLE_TEST_DATA_ROOT, 'ansible.cfg')
+
+
+class NetworkPlatformSettings:
+ """Settings required for provisioning a network platform."""
+ def __init__(self, collection, inventory_vars): # type: (str, t.Dict[str, str]) -> None
+ self.collection = collection
+ self.inventory_vars = inventory_vars
+
+
+def get_docker_completion():
+ """
+ :rtype: dict[str, dict[str, str]]
+ """
+ return get_parameterized_completion(DOCKER_COMPLETION, 'docker')
+
+
+def get_remote_completion():
+ """
+ :rtype: dict[str, dict[str, str]]
+ """
+ return get_parameterized_completion(REMOTE_COMPLETION, 'remote')
+
+
+def get_network_completion():
+ """
+ :rtype: dict[str, dict[str, str]]
+ """
+ return get_parameterized_completion(NETWORK_COMPLETION, 'network')
+
+
+def get_parameterized_completion(cache, name):
+ """
+ :type cache: dict[str, dict[str, str]]
+ :type name: str
+ :rtype: dict[str, dict[str, str]]
+ """
+ if not cache:
+ if data_context().content.collection:
+ context = 'collection'
+ else:
+ context = 'ansible-base'
+
+ images = read_lines_without_comments(os.path.join(ANSIBLE_TEST_DATA_ROOT, 'completion', '%s.txt' % name), remove_blank_lines=True)
+
+ cache.update(dict(kvp for kvp in [parse_parameterized_completion(i) for i in images] if kvp and kvp[1].get('context', context) == context))
+
+ return cache
+
+
+def parse_parameterized_completion(value): # type: (str) -> t.Optional[t.Tuple[str, t.Dict[str, str]]]
+ """Parse the given completion entry, returning the entry name and a dictionary of key/value settings."""
+ values = value.split()
+
+ if not values:
+ return None
+
+ name = values[0]
+ data = dict((kvp[0], kvp[1] if len(kvp) > 1 else '') for kvp in [item.split('=', 1) for item in values[1:]])
+
+ return name, data
+
+
+def docker_qualify_image(name):
+ """
+ :type name: str
+ :rtype: str
+ """
+ config = get_docker_completion().get(name, {})
+
+ return config.get('name', name)
+
+
+def get_network_settings(args, platform, version): # type: (NetworkIntegrationConfig, str, str) -> NetworkPlatformSettings
+ """Returns settings for the given network platform and version."""
+ platform_version = '%s/%s' % (platform, version)
+ completion = get_network_completion().get(platform_version, {})
+ collection = args.platform_collection.get(platform, completion.get('collection'))
+
+ settings = NetworkPlatformSettings(
+ collection,
+ dict(
+ ansible_connection=args.platform_connection.get(platform, completion.get('connection')),
+ ansible_network_os='%s.%s' % (collection, platform) if collection else platform,
+ )
+ )
+
+ return settings
+
+
+def handle_layout_messages(messages): # type: (t.Optional[LayoutMessages]) -> None
+ """Display the given layout messages."""
+ if not messages:
+ return
+
+ for message in messages.info:
+ display.info(message, verbosity=1)
+
+ for message in messages.warning:
+ display.warning(message)
+
+ if messages.error:
+ raise ApplicationError('\n'.join(messages.error))
+
+
+@contextlib.contextmanager
+def named_temporary_file(args, prefix, suffix, directory, content):
+ """
+ :type args: CommonConfig
+ :type prefix: str
+ :type suffix: str
+ :type directory: str
+ :type content: str | bytes | unicode
+ :rtype: str
+ """
+ if args.explain:
+ yield os.path.join(directory, '%stemp%s' % (prefix, suffix))
+ else:
+ with tempfile.NamedTemporaryFile(prefix=prefix, suffix=suffix, dir=directory) as tempfile_fd:
+ tempfile_fd.write(to_bytes(content))
+ tempfile_fd.flush()
+
+ yield tempfile_fd.name
+
+
+def write_json_test_results(category, # type: ResultType
+ name, # type: str
+ content, # type: t.Union[t.List[t.Any], t.Dict[str, t.Any]]
+ formatted=True, # type: bool
+ encoder=None, # type: t.Optional[t.Callable[[t.Any], t.Any]]
+ ): # type: (...) -> None
+ """Write the given json content to the specified test results path, creating directories as needed."""
+ path = os.path.join(category.path, name)
+ write_json_file(path, content, create_directories=True, formatted=formatted, encoder=encoder)
+
+
+def write_text_test_results(category, name, content): # type: (ResultType, str, str) -> None
+ """Write the given text content to the specified test results path, creating directories as needed."""
+ path = os.path.join(category.path, name)
+ write_text_file(path, content, create_directories=True)
+
+
+def get_python_path(args, interpreter):
+ """
+ :type args: TestConfig
+ :type interpreter: str
+ :rtype: str
+ """
+ python_path = PYTHON_PATHS.get(interpreter)
+
+ if python_path:
+ return python_path
+
+ prefix = 'python-'
+ suffix = '-ansible'
+
+ root_temp_dir = '/tmp'
+
+ if args.explain:
+ return os.path.join(root_temp_dir, ''.join((prefix, 'temp', suffix)))
+
+ python_path = tempfile.mkdtemp(prefix=prefix, suffix=suffix, dir=root_temp_dir)
+ injected_interpreter = os.path.join(python_path, 'python')
+
+ # A symlink is faster than the execv wrapper, but isn't compatible with virtual environments.
+ # Attempt to detect when it is safe to use a symlink by checking the real path of the interpreter.
+ use_symlink = os.path.dirname(os.path.realpath(interpreter)) == os.path.dirname(interpreter)
+
+ if use_symlink:
+ display.info('Injecting "%s" as a symlink to the "%s" interpreter.' % (injected_interpreter, interpreter), verbosity=1)
+
+ os.symlink(interpreter, injected_interpreter)
+ else:
+ display.info('Injecting "%s" as a execv wrapper for the "%s" interpreter.' % (injected_interpreter, interpreter), verbosity=1)
+
+ create_interpreter_wrapper(interpreter, injected_interpreter)
+
+ os.chmod(python_path, MODE_DIRECTORY)
+
+ if not PYTHON_PATHS:
+ atexit.register(cleanup_python_paths)
+
+ PYTHON_PATHS[interpreter] = python_path
+
+ return python_path
+
+
+def create_temp_dir(prefix=None, suffix=None, base_dir=None): # type: (t.Optional[str], t.Optional[str], t.Optional[str]) -> str
+ """Create a temporary directory that persists until the current process exits."""
+ temp_path = tempfile.mkdtemp(prefix=prefix or 'tmp', suffix=suffix or '', dir=base_dir)
+ atexit.register(remove_tree, temp_path)
+ return temp_path
+
+
+def create_interpreter_wrapper(interpreter, injected_interpreter): # type: (str, str) -> None
+ """Create a wrapper for the given Python interpreter at the specified path."""
+ # sys.executable is used for the shebang to guarantee it is a binary instead of a script
+ # injected_interpreter could be a script from the system or our own wrapper created for the --venv option
+ shebang_interpreter = sys.executable
+
+ code = textwrap.dedent('''
+ #!%s
+
+ from __future__ import absolute_import
+
+ from os import execv
+ from sys import argv
+
+ python = '%s'
+
+ execv(python, [python] + argv[1:])
+ ''' % (shebang_interpreter, interpreter)).lstrip()
+
+ write_text_file(injected_interpreter, code)
+
+ os.chmod(injected_interpreter, MODE_FILE_EXECUTE)
+
+
+def cleanup_python_paths():
+ """Clean up all temporary python directories."""
+ for path in sorted(PYTHON_PATHS.values()):
+ display.info('Cleaning up temporary python directory: %s' % path, verbosity=2)
+ shutil.rmtree(path)
+
+
+def get_coverage_environment(args, target_name, version, temp_path, module_coverage, remote_temp_path=None):
+ """
+ :type args: TestConfig
+ :type target_name: str
+ :type version: str
+ :type temp_path: str
+ :type module_coverage: bool
+ :type remote_temp_path: str | None
+ :rtype: dict[str, str]
+ """
+ if temp_path:
+ # integration tests (both localhost and the optional testhost)
+ # config and results are in a temporary directory
+ coverage_config_base_path = temp_path
+ coverage_output_base_path = temp_path
+ elif args.coverage_config_base_path:
+ # unit tests, sanity tests and other special cases (localhost only)
+ # config is in a temporary directory
+ # results are in the source tree
+ coverage_config_base_path = args.coverage_config_base_path
+ coverage_output_base_path = os.path.join(data_context().content.root, data_context().content.results_path)
+ else:
+ raise Exception('No temp path and no coverage config base path. Check for missing coverage_context usage.')
+
+ config_file = os.path.join(coverage_config_base_path, COVERAGE_CONFIG_NAME)
+ coverage_file = os.path.join(coverage_output_base_path, ResultType.COVERAGE.name, '%s=%s=%s=%s=coverage' % (
+ args.command, target_name, args.coverage_label or 'local-%s' % version, 'python-%s' % version))
+
+ if not args.explain and not os.path.exists(config_file):
+ raise Exception('Missing coverage config file: %s' % config_file)
+
+ if args.coverage_check:
+ # cause the 'coverage' module to be found, but not imported or enabled
+ coverage_file = ''
+
+ # Enable code coverage collection on local Python programs (this does not include Ansible modules).
+ # Used by the injectors to support code coverage.
+ # Used by the pytest unit test plugin to support code coverage.
+ # The COVERAGE_FILE variable is also used directly by the 'coverage' module.
+ env = dict(
+ COVERAGE_CONF=config_file,
+ COVERAGE_FILE=coverage_file,
+ )
+
+ if module_coverage:
+ # Enable code coverage collection on Ansible modules (both local and remote).
+ # Used by the AnsiballZ wrapper generator in lib/ansible/executor/module_common.py to support code coverage.
+ env.update(dict(
+ _ANSIBLE_COVERAGE_CONFIG=config_file,
+ _ANSIBLE_COVERAGE_OUTPUT=coverage_file,
+ ))
+
+ if remote_temp_path:
+ # Include the command, target and label so the remote host can create a filename with that info. The remote
+ # is responsible for adding '={language version}=coverage.{hostname}.{pid}.{id}'
+ env['_ANSIBLE_COVERAGE_REMOTE_OUTPUT'] = os.path.join(remote_temp_path, '%s=%s=%s' % (
+ args.command, target_name, args.coverage_label or 'remote'))
+ env['_ANSIBLE_COVERAGE_REMOTE_WHITELIST'] = os.path.join(data_context().content.root, '*')
+
+ return env
+
+
+def intercept_command(args, cmd, target_name, env, capture=False, data=None, cwd=None, python_version=None, temp_path=None, module_coverage=True,
+ virtualenv=None, disable_coverage=False, remote_temp_path=None):
+ """
+ :type args: TestConfig
+ :type cmd: collections.Iterable[str]
+ :type target_name: str
+ :type env: dict[str, str]
+ :type capture: bool
+ :type data: str | None
+ :type cwd: str | None
+ :type python_version: str | None
+ :type temp_path: str | None
+ :type module_coverage: bool
+ :type virtualenv: str | None
+ :type disable_coverage: bool
+ :type remote_temp_path: str | None
+ :rtype: str | None, str | None
+ """
+ if not env:
+ env = common_environment()
+ else:
+ env = env.copy()
+
+ cmd = list(cmd)
+ version = python_version or args.python_version
+ interpreter = virtualenv or find_python(version)
+ inject_path = os.path.join(ANSIBLE_TEST_DATA_ROOT, 'injector')
+
+ if not virtualenv:
+ # injection of python into the path is required when not activating a virtualenv
+ # otherwise scripts may find the wrong interpreter or possibly no interpreter
+ python_path = get_python_path(args, interpreter)
+ inject_path = python_path + os.path.pathsep + inject_path
+
+ env['PATH'] = inject_path + os.path.pathsep + env['PATH']
+ env['ANSIBLE_TEST_PYTHON_VERSION'] = version
+ env['ANSIBLE_TEST_PYTHON_INTERPRETER'] = interpreter
+
+ if args.coverage and not disable_coverage:
+ # add the necessary environment variables to enable code coverage collection
+ env.update(get_coverage_environment(args, target_name, version, temp_path, module_coverage,
+ remote_temp_path=remote_temp_path))
+
+ return run_command(args, cmd, capture=capture, env=env, data=data, cwd=cwd)
+
+
+def resolve_csharp_ps_util(import_name, path):
+ """
+ :type import_name: str
+ :type path: str
+ """
+ if data_context().content.is_ansible or not import_name.startswith('.'):
+ # We don't support relative paths for builtin utils; there's no point.
+ return import_name
+
+ packages = import_name.split('.')
+ module_packages = path.split(os.path.sep)
+
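+ # each empty leading segment (one per '.' in the relative import) strips one trailing component from the module's path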
+ for package in packages:
+ if not module_packages or package:
+ break
+ del module_packages[-1]
+
+ return 'ansible_collections.%s%s' % (data_context().content.prefix,
+ '.'.join(module_packages + [p for p in packages if p]))
+
+
+def run_command(args, cmd, capture=False, env=None, data=None, cwd=None, always=False, stdin=None, stdout=None,
+ cmd_verbosity=1, str_errors='strict'):
+ """
+ :type args: CommonConfig
+ :type cmd: collections.Iterable[str]
+ :type capture: bool
+ :type env: dict[str, str] | None
+ :type data: str | None
+ :type cwd: str | None
+ :type always: bool
+ :type stdin: file | None
+ :type stdout: file | None
+ :type cmd_verbosity: int
+ :type str_errors: str
+ :rtype: str | None, str | None
+ """
+ explain = args.explain and not always
+ return raw_command(cmd, capture=capture, env=env, data=data, cwd=cwd, explain=explain, stdin=stdin, stdout=stdout,
+ cmd_verbosity=cmd_verbosity, str_errors=str_errors)
diff --git a/test/lib/ansible_test/_internal/venv.py b/test/lib/ansible_test/_internal/venv.py
new file mode 100644
index 00000000..37eef367
--- /dev/null
+++ b/test/lib/ansible_test/_internal/venv.py
@@ -0,0 +1,227 @@
+"""Virtual environment management."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+import sys
+
+from . import types as t
+
+from .config import (
+ EnvironmentConfig,
+)
+
+from .util import (
+ find_python,
+ SubprocessError,
+ get_available_python_versions,
+ SUPPORTED_PYTHON_VERSIONS,
+ ANSIBLE_TEST_DATA_ROOT,
+ display,
+ remove_tree,
+)
+
+from .util_common import (
+ run_command,
+)
+
+
+def create_virtual_environment(args, # type: EnvironmentConfig
+ version, # type: str
+ path, # type: str
+ system_site_packages=False, # type: bool
+ pip=True, # type: bool
+ ): # type: (...) -> bool
+ """Create a virtual environment using venv or virtualenv for the requested Python version."""
+ if os.path.isdir(path):
+ display.info('Using existing Python %s virtual environment: %s' % (version, path), verbosity=1)
+ return True
+
+ python = find_python(version, required=False)
+ python_version = tuple(int(v) for v in version.split('.'))
+
+ if not python:
+ # the requested python version could not be found
+ return False
+
+ if python_version >= (3, 0):
+ # use the built-in 'venv' module on Python 3.x
+ # creating a virtual environment using 'venv' when running in a virtual environment created by 'virtualenv' results
+ # in a copy of the original virtual environment instead of creation of a new one
+ # avoid this issue by only using "real" python interpreters to invoke 'venv'
+ for real_python in iterate_real_pythons(args, version):
+ if run_venv(args, real_python, system_site_packages, pip, path):
+ display.info('Created Python %s virtual environment using "venv": %s' % (version, path), verbosity=1)
+ return True
+
+ # something went wrong, most likely the package maintainer for the Python installation removed ensurepip
+ # which will prevent creation of a virtual environment without installation of other OS packages
+
+ # use the installed 'virtualenv' module on the requested Python version
+ if run_virtualenv(args, python, python, system_site_packages, pip, path):
+ display.info('Created Python %s virtual environment using "virtualenv": %s' % (version, path), verbosity=1)
+ return True
+
+ available_pythons = get_available_python_versions(SUPPORTED_PYTHON_VERSIONS)
+
+ for available_python_version, available_python_interpreter in sorted(available_pythons.items()):
+ virtualenv_version = get_virtualenv_version(args, available_python_interpreter)
+
+ if not virtualenv_version:
+ # virtualenv not available for this Python or we were unable to detect the version
+ continue
+
+ if python_version == (2, 6) and virtualenv_version >= (16, 0, 0):
+ # virtualenv 16.0.0 dropped python 2.6 support: https://virtualenv.pypa.io/en/latest/changes/#v16-0-0-2018-05-16
+ continue
+
+ # try using 'virtualenv' from another Python to set up the desired version
+ if run_virtualenv(args, available_python_interpreter, python, system_site_packages, pip, path):
+ display.info('Created Python %s virtual environment using "virtualenv" on Python %s: %s' % (version, available_python_version, path), verbosity=1)
+ return True
+
+ # no suitable 'virtualenv' available
+ return False
+
+
+def iterate_real_pythons(args, version): # type: (EnvironmentConfig, str) -> t.Iterable[str]
+ """
+ Iterate through available real python interpreters of the requested version.
+ The current interpreter will be checked and then the path will be searched.
+ """
+ version_info = tuple(int(n) for n in version.split('.'))
+ current_python = None
+
+ if version_info == sys.version_info[:len(version_info)]:
+ current_python = sys.executable
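+ # if the current interpreter lives inside a 'virtualenv' environment, switch to the underlying real interpreter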
+ real_prefix = get_python_real_prefix(args, current_python)
+
+ if real_prefix:
+ current_python = find_python(version, os.path.join(real_prefix, 'bin'))
+
+ if current_python:
+ yield current_python
+
+ path = os.environ.get('PATH', os.path.defpath)
+
+ if not path:
+ return
+
+ found_python = find_python(version, path)
+
+ if not found_python:
+ return
+
+ if found_python == current_python:
+ return
+
+ real_prefix = get_python_real_prefix(args, found_python)
+
+ if real_prefix:
+ found_python = find_python(version, os.path.join(real_prefix, 'bin'))
+
+ if found_python:
+ yield found_python
+
+
+def get_python_real_prefix(args, path): # type: (EnvironmentConfig, str) -> t.Optional[str]
+ """
+ Return the real prefix of the specified interpreter or None if the interpreter is not a virtual environment created by 'virtualenv'.
+ """
+ cmd = [path, os.path.join(ANSIBLE_TEST_DATA_ROOT, 'virtualenvcheck.py')]
+ check_result = json.loads(run_command(args, cmd, capture=True, always=True)[0])
+ real_prefix = check_result['real_prefix']
+ return real_prefix
+
+
+def run_venv(args, # type: EnvironmentConfig
+ run_python, # type: str
+ system_site_packages, # type: bool
+ pip, # type: bool
+ path, # type: str
+ ): # type: (...) -> bool
+ """Create a virtual environment using the 'venv' module. Not available on Python 2.x."""
+ cmd = [run_python, '-m', 'venv']
+
+ if system_site_packages:
+ cmd.append('--system-site-packages')
+
+ if not pip:
+ cmd.append('--without-pip')
+
+ cmd.append(path)
+
+ try:
+ run_command(args, cmd, capture=True)
+ except SubprocessError as ex:
+ remove_tree(path)
+
+ if args.verbosity > 1:
+ display.error(ex)
+
+ return False
+
+ return True
+
+
+def run_virtualenv(args, # type: EnvironmentConfig
+ run_python, # type: str
+ env_python, # type: str
+ system_site_packages, # type: bool
+ pip, # type: bool
+ path, # type: str
+ ): # type: (...) -> bool
+ """Create a virtual environment using the 'virtualenv' module."""
+ # always specify --python to guarantee the desired interpreter is provided
+ # otherwise virtualenv may select a different interpreter than the one running virtualenv
+ cmd = [run_python, '-m', 'virtualenv', '--python', env_python]
+
+ if system_site_packages:
+ cmd.append('--system-site-packages')
+
+ if not pip:
+ cmd.append('--no-pip')
+
+ cmd.append(path)
+
+ try:
+ run_command(args, cmd, capture=True)
+ except SubprocessError as ex:
+ remove_tree(path)
+
+ if args.verbosity > 1:
+ display.error(ex)
+
+ return False
+
+ return True
+
+
+def get_virtualenv_version(args, python): # type: (EnvironmentConfig, str) -> t.Optional[t.Tuple[int, ...]]
+ """Get the virtualenv version for the given python intepreter, if available."""
+ try:
+ return get_virtualenv_version.result
+ except AttributeError:
+ pass
+
+ get_virtualenv_version.result = None
+
+ cmd = [python, '-m', 'virtualenv', '--version']
+
+ try:
+ stdout = run_command(args, cmd, capture=True)[0]
+ except SubprocessError as ex:
+ if args.verbosity > 1:
+ display.error(ex)
+
+ stdout = ''
+
+ if stdout:
+ # noinspection PyBroadException
+ try:
+ get_virtualenv_version.result = tuple(int(v) for v in stdout.strip().split('.'))
+ except Exception: # pylint: disable=broad-except
+ pass
+
+ return get_virtualenv_version.result